Dataset columns:

  repo_name : string, length 6 to 100
  path      : string, length 4 to 191
  copies    : string, length 1 to 3
  size      : string, length 4 to 6
  content   : string, length 935 to 727k
  license   : string, 15 distinct classes
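Each record below follows this schema: a repository name, a file path, a copies count, a size, the raw file content as a single string, and a license identifier. As a minimal sketch of how such a dump could be loaded and inspected with the Hugging Face datasets library — assuming the dump is published as a dataset; the identifier "user/python-code-dump" and the "train" split name are placeholders, not the real names:

from datasets import load_dataset

# Placeholder identifier -- substitute the actual dataset repository name.
ds = load_dataset("user/python-code-dump", split="train")

# Each record carries the six columns described above.
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])

# The source file itself is stored verbatim as a single string.
print(row["content"][:200])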
repo_name: AlexRobson/scikit-learn
path: examples/linear_model/plot_omp.py
copies: 385
size: 2263
content:

"""
===========================
Orthogonal Matching Pursuit
===========================

Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal

n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
###################

# y = Xw
# |x|_0 = n_nonzero_coefs

y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)

idx, = w.nonzero()

# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()

license: bsd-3-clause
repo_name: shl198/Pipeline
path: RibosomeProfilePipeline/06_Proteomaps.py
copies: 2
size: 24831
content:
from __future__ import division import os import pandas as pd from natsort import natsorted from f02_RiboDataModule import * from Modules.f05_IDConvert import addGeneIDorNameForDESeqResult signalP_path = '/data/shangzhong/RibosomeProfiling/signalP_part' ribo_bam_path = '/data/shangzhong/RibosomeProfiling/Ribo_align/bam' ribo_gene_count_path = ribo_bam_path + '/04_gene_total_count' rna_bam_path = '/data/shangzhong/RibosomeProfiling/TotalRNA_align' rna_gene_count_path = rna_bam_path + '/01_gene_count' #=============================================================================== # 1. generate count data (rpm) for day3 and day6 proteomaps #=============================================================================== # =========== convert cho genes to mouse genes ======================== def splitMouseIDs(gene_id): if ';' in gene_id: return gene_id.split(';') else: return gene_id.split(',') def cho2mouseID(dic,gene): if gene in dic: return dic[gene] else: return '0' def choID2mouseID_dic(cho2musFile,other_dict): """This function read cho mouse gene id mapping file and tranfer it to dictionary * cho2musFile: str. Filename has 2 columns, 1st is cho gene id, 2nd has mouse gene id. * other_dict: dictionary. The addition gene id mapping added to the final dictionary. """ cho2mus_df = pd.read_csv(cho2musFile,header=None,sep='\t',names=['cho','mus']) cho2mus_df = cho2mus_df.astype(str) cho2mus_df['mus'] = cho2mus_df['mus'].map(lambda x: splitMouseIDs(x)) cho_mus_dict = cho2mus_df.set_index('cho')['mus'].to_dict() for key in other_dict: if key in cho_mus_dict: continue else: cho_mus_dict[key] = other_dict[key] return cho_mus_dict def cho_2mouse_count(gene_count_df,cho_geneIDs,cho_mus_dict): """ This function change the count dataframe with cho gene id to mouse id * gene_count_df: df. * cho_geneIDs: list. A list of cho_geneIDs * cho_mus_dict: dictionary. {choid:[mouse ids]} return mouse_df """ n = 0 columns = gene_count_df.columns.tolist() mouse_df = pd.DataFrame(columns=columns) for gene in cho_geneIDs: if gene in cho_mus_dict: mus_gene = cho_mus_dict[gene] else: n = n + 1 print gene,'is not in the cho_mus_dictionary' continue unit_count = (gene_count_df.loc[gene]/float(len(mus_gene))).tolist() for g in mus_gene: if g not in mouse_df.index: mouse_df.loc[g] = unit_count else: mouse_df.loc[g] = mouse_df.loc[g] + unit_count return mouse_df def get_func_gene_dic(tree_map_file,cho_gene_id_file,cho_mouse_map_file): """ This function generates dictionary {function category:[cho ids]} for proteomaps. Proteomaps only has mouse id, so we need to include cho mouse gene id mapping. * tree_map_file: tree map generated in proteomaps. Mus musculus (adapted by SL 20151219).tmd * cho_gene_percent_rep_file: str. the only requirement is that the first column is gene id * cho_mouse_map_file: str. File that has cho mouse gene id mapping. return two dictionaries func_gene_dic, gene_multi_fuc. The second one stores genes that map to many functional categories. """ #----------- 1). read the function, kegg, geneid dataframe ---------------- fn = tree_map_file func_k_g_df = pd.read_csv(fn,sep='\t',header=0,usecols=[2,4,5],names=['func','kegg','geneid']) func_k_g_df = func_k_g_df.dropna() func_k_g_df['geneid'] = func_k_g_df['geneid'].map(lambda x: x.split(':')[1]) #----------- 2). read the gene id file --------------------- cho_gene_perc_file = cho_gene_id_file cho_gene_perc_df = pd.read_csv(cho_gene_perc_file,sep='\t',header=0,index_col=0) #----------- 3). 
generate dictionary {cho:mouse} ------------------------ cho2musFile = cho_mouse_map_file other_dict = {'heavychain':['heavychain'],'lightchain':['lightchain'],'NeoRKanR':['NeoRKanR']} cho_mus_dict = choID2mouseID_dic(cho2musFile,other_dict) #----------- 4). get category percentage -------------- funcs = list(set(func_k_g_df['func'].tolist())) func_gene_dic = {} for f in funcs: func_gene_dic[f] = [] genes = list(set(cho_gene_perc_df.index)) gene_multi_func = {} # store genes that map to multiple categories for g in genes: try: mouse = cho_mus_dict[g] except: #print g,'fail to map to mouse gene' continue cri = func_k_g_df['geneid'].map(lambda x: x in mouse) mouse_df = func_k_g_df[cri] if mouse_df.empty: #print mouse,'not in proteomap tree' continue categories = list(set(mouse_df['func'].tolist())) # if length is 1, it means cho gene map to category uniquely if len(categories) == 1: cate = categories[0] func_gene_dic[cate].append(g) else: print g,'maps to',categories gene_multi_func[g]=categories return func_gene_dic,gene_multi_func # # 1). generate dictionary {cho:mouse} # cho2musFile = '/data/shangzhong/CHO2Mouse/finalMergeWithmRNA.final.txt' # cho2mus_df = pd.read_csv(cho2musFile,header=None,sep='\t',names=['cho','mus']) # cho2mus_df = cho2mus_df.astype(str) # cho2mus_df['mus'] = cho2mus_df['mus'].map(lambda x: splitMouseIDs(x)) # cho_mus_dict = cho2mus_df.set_index('cho')['mus'].to_dict() # cho_mus_dict['heavychain'] = ['heavychain'] # cho_mus_dict['lightchain'] = ['lightchain'] # cho_mus_dict['NeoRKanR'] = ['NeoRKanR'] # # 2). read rpm data data # gene_countFile = signalP_path + '/08_ribo_rna_rpm.csv' # gene_count_df = pd.read_csv(gene_countFile,sep='\t',header=0,index_col=0,names=['geneid','ribo_day3','ribo_day6','rna_day3','rna_day6']) # cho_geneIDs = gene_count_df.index.astype(str).tolist() # # mouse_df = cho_2mouse_count(gene_count_df,cho_geneIDs,cho_mus_dict) # mouse_df[['ribo_day3']].to_csv(signalP_path+'/09_day3_rpm.csv',sep='\t',header=None) # mouse_df[['ribo_day6']].to_csv(signalP_path+'/09_day6_rpm.csv',sep='\t',header=None) # mouse_df.to_csv(signalP_path+'/10_mouse_ribo_rna_rpm.csv',sep='\t') # # # 3) build replicate count dataframe # os.chdir(ribo_gene_count_path) # ribo_count_files = [f for f in os.listdir(ribo_gene_count_path) if f.endswith('Count.txt')] # ribo_count_files = natsorted(ribo_count_files) # dfs = [] # for f in ribo_count_files: # df = pd.read_csv(f,header=0,sep='\t',index_col=0,usecols=[0,1],names=['geneid','ribo_'+f[:3]]) # dfs.append(df) # # os.chdir(rna_gene_count_path) # rna_count_files = [f for f in os.listdir(rna_gene_count_path) if f.endswith('Count.txt')] # rna_count_files = natsorted(rna_count_files) # for f in rna_count_files: # df = pd.read_csv(f,header=0,sep='\t',index_col=0,usecols=[0,1],names=['geneid','rna_'+f[:3]]) # dfs.append(df) # ribo_rna_count_df = pd.concat(dfs,axis=1,join='inner') # # mouse_df = cho_2mouse_count(ribo_rna_count_df,cho_geneIDs,cho_mus_dict) # mouse_df.to_csv(signalP_path+'/10_mouse_ribo_rna_count.csv',sep='\t') # #----------- calculate each gene percentage for each replicate ------------ # total = ribo_rna_count_df.sum().tolist() # percent_df = ribo_rna_count_df.div(total) # columns = ['ribo_day3_rep1','ribo_day3_rep2','ribo_day3_rep3', # 'ribo_day6_rep1','ribo_day6_rep2','ribo_day6_rep3', # 'rna_day3_rep1','rna_day3_rep2','rna_day3_rep3', # 'rna_day6_rep1','rna_day6_rep2','rna_day6_rep3'] # percent_df.columns = columns # percent_df['ribo_day3_mean'] = percent_df.iloc[:,0:3].mean(axis=1) # percent_df['ribo_day6_mean'] = 
percent_df.iloc[:,3:6].mean(axis=1) # percent_df['rna_day3_mean'] = percent_df.iloc[:,6:9].mean(axis=1) # percent_df['rna_day6_mean'] = percent_df.iloc[:,9:12].mean(axis=1) # # percent_df['ribo_day3_std'] = percent_df.iloc[:,0:3].std(axis=1) # percent_df['ribo_day6_std'] = percent_df.iloc[:,3:6].std(axis=1) # percent_df['rna_day3_std'] = percent_df.iloc[:,6:9].std(axis=1) # percent_df['rna_day6_std'] = percent_df.iloc[:,9:12].std(axis=1) # outFile = signalP_path + '/12_cho_gene_percent_rep.csv' # percent_df.to_csv(outFile,sep='\t') # # add gene symbol to the file # MapFile = '/data/shangzhong/Database/cho/gff_chok1_ID_symbol.txt' # addGeneIDorNameForDESeqResult(signalP_path + '/12_cho_gene_percent_rep.csv',MapFile,addType='gene_symbol',IDVersion='no') ######## the following is faster, but needs more code. # # # # ribo_day3_dict = {}; ribo_day6_dict={}; rna_day3_dict = {}; rna_day6_dict = {} # # # # for gene in cho_geneIDs: # # # # if gene in cho_mus_dict: # # # # mus_gene = cho_mus_dict[gene] # # # # else: # # # # n = n + 1 # # # # print gene,'is not in the cho_mus_dictionary' # # # # continue # # # # # # # # per_count3 = gene_count_df.loc[gene,'ribo_day3']/len(mus_gene) # evenly distribute to all the mus_gene # # # # per_count6 = gene_count_df.loc[gene,'ribo_day6']/len(mus_gene) # # # # rna_count3 = gene_count_df.loc[gene,'rna_day3']/len(mus_gene) # # # # rna_count6 = gene_count_df.loc[gene,'rna_day6']/len(mus_gene) # # # # for mus in mus_gene: # # # # if mus not in ribo_day3_dict: # # # # ribo_day3_dict[mus] = per_count3 # # # # ribo_day6_dict[mus] = per_count6 # # # # rna_day3_dict[mus] = rna_count3 # # # # rna_day6_dict[mus] = rna_count6 # # # # else: # # # # ribo_day3_dict[mus] = ribo_day3_dict[mus] + per_count3 # # # # ribo_day6_dict[mus] = ribo_day6_dict[mus] + per_count6 # # # # rna_day3_dict[mus] = rna_day3_dict[mus] + rna_count3 # # # # rna_day6_dict[mus] = rna_day6_dict[mus] + rna_count6 # # # # ribo_day3_df = pd.DataFrame(ribo_day3_dict.items(),columns=['gene','ribo_day3']) # # # # ribo_day6_df = pd.DataFrame(ribo_day6_dict.items(),columns=['gene','ribo_day6']) # # # # rna_day3_df = pd.DataFrame(rna_day3_dict.items(),columns=['gene','rna_day3']) # # # # rna_day6_df = pd.DataFrame(rna_day6_dict.items(),columns=['gene','rna_day6']) # # # # ribo_day3_df = ribo_day3_df.set_index(['gene']) # # # # ribo_day6_df = ribo_day6_df.set_index(['gene']) # # # # rna_day3_df = rna_day3_df.set_index(['gene']) # # # # rna_day6_df = rna_day6_df.set_index(['gene']) # # # # # # # # res_df = pd.concat([ribo_day3_df,ribo_day6_df,rna_day3_df,rna_day6_df],axis=1) # # # # ribo_day3_df.to_csv(signalP_path+'/09_day3_rpm.csv',sep='\t',header=None) # # # # ribo_day6_df.to_csv(signalP_path+'/09_day6_rpm.csv',sep='\t',header=None) # # # # res_df.to_csv(signalP_path+'/10_mouse_ribo_rna_rpm.csv',sep='\t') # # # # print n # #=============================================================================== # # 2. functional protein class percentage (percentage of each broad term) # # this calculation is based on mouse id that only maps to proteomaps. The next part is based on cho id. # #=============================================================================== # #--------------- 1. 
read tree file get dict {function:[kegg ids]}------------------ # treeFile = '/data/shangzhong/RibosomeProfiling/figures/Proteomaps/KO_gene_hierarchy_2014.tms' # handle = open(treeFile,'r') # fun_kegg_dic = {} # func = '' # for line in handle: # item = line.rstrip().split('\t') # if len(item) == 1 or len(item) == 3: # continue # if len(item) == 2: # func = item[1] # if len(item) == 4: # if func in fun_kegg_dic: # fun_kegg_dic[func].append(item[3]) # else: # function not in the dictionary # fun_kegg_dic[func] = [] # #--------------- 2. read kegg gene mapping file get dict {kegg id:[gene ids]}------------------ # kegg_gene_file = '/data/shangzhong/RibosomeProfiling/figures/Proteomaps/mmu_mapping.csv' # k_g_df = pd.read_csv(kegg_gene_file,sep='\t',header=None,names=['geneid','genename','kegg','fullname']) # k_g_df['geneid'] = k_g_df['geneid'].astype(str) # k_g_dic = {k:list(v) for k,v in k_g_df.groupby('kegg')['geneid']} # g_k_df = k_g_df[['geneid','kegg']] # #--------------- 3. build function:gene dictionary --------------- # func_gene_dic = {} # n = 0 # m = 0 # for func in fun_kegg_dic: # func_gene_dic[func] = [] # keggs = fun_kegg_dic[func] # for k in keggs: # try: # genes = k_g_dic[k] # func_gene_dic[func].extend(genes) # except: # n = n + 1 # print k,'not in the k_g_dic' # func_gene_dic has more kegg than k_g_dic # func_gene_dic['heavychain'] = ['heavychain'] # func_gene_dic['lightchain'] = ['lightchain'] # func_gene_dic['NeoRKanR'] = ['NeoRKanR'] # db_genes = [] # for key in func_gene_dic: # db_genes.extend(func_gene_dic[key]) # gene_func_dic = {value:key for key, values in func_gene_dic.iteritems() for value in values} # gene_func_df = pd.DataFrame(gene_func_dic.items(),columns=['geneid','category']) # g_k_func_df = pd.merge(g_k_df,gene_func_df,on='geneid') # g_k_func_df.to_csv(signalP_path + '/14_mouse_gene_kegg_func.csv',sep='\t',index=False) # #---------------- 4. calculate percentage for each category in proteomaps------- # # This calculation is based on the abundant file that is submitted to proteompas, so the replicates are already merged. # ribo_rna_rpm_file = signalP_path +'/10_mouse_ribo_rna_rpm.csv' # ribo_rna_rpm_df = pd.read_csv(ribo_rna_rpm_file,sep='\t',header=0,index_col=0) # ribo_rna_rpm_df = ribo_rna_rpm_df[ribo_rna_rpm_df.index.isin(db_genes)] # total = ribo_rna_rpm_df.sum().tolist() # outFile = signalP_path + '/11_proteomap_percentage.csv' # handle = open(outFile,'w') # handle.write('\t'.join(['Category','ribo_day3','ribo_day6','rna_day3','rna_day6'])+'\n') # for func in func_gene_dic: # genes = func_gene_dic[func] # sub_df = ribo_rna_rpm_df[ribo_rna_rpm_df.index.isin(genes)] # if sub_df.empty: # continue # percent = ((sub_df.sum()).div(total)).tolist() # percent = [str(p) for p in percent] # handle.write(func+'\t'+'\t'.join(percent)+'\n') # handle.close() # df = pd.read_csv(outFile,sep='\t',header=0,index_col=0) # df = df.sort(['ribo_day3'],ascending=False) # df.to_csv(outFile,sep='\t') # #----------------- 5. calculate the percentage for each replicate --------- # # This calculation is based on the raw count data of each replicate. 
# ribo_rna_count_file = signalP_path + '/10_mouse_ribo_rna_count.csv' # ribo_rna_count_df = pd.read_csv(ribo_rna_count_file,sep='\t',header=0,index_col=0) # ribo_rna_count_df = ribo_rna_count_df[ribo_rna_count_df.index.isin(db_genes)] # total = ribo_rna_count_df.sum().tolist() # gene_percent_df = ribo_rna_count_df.div(total) # columns = ['ribo_day3_rep1','ribo_day3_rep2','ribo_day3_rep3', # 'ribo_day6_rep1','ribo_day6_rep2','ribo_day6_rep3', # 'rna_day3_rep1','rna_day3_rep2','rna_day3_rep3', # 'rna_day6_rep1','rna_day6_rep2','rna_day6_rep3'] # gene_percent_df.columns = columns # # outFile = signalP_path + '/11_proteomap_percentage_rep.csv' # handle = open(outFile,'w') # handle.write('Category'+'\t'+'\t'.join(columns)+'\n') # for func in func_gene_dic: # genes = func_gene_dic[func] # sub_df = ribo_rna_count_df[ribo_rna_count_df.index.isin(genes)] # if sub_df.empty: # continue # percent = ((sub_df.sum()).div(total)).tolist() # percent = [str(p) for p in percent] # handle.write(func+'\t'+'\t'.join(percent)+'\n') # handle.close() # df = pd.read_csv(outFile,sep='\t',header=0,index_col=0) # df = df.sort([columns[0]],ascending=False) # # df['ribo_day3_std'] = df.iloc[:,0:3].std(axis=1) # df['ribo_day6_std'] = df.iloc[:,3:6].std(axis=1) # df['rna_day3_std'] = df.iloc[:,6:9].std(axis=1) # df['rna_day6_std'] = df.iloc[:,9:12].std(axis=1) # df.to_csv(outFile,sep='\t') # # gene_percent_df['ribo_day3_std'] = gene_percent_df.iloc[:,0:3].std(axis=1) # gene_percent_df['ribo_day6_std'] = gene_percent_df.iloc[:,3:6].std(axis=1) # gene_percent_df['rna_day3_std'] = gene_percent_df.iloc[:,6:9].std(axis=1) # gene_percent_df['rna_day6_std'] = gene_percent_df.iloc[:,9:12].std(axis=1) # gene_percent_df.to_csv(signalP_path + '/12_mouse_gene_percent_rep.csv',sep='\t') #=============================================================================== # 3. functional protein class percentage (percentage of each broad term) # This calculation is based on cho id. We provide table with percentage for each cho gene id, # Then we find what category in proteomaps does each cho gene maps and then get the totoal percentage of each broad category. 
# #=============================================================================== # tree_map_file = signalP_path + '/Mus musculus (adapted by SL 20151219).tmd' # cho_gene_perc_file = signalP_path + '/12_cho_gene_percent_rep.csv' # cho2musFile = '/data/shangzhong/CHO2Mouse/finalMergeWithmRNA.final.txt' # func_gene_dic,gene_multi_func = func_gene_dic(tree_map_file,cho_gene_perc_file,cho2musFile) # # n = 0 # for key in func_gene_dic: # n += len(func_gene_dic[key]) # print n,'cho genes map to categories' # # build {func:[g,2]} 2 means the cho gene maps to 2 functional categories # multi_func_gene = {} # for g in gene_multi_func: # funcs = gene_multi_func[g] # for f in funcs: # function # if f in multi_func_gene: # multi_func_gene[f].append([g,len(funcs)]) # else: # multi_func_gene[f] = [[g,len(funcs)]] # print multi_func_gene # # output the category to file # outFile = signalP_path + '/17_cho_proteomap_percent_rep.csv' # handle = open(outFile,'w') # handle.write('\t'.join(cho_gene_perc_df.columns)+'\n') # for func in func_gene_dic: # genes = func_gene_dic[func] # sub_df = cho_gene_perc_df[cho_gene_perc_df.index.isin(genes)] # if sub_df.empty: # continue # percent = ((sub_df.sum())).tolist() # # for each func, find the mapped gene and then get how many func that gene map to # if func in multi_func_gene: # for gene_n in multi_func_gene[func]: # gene_percent = (cho_gene_perc_df[cho_gene_perc_df.index == gene_n[0]]/float(gene_n[1])).values[0] # percent = [float(m) + float(n) for m,n in zip(percent,gene_percent)] # percent = [str(p) for p in percent] # handle.write(func+'\t'+'\t'.join(percent)+'\n') # handle.close() #=============================================================================== # 4. add functional categories to cho gene percent rep #=============================================================================== def addCategory(geneid,gene_func_dic): """add which category does a cho gene id map to * geneid: str. Gene id * func_gene_dic: {gene: [functional categories]} """ if geneid in gene_func_dic: return ';'.join(gene_func_dic[geneid]) else: None # #----------- 1 .get the gene_function dictionary ------------------ # tree_map_file = signalP_path + '/Mus musculus (adapted by SL 20151219).tmd' # cho_gene_perc_file = signalP_path + '/12_cho_gene_percent_rep.csv' # cho2musFile = '/data/shangzhong/CHO2Mouse/finalMergeWithmRNA.final.txt' # func_gene_dic,gene_multi_func = get_func_gene_dic(tree_map_file,cho_gene_perc_file,cho2musFile) # gene_func_dic = {value:[key] for key,values in func_gene_dic.iteritems() for value in values} # gene_func_dic.update(gene_multi_func) # # for key in gene_func_dic: # if len(gene_func_dic[key]) > 1: # print key, gene_func_dic[key] # #----------- 2. add the category ---------------------------------- # cho_gene_perc_name_file = signalP_path + '/12_cho_gene_percent_rep.name.csv' # cho_gene_perc_df = pd.read_csv(cho_gene_perc_name_file,sep='\t',header=0) # cho_gene_perc_df['category'] = cho_gene_perc_df['geneid'].map(lambda x: addCategory(x,gene_func_dic)) # cho_gene_perc_df = cho_gene_perc_df.sort('gene_short_name') # cho_gene_perc_df.to_csv(signalP_path + '/12_cho_gene_percent_rep.name.cate.csv',index=False) #### rethink from here #=============================================================================== # 2. custamize protein annotation for proteomaps #=============================================================================== """ # 1. 
read the id file human_disease = '/data/shangzhong/RibosomeProfiling/figures/Proteomaps/KO_human_disease.txt' human_disease_df = pd.read_csv(human_disease,sep='\t',header=0,names=['path_name','kegg_id']) name_id_dict = {k:list(v) for k,v in human_disease_df.groupby('kegg_id')['path_name']} kegg = human_disease_df['kegg_id'].tolist() # 2. read mouse id mapping mmu = '/data/shangzhong/RibosomeProfiling/figures/Proteomaps/mmu_mapping.csv' mmu_df = pd.read_csv(mmu,sep='\t',header=None,names=['GeneID','GeneSymbol','kegg_id','gene_name']) # 3. generate the annotation table criteria = mmu_df['kegg_id'].map(lambda x: x in kegg) anno_df = mmu_df[criteria] anno_df = anno_df.reset_index() anno_df['Pathway'] = anno_df['kegg_id'].apply(lambda x: name_id_dict[x][0]) anno_df['Org'] = pd.Series(['mmu'] * (anno_df.shape[0])) anno_df = anno_df[['Org','GeneID','GeneSymbol','kegg_id','Pathway','gene_name']] anno_df.to_csv(mmu[:-3]+'reanno.csv',sep='\t',index=False) """ """ # 1) get total count for each sample path = '/data/shangzhong/RibosomeProfiling/Ribo_align/01_cov5end' os.chdir(path) coverFiles = [f for f in os.listdir(path) if f.endswith('sort.5endCov.txt')] coverFiles = natsorted(coverFiles) totalCount = [] for f in coverFiles: df = pd.read_csv(f,header=None,names=['coverage','Chr','pos'],delim_whitespace=True) total = df['coverage'].sum() totalCount.append(total) # 2) calculate percentage of reads that are included by the genes mappable in the cho2mouse id map raw_count_file = '/data/shangzhong/RibosomeProfiling/cho_pr/12_pr_gene_rawCount.txt' raw_count_df = pd.read_csv(raw_count_file,sep='\t',header=0) # map file cho2musFile = '/data/shangzhong/CHO2Mouse/finalMergeWithmRNA.final.txt' cho2mus_df = pd.read_csv(cho2musFile,header=None,sep='\t',names=['cho','mus']) cho2mus_df = cho2mus_df.astype(str) mappableIDs = cho2mus_df['cho'].tolist() print totalCount cri = raw_count_df['GeneID'].map(lambda x: x in mappableIDs) filtered_genes = raw_count_df[cri] sum_count = filtered_genes.iloc[:,1:].sum(axis=0).tolist() percent = [c/total for c,total in zip(sum_count,totalCount)] print 'All genes mapping count percent is:',percent cri = raw_count_df['GeneID'].map(lambda x: x in ['heavychain','lightchain','NeoRKanR']) filtered_genes = raw_count_df[cri] sum_count = filtered_genes.iloc[:,1:].sum(axis=0).tolist() percent = [c/total for c,total in zip(sum_count,totalCount)] print 'Antibody mapping count percent is:',percent """ #=============================================================================== # 4. 
get how much percent of protein coding genes mapping reads are representaed by proteomaps #=============================================================================== def mappableID(gene,cho_mus_dict,mouseMapIDs): if str(gene) in cho_mus_dict: return set(cho_mus_dict[str(gene)]).intersection(mouseMapIDs) != set() else: return False """ # 1) get the cho 2 mouse dictionary cho2musFile = '/data/shangzhong/CHO2Mouse/finalMergeWithmRNA.final.txt' cho2mus_df = pd.read_csv(cho2musFile,header=None,sep='\t',names=['cho','mus']) cho2mus_df = cho2mus_df.astype(str) cho2mus_df['mus'] = cho2mus_df['mus'].map(lambda x: splitMouseIDs(x)) cho_mus_dict = cho2mus_df.set_index('cho')['mus'].to_dict() # 2) get the mouse mappable ids in proteomaps mmuMapFile = '/data/shangzhong/RibosomeProfiling/figures/Proteomaps/mmu_mapping.csv' mmuMap_df = pd.read_csv(mmuMapFile,sep='\t',header=None,usecols=[0],names=['GeneID']) mouseMapIDs = mmuMap_df['GeneID'].astype(str).tolist() # 3) read gene coverage file gene_rawCount_file = '/data/shangzhong/RibosomeProfiling/cho_pr/12_pr_gene_rawCount.txt' gene_raw_count_df = pd.read_csv(gene_rawCount_file,sep='\t',header=0,index_col=0) antibody_df = gene_raw_count_df[gene_raw_count_df.index.isin(['heavychain','lightchain','NeoRKanR'])] # 3 antibody raw count total = gene_raw_count_df.sum().tolist() # total number of reads that map to protein coding genes cri = gene_raw_count_df.index.map(lambda x: mappableID(x,cho_mus_dict,mouseMapIDs)) mappable_df = gene_raw_count_df[cri].append(antibody_df) # genes in cho that can be mapped to proteomaps count_sum = mappable_df.sum().tolist() # total count of the reads that are represented by proteomaps percent = [m/n for m,n in zip(count_sum,total)] print total print count_sum print percent antibody_df.div(count_sum) """
license: mit
repo_name: efiring/scipy
path: scipy/special/c_misc/struve_convergence.py
copies: 76
size: 3725
content:

"""
Convergence regions of the expansions used in ``struve.c``

Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.

The floating-point functions over/underflow in the lower left
and right corners of the figure.


Figure legend
=============

Red region
    Power series is close (1e-12) to the mpmath result

Blue region
    Asymptotic series is close to the mpmath result

Green region
    Bessel series is close to the mpmath result

Dotted colored lines
    Boundaries of the regions

Solid colored lines
    Boundaries estimated by the routine itself. These will be used
    for determining which of the results to use.

Black dashed line
    The line z = 0.7*|v| + 12

"""
from __future__ import absolute_import, division, print_function

import numpy as np
import matplotlib.pyplot as plt

try:
    import mpmath
except:
    from sympy import mpmath


def err_metric(a, b, atol=1e-290):
    m = abs(a - b) / (atol + abs(b))
    m[np.isinf(b) & (a == b)] = 0
    return m


def do_plot(is_h=True):
    from scipy.special._ufuncs import \
        _struve_power_series, _struve_asymp_large_z, _struve_bessel_series

    vs = np.linspace(-1000, 1000, 91)
    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])

    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)

    mpmath.mp.dps = 50
    if is_h:
        sh = lambda v, z: float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
    else:
        sh = lambda v, z: float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])

    err_a = err_metric(ra[0], ex) + 1e-300
    err_p = err_metric(rp[0], ex) + 1e-300
    err_b = err_metric(rb[0], ex) + 1e-300

    err_est_a = abs(ra[1]/ra[0])
    err_est_p = abs(rp[1]/rp[0])
    err_est_b = abs(rb[1]/rb[0])

    z_cutoff = 0.7*abs(vs) + 12

    levels = [-1000, -12]

    plt.cla()

    plt.hold(1)
    plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)

    plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])

    lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
    la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
    lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])

    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})

    plt.plot(vs, z_cutoff, 'k--')

    plt.xlim(vs.min(), vs.max())
    plt.ylim(zs.min(), zs.max())

    plt.xlabel('v')
    plt.ylabel('z')


def main():
    plt.clf()

    plt.subplot(121)
    do_plot(True)
    plt.title('Struve H')

    plt.subplot(122)
    do_plot(False)
    plt.title('Struve L')

    plt.savefig('struve_convergence.png')
    plt.show()


if __name__ == "__main__":
    import os
    import sys
    if '--main' in sys.argv:
        main()
    else:
        import subprocess
        subprocess.call([sys.executable, os.path.join('..', '..', '..', 'runtests.py'),
                         '-g', '--python', __file__, '--main'])

license: bsd-3-clause
repo_name: liang42hao/bokeh
path: bokeh/charts/builder/dot_builder.py
copies: 43
size: 6160
content:

"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.

This is the Dot class which lets you build your Dot charts just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------

#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------

from __future__ import absolute_import

import numpy as np

try:
    import pandas as pd
except ImportError:
    pd = None

from ..utils import chunk, cycle_colors, make_scatter
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, FactorRange, GlyphRenderer, Range1d
from ...models.glyphs import Segment
from ...properties import Any, Bool, Either, List


def Dot(values, cat=None, stem=True, xscale="categorical", yscale="linear",
        xgrid=False, ygrid=True, **kws):
    """ Create a dot chart using :class:`DotBuilder <bokeh.charts.builder.dot_builder.DotBuilder>`
    to render the geometry from values and cat.

    Args:
        values (iterable): iterable 2d representing the data series
            values matrix.
        cat (list or bool, optional): list of string representing the
            categories. Defaults to None.

    In addition the the parameters specific to this chart,
    :ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.

    Returns:
        a new :class:`Chart <bokeh.charts.Chart>`

    Examples:

        .. bokeh-plot::
            :source-position: above

            from collections import OrderedDict
            from bokeh.charts import Dot, output_file, show

            # dict, OrderedDict, lists, arrays and DataFrames are valid inputs
            xyvalues = OrderedDict()
            xyvalues['python']=[2, 5]
            xyvalues['pypy']=[12, 40]
            xyvalues['jython']=[22, 30]

            dot = Dot(xyvalues, ['cpu1', 'cpu2'], title='dots')

            output_file('dot.html')
            show(dot)

    """
    return create_and_build(
        DotBuilder, values, cat=cat, stem=stem, xscale=xscale, yscale=yscale,
        xgrid=xgrid, ygrid=ygrid, **kws
    )

#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------


class DotBuilder(Builder):
    """This is the Dot class and it is in charge of plotting Dot chart
    in an easy and intuitive way.

    Essentially, it provides a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed glyphs (segments and circles) taking
    the references from the source.

    """

    cat = Either(Bool, List(Any), help="""
    List of string representing the categories. (Defaults to None.)
    """)

    stem = Bool(True, help="""
    Whether to draw a stem from each do to the axis.
    """)

    def _process_data(self):
        """Take the Dot data from the input **value.

        It calculates the chart properties accordingly. Then build a dict
        containing references to all the calculated points to be used by
        the rect glyph inside the ``_yield_renderers`` method.

        """
        if not self.cat:
            self.cat = [str(x) for x in self._values.index]

        self._data = dict(cat=self.cat, zero=np.zeros(len(self.cat)))
        # list to save all the attributes we are going to create
        # list to save all the groups available in the incoming input
        # Grouping
        self._groups.extend(self._values.keys())
        step = np.linspace(0, 1.0, len(self._values.keys()) + 1, endpoint=False)

        for i, (val, values) in enumerate(self._values.items()):
            # original y value
            self.set_and_get("", val, values)
            # x value
            cats = [c + ":" + str(step[i + 1]) for c in self.cat]
            self.set_and_get("cat", val, cats)
            # zeros
            self.set_and_get("z_", val, np.zeros(len(values)))
            # segment top y value
            self.set_and_get("seg_top_", val, values)

    def _set_sources(self):
        """Push the Dot data into the ColumnDataSource and calculate
        the proper ranges.
        """
        self._source = ColumnDataSource(self._data)
        self.x_range = FactorRange(factors=self._source.data["cat"])
        cat = [i for i in self._attr if not i.startswith(("cat",))]
        end = 1.1 * max(max(self._data[i]) for i in cat)
        self.y_range = Range1d(start=0, end=end)

    def _yield_renderers(self):
        """Use the rect glyphs to display the bars.

        Takes reference points from data loaded at the source and
        renders circle glyphs (and segments) on the related
        coordinates.
        """
        self._tuples = list(chunk(self._attr, 4))
        colors = cycle_colors(self._tuples, self.palette)

        # quartet elements are: [data, cat, zeros, segment_top]
        for i, quartet in enumerate(self._tuples):
            # draw segment first so when scatter will be place on top of it
            # and it won't show segment chunk on top of the circle
            if self.stem:
                glyph = Segment(
                    x0=quartet[1], y0=quartet[2],
                    x1=quartet[1], y1=quartet[3],
                    line_color="black", line_width=2
                )
                yield GlyphRenderer(data_source=self._source, glyph=glyph)

            renderer = make_scatter(
                self._source, quartet[1], quartet[0], 'circle',
                colors[i - 1], line_color='black', size=15, fill_alpha=1.,
            )
            self._legends.append((self._groups[i], [renderer]))
            yield renderer

license: bsd-3-clause
repo_name: sumspr/scikit-learn
path: sklearn/cluster/birch.py
copies: 207
size: 22706
content:
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy import sparse from math import sqrt from ..metrics.pairwise import euclidean_distances from ..base import TransformerMixin, ClusterMixin, BaseEstimator from ..externals.six.moves import xrange from ..utils import check_array from ..utils.extmath import row_norms, safe_sparse_dot from ..utils.validation import NotFittedError, check_is_fitted from .hierarchical import AgglomerativeClustering def _iterate_sparse_X(X): """This little hack returns a densified row when iterating over a sparse matrix, insted of constructing a sparse matrix for every row that is expensive. """ n_samples = X.shape[0] X_indices = X.indices X_data = X.data X_indptr = X.indptr for i in xrange(n_samples): row = np.zeros(X.shape[1]) startptr, endptr = X_indptr[i], X_indptr[i + 1] nonzero_indices = X_indices[startptr:endptr] row[nonzero_indices] = X_data[startptr:endptr] yield row def _split_node(node, threshold, branching_factor): """The node has to be split if there is no place for a new subcluster in the node. 1. Two empty nodes and two empty subclusters are initialized. 2. The pair of distant subclusters are found. 3. The properties of the empty subclusters and nodes are updated according to the nearest distance between the subclusters to the pair of distant subclusters. 4. The two nodes are set as children to the two subclusters. """ new_subcluster1 = _CFSubcluster() new_subcluster2 = _CFSubcluster() new_node1 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_node2 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_subcluster1.child_ = new_node1 new_subcluster2.child_ = new_node2 if node.is_leaf: if node.prev_leaf_ is not None: node.prev_leaf_.next_leaf_ = new_node1 new_node1.prev_leaf_ = node.prev_leaf_ new_node1.next_leaf_ = new_node2 new_node2.prev_leaf_ = new_node1 new_node2.next_leaf_ = node.next_leaf_ if node.next_leaf_ is not None: node.next_leaf_.prev_leaf_ = new_node2 dist = euclidean_distances( node.centroids_, Y_norm_squared=node.squared_norm_, squared=True) n_clusters = dist.shape[0] farthest_idx = np.unravel_index( dist.argmax(), (n_clusters, n_clusters)) node1_dist, node2_dist = dist[[farthest_idx]] node1_closer = node1_dist < node2_dist for idx, subcluster in enumerate(node.subclusters_): if node1_closer[idx]: new_node1.append_subcluster(subcluster) new_subcluster1.update(subcluster) else: new_node2.append_subcluster(subcluster) new_subcluster2.update(subcluster) return new_subcluster1, new_subcluster2 class _CFNode(object): """Each node in a CFTree is called a CFNode. The CFNode can have a maximum of branching_factor number of CFSubclusters. Parameters ---------- threshold : float Threshold needed for a new subcluster to enter a CFSubcluster. branching_factor : int Maximum number of CF subclusters in each node. is_leaf : bool We need to know if the CFNode is a leaf or not, in order to retrieve the final subclusters. n_features : int The number of features. Attributes ---------- subclusters_ : array-like list of subclusters for a particular CFNode. prev_leaf_ : _CFNode prev_leaf. Useful only if is_leaf is True. next_leaf_ : _CFNode next_leaf. Useful only if is_leaf is True. the final subclusters. 
init_centroids_ : ndarray, shape (branching_factor + 1, n_features) manipulate ``init_centroids_`` throughout rather than centroids_ since the centroids are just a view of the ``init_centroids_`` . init_sq_norm_ : ndarray, shape (branching_factor + 1,) manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. centroids_ : ndarray view of ``init_centroids_``. squared_norm_ : ndarray view of ``init_sq_norm_``. """ def __init__(self, threshold, branching_factor, is_leaf, n_features): self.threshold = threshold self.branching_factor = branching_factor self.is_leaf = is_leaf self.n_features = n_features # The list of subclusters, centroids and squared norms # to manipulate throughout. self.subclusters_ = [] self.init_centroids_ = np.zeros((branching_factor + 1, n_features)) self.init_sq_norm_ = np.zeros((branching_factor + 1)) self.squared_norm_ = [] self.prev_leaf_ = None self.next_leaf_ = None def append_subcluster(self, subcluster): n_samples = len(self.subclusters_) self.subclusters_.append(subcluster) self.init_centroids_[n_samples] = subcluster.centroid_ self.init_sq_norm_[n_samples] = subcluster.sq_norm_ # Keep centroids and squared norm as views. In this way # if we change init_centroids and init_sq_norm_, it is # sufficient, self.centroids_ = self.init_centroids_[:n_samples + 1, :] self.squared_norm_ = self.init_sq_norm_[:n_samples + 1] def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): """Remove a subcluster from a node and update it with the split subclusters. """ ind = self.subclusters_.index(subcluster) self.subclusters_[ind] = new_subcluster1 self.init_centroids_[ind] = new_subcluster1.centroid_ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ self.append_subcluster(new_subcluster2) def insert_cf_subcluster(self, subcluster): """Insert a new subcluster into the node.""" if not self.subclusters_: self.append_subcluster(subcluster) return False threshold = self.threshold branching_factor = self.branching_factor # We need to find the closest subcluster among all the # subclusters so that we can insert our new subcluster. dist_matrix = np.dot(self.centroids_, subcluster.centroid_) dist_matrix *= -2. dist_matrix += self.squared_norm_ closest_index = np.argmin(dist_matrix) closest_subcluster = self.subclusters_[closest_index] # If the subcluster has a child, we need a recursive strategy. if closest_subcluster.child_ is not None: split_child = closest_subcluster.child_.insert_cf_subcluster( subcluster) if not split_child: # If it is determined that the child need not be split, we # can just update the closest_subcluster closest_subcluster.update(subcluster) self.init_centroids_[closest_index] = \ self.subclusters_[closest_index].centroid_ self.init_sq_norm_[closest_index] = \ self.subclusters_[closest_index].sq_norm_ return False # things not too good. we need to redistribute the subclusters in # our child node, and add a new subcluster in the parent # subcluster to accomodate the new child. else: new_subcluster1, new_subcluster2 = _split_node( closest_subcluster.child_, threshold, branching_factor) self.update_split_subclusters( closest_subcluster, new_subcluster1, new_subcluster2) if len(self.subclusters_) > self.branching_factor: return True return False # good to go! 
else: merged = closest_subcluster.merge_subcluster( subcluster, self.threshold) if merged: self.init_centroids_[closest_index] = \ closest_subcluster.centroid_ self.init_sq_norm_[closest_index] = \ closest_subcluster.sq_norm_ return False # not close to any other subclusters, and we still # have space, so add. elif len(self.subclusters_) < self.branching_factor: self.append_subcluster(subcluster) return False # We do not have enough space nor is it closer to an # other subcluster. We need to split. else: self.append_subcluster(subcluster) return True class _CFSubcluster(object): """Each subcluster in a CFNode is called a CFSubcluster. A CFSubcluster can have a CFNode has its child. Parameters ---------- linear_sum : ndarray, shape (n_features,), optional Sample. This is kept optional to allow initialization of empty subclusters. Attributes ---------- n_samples_ : int Number of samples that belong to each subcluster. linear_sum_ : ndarray Linear sum of all the samples in a subcluster. Prevents holding all sample data in memory. squared_sum_ : float Sum of the squared l2 norms of all samples belonging to a subcluster. centroid_ : ndarray Centroid of the subcluster. Prevent recomputing of centroids when ``CFNode.centroids_`` is called. child_ : _CFNode Child Node of the subcluster. Once a given _CFNode is set as the child of the _CFNode, it is set to ``self.child_``. sq_norm_ : ndarray Squared norm of the subcluster. Used to prevent recomputing when pairwise minimum distances are computed. """ def __init__(self, linear_sum=None): if linear_sum is None: self.n_samples_ = 0 self.squared_sum_ = 0.0 self.linear_sum_ = 0 else: self.n_samples_ = 1 self.centroid_ = self.linear_sum_ = linear_sum self.squared_sum_ = self.sq_norm_ = np.dot( self.linear_sum_, self.linear_sum_) self.child_ = None def update(self, subcluster): self.n_samples_ += subcluster.n_samples_ self.linear_sum_ += subcluster.linear_sum_ self.squared_sum_ += subcluster.squared_sum_ self.centroid_ = self.linear_sum_ / self.n_samples_ self.sq_norm_ = np.dot(self.centroid_, self.centroid_) def merge_subcluster(self, nominee_cluster, threshold): """Check if a cluster is worthy enough to be merged. If yes then merge. """ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ new_n = self.n_samples_ + nominee_cluster.n_samples_ new_centroid = (1 / new_n) * new_ls new_norm = np.dot(new_centroid, new_centroid) dot_product = (-2 * new_n) * new_norm sq_radius = (new_ss + dot_product) / new_n + new_norm if sq_radius <= threshold ** 2: (self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_) = \ new_n, new_ls, new_ss, new_centroid, new_norm return True return False @property def radius(self): """Return radius of the subcluster""" dot_product = -2 * np.dot(self.linear_sum_, self.centroid_) return sqrt( ((self.squared_sum_ + dot_product) / self.n_samples_) + self.sq_norm_) class Birch(BaseEstimator, TransformerMixin, ClusterMixin): """Implements the Birch clustering algorithm. Every new sample is inserted into the root of the Clustering Feature Tree. It is then clubbed together with the subcluster that has the centroid closest to the new sample. This is done recursively till it ends up at the subcluster of the leaf of the tree has the closest centroid. Read more in the :ref:`User Guide <birch>`. 
Parameters ---------- threshold : float, default 0.5 The radius of the subcluster obtained by merging a new sample and the closest subcluster should be lesser than the threshold. Otherwise a new subcluster is started. branching_factor : int, default 50 Maximum number of CF subclusters in each node. If a new samples enters such that the number of subclusters exceed the branching_factor then the node has to be split. The corresponding parent also has to be split and if the number of subclusters in the parent is greater than the branching factor, then it has to be split recursively. n_clusters : int, instance of sklearn.cluster model, default None Number of clusters after the final clustering step, which treats the subclusters from the leaves as new samples. By default, this final clustering step is not performed and the subclusters are returned as they are. If a model is provided, the model is fit treating the subclusters as new samples and the initial data is mapped to the label of the closest subcluster. If an int is provided, the model fit is AgglomerativeClustering with n_clusters set to the int. compute_labels : bool, default True Whether or not to compute labels for each fit. copy : bool, default True Whether or not to make a copy of the given data. If set to False, the initial data will be overwritten. Attributes ---------- root_ : _CFNode Root of the CFTree. dummy_leaf_ : _CFNode Start pointer to all the leaves. subcluster_centers_ : ndarray, Centroids of all subclusters read directly from the leaves. subcluster_labels_ : ndarray, Labels assigned to the centroids of the subclusters after they are clustered globally. labels_ : ndarray, shape (n_samples,) Array of labels assigned to the input data. if partial_fit is used instead of fit, they are assigned to the last batch of data. Examples -------- >>> from sklearn.cluster import Birch >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5, ... compute_labels=True) >>> brc.fit(X) Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None, threshold=0.5) >>> brc.predict(X) array([0, 0, 0, 1, 1, 1]) References ---------- * Tian Zhang, Raghu Ramakrishnan, Maron Livny BIRCH: An efficient data clustering method for large databases. http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf * Roberto Perdisci JBirch - Java implementation of BIRCH clustering algorithm https://code.google.com/p/jbirch/ """ def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3, compute_labels=True, copy=True): self.threshold = threshold self.branching_factor = branching_factor self.n_clusters = n_clusters self.compute_labels = compute_labels self.copy = copy def fit(self, X, y=None): """ Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. """ self.fit_, self.partial_fit_ = True, False return self._fit(X) def _fit(self, X): X = check_array(X, accept_sparse='csr', copy=self.copy) threshold = self.threshold branching_factor = self.branching_factor if branching_factor <= 1: raise ValueError("Branching_factor should be greater than one.") n_samples, n_features = X.shape # If partial_fit is called for the first time or fit is called, we # start a new tree. partial_fit = getattr(self, 'partial_fit_') has_root = getattr(self, 'root_', None) if getattr(self, 'fit_') or (partial_fit and not has_root): # The first root is the leaf. Manipulate this object throughout. 
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) # To enable getting back subclusters. self.dummy_leaf_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) self.dummy_leaf_.next_leaf_ = self.root_ self.root_.prev_leaf_ = self.dummy_leaf_ # Cannot vectorize. Enough to convince to use cython. if not sparse.issparse(X): iter_func = iter else: iter_func = _iterate_sparse_X for sample in iter_func(X): subcluster = _CFSubcluster(linear_sum=sample) split = self.root_.insert_cf_subcluster(subcluster) if split: new_subcluster1, new_subcluster2 = _split_node( self.root_, threshold, branching_factor) del self.root_ self.root_ = _CFNode(threshold, branching_factor, is_leaf=False, n_features=n_features) self.root_.append_subcluster(new_subcluster1) self.root_.append_subcluster(new_subcluster2) centroids = np.concatenate([ leaf.centroids_ for leaf in self._get_leaves()]) self.subcluster_centers_ = centroids self._global_clustering(X) return self def _get_leaves(self): """ Retrieve the leaves of the CF Node. Returns ------- leaves: array-like List of the leaf nodes. """ leaf_ptr = self.dummy_leaf_.next_leaf_ leaves = [] while leaf_ptr is not None: leaves.append(leaf_ptr) leaf_ptr = leaf_ptr.next_leaf_ return leaves def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features), None Input data. If X is not provided, only the global clustering step is done. """ self.partial_fit_, self.fit_ = True, False if X is None: # Perform just the final global clustering step. self._global_clustering() return self else: self._check_fit(X) return self._fit(X) def _check_fit(self, X): is_fitted = hasattr(self, 'subcluster_centers_') # Called by partial_fit, before fitting. has_partial_fit = hasattr(self, 'partial_fit_') # Should raise an error if one does not fit before predicting. if not (is_fitted or has_partial_fit): raise NotFittedError("Fit training data before predicting") if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]: raise ValueError( "Training data and predicted data do " "not have same number of features.") def predict(self, X): """ Predict data using the ``centroids_`` of subclusters. Avoid computation of the row norms of X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- labels: ndarray, shape(n_samples) Labelled data. """ X = check_array(X, accept_sparse='csr') self._check_fit(X) reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T) reduced_distance *= -2 reduced_distance += self._subcluster_norms return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)] def transform(self, X, y=None): """ Transform X into subcluster centroids dimension. Each dimension represents the distance from the sample point to each cluster centroid. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters) Transformed data. """ check_is_fitted(self, 'subcluster_centers_') return euclidean_distances(X, self.subcluster_centers_) def _global_clustering(self, X=None): """ Global clustering for the subclusters obtained after fitting """ clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. 
not_enough_centroids = False if isinstance(clusterer, int): clusterer = AgglomerativeClustering( n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True elif (clusterer is not None and not hasattr(clusterer, 'fit_predict')): raise ValueError("n_clusters should be an instance of " "ClusterMixin or an int") # To use in predict to avoid recalculation. self._subcluster_norms = row_norms( self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( "Number of subclusters found (%d) by Birch is less " "than (%d). Decrease the threshold." % (len(centroids), self.n_clusters)) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict( self.subcluster_centers_) if compute_labels: self.labels_ = self.predict(X)
license: bsd-3-clause
repo_name: bb-blud/tictac
path: utilities.py
copies: 2
size: 6914
content:

""" This module houses utility functions used to train agents,
    play multiple games, graph statistics, as well as some
    diagnostic printout functions.
"""
import pickle
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
from game_state import GameState
from player_agent import DecisionPlayer, QMap


def setupGame(globalQM, game_size, policies, learning=False, marks=['X','O'],
              p1QM=None, p2QM=None, d1=2, d2=2):
    """ Set up all parameters for a game to be played.
        Returns a GameState instance """
    gs = GameState(game_size, learning)
    gs.setQMap(globalQM)

    player1 = DecisionPlayer(marks[0], gs, policies[0], d1)
    if p1QM is not None:
        player1.setInnerQ(p1QM)

    player2 = DecisionPlayer(marks[1], gs, policies[1], d2)
    if p2QM is not None:
        player2.setInnerQ(p2QM)

    gs.setPlayers(player1, player2)
    return gs


def getRatios(game_state, n_games, debug=False):
    """ Play games and return totals for win:draw:loss divided by total games """
    QM, tally, conv = playGames(game_state, n_games, debug=debug)
    ts = [1.*tally[(True, False)], 1.*tally[(False, True)], 1.*tally[(False,False)]]
    s = sum(ts)
    return [ts[0]/s, ts[2]/s, ts[1]/s]


def fightDuels(QMs, duels, size, n_games, **kwargs):
    """ Convenience method multiple duels can be played and an array
        of final ratio arrays is returned """
    ratios = []
    p1depths = kwargs.get('p1ds', [2 for _ in range(len(duels))])  # get the depths for each game or set all to 2 if
    p2depths = kwargs.get('p2ds', [2 for _ in range(len(duels))])  # depths are not specified.

    for i, duel in enumerate(duels):
        ratios.append(getRatios(setupGame(QMs[i], size, duel,
                                          d1 = p1depths[i],
                                          d2 = p2depths[i] ), n_games) )
    return ratios


def playGames(game_state, n_games, check_convergence=True, debug=False):
    """ This function will play n_game amount of games iteratively and by default
        check for game convergence, in which case it will exit early so as to stop
        playing the same game repeatedly.
    """
    ####### Tally ########
    tally = { (True, False) : 0, (False, True) : 0, (False, False) : 0 }
    is_converging = False

    ##### Initialize #####
    gs = game_state

    #### Crude convergence test ####
    repeats = deque()                        #
    if check_convergence:                    # A buffer of the last 10 games played is created
        buffer_size = 10                     #
        if gs.learning:                      #
            buffer_size = 3*n_games          #
        for game in range(buffer_size):      #
            while not gs.game_finished:      # Filling buffer
                gs.takeStep()
                if debug:
                    gs.printGame()
            repeats.append(gs.game_sequence)
            gs.resetGame()

    ######### Play Games #########
    current = []                             #
    for game in range(n_games):              # Main game playing loop
        while not gs.game_finished:
            gs.takeStep()
            if debug:
                gs.printGame()

        if debug:
            gs.printGame()
            print

        tally[ (gs.players[0].is_winner, gs.players[1].is_winner) ] += 1  ##Update game tally

        if debug:
            for p in gs.players:
                print p.mark, "is winner: ", p.is_winner
            print tally

        ## Exit if converging ##
        current = gs.game_sequence
        repeats.append(current)  #Append the last game played to buffer
        repeats.popleft()        #remove the last game from the buffer

        if debug:
            for r in repeats:
                print r
                print "**"
            print

        # If all games in the repeats buffer are equal then players have converged to a single game
        if False not in (current == g for g in repeats) and check_convergence:
            print "CONVERGENCE ON GAME NUMBER: ", game, [p.policy for p in gs.players]
            is_converging = True
            break
        ## ## ## ##

        gs.resetGame()

    return gs.QM, tally, is_converging


def multiTrain(game_state, runs, batch_size):
    ## Support function plays batch_size amount of games ##
    def playBatch(ns, game_state, batch_size):
        gs = game_state
        for game in range(batch_size):
            gs.QM = ns.QM
            while not gs.game_finished:
                gs.takeStep()
            ns.QM = gs.QM
            gs.resetGame()
    ## ## ## ## ## ## ## ##

    import multiprocessing
    manager = multiprocessing.Manager()
    ns = manager.Namespace()
    ns.QM = game_state.QM
    #ns.Q = game_state.QM.Q

    jobs = []
    for _ in range(runs):
        p = multiprocessing.Process(target=playBatch, args=(ns, game_state, batch_size) )
        jobs.append(p)
        p.start()
    for p in jobs:
        p.join()

    return ns.QM


def graphStats(columns, data, strategy):
    #Based on http://matplotlib.org/examples/pylab_examples/table_demo.html
    rows = ['win', 'draw', 'loss']
    values = np.arange(0, 1, .1)
    value_increment = 2

    # Get some pastel shades for the colors
    colors = plt.cm.BuPu(np.linspace(0, 0.5, len(rows)))[::-1]
    n_rows = len(data)

    index = np.arange(len(columns)) + 0.3
    bar_width = 0.4

    # Initialize the vertical-offset for the stacked bar chart.
    y_offset = np.array([0.0] * len(columns))

    # Plot bars and create text labels for the table
    cell_text = []
    for row in range(n_rows):
        plt.bar(index, data[row], bar_width, bottom=y_offset, color=colors[row])
        y_offset = y_offset + data[row]
        cell_text.append(['%.4f' % x for x in data[row]])

    # Add a table at the bottom of the axes
    the_table = plt.table(cellText=cell_text,
                          rowLabels=rows,
                          rowColours=colors,
                          colLabels=columns,
                          loc='bottom')

    # Adjust layout to make room for the table:
    plt.subplots_adjust(left=0.2, bottom=0.2)

    plt.ylabel("win/draw/loss fraction".format(value_increment))
    plt.yticks(values * value_increment, ['%.2f' % (val*value_increment) for val in values])
    plt.xticks([])
    plt.title('Ratio Comparison ' + strategy)

    plt.show()


def printTally(game_state, n_games):
    QM, tally, conv = playGames(game_state, n_games)
    ts = [1.*tally[(True, False)], 1.*tally[(False, True)], 1.*tally[(False,False)]]
    s = sum(ts)
    print "{} : {} : {}".format(ts[1]/s, ts[2]/s, ts[0]/s)


def exploreQ(QM,d):
    Q = QM.Q
    M = max(len(seq) for seq in Q.keys())
    for k in range(1,M/d):
        print "Explored Moves at step", k
        explored = sorted( [(seq , Q[seq]) for seq in Q if len(seq) == k], key=lambda t:t[1] )
        for seq, val in explored:
            print seq, val
        print

license: mit
santis19/fatiando
gallery/gravmag/forward_modeling_sphere_mag.py
6
2276
""" Forward modeling magnetic data using spheres in Cartesian coordinates ----------------------------------------------------------------------- The :mod:`fatiando.gravmag` has many functions for forward modeling gravity and magnetic data. Here we'll show how to build a model out of spheres and calculate the total field magnetic anomaly and the 3 components of the magnetic induction. """ from __future__ import division, print_function from fatiando import mesher, gridder, utils from fatiando.gravmag import sphere import matplotlib.pyplot as plt import numpy as np # Create a model using geometric objects from fatiando.mesher # Each model element has a dictionary with its physical properties. # The spheres have different total magnetization vectors (total = induced + # remanent + any other effects). Notice that the magnetization has to be a # vector. Function utils.ang2vec converts intensity, inclination, and # declination into a 3 component vector for easier handling. model = [ mesher.Sphere(x=10e3, y=10e3, z=2e3, radius=1.5e3, props={'magnetization': utils.ang2vec(1, inc=50, dec=-30)}), mesher.Sphere(x=20e3, y=20e3, z=2e3, radius=1.5e3, props={'magnetization': utils.ang2vec(1, inc=-70, dec=30)})] # Set the inclination and declination of the geomagnetic field. inc, dec = -10, 13 # Create a regular grid at a constant height shape = (300, 300) area = [0, 30e3, 0, 30e3] x, y, z = gridder.regular(area, shape, z=-10) fields = [ ['Total field Anomaly (nt)', sphere.tf(x, y, z, model, inc, dec)], ['Bx (nT)', sphere.bx(x, y, z, model)], ['By (nT)', sphere.by(x, y, z, model)], ['Bz (nT)', sphere.bz(x, y, z, model)], ] # Make maps of all fields calculated fig = plt.figure(figsize=(7, 6)) plt.rcParams['font.size'] = 10 X, Y = x.reshape(shape)/1000, y.reshape(shape)/1000 for i, tmp in enumerate(fields): ax = plt.subplot(2, 2, i + 1) field, data = tmp scale = np.abs([data.min(), data.max()]).max() ax.set_title(field) plot = ax.pcolormesh(Y, X, data.reshape(shape), cmap='RdBu_r', vmin=-scale, vmax=scale) plt.colorbar(plot, ax=ax, aspect=30, pad=0) ax.set_xlabel('y (km)') ax.set_ylabel('x (km)') plt.tight_layout(pad=0.5) plt.show()
bsd-3-clause
anne-urai/serialDDM
graphicalModels/examples/nogray.py
7
1141
""" Alternative Observed Node Styles ================================ .. module:: daft This model is the same as `the classic </examples/classic>`_ model but the "observed" :class:`Node` is indicated by a double outline instead of shading. This particular example uses the ``inner`` style but ``outer`` is also an option for a different look. """ from matplotlib import rc rc("font", family="serif", size=12) rc("text", usetex=True) import daft pgm = daft.PGM([2.3, 2.05], origin=[0.3, 0.3], observed_style="inner") # Hierarchical parameters. pgm.add_node(daft.Node("alpha", r"$\alpha$", 0.5, 2, fixed=True)) pgm.add_node(daft.Node("beta", r"$\beta$", 1.5, 2)) # Latent variable. pgm.add_node(daft.Node("w", r"$w_n$", 1, 1)) # Data. pgm.add_node(daft.Node("x", r"$x_n$", 2, 1, observed=True)) # Add in the edges. pgm.add_edge("alpha", "beta") pgm.add_edge("beta", "w") pgm.add_edge("w", "x") pgm.add_edge("beta", "x") # And a plate. pgm.add_plate(daft.Plate([0.5, 0.5, 2, 1], label=r"$n = 1, \ldots, N$", shift=-0.1)) # Render and save. pgm.render() pgm.figure.savefig("nogray.pdf") pgm.figure.savefig("nogray.png", dpi=150)
mit
antoinecarme/pyaf
tests/temporal_hierarchy/test_temporal_demo_minutely_T_H_12H_D.py
1
1538
# %matplotlib inline import pyaf import numpy as np import pandas as pd DATA_FREQ = 'T' PERIODS = ["T" , "H", "12H" , "D"] H = 360 N = H * 10 lDateColumn = "Date" lSignalVar = "Signal"; START_TIME = "2001-01-25" # generate a daily signal covering one year 2016 in a pandas dataframe np.random.seed(seed=1960) df_train = pd.DataFrame({lDateColumn : pd.date_range(start=START_TIME, periods=N, freq=DATA_FREQ), lSignalVar : (np.arange(N)//40 + np.arange(N) % 21 + np.random.randn(N))}) # print(df_train.head(N)) lHierarchy = {}; lHierarchy['Levels'] = None; lHierarchy['Data'] = None; lHierarchy['Groups']= {}; lHierarchy['Periods']= PERIODS lHierarchy['Type'] = "Temporal"; # create a model to plot the hierarchy. import pyaf.HierarchicalForecastEngine as hautof lEngine = hautof.cHierarchicalForecastEngine() lSignalHierarchy = lEngine.plot_Hierarchy(df_train , lDateColumn, lSignalVar, H, lHierarchy, None); # print(lSignalHierarchy.__dict__) # create a hierarchical model and train it import pyaf.HierarchicalForecastEngine as hautof lEngine = hautof.cHierarchicalForecastEngine() # lEngine.mOptions.mNbCores = 1 lEngine.mOptions.mHierarchicalCombinationMethod = ["BU" , 'TD' , 'MO' , 'OC']; lSignalHierarchy = lEngine.train(df_train , lDateColumn, lSignalVar, H, lHierarchy, None); lEngine.getModelInfo(); dfapp_in = df_train.copy(); dfapp_in.info() dfapp_in.tail() dfapp_out = lEngine.forecast(dfapp_in, H); dfapp_out.info() print(dfapp_out.tail())
bsd-3-clause
lfairchild/PmagPy
pmagpy/mapping/map_magic.py
2
35331
#!/usr/bin/env python # map_MagIC.py: -*- Python -*- DESCRIPTIVE TEXT. # # Copyright (c) 2014 Lori Jonestrask # Author: Lori Jonestrask (mintblue87@gmail.com) . import json from pmagpy.data_model3 import DataModel from . import maps def mapping(dictionary, mapping): """ takes in a dictionary and a mapping which contains new key names, and returns a new dictionary with the updated key names, i.e.: dictionary = {'a': 1, 'b': 2, 'c': 3} mapping = {'a': 'aa', 'c': 'cc'} mapped_dictionary = mapping(dictionary, mapping) mapped_dictionary = {'aa': 1, b, 2, 'cc': 3} """ mapped_dictionary = {} for key, value in dictionary.items(): if key in list(mapping.keys()): new_key = mapping[key] # if there is already a mapped value, try to figure out which value to use # (i.e., if both er_synthetic_name and er_specimen_name are in one measurement file) if new_key in mapped_dictionary: if hasattr(value, 'any'): if not value.any(): # if new value is null, leave the old value there continue if hasattr(mapped_dictionary, 'any'): if value.any() and not mapped_dictionary[new_key].any(): # choose the one that has a non-null value mapped_dictionary[new_key] = value elif value.any() and mapped_dictionary[new_key].any(): # if both have values, choose which one to replace and warn #print('-W- Two possible values found for {}'.format(new_key)) #print(' Replacing {} with {}'.format(mapped_dictionary[new_key], value)) mapped_dictionary[new_key] = value else: if value.any() and not mapped_dictionary[new_key].any(): # choose the one that has a non-null value mapped_dictionary[new_key] = value elif value.any() and mapped_dictionary[new_key].any(): # if both have values, choose which one to replace and warn #print('-W- Two possible values found for {}'.format(new_key)) #print(' Replacing {} with {}'.format(mapped_dictionary[new_key], value)) mapped_dictionary[new_key] = value else: if not value: # if new value is null, leave the old value there continue elif value and not mapped_dictionary[new_key]: # choose the one that has a non-null value mapped_dictionary[new_key] = value elif value and mapped_dictionary[new_key]: # if both have values, choose which one to replace and warn #print('-W- Two possible values found for {}'.format(new_key)) #print(' Replacing {} with {}'.format(mapped_dictionary[new_key], value)) mapped_dictionary[new_key] = value # if there is no mapped_value already: else: mapped_dictionary[new_key] = value else: # if this line is left in, it gives everything from the original dictionary mapped_dictionary[key] = value return mapped_dictionary #mapped_pars = mapping(Pint_pars.pars, a_map) # mapping between SPD & Magic 2 spd = ['R_corr2', 'PCA_sigma_int_Free', 'PCA_sigma_max_Free', 'n_tail', 'delta_pal', 'DRAT_tail', 'MD_VDS', 'n_add', 'delta_AC', 'y_Arai_mean', 'MAD_Free', 'n_ptrm', 'DRAT', 'IZZI_MD', 'FRAC', 'CDRAT', 'Dec_Free', 'mean_DEV', 'DRATS', 'Z', 'max_DEV', 'fail_arai_beta_box_scatter', 'GAP-MAX', 'pTRM_MAD_Free', 'ptrms_dec_Free', 'MAD_Anc', 'fail_ptrm_beta_box_scatter', 'ptrms_angle_Free', 'scat_bounding_line_low', 'PCA_sigma_min_Free', 'B_anc', 'SCAT', 'R_det2', 'best_fit_vector_Free', 'specimen_b_beta', 'specimen_YT', 'delta_CK', 'lab_dc_field', 'Inc_Free', 'mean_DRAT', 'theta', 'max_ptrm_check', 'tmin', 'x_Arai_mean', 'fail_tail_beta_box_scatter', 'delta_TR', 'alpha', 'alpha_prime', 'specimen_fvds', 'specimen_b_sigma', 'specimen_b', 'specimen_g', 'specimen_f', 'tmax', 'specimen_int_n', 'specimen_q', 'DANG', 'ptrms_inc_Free', 'SSE', 'gamma', 'scat_bounding_line_high', 'specimen_k', 
'specimen_int_crm', 'specimen_dt', 'specimen_k_prime', 'specimen_k_prime_SSE'] magic = ['specimen_coeff_det_sq', 'specimen_PCA_sigma_int', 'specimen_PCA_sigma_max', 'specimen_int_ptrm_tail_n', 'specimen_dpal', 'specimen_tail_drat', 'specimen_md', 'specimen_ac_n', 'specimen_dac', 'specimen_cm_y', 'specimen_int_mad', 'specimen_int_ptrm_n', 'specimen_drat', 'specimen_z_md', 'specimen_frac', 'specimen_cdrat', 'specimen_dec', 'specimen_mdev', 'specimen_drats', 'specimen_z', 'specimen_maxdev', 'fail_arai_beta_box_scatter', 'specimen_gmax', 'specimen_ptrms_mad', 'specimen_ptrms_dec', 'specimen_int_mad_anc', 'fail_ptrm_beta_box_scatter', 'specimen_ptrms_angle', 'specimen_scat_bounding_line_low', 'specimen_PCA_sigma_min', 'specimen_int_uT', 'specimen_scat', 'specimen_r_sq', 'specimen_PCA_v1', 'specimen_b_beta', 'specimen_YT', 'specimen_dck', 'lab_dc_field', 'specimen_inc', 'specimen_mdrat', 'specimen_theta', 'specimen_ptrm', 'measurement_step_min', 'specimen_cm_x', 'fail_tail_beta_box_scatter', 'specimen_dtr', 'specimen_int_alpha', 'specimen_alpha_prime', 'specimen_fvds', 'specimen_b_sigma', 'specimen_b', 'specimen_g', 'specimen_f', 'measurement_step_max', 'specimen_int_n', 'specimen_q', 'specimen_int_dang', 'specimen_ptrms_inc', 'specimen_k_sse', 'specimen_gamma', 'specimen_scat_bounding_line_high', 'specimen_k', 'specimen_int_crm', 'specimen_dt', 'specimen_k_prime', 'specimen_k_prime_sse'] spd2magic_map = dict(list(zip(spd, magic))) magic2spd_map = dict(list(zip(magic, spd))) def cache_mappings(file_path): """ Make a full mapping for 2 --> 3 columns. Output the mapping to json in the specified file_path. Note: This file is currently called maps.py, full path is PmagPy/pmagpy/mapping/maps.py. Parameters ---------- file_path : string with full file path to dump mapping json. 
Returns --------- maps : nested dictionary with format {table_name: {magic2_col_name: magic3_col_name, ...}, ...} """ def get_2_to_3(dm_type, dm): table_names3_2_table_names2 = {'measurements': ['magic_measurements'], 'locations': ['er_locations'], 'sites': ['er_sites', 'pmag_sites'], 'samples': ['er_samples', 'pmag_samples'], 'specimens': ['er_specimens', 'pmag_specimens'], 'ages': ['er_ages'], 'criteria': ['pmag_criteria'], 'images': ['er_images'], 'contribution': []} table_names3 = table_names3_2_table_names2[dm_type] dictionary = {} for label, row in dm.iterrows(): # if there are one or more corresponding 2.5 columns: if isinstance(row['previous_columns'], list): for previous_values in row['previous_columns']: previous_table = previous_values['table'] previous_value = previous_values['column'] if previous_table in table_names3: add_to_dict(previous_value, label, dictionary) elif previous_table in ["pmag_results", "rmag_results"]: if label not in dictionary.values(): if previous_value not in dictionary.keys(): add_to_dict(previous_value, label, dictionary) return dictionary def add_to_dict(key, value, dictionary): if key in dictionary: if value != dictionary[key]: print('W- OVERWRITING') print('was:', key, dictionary[key]) print('now:', key, value) dictionary[key] = value # begin data_model = DataModel() maps = {} for table_name in data_model.dm: dm = data_model.dm[table_name] new_mapping = get_2_to_3(table_name, dm) maps[table_name] = new_mapping # write maps out to file f = open(file_path, 'w') f.write("all_maps = ") json.dump(maps, f) f.close() return maps # Mappings between magic2 and magic3 add_to_all = {'er_location_name': 'location', 'er_site_name': 'site', 'er_sample_name': 'sample', 'er_specimen_name': 'specimen', 'er_sample_names': 'samples', 'er_specimen_names': 'specimens'} # measurement data translation magic_measurements.txt -> measurements.txt meas_magic2_2_magic3_map = maps.all_maps['measurements'] meas_magic2_2_magic3_map.update(add_to_all) # measurement data translation measurements.txt -> magic_measurements.txt meas_magic3_2_magic2_map = {v: k for k, v in list(meas_magic2_2_magic3_map.items())} measurements = {'timestamp': 'measurement_date', 'specimen': 'er_specimen_name'} # 'number': 'measurement_number'} # treat_step_num: measurement_number meas_magic3_2_magic2_map.update(measurements) def get_thellier_gui_meas_mapping(input_df, output=2): """ Get the appropriate mapping for translating measurements in Thellier GUI. This requires special handling for treat_step_num/measurement/measurement_number. 
Parameters ---------- input_df : pandas DataFrame MagIC records output : int output to this MagIC data model (2 or 3) Output -------- mapping : dict (used in convert_meas_df_thellier_gui) """ if int(output) == 2: thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy() if 'treat_step_num' in input_df.columns: thellier_gui_meas3_2_meas2_map.update( {'treat_step_num': 'measurement_number'}) thellier_gui_meas3_2_meas2_map.pop('measurement') return thellier_gui_meas3_2_meas2_map # 2 --> 3 else: thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy() if 'measurement' in input_df.columns: thellier_gui_meas2_2_meas3_map.pop('measurement_number') try: res = int(input_df.iloc[0]['measurement_number']) if res < 100: thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num' except ValueError as ex: pass return thellier_gui_meas2_2_meas3_map def convert_meas_df_thellier_gui(meas_df_in, output): """ Take a measurement dataframe and convert column names from MagIC 2 --> 3 or vice versa. Use treat_step_num --> measurement_number if available, otherwise measurement --> measurement_number. Parameters ---------- meas_df_in : pandas DataFrame input dataframe with measurement data output : int output to MagIC 2 or MagIC 3 """ output = int(output) meas_mapping = get_thellier_gui_meas_mapping(meas_df_in, output) meas_df_out = meas_df_in.rename(columns=meas_mapping) if 'measurement' not in meas_df_out.columns: meas_df_out['measurement'] = meas_df_in['measurement'] return meas_df_out # specimen data translation pmag_speciemns,er_specimens -> specimens.txt spec_magic2_2_magic3_map = maps.all_maps['specimens'] spec_magic2_2_magic3_map.update(add_to_all) spec_magic3_2_magic2_map = {v: k for k, v in list(spec_magic2_2_magic3_map.items())} spec_magic2_2_magic3_map['specimen_inferred_age'] = 'age' specimens = {'external_database_ids': 'external_database_ids', 'dir_comp': 'specimen_comp_name', 'specimen': 'er_specimen_name'} spec_magic3_2_magic2_map.update(specimens) # sample data translation pmag_samples/er_samples => samples samp_magic2_2_magic3_map = maps.all_maps['samples'] samp_magic2_2_magic3_map.update(add_to_all) # sample data translation samples => pmag_samples/er_samples samp_magic3_2_magic2_map = {v: k for k, v in list(samp_magic2_2_magic3_map.items())} samp_magic2_2_magic3_map['sample_inferred_age'] = 'age' samples = {'specimens': 'er_specimen_names', 'dir_comp_name': 'sample_comp_name', 'timestamp': 'sample_date', 'external_database_ids': 'external_database_ids', 'core_depth': 'sample_core_depth', 'composite_depth': 'sample_composite_depth'} samp_magic3_2_magic2_map.update(samples) # site data translation pmag_sites,er_sites -> sites.txt site_magic2_2_magic3_map = maps.all_maps['sites'] site_magic2_2_magic3_map.update(add_to_all) # site data translation er_sites/pmag_sites --> sites site_magic3_2_magic2_map = {v: k for k, v in list(site_magic2_2_magic3_map.items())} sites = {'dir_comp_name': 'site_comp_name', 'specimens': 'er_specimen_names'} site_magic3_2_magic2_map.update(sites) # location data translation er_locations -> locations loc_magic2_2_magic3_map = maps.all_maps['locations'] loc_magic2_2_magic3_map.update(add_to_all) locations = {'location_begin_lat': 'lat_s', 'location_begin_lon': 'lon_e', 'location_end_lat': 'lat_n', 'location_end_lon': 'lon_w'} loc_magic2_2_magic3_map.update(locations) # location data translation locations -> er_locations loc_magic3_2_magic2_map = {v: k for k, v in list(loc_magic2_2_magic3_map.items())} locations = {'lat_s': 'location_begin_lat', 
'lat_n': 'location_end_lat', 'lon_e': 'location_begin_lon', 'lon_w': 'location_end_lon'} loc_magic3_2_magic2_map.update(locations) # anisotropy mapping aniso_magic3_2_magic2_map = {'specimen': 'er_specimen_name', 'aniso_type': 'anisotropy_type', 'description': 'result_description', 'aniso_ftest': 'anisotropy_ftest', 'aniso_ftest12': 'anisotropy_ftest12', 'aniso_ftest23': 'anisotropy_ftest23', 'aniso_s_mean': 'anisotropy_mean', 'aniso_s_n_measurements': 'anisotropy_n', 'aniso_s_sigma': 'anisotropy_sigma', 'aniso_s_unit': 'anisotropy_unit', 'aniso_tilt_correction': 'anisotropy_tilt_correction', "aniso_alt": 'anisotropy_alt', 'experiments': 'magic_experiment_names', 'sample': 'er_sample_name'} aniso_magic2_2_magic3_map = {'anisotropy_ftest23': 'aniso_ftest23', 'anisotropy_ftest': 'aniso_ftest', 'anisotropy_sigma': 'aniso_s_sigma', 'anisotropy_type': 'aniso_type', 'anisotropy_ftest12': 'aniso_ftest12', 'anisotropy_tilt_correction': 'aniso_tilt_correction', 'er_specimen_name': 'specimen', 'anisotropy_unit': 'aniso_s_unit', 'anisotropy_mean': 'aniso_s_mean', 'result_description': 'description', 'anisotropy_n': 'aniso_s_n_measurements', 'pmag_criteria_codes': 'criteria', 'result_quality': 'result_quality', 'anisotropy_alt': 'aniso_alt', 'magic_method_codes': 'method_codes', 'magic_experiment_names': 'experiments', 'er_sample_name': 'sample'} # images data translation er_images -> images image_magic2_2_magic3_map = maps.all_maps['images'] # images data translation images -> er_images image_magic2_2_magic_3_map = { v: k for (k, v) in list(image_magic2_2_magic3_map.items())} images = {'specimen': 'er_specimen_name', 'description': 'image_description', 'timestamp': 'image_date'} image_magic2_2_magic3_map.update(images) # ages data translation er_ages -> ages age_magic2_2_magic3_map = maps.all_maps['ages'] # images data translation images -> er_images age_magic2_2_magic_3_map = { v: k for (k, v) in list(age_magic2_2_magic3_map.items())} # images = {'specimen': 'er_specimen_name', 'description': 'image_description', # 'timestamp': 'image_date'} # image_magic2_2_magic3_map.update(images) # translation orientation format --> 3.0. # orientation headers: not all have a 3.0 sample equivalent (like mag_azimuth, for instance) # site_name sample_name mag_azimuth field_dip date lat long sample_lithology sample_type sample_class shadow_angle hhmm stratigraphic_height bedding_dip_direction bedding_dip GPS_baseline image_name image_look image_photographer participants method_codes site_description sample_description GPS_Az, sample_igsn, sample_texture, sample_cooling_rate, cooling_rate_corr, cooling_rate_mcd orient_magic_2_magic3_map = {"sample_name": "sample", "site_name": "site", "long": "lon", "sample_lithology": "lithologies", "sample_type": "geologic_types", "sample_class": "geologic_classes", "bedding_dip": "bed_dip", "field_dip": "dip", "bedding_dip_direction": "bed_dip_direction", "orientation_flag": "orientation_quality"} # 3.0 --> orientation format magic3_2_orient_magic_map = { v: k for (k, v) in list(orient_magic_2_magic3_map.items())} meas_magic2 = list(meas_magic3_2_magic2_map.values()) spec_magic2 = list(spec_magic3_2_magic2_map.values()) samp_magic2 = list(samp_magic3_2_magic2_map.values()) site_magic2 = list(site_magic3_2_magic2_map.values()) # meas_magic3 = meas_magic3_2_magic2_map.keys() # why are these here? 
spec_magic3 = list(spec_magic2_2_magic3_map.keys()) #samp_magic3 = samp_magic3_2_magic2_map.keys() site_magic3 = list(site_magic3_2_magic2_map.keys()) # Data conversion for specific types of data def convert_intensity_criteria(direction, crit): magic2 = ['specimen_coeff_det_sq', 'specimen_int_ptrm_tail_n', 'specimen_dpal', 'specimen_tail_drat', 'specimen_md', 'specimen_ac_n', 'specimen_dac', 'specimen_int_mad', 'specimen_int_ptrm_n', 'specimen_drat', 'specimen_z_md', 'specimen_frac', 'specimen_cdrat', 'specimen_dec', 'specimen_mdev', 'specimen_drats', 'specimen_z', 'specimen_maxdev', 'specimen_gmax', 'specimen_int_mad_anc', 'specimen_scat', 'specimen_r_sq', 'specimen_b_beta', 'specimen_dck', 'lab_dc_field', 'specimen_inc', 'specimen_mdrat', 'specimen_theta', 'specimen_ptrm', 'measurement_step_min', 'specimen_dtr', 'specimen_int_alpha', 'specimen_fvds', 'specimen_b_sigma', 'specimen_b', 'specimen_g', 'specimen_f', 'measurement_step_max', 'specimen_int_n', 'specimen_q', 'specimen_int_dang', 'specimen_k_sse', 'specimen_gamma', 'specimen_k', 'specimen_int_crm', 'specimen_dt', 'specimen_k_prime', 'specimen_k_prime_sse', 'sample_int_n', 'sample_int_sigma_perc', 'sample_int_sigma', 'site_int_n', 'site_int_sigma_perc', 'site_int_sigma', 'pmag_criteria_code', 'sample_aniso_mean', 'specimen_aniso_ftest_flag', 'anisotropy_alt', 'site_aniso_mean'] magic3 = ['specimens.int_r2_det', 'specimens.int_n_ptrm_tail', 'specimens.int_dpal', 'specimens.int_drat_tail', 'specimens.int_md', 'specimens.int_n_ac', 'specimens.int_dac', 'specimens.int_mad_free', 'specimens.int_n_ptrm', 'specimens.int_drat', 'specimens.int_z_md', 'specimens.int_frac', 'specimens.int_cdrat', 'specimens.dir_dec', 'specimens.int_mdev', 'specimens.int_drats', 'specimens.int_z', 'specimens.int_maxdev', 'specimens.int_gmax', 'specimens.int_mad_anc', 'specimens.int_scat', 'specimens.int_r2_corr', 'specimens.int_b_beta', 'specimens.int_dck', 'specimens.treat_dc_field', 'specimens.dir_inc', 'specimens.int_mdrat', 'specimens.int_theta', 'specimens.int_ptrm', 'specimens.meas_step_min', 'specimens.int_dtr', 'specimens.int_alpha', 'specimens.int_fvds', 'specimens.int_b_sigma', 'specimens.int_b', 'specimens.int_g', 'specimens.int_f', 'specimens.meas_step_max', 'specimens.int_n_measurements', 'specimens.int_q', 'specimens.int_dang', 'specimens.int_k_sse', 'specimens.int_gamma', 'specimens.int_k', 'specimens.int_crm', 'specimens.int_dt', 'specimens.int_k_prime', 'specimens.int_k_prime_sse', 'samples.int_n_specimens', 'samples.int_abs_sigma_perc', 'samples.int_abs_sigma', 'sites.int_n_specimens', 'sites.int_abs_sigma_perc', 'sites.int_abs_sigma', 'criterion', 'samples.int_corr_aniso_mean', 'specimens.aniso_ftest_flag', 'specimens.aniso_alt', 'sites.int_corr_aniso_mean'] if direction == 'magic2': if crit in magic3: return magic2[magic3.index(crit)] else: return "" else: if crit in magic2: return magic3[magic2.index(crit)] else: return crit def convert_direction_criteria(direction, crit): if direction == 'magic2': try: if 'specimens.' in crit: return spec_magic3_2_magic2_map[crit.lstrip('specimens.')] elif 'samples.' in crit: return samp_magic3_2_magic2_map[crit.lstrip('samples.')] elif 'sites.' in crit: return site_magic3_2_magic2_map[crit.lstrip('sites.')] else: return "" except KeyError as e: return "" else: try: if 'specimen' in crit: return 'specimens.' + spec_magic2_2_magic3_map[crit] elif 'sample' in crit: return 'samples.' + samp_magic2_2_magic3_map[crit] elif 'site' in crit: return 'sites.' 
+ site_magic2_2_magic3_map[crit] else: return "" except KeyError as e: return "" def convert_meas(direction, Rec): """ converts measurments tables from magic 2 to 3 (direction=magic3) or from model 3 to 2.5 (direction=magic2) [not available] """ if direction == 'magic3': columns = meas_magic2_2_magic3_map MeasRec = {} for key in columns: if key in list(Rec.keys()): # transfer info and change column name to data model 3.0 MeasRec[columns[key]] = Rec[key] return MeasRec else: # haven't added this way yet pass def convert_spec(direction, Rec): if direction == 'magic3': columns = spec_magic2_2_magic3_map SpecRec = {} for key in columns: if key in list(Rec.keys()): # transfer info and change column name to data model 3.0 SpecRec[columns[key]] = Rec[key] return SpecRec else: # haven't added this way yet pass def convert_samp(direction, Rec): if direction == 'magic3': columns = samp_magic2_2_magic3_map SampRec = {} for key in columns: if key in list(Rec.keys()): # transfer info and change column name to data model 3.0 SampRec[columns[key]] = Rec[key] return SampRec else: # haven't added this way yet pass def convert_site(direction, Rec): if direction == 'magic3': columns = site_magic2_2_magic3_map SiteRec = {} for key in columns: if key in list(Rec.keys()): # transfer info and change column name to data model 3.0 SiteRec[columns[key]] = Rec[key] return SiteRec else: # haven't added this way yet pass def convert_aniso(direction, AniSpec): if direction == 'magic2': columns = aniso_magic3_2_magic2_map AniRec = {} s_data = AniSpec['aniso_s'].split(':') for key in columns: if key in list(AniSpec.keys()): # transfer info and change column name to data model 2.5 AniRec[columns[key]] = AniSpec[key] AniRec['anisotropy_s1'] = s_data[0] # need to add these things AniRec['anisotropy_s2'] = s_data[1] AniRec['anisotropy_s3'] = s_data[2] AniRec['anisotropy_s4'] = s_data[3] AniRec['anisotropy_s5'] = s_data[4] AniRec['anisotropy_s6'] = s_data[5] AniRec['anisotropy_F_crit'] = "" if 'result_description' in list(AniSpec.keys()): result_description = AniSpec['result_description'].split(";") for description in result_description: if "Critical F" in description: desc = description.split(":") AniRec['anisotropy_F_crit'] = float(desc[1]) return AniRec # converted to 2.5 else: # upgrade to 3.0 columns = aniso_magic2_2_magic3_map # first fix aniso_s AniRec = {} for key in columns: if key in list(AniSpec.keys()): # transfer info and change column name to data model 3.0 AniRec[columns[key]] = AniSpec[key] s_string = "" s_string = s_string + str(AniSpec['anisotropy_s1']) + ' : ' s_string = s_string + str(AniSpec['anisotropy_s2']) + ' : ' s_string = s_string + str(AniSpec['anisotropy_s3']) + ' : ' s_string = s_string + str(AniSpec['anisotropy_s4']) + ' : ' s_string = s_string + str(AniSpec['anisotropy_s5']) + ' : ' s_string = s_string + str(AniSpec['anisotropy_s6']) AniRec['aniso_s'] = s_string # do V1, etc. 
here # V1: Anisotropy eigenparameters for the maximum eigenvalue (T1), a colon-delimited list of tau (T1), dec, inc, confidence ellipse type, and confidence ellipse parameters v_string = AniSpec['anisotropy_t1']+" : " + \ AniSpec['anisotropy_v1_dec']+" : "+AniSpec['anisotropy_v1_inc'] AniRec['aniso_v1'] = v_string v_string = AniSpec['anisotropy_t2']+" : " + \ AniSpec['anisotropy_v2_dec']+" : "+AniSpec['anisotropy_v2_inc'] AniRec['aniso_v2'] = v_string v_string = AniSpec['anisotropy_t3']+" : " + \ AniSpec['anisotropy_v3_dec']+" : "+AniSpec['anisotropy_v3_inc'] AniRec['aniso_v3'] = v_string return AniRec def convert_site_dm3_table_intensity(sites_df): """ Convert MagIC site headers to short/readable headers for a figure (used by ipmag.sites_extract) Intensity data only. Parameters ---------- sites_df : pandas DataFrame sites information Returns --------- int_df : pandas DataFrame intensity site data with easily readable headers """ # now for the intensities has_vadms, has_vdms = False, False if 'int_abs' not in sites_df: sites_df['int_abs'] = None if 'int_n_samples' not in sites_df: sites_df['int_n_samples'] = None int_df = sites_df.copy().dropna(subset=['int_abs']) int_df['int_n_samples'] = int_df['int_n_samples'].values.astype('int') if len(int_df) > 0: int_df['int_abs_uT'] = 1e6*int_df.int_abs.values # convert to uT int_df['int_abs_sigma_uT'] = 1e6 * \ int_df.int_abs_sigma.values # convert to uT int_df['int_abs_uT'] = int_df['int_abs_uT'].values.astype('int') int_df['int_abs_sigma_uT'] = int_df['int_abs_sigma_uT'].values.astype( 'int') int_df['int_abs_sigma_perc'] = int_df['int_abs_sigma_perc'].values.astype( 'int') IntCols = ["Site", "N", "B", "B sigma", "sigma (%)"] if 'vadm' in int_df.columns: test_vadm = int_df.dropna(subset=['vadm']) if len(test_vadm) > 0: has_vadms = True if 'vdm' in int_df.columns: test_vdm = int_df.dropna(subset=['vdm']) if len(test_vdm) > 0: has_vdms = True if has_vadms: IntCols.append("VADM") IntCols.append("VADM sigma") if has_vdms: IntCols.append("VDM") IntCols.append("VDM sigma") if not has_vadms and not has_vdms: int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT', 'int_abs_sigma_perc']] if has_vadms and not has_vdms: int_df.sort_values(by=['site', 'vadm'], ascending=False, inplace=True) int_df.drop_duplicates(subset=['int_abs_uT', 'site'], inplace=True) int_df['vadm_ZAm2'] = 1e-21*int_df.vadm.values int_df['vadm_sigma_ZAm2'] = 1e-21*int_df.vadm_sigma.values int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT', 'int_abs_sigma_perc', 'vadm_ZAm2', 'vadm_ZAm2_sigma']] if not has_vadms and has_vdms: int_df.sort_values(by=['site', 'vdm'], ascending=False, inplace=True) int_df.drop_duplicates(subset=['int_abs_uT', 'site'], inplace=True) int_df['vdm_ZAm2'] = 1e-21*int_df.vdm.values() int_df['vdm_sigma_ZAm2'] = 1e-21*int_df.vdm_sigma.values() int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT', 'int_abs_sigma_perc', 'vdm_ZAm2', 'vdm_ZAm2_sigma']] if has_vadms and has_vdms: int_df.sort_values(by=['site', 'vadm'], ascending=False, inplace=True) int_df.drop_duplicates(subset=['int_abs_uT', 'site'], inplace=True) int_df['vadm_ZAm2'] = 1e-21*int_df.vadm.values int_df['vadm_sigma_ZAm2'] = 1e-21*int_df.vadm_sigma.values int_df['vdm_ZAm2'] = 1e-21*int_df.vdm.values int_df['vdm_sigma_ZAm2'] = 1e-21*int_df.vdm_sigma.values int_df = int_df[['site', 'int_n_samples', 'int_abs_uT', 'int_abs_sigma_uT', 'int_abs_sigma_perc', 'vadm_ZAm2', 'vadm_sigma_ZAm2', 'vdm_ZAm2', 'vdm_sigma_ZAm2']] int_df.columns = IntCols 
int_df.sort_values(by=['Site'], inplace=True, ascending=True) int_df.fillna(value='', inplace=True) return int_df def convert_site_dm3_table_directions(sites_df): """ Convert MagIC site headers to short/readable headers for a figure (used by ipmag.sites_extract) Directional table only. Parameters ---------- sites_df : pandas DataFrame sites information Returns --------- dir_df : pandas DataFrame directional site data with easily readable headers """ # directional # do directional stuff first # a few things need cleaning up dir_df = sites_df.copy().dropna( subset=['dir_dec', 'dir_inc']) # delete blank directions # sort by absolute value of vgp_lat in order to eliminate duplicate rows for # directions put in by accident on intensity rows DirCols = ["Site", "TC (%)", "Dec.", "Inc.", "N", "k ", "R", "a95", "VGP Lat", "VGP Long"] columns = ['site', 'dir_tilt_correction', 'dir_dec', 'dir_inc', 'dir_n_samples', 'dir_k', 'dir_r', 'dir_alpha95', 'vgp_lat', 'vgp_lon'] dm3_to_readable = dict(zip(columns, DirCols)) if len(dir_df) > 0: for col in ['dir_n_samples', 'dir_tilt_correction']: if col in dir_df.columns: dir_df[col] = dir_df[col].values.astype('int') columns = dir_df.columns.intersection(columns) has_vgps = False if 'vgp_lat' in dir_df.columns: test_vgp = dir_df.dropna(subset=['vgp_lat', 'vgp_lon']) if len(test_vgp) > 0: has_vgps = True if has_vgps: dir_df['vgp_lat_abs'] = dir_df.vgp_lat.abs() dir_df.sort_values(by=['site', 'vgp_lat_abs'], ascending=False, inplace=True) dir_df = dir_df[columns] # this will take the first record for each site's directions (including VGP lat if present) dir_df.drop_duplicates( subset=['dir_dec', 'dir_inc', 'site'], inplace=True) else: dir_df.drop_duplicates( subset=['dir_dec', 'dir_inc', 'site'], inplace=True) dir_df = dir_df[['site', 'dir_tilt_correction', 'dir_dec', 'dir_inc', 'dir_n_samples', 'dir_k', 'dir_r', 'dir_alpha95']] dir_df.rename(dm3_to_readable, axis='columns', inplace=True) dir_df.sort_values(by=['Site'], inplace=True, ascending=True) new_cols = list(dir_df.columns.drop('Site')) dir_df = dir_df[['Site'] + new_cols] return dir_df def convert_specimen_dm3_table(spec_df): """ Convert MagIC specimen headers to short/readable headers for a figure (used by ipmag.specimens_extract) """ from pmagpy import data_model3 as dm3 # get the data model dm = dm3.DataModel() # get the specimen absolute intensity column headers meas_group = ['meas_step_min', 'meas_step_max', 'meas_step_unit'] pint_group = dm.get_group_headers('specimens', 'Paleointensity') arai_group = dm.get_group_headers( 'specimens', 'Paleointensity Arai Statistics') # filter out the relative intensity stuff dm3_columns = list(meas_group)+list(pint_group)+list(arai_group) dm3_columns = filter(lambda x: '_rel' not in x, dm3_columns) # apply to specimen dataframe meas_group_columns = ['meas_step_min', 'meas_step_max', 'meas_step_unit'] pint_group_columns = list(spec_df.columns.intersection(pint_group)) arai_group_columns = list(spec_df.columns.intersection(arai_group)) columns = ['specimen', 'sample']+meas_group_columns + \ pint_group_columns+arai_group_columns spec_df = spec_df.copy()[columns] muT_list = ['int_abs', 'int_abs_sigma', 'int_treat_dc_field'] meas_list = ['meas_step_min', 'meas_step_max'] for el in muT_list: if el in columns: spec_df[el] = 1e6*spec_df[el] if 'meas_step_unit' in columns: for el in meas_list: spec_df.loc[spec_df['meas_step_unit'] == 'K', el] = spec_df[el]-273 spec_df.loc[spec_df['meas_step_unit'] == 'T', el] = 1e3*spec_df[el] spec_df.loc[spec_df['meas_step_unit'] == 'K', 
'meas_step_unit'] = 'C' spec_df.loc[spec_df['meas_step_unit'] == 'T', 'meas_step_unit'] = 'mT' spec_df['meas_step_min'] = spec_df['meas_step_min'].astype('int') spec_df['meas_step_max'] = spec_df['meas_step_max'].astype('int') dm3_columns = ['meas_step_min', 'meas_step_max', 'meas_step_unit', 'int_abs', 'int_abs_sigma', 'int_abs_sigma_perc', 'int_n_measurements', 'int_corr', 'int_corr_cooling_rate', 'int_corr_aniso', 'int_corr_nlt', 'int_corr_arm', 'int_viscosity_index', 'int_treat_dc_field', 'int_b', 'int_b_sigma', 'int_b_beta', 'int_rsc', 'int_f', 'int_fvds', 'int_frac', 'int_g', 'int_gmax', 'int_q', 'int_w', 'int_k', 'int_k_sse', 'int_k_prime', 'int_k_prime_sse', 'int_scat', 'int_r2_corr', 'int_r2_det', 'int_z', 'int_z_md'] table_columns = ['Min', 'Max', 'Units', 'B (uT)', 'sigma', 'percent', 'N', 'c/u', 'CR', 'Aniso.', 'NLT', 'AARM', 'VI', 'Lab Field', 'b', 'b sigma', 'beta', 'R2', 'f', 'fvds', 'frac', 'g', 'gap max', 'q', 'w', 'k', 'k sse', 'k prime', 'k prime sse', 'scat', 'r2 corr', 'r2 det', 'Z', 'Z md'] spec_mapping = dict(list(zip(dm3_columns, table_columns))) spec_df_out = spec_df.rename(columns=spec_mapping) if 'N' in spec_df_out.columns: spec_df_out['N'] = spec_df_out['N'].astype('int') if 'Lab Field' in spec_df_out.columns: spec_df_out['Lab Field'] = spec_df_out['Lab Field'].round().astype('int') return spec_df_out
bsd-3-clause
beni55/dadi
dadi/Plotting.py
11
26633
""" Routines for Plotting comparisons between model and data. These can serve as inspiration for custom routines for one's own purposes. Note that all the plotting is done with pylab. To see additional pylab methods: "import pylab; help(pylab)". Pylab's many functions are documented at http://matplotlib.sourceforge.net/contents.html """ import matplotlib import pylab import numpy #: Custom ticks that label only the lowest and highest bins in an FS plot. class _sfsTickLocator(matplotlib.ticker.Locator): def __call__(self): 'Return the locations of the ticks' try: vmin, vmax = self.axis.get_view_interval() dmin, dmax = self.axis.get_data_interval() except AttributeError: self.verify_intervals() vmin, vmax = self.viewInterval.get_bounds() dmin, dmax = self.dataInterval.get_bounds() tmin = max(vmin, dmin) tmax = min(vmax, dmax) return numpy.array([round(tmin)+0.5, round(tmax)-0.5]) #: Custom tick formatter _ctf = matplotlib.ticker.FuncFormatter(lambda x,pos: '%i' % (x-0.4)) from dadi import Numerics, Inference def plot_1d_fs(fs, fig_num=None): """ Plot a 1-dimensional frequency spectrum. fs: 1-dimensional Spectrum fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. Note that all the plotting is done with pylab. To see additional pylab methods: "import pylab; help(pylab)". Pylab's many functions are documented at http://matplotlib.sourceforge.net/contents.html """ if fig_num is None: fig = pylab.gcf() else: fig = pylab.figure(fig_num, figsize=(7,7)) fig.clear() ax = fig.add_subplot(1,1,1) ax.semilogy(fs, '-ob') ax.set_xlim(0, fs.sample_sizes[0]) fig.show() def plot_1d_comp_multinom(model, data, fig_num=None, residual='Anscombe', plot_masked=False): """ Mulitnomial comparison between 1d model and data. model: 1-dimensional model SFS data: 1-dimensional data SFS fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. residual: 'Anscombe' for Anscombe residuals, which are more normally distributed for Poisson sampling. 'linear' for the linear residuals, which can be less biased. plot_masked: Additionally plots (in open circles) results for points in the model or data that were masked. This comparison is multinomial in that it rescales the model to optimally fit the data. """ model = Inference.optimally_scaled_sfs(model, data) plot_1d_comp_Poisson(model, data, fig_num, residual, plot_masked) def plot_1d_comp_Poisson(model, data, fig_num=None, residual='Anscombe', plot_masked=False): """ Poisson comparison between 1d model and data. model: 1-dimensional model SFS data: 1-dimensional data SFS fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. residual: 'Anscombe' for Anscombe residuals, which are more normally distributed for Poisson sampling. 'linear' for the linear residuals, which can be less biased. plot_masked: Additionally plots (in open circles) results for points in the model or data that were masked. 
""" if fig_num is None: f = pylab.gcf() else: f = pylab.figure(fig_num, figsize=(7,7)) pylab.clf() if data.folded and not model.folded: model = model.fold() masked_model, masked_data = Numerics.intersect_masks(model, data) ax = pylab.subplot(2,1,1) pylab.semilogy(masked_data, '-ob') pylab.semilogy(masked_model, '-or') if plot_masked: pylab.semilogy(masked_data.data, '--ob', mfc='w', zorder=-100) pylab.semilogy(masked_model.data, '--or', mfc='w', zorder=-100) pylab.subplot(2,1,2, sharex = ax) if residual == 'Anscombe': resid = Inference.Anscombe_Poisson_residual(masked_model, masked_data) elif residual == 'linear': resid = Inference.linear_Poisson_residual(masked_model, masked_data) else: raise ValueError("Unknown class of residual '%s'." % residual) pylab.plot(resid, '-og') if plot_masked: pylab.plot(resid.data, '--og', mfc='w', zorder=-100) ax.set_xlim(0, data.shape[0]-1) pylab.show() def plot_single_2d_sfs(sfs, vmin=None, vmax=None, ax=None, pop_ids=None, extend='neither', colorbar=True): """ Heatmap of single 2d SFS. If vmax is greater than a factor of 10, plot on log scale. sfs: SFS to plot vmin: Values in sfs below vmin are masked in plot. vmax: Values in sfs above vmax saturate the color spectrum. ax: Axes object to plot into. If None, the result of pylab.gca() is used. pop_ids: If not None, override pop_ids stored in Spectrum. extend: Whether the colorbar should have 'extension' arrows. See help(pylab.colorbar) for more details. colorbar: Should we plot a colorbar? """ if ax is None: ax = pylab.gca() if vmin is None: vmin = sfs.min() if vmax is None: vmax = sfs.max() pylab.cm.hsv.set_under('w') if vmax / vmin > 10: # Under matplotlib 1.0.1, default LogFormatter omits some tick lines. # This works more consistently. norm = matplotlib.colors.LogNorm(vmin=vmin*(1-1e-3), vmax=vmax*(1+1e-3)) format = matplotlib.ticker.LogFormatterMathtext() else: norm = matplotlib.colors.Normalize(vmin=vmin*(1-1e-3), vmax=vmax*(1+1e-3)) format = None mappable=ax.pcolor(numpy.ma.masked_where(sfs<vmin, sfs), cmap=pylab.cm.hsv, shading='flat', norm=norm) ax.figure.colorbar(mappable, extend=extend, format=format) if not colorbar: del ax.figure.axes[-1] ax.plot([0,sfs.shape[1]],[0, sfs.shape[0]], '-k', lw=0.2) if pop_ids is None: if sfs.pop_ids is not None: pop_ids = sfs.pop_ids else: pop_ids = ['pop0','pop1'] ax.set_ylabel(pop_ids[0], horizontalalignment='left') ax.set_xlabel(pop_ids[1], verticalalignment='bottom') ax.xaxis.set_major_formatter(_ctf) ax.xaxis.set_major_locator(_sfsTickLocator()) ax.yaxis.set_major_formatter(_ctf) ax.yaxis.set_major_locator(_sfsTickLocator()) for tick in ax.xaxis.get_ticklines() + ax.yaxis.get_ticklines(): tick.set_visible(False) ax.set_xlim(0, sfs.shape[1]) ax.set_ylim(0, sfs.shape[0]) def plot_2d_resid(resid, resid_range=None, ax=None, pop_ids=None, extend='neither', colorbar=True): """ Linear heatmap of 2d residual array. sfs: Residual array to plot. resid_range: Values > resid range or < resid_range saturate the color spectrum. ax: Axes object to plot into. If None, the result of pylab.gca() is used. pop_ids: If not None, override pop_ids stored in Spectrum. extend: Whether the colorbar should have 'extension' arrows. See help(pylab.colorbar) for more details. colorbar: Should we plot a colorbar? 
""" if ax is None: ax = pylab.gca() if resid_range is None: resid_range = abs(resid).max() mappable=ax.pcolor(resid, cmap=pylab.cm.RdBu_r, vmin=-resid_range, vmax=resid_range, shading='flat') cbticks = [-resid_range, 0, resid_range] format = matplotlib.ticker.FormatStrFormatter('%.2g') ax.figure.colorbar(mappable, ticks=cbticks, format=format, extend=extend) if not colorbar: del ax.figure.axes[-1] ax.plot([0,resid.shape[1]],[0, resid.shape[0]], '-k', lw=0.2) if pop_ids is None: if resid.pop_ids is not None: pop_ids = resid.pop_ids else: pop_ids = ['pop0','pop1'] ax.set_ylabel(pop_ids[0], horizontalalignment='left') ax.set_xlabel(pop_ids[1], verticalalignment='bottom') ax.xaxis.set_major_formatter(_ctf) ax.xaxis.set_major_locator(_sfsTickLocator()) ax.yaxis.set_major_formatter(_ctf) ax.yaxis.set_major_locator(_sfsTickLocator()) for tick in ax.xaxis.get_ticklines() + ax.yaxis.get_ticklines(): tick.set_visible(False) ax.set_xlim(0, resid.shape[1]) ax.set_ylim(0, resid.shape[0]) # Used to determine whether colorbars should have 'extended' arrows _extend_mapping = {(True, True): 'neither', (False, True): 'min', (True, False): 'max', (False, False): 'both'} def plot_2d_comp_multinom(model, data, vmin=None, vmax=None, resid_range=None, fig_num=None, pop_ids=None, residual='Anscombe', adjust=True): """ Mulitnomial comparison between 2d model and data. model: 2-dimensional model SFS data: 2-dimensional data SFS vmin, vmax: Minimum and maximum values plotted for sfs are vmin and vmax respectively. resid_range: Residual plot saturates at +- resid_range. fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. pop_ids: If not None, override pop_ids stored in Spectrum. residual: 'Anscombe' for Anscombe residuals, which are more normally distributed for Poisson sampling. 'linear' for the linear residuals, which can be less biased. adjust: Should method use automatic 'subplots_adjust'? For advanced manipulation of plots, it may be useful to make this False. This comparison is multinomial in that it rescales the model to optimally fit the data. """ model = Inference.optimally_scaled_sfs(model, data) plot_2d_comp_Poisson(model, data, vmin=vmin, vmax=vmax, resid_range=resid_range, fig_num=fig_num, pop_ids=pop_ids, residual=residual, adjust=adjust) def plot_2d_comp_Poisson(model, data, vmin=None, vmax=None, resid_range=None, fig_num=None, pop_ids=None, residual='Anscombe', adjust=True): """ Poisson comparison between 2d model and data. model: 2-dimensional model SFS data: 2-dimensional data SFS vmin, vmax: Minimum and maximum values plotted for sfs are vmin and vmax respectively. resid_range: Residual plot saturates at +- resid_range. fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. pop_ids: If not None, override pop_ids stored in Spectrum. residual: 'Anscombe' for Anscombe residuals, which are more normally distributed for Poisson sampling. 'linear' for the linear residuals, which can be less biased. adjust: Should method use automatic 'subplots_adjust'? For advanced manipulation of plots, it may be useful to make this False. 
""" if data.folded and not model.folded: model = model.fold() masked_model, masked_data = Numerics.intersect_masks(model, data) if fig_num is None: f = pylab.gcf() else: f = pylab.figure(fig_num, figsize=(7,7)) pylab.clf() if adjust: pylab.subplots_adjust(bottom=0.07, left=0.07, top=0.94, right=0.95, hspace=0.26, wspace=0.26) max_toplot = max(masked_model.max(), masked_data.max()) min_toplot = min(masked_model.min(), masked_data.min()) if vmax is None: vmax = max_toplot if vmin is None: vmin = min_toplot extend = _extend_mapping[vmin <= min_toplot, vmax >= max_toplot] if pop_ids is not None: data_pop_ids = model_pop_ids = resid_pop_ids = pop_ids if len(pop_ids) != 2: raise ValueError('pop_ids must be of length 2.') else: data_pop_ids = masked_data.pop_ids model_pop_ids = masked_model.pop_ids if masked_model.pop_ids is None: model_pop_ids = data_pop_ids if model_pop_ids == data_pop_ids: resid_pop_ids = model_pop_ids else: resid_pop_ids = None ax = pylab.subplot(2,2,1) plot_single_2d_sfs(masked_data, vmin=vmin, vmax=vmax, pop_ids=data_pop_ids, colorbar=False) ax.set_title('data') ax2 = pylab.subplot(2,2,2, sharex=ax, sharey=ax) plot_single_2d_sfs(masked_model, vmin=vmin, vmax=vmax, pop_ids=model_pop_ids, extend=extend ) ax2.set_title('model') if residual == 'Anscombe': resid = Inference.Anscombe_Poisson_residual(masked_model, masked_data, mask=vmin) elif residual == 'linear': resid = Inference.linear_Poisson_residual(masked_model, masked_data, mask=vmin) else: raise ValueError("Unknown class of residual '%s'." % residual) if resid_range is None: resid_range = max((abs(resid.max()), abs(resid.min()))) resid_extend = _extend_mapping[-resid_range <= resid.min(), resid_range >= resid.max()] ax3 = pylab.subplot(2,2,3, sharex=ax, sharey=ax) plot_2d_resid(resid, resid_range, pop_ids=resid_pop_ids, extend=resid_extend) ax3.set_title('residuals') ax = pylab.subplot(2,2,4) flatresid = numpy.compress(numpy.logical_not(resid.mask.ravel()), resid.ravel()) ax.hist(flatresid, bins=20, normed=True) ax.set_title('residuals') ax.set_yticks([]) pylab.show() def plot_3d_comp_multinom(model, data, vmin=None, vmax=None, resid_range=None, fig_num=None, pop_ids=None, residual='Anscombe', adjust=True): """ Multinomial comparison between 3d model and data. model: 3-dimensional model SFS data: 3-dimensional data SFS vmin, vmax: Minimum and maximum values plotted for sfs are vmin and vmax respectively. resid_range: Residual plot saturates at +- resid_range. fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. pop_ids: If not None, override pop_ids stored in Spectrum. residual: 'Anscombe' for Anscombe residuals, which are more normally distributed for Poisson sampling. 'linear' for the linear residuals, which can be less biased. adjust: Should method use automatic 'subplots_adjust'? For advanced manipulation of plots, it may be useful to make this False. This comparison is multinomial in that it rescales the model to optimally fit the data. """ model = Inference.optimally_scaled_sfs(model, data) plot_3d_comp_Poisson(model, data, vmin=vmin, vmax=vmax, resid_range=resid_range, fig_num=fig_num, pop_ids=pop_ids, residual=residual, adjust=adjust) def plot_3d_comp_Poisson(model, data, vmin=None, vmax=None, resid_range=None, fig_num=None, pop_ids=None, residual='Anscombe', adjust=True): """ Poisson comparison between 3d model and data. model: 3-dimensional model SFS data: 3-dimensional data SFS vmin, vmax: Minimum and maximum values plotted for sfs are vmin and vmax respectively. 
resid_range: Residual plot saturates at +- resid_range. fig_num: Clear and use figure fig_num for display. If None, an new figure window is created. pop_ids: If not None, override pop_ids stored in Spectrum. residual: 'Anscombe' for Anscombe residuals, which are more normally distributed for Poisson sampling. 'linear' for the linear residuals, which can be less biased. adjust: Should method use automatic 'subplots_adjust'? For advanced manipulation of plots, it may be useful to make this False. """ if data.folded and not model.folded: model = model.fold() masked_model, masked_data = Numerics.intersect_masks(model, data) if fig_num is None: f = pylab.gcf() else: f = pylab.figure(fig_num, figsize=(8,10)) pylab.clf() if adjust: pylab.subplots_adjust(bottom=0.07, left=0.07, top=0.95, right=0.95) modelmax = max(masked_model.sum(axis=sax).max() for sax in range(3)) datamax = max(masked_data.sum(axis=sax).max() for sax in range(3)) modelmin = min(masked_model.sum(axis=sax).min() for sax in range(3)) datamin = min(masked_data.sum(axis=sax).min() for sax in range(3)) max_toplot = max(modelmax, datamax) min_toplot = min(modelmin, datamin) if vmax is None: vmax = max_toplot if vmin is None: vmin = min_toplot extend = _extend_mapping[vmin <= min_toplot, vmax >= max_toplot] # Calculate the residuals if residual == 'Anscombe': resids = [Inference.\ Anscombe_Poisson_residual(masked_model.sum(axis=2-sax), masked_data.sum(axis=2-sax), mask=vmin) for sax in range(3)] elif residual == 'linear': resids =[Inference.\ linear_Poisson_residual(masked_model.sum(axis=2-sax), masked_data.sum(axis=2-sax), mask=vmin) for sax in range(3)] else: raise ValueError("Unknown class of residual '%s'." % residual) min_resid = min([r.min() for r in resids]) max_resid = max([r.max() for r in resids]) if resid_range is None: resid_range = max((abs(max_resid), abs(min_resid))) resid_extend = _extend_mapping[-resid_range <= min_resid, resid_range >= max_resid] if pop_ids is not None: if len(pop_ids) != 3: raise ValueError('pop_ids must be of length 3.') data_ids = model_ids = resid_ids = pop_ids else: data_ids = masked_data.pop_ids model_ids = masked_model.pop_ids if model_ids is None: model_ids = data_ids if model_ids == data_ids: resid_ids = model_ids else: resid_ids = None for sax in range(3): marg_data = masked_data.sum(axis=2-sax) marg_model = masked_model.sum(axis=2-sax) curr_ids = [] for ids in [data_ids, model_ids, resid_ids]: if ids is None: ids = ['pop0', 'pop1', 'pop2'] if ids is not None: ids = list(ids) del ids[2-sax] curr_ids.append(ids) ax = pylab.subplot(4,3,sax+1) plot_colorbar = (sax == 2) plot_single_2d_sfs(marg_data, vmin=vmin, vmax=vmax, pop_ids=curr_ids[0], extend=extend, colorbar=plot_colorbar) pylab.subplot(4,3,sax+4, sharex=ax, sharey=ax) plot_single_2d_sfs(marg_model, vmin=vmin, vmax=vmax, pop_ids=curr_ids[1], extend=extend, colorbar=False) resid = resids[sax] pylab.subplot(4,3,sax+7, sharex=ax, sharey=ax) plot_2d_resid(resid, resid_range, pop_ids=curr_ids[2], extend=resid_extend, colorbar=plot_colorbar) ax = pylab.subplot(4,3,sax+10) flatresid = numpy.compress(numpy.logical_not(resid.mask.ravel()), resid.ravel()) ax.hist(flatresid, bins=20, normed=True) ax.set_yticks([]) pylab.show() def plot_3d_spectrum(fs, fignum=None, vmin=None, vmax=None, pop_ids=None): """ Logarithmic heatmap of single 3d FS. Note that this method is slow, because it relies on matplotlib's software rendering. For faster and better looking plots, use plot_3d_spectrum_mayavi. 
fs: FS to plot vmin: Values in fs below vmin are masked in plot. vmax: Values in fs above vmax saturate the color spectrum. fignum: Figure number to plot into. If None, a new figure will be created. pop_ids: If not None, override pop_ids stored in Spectrum. """ import mpl_toolkits.mplot3d as mplot3d fig = pylab.figure(fignum) ax = mplot3d.Axes3D(fig) if vmin is None: vmin = fs.min() if vmax is None: vmax = fs.max() # Which entries should I plot? toplot = numpy.logical_not(fs.mask) toplot = numpy.logical_and(toplot, fs.data >= vmin) # Figure out the color mapping. normalized = (numpy.log(fs)-numpy.log(vmin))\ /(numpy.log(vmax)-numpy.log(vmin)) normalized = numpy.minimum(normalized, 1) colors = pylab.cm.hsv(normalized) # We draw by calculating which faces are visible and including each as a # polygon. polys, polycolors = [],[] for ii in range(fs.shape[0]): for jj in range(fs.shape[1]): for kk in range(fs.shape[2]): if not toplot[ii,jj,kk]: continue if kk < fs.shape[2]-1 and toplot[ii,jj,kk+1]: pass else: polys.append([[ii-0.5,jj+0.5,kk+0.5],[ii+0.5,jj+0.5,kk+0.5], [ii+0.5,jj-0.5,kk+0.5],[ii-0.5,jj-0.5,kk+0.5]] ) polycolors.append(colors[ii,jj,kk]) if kk > 0 and toplot[ii,jj,kk-1]: pass else: polys.append([[ii-0.5,jj+0.5,kk-0.5],[ii+0.5,jj+0.5,kk-0.5], [ii+0.5,jj-0.5,kk-0.5],[ii-0.5,jj-0.5,kk-0.5]] ) polycolors.append(colors[ii,jj,kk]) if jj < fs.shape[1]-1 and toplot[ii,jj+1,kk]: pass else: polys.append([[ii-0.5,jj+0.5,kk+0.5],[ii+0.5,jj+0.5,kk+0.5], [ii+0.5,jj+0.5,kk-0.5],[ii-0.5,jj+0.5,kk-0.5]] ) polycolors.append(colors[ii,jj,kk]) if jj > 0 and toplot[ii,jj-1,kk]: pass else: polys.append([[ii-0.5,jj-0.5,kk+0.5],[ii+0.5,jj-0.5,kk+0.5], [ii+0.5,jj-0.5,kk-0.5],[ii-0.5,jj-0.5,kk-0.5]] ) polycolors.append(colors[ii,jj,kk]) if ii < fs.shape[0]-1 and toplot[ii+1,jj,kk]: pass else: polys.append([[ii+0.5,jj-0.5,kk+0.5],[ii+0.5,jj+0.5,kk+0.5], [ii+0.5,jj+0.5,kk-0.5],[ii+0.5,jj-0.5,kk-0.5]] ) polycolors.append(colors[ii,jj,kk]) if ii > 0 and toplot[ii-1,jj,kk]: pass else: polys.append([[ii-0.5,jj-0.5,kk+0.5],[ii-0.5,jj+0.5,kk+0.5], [ii-0.5,jj+0.5,kk-0.5],[ii-0.5,jj-0.5,kk-0.5]] ) polycolors.append(colors[ii,jj,kk]) polycoll = mplot3d.art3d.Poly3DCollection(polys, facecolor=polycolors, edgecolor='k', linewidths=0.5) ax.add_collection(polycoll) # Set the limits ax.set_xlim3d(-0.5,fs.shape[0]-0.5) ax.set_ylim3d(-0.5,fs.shape[1]-0.5) ax.set_zlim3d(-0.5,fs.shape[2]-0.5) if pop_ids is None: if fs.pop_ids is not None: pop_ids = fs.pop_ids else: pop_ids = ['pop0','pop1','pop2'] ax.set_xlabel(pop_ids[0], horizontalalignment='left') ax.set_ylabel(pop_ids[1], verticalalignment='bottom') ax.set_zlabel(pop_ids[2], verticalalignment='bottom') # XXX: I can't set the axis ticks to be just the endpoints. pylab.show() def plot_3d_spectrum_mayavi(fs, fignum=None, vmin=None, vmax=None, pop_ids=None): """ Logarithmic heatmap of single 3d FS. This method relies on MayaVi2's mlab interface. See http://code.enthought.com/projects/mayavi/docs/development/html/mayavi/mlab.html . To edit plot properties, click leftmost icon in the toolbar. If you get an ImportError upon calling this function, it is likely that you don't have mayavi installed. fs: FS to plot vmin: Values in fs below vmin are masked in plot. vmax: Values in fs above vmax saturate the color spectrum. fignum: Figure number to plot into. If None, a new figure will be created. Note that these are MayaVi figures, which are separate from matplotlib figures. pop_ids: If not None, override pop_ids stored in Spectrum. 
""" from enthought.mayavi import mlab fig = mlab.figure(fignum, bgcolor=(1,1,1)) mlab.clf(fig) if vmin is None: vmin = fs.min() if vmax is None: vmax = fs.max() # Which entries should I plot? toplot = numpy.logical_not(fs.mask) toplot = numpy.logical_and(toplot, fs.data >= vmin) # For the color mapping normalized = (numpy.log(fs)-numpy.log(vmin))\ /(numpy.log(vmax)-numpy.log(vmin)) normalized = numpy.minimum(normalized, 1) xs,ys,zs = numpy.indices(fs.shape) flat_xs = xs.flatten() flat_ys = ys.flatten() flat_zs = zs.flatten() flat_toplot = toplot.flatten() mlab.barchart(flat_xs[flat_toplot], flat_ys[flat_toplot], flat_zs[flat_toplot], normalized.flatten()[flat_toplot], colormap='hsv', scale_mode='none', lateral_scale=1, figure=fig) if pop_ids is None: if fs.pop_ids is not None: pop_ids = fs.pop_ids else: pop_ids = ['pop0','pop1','pop2'] a = mlab.axes(xlabel=pop_ids[0],ylabel=pop_ids[1],zlabel=pop_ids[2], figure=fig, color=(0,0,0)) a.axes.label_format = "" a.title_text_property.color = (0,0,0) mlab.text3d(fs.sample_sizes[0],fs.sample_sizes[1],fs.sample_sizes[2]+1, '(%i,%i,%i)'%tuple(fs.sample_sizes), scale=0.75, figure=fig, color=(0,0,0)) mlab.view(azimuth=-40, elevation=65, distance='auto', focalpoint='auto') mlab.show()
bsd-3-clause
CarterBain/AlephNull
tests/test_data_util.py
4
3092
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from collections import deque

import numpy as np
import pandas as pd
import pandas.util.testing as tm

from zipline.utils.data import RollingPanel


class TestRollingPanel(unittest.TestCase):

    def test_basics(self):
        items = ['foo', 'bar', 'baz']
        minor = ['A', 'B', 'C', 'D']
        window = 10

        rp = RollingPanel(window, items, minor, cap_multiple=2)

        dates = pd.date_range('2000-01-01', periods=30, tz='utc')

        major_deque = deque()

        frames = {}
        for i in range(30):
            frame = pd.DataFrame(np.random.randn(3, 4), index=items,
                                 columns=minor)
            date = dates[i]

            rp.add_frame(date, frame)

            frames[date] = frame
            major_deque.append(date)

            if i >= window:
                major_deque.popleft()

        result = rp.get_current()
        expected = pd.Panel(frames, items=list(major_deque),
                            major_axis=items, minor_axis=minor)

        tm.assert_panel_equal(result, expected.swapaxes(0, 1))


def f(option='clever', n=500, copy=False):
    items = range(5)
    minor = range(20)
    window = 100
    periods = n

    dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
    frames = {}

    if option == 'clever':
        rp = RollingPanel(window, items, minor, cap_multiple=2)
        major_deque = deque()

        dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
                             index=items, columns=minor)

        for i in range(periods):
            frame = dummy * (1 + 0.001 * i)
            date = dates[i]

            rp.add_frame(date, frame)

            frames[date] = frame
            major_deque.append(date)

            if i >= window:
                del frames[major_deque.popleft()]

        result = rp.get_current()

        if copy:
            result = result.copy()
    else:
        major_deque = deque()

        dummy = pd.DataFrame(np.random.randn(len(items), len(minor)),
                             index=items, columns=minor)

        for i in range(periods):
            frame = dummy * (1 + 0.001 * i)
            date = dates[i]

            frames[date] = frame
            major_deque.append(date)

            if i >= window:
                del frames[major_deque.popleft()]

        result = pd.Panel(frames, items=list(major_deque),
                          major_axis=items, minor_axis=minor)
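# A condensed sketch of the rolling-window pattern exercised by the test above,
# assuming zipline.utils.data.RollingPanel is importable; shapes, dates and the
# window length are illustrative.
import numpy as np
import pandas as pd

from zipline.utils.data import RollingPanel

items, minor, window = ['foo', 'bar'], ['A', 'B', 'C'], 5
rp = RollingPanel(window, items, minor, cap_multiple=2)

for date in pd.date_range('2000-01-01', periods=12, tz='utc'):
    # Each frame is indexed by `items` and columned by the minor axis.
    rp.add_frame(date, pd.DataFrame(np.random.randn(len(items), len(minor)),
                                    index=items, columns=minor))

current = rp.get_current()  # a Panel holding only the most recent `window` frames
print(current.shape)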
apache-2.0
cdegroc/scikit-learn
sklearn/ensemble/tests/test_forest.py
1
10632
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, Brian Holt # License: BSD 3 import numpy as np from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_equal from numpy.testing import assert_almost_equal from nose.tools import assert_true from sklearn.grid_search import GridSearchCV from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn import datasets # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() np.random.seed([1]) perm = np.random.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = np.random.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] def test_classification_toy(): """Check classification on a toy dataset.""" # Random forest clf = RandomForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = RandomForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # Extra-trees clf = ExtraTreesClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ExtraTreesClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) def test_iris(): """Check consistency on dataset iris.""" for c in ("gini", "entropy"): # Random forest clf = RandomForestClassifier(n_estimators=10, criterion=c, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert score > 0.9, "Failed with criterion %s and score = %f" % (c, score) clf = RandomForestClassifier(n_estimators=10, criterion=c, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert score > 0.5, "Failed with criterion %s and score = %f" % (c, score) # Extra-trees clf = ExtraTreesClassifier(n_estimators=10, criterion=c, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert score > 0.9, "Failed with criterion %s and score = %f" % (c, score) clf = ExtraTreesClassifier(n_estimators=10, criterion=c, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert score > 0.9, "Failed with criterion %s and score = %f" % (c, score) def test_boston(): """Check consistency on dataset boston house prices.""" for c in ("mse",): # Random forest clf = RandomForestRegressor(n_estimators=5, criterion=c, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert score < 3, ("Failed with max_features=None, " "criterion %s and score = %f" % (c, score)) clf = RandomForestRegressor(n_estimators=5, criterion=c, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert score < 3, ("Failed with 
max_features=None, " "criterion %s and score = %f" % (c, score)) # Extra-trees clf = ExtraTreesRegressor(n_estimators=5, criterion=c, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert score < 3, ("Failed with max_features=None, " "criterion %s and score = %f" % (c, score)) clf = ExtraTreesRegressor(n_estimators=5, criterion=c, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert score < 3, ("Failed with max_features=None, " "criterion %s and score = %f" % (c, score)) def test_probability(): """Predict probabilities.""" # Random forest clf = RandomForestClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) # Extra-trees clf = ExtraTreesClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) def test_importances(): """Check variable importances.""" X, y = datasets.make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) clf = RandomForestClassifier(n_estimators=10, compute_importances=True) clf.fit(X, y) importances = clf.feature_importances_ n_important = sum(importances > 0.1) assert_equal(importances.shape[0], 10) assert_equal(n_important, 3) X_new = clf.transform(X, threshold="mean") assert_true(0 < X_new.shape[1] < X.shape[1]) def test_oob_score_classification(): """Check that oob prediction is as acurate as usual prediction on the training set. Not really a good test that prediction is independent.""" clf = RandomForestClassifier(oob_score=True) clf.fit(X, y) training_score = clf.score(X, y) assert_almost_equal(training_score, clf.oob_score_) def test_oob_score_regression(): """Check that oob prediction is pessimistic estimate. 
Not really a good test that prediction is independent.""" clf = RandomForestRegressor(n_estimators=30, oob_score=True) n_samples = boston.data.shape[0] clf.fit(boston.data[:n_samples / 2, :], boston.target[:n_samples / 2]) test_score = clf.score(boston.data[n_samples / 2:, :], boston.target[n_samples / 2:]) assert_true(test_score > clf.oob_score_) assert_true(clf.oob_score_ > .8) def test_gridsearch(): """Check that base trees can be grid-searched.""" # Random forest forest = RandomForestClassifier() parameters = {'n_estimators': (1, 2), 'max_depth': (1, 2)} clf = GridSearchCV(forest, parameters) clf.fit(iris.data, iris.target) # Extra-trees forest = ExtraTreesClassifier() parameters = {'n_estimators': (1, 2), 'max_depth': (1, 2)} clf = GridSearchCV(forest, parameters) clf.fit(iris.data, iris.target) def test_parallel(): """Check parallel computations.""" # Classification forest = RandomForestClassifier(n_estimators=10, n_jobs=3, random_state=0) forest.fit(iris.data, iris.target) assert_true(10 == len(forest)) forest.set_params(n_jobs=1) y1 = forest.predict(iris.data) forest.set_params(n_jobs=2) y2 = forest.predict(iris.data) assert_array_equal(y1, y2) # Regression forest = RandomForestRegressor(n_estimators=10, n_jobs=3, random_state=0) forest.fit(boston.data, boston.target) assert_true(10 == len(forest)) forest.set_params(n_jobs=1) y1 = forest.predict(boston.data) forest.set_params(n_jobs=2) y2 = forest.predict(boston.data) assert_array_almost_equal(y1, y2, 10) # Use all cores on the classification dataset forest = RandomForestClassifier(n_jobs=-1) forest.fit(iris.data, iris.target) def test_pickle(): """Check pickability.""" import pickle # Random forest obj = RandomForestClassifier() obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_true(score == score2) obj = RandomForestRegressor() obj.fit(boston.data, boston.target) score = obj.score(boston.data, boston.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(boston.data, boston.target) assert_true(score == score2) # Extra-trees obj = ExtraTreesClassifier() obj.fit(iris.data, iris.target) score = obj.score(iris.data, iris.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(iris.data, iris.target) assert_true(score == score2) obj = ExtraTreesRegressor() obj.fit(boston.data, boston.target) score = obj.score(boston.data, boston.target) s = pickle.dumps(obj) obj2 = pickle.loads(s) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(boston.data, boston.target) assert_true(score == score2) if __name__ == "__main__": import nose nose.runmodule()
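# A short standalone sketch of the out-of-bag workflow exercised in
# test_oob_score_classification above, against the same scikit-learn-era API;
# the synthetic dataset and forest size are illustrative.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=500, n_features=10, n_informative=3,
                           random_state=0)

clf = RandomForestClassifier(n_estimators=50, oob_score=True, random_state=0)
clf.fit(X, y)
print(clf.oob_score_)  # OOB accuracy, close to the training score on this easy problem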
bsd-3-clause
paolodedios/tensorflow
tensorflow/python/kernel_tests/constant_op_eager_test.py
6
22324
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ConstantOp.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes as dtypes_lib from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.util import compat # TODO(josh11b): add tests with lists/tuples, Shape. # TODO(ashankar): Collapse with tests in constant_op_test.py and use something # like the test_util.run_in_graph_and_eager_modes decorator to confirm # equivalence between graph and eager execution. class ConstantTest(test.TestCase): def _testCpu(self, x): np_ans = np.array(x) with context.device("/device:CPU:0"): tf_ans = ops.convert_to_tensor(x).numpy() if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]: self.assertAllClose(np_ans, tf_ans) else: self.assertAllEqual(np_ans, tf_ans) def _testGpu(self, x): device = test_util.gpu_device_name() if device: np_ans = np.array(x) with context.device(device): tf_ans = ops.convert_to_tensor(x).numpy() if np_ans.dtype in [np.float32, np.float64, np.complex64, np.complex128]: self.assertAllClose(np_ans, tf_ans) else: self.assertAllEqual(np_ans, tf_ans) def _testAll(self, x): self._testCpu(x) self._testGpu(x) def testFloat(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32)) self._testAll( np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float32)) self._testAll(np.empty((2, 0, 5)).astype(np.float32)) orig = [-1.0, 2.0, 0.0] tf_ans = constant_op.constant(orig) self.assertEqual(dtypes_lib.float32, tf_ans.dtype) self.assertAllClose(np.array(orig), tf_ans.numpy()) # Mix floats and ints orig = [-1.5, 2, 0] tf_ans = constant_op.constant(orig) self.assertEqual(dtypes_lib.float32, tf_ans.dtype) self.assertAllClose(np.array(orig), tf_ans.numpy()) orig = [-5, 2.5, 0] tf_ans = constant_op.constant(orig) self.assertEqual(dtypes_lib.float32, tf_ans.dtype) self.assertAllClose(np.array(orig), tf_ans.numpy()) # Mix floats and ints that don't fit in int32 orig = [1, 2**42, 0.5] tf_ans = constant_op.constant(orig) self.assertEqual(dtypes_lib.float32, tf_ans.dtype) self.assertAllClose(np.array(orig), tf_ans.numpy()) def testDouble(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float64)) self._testAll( np.random.normal(size=30).reshape([2, 3, 5]).astype(np.float64)) self._testAll(np.empty((2, 0, 5)).astype(np.float64)) orig = [-5, 2.5, 0] tf_ans = constant_op.constant(orig, dtypes_lib.float64) self.assertEqual(dtypes_lib.float64, tf_ans.dtype) self.assertAllClose(np.array(orig), tf_ans.numpy()) # 
This integer is not exactly representable as a double, gets rounded. tf_ans = constant_op.constant(2**54 + 1, dtypes_lib.float64) self.assertEqual(2**54, tf_ans.numpy()) # This integer is larger than all non-infinite numbers representable # by a double, raises an exception. with self.assertRaisesRegex(ValueError, "out-of-range integer"): constant_op.constant(10**310, dtypes_lib.float64) def testInt32(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int32)) self._testAll( (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int32)) self._testAll(np.empty((2, 0, 5)).astype(np.int32)) self._testAll([-1, 2]) def testInt64(self): self._testAll(np.arange(-15, 15).reshape([2, 3, 5]).astype(np.int64)) self._testAll( (100 * np.random.normal(size=30)).reshape([2, 3, 5]).astype(np.int64)) self._testAll(np.empty((2, 0, 5)).astype(np.int64)) # Should detect out of range for int32 and use int64 instead. orig = [2, 2**48, -2**48] tf_ans = constant_op.constant(orig) self.assertEqual(dtypes_lib.int64, tf_ans.dtype) self.assertAllClose(np.array(orig), tf_ans.numpy()) # Out of range for an int64 with self.assertRaisesRegex(ValueError, "out-of-range integer"): constant_op.constant([2**72]) def testComplex64(self): self._testAll( np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5]).astype(np.complex64)) self._testAll( np.complex(1, 2) * np.random.normal(size=30).reshape([2, 3, 5]).astype(np.complex64)) self._testAll(np.empty((2, 0, 5)).astype(np.complex64)) def testComplex128(self): self._testAll( np.complex(1, 2) * np.arange(-15, 15).reshape([2, 3, 5 ]).astype(np.complex128)) self._testAll( np.complex(1, 2) * np.random.normal(size=30).reshape( [2, 3, 5]).astype(np.complex128)) self._testAll(np.empty((2, 0, 5)).astype(np.complex128)) @test_util.disable_tfrt("support creating string tensors from empty " "numpy arrays.") def testString(self): val = [compat.as_bytes(str(x)) for x in np.arange(-15, 15)] self._testCpu(np.array(val).reshape([2, 3, 5])) self._testCpu(np.empty((2, 0, 5)).astype(np.str_)) def testStringWithNulls(self): val = ops.convert_to_tensor(b"\0\0\0\0").numpy() self.assertEqual(len(val), 4) self.assertEqual(val, b"\0\0\0\0") val = ops.convert_to_tensor(b"xx\0xx").numpy() self.assertEqual(len(val), 5) self.assertAllEqual(val, b"xx\0xx") nested = [[b"\0\0\0\0", b"xx\0xx"], [b"\0_\0_\0_\0", b"\0"]] val = ops.convert_to_tensor(nested).numpy() # NOTE(mrry): Do not use assertAllEqual, because it converts nested to a # numpy array, which loses the null terminators. 
self.assertEqual(val.tolist(), nested) def testStringConstantOp(self): s = constant_op.constant("uiuc") self.assertEqual(s.numpy().decode("utf-8"), "uiuc") s_array = constant_op.constant(["mit", "stanford"]) self.assertAllEqual(s_array.numpy(), ["mit", "stanford"]) with ops.device("/cpu:0"): s = constant_op.constant("cmu") self.assertEqual(s.numpy().decode("utf-8"), "cmu") s_array = constant_op.constant(["berkeley", "ucla"]) self.assertAllEqual(s_array.numpy(), ["berkeley", "ucla"]) def testExplicitShapeNumPy(self): c = constant_op.constant( np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32), shape=[2, 3, 5]) self.assertEqual(c.get_shape(), [2, 3, 5]) def testImplicitShapeNumPy(self): c = constant_op.constant( np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32)) self.assertEqual(c.get_shape(), [2, 3, 5]) def testExplicitShapeList(self): c = constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[7]) self.assertEqual(c.get_shape(), [7]) def testExplicitShapeFill(self): c = constant_op.constant(12, shape=[7]) self.assertEqual(c.get_shape(), [7]) self.assertAllEqual([12, 12, 12, 12, 12, 12, 12], c.numpy()) def testExplicitShapeReshape(self): c = constant_op.constant( np.arange(-15, 15).reshape([2, 3, 5]).astype(np.float32), shape=[5, 2, 3]) self.assertEqual(c.get_shape(), [5, 2, 3]) def testImplicitShapeList(self): c = constant_op.constant([1, 2, 3, 4, 5, 6, 7]) self.assertEqual(c.get_shape(), [7]) def testExplicitShapeNumber(self): c = constant_op.constant(1, shape=[1]) self.assertEqual(c.get_shape(), [1]) def testImplicitShapeNumber(self): c = constant_op.constant(1) self.assertEqual(c.get_shape(), []) def testShapeTooBig(self): with self.assertRaises(TypeError): constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[10]) def testShapeTooSmall(self): with self.assertRaises(TypeError): constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5]) def testShapeWrong(self): with self.assertRaisesRegex(TypeError, None): constant_op.constant([1, 2, 3, 4, 5, 6, 7], shape=[5]) def testShape(self): self._testAll(constant_op.constant([1]).get_shape()) def testDimension(self): x = constant_op.constant([1]).shape[0] self._testAll(x) def testDimensionList(self): x = [constant_op.constant([1]).shape[0]] self._testAll(x) # Mixing with regular integers is fine too self._testAll([1] + x) self._testAll(x + [1]) def testDimensionTuple(self): x = constant_op.constant([1]).shape[0] self._testAll((x,)) self._testAll((1, x)) self._testAll((x, 1)) def testInvalidLength(self): class BadList(list): def __init__(self): super(BadList, self).__init__([1, 2, 3]) # pylint: disable=invalid-length-returned def __len__(self): # pylint: disable=invalid-length-returned return -1 with self.assertRaisesRegex(ValueError, "should return >= 0"): constant_op.constant([BadList()]) with self.assertRaisesRegex(ValueError, "mixed types"): constant_op.constant([1, 2, BadList()]) with self.assertRaisesRegex(ValueError, "should return >= 0"): constant_op.constant(BadList()) with self.assertRaisesRegex(ValueError, "should return >= 0"): constant_op.constant([[BadList(), 2], 3]) with self.assertRaisesRegex(ValueError, "should return >= 0"): constant_op.constant([BadList(), [1, 2, 3]]) with self.assertRaisesRegex(ValueError, "should return >= 0"): constant_op.constant([BadList(), []]) # TODO(allenl, josh11b): These cases should return exceptions rather than # working (currently shape checking only checks the first element of each # sequence recursively). Maybe the first one is fine, but the second one # silently truncating is rather bad. 
# with self.assertRaisesRegex(ValueError, "should return >= 0"): # constant_op.constant([[3, 2, 1], BadList()]) # with self.assertRaisesRegex(ValueError, "should return >= 0"): # constant_op.constant([[], BadList()]) def testSparseValuesRaiseErrors(self): with self.assertRaisesRegex(ValueError, "non-rectangular Python sequence"): constant_op.constant([[1, 2], [3]], dtype=dtypes_lib.int32) with self.assertRaisesRegex(ValueError, None): constant_op.constant([[1, 2], [3]]) with self.assertRaisesRegex(ValueError, None): constant_op.constant([[1, 2], [3], [4, 5]]) # TODO(ashankar): This test fails with graph construction since # tensor_util.make_tensor_proto (invoked from constant_op.constant) # does not handle iterables (it relies on numpy conversion). # For consistency, should graph construction handle Python objects # that implement the sequence protocol (but not numpy conversion), # or should eager execution fail on such sequences? def testCustomSequence(self): # This is inspired by how many objects in pandas are implemented: # - They implement the Python sequence protocol # - But may raise a KeyError on __getitem__(self, 0) # See https://github.com/tensorflow/tensorflow/issues/20347 class MySeq(object): def __getitem__(self, key): if key != 1 and key != 3: raise KeyError(key) return key def __len__(self): return 2 def __iter__(self): l = list([1, 3]) return l.__iter__() self.assertAllEqual([1, 3], self.evaluate(constant_op.constant(MySeq()))) class AsTensorTest(test.TestCase): def testAsTensorForTensorInput(self): t = constant_op.constant(10.0) x = ops.convert_to_tensor(t) self.assertIs(t, x) def testAsTensorForNonTensorInput(self): x = ops.convert_to_tensor(10.0) self.assertTrue(isinstance(x, ops.EagerTensor)) class ZerosTest(test.TestCase): def _Zeros(self, shape): ret = array_ops.zeros(shape) self.assertEqual(shape, ret.get_shape()) return ret.numpy() def testConst(self): self.assertTrue( np.array_equal(self._Zeros([2, 3]), np.array([[0] * 3] * 2))) def testScalar(self): self.assertEqual(0, self._Zeros([])) self.assertEqual(0, self._Zeros(())) scalar = array_ops.zeros(constant_op.constant([], dtype=dtypes_lib.int32)) self.assertEqual(0, scalar.numpy()) def testDynamicSizes(self): np_ans = np.array([[0] * 3] * 2) # Creates a tensor of 2 x 3. d = array_ops.fill([2, 3], 12., name="fill") # Constructs a tensor of zeros of the same dimensions as "d". z = array_ops.zeros(array_ops.shape(d)) out = z.numpy() self.assertAllEqual(np_ans, out) self.assertShapeEqual(np_ans, d) self.assertShapeEqual(np_ans, z) def testDtype(self): d = array_ops.fill([2, 3], 12., name="fill") self.assertEqual(d.get_shape(), [2, 3]) # Test default type for both constant size and dynamic size z = array_ops.zeros([2, 3]) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.numpy(), np.zeros([2, 3])) z = array_ops.zeros(array_ops.shape(d)) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.numpy(), np.zeros([2, 3])) # Test explicit type control for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64, dtypes_lib.bool, # TODO(josh11b): Support string type here. 
# dtypes_lib.string ]: z = array_ops.zeros([2, 3], dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) z_value = z.numpy() self.assertFalse(np.any(z_value)) self.assertEqual((2, 3), z_value.shape) z = array_ops.zeros(array_ops.shape(d), dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) z_value = z.numpy() self.assertFalse(np.any(z_value)) self.assertEqual((2, 3), z_value.shape) class ZerosLikeTest(test.TestCase): def _compareZeros(self, dtype, use_gpu): # Creates a tensor of non-zero values with shape 2 x 3. # NOTE(kearnes): The default numpy dtype associated with tf.string is # np.object (and can't be changed without breaking a lot things), which # causes a TypeError in constant_op.constant below. Here we catch the # special case of tf.string and set the numpy dtype appropriately. if dtype == dtypes_lib.string: numpy_dtype = np.string_ else: numpy_dtype = dtype.as_numpy_dtype d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype) # Constructs a tensor of zeros of the same dimensions and type as "d". z_var = array_ops.zeros_like(d) # Test that the type is correct self.assertEqual(z_var.dtype, dtype) # Test that the shape is correct self.assertEqual([2, 3], z_var.get_shape()) # Test that the value is correct z_value = z_var.numpy() self.assertFalse(np.any(z_value)) self.assertEqual((2, 3), z_value.shape) @test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.") def testZerosLikeCPU(self): for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64, # TODO(josh11b): Support string type here. # dtypes_lib.string ]: self._compareZeros(dtype, use_gpu=False) @test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.") def testZerosLikeGPU(self): for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.bool, dtypes_lib.int64, # TODO(josh11b): Support string type here. # dtypes_lib.string ]: self._compareZeros(dtype, use_gpu=True) @test_util.disable_tfrt("b/169112823: unsupported dtype for Op:ZerosLike.") def testZerosLikeDtype(self): # Make sure zeros_like works even for dtypes that cannot be cast between shape = (3, 5) dtypes = np.float32, np.complex64 for in_type in dtypes: x = np.arange(15).astype(in_type).reshape(*shape) for out_type in dtypes: y = array_ops.zeros_like(x, dtype=out_type).numpy() self.assertEqual(y.dtype, out_type) self.assertEqual(y.shape, shape) self.assertAllEqual(y, np.zeros(shape, dtype=out_type)) class OnesTest(test.TestCase): def _Ones(self, shape): ret = array_ops.ones(shape) self.assertEqual(shape, ret.get_shape()) return ret.numpy() def testConst(self): self.assertTrue(np.array_equal(self._Ones([2, 3]), np.array([[1] * 3] * 2))) def testScalar(self): self.assertEqual(1, self._Ones([])) self.assertEqual(1, self._Ones(())) scalar = array_ops.ones(constant_op.constant([], dtype=dtypes_lib.int32)) self.assertEqual(1, scalar.numpy()) def testDynamicSizes(self): np_ans = np.array([[1] * 3] * 2) # Creates a tensor of 2 x 3. d = array_ops.fill([2, 3], 12., name="fill") # Constructs a tensor of ones of the same dimensions as "d". 
z = array_ops.ones(array_ops.shape(d)) out = z.numpy() self.assertAllEqual(np_ans, out) self.assertShapeEqual(np_ans, d) self.assertShapeEqual(np_ans, z) def testDtype(self): d = array_ops.fill([2, 3], 12., name="fill") self.assertEqual(d.get_shape(), [2, 3]) # Test default type for both constant size and dynamic size z = array_ops.ones([2, 3]) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.numpy(), np.ones([2, 3])) z = array_ops.ones(array_ops.shape(d)) self.assertEqual(z.dtype, dtypes_lib.float32) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.numpy(), np.ones([2, 3])) # Test explicit type control for dtype in (dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64, dtypes_lib.bool): z = array_ops.ones([2, 3], dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.numpy(), np.ones([2, 3])) z = array_ops.ones(array_ops.shape(d), dtype=dtype) self.assertEqual(z.dtype, dtype) self.assertEqual([2, 3], z.get_shape()) self.assertAllEqual(z.numpy(), np.ones([2, 3])) class OnesLikeTest(test.TestCase): def testOnesLike(self): for dtype in [ dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int32, dtypes_lib.uint8, dtypes_lib.int16, dtypes_lib.int8, dtypes_lib.complex64, dtypes_lib.complex128, dtypes_lib.int64 ]: numpy_dtype = dtype.as_numpy_dtype # Creates a tensor of non-zero values with shape 2 x 3. d = constant_op.constant(np.ones((2, 3), dtype=numpy_dtype), dtype=dtype) # Constructs a tensor of zeros of the same dimensions and type as "d". z_var = array_ops.ones_like(d) # Test that the type is correct self.assertEqual(z_var.dtype, dtype) z_value = z_var.numpy() # Test that the value is correct self.assertTrue(np.array_equal(z_value, np.array([[1] * 3] * 2))) self.assertEqual([2, 3], z_var.get_shape()) class FillTest(test.TestCase): def _compare(self, dims, val, np_ans, use_gpu): ctx = context.context() device = "GPU:0" if (use_gpu and ctx.num_gpus()) else "CPU:0" with ops.device(device): tf_ans = array_ops.fill(dims, val, name="fill") out = tf_ans.numpy() self.assertAllClose(np_ans, out) def _compareAll(self, dims, val, np_ans): self._compare(dims, val, np_ans, False) self._compare(dims, val, np_ans, True) def testFillFloat(self): np_ans = np.array([[3.1415] * 3] * 2).astype(np.float32) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillDouble(self): np_ans = np.array([[3.1415] * 3] * 2).astype(np.float64) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillInt32(self): np_ans = np.array([[42] * 3] * 2).astype(np.int32) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillInt64(self): np_ans = np.array([[-42] * 3] * 2).astype(np.int64) self._compareAll([2, 3], np_ans[0][0], np_ans) def testFillComplex64(self): np_ans = np.array([[0.15] * 3] * 2).astype(np.complex64) self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False) def testFillComplex128(self): np_ans = np.array([[0.15] * 3] * 2).astype(np.complex128) self._compare([2, 3], np_ans[0][0], np_ans, use_gpu=False) def testFillString(self): np_ans = np.array([[b"yolo"] * 3] * 2) tf_ans = array_ops.fill([2, 3], np_ans[0][0], name="fill").numpy() self.assertAllEqual(np_ans, tf_ans) def testFillNegative(self): for shape in (-1,), (2, -1), (-1, 2), (-2), (-3): with self.assertRaises(errors_impl.InvalidArgumentError): array_ops.fill(shape, 7) def testShapeFunctionEdgeCases(self): 
# Non-vector dimensions. with self.assertRaises(errors_impl.InvalidArgumentError): array_ops.fill([[0, 1], [2, 3]], 1.0) # Non-scalar value. with self.assertRaises(errors_impl.InvalidArgumentError): array_ops.fill([3, 2], [1.0, 2.0]) if __name__ == "__main__": test.main()
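# A minimal eager-mode sketch of the dtype-inference and fill behaviour covered by
# the tests above, written against the public tf.* aliases of the same ops; the
# values are illustrative.
import numpy as np
import tensorflow as tf

a = tf.constant([-1.5, 2, 0])   # mixed floats/ints default to float32
b = tf.constant([2, 2**48])     # ints outside the int32 range promote to int64
print(a.dtype, b.dtype)

c = tf.constant(12, shape=[7])  # a scalar with an explicit shape acts like fill
z = tf.zeros_like(np.ones((2, 3), dtype=np.float64))
print(c.numpy(), z.dtype)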
apache-2.0
henzer/EMOTRIX
emotrix/Main.py
2
4069
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
from RawData import RawData
from HeadsetEmotiv import HeadsetEmotiv
from emotrix import Emotrix
from BlockData import BlockData
from TimeBuffer import TimeBuffer
from Bracelet import Bracelet
import logging
import time
import Tkinter


class Main(object):

    def __init__(self):
        # Creation of the HeadsetEmotiv object
        time = 5
        self.buffer_emotion = TimeBuffer(time)
        self.e = Emotrix()
        self.e.training2()
        print "Iniciando la lectura desde Emotiv"
        self.he = HeadsetEmotiv(time)
        self.he.start()

        self.root = Tkinter.Tk()
        self.root.title('EMOTRIX')
        self.boton1 = Tkinter.Button(self.root, text="EMOTRIX", command=self.show_emotrix, width=50, height=5)
        self.boton2 = Tkinter.Button(self.root, text="EMOTIV", command=self.show_emotiv, width=50, height=5)
        self.boton3 = Tkinter.Button(self.root, text="PULSERA", command=self.show_pulsera, width=50, height=5)
        self.boton4 = Tkinter.Button(self.root, text="DETECTAR", command=self.show_emotion, width=50, height=5)
        self.boton1.grid(row=1, column=1)
        self.boton2.grid(row=2, column=1)
        self.boton3.grid(row=3, column=1)
        self.boton4.grid(row=4, column=1)
        self.root.mainloop()

    def show_emotiv(self):
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(1, 1, 1)
        ani = animation.FuncAnimation(self.fig, self.animate, interval=10)
        plt.show()

    def show_emotrix(self):
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(1, 1, 1)
        ani = animation.FuncAnimation(self.fig, self.animate, interval=10)
        plt.show()

    def show_pulsera(self):
        print "Iniciando Matias"
        puerto = 'COM3'
        self.bracelet = Bracelet(logging.INFO)
        try:
            self.bracelet.connect(puerto, 115200)
        except Exception, e:
            raise e
        self.bracelet.startReading(persist_data=False)
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(1, 1, 1)
        ani = animation.FuncAnimation(self.fig, self.animate_pulsera, interval=10)
        plt.show()

    def show_emotion(self):
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(1, 1, 1)
        ani = animation.FuncAnimation(self.fig, self.animate_emotion, interval=10)
        plt.show()

    def animate_emotion(self, i):
        b = BlockData(f3=self.he.f3.getAll(), f4=self.he.f4.getAll(),
                      af3=self.he.af3.getAll(), af4=self.he.af4.getAll())
        result = self.e.detect_emotion(b)
        print result
        if result[0] == 'HAPPY':
            self.buffer_emotion.insert(-50)
        elif result[0] == 'SAD':
            self.buffer_emotion.insert(50)
        else:
            self.buffer_emotion.insert(0)
        self.ax1.clear()
        self.ax1.set_title("Deteccion de emociones")
        self.ax1.set_xlabel('Tiempo')
        self.ax1.set_ylabel('Emociones')
        self.ax1.text(3, 55, "Felicidad", fontsize=15)
        self.ax1.text(3, 0, "Neutral", fontsize=15)
        self.ax1.text(3, -60, "Tristeza", fontsize=15)
        self.ax1.set_ylim([-100, 100])
        self.ax1.plot(self.buffer_emotion.getAll(), marker="o")

    def animate(self, i):
        self.ax1.clear()
        self.ax1.set_title("Electrodos F3, F4, AF3, AF4")
        self.ax1.set_xlabel('Tiempo')
        self.ax1.set_ylabel('Amplitud')
        self.ax1.plot(self.he.f3.getAll(), "r")
        self.ax1.plot(self.he.f4.getAll(), "b")
        self.ax1.plot(self.he.af3.getAll(), "y")
        self.ax1.plot(self.he.af4.getAll(), "k")

    def animate_pulsera(self, i):
        self.ax1.clear()
        self.ax1.set_title("Brazalete")
        self.ax1.set_xlabel('Tiempo')
        self.ax1.set_ylabel('Amplitud')
        self.ax1.set_ylim([300, 800])
        self.ax1.plot(self.bracelet.device_buffer.getAll(), "r")

    def stop(self):
        plt.close()


main = Main()
# main.show()
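# A stripped-down sketch of the live-plot pattern Main relies on, using only
# matplotlib's public FuncAnimation API; the plain list below stands in for the
# TimeBuffer / device buffers read in the animate_* callbacks.
import random

import matplotlib.pyplot as plt
import matplotlib.animation as animation

samples = []

fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)

def animate(i):
    samples.append(random.randint(300, 800))  # fake bracelet reading
    ax.clear()
    ax.set_ylim([300, 800])
    ax.plot(samples[-200:], "r")  # redraw only the most recent samples

ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()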
bsd-2-clause
Sentient07/scikit-learn
sklearn/pipeline.py
13
30670
""" The :mod:`sklearn.pipeline` module implements utilities to build a composite estimator, as a chain of transforms and estimators. """ # Author: Edouard Duchesnay # Gael Varoquaux # Virgile Fritsch # Alexandre Gramfort # Lars Buitinck # License: BSD from collections import defaultdict from abc import ABCMeta, abstractmethod import numpy as np from scipy import sparse from .base import clone, BaseEstimator, TransformerMixin from .externals.joblib import Parallel, delayed, Memory from .externals import six from .utils import tosequence from .utils.metaestimators import if_delegate_has_method __all__ = ['Pipeline', 'FeatureUnion'] class _BasePipeline(six.with_metaclass(ABCMeta, BaseEstimator)): """Handles parameter management for classifiers composed of named steps. """ @abstractmethod def __init__(self): pass def _replace_step(self, steps_attr, name, new_val): # assumes `name` is a valid step name new_steps = getattr(self, steps_attr)[:] for i, (step_name, _) in enumerate(new_steps): if step_name == name: new_steps[i] = (name, new_val) break setattr(self, steps_attr, new_steps) def _get_params(self, steps_attr, deep=True): out = super(_BasePipeline, self).get_params(deep=False) if not deep: return out steps = getattr(self, steps_attr) out.update(steps) for name, estimator in steps: if estimator is None: continue for key, value in six.iteritems(estimator.get_params(deep=True)): out['%s__%s' % (name, key)] = value return out def _set_params(self, steps_attr, **params): # Ensure strict ordering of parameter setting: # 1. All steps if steps_attr in params: setattr(self, steps_attr, params.pop(steps_attr)) # 2. Step replacement step_names, _ = zip(*getattr(self, steps_attr)) for name in list(six.iterkeys(params)): if '__' not in name and name in step_names: self._replace_step(steps_attr, name, params.pop(name)) # 3. Step parameters and other initilisation arguments super(_BasePipeline, self).set_params(**params) return self def _validate_names(self, names): if len(set(names)) != len(names): raise ValueError('Names provided are not unique: ' '{0!r}'.format(list(names))) invalid_names = set(names).intersection(self.get_params(deep=False)) if invalid_names: raise ValueError('Step names conflict with constructor arguments: ' '{0!r}'.format(sorted(invalid_names))) invalid_names = [name for name in names if '__' in name] if invalid_names: raise ValueError('Step names must not contain __: got ' '{0!r}'.format(invalid_names)) class Pipeline(_BasePipeline): """Pipeline of transforms with a final estimator. Sequentially apply a list of transforms and a final estimator. Intermediate steps of the pipeline must be 'transforms', that is, they must implement fit and transform methods. The final estimator only needs to implement fit. The transformers in the pipeline can be cached using ```memory`` argument. The purpose of the pipeline is to assemble several steps that can be cross-validated together while setting different parameters. For this, it enables setting parameters of the various steps using their names and the parameter name separated by a '__', as in the example below. A step's estimator may be replaced entirely by setting the parameter with its name to another estimator, or a transformer removed by setting to None. Read more in the :ref:`User Guide <pipeline>`. Parameters ---------- steps : list List of (name, transform) tuples (implementing fit/transform) that are chained, in the order in which they are chained, with the last object an estimator. 
memory : Instance of joblib.Memory or string, optional (default=None) Used to caching the fitted transformers of the transformer of the pipeline. By default, no cache is performed. If a string is given, it is the path to the caching directory. Enabling caching triggers a clone of the transformers before fitting. Therefore, the transformer instance given to the pipeline cannot be inspected directly. Use the attribute ``named_steps`` or ``steps`` to inspect estimators within the pipeline. Caching the transformers is advantageous when fitting is time consuming. Attributes ---------- named_steps : dict Read-only attribute to access any step parameter by user given name. Keys are step names and values are steps parameters. Examples -------- >>> from sklearn import svm >>> from sklearn.datasets import samples_generator >>> from sklearn.feature_selection import SelectKBest >>> from sklearn.feature_selection import f_regression >>> from sklearn.pipeline import Pipeline >>> # generate some data to play with >>> X, y = samples_generator.make_classification( ... n_informative=5, n_redundant=0, random_state=42) >>> # ANOVA SVM-C >>> anova_filter = SelectKBest(f_regression, k=5) >>> clf = svm.SVC(kernel='linear') >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)]) >>> # You can set the parameters using the names issued >>> # For instance, fit using a k of 10 in the SelectKBest >>> # and a parameter 'C' of the svm >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Pipeline(memory=None, steps=[('anova', SelectKBest(...)), ('svc', SVC(...))]) >>> prediction = anova_svm.predict(X) >>> anova_svm.score(X, y) # doctest: +ELLIPSIS 0.829... >>> # getting the selected features chosen by anova_filter >>> anova_svm.named_steps['anova'].get_support() ... # doctest: +NORMALIZE_WHITESPACE array([False, False, True, True, False, False, True, True, False, True, False, True, True, False, True, False, True, True, False, False], dtype=bool) """ # BaseEstimator interface def __init__(self, steps, memory=None): # shallow copy of steps self.steps = tosequence(steps) self._validate_steps() self.memory = memory def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ return self._get_params('steps', deep=deep) def set_params(self, **kwargs): """Set the parameters of this estimator. Valid parameter keys can be listed with ``get_params()``. Returns ------- self """ self._set_params('steps', **kwargs) return self def _validate_steps(self): names, estimators = zip(*self.steps) # validate names self._validate_names(names) # validate estimators transformers = estimators[:-1] estimator = estimators[-1] for t in transformers: if t is None: continue if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All intermediate steps should be " "transformers and implement fit and transform." " '%s' (type %s) doesn't" % (t, type(t))) # We allow last estimator to be None as an identity transformation if estimator is not None and not hasattr(estimator, "fit"): raise TypeError("Last step of Pipeline should implement fit. 
" "'%s' (type %s) doesn't" % (estimator, type(estimator))) @property def _estimator_type(self): return self.steps[-1][1]._estimator_type @property def named_steps(self): return dict(self.steps) @property def _final_estimator(self): return self.steps[-1][1] # Estimator interface def _fit(self, X, y=None, **fit_params): self._validate_steps() # Setup the memory memory = self.memory if memory is None: memory = Memory(cachedir=None, verbose=0) elif isinstance(memory, six.string_types): memory = Memory(cachedir=memory, verbose=0) elif not isinstance(memory, Memory): raise ValueError("'memory' should either be a string or" " a joblib.Memory instance, got" " 'memory={!r}' instead.".format(memory)) fit_transform_one_cached = memory.cache(_fit_transform_one) fit_params_steps = dict((name, {}) for name, step in self.steps if step is not None) for pname, pval in six.iteritems(fit_params): step, param = pname.split('__', 1) fit_params_steps[step][param] = pval Xt = X for step_idx, (name, transformer) in enumerate(self.steps[:-1]): if transformer is None: pass else: if memory.cachedir is None: # we do not clone when caching is disabled to preserve # backward compatibility cloned_transformer = transformer else: cloned_transformer = clone(transformer) # Fit or load from cache the current transfomer Xt, fitted_transformer = fit_transform_one_cached( cloned_transformer, None, Xt, y, **fit_params_steps[name]) # Replace the transformer of the step with the fitted # transformer. This is necessary when loading the transformer # from the cache. self.steps[step_idx] = (name, fitted_transformer) if self._final_estimator is None: return Xt, {} return Xt, fit_params_steps[self.steps[-1][0]] def fit(self, X, y=None, **fit_params): """Fit the model Fit all the transforms one after the other and transform the data, then fit the transformed data using the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- self : Pipeline This estimator """ Xt, fit_params = self._fit(X, y, **fit_params) if self._final_estimator is not None: self._final_estimator.fit(Xt, y, **fit_params) return self def fit_transform(self, X, y=None, **fit_params): """Fit the model and transform with the final estimator Fits all the transforms one after the other and transforms the data, then uses fit_transform on transformed data with the final estimator. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. 
Returns ------- Xt : array-like, shape = [n_samples, n_transformed_features] Transformed samples """ last_step = self._final_estimator Xt, fit_params = self._fit(X, y, **fit_params) if hasattr(last_step, 'fit_transform'): return last_step.fit_transform(Xt, y, **fit_params) elif last_step is None: return Xt else: return last_step.fit(Xt, y, **fit_params).transform(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict(self, X): """Apply transforms to the data, and predict with the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_pred : array-like """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].predict(Xt) @if_delegate_has_method(delegate='_final_estimator') def fit_predict(self, X, y=None, **fit_params): """Applies fit_predict of last step in pipeline after transforms. Applies fit_transforms of a pipeline to the data, followed by the fit_predict method of the final estimator in the pipeline. Valid only if the final estimator implements fit_predict. Parameters ---------- X : iterable Training data. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Training targets. Must fulfill label requirements for all steps of the pipeline. **fit_params : dict of string -> object Parameters passed to the ``fit`` method of each step, where each parameter name is prefixed such that parameter ``p`` for step ``s`` has key ``s__p``. Returns ------- y_pred : array-like """ Xt, fit_params = self._fit(X, y, **fit_params) return self.steps[-1][-1].fit_predict(Xt, y, **fit_params) @if_delegate_has_method(delegate='_final_estimator') def predict_proba(self, X): """Apply transforms, and predict_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_proba : array-like, shape = [n_samples, n_classes] """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_proba(Xt) @if_delegate_has_method(delegate='_final_estimator') def decision_function(self, X): """Apply transforms, and decision_function of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes] """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].decision_function(Xt) @if_delegate_has_method(delegate='_final_estimator') def predict_log_proba(self, X): """Apply transforms, and predict_log_proba of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes] """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) return self.steps[-1][-1].predict_log_proba(Xt) @property def transform(self): """Apply transforms, and transform with the final estimator This also works where final estimator is ``None``: all prior transformations are applied. Parameters ---------- X : iterable Data to transform. Must fulfill input requirements of first step of the pipeline. 
Returns ------- Xt : array-like, shape = [n_samples, n_transformed_features] """ # _final_estimator is None or has transform, otherwise attribute error if self._final_estimator is not None: self._final_estimator.transform return self._transform def _transform(self, X): Xt = X for name, transform in self.steps: if transform is not None: Xt = transform.transform(Xt) return Xt @property def inverse_transform(self): """Apply inverse transformations in reverse order All estimators in the pipeline must support ``inverse_transform``. Parameters ---------- Xt : array-like, shape = [n_samples, n_transformed_features] Data samples, where ``n_samples`` is the number of samples and ``n_features`` is the number of features. Must fulfill input requirements of last step of pipeline's ``inverse_transform`` method. Returns ------- Xt : array-like, shape = [n_samples, n_features] """ # raise AttributeError if necessary for hasattr behaviour for name, transform in self.steps: if transform is not None: transform.inverse_transform return self._inverse_transform def _inverse_transform(self, X): Xt = X for name, transform in self.steps[::-1]: if transform is not None: Xt = transform.inverse_transform(Xt) return Xt @if_delegate_has_method(delegate='_final_estimator') def score(self, X, y=None, sample_weight=None): """Apply transforms, and score with the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. y : iterable, default=None Targets used for scoring. Must fulfill label requirements for all steps of the pipeline. sample_weight : array-like, default=None If not None, this argument is passed as ``sample_weight`` keyword argument to the ``score`` method of the final estimator. Returns ------- score : float """ Xt = X for name, transform in self.steps[:-1]: if transform is not None: Xt = transform.transform(Xt) score_params = {} if sample_weight is not None: score_params['sample_weight'] = sample_weight return self.steps[-1][-1].score(Xt, y, **score_params) @property def classes_(self): return self.steps[-1][-1].classes_ @property def _pairwise(self): # check if first estimator expects pairwise input return getattr(self.steps[0][1], '_pairwise', False) def _name_estimators(estimators): """Generate names for estimators.""" names = [type(estimator).__name__.lower() for estimator in estimators] namecount = defaultdict(int) for est, name in zip(estimators, names): namecount[name] += 1 for k, v in list(six.iteritems(namecount)): if v == 1: del namecount[k] for i in reversed(range(len(estimators))): name = names[i] if name in namecount: names[i] += "-%d" % namecount[name] namecount[name] -= 1 return list(zip(names, estimators)) def make_pipeline(*steps): """Construct a Pipeline from the given estimators. This is a shorthand for the Pipeline constructor; it does not require, and does not permit, naming the estimators. Instead, their names will be set to the lowercase of their types automatically. Examples -------- >>> from sklearn.naive_bayes import GaussianNB >>> from sklearn.preprocessing import StandardScaler >>> make_pipeline(StandardScaler(), GaussianNB(priors=None)) ... 
# doctest: +NORMALIZE_WHITESPACE Pipeline(memory=None, steps=[('standardscaler', StandardScaler(copy=True, with_mean=True, with_std=True)), ('gaussiannb', GaussianNB(priors=None))]) Returns ------- p : Pipeline """ return Pipeline(_name_estimators(steps)) def _fit_one_transformer(transformer, X, y): return transformer.fit(X, y) def _transform_one(transformer, weight, X): res = transformer.transform(X) # if we have a weight for this transformer, multiply output if weight is None: return res return res * weight def _fit_transform_one(transformer, weight, X, y, **fit_params): if hasattr(transformer, 'fit_transform'): res = transformer.fit_transform(X, y, **fit_params) else: res = transformer.fit(X, y, **fit_params).transform(X) # if we have a weight for this transformer, multiply output if weight is None: return res, transformer return res * weight, transformer class FeatureUnion(_BasePipeline, TransformerMixin): """Concatenates results of multiple transformer objects. This estimator applies a list of transformer objects in parallel to the input data, then concatenates the results. This is useful to combine several feature extraction mechanisms into a single transformer. Parameters of the transformers may be set using its name and the parameter name separated by a '__'. A transformer may be replaced entirely by setting the parameter with its name to another transformer, or removed by setting to ``None``. Read more in the :ref:`User Guide <feature_union>`. Parameters ---------- transformer_list : list of (string, transformer) tuples List of transformer objects to be applied to the data. The first half of each tuple is the name of the transformer. n_jobs : int, optional Number of jobs to run in parallel (default 1). transformer_weights : dict, optional Multiplicative weights for features per transformer. Keys are transformer names, values the weights. """ def __init__(self, transformer_list, n_jobs=1, transformer_weights=None): self.transformer_list = tosequence(transformer_list) self.n_jobs = n_jobs self.transformer_weights = transformer_weights self._validate_transformers() def get_params(self, deep=True): """Get parameters for this estimator. Parameters ---------- deep : boolean, optional If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : mapping of string to any Parameter names mapped to their values. """ return self._get_params('transformer_list', deep=deep) def set_params(self, **kwargs): """Set the parameters of this estimator. Valid parameter keys can be listed with ``get_params()``. Returns ------- self """ self._set_params('transformer_list', **kwargs) return self def _validate_transformers(self): names, transformers = zip(*self.transformer_list) # validate names self._validate_names(names) # validate estimators for t in transformers: if t is None: continue if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(t, "transform")): raise TypeError("All estimators should implement fit and " "transform. '%s' (type %s) doesn't" % (t, type(t))) def _iter(self): """Generate (name, est, weight) tuples excluding None transformers """ get_weight = (self.transformer_weights or {}).get return ((name, trans, get_weight(name)) for name, trans in self.transformer_list if trans is not None) def get_feature_names(self): """Get feature names from all transformers. Returns ------- feature_names : list of strings Names of the features produced by transform. 
""" feature_names = [] for name, trans, weight in self._iter(): if not hasattr(trans, 'get_feature_names'): raise AttributeError("Transformer %s (type %s) does not " "provide get_feature_names." % (str(name), type(trans).__name__)) feature_names.extend([name + "__" + f for f in trans.get_feature_names()]) return feature_names def fit(self, X, y=None): """Fit all transformers using X. Parameters ---------- X : iterable or array-like, depending on transformers Input data, used to fit transformers. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- self : FeatureUnion This estimator """ self._validate_transformers() transformers = Parallel(n_jobs=self.n_jobs)( delayed(_fit_one_transformer)(trans, X, y) for _, trans, _ in self._iter()) self._update_transformer_list(transformers) return self def fit_transform(self, X, y=None, **fit_params): """Fit all transformers, transform the data and concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. y : array-like, shape (n_samples, ...), optional Targets for supervised learning. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ self._validate_transformers() result = Parallel(n_jobs=self.n_jobs)( delayed(_fit_transform_one)(trans, weight, X, y, **fit_params) for name, trans, weight in self._iter()) if not result: # All transformers are None return np.zeros((X.shape[0], 0)) Xs, transformers = zip(*result) self._update_transformer_list(transformers) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def transform(self, X): """Transform X separately by each transformer, concatenate results. Parameters ---------- X : iterable or array-like, depending on transformers Input data to be transformed. Returns ------- X_t : array-like or sparse matrix, shape (n_samples, sum_n_components) hstack of results of transformers. sum_n_components is the sum of n_components (output dimension) over transformers. """ Xs = Parallel(n_jobs=self.n_jobs)( delayed(_transform_one)(trans, weight, X) for name, trans, weight in self._iter()) if not Xs: # All transformers are None return np.zeros((X.shape[0], 0)) if any(sparse.issparse(f) for f in Xs): Xs = sparse.hstack(Xs).tocsr() else: Xs = np.hstack(Xs) return Xs def _update_transformer_list(self, transformers): transformers = iter(transformers) self.transformer_list[:] = [ (name, None if old is None else next(transformers)) for name, old in self.transformer_list ] def make_union(*transformers, **kwargs): """Construct a FeatureUnion from the given transformers. This is a shorthand for the FeatureUnion constructor; it does not require, and does not permit, naming the transformers. Instead, they will be given names automatically based on their types. It also does not allow weighting. Parameters ---------- *transformers : list of estimators n_jobs : int, optional Number of jobs to run in parallel (default 1). 
    Returns
    -------
    f : FeatureUnion

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> from sklearn.pipeline import make_union
    >>> make_union(PCA(), TruncatedSVD())    # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
           transformer_list=[('pca',
                              PCA(copy=True, iterated_power='auto',
                                  n_components=None, random_state=None,
                                  svd_solver='auto', tol=0.0, whiten=False)),
                             ('truncatedsvd',
                              TruncatedSVD(algorithm='randomized',
                                           n_components=2, n_iter=5,
                                           random_state=None, tol=0.0))],
           transformer_weights=None)
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    if kwargs:
        # We do not currently support `transformer_weights` as we may want to
        # change its type spec in make_union
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format(list(kwargs.keys())[0]))
    return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs)
bsd-3-clause
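The `FeatureUnion`/`make_union` code above reads more easily next to a small usage sketch. The snippet below is not part of the module; it is a minimal illustration (the dataset and transformer choices are arbitrary) of how named transformers, `transformer_weights`, and the automatic step naming in `make_union` behave.

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.pipeline import FeatureUnion, make_union

iris = load_iris()
X, y = iris.data, iris.target

# Two parallel transformers; transformer_weights rescales each block of the
# concatenated output (the PCA columns by 2.0, the selected column by 0.5).
union = FeatureUnion(
    [("pca", PCA(n_components=2)), ("kbest", SelectKBest(k=1))],
    transformer_weights={"pca": 2.0, "kbest": 0.5})
X_t = union.fit_transform(X, y)
print(X_t.shape)  # (150, 3): two PCA components plus one selected feature

# make_union builds the same kind of object but names the steps automatically
# and, as the guard in the code above enforces, rejects transformer_weights.
auto_union = make_union(PCA(n_components=2), SelectKBest(k=1))
print([name for name, _ in auto_union.transformer_list])  # ['pca', 'selectkbest']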
Afanc/parazite
src/main.py
1
43894
# -*- coding: utf-8 -*- #!/usr/bin/env python from kivy.app import App from kivy.clock import Clock from kivy.properties import NumericProperty, ReferenceListProperty, ListProperty from kivy.vector import Vector from kivy.uix.widget import Widget from kivy.core.window import Window from kivy.uix.label import Label from kivy.uix.button import Button from kivy.graphics import * from kivy.app import App from functools import partial import matplotlib.pyplot as plt import csv import time import os import sys import shutil from quadtree import Quadtree from collision import * from parazite1 import * from healthy import * from trade_off import trade_off from CHANGING_CONST import * Window.size = (1000, 800) balls_dictionnary = {} #id:[widget_ball,individual, position] # pour les individus vivants strain_dictionary = {} #{souche:[vir,transmission, guérison][liste des infectés]} contient toutes les souches qui ont existé dico_of_strains_for_csv = {} #{souche:variable.writerow()} list_of_healthies = [] #liste des individus sains vivants list_of_parazites = [] #liste des parasites vivants if os.path.isfile('csv_nb_total_sains_infectes') and 'y' != raw_input("le fichier csv_nb_total_sains_infectes existe déjà, le remplaçer? (y/n)"): exit() #écrit les headers dans le dossier else: with open("nb_total_sains_infectes.csv","w") as csv_nb_total_sains_infectes: #creer le fichier des données générales de la population writer = csv.writer(csv_nb_total_sains_infectes) writer.writerow(["temps","population totale","individus sains","individus infectés","pourcentage de la population infectée", "virulence moyenne","taux de guérison","taux de transmission"]) #header des données générales de la pop if os.path.isdir('data') and 'y' != raw_input("le dossier data existe déja, le remplaçer?"):exit() else: shutil.rmtree('data') os.mkdir('data') def create_id(): '''crée un nouvel id pour chaque nouveau parasite''' global compteur_id # on appelle le compteur_id qui permet de créer des ID nouveaux idd = "ID" + str(compteur_id) # les ID sont un string qui composés d'ID + un numéro unique compteur_id += 1 # on incrémente le compteur global return idd # la fonction retourne l'idd def add_one_healthy() : '''ajoute une individu sain''' try: temp = create_id() # on commence par lui créer un ID list_of_healthies.append(Healthy(temp)) # on l'ajoute à la liste des individus vivants return list_of_healthies[-1] # la fonction add_one_healthy retourne l'individu crée except: print "could not add health" # imprime un message si il n'arrive pas à le créer def add_one_parazite(effect = None) : '''ajoute un parasite, peut prendre un effet 'effect' en argument''' try: temp_id = create_id() #commence par créer un ID if effect != None and TRADE_OFF == 'leo': #si un effet est donné en argument ... effect = float(effect) attribute = trade_off(effect_arg = effect) #on utillise la fonction trade_off pour temp_vir = attribute[0] #créer une virulence temp_trans = attribute[1] #crée un taux de transmision temp_recov = attribute[2]#crée un taux de guérison elif TRADE_OFF == 'leo' : #si rien n'est donné en argument et que la méthode de trade_off est "léo" attribute = trade_off() #on utilise trade off sans spécifier d'effet temp_vir = attribute[0] # pour créer une virulance, temp_trans = attribute[1] #un taux de transmision, temp_recov = attribute[2] # un taux de guérison. 
elif TRADE_OFF == 'dada': # Enfin, si la méthode de trade_off est 'dada', on crée temp_vir = uniform(0,1) #une virulance, temp_trans = uniform(0,1) # un taux de transmission, temp_recov = uniform(0,1) # une probabilité de guérison. norm = BASE_FITNESS/(temp_vir + temp_trans + temp_recov) # On crée une variable pour les normaliser temp_vir *= norm # et on normalise les trois paramètre. temp_trans *= norm temp_recov *= norm list_of_parazites.append(Parazite(temp_vir, temp_trans, temp_recov, temp_id)) #on ajoute le parasite à la liste temp_strain = list(list_of_parazites[-1].getStrain()) # la souche est pour l'instant "[]" par défaut temp_strain.append(list_of_parazites[-1].getIdd()) # on lui ajoute l'ID dans la liste temp_strain = str('Souche-' + temp_strain[0][2:])# On modifie la variable temporaire pour que les souches et les ID soient bien distincts list_of_parazites[-1].setStrain(temp_strain) #On modifie la souche du parasite pour la souche temporaire strain_dictionary[temp_strain] = [[temp_vir, temp_trans, temp_recov],[str(temp_id)]] #et on stocke la souche et l'individu infecté dans le dico des souches New_strain_in_csv(temp_strain, temp_vir, temp_trans, temp_recov) return list_of_parazites[-1] # la fonction retourne l'individu crée except: print "could not add parazite: problem" #messsge d'erreur def kill(root,p): '''tue un individu, prend un individu 'p' en argument''' if not isinstance(p, Individual): # vérifie que l'instance donnée en argument soit un individu print "%s doit être un individu pour être tué" % str(p) # sinon imprime un message d'erreur return # et ne retourne rien elif isinstance(p, Healthy): # si c'est un healthy.... list_of_healthies.remove(p) #il faut le retirer de la bonne liste elif isinstance(p, Parazite): # si c'est un parasite list_of_parazites.remove(p) # il faut le retirer de la bonne liste root.remove_widget(balls_dictionnary[p.getIdd()][0]) #puis on enlève la widget (gui) del balls_dictionnary[p.getIdd()][0] #puis on gère le dico des balles, on tue l'objet del balls_dictionnary[p.getIdd()] #on tue l'entrée dans le dico del p #et enfin on tue l'objet def reproduce(root,p): '''duplique un individu, prend un individu 'p' en argument''' ball = Ball() # crée une balle et la stocke dans la variable ball x = uniform(0,1) # crée une nombre aléatoire entre zéro et un.... ball.center = (balls_dictionnary[p.getIdd()][0].center[0] + x, balls_dictionnary[p.getIdd()][0].center[1] + (1-x)) # pour le placement de la boule ball.velocity = balls_dictionnary[p.getIdd()][0].velocity root.add_widget(ball) #ajoute la balle au container healthy = add_one_healthy() balls_dictionnary[healthy.getIdd()] = [ball, healthy, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] if isinstance(p, Parazite): # si son parent est parasité infect_him(p, balls_dictionnary[healthy.getIdd()][1], parazites_reproducing=True) #l'enfant est parasité par la même souche à la naissance... 
random_mutation_on(balls_dictionnary[list_of_parazites[-1].getIdd()][1], 'reproduction') #si il n'y a pas de mutation try : if uniform(0,1) > TRANSMISSION_RESISTANCE: #permet de lancer le programme avec ou sans le passage des résistance for i in p.getResistances() : # Chaque resistance du parent list_of_healthies[-1].addResistance(i) # est passée au jeune except : print "\n _________________________________________________\nerreur dans reproduce !\n______________________________________________\n" if isinstance(p, Healthy): #si healthy if uniform(0,1) < TRANSMISSION_RESISTANCE: # si le progrmme est lancé avec GENERATION_RESISTANCE = 1 for i in p.getResistances(): # chaque resistance du parent list_of_healthies[-1].addResistance(i) #est passée au jeune def guerison(p): '''gueris un parasite, prend un parasite 'p' en argument ''' try: list_of_healthies.append(Healthy(p.getIdd())) if uniform(0,1) < RESISTANCE_AFTER_RECOVERY: for i in p.getResistances() : list_of_healthies[-1].addResistance(i) list_of_healthies[-1].addResistance(p.getStrain()) list_of_parazites.remove(p) balls_dictionnary[p.getIdd()][1] = list_of_healthies[-1] balls_dictionnary[p.getIdd()][0].set_col(BASE_COLOR) except : print "problème dans guérison" def cure_the_lucky_ones(dt) : '''On parcourt la liste de tous les parasites. A chaque itération on tire un nombre au hasard entre 0 et 1 et on le compare à la constante BASE_CHANCE_OF_HEALING * la probabilité de guérison spécifique au parasite en question. Si le nombre tiré est inférieur on transforme le parasite en individu sain en appelant la fonction guérison. ''' if TRADE_OFF == 'dada': for i in iter(list_of_parazites): #Parcours la liste des parasites. if uniform(0,1) < BASE_CHANCE_OF_HEALING *(1-i.getRecovProb()) : # Si la probabilité de guérison de base * 1 + la probabilité de guérison spécifique au parasite est plus grande qu'un nombre au hasard entre 0 et 1 guerison(i) #l'individu est guéri. !si le taux de guérison vaut 1, plus de chances de guérir. elif TRADE_OFF == 'leo' : for i in iter(list_of_parazites): #Parcours la liste des parasites. if uniform(0,1) < BASE_CHANCE_OF_HEALING *(1+i.getRecovProb()) : # Si la probabilité de guérison de base * 1 + la probabilité de guérison spécifique au parasite est plus grande qu'un nombre au hasard entre 0 et 1 guerison(i) #l'individu est guéri. !si le taux de guérison vaut 1, plus de chances de guérir. def mutate_those_who_wish(dt) : '''On parcourt la liste des parasites. Si la probabilité de mutation spontanée) de base (CHANCE_OF_MUTATION_ON_NOTHING est plus grande qu'un nombre au hasard entre 0 et 1, on utilise la fonction random_mutation_on pour faire muter un parasite, avec l'argument what = living ''' for i in iter(list_of_parazites) : #Parcours la liste des parasites. if uniform(0,1) < CHANCE_OF_MUTATION_ON_NOTHING : # Si la probabilité de mutation spontanée de base est plus grande qu'un nombre au hasard entre 0 et 1 random_mutation_on(i,'living') # on utilise la fonction pour faire muter un parasites, avec l'argument what = 'living' def kill_those_who_have_to_die(root,dt) : '''On parcourt la liste des parasites et des individus sains, on compare une valeur tirée au hasard avec la constante DYING_PROB qui représente leur risque de mourir à chaque instant, si la valeur tirée est inférieure l’individu meurt. Les parasites ont plus de risque de mourir qui dépend de leur virulence car la valeur DYING_PROB est multiplié par la valeur de virulence du parasite et qui dépend de chaque souche. 
''' for i in list_of_healthies: if uniform(0,1) < DYING_PROB : #! RecovProb = 1 --> aucune chance de recover kill(root,i) for i in list_of_parazites: if uniform(0,1) < DYING_PROB*(1 + balls_dictionnary[i.getIdd()][1].getVir()) : #! RecovProb = 1 --> aucune chance de recover kill(root,i) def reproduce_those_who_have_to(root,dt) : '''On parcourt la liste de tous les individus sains, puis celle des parasites. Pour chaque individu on tire une valeur au hasard et on compare avec la constante REPRODUCTION_PROB, si la valeur tirée au hasard est inférieure à la constante, on appelle la fonction reproduce et l’individu se reproduit. ''' for i in list_of_healthies: if uniform(0,1) < REPRODUCTION_PROB : #! RecovProb = 1 --> aucune chance de recover reproduce(root, i) for i in list_of_parazites: if uniform(0,1) < REPRODUCTION_PROB : #! RecovProb = 1 --> aucune chance de recover reproduce(root,i) def random_mutation_on(para_i, what) : '''Cette fonction applique la mutation selon le cas donné en argument, 'infection', 'reproduction' ou 'living' et selon la façon de calculer les trades-off 'leo' ou 'dada' ''' chance = 0 fit_change = 0 if what == 'infection' : chance = CHANCE_OF_MUTATION_ON_INFECTION fit_change = MAX_FITNESS_CHANGE_ON_INFECTION elif what == 'reproduction' : chance = CHANCE_OF_MUTATION_ON_REPRODUCTION fit_change = MAX_FITNESS_CHANGE_ON_REPRODUCTION elif what == 'living' : chance = CHANCE_OF_MUTATION_ON_NOTHING fit_change = MAX_FITNESS_CHANGE_ON_NOTHING if uniform(0,1) < chance: #prob. de mutation if TRADE_OFF == 'dada': old_attributes = [para_i.getVir(), para_i.getTransmRate(), para_i.getRecovProb()] attribute_functions = {'0':para_i.set_New_Vir, '1':para_i.set_New_TransmRate, '2':para_i.set_New_RecovProb, '3': para_i.setStrain([])} sign = randint(0,1)*2-1 rand_mod = sign*(uniform(0, fit_change)) #modificateur valant au max 1+0.2 (p. 
ex) rand_index = randint(0,2) new_value = max(min(old_attributes[rand_index] * (1+rand_mod), 1),0) #new attribute = 1.2*old attribute (au max) attribute_functions[str(rand_index)](new_value) #on appelle la fonction correspondante elif TRADE_OFF == 'leo': trade_off(para_i) if what == 'living': temp_idd = para_i.getIdd() + 'mut' balls_dictionnary[temp_idd] = balls_dictionnary[para_i.getIdd()] del balls_dictionnary[para_i.getIdd()] para_i.setIdd(temp_idd) #nouvelle souche new_strain = para_i.getIdd() new_strain = 'Souche-' + new_strain[2:] para_i.setStrain(new_strain) strain_dictionary[new_strain] = [[para_i.getVir(), para_i.getTransmRate(), para_i.getRecovProb],[para_i.getIdd()]] New_strain_in_csv(new_strain,para_i.getVir(),para_i.getTransmRate(), para_i.getRecovProb()) #ajoute la souche au csv x = randint(0,2) random_color = list(balls_dictionnary[list_of_parazites[-1].getIdd()][0].get_col()) random_color[x] = max(min(uniform(-1,1)+random_color[x], 1),0) balls_dictionnary[list_of_parazites[-1].getIdd()][0].set_col(tuple(random_color)) else: if what == 'infection': #ça marche parce qu'on ne teste plus parasite reproducting strain_dictionary[para_i.getStrain()][1].append(para_i.getIdd()) def infect_him(para_i,heal_i, parazites_reproducing=False) : resistant = False testing_par = para_i.getStrain() if testing_par in heal_i.getResistances() : resistant = True if not resistant : temp_par = list(para_i.getPar()) temp_par.append(para_i.getIdd()) temp_strain = para_i.getStrain() list_of_parazites.append(Parazite(para_i.getVir(), para_i.getTransmRate(), para_i.getRecovProb(), heal_i.getIdd(), temp_par, temp_strain)) for i in heal_i.getResistances() : list_of_parazites[-1].addResistance(i) list_of_healthies.remove(heal_i) balls_dictionnary[heal_i.getIdd()][1] = list_of_parazites[-1] balls_dictionnary[list_of_parazites[-1].getIdd()][0].set_col(balls_dictionnary[para_i.getIdd()][0].get_col()) if random_mutation_on(list_of_parazites[-1], 'infection') : x = randint(0,2) random_color = list(balls_dictionnary[list_of_parazites[-1].getIdd()][0].get_col()) random_color[x] = min(uniform(0,1)*uniform(0,1), 1) balls_dictionnary[list_of_parazites[-1].getIdd()][0].set_col(tuple(random_color)) def parazite_against_parazite(p1,p2) : if round(p1.getVir(), 2) > round(p2.getVir(),2) : if uniform(0,1) < INFECTION_CHANCE *(1+p1.getTransmRate()) : p2.setVir(p1.getVir()) p2.setTransmRate(p1.getTransmRate()) p2.setRecovProb(p1.getRecovProb()) balls_dictionnary[p2.getIdd()][0].set_col(balls_dictionnary[p1.getIdd()][0].get_col()) p2.setStrain(p1.getStrain()) strain_dictionary[p1.getStrain()][1].append(p2.getIdd()) elif p2.getVir() > p1.getVir() : parazite_against_parazite(p2,p1) def actions_when_collision(p1,p2): possible_classes = [Healthy, Parazite, Parazite] if isinstance(p1, tuple(possible_classes)) : # si c'est l'un des deux possible_classes.remove(type(p1)) #on l'enlève if isinstance(p2, tuple(possible_classes)): #si c'est l'autre if type(p2) == type(p1) : #si ce sont deux parazites if uniform(0,1) < PARAZITE_FIGHT_CHANCE : parazite_against_parazite(p1,p2) else : if isinstance(p2, Parazite) : p1,p2 = p2,p1 #on veut que p1 soit le parazite (lisibilité) if uniform(0,1) < INFECTION_CHANCE *(1+p1.getTransmRate()) : #là aussi, infection chance cap at 0.5 infect_him(p1,p2) def New_strain_in_csv(tempStrain, tempVir, tempTrans, tempRecov): if MODE == "war" or MODE == 'all_night_long': with open("data/" + tempStrain +".csv","w") as NewStrainFile: dico_of_strains_for_csv[tempStrain] = csv.writer(NewStrainFile) #création du 
fichier .csv avec le nom de la souche dico_of_strains_for_csv[tempStrain].writerow(["temps[s]","Souche","nombre infection secondaires","population totale en vie","parasites de cette souche en vie","pourcentage de la population de parasites", #header du csv "virulence: "+ str(tempVir), "taux de transmision: "+ str(tempTrans), "probabilité de guérison contre le parasite: " + str(tempRecov)]) #-----------------------main -------------------------- class mainApp(App) : """Represents the whole application.""" def build(self) : """Gestion temporelle des fonctions. Passe les argument "globaux" par défaut. """ global Event root = BallsContainer() args = None #passage d'argument if len(sys.argv) > 1 : args = sys.argv[1] Clock.schedule_once(partial(root.update_files, filename = args),1.1) #on attend que la fenêtre soit lancée Event = Clock.schedule_interval(partial(root.update_files, filename = args), 60*DELTA_TIME) Clock.schedule_once(root.start_balls,1) #on attend que la fenêtre soit lancée Clock.schedule_once(root.update_life_and_death,1.1) #on attend que la fenêtre soit lancée Clock.schedule_interval(root.update, DELTA_TIME) Clock.schedule_interval(root.update_life_and_death, 60*DELTA_TIME) #ça ça marche Window.bind(on_key_down=root.Keyboard) #pour le clavier return root #-----------------------Main-------------------------------------- # ----------------------Balls container-------------------------- class BallsContainer(Widget): """Classe du widget principal, qui contient les boules """ pause = False faster_events = [] num_healthies = NumericProperty(0) num_parazites = NumericProperty(0) nb_coll, mean_vir, mean_trans, mean_recov,last_clock, duration, paused = NumericProperty(0),NumericProperty(0),NumericProperty(0),NumericProperty(0),NumericProperty(0), NumericProperty(0), NumericProperty(0) top_idds = ListProperty([[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]]) temp_widg_to_remove_list = [] # starting_time = time.time() def start_balls(self,dt): ''' Est apellée une fois, au temps dt(défini dans build). Ajoute les boules. ''' for i in range(0,NB_SAINS): ball = Ball() ball.center = (randint(self.x, self.x+self.width), randint(self.y, self.y+self.height)) ball.velocity = (-MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED), #à revoir -MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED)) self.add_widget(ball) healthy = add_one_healthy() balls_dictionnary[healthy.getIdd()] = [ball, healthy, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] for i in range(0,NB_PARASITE): ball = Ball() ball.center = (randint(self.x, self.x+self.width), randint(self.y, self.y+self.height)) ball.velocity = (-MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED), #à revoir -MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED)) self.add_widget(ball) ball.set_col((uniform(0,1),uniform(0,1),0)) if len(sys.argv) > 2 : #ICI on check pour theory tester parazite = add_one_parazite(sys.argv[2]) #sys.argv[2] = effect = charge parasitaire else : parazite = add_one_parazite() balls_dictionnary[parazite.getIdd()] = [ball, parazite, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] if len(sys.argv) > 2 : #mode theory tester for _ in range(10) : reproduce(self, list_of_parazites[-1]) #@profile def update(self,dt): '''Sert de manager pour les position et les vitesse des boules à l'aide des dictionnaires. Appelle les fonctions qui gèrent les collisions. Est appelé tousles dt, un interval de temps défini dans build. 
''' quad = Quadtree(0,[self.x,self.x + self.width, self.y, self.y + self.height]) for i in balls_dictionnary.keys() : pos = balls_dictionnary[i][0] #gotta update position (dic) here ! before the quad ! balls_dictionnary[i][2] = [pos.x, pos.x + pos.width, pos.y, pos.y + pos.height] quad.insert(balls_dictionnary[i][2], i) #we insert the balls in the quad for i in balls_dictionnary.keys() : temp_balls = quad.fetch(balls_dictionnary[i][2],i) #fetch the collisions for each ball temp_keys = [k[1] for k in temp_balls] #get the keys of those collisions other_balls = {key:balls_dictionnary[key] for key in temp_keys} #create a new dic with the collisions for j in other_balls.keys(): #and for each of those collisions, action ! if physical_collision2(balls_dictionnary[i][0], other_balls[j][0]): actions_when_collision(balls_dictionnary[i][1], other_balls[j][1]) self.nb_coll += 1 physical_wall_collisions2(balls_dictionnary[i][0], self) #-------------- update balls here ----------------- balls_dictionnary[i][0].update(dt) #update the positions of the balls (widget) #-------------- update balls here ----------------- def update_life_and_death(self,dt): '''Appelé tous les dt(défini dans build, normalement toutes les secondes), met à jour toutes les morts, guérison, reproduction. En mode théory tester, la fonction shall_we_kill_the_simulation, en mode all_night_long, la fonction all_nighter.''' kill_those_who_have_to_die(self,dt) reproduce_those_who_have_to(self,dt) cure_the_lucky_ones(dt) mutate_those_who_wish(dt) self.shall_we_kill_the_simulation(dt) self.all_nighter() def update_files(self, dt, filename = None) : '''Sert à la gestion temporelle de l'écriture dans les fichiers. appelé tous les dt(défini dans build, normalement toutes les secondes)''' self.update_numbers(filename) self.update_data_files(dt) def all_nighter(self) : ''' Uniquement utilisé dans le mode all_night_long, qui permet de maintenir la population à un nombre gérable par l'ordinateur''' global REPRODUCTION_PROB, DYING_PROB if MODE=='all_night_long' and len(list_of_parazites) < 1: for i in range (0,NB_PARASITE): ball = Ball() ball.center = (randint(self.x, self.x+self.width), randint(self.y, self.y+self.height)) ball.velocity = (-MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED), #à revoir -MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED)) self.add_widget(ball) ball.set_col((uniform(0,1),uniform(0,1),0)) parazite = add_one_parazite() balls_dictionnary[parazite.getIdd()] = [ball, parazite, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] if len(list_of_healthies) + len(list_of_parazites) > 300 and MODE != 'war': DYING_PROB = ROOF_DYING_PROB elif len(list_of_healthies)<= 250 and MODE != 'war': DYING_PROB = STOCK_DYING_PROB else : pass if len(list_of_healthies) + len(list_of_parazites) < 50 and MODE != 'war': REPRODUCTION_PROB = BOTTOM_REPRODUCTION_PROB elif len(list_of_healthies) + len(list_of_parazites) > 50 and MODE != 'war': REPRODUCTION_PROB = STOCK_REPRODUCTION_PROB def update_numbers(self, filename = None) : ''' Met à jour les chiffre affichés''' self.num_parazites = len(list_of_parazites) self.num_healthies = len(list_of_healthies) sumvir, sumrecov, sumtrans = 0,0,0 tempdic = {} self.top_idds = [[0,0,0,0,0],[0,0,0,0,0],[0,0,0,0,0]] for i in balls_dictionnary.keys() : if isinstance(balls_dictionnary[i][1], Parazite) : sumvir += float(balls_dictionnary[i][1].getVir()) sumtrans += float(balls_dictionnary[i][1].getTransmRate()) sumrecov += float(balls_dictionnary[i][1].getRecovProb()) if balls_dictionnary[i][1].getStrain() 
in tempdic.keys() : tempdic[balls_dictionnary[i][1].getStrain()][0] += 1 else : tempdic[balls_dictionnary[i][1].getStrain()] = [1, balls_dictionnary[i][1].getIdd()] tempdic2 = dict(tempdic) for i in range(0,3) : if len(tempdic2) == 0 : self.top_idds[i] = [0,0,0,0,0,[1,1,1]] else : key = self.idd_max(tempdic2) ind = balls_dictionnary[tempdic2[key][1]] self.top_idds[i]= ['ID'+key[7:], tempdic2[key][0], ind[1].getVir(), ind[1].getTransmRate(), ind[1].getRecovProb(), ind[0].get_col()] #add [souche,number,vir,trans,recov,color] del tempdic2[key] try : self.mean_trans = sumtrans/len(list_of_parazites) self.mean_recov = sumrecov/len(list_of_parazites) self.mean_vir = sumvir/len(list_of_parazites) except: self.mean_vir, self.mean_trans, self.mean_recov = 0,0,0 #_---------------gestion du temps sans les pauses-------- if self.pause == False: self.duration = time.clock()- self.paused #------------------------------- Enregistrement des données-------------------- if filename is not None or len(sys.argv) > 1: if len(sys.argv) > 2 : arg = "data_effect_tester/"+str(sys.argv[2]) + '.csv' #si on mass-effect print "saved in data_effect_tester/" elif len(sys.argv) > 1 : arg = "data_per_param/"+str(sys.argv[1]) + '.csv' #si on other param else : arg = "data_per_param/"+str(filename)+'.csv' if not isfile(str(arg)) : with open(str(arg), 'w') as par: csv_file = csv.writer(par, delimiter=',') if len(sys.argv) > 2 : #si on mass-effect csv_file.writerow(['time', 'secondary_infections']) elif len(sys.argv) > 1: csv_file.writerow(['time', 'healthies', 'parazites', 'mean_vir', 'mean_trans', 'mean_recov']) else : with open(str(arg), 'a') as par : csv_file =csv.writer(par, delimiter=',') if len(sys.argv) > 2 and len(strain_dictionary) > 0: #si on mass-effect csv_file.writerow([self.duration,len(strain_dictionary['Souche-101'][1])]) elif len(sys.argv) > 1 : #si on other param csv_file.writerow([self.duration,len(list_of_healthies), len(list_of_parazites),self.mean_vir,self.mean_trans,self.mean_recov]) #=========GUI BULLSHIT================================== temp_wig = [] for c in self.children: if not isinstance(c, Ball) : temp_wig.append(c) #pour éviter de trop grosses boucles if isinstance(temp_wig[0], Label) : #magouille parce que les widgets font n'importe quoi temp_wig = list(reversed(temp_wig)) for c in temp_wig : #tout ça rien que pour avoir un ordre de widget agréable, pff if isinstance(c,Button) : self.remove_widget(c) self.add_widget(c) for c in temp_wig : if isinstance(c,Label) : self.remove_widget(c) self.add_widget(c) for c in self.temp_widg_to_remove_list : self.remove_widget(c) for i in range(0,3) : ball = Ball() ball.size = 10,10 ball.center = (self.width-112,self.height-80*i-17) ball.velocity = (0,0) if len(self.top_idds) > i: ball.set_col(self.top_idds[i][5]) else : ball.set_col((0,0,0)) self.add_widget(ball) self.temp_widg_to_remove_list.append(ball) def shall_we_kill_the_simulation(self, dt) : '''En mode theory_tester, arrète la simulation quand il n'y a plus de parasite ou si le temps défini c'est écoulé''' if MODE == 'theory_tester': print self.duration if len(list_of_parazites) == 0 or self.duration > SIMULATION_TIME : self.on_stop() #=========GUI BULLSHIT================================== def update_data_files(self,dt): if MODE == 'war' or MODE == 'all_night_long': simulation_time = self.duration nb_of_healthies = len(list_of_healthies) #liste des individus sains vivants total_nb_of_parazites_alive = len(list_of_parazites) #liste des parasites vivants total_population = nb_of_healthies + 
total_nb_of_parazites_alive if nb_of_healthies > 0 : percentage_of_parazites_in_pop = round((float(total_nb_of_parazites_alive)/total_population)*100,2) #pourcentage de parasite dans la population totale else : percentage_of_parazites_in_pop = 0 # pour éviter division par zéro #inscription dans le fichier src/nb_sains_infectes.csv with open("nb_total_sains_infectes.csv","a") as csv_nb_total_sains_infectes: #creer le fichier des données générales de la population writer = csv.writer(csv_nb_total_sains_infectes) writer.writerow([simulation_time,total_population,nb_of_healthies,total_nb_of_parazites_alive,percentage_of_parazites_in_pop,self.mean_vir,self.mean_recov,self.mean_trans]) #parcours les souches et met à jour le fichier "souche.csv" qui leur correspond for strain_id in strain_dictionary: total_nb_of_infections = len(strain_dictionary[strain_id][1]) #nombre total d'infection secondaires nb_of_parazites_alive = 0 # nombre de parasites de cette souche en vie à l'instant t for id_infected_by_this_strain in strain_dictionary[strain_id][1]: # on parcours la liste des id qui ont été infectés par cette souche if id_infected_by_this_strain in balls_dictionnary: #on prend la liste des id qui ont été inféctés par cette souche et on compare avec la liste des invidividus encore en vie nb_of_parazites_alive += 1 if total_nb_of_parazites_alive != 0: percentage_of_all_infections = round((float(nb_of_parazites_alive)/total_nb_of_parazites_alive) * 100, 2) #quelle importance a cette souche comparée au total des autres else : percentage_of_all_infections = 0.0 if nb_of_parazites_alive > 0 : #on contnu de mettre à jour le fichier csv seuelment si la souche est encore active ( au moins 1 parasite encore en vie) with open("data/" + strain_id + ".csv","a") as UpdateStrainFile: dico_of_strains_for_csv[strain_id] = csv.writer(UpdateStrainFile) #dico_of_strains_for_csv[strain_id].writerow([simulation_time,strain_id,total_nb_of_infections,total_population,nb_of_parazites_alive, percentage_of_all_infections, float(strain_dictionary[strain_id][0][0]), float(strain_dictionary[strain_id][0][1]), float(strain_dictionary[strain_id][0][2])]) def on_pause(self): '''Arrète de mettre à jour update, update_life_and_death et update_files ''' global Event Clock.unschedule(self.update) Clock.unschedule(self.update_life_and_death) args = None #passage d'argument if len(sys.argv) > 1 : args = sys.argv[1] Clock.unschedule(Event) def on_resume(self): '''recommence de mettre à jour update, update_life_and_death, update_files''' global Event Clock.schedule_interval(self.update, DELTA_TIME) Clock.schedule_interval(self.update_life_and_death, 60*DELTA_TIME) args = None #passage d'argument if len(sys.argv) > 1 : args = sys.argv[1] Event = Clock.schedule_interval(partial(self.update_files, filename = args), 60*DELTA_TIME) def on_stop(self): '''Ferme l'application''' App.get_running_app().stop() return True def on_touch_down(self, touch): """Géré par kivy, quand on clique sur la fenêtre, affiche un graphique du nombre d'infections decondaire en fonction de la virulance""" if not self.pause : return listx = [] listy = [] for i in strain_dictionary.keys(): listx.append(strain_dictionary[i][0][0]) listy.append(len(strain_dictionary[i][1])) plt.scatter(listx, listy) plt.ylabel('Secondary infections') plt.plot((self.mean_vir, self.mean_vir), (0,len(list_of_parazites) + len(list_of_healthies)), 'k-',color = 'r') plt.title('Nb of sec. 
infections following virulance at time = ' + str(self.duration) + 'sec') plt.show() return def Keyboard(self, window, keycode, *args) : """Géré par Kivy, permet d'utiliser les inputs du clavier""" if keycode == 32 and not self.pause: #SPACE - pour ça je rajoute un print keycode avant et check le int self.duration = time.clock() - self.paused self.paused -= time.clock() self.on_pause() self.pause = True elif keycode == 32 and self.pause : self.on_resume() self.pause = False self.paused += time.clock() elif keycode == 275 : #right self.faster_events.append([Clock.schedule_interval(self.update, DELTA_TIME), Clock.schedule_interval(self.update_life_and_death, 60*DELTA_TIME)]) elif keycode == 276 : #left if len(self.faster_events)>0 : self.faster_events[-1][0].cancel() self.faster_events[-1][1].cancel() self.faster_events.pop() elif keycode == 115:# s pour stop self.on_stop() def idd_max(self,dico): '''Appelé dans update numbers, pour gérer les trois top IDs''' a = [i[0] for i in dico.values()] b=list(dico.keys()) return b[a.index(max(a))] # # -------------------- balls container-------------------- #-----------------------------Kivy GUI----------------------------------------------- if __name__ == '__main__': mainApp().run() #-----------------------------Kivy GUI----------------------------------------------- #test pour les résistances ''' h = add_one_healthy() ball = Ball() balls_dictionnary[h.getIdd()] = [ball, h, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] p = add_one_parazite() balls_dictionnary[p.getIdd()] = [ball, p, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] h1 = list_of_healthies[-1] p1 = list_of_parazites[-1] infect_him(p1,h1) guerison(list_of_parazites[-1]) print list_of_healthies[-1].getResistances() infect_him(p1, list_of_healthies[-1]) print"doit etre la", list_of_healthies[-1] p2 = add_one_parazite() balls_dictionnary[p2.getIdd()] = [ball, p2, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] infect_him(p2, list_of_healthies[-1]) print "encore par", list_of_parazites[-1].getResistances() guerison(list_of_parazites[-1]) print list_of_healthies[-1].getResistances() ''' #test pour le dico de souches ''' #infection h = add_one_healthy() #ID1 ball = Ball() balls_dictionnary['ID1'] = [ball, h, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] p = add_one_parazite() balls_dictionnary[p.getIdd()] = [ball, p, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] p1 = balls_dictionnary['ID2'][1] h1 = balls_dictionnary['ID1'][1] print p1.getStrain() infect_him(p1, h1) h1 = balls_dictionnary['ID1'][1] print type(h1) print len(list_of_parazites) print len(list_of_healthies) p2 = add_one_parazite() balls_dictionnary[p2.getIdd()] = [ball, p2, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] list_of_parazites[-1].setVir(0) print list_of_parazites[-1].getStrain() parazite_against_parazite(balls_dictionnary['ID2'][1], balls_dictionnary['ID3'][1]) print list_of_parazites[-1].getStrain() print strain_dictionary[balls_dictionnary['ID2'][1].getStrain()] reproduce(dt, balls_dictionnary['ID2'][1]) # def theory_tester(self,dt): # if TEST_THEORY == 1: # global CHANCE_OF_MUTATION_ON_INFECTION, CHANCE_OF_MUTATION_ON_NOTHING,CHANCE_OF_MUTATION_ON_REPRODUCTION, PARAZITE_FIGHT_CHANCE # CHANCE_OF_MUTATION_ON_INFECTION, CHANCE_OF_MUTATION_ON_NOTHING,CHANCE_OF_MUTATION_ON_REPRODUCTION, PARAZITE_FIGHT_CHANCE,GENERATION_RESISTANCE = 0,0,0,0,0 # if len(list_of_parazites) == 0 or self.duration - self.last_clock >= 15 : #time - change here # 
try: # eff = (list_of_parazites[-1].getVir()*100)**0.5 # except: # eff = 0.2 # print "effect = ", str(eff) # print "to get this results" # # self.place_neuve(dt) # # print "nb heal :", len(list_of_healthies) # print "nb par : ", len(list_of_parazites) # # self.start_balls(dt) # for i in range(0,NB_PARASITE): # ball = Ball() # ball.center = (randint(self.x, self.x+self.width), randint(self.y, self.y+self.height)) # ball.velocity = (-MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED), #à revoir # -MAX_BALL_SPEED + random() * (2 * MAX_BALL_SPEED)) # self.add_widget(ball) # ball.set_col((uniform(0,1),uniform(0,1),0)) # # parazite = add_one_parazite(effect = eff) # balls_dictionnary[parazite.getIdd()] = [ball, parazite, [ball.x, ball.x + ball.width, ball.y, ball.y + ball.height]] # self.last_clock = self.duration # print "nb heal :", str(len(list_of_healthies)) # print "nb par : ", str(len(list_of_parazites)) # if (list_of_parazites[-1].getVir()*100)**0.5 >=9.81: # self.pause() #if isfile('data.csv') and 'y' != raw_input("le fichier data existe déjà, le remplaçer? (y/n)"): exit() #écrit les headers dans le dossier # #csv_nb_total_sains_infectes = csv.writer(open("nb_total_sains_infectes.csv","wb")) #creer le fichier des données générales de la population #csv_nb_total_sains_infectes.writerow(["temps","population totale","individus sains","individus infectés","pourcentage de la population infectée", # "virulence moyenne","taux de transmission guérison","taux de transmission"]) #header des données générales de la pop # def update_data_files(self,dt): # current_time = time.clock() # simulation_time = round(current_time - initial_time,4) # nb_of_healthies = len(list_of_healthies) #liste des individus sains vivants # total_nb_of_parazites_alive = len(list_of_parazites) #liste des parasites vivants # total_population = nb_of_healthies + total_nb_of_parazites_alive # if nb_of_healthies > 0 : # percentage_of_parazites_in_pop = round((float(total_nb_of_parazites_alive)/total_population)*100,2) #pourcentage de parasite dans la population totale # else : # percentage_of_parazites_in_pop = 0 # pour éviter division par zéro # # #inscription dans le fichier src/nb_sains_infectes.csv # csv_nb_total_sains_infectes.writerow([simulation_time,total_population,nb_of_healthies, # total_nb_of_parazites_alive,percentage_of_parazites_in_pop,self.mean_vir,self.mean_recov,self.mean_trans]) # # #parcours les souches et met à jour le fichier "souche.csv" qui leur correspond # for strain_id in strain_dictionary: # total_nb_of_infections = len(strain_dictionary[strain_id][1]) #nombre total d'infection secondaires # nb_of_parazites_alive = 0 # nombre de parasites de cette souche en vie à l'instant t # for id_infected_by_this_strain in strain_dictionary[strain_id][1]: # on parcours la liste des id qui ont été infectés par cette souche # if id_infected_by_this_strain in balls_dictionnary: #on prend la liste des id qui ont été inféctés par cette souche et on compare avec la liste des invidividus encore en vie # nb_of_parazites_alive += 1 # # percentage_of_all_infections = round((float(nb_of_parazites_alive)/total_nb_of_parazites_alive) * 100, 2) #quelle importance a cette souche comparée au total des autres # # if nb_of_parazites_alive > 0 : #on contnu de mettre à jour le fichier csv seuelment si la souche est encore active ( au moins 1 parasite encore en vie) # with open("data/" + strain_id + ".csv","a") as UpdateStrainFile: # dico_of_strains_for_csv[strain_id] = csv.writer(UpdateStrainFile) # 
dico_of_strains_for_csv[strain_id].writerow([simulation_time,strain_id,total_nb_of_infections, # total_population,nb_of_parazites_alive, percentage_of_all_infections]) '''
gpl-2.0
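One step in `add_one_parazite` above that is easy to miss is the 'dada' trade-off: virulence, transmission and recovery are drawn uniformly and then rescaled so that they always sum to a fixed fitness budget. The standalone sketch below isolates only that step; `BASE_FITNESS = 1.0` is an illustrative stand-in for the constant defined in `CHANGING_CONST.py`.

from random import uniform

BASE_FITNESS = 1.0  # illustrative stand-in for the value in CHANGING_CONST.py


def draw_dada_traits(base_fitness=BASE_FITNESS):
    """Draw (virulence, transmission, recovery) under the 'dada' trade-off:
    three uniform draws rescaled so that they sum to the fitness budget."""
    vir, trans, recov = uniform(0, 1), uniform(0, 1), uniform(0, 1)
    norm = base_fitness / (vir + trans + recov)
    return vir * norm, trans * norm, recov * norm


vir, trans, recov = draw_dada_traits()
assert abs((vir + trans + recov) - BASE_FITNESS) < 1e-9
print(vir, trans, recov)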
JPalmerio/GRB_population_code
grbpop/miscellaneous.py
1
8019
import logging import numpy as np import pandas as pd from io_grb_pop import root_dir, read_logRlogN log = logging.getLogger(__name__) def log_to_lin(log_x, log_x_errp, log_x_errm=None): """ Takes logscale data with errors and converts to linear scale with correct error propagation. If log_x_errm is not provided, errors are assumed symmetric. Returns : x, x_errp, x_errm """ if log_x_errm is None: log_x_errm = log_x_errp x = 10**log_x x_errp = x * (10**log_x_errp - 1.0) x_errm = x * (1.0 - 10**(-log_x_errm)) return x, x_errp, x_errm def lin_to_log(x, x_errp, x_errm=None): """ Takes linear data with errors and converts to logscale with correct error propagation. If x_errm is not provided, errors are assumed symmetric. Returns : log_x, log_x_errp, log_x_errm """ if x_errm is None: x_errm = x_errp log_x = np.log10(x) log_x_errp = np.log10((x + x_errp) / x) log_x_errm = np.log10(x / (x - x_errm)) return log_x, log_x_errp, log_x_errm def lin_to_log_ndarray(x): """ Helper function that turns an ndarray in linscale to logscale with proper error propagation. Assumes the data is in the form of the output of read_column (ndarray). i.e. : x[0] = data (float) x[1] = error plus (float) x[2] = error minus (float) x[3] = upper limit (bool) x[4] = lower limit (bool) """ log_x = np.zeros(x.shape) log_x[0] = np.log10(x[0]) log_x[1] = np.log10(x[0] + x[1]) - log_x[0] log_x[2] = log_x[0] - np.log10(x[0] - x[2]) log_x[3] = x[3] log_x[4] = x[4] return log_x def filter_df(df, filtering_key, lim_min=None, lim_max=None, equal=None, errors='raise', strip=None, string=False): """ Filter a df using a criteria based on filtering_key """ if not string: df[filtering_key] = pd.to_numeric(df[filtering_key], errors=errors) else: df[filtering_key] = df[filtering_key].str.strip(strip) if (lim_min is None) and (lim_max is None) and (equal is None): raise ValueError elif (lim_min is not None) and (lim_max is not None) and (equal is None): cond_min = df[filtering_key] >= lim_min cond_max = df[filtering_key] <= lim_max cond = cond_min & cond_max elif lim_min is not None: cond = df[filtering_key] >= lim_min elif lim_max is not None: cond = df[filtering_key] <= lim_max elif equal is not None: cond = df[filtering_key] == equal else: print("lim_min = {}".format(lim_min)) print("lim_max = {}".format(lim_max)) print("equal = {}".format(equal)) raise ValueError("You cannot provide all these arguments. Filtering must be " "lim_min and/or lim_max OR equal") df_out = df[cond].copy() return df_out def calc_cat_duration(fname, verbose=False): from datetime import datetime data = pd.read_csv(fname, sep='|', header=2, low_memory=False) data.rename(columns=lambda x:x.strip(), inplace=True) first_date = datetime.strptime(data['name'].min().strip('GRB')[:-3], '%y%m%d') last_date = datetime.strptime(data['name'].max().strip('GRB')[:-3], '%y%m%d') delta_t = (last_date - first_date).days/364.25 if verbose: print('First GRB detected on {}'.format(first_date)) print('Last GRB detected on {}'.format(last_date)) print('Duration: {:.3f} years'.format(delta_t)) return delta_t def create_filtered_sample(fname, keys, func=None, func_args={}, log=False, header=2, verbose=False, debug=False, errors='raise', subdf=False): """ Convenience function to quickly extract an observed sample from a given file name. A function to filter or cut the sample can be passed as func. 
""" # Read the entire file df_obs = pd.read_csv(fname, sep='|', header=header, low_memory=False) # Strip the colum names to remove whitespaces df_obs.rename(columns=lambda x:x.strip(), inplace=True) # Activate debug to check the column names if debug: for i,col in enumerate(df_obs.columns): print(i,col) # Apply function to the data if func is None: for key in keys: df_obs[key] = pd.to_numeric(df_obs[key], errors=errors) # If func is a list, iterate through the list and apply each function elif isinstance(func, list): if not isinstance(func_args, list): raise ValueError for i, func_i in enumerate(func): df_obs = func_i(df_obs, **func_args[i]) for key in keys: df_obs[key] = pd.to_numeric(df_obs[key], errors=errors) else: df_obs = func(df_obs.copy(), **func_args) df_obs[key] = pd.to_numeric(df_obs[key], errors=errors) if log: for key in keys: df_obs[key] = pd.to_numeric(df_obs[key], errors=errors) df_obs[key] = np.log10(df_obs[key]) if verbose: print("Sample size :{}".format(len(df_obs.dropna()))) if subdf: df_obs = df_obs[keys].copy() return df_obs def mask_ndarray(ndarray, mask): """ Helper function to easily mask a ndarray output from read_data """ if len(mask) != ndarray.shape[1]: print("[Error] in mask_ndarray : mask and array length are not the same") masked_ndarray = [] for i in range(ndarray.shape[0]): masked_ndarray.append(ndarray[i,mask]) masked_ndarray = np.asarray(masked_ndarray) return masked_ndarray def sort_ndarray(ndarray, sorter=None): """ Sorts a ndarray with finite values !! (will fail if there are NaNs) If sorter is none, sorts the array with its values. Otherwise, uses sorter as an argsort output. """ if sorter is None: arr_ind = ndarray[0].argsort() else: arr_ind = sorter sorted_data = np.zeros(ndarray.shape) sorted_data[:,:] = ndarray[:,arr_ind] return sorted_data def calc_rel_errors_GBM_band(df_obs): # Calculate relative errors on Band parameters for later filtering keys = ['pflx_band_epeak_pos_err','pflx_band_epeak', 'pflx_band_ampl_pos_err', 'pflx_band_ampl'] for k in keys: df_obs[k] = pd.to_numeric(df_obs[k], errors='coerce') df_obs['pflx_band_epeak_rel_err'] = df_obs['pflx_band_epeak_pos_err']/df_obs['pflx_band_epeak'] df_obs['pflx_band_ampl_rel_err'] = df_obs['pflx_band_ampl_pos_err']/df_obs['pflx_band_ampl'] return df_obs def global_GRB_rate_Stern(): """ Calculate the global LGRB rate by correcting the Stern histogram with the efficiency correction [Stern et al. 2001] (https://ui.adsabs.harvard.edu/abs/2001ApJ...563...80S/abstract) Eq. 5. and summing over all the bins """ fname = root_dir/'resources/Stern_logRlogN.txt' bins, hist_obs, err_obs = read_logRlogN(fname) delta_bins = np.log10(bins[1:]/bins[:-1]) all_sky_glob_rate = np.sum(10**(hist_obs)*delta_bins) log.info(''.join([f"Global LGRB rate from Stern constraint: {all_sky_glob_rate:.2f} ", f"GRB/yr in 4 pi with peak flux in [50-300 keV] above {bins.min()} ph/s/cm2"])) return all_sky_glob_rate def efficiency_correction_Stern(pflx, c_e0=0.097, nu=2.34, norm=0.7): """ The efficiency function of BATSE for detecting GRBs as a function of peak flux, derived by Stern+01 c_e0 is in [counts/s/cm2] pflx is in [ph/s/cm2] """ c_e = pflx * 0.75 # the conversion factor from counts to pflx comes from the Stern+01 paper as well, figure 7. 
    return norm * (1.0 - np.exp(-(c_e/c_e0)**2))**nu


def k_correction(z, photon_index=1.5, zm=2.):
    return ((1.+z)/(1+zm))**(photon_index-2)


def str2bool(s):
    if s.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif s.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise TypeError('Boolean value expected')
gpl-3.0
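A quick worked check of the error-propagation formulas in `log_to_lin`/`lin_to_log` from `miscellaneous.py` above. The numbers are invented; the point is that converting asymmetric log-scale errors to linear scale and back recovers the original values.

import numpy as np

# Made-up measurement: log10(x) = 52.3 with asymmetric errors +0.2 / -0.1 dex.
log_x, log_errp, log_errm = 52.3, 0.2, 0.1

# log -> lin, following log_to_lin()
x = 10 ** log_x
x_errp = x * (10 ** log_errp - 1.0)      # about +58% of x for +0.2 dex
x_errm = x * (1.0 - 10 ** (-log_errm))   # about -21% of x for -0.1 dex

# lin -> log, following lin_to_log(); the original dex errors are recovered
print(np.isclose(np.log10((x + x_errp) / x), log_errp))  # True
print(np.isclose(np.log10(x / (x - x_errm)), log_errm))  # True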
davidam/python-examples
pandas/pandas-plot-iris-analysis.py
1
2301
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez

# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>

# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.

# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,

"""
Analysis of Iris petal and sepal sizes
=======================================

Illustrate an analysis on a real dataset:

- Visualizing the data to formulate intuitions
- Fitting of a linear model
- Hypothesis test of the effect of a categorical variable in the
  presence of a continuous confound

"""

import matplotlib.pyplot as plt

import pandas
from pandas.tools import plotting

from statsmodels.formula.api import ols

# Load the data
data = pandas.read_csv('iris.csv')

##############################################################################
# Plot a scatter matrix

# Express the names as categories
categories = pandas.Categorical(data['name'])

# The parameter 'c' is passed to plt.scatter and will control the color
plotting.scatter_matrix(data, c=categories.labels, marker='o')

fig = plt.gcf()
fig.suptitle("blue: setosa, green: versicolor, red: virginica", size=13)

##############################################################################
# Statistical analysis

# Let us try to explain the sepal width as a function of the petal
# length and the category of iris

model = ols('sepal_width ~ name + petal_length', data).fit()
print(model.summary())

# Now formulate a "contrast", to test if the offsets for versicolor and
# virginica are identical

print('Testing the difference between effect of versicolor and virginica')
print(model.f_test([0, 1, -1, 0]))
plt.show()
gpl-3.0
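The iris example above targets an older pandas release: `pandas.tools.plotting` and `Categorical.labels` have since been removed. A rough sketch of the same analysis against current pandas/statsmodels APIs, assuming the same local `iris.csv` with `name`, `sepal_width` and `petal_length` columns, would look like this (not part of the original repository):

import matplotlib.pyplot as plt
import pandas as pd
from statsmodels.formula.api import ols

data = pd.read_csv('iris.csv')  # assumed columns: name, sepal_width, petal_length, ...

# Modern replacements for pandas.tools.plotting and Categorical.labels
categories = pd.Categorical(data['name'])
pd.plotting.scatter_matrix(data, c=categories.codes, marker='o')

model = ols('sepal_width ~ name + petal_length', data).fit()
print(model.summary())
print(model.f_test([0, 1, -1, 0]))  # versicolor vs. virginica offset
plt.show()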
altairpearl/scikit-learn
examples/linear_model/plot_sgd_penalties.py
124
1877
""" ============== SGD: Penalties ============== Plot the contours of the three penalties. All of the above are supported by :class:`sklearn.linear_model.stochastic_gradient`. """ from __future__ import division print(__doc__) import numpy as np import matplotlib.pyplot as plt def l1(xs): return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs]) def l2(xs): return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs]) def el(xs, z): return np.array([(2 - 2 * x - 2 * z + 4 * x * z - (4 * z ** 2 - 8 * x * z ** 2 + 8 * x ** 2 * z ** 2 - 16 * x ** 2 * z ** 3 + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2) - 2 * x * z ** 2) / (2 - 4 * z) for x in xs]) def cross(ext): plt.plot([-ext, ext], [0, 0], "k-") plt.plot([0, 0], [-ext, ext], "k-") xs = np.linspace(0, 1, 100) alpha = 0.501 # 0.5 division throuh zero cross(1.2) l1_color = "navy" l2_color = "c" elastic_net_color = "darkorange" lw = 2 plt.plot(xs, l1(xs), color=l1_color, label="L1", lw=lw) plt.plot(xs, -1.0 * l1(xs), color=l1_color, lw=lw) plt.plot(-1 * xs, l1(xs), color=l1_color, lw=lw) plt.plot(-1 * xs, -1.0 * l1(xs), color=l1_color, lw=lw) plt.plot(xs, l2(xs), color=l2_color, label="L2", lw=lw) plt.plot(xs, -1.0 * l2(xs), color=l2_color, lw=lw) plt.plot(-1 * xs, l2(xs), color=l2_color, lw=lw) plt.plot(-1 * xs, -1.0 * l2(xs), color=l2_color, lw=lw) plt.plot(xs, el(xs, alpha), color=elastic_net_color, label="Elastic Net", lw=lw) plt.plot(xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw) plt.plot(-1 * xs, el(xs, alpha), color=elastic_net_color, lw=lw) plt.plot(-1 * xs, -1.0 * el(xs, alpha), color=elastic_net_color, lw=lw) plt.xlabel(r"$w_0$") plt.ylabel(r"$w_1$") plt.legend() plt.axis("equal") plt.show()
bsd-3-clause
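The `el()` boundary above is a closed-form solution for the elastic-net unit ball, which is why it is so convoluted and why `alpha = 0.5` must be avoided. An alternative approach (not part of the original example) is to evaluate each penalty on a grid and let `plt.contour` trace the unit level set; the elastic-net parameterisation below is one common convention and is meant only as an illustration.

import numpy as np
import matplotlib.pyplot as plt

# Evaluate each penalty on a grid and let contour() trace its unit level set.
w0, w1 = np.meshgrid(np.linspace(-1.5, 1.5, 401), np.linspace(-1.5, 1.5, 401))
rho = 0.5  # elastic-net mixing parameter, illustrative value

penalties = [
    (np.abs(w0) + np.abs(w1), "navy", "L1"),
    (w0 ** 2 + w1 ** 2, "c", "L2"),
    (rho * (np.abs(w0) + np.abs(w1)) + (1 - rho) * (w0 ** 2 + w1 ** 2),
     "darkorange", "Elastic Net"),
]
for penalty, color, label in penalties:
    plt.contour(w0, w1, penalty, levels=[1.0], colors=color)
    plt.plot([], [], color=color, label=label)  # proxy artist for the legend

plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()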
jart/tensorflow
tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
5
12089
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TFGAN's estimator.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile from absl.testing import parameterized import numpy as np import six from tensorflow.contrib import layers from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples from tensorflow.contrib.gan.python.estimator.python import gan_estimator_impl as estimator from tensorflow.contrib.gan.python.losses.python import tuple_losses as losses from tensorflow.contrib.learn.python.learn.learn_io import graph_io from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import metrics as metrics_lib from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import input as input_lib from tensorflow.python.training import learning_rate_decay from tensorflow.python.training import training from tensorflow.python.training import training_util def generator_fn(noise_dict, mode): del mode noise = noise_dict['x'] return layers.fully_connected(noise, noise.shape[1].value) def discriminator_fn(data, unused_conditioning, mode): del unused_conditioning, mode return layers.fully_connected(data, 1) class GetGANModelTest(test.TestCase, parameterized.TestCase): """Tests that `GetGANModel` produces the correct model.""" @parameterized.named_parameters( ('train', model_fn_lib.ModeKeys.TRAIN), ('eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.PREDICT)) def test_get_gan_model(self, mode): with ops.Graph().as_default(): generator_inputs = {'x': array_ops.ones([3, 4])} real_data = (array_ops.zeros([3, 4]) if mode != model_fn_lib.ModeKeys.PREDICT else None) gan_model = estimator._get_gan_model( mode, generator_fn, discriminator_fn, real_data, generator_inputs, add_summaries=False) self.assertEqual(generator_inputs, gan_model.generator_inputs) self.assertIsNotNone(gan_model.generated_data) self.assertEqual(2, len(gan_model.generator_variables)) # 1 FC layer self.assertIsNotNone(gan_model.generator_fn) if mode == model_fn_lib.ModeKeys.PREDICT: self.assertIsNone(gan_model.real_data) self.assertIsNone(gan_model.discriminator_real_outputs) self.assertIsNone(gan_model.discriminator_gen_outputs) self.assertIsNone(gan_model.discriminator_variables) self.assertIsNone(gan_model.discriminator_scope) 
self.assertIsNone(gan_model.discriminator_fn) else: self.assertIsNotNone(gan_model.real_data) self.assertIsNotNone(gan_model.discriminator_real_outputs) self.assertIsNotNone(gan_model.discriminator_gen_outputs) self.assertEqual(2, len(gan_model.discriminator_variables)) # 1 FC layer self.assertIsNotNone(gan_model.discriminator_scope) self.assertIsNotNone(gan_model.discriminator_fn) def get_dummy_gan_model(): # TODO(joelshor): Find a better way of creating a variable scope. with variable_scope.variable_scope('generator') as gen_scope: gen_var = variable_scope.get_variable('dummy_var', initializer=0.0) with variable_scope.variable_scope('discriminator') as dis_scope: dis_var = variable_scope.get_variable('dummy_var', initializer=0.0) return tfgan_tuples.GANModel( generator_inputs=None, generated_data=array_ops.ones([3, 4]), generator_variables=[gen_var], generator_scope=gen_scope, generator_fn=None, real_data=array_ops.zeros([3, 4]), discriminator_real_outputs=array_ops.ones([1, 2, 3]) * dis_var, discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * gen_var * dis_var, discriminator_variables=[dis_var], discriminator_scope=dis_scope, discriminator_fn=None) def dummy_loss_fn(gan_model): return math_ops.reduce_sum(gan_model.discriminator_real_outputs - gan_model.discriminator_gen_outputs) def get_metrics(gan_model): return { 'mse_custom_metric': metrics_lib.mean_squared_error( gan_model.real_data, gan_model.generated_data) } class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase): """Tests that the EstimatorSpec is constructed appropriately.""" @classmethod def setUpClass(cls): cls._generator_optimizer = training.GradientDescentOptimizer(1.0) cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0) @parameterized.named_parameters( ('train', model_fn_lib.ModeKeys.TRAIN), ('eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.PREDICT)) def test_get_estimator_spec(self, mode): with ops.Graph().as_default(): self._gan_model = get_dummy_gan_model() spec = estimator._get_estimator_spec( mode, self._gan_model, generator_loss_fn=dummy_loss_fn, discriminator_loss_fn=dummy_loss_fn, get_eval_metric_ops_fn=get_metrics, generator_optimizer=self._generator_optimizer, discriminator_optimizer=self._discriminator_optimizer) self.assertEqual(mode, spec.mode) if mode == model_fn_lib.ModeKeys.PREDICT: self.assertEqual(self._gan_model.generated_data, spec.predictions) elif mode == model_fn_lib.ModeKeys.TRAIN: self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar self.assertIsNotNone(spec.train_op) self.assertIsNotNone(spec.training_hooks) elif mode == model_fn_lib.ModeKeys.EVAL: self.assertEqual(self._gan_model.generated_data, spec.predictions) self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar self.assertIsNotNone(spec.eval_metric_ops) # TODO(joelshor): Add pandas test. 
class GANEstimatorIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _test_complete_flow( self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size, lr_decay=False): def make_opt(): gstep = training_util.get_or_create_global_step() lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9) return training.GradientDescentOptimizer(lr) gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0) dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0) est = estimator.GANEstimator( generator_fn=generator_fn, discriminator_fn=discriminator_fn, generator_loss_fn=losses.wasserstein_generator_loss, discriminator_loss_fn=losses.wasserstein_discriminator_loss, generator_optimizer=gopt, discriminator_optimizer=dopt, get_eval_metric_ops_fn=get_metrics, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'], scores['loss']) self.assertIn('mse_custom_metric', six.iterkeys(scores)) # PREDICT predictions = np.array([x for x in est.predict(predict_input_fn)]) self.assertAllEqual(prediction_size, predictions.shape) def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" input_dim = 4 batch_size = 5 data = np.zeros([batch_size, input_dim]) train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, prediction_size=[batch_size, input_dim]) def test_numpy_input_fn_lrdecay(self): """Tests complete flow with numpy_input_fn.""" input_dim = 4 batch_size = 5 data = np.zeros([batch_size, input_dim]) train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, prediction_size=[batch_size, input_dim], lr_decay=True) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" input_dim = 4 batch_size = 6 data = np.zeros([batch_size, input_dim]) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), 'y': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32), 'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32), } def _train_input_fn(): feature_map = parsing_ops.parse_example( serialized_examples, feature_spec) _, features = 
graph_io.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) _, features = graph_io.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) _, features = graph_io.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, prediction_size=[batch_size, input_dim]) if __name__ == '__main__': test.main()
apache-2.0
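A minimal sketch (assumed, not taken from the test file above) of how the GANEstimator exercised by that test is typically wired up on TensorFlow 1.x; the one-layer generator_fn and discriminator_fn below are stand-ins for the helpers the test module defines near its top.

# Sketch only: assumes TensorFlow 1.x with tf.contrib.gan available.
import numpy as np
import tensorflow as tf

tfgan = tf.contrib.gan

def generator_fn(noise_dict):
    noise = noise_dict['x']                 # features dict produced by numpy_input_fn
    return tf.layers.dense(noise, units=4)  # assumed single-layer generator

def discriminator_fn(data, unused_conditioning):
    return tf.layers.dense(data, units=1)   # assumed single-layer critic

est = tfgan.estimator.GANEstimator(
    generator_fn=generator_fn,
    discriminator_fn=discriminator_fn,
    generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
    discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
    generator_optimizer=tf.train.GradientDescentOptimizer(1.0),
    discriminator_optimizer=tf.train.GradientDescentOptimizer(1.0))

data = np.zeros([5, 4], dtype=np.float32)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
    x={'x': data}, y=data, batch_size=5, num_epochs=None, shuffle=True)
est.train(train_input_fn, steps=10)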
chrisjsewell/PyGauss
pygauss/analysis.py
1
44913
# -*- coding: utf-8 -*- from itertools import product, imap import copy import math import string import multiprocessing import platform import numpy as np import pandas as pd from pandas.tools.plotting import radviz import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import ColorConverter from sklearn.cluster import KMeans from IPython.core.display import clear_output from .molecule import Molecule from .utils import df_to_img from .file_io import Folder def unpack_and_make_molecule(val_dict): if val_dict.has_key('args'): args = val_dict.pop('args') else: args = [] return Molecule(*args, **val_dict) class Analysis(object): """a class to analyse multiple computations """ def __init__(self, folderpath='', server=None, username=None, passwrd=None, folder_obj=None, headers=[]): """a class to analyse multiple computations Parameters ---------- folderpath : str the folder directory storing the files to be analysed server : str the name of the server storing the files to be analysed username : str the username to connect to the server passwrd : str server password, if not present it will be asked for during initialisation headers : list the variable categories for each computation """ self._folder = None if folder_obj: self._folder = folder_obj elif folderpath or server: self.set_folder(folderpath, server, username, passwrd) heads = headers[:]+['Molecule'] self._df = pd.DataFrame(columns=heads) self._df.index.name = 'ID' self._next_index = 0 def __repr__(self): return self.get_table().to_string() def copy(self): clone = copy.deepcopy(self) return clone def get_folder(self): return self._folder def set_folder(self, folderpath='', server=None, username=None, passwrd=None): self._folder = Folder(folderpath, server, username, passwrd) folder = property(get_folder, set_folder, doc="The folder for gaussian runs") def count_runs(self): """ get number of runs held in analysis """ return len(self._df.index) def _add_molecule(self, molecule, identifiers): """add molecule to internal dataframe """ identifiers['Molecule'] = molecule series = pd.DataFrame(identifiers, index=[self._next_index]) self._df = self._df.copy().append(series) self._next_index += 1 return True def add_run(self, identifiers={}, init_fname=None, opt_fname=None, freq_fname=None, nbo_fname=None, alignto=[], atom_groups={}, add_if_error=False, folder_obj=None): """add single Gaussian run input/outputs """ if not folder_obj: folder_obj = self._folder molecule = Molecule(init_fname=init_fname, opt_fname=opt_fname, freq_fname=freq_fname, nbo_fname=nbo_fname, folder_obj=folder_obj, alignto=alignto, atom_groups=atom_groups, fail_silently=True) num_files = filter(lambda x:x, [init_fname, opt_fname, freq_fname, nbo_fname]) read_errors = molecule.get_init_read_errors() if len(read_errors) != num_files and (not read_errors or add_if_error): self._add_molecule(molecule, identifiers) return molecule.get_init_read_errors() def _get_molecules(self, mol_inputs, folder_obj, identifiers, ipython_print=False): """ get molecules """ if folder_obj.islocal() and not platform.system() == 'Windows': pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()) mapping = pool.imap else: mapping = imap with folder_obj: molecules=[] all_read_errors = [] for molecule in mapping(unpack_and_make_molecule, mol_inputs): molecules.append(molecule) read_errors = [] for typ, fname, msg in molecule.get_init_read_errors(): idents = identifiers[len(molecules)-1].copy() idents.pop('Molecule', '_') idents['Type'] = typ idents['File'] = fname 
idents['Error_Message'] = msg read_errors.append(idents) all_read_errors.append(read_errors) if ipython_print: print 'Reading data {0} of {1}'.format(len(molecules), len(mol_inputs)) try: clear_output(wait=True) except: pass if folder_obj.islocal() and not platform.system() == 'Windows': pool.close() pool.join() return molecules, all_read_errors def add_runs(self, headers=[], values=[], init_pattern=None, opt_pattern=None, freq_pattern=None, nbo_pattern=None, add_if_error=False, alignto=[], atom_groups={}, ipython_print=False, folder_obj=None): """add multiple Gaussian run inputs/outputs """ # set folder oject if not folder_obj: folder_obj = self._folder #get variables for each run mol_inputs = [] identifiers = [] for idents in product(*values): mol_input = {} identifiers.append(dict(zip(headers, idents))) mol_input['init_fname'] = init_pattern.format(*idents) if init_pattern else None if type(opt_pattern) is str: mol_input['opt_fname'] = opt_pattern.format(*idents) if opt_pattern else None elif type(opt_pattern) is list or type(opt_pattern) is tuple: mol_input['opt_fname'] = [o.format(*idents) for o in opt_pattern] else: mol_input['opt_fname'] = None mol_input['freq_fname'] = freq_pattern.format(*idents) if freq_pattern else None mol_input['nbo_fname'] = nbo_pattern.format(*idents) if nbo_pattern else None mol_input['folder_obj'] = folder_obj mol_input['alignto'] = alignto mol_input['atom_groups'] = atom_groups mol_input['fail_silently'] = True mol_inputs.append(mol_input) #create the molecules molecules, read_errors = self._get_molecules(mol_inputs, folder_obj, identifiers, ipython_print) #add the molecules to the internal table for molecule, idents, inputs, read_error in zip(molecules, identifiers, mol_inputs, read_errors): num_files = filter(lambda x:x, [inputs['init_fname'], inputs['opt_fname'], inputs['freq_fname'], inputs['nbo_fname']]) if read_error != num_files and (not read_error or add_if_error): self._add_molecule(molecule, idents) #collate read errors into a dataframe to return read_errors = filter(len, read_errors) err_df = pd.DataFrame([item for sublist in read_errors for item in sublist]) if read_errors: cols = err_df.columns.tolist() #rearrange columns headers cols.remove('Type'); cols.append('Type') cols.remove('File'); cols.append('File') cols.remove('Error_Message'); cols.append('Error_Message') err_df = err_df[cols] return err_df def get_table(self, rows=[], columns=[], filters={}, precision=4, head=False, mol=False, row_index=[], column_index=[], as_image=False, na_rep='-', font_size=None, width=None, height=None, unconfined=False): """return pandas table of requested data in requested format Parameters ----------- rows : integer or list of integers select row ids columns : string/integer or list of strings/integers select column names/positions filters : dict filter for rows with certain value(s) in specific columns precision : int decimal precision of displayed values head : int return only first n rows mol : bool include column containing the molecule objects row_index : string or list of strings columns to use as new index column_index : list of strings srings to place in to higher order column indexs as_image : bool output the table as an image (used pygauss.utils.df_to_img) na_rep : str how to represent empty (nan) cells (if outputting image) width, height, unconfined : int, int, bool args for IPy Image Returns ------- df : pandas.DataFrame a table of data """ pd.set_option('precision', precision) if mol: df = self._df.copy() else: df = self._df.drop('Molecule', 
axis=1) for key, val in filters.iteritems(): if type(val) is list or type(val) is tuple: df = df[getattr(df, key).isin(val)] else: df = df[getattr(df, key)==val] if type(rows) is not list and type(rows) is not tuple: rows = [rows] if type(columns) is not list and type(columns) is not tuple: columns = [columns] if rows: df = df.loc[rows] if columns: cols = columns[:] if type(row_index) is list: cols += row_index else: cols.append(row_index) if mol: cols.append('Molecule') unique_cols = [] [unique_cols.append(x) for x in cols if x not in unique_cols] df = df.ix[:,unique_cols] if row_index: df = df.set_index(row_index) if column_index: col_index=[] for col in df.columns: col_tuple = (' ', col) for term in column_index: if len(col)>len(term): if col[:len(term)] == term: col_tuple = (term, col[len(term)+1:]) continue col_index.append(col_tuple) df.columns = pd.MultiIndex.from_tuples(col_index) if head: df = df.head(head) if as_image: return df_to_img(df, na_rep=na_rep, font_size=font_size, width=width, height=height, unconfined=unconfined) return df def remove_rows(self, rows): """remove one or more rows of molecules Parameters ---------- rows : int or list of ints: the rows to remove """ self._df.drop(rows, inplace=True) return self.get_table() def remove_columns(self, columns): self._df.drop(columns, axis=1, inplace=True) return self.get_table() _basic_properties={'nbasis':'get_basis_funcs', 'basis':'get_basis_descript', 'optimised':'is_optimised', 'opt_error': 'get_run_error', 'conformer': 'is_conformer'} def get_basic_property(self, prop, *args, **kwargs): """returns a series of a basic run property or nan if it is not available Parameters ---------- prop : str can be 'basis', 'nbasis', 'optimised', 'opt_error' or 'conformer' """ if prop not in self._basic_properties.keys(): raise ValueError('{0} not a molecule property'.format(prop)) def get_prop(m): method = getattr(m, self._basic_properties[prop]) try: out = method(*args, **kwargs) except: out = pd.np.nan return out return self._df.Molecule.map(get_prop) def add_basic_properties(self, props=['basis', 'nbasis', 'optimised', 'conformer']): """adds columns giving info of basic run properties """ for prop in props: try: series = self.get_basic_property(prop) except Exception: print 'error reading {0} \n setting to NaN'.format(prop) series = pd.np.nan self._df[prop.capitalize()] = series return self.get_table() def remove_non_optimised(self): """removes runs that were not optimised """ non_optimised = self._df[self.get_basic_property('optimised')!=True].copy() self._df = self._df[self.get_basic_property('optimised')==True] return non_optimised def remove_non_conformers(self, cutoff=0.): """removes runs with negative frequencies """ non_conformers = self._df[self.get_basic_property('conformer', cutoff=cutoff)!=True].copy() self._df = self._df[self.get_basic_property('conformer', cutoff=cutoff)==True] return non_conformers def add_mol_property(self, name, method, *args, **kwargs): """compute molecule property for all rows and create a data column Parameters ---------- name : str what to name the data column method : str what molecule method to call *args : various arguments to pass to the molecule method **kwargs : various keyword arguments to pass to the molecule method """ if type(name) is tuple or type(name) is list: for idx, n in enumerate(name): func = lambda m: getattr(m, method)(*args, **kwargs)[idx] self._df[n] = self._df.Molecule.map(func) else: func = lambda m: getattr(m, method)(*args, **kwargs) self._df[name] = 
self._df.Molecule.map(func) return self.get_table() def add_mol_property_subset(self, name, method, rows=[], filters={}, args=[], kwargs={}, relative_to_rows=[]): """compute molecule property for a subset of rows and create/add-to data column Parameters ---------- name : str or list of strings name for output column (multiple if method outputs more than one value) method : str what molecule method to call rows : list what molecule rows to calculate the property for filters : dict filter for selecting molecules to calculate the property for args : list the arguments to pass to the molecule method kwargs : dict the keyword arguments to pass to the molecule method relative_to_rows: list of ints compute values relative to the summated value(s) of molecule at the rows listed """ df = self.get_table(rows=rows, filters=filters, mol=True) if relative_to_rows: rel_df = self.get_table(rows=relative_to_rows, mol=True) if type(name) is tuple or type(name) is list: for idx, n in enumerate(name): func = lambda m: getattr(m, method)(*args, **kwargs)[idx] vals = df.Molecule.map(func) if relative_to_rows: rel_val = rel_df.Molecule.map(func).sum() vals = vals - rel_val if n in self._df.columns: self._df[n] = vals.combine_first(self._df[n]) else: self._df[n] = vals else: func = lambda m: getattr(m, method)(*args, **kwargs) vals = df.Molecule.map(func) if relative_to_rows: rel_val = rel_df.Molecule.map(func).sum() vals = vals - rel_val if name in self._df.columns: self._df[name] = vals.combine_first(self._df[name]) else: self._df[name] = vals return self.get_table() def get_ids(self, variable_names, variable_lists): """return ids of a list of unique computations """ df = self.get_table() df['Index'] = df.index df.set_index(variable_names, inplace=True) df.sortlevel(inplace=True) ids = [] for variable_lst in variable_lists: df1 = df.copy() try: for v in variable_lst: df1 = df1.loc[v] except KeyError: raise ValueError( 'could not find variable set; {}'.format(variable_lst)) i = df1.Index if hasattr(i, 'values'): raise ValueError( 'variable set is not unique; {}'.format(variable_lst)) ids.append(int(i)) return ids def get_molecule(self, row): """ get molecule object coresponding to particular row """ return copy.deepcopy(self._df.Molecule.loc[row]) ## TODO will active work? 
def yield_mol_images(self, rows=[], filters={}, mtype='optimised', sort_columns=[], align_to=[], rotations=[[0., 0., 0.]], gbonds=True, represent='ball_stick', zoom=1., width=300, height=300, axis_length=0, background='white', relative=False, minval=-1, maxval=1, highlight=[], active=False, sopt_min_energy=20., sopt_cutoff_energy=0., atom_groups=[], alpha=0.5, transparent=False, hbondwidth=5, eunits='kJmol-1', no_hbonds=False, ipyimg=True): """yields molecules Parameters ---------- mtype : 'initial', 'optimised', 'nbo', 'highlight', 'highlight-initial', 'sopt' or 'hbond' info_columns : list of str columns to use as info in caption max_cols : int maximum columns in plot label_size : int subplot label size (pts) start_letter : str starting (capital) letter for labelling subplots save_fname : str name of file, if you wish to save the plot to file rows : int or list index for the row of each molecule to plot (all plotted if empty) filters : dict {columns:values} to filter by sort_columns : list of str columns to sort by align_to : [int, int, int] align geometries to the plane containing these atoms rotations : list of [float, float, float] for each rotation set [x,y,z] an image will be produced gbonds : bool guess bonds between atoms (via distance) represent : str representation of molecule ('none', 'wire', 'vdw' or 'ball_stick') zoom : float zoom level of images width : int width of original images height : int height of original images (although width takes precedent) axis_length : float length of x,y,z axes in negative and positive directions background : matplotlib.colors background color relative : bool coloring of nbo atoms scaled to min/max values in atom set (for nbo mtype) minval : float coloring of nbo atoms scaled to absolute min (for nbo mtype) maxval : float coloring of nbo atoms scaled to absolute max (for nbo mtype) highlight : list of lists atom indxes to highlight (for highlight mtype) eunits : str the units of energy to return (for sopt/hbond mtype) sopt_min_energy : float minimum energy to show (for sopt/hbond mtype) sopt_cutoff_energy : float energy below which bonds will be dashed (for sopt mtype) alpha : float alpha color value of geometry (for highlight/sopt/hbond mtypes) transparent : bool whether atoms should be transparent (for highlight/sopt/hbond mtypes) hbondwidth : float width of lines depicting interaction (for hbond mtypes) atom_groups : [list or str, list or str] restrict interactions to between two lists (or identifiers) of atom indexes (for sopt/hbond mtypes) no_hbonds : bool whether to ignore H-Bonds in the calculation ipyimg : bool whether to return an IPython image, PIL image otherwise Yields ------- indx : int the row index of the molecule mol : IPython.display.Image or PIL.Image an image of the molecule in the format specified by ipyimg """ df = self.get_table(columns=['Molecule']+sort_columns, rows=rows, filters=filters, mol=True) if sort_columns: df.sort(sort_columns, inplace=True) show_kwargs = {'gbonds':gbonds, 'represent':represent, 'rotations':rotations, 'zoom':zoom, 'width':width, 'height':height, 'background':background, 'axis_length':axis_length, 'ipyimg':ipyimg} for indx, mol in zip(df.index, df.Molecule): if align_to: align_atoms = mol.get_atom_group(align_to) mol.set_alignment_atoms(*align_atoms) if mtype == 'initial': yield indx, mol.show_initial(**show_kwargs) elif mtype == 'optimised': yield indx, mol.show_optimisation(**show_kwargs) elif mtype == 'nbo': yield indx, mol.show_nbo_charges(relative=relative, minval=minval, maxval=maxval, 
**show_kwargs) elif mtype == 'highlight': yield indx, mol.show_highlight_atoms(highlight, alpha=alpha, optimised=True, transparent=transparent, **show_kwargs) elif mtype == 'highlight-initial': yield indx, mol.show_highlight_atoms(highlight, alpha=alpha, optimised=False, transparent=transparent, **show_kwargs) elif mtype == 'sopt': yield indx, mol.show_sopt_bonds(min_energy=sopt_min_energy, cutoff_energy=sopt_cutoff_energy, no_hbonds=no_hbonds, eunits=eunits, atom_groups=atom_groups, alpha=alpha, transparent=transparent, relative=relative, minval=minval, maxval=maxval, **show_kwargs) elif mtype == 'hbond': yield indx, mol.show_hbond_analysis(min_energy=sopt_min_energy, cutoff_energy=sopt_cutoff_energy, eunits=eunits, atom_groups=atom_groups, bondwidth=hbondwidth, alpha=alpha, transparent=transparent, relative=relative, minval=minval, maxval=maxval, **show_kwargs) else: raise ValueError( 'mtype must be initial, optimised, nbo, highlight, highligh-initial, sopt or hbond') def _get_letter(self, number): """get an uppercase letter according to a number""" if number < 26: return string.ascii_uppercase[number] else: first_letter = string.ascii_uppercase[int(number/26)-1] second_letter = string.ascii_uppercase[number % 26] return first_letter + second_letter def plot_mol_images(self, mtype='optimised', max_cols=1, padding=(1, 1), sort_columns=[], info_columns=[], info_incl_id=False, label_size=20, letter_prefix='', start_letter='A', rows=[], filters={}, align_to=[], rotations=[[0., 0., 0.]], gbonds=True, represent='ball_stick', zoom=1., width=500, height=500, axis_length=0, background='white', relative=False, minval=-1, maxval=1, highlight=[], frame_on=False, eunits='kJmol-1', sopt_min_energy=20., sopt_cutoff_energy=0., atom_groups=[], alpha=0.5, transparent=False, hbondwidth=5, no_hbonds=False): """show molecules in matplotlib table of axes Parameters ---------- mtype : 'initial', 'optimised', 'nbo', 'highlight', 'highlight-initial', 'sopt' or 'hbond' max_cols : int maximum columns in plot padding: tuple padding between images (horizontally, vertically) sort_columns : list of str columns to sort by info_columns : list of str columns to use as info in caption info_incl_id : bool include molecule id number in caption label_size : int subplot label size (pts) letter_prefix : str prefix for labelling subplots start_letter : str starting (capital) letter for labelling subplots rows : int or list index for the row of each molecule to plot (all plotted if empty) filters : dict {columns:values} to filter by align_to : [int, int, int] align geometries to the plane containing these atoms rotations : list of [float, float, float] for each rotation set [x,y,z] an image will be produced gbonds : bool guess bonds between atoms (via distance) represent : str representation of molecule ('none', 'wire', 'vdw' or 'ball_stick') zoom : float zoom level of images width : int width of original images height : int height of original images (although width takes precedent) axis_length : float length of x,y,z axes in negative and positive directions background : matplotlib.colors background color relative : bool coloring of nbo atoms scaled to min/max values in atom set (for nbo mtype) minval : float coloring of nbo atoms scaled to absolute min (for nbo mtype) maxval : float coloring of nbo atoms scaled to absolute max (for nbo mtype) highlight : list of lists atom indxes to highlight (for highlight mtype) eunits : str the units of energy to return (for sopt/hbond mtype) sopt_min_energy : float minimum energy to show 
(for sopt/hbond mtype) sopt_cutoff_energy : float energy below which bonds will be dashed (for sopt mtype) alpha : float alpha color value of geometry (for sopt/hbond mtypes) transparent : bool whether atoms should be transparent (for sopt/hbond mtypes) hbondwidth : float width of lines depicting interaction (for hbond mtypes) atom_groups : [list or str, list or str] restrict interactions to between two lists (or identifiers) of atom indexes (for sopt/hbond mtypes) no_hbonds : bool whether to ignore H-Bonds in the calculation (for sopt only) frame_on : bool whether to show frame around each image Returns ------- fig : matplotlib.figure.Figure A figure containing subplots for each molecule image caption : str A caption describing each subplot, given info_columns """ letter_offset = string.ascii_uppercase.find(start_letter) if letter_offset == -1: raise ValueError('start_letter must be an uppercase single letter') df = self.get_table(rows=rows, columns=info_columns, filters=filters) num_mols = len(df) imgs = self.yield_mol_images(rows=rows, filters=filters, mtype=mtype, align_to=align_to, gbonds=gbonds, represent=represent, sort_columns=sort_columns, rotations=rotations, zoom=zoom, width=width, height=height, axis_length=axis_length, relative=relative, minval=minval, maxval=maxval, highlight=highlight, active=False, ipyimg=False, eunits=eunits, sopt_min_energy=sopt_min_energy, sopt_cutoff_energy=sopt_cutoff_energy, atom_groups=atom_groups, alpha=alpha, transparent=transparent, background=background, hbondwidth=hbondwidth, no_hbonds=no_hbonds) #num_rows = int(math.ceil(num_mols/float(max_cols))) #num_cols = min([max_cols, num_mols]) num_cols=int(max_cols) num_rows=int(math.ceil(num_mols/float(num_cols))) fig, axes = plt.subplots(num_rows, num_cols, squeeze=False, gridspec_kw={'width_ratios':[1]*num_cols}) fig.set_facecolor(background) r,g,b = ColorConverter().to_rgb(background) if ( .241*(255*r)**2 + .691*(255*g)**2 + .068*(255*b)**2 )**0.5 < 130.: label_color = 'white' else: label_color = 'black' for ax in fig.get_axes(): ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) ax.set_anchor('NW') ax.set_frame_on(False) mol_num = 0 caption = [] for indx, img in imgs: ax = axes[int(math.ceil((mol_num+1)/float(max_cols)))-1, mol_num % max_cols] ax.imshow(img)#, aspect='equal') ax.set_frame_on(frame_on) if label_size: ax.text(0,0.8,letter_prefix+self._get_letter(mol_num+letter_offset), size=label_size, weight="bold", color=label_color) info = ', '.join(df[info_columns].loc[indx].fillna('-').astype(str)) if info_incl_id: info = str(indx) + ', ' + info caption.append( '(' + letter_prefix+self._get_letter(mol_num+letter_offset) + ') ' + info) mol_num += 1 #resize extra axes to be same as last img while mol_num < num_rows*num_cols: ax = axes[int(math.ceil((mol_num+1)/float(max_cols)))-1, mol_num % max_cols] ax.imshow(img) ax.clear() mol_num += 1 fig.tight_layout(w_pad=padding[0], h_pad=padding[1]) caption = ', '.join(caption) #insert newline character every 80 charaters #caption = re.sub("(.{80})", "\\1\n", caption, 0, re.DOTALL) return fig, caption def plot_mol_graphs(self, gtype='energy', share_plot=False, max_cols=1, padding=(1,1), tick_rotation=0, rows=[], filters={}, sort_columns=[], info_columns=[], info_incl_id=False, letter_prefix='', start_letter='A', grid=True, sharex=True, sharey=True, legend_size=10, color_scheme='jet', eunits='eV', per_energy=1., lbound=None, ubound=None, color_homo='g', color_lumo='r', 
homo_lumo_lines=True,homo_lumo_values=True,band_gap_value=True): """get a set of data plots for each molecule Parameters ---------- gtype : str the type of plot, energy = optimisation energies, freq = frequency analsis, dos = Densty of States, share_plot : bool whether to plot all data on the same or separate axes max_cols : int maximum columns on plots (share_plot=False only) padding: tuple padding between images (horizontally, vertically) tick_rotation : int rotation of x-axis labels rows : int or list index for the row of each molecule to plot (all plotted if empty) filters : dict {columns:values} to filter by sort_columns : list of str columns to sort by info_columns : list of str columns to use as info in caption info_incl_id : bool include molecule id number in labels letter_prefix : str prefix for labelling subplots (share_plot=False only) start_letter : str starting (capital) letter for labelling subplots (share_plot=False only) grid : bool whether to include a grid in the axes sharex : bool whether to align x-axes (share_plot=False only) sharey : bool whether to align y-axes (share_plot=False only) legend_size : int the font size (in pts) for the legend color_scheme : str the scheme to use for each molecule (share_plot=True only) according to http://matplotlib.org/examples/color/colormaps_reference.html eunits : str the units of energy to use per_energy : float energy interval to group states by (DoS only) lbound : float lower bound energy (DoS only) ubound: float upper bound energy (DoS only) color_homo : matplotlib.colors color of homo in matplotlib format color_lumo : matplotlib.colors color of lumo in matplotlib.colors homo_lumo_lines : bool draw lines at HOMO and LUMO energies homo_lumo_values : bool annotate HOMO and LUMO lines with exact energy values band_gap_value : bool annotate inbetween HOMO and LUMO lines with band gap value Returns ------- data : matplotlib.figure.Figure plotted frequency data caption : str A caption describing each subplot, given info_columns """ df = self.get_table(columns=list(set(info_columns+sort_columns)), rows=rows, filters=filters, mol=True) num_plots = df.index.shape[0] if sort_columns: df.sort(sort_columns, inplace=True) if gtype == 'energy': mol_func = 'plot_opt_energy' x_label = 'Optimisation Step' y_label = 'Energy ({0})'.format(eunits) all_plot_kwargs = {'units':eunits} per_plot_kwargs = {'linecolor':getattr(cm,color_scheme)( np.linspace(0.1, 0.9, num_plots))} elif gtype == 'freq': mol_func = 'plot_freq_analysis' x_label = 'Frequency ($cm^{-1}$)' y_label = 'IR Intensity ($km/mol$)' all_plot_kwargs = {} per_plot_kwargs = {'color':getattr(cm,color_scheme)( np.linspace(0, 1, num_plots)), 'alpha':np.linspace(1, 0.5, num_plots), 'marker_size':np.linspace(25, 15, num_plots)} elif gtype == 'dos': if share_plot: raise ValueError('share_plots not available for Density of States') mol_func = 'plot_dos' x_label = 'Density of States (per {0} {1})'.format(per_energy, eunits) y_label = 'Energy ({})'.format(eunits) all_plot_kwargs = {'eunits':eunits, 'per_energy':per_energy, 'lbound':lbound, 'ubound':ubound, 'color_homo':color_homo, 'color_lumo':color_lumo, 'homo_lumo_lines':homo_lumo_lines, 'homo_lumo_values':homo_lumo_values, 'band_gap_value':band_gap_value, 'legend_size':legend_size} else: raise ValueError('gtype; {0}, not available'.format(gtype)) ax_num = 0 caption = [] if share_plot: fig, ax = plt.subplots() legend = [] for indx, row in df.iterrows(): plot_kwargs = all_plot_kwargs.copy() for k, v in per_plot_kwargs.iteritems(): plot_kwargs[k] = 
v[ax_num] getattr(row.Molecule, mol_func)(ax=ax, **plot_kwargs) label = ', '.join(row[info_columns].fillna('-').astype(str)) if info_incl_id: label = str(indx) + ', ' + label legend.append(label) ax_num += 1 ax.grid(grid) for tick in ax.get_xticklabels(): tick.set_rotation(tick_rotation) ax.legend(legend, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., prop={'size':legend_size}) else: num_rows = int(math.ceil(num_plots/float(max_cols))) num_cols = min([max_cols, num_plots]) fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False, sharex=sharex, sharey=sharey) letter_offset = string.ascii_uppercase.find(start_letter) if letter_offset == -1: raise ValueError('start_letter must be an uppercase single letter') for indx, row in df.iterrows(): i = int(math.ceil((ax_num+1)/float(max_cols)))-1 j = ax_num % max_cols getattr(row.Molecule, mol_func)(ax=axes[i,j], **all_plot_kwargs) axes[i,j].grid(grid) for tick in axes[i,j].get_xticklabels(): tick.set_rotation(tick_rotation) info = ', '.join(row[info_columns].fillna('-').astype(str)) if info_incl_id: info = str(indx) + ', ' + info letter = self._get_letter(ax_num+letter_offset) axes[i,j].set_title(letter_prefix+letter, fontweight="bold") caption.append('(' + letter_prefix+letter + ') ' + info) ax_num += 1 #hide extraneous axes for extra_ax in range(ax_num, num_rows*num_cols): i = int(math.ceil((extra_ax+1)/float(max_cols)))-1 j = extra_ax % max_cols axes[i,j].axis('off') ax = fig.add_subplot(111) # The big subplot ax.tick_params(top='off', bottom='off', left='off', right='off', labelbottom='on', labelleft='on', pad=25) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_frame_on(False) ax.set_xlabel(x_label) ax.set_ylabel(y_label) fig.tight_layout(w_pad=padding[0], h_pad=padding[1]) caption = ', '.join(caption) return fig, caption def plot_radviz_comparison(self, category_column, columns=[], rows=[], filters={}, point_size=30, **kwargs): """return plot axis of radviz graph RadViz is a way of visualizing multi-variate data. It is based on a simple spring tension minimization algorithm. Basically you set up a bunch of points in a plane. In our case they are equally spaced on a unit circle. Each point represents a single attribute. You then pretend that each sample in the data set is attached to each of these points by a spring, the stiffness of which is proportional to the numerical value of that attribute (they are normalized to unit interval). The point in the plane, where our sample settles to (where the forces acting on our sample are at an equilibrium) is where a dot representing our sample will be drawn. Depending on which class that sample belongs it will be colored differently. """ col_names = self._df.drop('Molecule', axis=1).columns.tolist() if category_column not in col_names: raise ValueError('{0} not in columns'.format(category_column)) columns = columns[:] if columns and category_column not in columns: if all(isinstance(item, int) for item in columns): columns.append(col_names.index(category_column)) else: columns.append(category_column) df = self.get_table(rows, columns, filters) df = df.sort(category_column) f, ax = plt.subplots() ax = radviz(df, category_column, ax=ax, s=point_size, **kwargs) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) ax.set_frame_on(False) ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) 
return ax def calc_kmean_groups(self, category_column, category_name, groups, columns=[], rows=[], filters={}): """calculate the kmeans grouping of rows The KMeans algorithm clusters data by trying to separate samples in n groups of equal variance, minimizing a criterion known as the inertia or within-cluster sum-of-squares. This algorithm requires the number of clusters to be specified. It scales well to large number of samples and has been used across a large range of application areas in many different fields. """ col_names = self._df.drop('Molecule', axis=1).columns.tolist() if category_column not in col_names: raise ValueError('{0} not in columns'.format(category_column)) filters[category_column] = category_name df = self.get_table(rows, columns, filters) k_means = KMeans(n_clusters=groups) k_means.fit(df) cats = k_means.predict(df) return pd.DataFrame({'Name':category_name, 'Category':cats}, index=df.index) if __name__ == '__main__': pass
gpl-3.0
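A minimal usage sketch for the Analysis class defined in the file above; the folder path, filename patterns and variable values here are hypothetical and only illustrate the add_runs pattern interface that the class exposes.

# Sketch only: folder path, patterns and header values are hypothetical.
from pygauss.analysis import Analysis

analysis = Analysis(folderpath='data/gaussian_runs',           # hypothetical local folder
                    headers=['Cation', 'Anion', 'Initial'])    # per-run variable categories
# one molecule is added per combination of the value lists, with
# {0},{1},{2} in each pattern filled from those values
errors = analysis.add_runs(
    headers=['Cation', 'Anion', 'Initial'],
    values=[['emim'], ['cl'], [1, 2]],
    init_pattern='{0}-{1}_{2}_init.com',
    opt_pattern='{0}-{1}_{2}_opt.log',
    freq_pattern='{0}-{1}_{2}_freq.log')
analysis.add_basic_properties()      # adds Basis, Nbasis, Optimised, Conformer columns
print(analysis.get_table(precision=3))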
ZENGXH/scikit-learn
sklearn/utils/tests/test_random.py
230
7344
from __future__ import division import numpy as np import scipy.sparse as sp from scipy.misc import comb as combinations from numpy.testing import assert_array_almost_equal from sklearn.utils.random import sample_without_replacement from sklearn.utils.random import random_choice_csc from sklearn.utils.testing import ( assert_raises, assert_equal, assert_true) ############################################################################### # test custom sampling without replacement algorithm ############################################################################### def test_invalid_sample_without_replacement_algorithm(): assert_raises(ValueError, sample_without_replacement, 5, 4, "unknown") def test_sample_without_replacement_algorithms(): methods = ("auto", "tracking_selection", "reservoir_sampling", "pool") for m in methods: def sample_without_replacement_method(n_population, n_samples, random_state=None): return sample_without_replacement(n_population, n_samples, method=m, random_state=random_state) check_edge_case_of_sample_int(sample_without_replacement_method) check_sample_int(sample_without_replacement_method) check_sample_int_distribution(sample_without_replacement_method) def check_edge_case_of_sample_int(sample_without_replacement): # n_poluation < n_sample assert_raises(ValueError, sample_without_replacement, 0, 1) assert_raises(ValueError, sample_without_replacement, 1, 2) # n_population == n_samples assert_equal(sample_without_replacement(0, 0).shape, (0, )) assert_equal(sample_without_replacement(1, 1).shape, (1, )) # n_population >= n_samples assert_equal(sample_without_replacement(5, 0).shape, (0, )) assert_equal(sample_without_replacement(5, 1).shape, (1, )) # n_population < 0 or n_samples < 0 assert_raises(ValueError, sample_without_replacement, -1, 5) assert_raises(ValueError, sample_without_replacement, 5, -1) def check_sample_int(sample_without_replacement): # This test is heavily inspired from test_random.py of python-core. # # For the entire allowable range of 0 <= k <= N, validate that # the sample is of the correct length and contains only unique items n_population = 100 for n_samples in range(n_population + 1): s = sample_without_replacement(n_population, n_samples) assert_equal(len(s), n_samples) unique = np.unique(s) assert_equal(np.size(unique), n_samples) assert_true(np.all(unique < n_population)) # test edge case n_population == n_samples == 0 assert_equal(np.size(sample_without_replacement(0, 0)), 0) def check_sample_int_distribution(sample_without_replacement): # This test is heavily inspired from test_random.py of python-core. # # For the entire allowable range of 0 <= k <= N, validate that # sample generates all possible permutations n_population = 10 # a large number of trials prevents false negatives without slowing normal # case n_trials = 10000 for n_samples in range(n_population): # Counting the number of combinations is not as good as counting the # the number of permutations. However, it works with sampling algorithm # that does not provide a random permutation of the subset of integer. 
n_expected = combinations(n_population, n_samples, exact=True) output = {} for i in range(n_trials): output[frozenset(sample_without_replacement(n_population, n_samples))] = None if len(output) == n_expected: break else: raise AssertionError( "number of combinations != number of expected (%s != %s)" % (len(output), n_expected)) def test_random_choice_csc(n_samples=10000, random_state=24): # Explicit class probabilities classes = [np.array([0, 1]), np.array([0, 1, 2])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] got = random_choice_csc(n_samples, classes, class_probabilites, random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples) assert_array_almost_equal(class_probabilites[k], p, decimal=1) # Implicit class probabilities classes = [[0, 1], [1, 2]] # test for array-like support class_probabilites = [np.array([0.5, 0.5]), np.array([0, 1/2, 1/2])] got = random_choice_csc(n_samples=n_samples, classes=classes, random_state=random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel()) / float(n_samples) assert_array_almost_equal(class_probabilites[k], p, decimal=1) # Edge case proabilites 1.0 and 0.0 classes = [np.array([0, 1]), np.array([0, 1, 2])] class_probabilites = [np.array([1.0, 0.0]), np.array([0.0, 1.0, 0.0])] got = random_choice_csc(n_samples, classes, class_probabilites, random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel(), minlength=len(class_probabilites[k])) / n_samples assert_array_almost_equal(class_probabilites[k], p, decimal=1) # One class target data classes = [[1], [0]] # test for array-like support class_probabilites = [np.array([0.0, 1.0]), np.array([1.0])] got = random_choice_csc(n_samples=n_samples, classes=classes, random_state=random_state) assert_true(sp.issparse(got)) for k in range(len(classes)): p = np.bincount(got.getcol(k).toarray().ravel()) / n_samples assert_array_almost_equal(class_probabilites[k], p, decimal=1) def test_random_choice_csc_errors(): # the length of an array in classes and class_probabilites is mismatched classes = [np.array([0, 1]), np.array([0, 1, 2, 3])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1) # the class dtype is not supported classes = [np.array(["a", "1"]), np.array(["z", "1", "2"])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1) # the class dtype is not supported classes = [np.array([4.2, 0.1]), np.array([0.1, 0.2, 9.4])] class_probabilites = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1) # Given proabilites don't sum to 1 classes = [np.array([0, 1]), np.array([0, 1, 2])] class_probabilites = [np.array([0.5, 0.6]), np.array([0.6, 0.1, 0.3])] assert_raises(ValueError, random_choice_csc, 4, classes, class_probabilites, 1)
bsd-3-clause
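A small illustration of the two utilities exercised by the preceding test module, sample_without_replacement and random_choice_csc; the concrete sizes and class probabilities are arbitrary but follow the same call pattern as the tests.

# Sketch only: arbitrary sizes/probabilities, matching the API used in the tests above.
import numpy as np
from sklearn.utils.random import sample_without_replacement, random_choice_csc

# draw 5 distinct integers from range(100); method may also be
# "tracking_selection", "reservoir_sampling" or "pool"
idx = sample_without_replacement(n_population=100, n_samples=5,
                                 method="auto", random_state=0)
print(idx)            # 5 unique values, all < 100

# sparse per-column sampling with explicit class probabilities
classes = [np.array([0, 1]), np.array([0, 1, 2])]
probs = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
sampled = random_choice_csc(100, classes, probs, 0)
print(sampled.shape)  # (100, 2), scipy CSC sparse matrix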
fahadsultan/CausalRelations
InterDocumentCausations.py
1
16188
from bs4 import BeautifulSoup import os import pandas as pd import sys import traceback from sklearn.feature_extraction.text import CountVectorizer class OutWriter: def __init__(self): self.HTML_ANNOTATION_COLORS = ['yellow', 'silver', 'lightblue', 'cyan', 'gray', 'orange', 'red', 'green','pink', 'brown'] self.OUT_PATH = "out/" self.HEADER = "<style>\ table, td, th {\ border: 1px solid white;\ }</style>" self.FOOTER = "" def _make_doc_name_link(self, name): name = name.split('/')[-1].replace(".","_") name = "<a href='"+name+".html'>"+name+"</a>" return name def index_page(self, df): docs = df[['doc_name', 'date']].drop_duplicates().sort(['doc_name'], ascending=False).reset_index(drop=True) docs['doc_name'] = docs['doc_name'].apply(self._make_doc_name_link) out_html = self.HEADER+docs.to_html(escape=False)+self.FOOTER with open(self.OUT_PATH+'index.html', 'w') as f: f.write(out_html) def save_html_output(self, doc_name, soup, causations_in_doc): tokens = soup.findAll('token') token_arr = [token.text for token in tokens] color_idx = 0 for idx, row in causations_in_doc.iterrows(): c_idx = int(row['c_token_id'])-1 e_idx = int(row['e_token_id'])-1 token_arr[c_idx] = "<span style='background:"+self.HTML_ANNOTATION_COLORS[color_idx]+"'>"+str(token_arr[c_idx])+"</span>" token_arr[e_idx] = "<span style='background:"+self.HTML_ANNOTATION_COLORS[color_idx]+"'>"+str(token_arr[e_idx])+"</span>" color_idx = color_idx + 1 text = ' '.join(token_arr) html_content = self.HEADER +"<p>"+text+"</p>" if len(causations_in_doc) > 0: html_content = html_content + "<h3>Causal Relations \ with events defined as verb or nouns (Mirza's work):</h3>" causations_in_doc['relation'] = "caused" html_content = html_content + \ causations_in_doc[['c_lemma', 'relation', 'e_lemma']].rename(columns={'c_lemma':'event 1', 'e_lemma':'event 2'}).to_html() html_content = html_content + "<h3>Cause and effect redefined as event,subject pairs</h3>" causations_in_doc['cause_with_subj'] = causations_in_doc.apply(lambda x: "( %s , %s )" %(x["cause"], x["c_subj"]), axis=1) causations_in_doc['cause (event,subj)'] = causations_in_doc.apply(lambda x: "( "+x["c_lemma"]+", "+x["c_subj"]+" )", axis=1) causations_in_doc['effect (event, subj)'] = causations_in_doc.apply(lambda x: "( "+x["e_lemma"]+", "+x["e_subj"]+" )", axis=1) html_content = html_content + causations_in_doc[['cause (event,subj)', 'relation', 'effect (event, subj)']].to_html() html_content = html_content+self.FOOTER doc_name = doc_name.split('/')[-1] doc_name = doc_name.replace(".", "_") f = open("out/"+doc_name+".html", "w") f.write(html_content) f.close() def write_linkages(self, linkages): for folder, subs, files in os.walk('out/'): for filename in files: if 'index' in filename: print 'index found' continue with open(os.path.join(folder, filename), 'a') as f: doc_name = "data/xml/"+filename.replace("_xml", ".xml") doc_name = doc_name.replace(".html", "") print doc_name f.write("<h3>Incoming edges (causes in this document found as effects in older documents)</h3>") doc_links = linkages[linkages['doc_name_x'] == doc_name] if len(doc_links) == 0: f.write("None found") else: doc_links['common_event'] = doc_links.apply(lambda x: "(%s, %s)" %(x['cause_x'], x['c_subj_x']), axis=1) doc_links['time_difference'] = doc_links.apply(lambda x: x['date_x'] - x['date_y'], axis=1) doc_links['doc_name_y'] = doc_links['doc_name_y'].apply(self._make_doc_name_link) f.write(doc_links[['doc_name_y', 'time_difference', 'common_event']].to_html(escape=False)) f.write("<h3>Outgoing edges (effects in this 
document found as causes in more recent documents)</h3>") doc_links = linkages[linkages['doc_name_y'] == doc_name] if len(doc_links) == 0: f.write("None found") else: doc_links['common_event'] = doc_links.apply(lambda x: "(%s, %s)" %(x['effect_y'], x['e_subj_y']), axis=1) doc_links['time_difference'] = doc_links.apply(lambda x: x['date_x'] - x['date_y'], axis=1) doc_links['doc_name_x'] = doc_links['doc_name_x'].apply(self._make_doc_name_link) f.write(doc_links[['doc_name_x', 'time_difference', 'common_event']].to_html(escape=False)) class Parser: def __init__(self): COLUMN_NAMES = ['cause', 'c_lemma','cause_pos', 'c_token_id', 'c_subj', 'c_subj_token_id', 'c_pos_deps', 'effect', 'e_lemma', 'effect_pos', 'e_token_id', 'e_subj','e_subj_token_id', 'e_pos_deps', 'date', 'doc_name'] self.causal_df = pd.DataFrame(columns=COLUMN_NAMES) self.outWriter = OutWriter() def recursive_search(self, df, path, to_find_token, to_find_index, to_find_sentence, governor_token, governor_index, governor_sentence): dependencies = df[(self.deps['governor'] == governor_token) & (self.deps['governor_idx'] == int(governor_index)) & (self.deps['sentence'] == int(governor_sentence))] for i in range(len(dependencies)): dependency = dependencies.iloc[i] #Weird idiosynracy I came across where the governor and the dependent #were the same token if ((dependency['governor'] == dependency['dependent']) and (dependency['dependent_idx'] == dependency['governor_idx'])): continue #check break condition if (dependency['dependent'] == to_find_token and dependency['dependent_idx'] == to_find_index and dependency['sentence'] == to_find_sentence): path = path+' '+dependency['relation'] break else: path_to_pass = path+' '+dependency['relation'] path_returned = self.recursive_search( df, path_to_pass, to_find_token, to_find_index, to_find_sentence, dependency['dependent'], dependency['dependent_idx'], dependency['sentence']) if path_returned != path_to_pass: path = path_returned break return path def get_dependency_path(self, filename, e1_token, e1_token_id, e1_sentence, e2_token, e2_token_id, e2_sentence): #Since intersentential paths are allowed, the next sentence is #also included df = self.deps[(self.deps['filename'] == filename) & ((self.deps['sentence'] == int(e1_sentence)) | (self.deps['sentence'] == int(e1_sentence)+1))] path = self.recursive_search(df, '', e2_token, e2_token_id, e2_sentence, e1_token, e1_token_id, e1_sentence) if path is not '': return path else: #Try finding path from e2 to e1 return self.recursive_search(df, '', e1_token, e1_token_id, int(e1_sentence), e2_token, e2_token_id, int(e2_sentence)) def parseFile(self, filename): f = open(filename) soup = BeautifulSoup(f.read()) clinks = soup.findAll('clink') date_tag = soup.find(lambda x: (x.name == 'timex3') and (x.attrs['functionindocument'] == 'CREATION_TIME')) date_val = date_tag.attrs['value'] ## Dependencies ## file_prefix = (filename.split('/')[-1]).split(".xml")[0] deps = pd.read_csv('data/deps/'+file_prefix+".deps", sep='\t', header=-1, names=['idx', 'token', 'lemma', 'pos', 'ner', 'head_idx', 'relation']) deps['token_id'] = deps.index + 1 deps['sentence_no'] = (deps['idx'] == 1).cumsum() deps['pos_trimmed'] = deps['pos'].apply(lambda x: x[:2]) for i, clink in enumerate(clinks): cause_event_id = clink.find('source').attrs['id'] effect_event_id = clink.find('target').attrs['id'] cause_event_tag = soup.find(lambda x: (x.name == 'event') and (x.attrs['id'] == cause_event_id)) cause_pos = cause_event_tag.attrs['pos'] effect_event_tag = soup.find(lambda x: 
(x.name == 'event') and (x.attrs['id'] == effect_event_id)) effect_pos = effect_event_tag.attrs['pos'] cause_token_id = cause_event_tag.find('token_anchor').attrs['id'] effect_token_id = effect_event_tag.find('token_anchor').attrs['id'] cause_token_tag = soup.find(lambda x: (x.name == 'token') and (x.attrs['id'] == str(cause_token_id))) cause_sentence = cause_token_tag.attrs['sentence'] cause_token = cause_token_tag.text all_cause_tokens = soup.findAll(lambda x: (x.text == cause_token) & (x.name == 'token')) c_tokens_arr = [c_token.attrs['id'] for c_token in all_cause_tokens] c_occurence = c_tokens_arr.index(cause_token_id) effect_token_tag = soup.find(lambda x: (x.name == 'token') and (x.attrs['id'] == str(effect_token_id))) effect_sentence = effect_token_tag.attrs['sentence'] effect_token = effect_token_tag.text all_effect_tokens = soup.findAll(lambda x: (x.text == effect_token) & (x.name == 'token')) e_tokens_arr = [e_token.attrs['id'] for e_token in all_effect_tokens] e_occurence = e_tokens_arr.index(effect_token_id) c_dep = deps[deps['token'] == cause_token].iloc[c_occurence] e_dep = deps[deps['token'] == effect_token].iloc[e_occurence] c_pos_from_deps = c_dep['pos'] e_pos_from_deps = e_dep['pos'] c_lemma = c_dep['lemma'] e_lemma = e_dep['lemma'] print cause_token+" -> "+effect_token resp = self.get_cause_effect_subjects(deps, cause_token, c_occurence, cause_sentence, cause_token_id, effect_token, e_occurence, effect_sentence, effect_token_id) self.causal_df.loc[len(self.causal_df)] = [cause_token, c_lemma, cause_pos, cause_token_id, resp['c_subj'], resp['c_subj_token_id'], c_pos_from_deps, effect_token, e_lemma, effect_pos, effect_token_id, resp['e_subj'], resp['e_subj_token_id'], e_pos_from_deps, date_val, filename] self.outWriter.save_html_output(filename, soup, self.causal_df[self.causal_df['doc_name'] == filename]) f.close() def populate_causal_df(self): for folder, subs, files in os.walk('data/xml'): for filename in files: try: if ('.xml' in filename) and (filename[0] != '.'): print '\n'+'Parsing File: '+filename+'\n' if 'wsj' in filename: self.parseFile(os.path.join(folder, filename)) except Exception as e: traceback.print_exc() continue # break self.causal_df.to_csv('causal_df.csv') def get_cause_effect_subjects(self, deps, c_token, c_occurence, c_sentence, c_token_id, e_token, e_occurence, e_sentence, e_token_id): return_dict = {'e_subj':'', 'c_subj':'', 'e_subj_token_id':-1, 'c_subj_token_id':-1} # c_row = deps[deps['token_id'] == int(c_token_id)].iloc[0] # c_rows = deps[(deps['token'] == c_token) & (deps['sentence_no'] == (int(c_sentence)+1))] c_rows = deps[deps['token'] == c_token] if len(c_rows) > c_occurence: c_row = c_rows.iloc[c_occurence] c_deps = self.recursive_trace(deps[(deps['sentence_no'] == c_row['sentence_no'])], c_row['idx'], "pos_trimmed", "NN") # , "relation", "nsubj" if c_deps is not None: return_dict['c_subj'] = c_deps.iloc[0]['token'] return_dict['c_subj_token_id'] = c_deps.iloc[0]['token_id'] else: print 'No Causal Rows found' e_rows = deps[deps['token'] == e_token] if len(e_rows) > e_occurence: e_row = e_rows.iloc[e_occurence] e_deps = self.recursive_trace(deps[(deps['sentence_no'] == e_row['sentence_no'])], e_row['idx'],"pos_trimmed", "NN") # "relation", "nsubj" if e_deps is not None: return_dict['e_subj'] = e_deps.iloc[0]['token'] return_dict['e_subj_token_id'] = e_deps.iloc[0]['token_id'] else: print 'No Effect Rows found' return return_dict def visualize_dependency_tree(df, sentenceNumber): df = df[(df['sentence_no'] == sentenceNumber)] print 
'\nSentence:'+(' '.join(list(df['token'])))+'\n' root = df[df['relation'] == 'ROOT'].iloc[0] print root.token+' ('+root.pos+', '+root.relation+')\n' _recursive_parse(df, root.idx, 1) def _recursive_parse(df, head_idx, depth): step_str = ' ' deps = df[df['head_idx'] == head_idx] for i in range(len(deps)): dep = deps.iloc[i] print (step_str*depth)+dep.token+' ('+dep.pos+', '+dep.relation+')\n' _recursive_parse(df, dep.idx, depth+1) def trace_dependency_path(df, dep_path, start_col, start_val, end_col, end_val): recursive_trace(df[df[start_col] == start_val], dep_path, end_col, end_val) def recursive_trace(self, df, current_idx, end_col, end_val): deps_current_head = df[df['head_idx'] == current_idx] print len(deps_current_head) df_search = deps_current_head[deps_current_head[end_col] == end_val] if len(df_search) == 0: for idx, row in deps_current_head.iterrows(): print row['idx'] return self.recursive_trace( df, row['idx'], end_col, end_val) else: return df_search def identify_inter_doc_linkages(self): self.causal_df['date'] = pd.to_datetime(self.causal_df['date']) self.linkages = pd.merge(self.causal_df, self.causal_df, left_on=['c_lemma', 'c_subj'], right_on=['e_lemma', 'e_subj']) self.linkages = self.linkages[(self.linkages['date_x'] >= self.linkages['date_y'])] self.linkages[['cause_y', 'c_subj_y', 'effect_y', 'e_subj_y', 'date_y', 'doc_name_y', 'cause_x', 'c_subj_x', 'effect_x', 'e_subj_x', 'date_x', 'doc_name_x']] self.outWriter.write_linkages(self.linkages) return self.linkages def create_index_page(self): self.outWriter.index_page(self.causal_df) if __name__ == "__main__": parser = Parser() parser.populate_causal_df() chains = parser.identify_inter_doc_linkages() parser.create_index_page()
apache-2.0
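A brief driver sketch for the Parser class above, mirroring its __main__ block; the data/xml, data/deps and out/ locations are the directory layout the class itself hard-codes, and the module import path is assumed from the file's location at the repository root.

# Sketch only: mirrors the __main__ block of InterDocumentCausations.py.
# The Parser expects CLINK/event-annotated XML under data/xml/, tab-separated
# dependency files under data/deps/, and writes HTML reports to out/.
from InterDocumentCausations import Parser

parser = Parser()
parser.populate_causal_df()                      # parse every wsj*.xml file into causal_df
linkages = parser.identify_inter_doc_linkages()  # join (event, subject) pairs across documents
parser.create_index_page()                       # write out/index.html linking per-document pages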
tomsilver/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/dviread.py
69
29920
""" An experimental module for reading dvi files output by TeX. Several limitations make this not (currently) useful as a general-purpose dvi preprocessor. Interface:: dvi = Dvi(filename, 72) for page in dvi: # iterate over pages w, h, d = page.width, page.height, page.descent for x,y,font,glyph,width in page.text: fontname = font.texname pointsize = font.size ... for x,y,height,width in page.boxes: ... """ import errno import matplotlib import matplotlib.cbook as mpl_cbook import numpy as np import struct import subprocess _dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4) class Dvi(object): """ A dvi ("device-independent") file, as produced by TeX. The current implementation only reads the first page and does not even attempt to verify the postamble. """ def __init__(self, filename, dpi): """ Initialize the object. This takes the filename as input and opens the file; actually reading the file happens when iterating through the pages of the file. """ matplotlib.verbose.report('Dvi: ' + filename, 'debug') self.file = open(filename, 'rb') self.dpi = dpi self.fonts = {} self.state = _dvistate.pre def __iter__(self): """ Iterate through the pages of the file. Returns (text, pages) pairs, where: text is a list of (x, y, fontnum, glyphnum, width) tuples boxes is a list of (x, y, height, width) tuples The coordinates are transformed into a standard Cartesian coordinate system at the dpi value given when initializing. The coordinates are floating point numbers, but otherwise precision is not lost and coordinate values are not clipped to integers. """ while True: have_page = self._read() if have_page: yield self._output() else: break def close(self): """ Close the underlying file if it is open. """ if not self.file.closed: self.file.close() def _output(self): """ Output the text and boxes belonging to the most recent page. page = dvi._output() """ minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf maxy_pure = -np.inf for elt in self.text + self.boxes: if len(elt) == 4: # box x,y,h,w = elt e = 0 # zero depth else: # glyph x,y,font,g,w = elt h = _mul2012(font._scale, font._tfm.height[g]) e = _mul2012(font._scale, font._tfm.depth[g]) minx = min(minx, x) miny = min(miny, y - h) maxx = max(maxx, x + w) maxy = max(maxy, y + e) maxy_pure = max(maxy_pure, y) if self.dpi is None: # special case for ease of debugging: output raw dvi coordinates return mpl_cbook.Bunch(text=self.text, boxes=self.boxes, width=maxx-minx, height=maxy_pure-miny, descent=maxy-maxy_pure) d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units text = [ ((x-minx)*d, (maxy-y)*d, f, g, w*d) for (x,y,f,g,w) in self.text ] boxes = [ ((x-minx)*d, (maxy-y)*d, h*d, w*d) for (x,y,h,w) in self.boxes ] return mpl_cbook.Bunch(text=text, boxes=boxes, width=(maxx-minx)*d, height=(maxy_pure-miny)*d, descent=(maxy-maxy_pure)*d) def _read(self): """ Read one page from the file. Return True if successful, False if there were no more pages. """ while True: byte = ord(self.file.read(1)) self._dispatch(byte) # if self.state == _dvistate.inpage: # matplotlib.verbose.report( # 'Dvi._read: after %d at %f,%f' % # (byte, self.h, self.v), # 'debug-annoying') if byte == 140: # end of page return True if self.state == _dvistate.post_post: # end of file self.close() return False def _arg(self, nbytes, signed=False): """ Read and return an integer argument "nbytes" long. Signedness is determined by the "signed" keyword. 
""" str = self.file.read(nbytes) value = ord(str[0]) if signed and value >= 0x80: value = value - 0x100 for i in range(1, nbytes): value = 0x100*value + ord(str[i]) return value def _dispatch(self, byte): """ Based on the opcode "byte", read the correct kinds of arguments from the dvi file and call the method implementing that opcode with those arguments. """ if 0 <= byte <= 127: self._set_char(byte) elif byte == 128: self._set_char(self._arg(1)) elif byte == 129: self._set_char(self._arg(2)) elif byte == 130: self._set_char(self._arg(3)) elif byte == 131: self._set_char(self._arg(4, True)) elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True)) elif byte == 133: self._put_char(self._arg(1)) elif byte == 134: self._put_char(self._arg(2)) elif byte == 135: self._put_char(self._arg(3)) elif byte == 136: self._put_char(self._arg(4, True)) elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True)) elif byte == 138: self._nop() elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)]) elif byte == 140: self._eop() elif byte == 141: self._push() elif byte == 142: self._pop() elif byte == 143: self._right(self._arg(1, True)) elif byte == 144: self._right(self._arg(2, True)) elif byte == 145: self._right(self._arg(3, True)) elif byte == 146: self._right(self._arg(4, True)) elif byte == 147: self._right_w(None) elif byte == 148: self._right_w(self._arg(1, True)) elif byte == 149: self._right_w(self._arg(2, True)) elif byte == 150: self._right_w(self._arg(3, True)) elif byte == 151: self._right_w(self._arg(4, True)) elif byte == 152: self._right_x(None) elif byte == 153: self._right_x(self._arg(1, True)) elif byte == 154: self._right_x(self._arg(2, True)) elif byte == 155: self._right_x(self._arg(3, True)) elif byte == 156: self._right_x(self._arg(4, True)) elif byte == 157: self._down(self._arg(1, True)) elif byte == 158: self._down(self._arg(2, True)) elif byte == 159: self._down(self._arg(3, True)) elif byte == 160: self._down(self._arg(4, True)) elif byte == 161: self._down_y(None) elif byte == 162: self._down_y(self._arg(1, True)) elif byte == 163: self._down_y(self._arg(2, True)) elif byte == 164: self._down_y(self._arg(3, True)) elif byte == 165: self._down_y(self._arg(4, True)) elif byte == 166: self._down_z(None) elif byte == 167: self._down_z(self._arg(1, True)) elif byte == 168: self._down_z(self._arg(2, True)) elif byte == 169: self._down_z(self._arg(3, True)) elif byte == 170: self._down_z(self._arg(4, True)) elif 171 <= byte <= 234: self._fnt_num(byte-171) elif byte == 235: self._fnt_num(self._arg(1)) elif byte == 236: self._fnt_num(self._arg(2)) elif byte == 237: self._fnt_num(self._arg(3)) elif byte == 238: self._fnt_num(self._arg(4, True)) elif 239 <= byte <= 242: len = self._arg(byte-238) special = self.file.read(len) self._xxx(special) elif 243 <= byte <= 246: k = self._arg(byte-242, byte==246) c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ] n = self.file.read(a+l) self._fnt_def(k, c, s, d, a, l, n) elif byte == 247: i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ] x = self.file.read(k) self._pre(i, num, den, mag, x) elif byte == 248: self._post() elif byte == 249: self._post_post() else: raise ValueError, "unknown command: byte %d"%byte def _pre(self, i, num, den, mag, comment): if self.state != _dvistate.pre: raise ValueError, "pre command in middle of dvi file" if i != 2: raise ValueError, "Unknown dvi format %d"%i if num != 25400000 or den != 7227 * 2**16: raise ValueError, "nonstandard units in dvi file" # 
meaning: TeX always uses those exact values, so it # should be enough for us to support those # (There are 72.27 pt to an inch so 7227 pt = # 7227 * 2**16 sp to 100 in. The numerator is multiplied # by 10^5 to get units of 10**-7 meters.) if mag != 1000: raise ValueError, "nonstandard magnification in dvi file" # meaning: LaTeX seems to frown on setting \mag, so # I think we can assume this is constant self.state = _dvistate.outer def _set_char(self, char): if self.state != _dvistate.inpage: raise ValueError, "misplaced set_char in dvi file" self._put_char(char) self.h += self.fonts[self.f]._width_of(char) def _set_rule(self, a, b): if self.state != _dvistate.inpage: raise ValueError, "misplaced set_rule in dvi file" self._put_rule(a, b) self.h += b def _put_char(self, char): if self.state != _dvistate.inpage: raise ValueError, "misplaced put_char in dvi file" font = self.fonts[self.f] if font._vf is None: self.text.append((self.h, self.v, font, char, font._width_of(char))) # matplotlib.verbose.report( # 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char), # 'debug-annoying') else: scale = font._scale for x, y, f, g, w in font._vf[char].text: newf = DviFont(scale=_mul2012(scale, f._scale), tfm=f._tfm, texname=f.texname, vf=f._vf) self.text.append((self.h + _mul2012(x, scale), self.v + _mul2012(y, scale), newf, g, newf._width_of(g))) self.boxes.extend([(self.h + _mul2012(x, scale), self.v + _mul2012(y, scale), _mul2012(a, scale), _mul2012(b, scale)) for x, y, a, b in font._vf[char].boxes]) def _put_rule(self, a, b): if self.state != _dvistate.inpage: raise ValueError, "misplaced put_rule in dvi file" if a > 0 and b > 0: self.boxes.append((self.h, self.v, a, b)) # matplotlib.verbose.report( # 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b), # 'debug-annoying') def _nop(self): pass def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p): if self.state != _dvistate.outer: raise ValueError, \ "misplaced bop in dvi file (state %d)" % self.state self.state = _dvistate.inpage self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0 self.stack = [] self.text = [] # list of (x,y,fontnum,glyphnum) self.boxes = [] # list of (x,y,width,height) def _eop(self): if self.state != _dvistate.inpage: raise ValueError, "misplaced eop in dvi file" self.state = _dvistate.outer del self.h, self.v, self.w, self.x, self.y, self.z, self.stack def _push(self): if self.state != _dvistate.inpage: raise ValueError, "misplaced push in dvi file" self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z)) def _pop(self): if self.state != _dvistate.inpage: raise ValueError, "misplaced pop in dvi file" self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop() def _right(self, b): if self.state != _dvistate.inpage: raise ValueError, "misplaced right in dvi file" self.h += b def _right_w(self, new_w): if self.state != _dvistate.inpage: raise ValueError, "misplaced w in dvi file" if new_w is not None: self.w = new_w self.h += self.w def _right_x(self, new_x): if self.state != _dvistate.inpage: raise ValueError, "misplaced x in dvi file" if new_x is not None: self.x = new_x self.h += self.x def _down(self, a): if self.state != _dvistate.inpage: raise ValueError, "misplaced down in dvi file" self.v += a def _down_y(self, new_y): if self.state != _dvistate.inpage: raise ValueError, "misplaced y in dvi file" if new_y is not None: self.y = new_y self.v += self.y def _down_z(self, new_z): if self.state != _dvistate.inpage: raise ValueError, "misplaced z in dvi file" if new_z is not None: self.z = new_z 
self.v += self.z def _fnt_num(self, k): if self.state != _dvistate.inpage: raise ValueError, "misplaced fnt_num in dvi file" self.f = k def _xxx(self, special): matplotlib.verbose.report( 'Dvi._xxx: encountered special: %s' % ''.join([(32 <= ord(ch) < 127) and ch or '<%02x>' % ord(ch) for ch in special]), 'debug') def _fnt_def(self, k, c, s, d, a, l, n): tfm = _tfmfile(n[-l:]) if c != 0 and tfm.checksum != 0 and c != tfm.checksum: raise ValueError, 'tfm checksum mismatch: %s'%n # It seems that the assumption behind the following check is incorrect: #if d != tfm.design_size: # raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\ # (d, tfm.design_size, n) vf = _vffile(n[-l:]) self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf) def _post(self): if self.state != _dvistate.outer: raise ValueError, "misplaced post in dvi file" self.state = _dvistate.post_post # TODO: actually read the postamble and finale? # currently post_post just triggers closing the file def _post_post(self): raise NotImplementedError class DviFont(object): """ Object that holds a font's texname and size, supports comparison, and knows the widths of glyphs in the same units as the AFM file. There are also internal attributes (for use by dviread.py) that are _not_ used for comparison. The size is in Adobe points (converted from TeX points). """ __slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm') def __init__(self, scale, tfm, texname, vf): self._scale, self._tfm, self.texname, self._vf = \ scale, tfm, texname, vf self.size = scale * (72.0 / (72.27 * 2**16)) try: nchars = max(tfm.width.iterkeys()) except ValueError: nchars = 0 self.widths = [ (1000*tfm.width.get(char, 0)) >> 20 for char in range(nchars) ] def __eq__(self, other): return self.__class__ == other.__class__ and \ self.texname == other.texname and self.size == other.size def __ne__(self, other): return not self.__eq__(other) def _width_of(self, char): """ Width of char in dvi units. For internal use by dviread.py. """ width = self._tfm.width.get(char, None) if width is not None: return _mul2012(width, self._scale) matplotlib.verbose.report( 'No width for char %d in font %s' % (char, self.texname), 'debug') return 0 class Vf(Dvi): """ A virtual font (\*.vf file) containing subroutines for dvi files. 
Usage:: vf = Vf(filename) glyph = vf[code] glyph.text, glyph.boxes, glyph.width """ def __init__(self, filename): Dvi.__init__(self, filename, 0) self._first_font = None self._chars = {} self._packet_ends = None self._read() self.close() def __getitem__(self, code): return self._chars[code] def _dispatch(self, byte): # If we are in a packet, execute the dvi instructions if self.state == _dvistate.inpage: byte_at = self.file.tell()-1 if byte_at == self._packet_ends: self._finalize_packet() # fall through elif byte_at > self._packet_ends: raise ValueError, "Packet length mismatch in vf file" else: if byte in (139, 140) or byte >= 243: raise ValueError, "Inappropriate opcode %d in vf file" % byte Dvi._dispatch(self, byte) return # We are outside a packet if byte < 242: # a short packet (length given by byte) cc, tfm = self._arg(1), self._arg(3) self._init_packet(byte, cc, tfm) elif byte == 242: # a long packet pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ] self._init_packet(pl, cc, tfm) elif 243 <= byte <= 246: Dvi._dispatch(self, byte) elif byte == 247: # preamble i, k = self._arg(1), self._arg(1) x = self.file.read(k) cs, ds = self._arg(4), self._arg(4) self._pre(i, x, cs, ds) elif byte == 248: # postamble (just some number of 248s) self.state = _dvistate.post_post else: raise ValueError, "unknown vf opcode %d" % byte def _init_packet(self, pl, cc, tfm): if self.state != _dvistate.outer: raise ValueError, "Misplaced packet in vf file" self.state = _dvistate.inpage self._packet_ends = self.file.tell() + pl self._packet_char = cc self._packet_width = tfm self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0 self.stack, self.text, self.boxes = [], [], [] self.f = self._first_font def _finalize_packet(self): self._chars[self._packet_char] = mpl_cbook.Bunch( text=self.text, boxes=self.boxes, width = self._packet_width) self.state = _dvistate.outer def _pre(self, i, x, cs, ds): if self.state != _dvistate.pre: raise ValueError, "pre command in middle of vf file" if i != 202: raise ValueError, "Unknown vf format %d" % i if len(x): matplotlib.verbose.report('vf file comment: ' + x, 'debug') self.state = _dvistate.outer # cs = checksum, ds = design size def _fnt_def(self, k, *args): Dvi._fnt_def(self, k, *args) if self._first_font is None: self._first_font = k def _fix2comp(num): """ Convert from two's complement to negative. """ assert 0 <= num < 2**32 if num & 2**31: return num - 2**32 else: return num def _mul2012(num1, num2): """ Multiply two numbers in 20.12 fixed point format. """ # Separated into a function because >> has surprising precedence return (num1*num2) >> 20 class Tfm(object): """ A TeX Font Metric file. This implementation covers only the bare minimum needed by the Dvi class. Attributes: checksum: for verifying against dvi file design_size: design size of the font (in what units?) 
width[i]: width of character \#i, needs to be scaled by the factor specified in the dvi file (this is a dict because indexing may not start from 0) height[i], depth[i]: height and depth of character \#i """ __slots__ = ('checksum', 'design_size', 'width', 'height', 'depth') def __init__(self, filename): matplotlib.verbose.report('opening tfm file ' + filename, 'debug') file = open(filename, 'rb') try: header1 = file.read(24) lh, bc, ec, nw, nh, nd = \ struct.unpack('!6H', header1[2:14]) matplotlib.verbose.report( 'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % ( lh, bc, ec, nw, nh, nd), 'debug') header2 = file.read(4*lh) self.checksum, self.design_size = \ struct.unpack('!2I', header2[:8]) # there is also encoding information etc. char_info = file.read(4*(ec-bc+1)) widths = file.read(4*nw) heights = file.read(4*nh) depths = file.read(4*nd) finally: file.close() self.width, self.height, self.depth = {}, {}, {} widths, heights, depths = \ [ struct.unpack('!%dI' % (len(x)/4), x) for x in (widths, heights, depths) ] for i in range(ec-bc): self.width[bc+i] = _fix2comp(widths[ord(char_info[4*i])]) self.height[bc+i] = _fix2comp(heights[ord(char_info[4*i+1]) >> 4]) self.depth[bc+i] = _fix2comp(depths[ord(char_info[4*i+1]) & 0xf]) class PsfontsMap(object): """ A psfonts.map formatted file, mapping TeX fonts to PS fonts. Usage: map = PsfontsMap('.../psfonts.map'); map['cmr10'] For historical reasons, TeX knows many Type-1 fonts by different names than the outside world. (For one thing, the names have to fit in eight characters.) Also, TeX's native fonts are not Type-1 but Metafont, which is nontrivial to convert to PostScript except as a bitmap. While high-quality conversions to Type-1 format exist and are shipped with modern TeX distributions, we need to know which Type-1 fonts are the counterparts of which native fonts. For these reasons a mapping is needed from internal font names to font file names. A texmf tree typically includes mapping files called e.g. psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm. psfonts.map might avoid embedding the 35 PostScript fonts, while the pdf-related files perhaps only avoid the "Base 14" pdf fonts. But the user may have configured these files differently. """ __slots__ = ('_font',) def __init__(self, filename): self._font = {} file = open(filename, 'rt') try: self._parse(file) finally: file.close() def __getitem__(self, texname): result = self._font[texname] fn, enc = result.filename, result.encoding if fn is not None and not fn.startswith('/'): result.filename = find_tex_file(fn) if enc is not None and not enc.startswith('/'): result.encoding = find_tex_file(result.encoding) return result def _parse(self, file): """Parse each line into words.""" for line in file: line = line.strip() if line == '' or line.startswith('%'): continue words, pos = [], 0 while pos < len(line): if line[pos] == '"': # double quoted word pos += 1 end = line.index('"', pos) words.append(line[pos:end]) pos = end + 1 else: # ordinary word end = line.find(' ', pos+1) if end == -1: end = len(line) words.append(line[pos:end]) pos = end while pos < len(line) and line[pos] == ' ': pos += 1 self._register(words) def _register(self, words): """Register a font described by "words". The format is, AFAIK: texname fontname [effects and filenames] Effects are PostScript snippets like ".177 SlantFont", filenames begin with one or two less-than signs. A filename ending in enc is an encoding file, other filenames are font files. 
This can be overridden with a left bracket: <[foobar indicates an encoding file named foobar. There is some difference between <foo.pfb and <<bar.pfb in subsetting, but I have no example of << in my TeX installation. """ texname, psname = words[:2] effects, encoding, filename = [], None, None for word in words[2:]: if not word.startswith('<'): effects.append(word) else: word = word.lstrip('<') if word.startswith('['): assert encoding is None encoding = word[1:] elif word.endswith('.enc'): assert encoding is None encoding = word else: assert filename is None filename = word self._font[texname] = mpl_cbook.Bunch( texname=texname, psname=psname, effects=effects, encoding=encoding, filename=filename) class Encoding(object): """ Parses a \*.enc file referenced from a psfonts.map style file. The format this class understands is a very limited subset of PostScript. Usage (subject to change):: for name in Encoding(filename): whatever(name) """ __slots__ = ('encoding',) def __init__(self, filename): file = open(filename, 'rt') try: matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying') self.encoding = self._parse(file) matplotlib.verbose.report('Result: ' + `self.encoding`, 'debug-annoying') finally: file.close() def __iter__(self): for name in self.encoding: yield name def _parse(self, file): result = [] state = 0 for line in file: comment_start = line.find('%') if comment_start > -1: line = line[:comment_start] line = line.strip() if state == 0: # Expecting something like /FooEncoding [ if '[' in line: state = 1 line = line[line.index('[')+1:].strip() if state == 1: if ']' in line: # ] def line = line[:line.index(']')] state = 2 words = line.split() for w in words: if w.startswith('/'): # Allow for /abc/def/ghi subwords = w.split('/') result.extend(subwords[1:]) else: raise ValueError, "Broken name in encoding file: " + w return result def find_tex_file(filename, format=None): """ Call kpsewhich to find a file in the texmf tree. If format is not None, it is used as the value for the --format option. See the kpathsea documentation for more information. Apparently most existing TeX distributions on Unix-like systems use kpathsea. I hear MikTeX (a popular distribution on Windows) doesn't use kpathsea, so what do we do? (TODO) """ cmd = ['kpsewhich'] if format is not None: cmd += ['--format=' + format] cmd += [filename] matplotlib.verbose.report('find_tex_file(%s): %s' \ % (filename,cmd), 'debug') pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE) result = pipe.communicate()[0].rstrip() matplotlib.verbose.report('find_tex_file result: %s' % result, 'debug') return result def _read_nointr(pipe, bufsize=-1): while True: try: return pipe.read(bufsize) except OSError, e: if e.errno == errno.EINTR: continue else: raise # With multiple text objects per figure (e.g. tick labels) we may end # up reading the same tfm and vf files many times, so we implement a # simple cache. TODO: is this worth making persistent? 
_tfmcache = {} _vfcache = {} def _fontfile(texname, class_, suffix, cache): try: return cache[texname] except KeyError: pass filename = find_tex_file(texname + suffix) if filename: result = class_(filename) else: result = None cache[texname] = result return result def _tfmfile(texname): return _fontfile(texname, Tfm, '.tfm', _tfmcache) def _vffile(texname): return _fontfile(texname, Vf, '.vf', _vfcache) if __name__ == '__main__': import sys matplotlib.verbose.set_level('debug-annoying') fname = sys.argv[1] try: dpi = float(sys.argv[2]) except IndexError: dpi = None dvi = Dvi(fname, dpi) fontmap = PsfontsMap(find_tex_file('pdftex.map')) for page in dvi: print '=== new page ===' fPrev = None for x,y,f,c,w in page.text: if f != fPrev: print 'font', f.texname, 'scaled', f._scale/pow(2.0,20) fPrev = f print x,y,c, 32 <= c < 128 and chr(c) or '.', w for x,y,w,h in page.boxes: print x,y,'BOX',w,h
gpl-3.0
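A minimal usage sketch for the dviread module in the record above, mirroring the interface described in the `Dvi` docstring and exercised by the module's own `__main__` block. It assumes the module is importable as `matplotlib.dviread` (the record's `path` field is outside this excerpt, so the import path is a guess), that a TeX-generated `sample.dvi` exists, and that `kpsewhich` is on the PATH for font lookups; written in the same Python 2 idiom as the record.

from matplotlib import dviread  # assumed import path

dvi = dviread.Dvi('sample.dvi', 72)     # dpi=72: page coordinates come back in points
for page in dvi:                        # each page is a Bunch(text, boxes, width, height, descent)
    print 'page %g x %g pt, descent %g' % (page.width, page.height, page.descent)
    for x, y, font, glyph, width in page.text:
        print '  glyph %3d of %s (%.1f pt) at (%.1f, %.1f)' % (
            glyph, font.texname, font.size, x, y)
    for x, y, h, w in page.boxes:       # rules (\hrule etc.) are reported as boxes
        print '  box %.1f x %.1f at (%.1f, %.1f)' % (w, h, x, y)
dvi.close()                             # harmless: iteration already closes the file at the postamble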
geomf/omf-fork
omf/models/_cvrDynamic.py
1
28120
# Portions Copyright (C) 2015 Intel Corporation ''' Calculate CVR impacts using a targetted set of dynamic loadflows. ''' import json import sys import math import calendar import os import multiprocessing from os.path import join as pJoin from jinja2 import Template from matplotlib import pyplot as plt from datetime import datetime, timedelta import __metaModel__ from __metaModel__ import cancel, getStatus as getStatusMeta # OMF imports sys.path.append(__metaModel__._omfDir) from omf import feeder import omf.calibrate from omf.solvers import gridlabd from omf.common.plot import Plot template = None def renderTemplate(template, fs, modelDir="", absolutePaths=False, datastoreNames={}): # Our HTML template for the interface: with fs.open("models/_cvrDynamic.html") as tempFile: template = Template(tempFile.read()) return __metaModel__.renderTemplate(template, modelDir, absolutePaths, datastoreNames) def getStatus(modelDir, fs): return getStatusMeta(modelDir, fs) def returnMag(complexStr): ''' real and imaginary parts of a complex number and returns magnitude handles string if the string starts with a '+' or a '-' handles negative or positive, real and imaginary parts''' if complexStr[0] == '+' or complexStr[0] == '-': complexStr1 = complexStr[1:len(complexStr) + 1] sign = complexStr[0] + '1' else: complexStr1 = complexStr sign = 1 if complexStr1.find('+') > 0: real = float(complexStr1[0:complexStr1.find('+')]) * float(sign) imag = float(complexStr1[complexStr1.find('+') + 1:-1]) else: if complexStr1.find('-') > 0: real = float(complexStr1[0:complexStr1.find('-')]) * float(sign) imag = float(complexStr1[complexStr1.find('-') + 1:-1]) * -1 else: if complexStr1.find('j') > 0: real = 0.0 imag = float( complexStr1[0:complexStr1.find('j')]) * float(sign) else: real = float(complexStr1) * float(sign) imag = 0.0 return (math.sqrt(real**2 + imag**2)) / 60.0 def run(modelDir, inData, fs): ''' Run the model in a separate process. web.py calls this to run the model. This function will return fast, but results take a while to hit the file system.''' if not fs.exists(modelDir): fs.create_dir(modelDir) inData["created"] = str(datetime.now()) fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inData, indent=4)) # If we are re-running, remove output: try: os.remove(pJoin(modelDir, "allOutputData.json")) except: pass # Start the computation. 
backProc = multiprocessing.Process( target=runForeground, args=(modelDir, inData, fs)) backProc.start() print "SENT TO BACKGROUND", modelDir with open(pJoin(modelDir, "PPID.txt"), "w") as pPidFile: pPidFile.write(str(backProc.pid)) def runForeground(modelDir, inData, fs): '''This reads a glm file, changes the method of powerflow and reruns''' try: startTime = datetime.now() # calibrate and run cvrdynamic feederPath = pJoin("data", "Feeder", inData[ "feederName"].split("___")[0], inData["feederName"].split("___")[1] + '.json') fs.export_from_fs_to_local(feederPath, feederPath) scadaPath = pJoin("uploads", (inData["scadaFile"] + '.tsv')) fs.export_from_fs_to_local(scadaPath, scadaPath) omf.calibrate.omfCalibrate(modelDir, feederPath, scadaPath) allOutput = {} print "here" with open(pJoin(modelDir, "calibratedFeeder.json"), "r") as jsonIn: feederJson = json.load(jsonIn) localTree = feederJson.get("tree", {}) for key in localTree: if "solver_method" in localTree[key].keys(): print "current solver method", localTree[key]["solver_method"] localTree[key]["solver_method"] = 'FBS' # find the swing bus and recorder attached to substation for key in localTree: if localTree[key].get('bustype', '').lower() == 'swing': swingIndex = key swingName = localTree[key].get('name') if localTree[key].get('object', '') == 'regulator' and localTree[key].get('from', '') == swingName: regIndex = key regConfName = localTree[key]['configuration'] # find the regulator and capacitor names and combine to form a string # for volt-var control object regKeys = [] accum_reg = "" for key in localTree: if localTree[key].get("object", "") == "regulator": accum_reg += localTree[key].get("name", "ERROR") + "," regKeys.append(key) regstr = accum_reg[:-1] print regKeys capKeys = [] accum_cap = "" for key in localTree: if localTree[key].get("object", "") == "capacitor": accum_cap += localTree[key].get("name", "ERROR") + "," capKeys.append(key) if localTree[key].get("control", "").lower() == "manual": localTree[key]['control'] = "VOLT" print "changing capacitor control from manual to volt" capstr = accum_cap[:-1] print capKeys # Attach recorders relevant to CVR. 
recorders = [ {'object': 'collector', 'file': 'ZlossesTransformer.csv', 'group': 'class=transformer', 'limit': '0', 'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'}, {'object': 'collector', 'file': 'ZlossesUnderground.csv', 'group': 'class=underground_line', 'limit': '0', 'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'}, {'object': 'collector', 'file': 'ZlossesOverhead.csv', 'group': 'class=overhead_line', 'limit': '0', 'property': 'sum(power_losses_A.real),sum(power_losses_A.imag),sum(power_losses_B.real),sum(power_losses_B.imag),sum(power_losses_C.real),sum(power_losses_C.imag)'}, {'object': 'recorder', 'file': 'Zregulator.csv', 'limit': '0', 'parent': localTree[regIndex]['name'], 'property': 'tap_A,tap_B,tap_C,power_in.real,power_in.imag'}, {'object': 'collector', 'file': 'ZvoltageJiggle.csv', 'group': 'class=triplex_meter', 'limit': '0', 'property': 'min(voltage_12.mag),mean(voltage_12.mag),max(voltage_12.mag),std(voltage_12.mag)'}, {'object': 'recorder', 'file': 'ZsubstationTop.csv', 'limit': '0', 'parent': localTree[swingIndex]['name'], 'property': 'voltage_A,voltage_B,voltage_C'}, {'object': 'recorder', 'file': 'ZsubstationBottom.csv', 'limit': '0', 'parent': localTree[regIndex]['to'], 'property': 'voltage_A,voltage_B,voltage_C'}] # recorder object for capacitor switching - if capacitors exist if capKeys != []: for key in capKeys: recorders.append({'object': 'recorder', 'file': 'ZcapSwitch' + str(key) + '.csv', 'limit': '0', 'parent': localTree[key]['name'], 'property': 'switchA,switchB,switchC'}) # attach recorder process biggest = 1 + max([int(k) for k in localTree.keys()]) for index, rec in enumerate(recorders): localTree[biggest + index] = rec # run a reference load flow HOURS = float(inData['simLengthHours']) simStartDate = inData['simStart'] feeder.adjustTime(localTree, HOURS, "hours", simStartDate) output = gridlabd.runInFilesystem( localTree, keepFiles=False, workDir=modelDir) os.remove(pJoin(modelDir, "PID.txt")) p = output['Zregulator.csv']['power_in.real'] q = output['Zregulator.csv']['power_in.imag'] # calculating length of simulation because it migth be different from # the simulation input HOURS simRealLength = int(len(p)) # time delays from configuration files time_delay_reg = '30.0' time_delay_cap = '300.0' for key in localTree: if localTree[key].get('object', '') == "regulator_configuration": time_delay_reg = localTree[key]['time_delay'] print "time_delay_reg", time_delay_reg # if localTree[key].get('object','') == "capacitor": # time_delay_cap = localTree[key]['time_delay'] # print "time_delay_cap",time_delay_cap # change the recorder names for key in localTree: if localTree[key].get('object', '') == "collector" or localTree[key].get('object', '') == "recorder": if localTree[key].get('file', '').startswith('Z'): localTree[key]['file'] = localTree[key].get( 'file', '').replace('Z', 'NewZ') # create volt-var control object max_key = max([int(key) for key in localTree.keys()]) print max_key localTree[max_key + 1] = {'object': 'volt_var_control', 'name': 'IVVC1', 'control_method': 'ACTIVE', 'capacitor_delay': str(time_delay_cap), 'regulator_delay': str(time_delay_reg), 'desired_pf': '0.99', 'd_max': '0.6', 'd_min': '0.1', 'substation_link': str(localTree[regIndex]['name']), 'regulator_list': regstr, 'capacitor_list': capstr} # running 
powerflow analysis via gridalab after attaching a regulator feeder.adjustTime(localTree, HOURS, "hours", simStartDate) output1 = gridlabd.runInFilesystem( localTree, keepFiles=True, workDir=modelDir) os.remove(pJoin(modelDir, "PID.txt")) pnew = output1['NewZregulator.csv']['power_in.real'] qnew = output1['NewZregulator.csv']['power_in.imag'] # total real and imaginary losses as a function of time def vecSum(u, v): ''' Add vectors u and v element-wise. Return has len <= len(u) and <=len(v). ''' return map(sum, zip(u, v)) def zeroVec(length): ''' Give a zero vector of input length. ''' return [0 for x in xrange(length)] (realLoss, imagLoss, realLossnew, imagLossnew) = (zeroVec(int(HOURS)) for x in range(4)) for device in ['ZlossesOverhead.csv', 'ZlossesTransformer.csv', 'ZlossesUnderground.csv']: for letter in ['A', 'B', 'C']: realLoss = vecSum( realLoss, output[device]['sum(power_losses_' + letter + '.real)']) imagLoss = vecSum( imagLoss, output[device]['sum(power_losses_' + letter + '.imag)']) realLossnew = vecSum( realLossnew, output1['New' + device]['sum(power_losses_' + letter + '.real)']) imagLossnew = vecSum( imagLossnew, output1['New' + device]['sum(power_losses_' + letter + '.imag)']) # voltage calculations and tap calculations def divby2(u): '''divides by 2''' return u / 2 lowVoltage = [] meanVoltage = [] highVoltage = [] lowVoltagenew = [] meanVoltagenew = [] highVoltagenew = [] tap = {'A': [], 'B': [], 'C': []} tapnew = {'A': [], 'B': [], 'C': []} volt = {'A': [], 'B': [], 'C': []} voltnew = {'A': [], 'B': [], 'C': []} switch = {'A': [], 'B': [], 'C': []} switchnew = {'A': [], 'B': [], 'C': []} for letter in ['A', 'B', 'C']: tap[letter] = output['Zregulator.csv']['tap_' + letter] tapnew[letter] = output1['NewZregulator.csv']['tap_' + letter] if capKeys != []: switch[letter] = output[ 'ZcapSwitch' + str(int(capKeys[0])) + '.csv']['switch' + letter] switchnew[letter] = output1[ 'NewZcapSwitch' + str(int(capKeys[0])) + '.csv']['switch' + letter] volt[letter] = map( returnMag, output['ZsubstationBottom.csv']['voltage_' + letter]) voltnew[letter] = map( returnMag, output1['NewZsubstationBottom.csv']['voltage_' + letter]) lowVoltage = map( divby2, output['ZvoltageJiggle.csv']['min(voltage_12.mag)']) lowVoltagenew = map( divby2, output1['NewZvoltageJiggle.csv']['min(voltage_12.mag)']) meanVoltage = map( divby2, output['ZvoltageJiggle.csv']['mean(voltage_12.mag)']) meanVoltagenew = map( divby2, output1['NewZvoltageJiggle.csv']['mean(voltage_12.mag)']) highVoltage = map( divby2, output['ZvoltageJiggle.csv']['max(voltage_12.mag)']) highVoltagenew = map( divby2, output1['NewZvoltageJiggle.csv']['max(voltage_12.mag)']) # energy calculations whEnergy = [] whLosses = [] whLoads = [] whEnergy.append(sum(p) / 10**6) whLosses.append(sum(realLoss) / 10**6) whLoads.append((sum(p) - sum(realLoss)) / 10**6) whEnergy.append(sum(pnew) / 10**6) whLosses.append(sum(realLossnew) / 10**6) whLoads.append((sum(pnew) - sum(realLossnew)) / 10**6) indices = ['No IVVC', 'With IVVC'] # energySalesRed = (whLoads[1]-whLoads[0])*(inData['wholesaleEnergyCostPerKwh'])*1000 # lossSav = (whLosses[0]-whLosses[1])*inData['wholesaleEnergyCostPerKwh']*1000 # print energySalesRed, lossSav # plots ticks = [] plt.clf() plt.title("total energy") plt.ylabel("total load and losses (MWh)") for element in range(2): ticks.append(element) bar_loss = plt.bar(element, whLosses[element], 0.15, color='red') bar_load = plt.bar( element + 0.15, whLoads[element], 0.15, color='orange') plt.legend([bar_load[0], bar_loss[0]], ['total load', 
'total losses'], bbox_to_anchor=(0., 0.915, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.1) plt.xticks([t + 0.15 for t in ticks], indices) Plot.save_fig(plt, pJoin(modelDir, "totalEnergy.png")) # real and imaginary power plt.figure("real power") plt.title("Real Power at substation") plt.ylabel("substation real power (MW)") pMW = [element / 10**6 for element in p] pMWn = [element / 10**6 for element in pnew] pw = plt.plot(pMW) npw = plt.plot(pMWn) plt.legend([pw[0], npw[0]], ['NO IVVC', 'WITH IVVC'], bbox_to_anchor=(0., 0.915, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.1) Plot.save_fig(plt, pJoin(modelDir, "realPower.png")) plt.figure("Reactive power") plt.title("Reactive Power at substation") plt.ylabel("substation reactive power (MVAR)") qMVAR = [element / 10**6 for element in q] qMVARn = [element / 10**6 for element in qnew] iw = plt.plot(qMVAR) niw = plt.plot(qMVARn) plt.legend([iw[0], niw[0]], ['NO IVVC', 'WITH IVVC'], bbox_to_anchor=(0., 0.915, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.1) Plot.save_fig(plt, pJoin(modelDir, "imaginaryPower.png")) # voltage plots plt.figure("voltages as a function of time") f, ax = plt.subplots(2, sharex=True) f.suptitle("Min and Max voltages on the feeder") lv = ax[0].plot(lowVoltage, color='cadetblue') mv = ax[0].plot(meanVoltage, color='blue') hv = ax[0].plot(highVoltage, color='cadetblue') ax[0].legend([lv[0], mv[0], hv[0]], ['low voltage', 'mean voltage', 'high voltage'], bbox_to_anchor=(0., 0.915, 1., .1), loc=3, ncol=3, mode="expand", borderaxespad=0.1) ax[0].set_ylabel('NO IVVC') nlv = ax[1].plot(lowVoltagenew, color='cadetblue') nmv = ax[1].plot(meanVoltagenew, color='blue') nhv = ax[1].plot(highVoltagenew, color='cadetblue') ax[1].set_ylabel('WITH IVVC') Plot.save_fig(plt, pJoin(modelDir, "Voltages.png")) # tap positions plt.figure("TAP positions NO IVVC") f, ax = plt.subplots(6, sharex=True) f.set_size_inches(10, 12.0) #f.suptitle("Regulator Tap positions") ax[0].plot(tap['A']) ax[0].set_title("Regulator Tap positions NO IVVC") ax[0].set_ylabel("TAP A") ax[1].plot(tap['B']) ax[1].set_ylabel("TAP B") ax[2].plot(tap['C']) ax[2].set_ylabel("TAP C") ax[3].plot(tapnew['A']) ax[3].set_title("WITH IVVC") ax[3].set_ylabel("TAP A") ax[4].plot(tapnew['B']) ax[4].set_ylabel("TAP B") ax[5].plot(tapnew['C']) ax[5].set_ylabel("TAP C") for subplot in range(6): ax[subplot].set_ylim(-20, 20) f.tight_layout() Plot.save_fig(plt, pJoin(modelDir, "RegulatorTAPpositions.png")) # substation voltages plt.figure("substation voltage as a function of time") f, ax = plt.subplots(6, sharex=True) f.set_size_inches(10, 12.0) #f.suptitle("voltages at substation NO IVVC") ax[0].plot(volt['A']) ax[0].set_title('Substation voltages NO IVVC') ax[0].set_ylabel('voltage A') ax[1].plot(volt['B']) ax[1].set_ylabel('voltage B') ax[2].plot(volt['C']) ax[2].set_ylabel('voltage C') ax[3].plot(voltnew['A']) ax[3].set_title("WITH IVVC") ax[3].set_ylabel('voltage A') ax[4].plot(voltnew['B']) ax[4].set_ylabel('voltage B') ax[5].plot(voltnew['C']) ax[5].set_ylabel('voltage C') f.tight_layout() Plot.save_fig(plt, pJoin(modelDir, "substationVoltages.png")) # cap switches plt.figure("capacitor switch state as a function of time") f, ax = plt.subplots(6, sharex=True) f.set_size_inches(10, 12.0) #f.suptitle("Capacitor switch state NO IVVC") ax[0].plot(switch['A']) ax[0].set_title("Capacitor switch state NO IVVC") ax[0].set_ylabel("switch A") ax[1].plot(switch['B']) ax[1].set_ylabel("switch B") ax[2].plot(switch['C']) ax[2].set_ylabel("switch C") 
ax[3].plot(switchnew['A']) ax[3].set_title("WITH IVVC") ax[3].set_ylabel("switch A") ax[4].plot(switchnew['B']) ax[4].set_ylabel("switch B") ax[5].plot(switchnew['C']) ax[5].set_ylabel("switch C") for subplot in range(6): ax[subplot].set_ylim(-2, 2) f.tight_layout() Plot.save_fig(plt, pJoin(modelDir, "capacitorSwitch.png")) # plt.show() # monetization monthNames = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"] monthToSeason = {'January': 'Winter', 'February': 'Winter', 'March': 'Spring', 'April': 'Spring', 'May': 'Spring', 'June': 'Summer', 'July': 'Summer', 'August': 'Summer', 'September': 'Fall', 'October': 'Fall', 'November': 'Fall', 'December': 'Winter'} # calculate the month and hour of simulation start and month and hour # of simulation end simStartTimestamp = simStartDate + " 00:00:00" simFormattedDate = datetime.strptime( simStartTimestamp, "%Y-%m-%d %H:%M:%S") simStartMonthNum = int(simFormattedDate.strftime('%m')) simstartMonth = monthNames[simStartMonthNum - 1] simStartDay = int(simFormattedDate.strftime('%d')) if calendar.isleap(int(simFormattedDate.strftime('%Y'))): febDays = 29 else: febDays = 28 monthHours = [int(31 * 24), int(febDays * 24), int(31 * 24), int(30 * 24), int(31 * 24), int( 30 * 24), int(31 * 24), int(31 * 24), int(30 * 24), int(31 * 24), int(30 * 24), int(31 * 24)] simStartIndex = int( sum(monthHours[:(simStartMonthNum - 1)]) + (simStartDay - 1) * 24) temp = 0 cumulHours = [0] for x in range(12): temp += monthHours[x] cumulHours.append(temp) for i in range((simStartMonthNum), 13): if int(simStartIndex + simRealLength) <= cumulHours[i] and int(simStartIndex + simRealLength) > cumulHours[i - 1]: simEndMonthNum = i - 1 simEndMonth = monthNames[simEndMonthNum] print simstartMonth, simEndMonth # calculate peaks for the number of months in simulation previndex = 0 monthPeak = {} monthPeakNew = {} peakSaveDollars = {} energyLostDollars = {} lossRedDollars = {} simMonthList = monthNames[ monthNames.index(simstartMonth):(monthNames.index(simEndMonth) + 1)] print simMonthList for monthElement in simMonthList: print monthElement month = monthNames.index(monthElement) index1 = int(previndex) index2 = int(min((index1 + int(monthHours[month])), simRealLength)) monthPeak[monthElement] = max(p[index1:index2]) / 1000.0 monthPeakNew[monthElement] = max(pnew[index1:index2]) / 1000.0 peakSaveDollars[monthElement] = (monthPeak[monthElement] - monthPeakNew[monthElement]) * float( inData['peakDemandCost' + str(monthToSeason[monthElement]) + 'PerKw']) lossRedDollars[monthElement] = (sum(realLoss[index1:index2]) / 1000.0 - sum( realLossnew[index1:index2]) / 1000.0) * (float(inData['wholesaleEnergyCostPerKwh'])) energyLostDollars[monthElement] = (sum(p[index1:index2]) / 1000.0 - sum(pnew[index1:index2]) / 1000.0 - sum(realLoss[index1:index2]) / 1000.0 + sum(realLossnew[index1:index2]) / 1000.0) * (float(inData['wholesaleEnergyCostPerKwh']) - float(inData['retailEnergyCostPerKwh'])) previndex = index2 # money charts fig = plt.figure("cost benefit barchart", figsize=(10, 8)) ticks = range(len(simMonthList)) ticks1 = [element + 0.15 for element in ticks] ticks2 = [element + 0.30 for element in ticks] print ticks eld = [energyLostDollars[month] for month in simMonthList] lrd = [lossRedDollars[month] for month in simMonthList] psd = [peakSaveDollars[month] for month in simMonthList] bar_eld = plt.bar(ticks, eld, 0.15, color='red') bar_psd = plt.bar(ticks1, psd, 0.15, color='blue') bar_lrd = plt.bar(ticks2, lrd, 
0.15, color='green') plt.legend([bar_eld[0], bar_psd[0], bar_lrd[0]], ['energyLostDollars', 'peakReductionDollars', 'lossReductionDollars'], bbox_to_anchor=(0., 1.015, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.1) monShort = [element[0:3] for element in simMonthList] plt.xticks([t + 0.15 for t in ticks], monShort) plt.ylabel('Utility Savings ($)') Plot.save_fig(plt, pJoin(modelDir, "spendChart.png")) # cumulative savings graphs fig = plt.figure("cost benefit barchart", figsize=(10, 5)) annualSavings = sum(eld) + sum(lrd) + sum(psd) annualSave = lambda x: ( annualSavings - float(inData['omCost'])) * x - float(inData['capitalCost']) simplePayback = float( inData['capitalCost']) / (annualSavings - float(inData['omCost'])) plt.xlabel('Year After Installation') plt.xlim(0, 30) plt.ylabel('Cumulative Savings ($)') plt.plot([0 for x in range(31)], c='gray') plt.axvline(x=simplePayback, ymin=0, ymax=1, c='gray', linestyle='--') plt.plot([annualSave(x) for x in range(31)], c='green') Plot.save_fig(plt, pJoin(modelDir, "savingsChart.png")) # get exact time stamps from the CSV files generated by Gridlab-D timeWithZone = output['Zregulator.csv']['# timestamp'] timestamps = [element[:19] for element in timeWithZone] # data for highcharts allOutput["timeStamps"] = timestamps allOutput["noCVRPower"] = p allOutput["withCVRPower"] = pnew allOutput["noCVRLoad"] = whLoads[0] allOutput["withCVRLoad"] = whLoads[1] allOutput["noCVRLosses"] = whLosses[0] allOutput["withCVRLosses"] = whLosses[1] allOutput["noCVRTaps"] = tap allOutput["withCVRTaps"] = tapnew allOutput["noCVRSubVolts"] = volt allOutput["withCVRSubVolts"] = voltnew allOutput["noCVRCapSwitch"] = switch allOutput["withCVRCapSwitch"] = switchnew allOutput["noCVRHighVolt"] = highVoltage allOutput["withCVRHighVolt"] = highVoltagenew allOutput["noCVRLowVolt"] = lowVoltage allOutput["withCVRLowVolt"] = lowVoltagenew allOutput["noCVRMeanVolt"] = meanVoltage allOutput["withCVRMeanVolt"] = meanVoltagenew # monetization allOutput["simMonthList"] = monShort allOutput["energyLostDollars"] = energyLostDollars allOutput["lossRedDollars"] = lossRedDollars allOutput["peakSaveDollars"] = peakSaveDollars allOutput["annualSave"] = [annualSave(x) for x in range(31)] # Update the runTime in the input file. endTime = datetime.now() inData["runTime"] = str( timedelta(seconds=int((endTime - startTime).total_seconds()))) fs.save(pJoin(modelDir, "allInputData.json"), json.dumps(inData, indent=4)) fs.save(pJoin(modelDir, "allOutputData.json"), json.dumps(allOutput, indent=4)) # For autotest, there won't be such file. try: os.remove(pJoin(modelDir, "PPID.txt")) except: pass print "DONE RUNNING", modelDir except Exception as e: print "Oops, Model Crashed!!!" cancel(modelDir) print e def _tests(): "runs local tests for dynamic CVR model" import shutil from .. 
import filesystem fs = filesystem.Filesystem().fs # creating a work directory and initializing data inData = {"modelName": "Automated DynamicCVR Testing", "modelType": "_cvrDynamic", "user": "admin", "feederName": "public___ABEC Frank pre calib", "scadaFile": "FrankScada", "runTime": "", "capitalCost": 30000, "omCost": 1000, "wholesaleEnergyCostPerKwh": 0.06, "retailEnergyCostPerKwh": 0.10, "peakDemandCostSpringPerKw": 5.0, "peakDemandCostSummerPerKw": 10.0, "peakDemandCostFallPerKw": 6.0, "peakDemandCostWinterPerKw": 8.0, "simStart": "2011-01-01", "simLengthHours": 100} workDir = pJoin(__metaModel__._omfDir, "data", "Model") modelDir = pJoin(workDir, inData["user"], inData["modelName"]) # Clean up previous run. try: shutil.rmtree(modelDir) except: pass run(modelDir, inData, fs) if __name__ == '__main__': _tests()
gpl-2.0
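The `returnMag` helper in omf/models/_cvrDynamic.py above hand-parses GridLAB-D complex strings (for example the recorded `voltage_A` values) and returns the magnitude divided by 60. A small sanity-check sketch with hypothetical sample strings (the real recorder output format may differ); it assumes a full omf install, since importing the module also pulls in matplotlib, the feeder code and the GridLAB-D solver wrappers.

from omf.models._cvrDynamic import returnMag  # import path taken from the record's path field

for s in ['+2401.7-1.2j', '-120.0+0.5j', '3.5j', '7200']:
    # For well-formed strings the built-in parser gives the same result:
    # returnMag(s) == abs(complex(s)) / 60.0
    print s, '->', returnMag(s)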
mehdidc/scikit-learn
sklearn/datasets/svmlight_format.py
39
15319
"""This module implements a loader and dumper for the svmlight format This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. """ # Authors: Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause from contextlib import closing import io import os.path import numpy as np import scipy.sparse as sp from ._svmlight_format import _load_svmlight_file from .. import __version__ from ..externals import six from ..externals.six import u, b from ..externals.six.moves import range, zip from ..utils import check_array from ..utils.fixes import frombuffer_empty def load_svmlight_file(f, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False): """Load datasets in the svmlight / libsvm format into sparse CSR matrix This format is a text-based format, with one sample per line. It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. This format is used as the default format for both svmlight and the libsvm command line programs. Parsing a text based source can be expensive. When working on repeatedly on the same dataset, it is recommended to wrap this loader with joblib.Memory.cache to store a memmapped backup of the CSR results of the first call and benefit from the near instantaneous loading of memmapped structures for the subsequent calls. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. This implementation is written in Cython and is reasonably fast. However, a faster API-compatible loader is also available at: https://github.com/mblondel/svmlight-loader Parameters ---------- f : {str, file-like, int} (Path to) a file to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. A file-like or file descriptor will not be closed by this function. A file-like object must be opened in binary mode. n_features : int or None The number of features to use. If None, it will be inferred. This argument is useful to load several files that are subsets of a bigger sliced dataset: each subset might not have examples of every feature, hence the inferred shape might vary from one slice to another. multilabel : boolean, optional, default False Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) zero_based : boolean or "auto", optional, default "auto" Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe. 
query_id : boolean, default False If True, will return the query_id array for each file. dtype : numpy data type, default np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. Returns ------- X: scipy.sparse matrix of shape (n_samples, n_features) y: ndarray of shape (n_samples,), or, in the multilabel a list of tuples of length n_samples. query_id: array of shape (n_samples,) query_id for each sample. Only returned when query_id is set to True. See also -------- load_svmlight_files: similar function for loading multiple files in this format, enforcing the same number of features/columns on all of them. Examples -------- To use joblib.Memory to cache the svmlight file:: from sklearn.externals.joblib import Memory from sklearn.datasets import load_svmlight_file mem = Memory("./mycache") @mem.cache def get_data(): data = load_svmlight_file("mysvmlightfile") return data[0], data[1] X, y = get_data() """ return tuple(load_svmlight_files([f], n_features, dtype, multilabel, zero_based, query_id)) def _gen_open(f): if isinstance(f, int): # file descriptor return io.open(f, "rb", closefd=False) elif not isinstance(f, six.string_types): raise TypeError("expected {str, int, file-like}, got %s" % type(f)) _, ext = os.path.splitext(f) if ext == ".gz": import gzip return gzip.open(f, "rb") elif ext == ".bz2": from bz2 import BZ2File return BZ2File(f, "rb") else: return open(f, "rb") def _open_and_load(f, dtype, multilabel, zero_based, query_id): if hasattr(f, "read"): actual_dtype, data, ind, indptr, labels, query = \ _load_svmlight_file(f, dtype, multilabel, zero_based, query_id) # XXX remove closing when Python 2.7+/3.1+ required else: with closing(_gen_open(f)) as f: actual_dtype, data, ind, indptr, labels, query = \ _load_svmlight_file(f, dtype, multilabel, zero_based, query_id) # convert from array.array, give data the right dtype if not multilabel: labels = frombuffer_empty(labels, np.float64) data = frombuffer_empty(data, actual_dtype) indices = frombuffer_empty(ind, np.intc) indptr = np.frombuffer(indptr, dtype=np.intc) # never empty query = frombuffer_empty(query, np.intc) data = np.asarray(data, dtype=dtype) # no-op for float{32,64} return data, indices, indptr, labels, query def load_svmlight_files(files, n_features=None, dtype=np.float64, multilabel=False, zero_based="auto", query_id=False): """Load dataset from multiple files in SVMlight format This function is equivalent to mapping load_svmlight_file over a list of files, except that the results are concatenated into a single, flat list and the samples vectors are constrained to all have the same number of features. In case the file contains a pairwise preference constraint (known as "qid" in the svmlight format) these are ignored unless the query_id parameter is set to True. These pairwise preference constraints can be used to constraint the combination of samples when using pairwise loss functions (as is the case in some learning to rank problems) so that only pairs with the same query_id value are considered. Parameters ---------- files : iterable over {str, file-like, int} (Paths of) files to load. If a path ends in ".gz" or ".bz2", it will be uncompressed on the fly. If an integer is passed, it is assumed to be a file descriptor. File-likes and file descriptors will not be closed by this function. File-like objects must be opened in binary mode. n_features: int or None The number of features to use. 
If None, it will be inferred from the maximum column index occurring in any of the files. This can be set to a higher value than the actual number of features in any of the input files, but setting it to a lower value will cause an exception to be raised. multilabel: boolean, optional Samples may have several labels each (see http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multilabel.html) zero_based: boolean or "auto", optional Whether column indices in f are zero-based (True) or one-based (False). If column indices are one-based, they are transformed to zero-based to match Python/NumPy conventions. If set to "auto", a heuristic check is applied to determine this from the file contents. Both kinds of files occur "in the wild", but they are unfortunately not self-identifying. Using "auto" or True should always be safe. query_id: boolean, defaults to False If True, will return the query_id array for each file. dtype : numpy data type, default np.float64 Data type of dataset to be loaded. This will be the data type of the output numpy arrays ``X`` and ``y``. Returns ------- [X1, y1, ..., Xn, yn] where each (Xi, yi) pair is the result from load_svmlight_file(files[i]). If query_id is set to True, this will return instead [X1, y1, q1, ..., Xn, yn, qn] where (Xi, yi, qi) is the result from load_svmlight_file(files[i]) Notes ----- When fitting a model to a matrix X_train and evaluating it against a matrix X_test, it is essential that X_train and X_test have the same number of features (X_train.shape[1] == X_test.shape[1]). This may not be the case if you load the files individually with load_svmlight_file. See also -------- load_svmlight_file """ r = [_open_and_load(f, dtype, multilabel, bool(zero_based), bool(query_id)) for f in files] if (zero_based is False or zero_based == "auto" and all(np.min(tmp[1]) > 0 for tmp in r)): for ind in r: indices = ind[1] indices -= 1 n_f = max(ind[1].max() for ind in r) + 1 if n_features is None: n_features = n_f elif n_features < n_f: raise ValueError("n_features was set to {}," " but input file contains {} features" .format(n_features, n_f)) result = [] for data, indices, indptr, y, query_values in r: shape = (indptr.shape[0] - 1, n_features) X = sp.csr_matrix((data, indices, indptr), shape) X.sort_indices() result += X, y if query_id: result.append(query_values) return result def _dump_svmlight(X, y, f, one_based, comment, query_id): is_sp = int(hasattr(X, "tocsr")) if X.dtype.kind == 'i': value_pattern = u("%d:%d") else: value_pattern = u("%d:%.16g") if y.dtype.kind == 'i': line_pattern = u("%d") else: line_pattern = u("%.16g") if query_id is not None: line_pattern += u(" qid:%d") line_pattern += u(" %s\n") if comment: f.write(b("# Generated by dump_svmlight_file from scikit-learn %s\n" % __version__)) f.write(b("# Column indices are %s-based\n" % ["zero", "one"][one_based])) f.write(b("#\n")) f.writelines(b("# %s\n" % line) for line in comment.splitlines()) for i in range(X.shape[0]): if is_sp: span = slice(X.indptr[i], X.indptr[i + 1]) row = zip(X.indices[span], X.data[span]) else: nz = X[i] != 0 row = zip(np.where(nz)[0], X[i, nz]) s = " ".join(value_pattern % (j + one_based, x) for j, x in row) if query_id is not None: feat = (y[i], query_id[i], s) else: feat = (y[i], s) f.write((line_pattern % feat).encode('ascii')) def dump_svmlight_file(X, y, f, zero_based=True, comment=None, query_id=None): """Dump the dataset in svmlight / libsvm file format. This format is a text-based format, with one sample per line. 
It does not store zero valued features hence is suitable for sparse dataset. The first element of each line can be used to store a target variable to predict. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target values. f : string or file-like in binary mode If string, specifies the path that will contain the data. If file-like, data will be written to f. f should be opened in binary mode. zero_based : boolean, optional Whether column indices should be written zero-based (True) or one-based (False). comment : string, optional Comment to insert at the top of the file. This should be either a Unicode string, which will be encoded as UTF-8, or an ASCII byte string. If a comment is given, then it will be preceded by one that identifies the file as having been dumped by scikit-learn. Note that not all tools grok comments in SVMlight files. query_id : array-like, shape = [n_samples] Array containing pairwise preference constraints (qid in svmlight format). """ if comment is not None: # Convert comment string to list of lines in UTF-8. # If a byte string is passed, then check whether it's ASCII; # if a user wants to get fancy, they'll have to decode themselves. # Avoid mention of str and unicode types for Python 3.x compat. if isinstance(comment, bytes): comment.decode("ascii") # just for the exception else: comment = comment.encode("utf-8") if six.b("\0") in comment: raise ValueError("comment string contains NUL byte") y = np.asarray(y) if y.ndim != 1: raise ValueError("expected y of shape (n_samples,), got %r" % (y.shape,)) Xval = check_array(X, accept_sparse='csr') if Xval.shape[0] != y.shape[0]: raise ValueError("X.shape[0] and y.shape[0] should be the same, got" " %r and %r instead." % (Xval.shape[0], y.shape[0])) # We had some issues with CSR matrices with unsorted indices (e.g. #1501), # so sort them here, but first make sure we don't modify the user's X. # TODO We can do this cheaper; sorted_indices copies the whole matrix. if Xval is X and hasattr(Xval, "sorted_indices"): X = Xval.sorted_indices() else: X = Xval if hasattr(X, "sort_indices"): X.sort_indices() if query_id is not None: query_id = np.asarray(query_id) if query_id.shape[0] != y.shape[0]: raise ValueError("expected query_id of shape (n_samples,), got %r" % (query_id.shape,)) one_based = not zero_based if hasattr(f, "write"): _dump_svmlight(X, y, f, one_based, comment, query_id) else: with open(f, "wb") as f: _dump_svmlight(X, y, f, one_based, comment, query_id)
bsd-3-clause
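A self-contained round trip through the two public entry points defined in sklearn/datasets/svmlight_format.py above, `dump_svmlight_file` and `load_svmlight_file` (both re-exported from `sklearn.datasets`). A minimal sketch; the file name `tiny.svmlight` is arbitrary.

import numpy as np
import scipy.sparse as sp
from sklearn.datasets import dump_svmlight_file, load_svmlight_file

X = sp.csr_matrix(np.array([[0.0, 1.5, 0.0],
                            [2.0, 0.0, 3.0]]))
y = np.array([0, 1])

# Zero-valued features are not written, which is what keeps the format sparse.
dump_svmlight_file(X, y, 'tiny.svmlight', zero_based=True)
X2, y2 = load_svmlight_file('tiny.svmlight', n_features=3, zero_based=True)

assert (X2 != X).nnz == 0 and np.array_equal(y2, y)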
Kirubaharan/hydrology
ch_616/ch_616_stage_vol.py
2
8192
__author__ = 'kiruba' import numpy as np import matplotlib.pyplot as plt import pandas as pd import itertools from spread import spread from bisect import bisect_left, bisect_right from matplotlib import rc from scipy.interpolate import griddata from matplotlib import cm from matplotlib.path import * from mpl_toolkits.mplot3d import axes3d, Axes3D import matplotlib as mpl # latex parameters rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}) rc('text', usetex=True) plt.rc('text', usetex=True) plt.rc('font', family='serif', size=18) base_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/616/base_profile_616.csv' df_base = pd.read_csv(base_file, header=-1, skiprows=1) # print df_base.head() slope_file = '/media/kiruba/New Volume/milli_watershed/stream_profile/616/slope_616.csv' df_slope = pd.read_csv(slope_file, header=0) # print df_slope df_base_trans = df_base.T df_base_trans.columns = df_base_trans.ix[0, 0:] # print df_base_trans df_base_trans = df_base_trans.ix[1:, 1500:] # print df_base_trans # raise SystemExit(0) """ Filling of profile """ def find_range(array, ab): if ab < max(array): start = bisect_left(array, ab) return array[start-1] else: return max(array) def fill_profile(base_df, slope_df, midpoint_index): """ :param base_df: base profile :param slope_df: slope profile :param midpoint_index: index of midpoint(x=0) :return: """ base_z = base_df.ix[midpoint_index, 0:] slope_z = slope_df.ix[ :, 1] base_y = base_z.index # print base_z # base_y_list =base_y.tolist() slope_y = slope_df.ix[:, 0] slope_z.index = slope_y # print slope_z.head() # print base_z new_base_df = base_df for y_s in slope_z.index: if y_s not in base_z.index.tolist(): # print y_s y_t = find_range(base_y, y_s) template = base_df[y_t] z1 = template.ix[midpoint_index, ] # print z1 z2 = slope_z[y_s] diff = z2 - z1 # print template # print diff profile = template + diff profile.name = y_s # profile.loc[0] = y_s # profile = profile.sort_index() # print profile # no_of_col = len(base_df.columns) new_base_df = new_base_df.join(profile, how='right') # base_df.columns.values[no_of_col+1] = y_s return new_base_df def set_column_sequence(dataframe, seq): '''Takes a dataframe and a subsequence of its columns, returns dataframe with seq as first columns''' cols = seq[:] # copy so we don't mutate seq for x in dataframe.columns: if x not in cols: cols.append(x) return dataframe[cols] created_profile = fill_profile(df_base_trans, df_slope, 7) # created_profile = created_profile[sorted(created_profile.columns)] # print created_profile.head() sorted_df = created_profile.iloc[0:, 1:] sorted_df = sorted_df[sorted(sorted_df.columns)] sorted_df = sorted_df.join(created_profile.iloc[0:, 0], how='right') created_profile = set_column_sequence(sorted_df, [1500]) # print created_profile.head() # raise SystemExit(0) """ Create (x,y,z) point cloud """ z_array = created_profile.iloc[0:, 1:] columns = z_array.columns z_array = z_array.values index = created_profile.iloc[0:,0] df = pd.DataFrame(z_array, columns=columns).set_index(index) data_1 = [] for y, row in df.iteritems(): for x, z in row.iteritems(): data_1.append((x, y, z)) data_1_df = pd.DataFrame(data_1, columns=['x', 'y', 'z']) print data_1_df.head() # raise SystemExit(0) X = data_1_df.x Y = data_1_df.y Z = data_1_df.z ## contour and 3d surface plotting fig = plt.figure(figsize=plt.figaspect(0.5)) ax = fig.gca(projection='3d') # ax = fig.add_subplot(1, 2, 1, projection='3d') xi = np.linspace(X.min(), X.max(), 100) yi = np.linspace(Y.min(), Y.max(), 100) # 
print len(xi) # print len(yi) # print len(Z) zi = griddata((X, Y), Z, (xi[None, :], yi[:, None]), method='linear') # create a uniform spaced grid xig, yig = np.meshgrid(xi, yi) surf = ax.plot_surface(xig, yig, zi, rstride=5, cstride=3, linewidth=0, cmap=cm.coolwarm, antialiased=False) # 3d plot # inter_1 = [] # inter_1.append((xi, yi, zi)) # inter = pd.DataFrame(inter_1, columns=['x', 'y', 'z']) # inter.to_csv('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/inter.csv') # interpolation data output fig.colorbar(surf, shrink=0.5, aspect=5) rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']}) rc('text', usetex=True) # plt.rc('text', usetex=True) # plt.rc('font', family='serif') # plt.xlabel(r'\textbf{X} (m)') # plt.ylabel(r'\textbf{Y} (m)') # plt.title(r"Profile for 591", fontsize=16) plt.gca().invert_xaxis() # reverses x axis # # ax = fig # plt.savefig('/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/linear_interpolation') plt.show() raise SystemExit(0) # ## trace contours # Refer: Nikolai Shokhirev http://www.numericalexpert.com/blog/area_calculation/ check_dam_height = 1.5 #metre levels = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6] #, 3.93] plt.figure(figsize=(11.69, 8.27)) CS = plt.contourf(xi, yi, zi, len(levels), alpha=.75, cmap=cm.hot, levels=levels) C = plt.contour(xi, yi, zi, len(levels), colors='black', linewidth=.5, levels=levels) plt.clabel(C, inline=1, fontsize=10) plt.colorbar(CS, shrink=0.5, aspect=5) plt.yticks(np.arange(0,40, 5)) plt.xticks(np.arange(-6,6, 2)) plt.grid() plt.gca().invert_xaxis() plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_616/cont_2d') plt.show() # for i in range(len(CS.collections)): # print CS.levels[i] # # for i in range(len(C.collections)): # print(C.levels[i]) def contour_area(mpl_obj): """ Returns a array of contour levels and corresponding cumulative area of contours :param mpl_obj: Matplotlib contour object :return: [(level1, area1), (level1, area1+area2)] """ #Refer: Nikolai Shokhirev http://www.numericalexpert.com/blog/area_calculation/ n_c = len(mpl_obj.collections) # n_c = no of contours print 'No. 
of contours = %s' % n_c area = 0.0000 cont_area_array = [] for contour in range(n_c): # area = 0 n_p = len(mpl_obj.collections[contour].get_paths()) zc = mpl_obj.levels[contour] for path in range(n_p): p = mpl_obj.collections[contour].get_paths()[path] v = p.vertices l = len(v) s = 0.0000 for i in range(l): j = (i+1) % l s += (v[j, 0] - v[i, 0]) * (v[j, 1] + v[i, 1]) poly_area = 0.5*abs(s) area += poly_area cont_area_array.append((zc, area)) return cont_area_array # contour_area(C) contour_a = contour_area(CS) cont_area_df = pd.DataFrame(contour_a, columns=['Z', 'Area']) plt.plot(cont_area_df['Z'], cont_area_df['Area']) plt.rc('text', usetex=True) plt.rc('font', family='serif') plt.ylabel(r'\textbf{Area} ($m^2$)') plt.xlabel(r'\textbf{Stage} (m)') plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_616/cont_area_616') # plt.show() cont_area_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_616/cont_area.csv') ## Curve fitting # fig = plt.figure(figsize=(11.69, 8.27)) y = cont_area_df['Area'] x = cont_area_df['Z'] #calculate 2nd deg polynomial po = np.polyfit(x, y, 1) f = np.poly1d(po) print po print np.poly1d(f) #calculate new x, y x_new = np.linspace(min(x), max(x), 50) y_new = f(x_new) fig = plt.figure(figsize=(11.69, 8.27)) plt.plot(x, y, 'o', x_new, y_new) plt.xlim([(min(x))-1, (max(x))+1]) plt.xlabel(r'\textbf{Stage} (m)') plt.ylabel(r'\textbf{Area} ($m^2$)') plt.text(-0.8, 500, r"$y = {0:.2f}x {1:.2f} $".format(po[0], po[1])) plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_616/poly_2_deg_616') plt.show() created_profile.iloc[0] = created_profile.columns print created_profile created_profile.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/ch_616/created_profile_616.csv')
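# --- Added illustration (not part of the original script): a minimal, self-contained check of the
# --- shoelace formula that contour_area() above applies to each contour path. The unit square is a
# --- made-up example and should give an area of 1.0.
def _shoelace_area(vertices):
    """Polygon area from an ordered list of (x, y) vertices, same formula as in contour_area()."""
    s = 0.0
    n = len(vertices)
    for i in range(n):
        j = (i + 1) % n
        s += (vertices[j][0] - vertices[i][0]) * (vertices[j][1] + vertices[i][1])
    return 0.5 * abs(s)
# _shoelace_area([(0, 0), (1, 0), (1, 1), (0, 1)]) == 1.0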
gpl-3.0
bzier/TensorKart
record.py
1
6674
#!/usr/bin/env python import numpy as np import os import shutil import mss import matplotlib matplotlib.use('TkAgg') from datetime import datetime from matplotlib.figure import Figure from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as FigCanvas from PIL import ImageTk, Image import Tkinter as tk import ttk import tkMessageBox from utils import Screenshot, XboxController IMAGE_SIZE = (320, 240) IDLE_SAMPLE_RATE = 1500 SAMPLE_RATE = 200 class MainWindow(): """ Main frame of the application """ def __init__(self): self.root = tk.Tk() self.sct = mss.mss() self.root.title('Data Acquisition') self.root.geometry("660x325") self.root.resizable(False, False) # Init controller self.controller = XboxController() # Create GUI self.create_main_panel() # Timer self.rate = IDLE_SAMPLE_RATE self.sample_rate = SAMPLE_RATE self.idle_rate = IDLE_SAMPLE_RATE self.recording = False self.t = 0 self.pause_timer = False self.on_timer() self.root.mainloop() def create_main_panel(self): # Panels top_half = tk.Frame(self.root) top_half.pack(side=tk.TOP, expand=True, padx=5, pady=5) message = tk.Label(self.root, text="(Note: UI updates are disabled while recording)") message.pack(side=tk.TOP, padx=5) bottom_half = tk.Frame(self.root) bottom_half.pack(side=tk.LEFT, padx=5, pady=10) # Images self.img_panel = tk.Label(top_half, image=ImageTk.PhotoImage("RGB", size=IMAGE_SIZE)) # Placeholder self.img_panel.pack(side = tk.LEFT, expand=False, padx=5) # Joystick self.init_plot() self.PlotCanvas = FigCanvas(figure=self.fig, master=top_half) self.PlotCanvas.get_tk_widget().pack(side=tk.RIGHT, expand=False, padx=5) # Recording textframe = tk.Frame(bottom_half, width=332, height=15, padx=5) textframe.pack(side=tk.LEFT) textframe.pack_propagate(0) self.outputDirStrVar = tk.StringVar() self.txt_outputDir = tk.Entry(textframe, textvariable=self.outputDirStrVar, width=100) self.txt_outputDir.pack(side=tk.LEFT) self.outputDirStrVar.set("samples/" + datetime.now().strftime('%Y-%m-%d_%H:%M:%S')) self.record_button = ttk.Button(bottom_half, text="Record", command=self.on_btn_record) self.record_button.pack(side = tk.LEFT, padx=5) def init_plot(self): self.plotMem = 50 # how much data to keep on the plot self.plotData = [[0] * (5)] * self.plotMem # mem storage for plot self.fig = Figure((4,3)) self.axes = self.fig.add_subplot(111) def on_timer(self): self.poll() # stop drawing if recording to avoid slow downs if self.recording == False: self.draw() if not self.pause_timer: self.root.after(self.rate, self.on_timer) def poll(self): self.img = self.take_screenshot() self.controller_data = self.controller.read() self.update_plot() if self.recording == True: self.save_data() self.t += 1 def take_screenshot(self): # Get raw pixels from the screen sct_img = self.sct.grab({ "top": Screenshot.OFFSET_Y, "left": Screenshot.OFFSET_X, "width": Screenshot.SRC_W, "height": Screenshot.SRC_H}) # Create the Image return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX') def update_plot(self): self.plotData.append(self.controller_data) # adds to the end of the list self.plotData.pop(0) # remove the first item in the list, ie the oldest def save_data(self): image_file = self.outputDir+'/'+'img_'+str(self.t)+'.png' self.img.save(image_file) # write csv line self.outfile.write( image_file + ',' + ','.join(map(str, self.controller_data)) + '\n' ) def draw(self): # Image self.img.thumbnail(IMAGE_SIZE, Image.ANTIALIAS) # Resize self.img_panel.img = ImageTk.PhotoImage(self.img) self.img_panel['image'] = self.img_panel.img # 
Joystick x = np.asarray(self.plotData) self.axes.plot(range(0,self.plotMem), x[:,0], 'r') self.axes.hold(True) self.axes.plot(range(0,self.plotMem), x[:,1], 'b') self.axes.plot(range(0,self.plotMem), x[:,2], 'g') self.axes.plot(range(0,self.plotMem), x[:,3], 'k') self.axes.plot(range(0,self.plotMem), x[:,4], 'y') self.axes.hold(False) self.PlotCanvas.draw() def on_btn_record(self): # pause timer self.pause_timer = True if self.recording: self.recording = False else: self.start_recording() if self.recording: self.t = 0 # Reset our counter for the new recording self.record_button["text"] = "Stop" self.rate = self.sample_rate # make / open outfile self.outfile = open(self.outputDir+'/'+'data.csv', 'a') else: self.record_button["text"] = "Record" self.rate = self.idle_rate self.outfile.close() # un pause timer self.pause_timer = False self.on_timer() def start_recording(self): should_record = True # check that a dir has been specified if not self.outputDirStrVar.get(): tkMessageBox.showerror(title='Error', message='Specify the Output Directory', parent=self.root) should_record = False else: # a directory was specified self.outputDir = self.outputDirStrVar.get() # check if path exists - i.e. may be saving over data if os.path.exists(self.outputDir): # overwrite the data, yes/no? if tkMessageBox.askyesno(title='Warning!', message='Output Directory Exists - Overwrite Data?', parent=self.root): # delete & re-make the dir: shutil.rmtree(self.outputDir) os.mkdir(self.outputDir) # answer was 'no', so do not overwrite the data else: should_record = False self.txt_outputDir.focus_set() # directory doesn't exist, so make one else: os.mkdir(self.outputDir) self.recording = should_record if __name__ == '__main__': app = MainWindow()
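# --- Added sketch (not part of the original file): save_data() above writes one CSV row per sample
# --- in the form "<image path>,<controller axis values>". A minimal way to load a recording back,
# --- assuming pandas is installed; the column names here are hypothetical, not defined by the repo.
def load_recording(csv_path):
    import pandas as pd  # assumption: pandas is available in the environment
    cols = ['image', 'x_axis', 'y_axis', 'a', 'b', 'rb']  # one name per recorded controller value
    return pd.read_csv(csv_path, header=None, names=cols)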
mit
Ledoux/ShareYourSystem
Pythonlogy/draft/Simulaters/Lifer/01_ExampleCell.py
1
1170
#ImportModules import ShareYourSystem as SYS import scipy.stats #Definition an instance MyLifer=SYS.LiferClass( **{ 'PopulatingUnitsInt':10 } ).collect( 'SpikeMoniters', 'MySpikes', SYS.MoniterClass() ).lif( #LifingRestVariable -60., #-60.+2.*scipy.stats.uniform.rvs(0,size=10), #LifingConstantTimeVariable 20., #20.+2.*scipy.stats.uniform.rvs(0,size=10), #LifingThresholdVariable #-50., -50.+2.*scipy.stats.uniform.rvs(0,size=10), #LifingResetVariable #-70. -70.+2.*scipy.stats.uniform.rvs(0,size=10), ).neurongroup( ) #Definition the AttestedStr SYS._attest( [ 'MyLifer is '+SYS._str( MyLifer, **{ 'RepresentingBaseKeyStrsListBool':False, 'RepresentingAlineaIsBool':False } ), ] ) #Print from brian2 import Network,ms,mV MyNetwork=Network() map( MyNetwork.add, SYS.flat( [ MyLifer.NeurongroupedBrianVariable, MyLifer.NeurongroupedSpikeMonitorsList, MyLifer.NeurongroupedStateMonitorsList ] ) ) #plot MyLifer.NeurongroupedBrianVariable.v=-55.*mV MyNetwork.run(100.*ms) M=MyLifer.NeurongroupedSpikeMonitorsList[0] from matplotlib import pyplot pyplot.plot(M.t/ms, M.i, '.') pyplot.show()
mit
AnasGhrab/scikit-learn
examples/cluster/plot_agglomerative_clustering_metrics.py
402
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
matthieudumont/dipy
doc/examples/simulate_multi_tensor.py
16
2546
""" ====================== MultiTensor Simulation ====================== In this example we show how someone can simulate the signal and the ODF of a single voxel using a MultiTensor. """ import numpy as np from dipy.sims.voxel import (multi_tensor, multi_tensor_odf, single_tensor_odf, all_tensor_evecs) from dipy.data import get_sphere """ For the simulation we will need a GradientTable with the b-values and b-vectors Here we use the one we created in :ref:`example_gradients_spheres`. """ from gradients_spheres import gtab """ In ``mevals`` we save the eigenvalues of each tensor. """ mevals = np.array([[0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]]) """ In ``angles`` we save in polar coordinates (:math:`\theta, \phi`) the principal axis of each tensor. """ angles = [(0, 0), (60, 0)] """ In ``fractions`` we save the percentage of the contribution of each tensor. """ fractions = [50, 50] """ The function ``multi_tensor`` will return the simulated signal and an array with the principal axes of the tensors in cartesian coordinates. """ signal, sticks = multi_tensor(gtab, mevals, S0=100, angles=angles, fractions=fractions, snr=None) """ We can also add rician noise with a specific SNR. """ signal_noisy, sticks = multi_tensor(gtab, mevals, S0=100, angles=angles, fractions=fractions, snr=20) import matplotlib.pyplot as plt plt.plot(signal, label='noiseless') plt.plot(signal_noisy, label='with noise') plt.legend() plt.show() plt.savefig('simulated_signal.png') """ .. figure:: simulated_signal.png :align: center **Simulated MultiTensor signal** """ """ For the ODF simulation we will need a sphere. Because we are interested in a simulation of only a single voxel, we can use a sphere with very high resolution. We generate that by subdividing the triangles of one of Dipy's cached spheres, which we can read in the following way. """ sphere = get_sphere('symmetric724') sphere = sphere.subdivide(2) odf = multi_tensor_odf(sphere.vertices, mevals, angles, fractions) from dipy.viz import fvtk ren = fvtk.ren() odf_actor = fvtk.sphere_funcs(odf, sphere) odf_actor.RotateX(90) fvtk.add(ren, odf_actor) print('Saving illustration as multi_tensor_simulation') fvtk.record(ren, out_path='multi_tensor_simulation.png', size=(300, 300)) """ .. figure:: multi_tensor_simulation.png :align: center **Simulating a MultiTensor ODF** """
bsd-3-clause
adamgreenhall/scikit-learn
examples/manifold/plot_mds.py
261
2616
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. The reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping. """ # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # Licence: BSD print(__doc__) import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.metrics import euclidean_distances from sklearn.decomposition import PCA n_samples = 20 seed = np.random.RandomState(seed=3) X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() similarities = euclidean_distances(X_true) # Add noise to the similarities noise = np.random.rand(n_samples, n_samples) noise = noise + noise.T noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0 similarities += noise mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed, dissimilarity="precomputed", n_jobs=1) pos = mds.fit(similarities).embedding_ nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12, dissimilarity="precomputed", random_state=seed, n_jobs=1, n_init=1) npos = nmds.fit_transform(similarities, init=pos) # Rescale the data pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum()) npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum()) # Rotate the data clf = PCA(n_components=2) X_true = clf.fit_transform(X_true) pos = clf.fit_transform(pos) npos = clf.fit_transform(npos) fig = plt.figure(1) ax = plt.axes([0., 0., 1., 1.]) plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20) plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g') plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b') plt.legend(('True position', 'MDS', 'NMDS'), loc='best') similarities = similarities.max() / similarities * 100 similarities[np.isinf(similarities)] = 0 # Plot the edges start_idx, end_idx = np.where(pos) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))] values = np.abs(similarities) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, values.max())) lc.set_array(similarities.flatten()) lc.set_linewidths(0.5 * np.ones(len(segments))) ax.add_collection(lc) plt.show()
bsd-3-clause
kenshay/ImageScript
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/compat/numpy/__init__.py
7
2205
""" support numpy compatiblitiy across versions """ import re import numpy as np from distutils.version import LooseVersion from pandas.compat import string_types, string_and_binary_types # numpy versioning _np_version = np.version.short_version _nlv = LooseVersion(_np_version) _np_version_under1p8 = _nlv < '1.8' _np_version_under1p9 = _nlv < '1.9' _np_version_under1p10 = _nlv < '1.10' _np_version_under1p11 = _nlv < '1.11' _np_version_under1p12 = _nlv < '1.12' if LooseVersion(_np_version) < '1.7.0': raise ImportError('this version of pandas is incompatible with ' 'numpy < 1.7.0\n' 'your numpy version is {0}.\n' 'Please upgrade numpy to >= 1.7.0 to use ' 'this pandas version'.format(_np_version)) _tz_regex = re.compile('[+-]0000$') def tz_replacer(s): if isinstance(s, string_types): if s.endswith('Z'): s = s[:-1] elif _tz_regex.search(s): s = s[:-5] return s def np_datetime64_compat(s, *args, **kwargs): """ provide compat for construction of strings to numpy datetime64's with tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ if not _np_version_under1p11: s = tz_replacer(s) return np.datetime64(s, *args, **kwargs) def np_array_datetime64_compat(arr, *args, **kwargs): """ provide compat for construction of an array of strings to a np.array(..., dtype=np.datetime64(..)) tz-changes in 1.11 that make '2015-01-01 09:00:00Z' show a deprecation warning, when need to pass '2015-01-01 09:00:00' """ if not _np_version_under1p11: # is_list_like if hasattr(arr, '__iter__') and not \ isinstance(arr, string_and_binary_types): arr = [tz_replacer(s) for s in arr] else: arr = tz_replacer(arr) return np.array(arr, *args, **kwargs) __all__ = ['np', '_np_version_under1p8', '_np_version_under1p9', '_np_version_under1p10', '_np_version_under1p11', '_np_version_under1p12', ]
gpl-3.0
hlin117/scikit-learn
examples/plot_kernel_approximation.py
26
8069
""" ================================================== Explicit feature map approximation for RBF kernels ================================================== An example illustrating the approximation of the feature map of an RBF kernel. .. currentmodule:: sklearn.kernel_approximation It shows how to use :class:`RBFSampler` and :class:`Nystroem` to approximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem`) for the approximate mapping are shown. Please note that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. The second plot visualized the decision surfaces of the RBF kernel SVM and the linear SVM with approximate kernel maps. The plot shows decision surfaces of the classifiers projected onto the first two principal components of the data. This visualization should be taken with a grain of salt since it is just an interesting slice through the decision surface in 64 dimensions. In particular note that a datapoint (represented as a dot) does not necessarily be classified into the region it is lying in, since it will not lie on the plane that the first two principal components span. The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail in :ref:`kernel_approximation`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt import numpy as np from time import time # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, pipeline from sklearn.kernel_approximation import (RBFSampler, Nystroem) from sklearn.decomposition import PCA # The digits dataset digits = datasets.load_digits(n_class=9) # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16. 
data -= data.mean(axis=0) # We learn the digits on the first half of the digits data_train, targets_train = (data[:n_samples // 2], digits.target[:n_samples // 2]) # Now predict the value of the digit on the second half: data_test, targets_test = (data[n_samples // 2:], digits.target[n_samples // 2:]) # data_test = scaler.transform(data_test) # Create a classifier: a support vector classifier kernel_svm = svm.SVC(gamma=.2) linear_svm = svm.LinearSVC() # create pipeline from kernel approximation # and linear svm feature_map_fourier = RBFSampler(gamma=.2, random_state=1) feature_map_nystroem = Nystroem(gamma=.2, random_state=1) fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier), ("svm", svm.LinearSVC())]) nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC())]) # fit and predict using linear and kernel svm: kernel_svm_time = time() kernel_svm.fit(data_train, targets_train) kernel_svm_score = kernel_svm.score(data_test, targets_test) kernel_svm_time = time() - kernel_svm_time linear_svm_time = time() linear_svm.fit(data_train, targets_train) linear_svm_score = linear_svm.score(data_test, targets_test) linear_svm_time = time() - linear_svm_time sample_sizes = 30 * np.arange(1, 10) fourier_scores = [] nystroem_scores = [] fourier_times = [] nystroem_times = [] for D in sample_sizes: fourier_approx_svm.set_params(feature_map__n_components=D) nystroem_approx_svm.set_params(feature_map__n_components=D) start = time() nystroem_approx_svm.fit(data_train, targets_train) nystroem_times.append(time() - start) start = time() fourier_approx_svm.fit(data_train, targets_train) fourier_times.append(time() - start) fourier_score = fourier_approx_svm.score(data_test, targets_test) nystroem_score = nystroem_approx_svm.score(data_test, targets_test) nystroem_scores.append(nystroem_score) fourier_scores.append(fourier_score) # plot the results: plt.figure(figsize=(8, 8)) accuracy = plt.subplot(211) # second y axis for timeings timescale = plt.subplot(212) accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel") timescale.plot(sample_sizes, nystroem_times, '--', label='Nystroem approx. kernel') accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel") timescale.plot(sample_sizes, fourier_times, '--', label='Fourier approx. 
kernel') # horizontal lines for exact rbf and linear kernels: accuracy.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_score, linear_svm_score], label="linear svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_time, linear_svm_time], '--', label='linear svm') accuracy.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_score, kernel_svm_score], label="rbf svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_time, kernel_svm_time], '--', label='rbf svm') # vertical line for dataset dimensionality = 64 accuracy.plot([64, 64], [0.7, 1], label="n_features") # legends and labels accuracy.set_title("Classification accuracy") timescale.set_title("Training times") accuracy.set_xlim(sample_sizes[0], sample_sizes[-1]) accuracy.set_xticks(()) accuracy.set_ylim(np.min(fourier_scores), 1) timescale.set_xlabel("Sampling steps = transformed feature dimension") accuracy.set_ylabel("Classification accuracy") timescale.set_ylabel("Training time in seconds") accuracy.legend(loc='best') timescale.legend(loc='best') # visualize the decision surface, projected down to the first # two principal components of the dataset pca = PCA(n_components=8).fit(data_train) X = pca.transform(data_train) # Generate grid along first two principal components multiples = np.arange(-2, 2, 0.1) # steps along first component first = multiples[:, np.newaxis] * pca.components_[0, :] # steps along second component second = multiples[:, np.newaxis] * pca.components_[1, :] # combine grid = first[np.newaxis, :, :] + second[:, np.newaxis, :] flat_grid = grid.reshape(-1, data.shape[1]) # title for the plots titles = ['SVC with rbf kernel', 'SVC (linear kernel)\n with Fourier rbf feature map\n' 'n_components=100', 'SVC (linear kernel)\n with Nystroem rbf feature map\n' 'n_components=100'] plt.tight_layout() plt.figure(figsize=(12, 5)) # predict and plot for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. plt.subplot(1, 3, i + 1) Z = clf.predict(flat_grid) # Put the result into a color plot Z = Z.reshape(grid.shape[:-1]) plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired) plt.title(titles[i]) plt.tight_layout() plt.show()
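# --- Added sketch (not part of the scikit-learn example): RBFSampler is an ordinary transformer, so
# --- it can also be used outside a Pipeline. The gamma and n_components values below are arbitrary.
def _standalone_fourier_features():
    rbf = RBFSampler(gamma=.2, n_components=100, random_state=1)
    X_mapped = rbf.fit_transform(data_train)  # shape: (n_samples, n_components)
    return svm.LinearSVC().fit(X_mapped, targets_train).score(
        rbf.transform(data_test), targets_test)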
bsd-3-clause
samuel1208/scikit-learn
sklearn/datasets/tests/test_20news.py
280
3045
"""Test the 20news downloader, if the data is available.""" import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import SkipTest from sklearn import datasets def test_20news(): try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract a reduced dataset data2cats = datasets.fetch_20newsgroups( subset='all', categories=data.target_names[-1:-3:-1], shuffle=False) # Check that the ordering of the target_names is the same # as the ordering in the full dataset assert_equal(data2cats.target_names, data.target_names[-2:]) # Assert that we have only 0 and 1 as labels assert_equal(np.unique(data2cats.target).tolist(), [0, 1]) # Check that the number of filenames is consistent with data/target assert_equal(len(data2cats.filenames), len(data2cats.target)) assert_equal(len(data2cats.filenames), len(data2cats.data)) # Check that the first entry of the reduced dataset corresponds to # the first entry of the corresponding category in the full dataset entry1 = data2cats.data[0] category = data2cats.target_names[data2cats.target[0]] label = data.target_names.index(category) entry2 = data.data[np.where(data.target == label)[0][0]] assert_equal(entry1, entry2) def test_20news_length_consistency(): """Checks the length consistencies within the bunch This is a non-regression test for a bug present in 0.16.1. """ try: data = datasets.fetch_20newsgroups( subset='all', download_if_missing=False, shuffle=False) except IOError: raise SkipTest("Download 20 newsgroups to run this test") # Extract the full dataset data = datasets.fetch_20newsgroups(subset='all') assert_equal(len(data['data']), len(data.data)) assert_equal(len(data['target']), len(data.target)) assert_equal(len(data['filenames']), len(data.filenames)) def test_20news_vectorized(): # This test is slow. raise SkipTest("Test too slow.") bunch = datasets.fetch_20newsgroups_vectorized(subset="train") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314, 107428)) assert_equal(bunch.target.shape[0], 11314) assert_equal(bunch.data.dtype, np.float64) bunch = datasets.fetch_20newsgroups_vectorized(subset="test") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (7532, 107428)) assert_equal(bunch.target.shape[0], 7532) assert_equal(bunch.data.dtype, np.float64) bunch = datasets.fetch_20newsgroups_vectorized(subset="all") assert_true(sp.isspmatrix_csr(bunch.data)) assert_equal(bunch.data.shape, (11314 + 7532, 107428)) assert_equal(bunch.target.shape[0], 11314 + 7532) assert_equal(bunch.data.dtype, np.float64)
bsd-3-clause
trmznt/fatools
fatools/lib/sqlmodels/__init__.py
2
5290
## handler #from fatools.lib.sqlmodels import schema #from fatools.lib.utils import cout, cerr #from pandas import DataFrame import os, sys class SQLHandler_XXX(object): #Marker = schema.Marker #Batch = schema.Batch def __init__(self, dbfile, initial=False): print("Opening db: %s" % dbfile) if not initial and not os.path.isfile(dbfile): cerr('ERR - sqlite db file not found: %s' % dbfile) sys.exit(1) if initial and os.path.isfile(dbfile): cerr('ERR - sqlite db file already exists: %s' % dbfile) sys.exit(1) self.dbfile = dbfile self.engine, self.session = schema.engine_from_file(dbfile) def initdb(self, create_table = True): if create_table: schema.Base.metadata.create_all(self.engine) from fatools.lib.sqlmodels.setup import setup setup( self.session ) cout('Database at %s has been initialized.' % self.dbfile) def get_batch(self, batch_code = None): if not batch_code: cerr('ERR - batch code must be supplied!') sys.exit(1) return schema.Batch.search(batch_code, self.session) def get_marker(self, marker_code = None): if not marker_code: cerr('ERR - marker code must be supplied') sys.exit(1) return schema.Marker.search(marker_code, self.session) def get_code_pairs(self, sample_ids): """ return [(sample_id, code), ...] pairs """ q = self.session.query( schema.Sample.id, schema.Sample.code ) q = q.filter( schema.Sample.id.in_( sample_ids )) return list(q) def get_sample_by_id(self, sample_id): return schema.Sample.get(sample_id, self.session) def get_marker_by_id(self, marker_id): return schema.Marker.get(marker_id, self.session) def Panel(self): p = schema.Panel() p._dbh_session_ = self.session return p def get_allele_dataframe(self, sample_ids, marker_ids, params): """ return a Pandas dataframe with this columns ( marker_id, sample_id, bin, size, height, assay_id ) """ # params -> # allele_absolute_threshold # allele_relative_threshold # allele_relative_cutoff # peak_type assert sample_ids and marker_ids and params q = self.session.query( schema.AlleleSet.sample_id, schema.Channel.assay_id, schema.Allele.marker_id, schema.Allele.bin, schema.Allele.size, schema.Allele.height ).join(schema.Allele).join(schema.Channel) q = q.filter( schema.AlleleSet.sample_id.in_( sample_ids ) ) if type(params.peaktype) in [ list, tuple ]: q = q.filter( schema.Allele.type.in_( params.peaktype ) ) else: q = q.filter( schema.Allele.type == params.peaktype ) # we order based on marker_id, sample_id and then descending height q = q.order_by( schema.Allele.marker_id, schema.AlleleSet.sample_id, schema.Allele.height.desc() ) print('MARKER IDS:', marker_ids) if marker_ids: q = q.filter( schema.AlleleSet.marker_id.in_( marker_ids ) ) #q = q.outerjoin( Marker, Allele.marker_id == Marker.id ) #q = q.filter( Marker.id.in_( marker_ids ) ) if params.abs_threshold > 0: q = q.filter( schema.Allele.height > params.abs_threshold ) if params.rel_threshold == 0 and params.rel_cutoff == 0: df = DataFrame( [ (marker_id, sample_id, value, size, height, assay_id ) for ( sample_id, assay_id, marker_id, value, size, height ) in q ] ) else: alleles = [] max_height = 0 last_marker_id = 0 last_sample_id = 0 skip_flag = False for ( sample_id, assay_id, marker_id, value, size, height ) in q: if sample_id == last_sample_id: if last_marker_id == marker_id: if skip_flag: continue ratio = height / max_height if ratio < params.rel_threshold: continue if ( params.rel_cutoff > 0 and ratio > params.rel_cutoff ): # turn off this marker by skipping this sample_id & marker_id skip_flag = True # don't forget to remove the latest allele del alleles[-1] 
continue else: last_sample_id = sample_id last_marker_id = marker_id max_height = height skip_flag = False alleles.append( (marker_id, sample_id, value, size, height, assay_id) ) df = DataFrame( alleles ) if len(df) == 0: return df df.columns = ( 'marker_id', 'sample_id', 'value', 'size', 'height', 'assay_id' ) return df def get_sample_ids(self, sample_selector): pass
lgpl-3.0
aelaguiz/pyvotune
pyvotune/feature_extractors/pyrbm_rbm_extractor.py
1
2207
# -*- coding: utf-8 -*- from collections import Mapping, Sequence from operator import itemgetter import math import time import numpy as np import scipy.sparse as sp import random from sklearn.preprocessing import normalize from sklearn.base import BaseEstimator, TransformerMixin from sklearn.utils import atleast2d_or_csr from pyvotune.pyrbm.rbm import RBM, Trainer from pyvotune.log import logger log = logger() global_theano = None global_T = None global_RandomStreams = None def chunks(l, n): """ Yield successive n-sized chunks from l. """ for i in xrange(0, len(l), n): yield l[i:i + n] class PyRBMFeatureExtractor(BaseEstimator, TransformerMixin): def __init__( self, n_hidden=500, binary=True, learning_rate=0.1, momentum=0.2, l2_weight=0.001, sparsity=0.1, n_training_epochs=10, scale=0.001, reconstruction=True, n_gibbs=10, batch_size=20): self.n_hidden = n_hidden self.binary = binary self.momentum = momentum self.learning_rate = learning_rate self.l2_weight = l2_weight self.sparsity = sparsity self.batch_size = batch_size self.scale = scale self.n_training_epochs = n_training_epochs self.reconstruction = reconstruction self.n_gibbs = n_gibbs super(PyRBMFeatureExtractor, self).__init__() def fit(self, X, y=None): n_features = X.shape[1] self.rbm = RBM( n_features, self.n_hidden, self.binary, self.scale) trainer = Trainer( self.rbm, l2=self.l2_weight, momentum=self.momentum, target_sparsity=self.sparsity) for i in range(self.n_training_epochs): for j, batch in enumerate(chunks(X, self.batch_size)): #log.debug("Training rbm on epoch %s batch %s" % (i, j)) trainer.learn(batch, learning_rate=self.learning_rate) return self def transform(self, X, y=None): if self.reconstruction: return self.rbm.reconstruct(X, self.n_gibbs) else: return self.rbm.hidden_expectation(X)
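# --- Added usage sketch (not part of the original module): PyRBMFeatureExtractor implements the
# --- scikit-learn fit/transform protocol, so it can be used directly or inside a Pipeline. The
# --- hyper-parameter values below are arbitrary examples.
def _rbm_features(X):
    extractor = PyRBMFeatureExtractor(n_hidden=64, n_training_epochs=5, reconstruction=False)
    return extractor.fit_transform(X)  # hidden-unit expectations, one row per input sample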
mit
GuessWhoSamFoo/pandas
pandas/tests/indexes/timedeltas/test_astype.py
2
4066
from datetime import timedelta import numpy as np import pytest import pandas as pd from pandas import ( Float64Index, Index, Int64Index, NaT, Timedelta, TimedeltaIndex, timedelta_range) import pandas.util.testing as tm class TestTimedeltaIndex(object): def test_astype_object(self): idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx') expected_list = [Timedelta('1 days'), Timedelta('2 days'), Timedelta('3 days'), Timedelta('4 days')] result = idx.astype(object) expected = Index(expected_list, dtype=object, name='idx') tm.assert_index_equal(result, expected) assert idx.tolist() == expected_list def test_astype_object_with_nat(self): idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name='idx') expected_list = [Timedelta('1 days'), Timedelta('2 days'), NaT, Timedelta('4 days')] result = idx.astype(object) expected = Index(expected_list, dtype=object, name='idx') tm.assert_index_equal(result, expected) assert idx.tolist() == expected_list def test_astype(self): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) result = idx.astype(object) expected = Index([Timedelta('1 days 03:46:40')] + [NaT] * 3, dtype=object) tm.assert_index_equal(result, expected) result = idx.astype(int) expected = Int64Index([100000000000000] + [-9223372036854775808] * 3, dtype=np.int64) tm.assert_index_equal(result, expected) result = idx.astype(str) expected = Index(str(x) for x in idx) tm.assert_index_equal(result, expected) rng = timedelta_range('1 days', periods=10) result = rng.astype('i8') tm.assert_index_equal(result, Index(rng.asi8)) tm.assert_numpy_array_equal(rng.asi8, result.values) def test_astype_uint(self): arr = timedelta_range('1H', periods=2) expected = pd.UInt64Index( np.array([3600000000000, 90000000000000], dtype="uint64") ) tm.assert_index_equal(arr.astype("uint64"), expected) tm.assert_index_equal(arr.astype("uint32"), expected) def test_astype_timedelta64(self): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) result = idx.astype('timedelta64') expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64') tm.assert_index_equal(result, expected) result = idx.astype('timedelta64[ns]') tm.assert_index_equal(result, idx) assert result is not idx result = idx.astype('timedelta64[ns]', copy=False) tm.assert_index_equal(result, idx) assert result is idx @pytest.mark.parametrize('dtype', [ float, 'datetime64', 'datetime64[ns]']) def test_astype_raises(self, dtype): # GH 13149, GH 13209 idx = TimedeltaIndex([1e14, 'NaT', NaT, np.NaN]) msg = 'Cannot cast TimedeltaArray to dtype' with pytest.raises(TypeError, match=msg): idx.astype(dtype) def test_astype_category(self): obj = pd.timedelta_range("1H", periods=2, freq='H') result = obj.astype('category') expected = pd.CategoricalIndex([pd.Timedelta('1H'), pd.Timedelta('2H')]) tm.assert_index_equal(result, expected) result = obj._data.astype('category') expected = expected.values tm.assert_categorical_equal(result, expected) def test_astype_array_fallback(self): obj = pd.timedelta_range("1H", periods=2) result = obj.astype(bool) expected = pd.Index(np.array([True, True])) tm.assert_index_equal(result, expected) result = obj._data.astype(bool) expected = np.array([True, True]) tm.assert_numpy_array_equal(result, expected)
bsd-3-clause
lazywei/scikit-learn
examples/classification/plot_lda.py
164
2224
""" ==================================================================== Normal and Shrinkage Linear Discriminant Analysis for classification ==================================================================== Shows how shrinkage improves classification. """ from __future__ import division import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_blobs from sklearn.lda import LDA n_train = 20 # samples for training n_test = 200 # samples for testing n_averages = 50 # how often to repeat classification n_features_max = 75 # maximum number of features step = 4 # step size for the calculation def generate_data(n_samples, n_features): """Generate random blob-ish data with noisy features. This returns an array of input data with shape `(n_samples, n_features)` and an array of `n_samples` target labels. Only one feature contains discriminative information, the other features contain only noise. """ X, y = make_blobs(n_samples=n_samples, n_features=1, centers=[[-2], [2]]) # add non-discriminative features if n_features > 1: X = np.hstack([X, np.random.randn(n_samples, n_features - 1)]) return X, y acc_clf1, acc_clf2 = [], [] n_features_range = range(1, n_features_max + 1, step) for n_features in n_features_range: score_clf1, score_clf2 = 0, 0 for _ in range(n_averages): X, y = generate_data(n_train, n_features) clf1 = LDA(solver='lsqr', shrinkage='auto').fit(X, y) clf2 = LDA(solver='lsqr', shrinkage=None).fit(X, y) X, y = generate_data(n_test, n_features) score_clf1 += clf1.score(X, y) score_clf2 += clf2.score(X, y) acc_clf1.append(score_clf1 / n_averages) acc_clf2.append(score_clf2 / n_averages) features_samples_ratio = np.array(n_features_range) / n_train plt.plot(features_samples_ratio, acc_clf1, linewidth=2, label="LDA with shrinkage", color='r') plt.plot(features_samples_ratio, acc_clf2, linewidth=2, label="LDA", color='g') plt.xlabel('n_features / n_samples') plt.ylabel('Classification accuracy') plt.legend(loc=1, prop={'size': 12}) plt.suptitle('LDA vs. shrinkage LDA (1 discriminative feature)') plt.show()
bsd-3-clause
yotamfr/prot2vec
src/python/deepseq2.py
1
13886
import os # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # os.environ["CUDA_VISIBLE_DEVICES"] = "1" from src.python.baselines import * from pymongo import MongoClient from tqdm import tqdm import tensorflow as tf ### Keras from keras import optimizers from keras.models import Model from keras.layers import Input, Dense, Embedding, Activation from keras.layers import Conv2D, Conv1D from keras.layers import Dropout, BatchNormalization from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D from keras.layers import Concatenate, Flatten, Reshape from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler # from keras.losses import hinge, binary_crossentropy from keras import backend as K from sklearn.metrics import log_loss import math import argparse sess = tf.Session() K.set_session(sess) LR = 0.001 BATCH_SIZE = 32 LONG_EXPOSURE = True t0 = datetime(2014, 1, 1, 0, 0) t1 = datetime(2014, 9, 1, 0, 0) MAX_LENGTH = 2000 MIN_LENGTH = 30 def get_classes(db, onto, start=t0, end=t1): q1 = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$lte": start}, 'Aspect': ASPECT} q2 = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$gt": start, "$lte": end}, 'Aspect': ASPECT} def helper(q): seq2go, _ = GoAnnotationCollectionLoader( db.goa_uniprot.find(q), db.goa_uniprot.count(q), ASPECT).load() for i, (k, v) in enumerate(seq2go.items()): sys.stdout.write("\r{0:.0f}%".format(100.0 * i / len(seq2go))) seq2go[k] = onto.propagate(v) return reduce(lambda x, y: set(x) | set(y), seq2go.values(), set()) return onto.sort(helper(q1) | helper(q2)) def get_training_and_validation_streams(db, limit=None): q_train = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$lte": t0}, 'Aspect': ASPECT} seq2go_trn, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_train), db.goa_uniprot.count(q_train), ASPECT).load() query = {"_id": {"$in": unique(list(seq2go_trn.keys())).tolist()}} count = limit if limit else db.uniprot.count(query) source = db.uniprot.find(query).batch_size(10) if limit: source = source.limit(limit) stream_trn = DataStream(source, count, seq2go_trn) q_valid = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$gt": t0, "$lte": t1}, 'Aspect': ASPECT} seq2go_tst, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_valid), db.goa_uniprot.count(q_valid), ASPECT).load() query = {"_id": {"$in": unique(list(seq2go_tst.keys())).tolist()}} count = limit if limit else db.uniprot.count(query) source = db.uniprot.find(query).batch_size(10) if limit: source = source.limit(limit) stream_tst = DataStream(source, count, seq2go_tst) return stream_trn, stream_tst class DataStream(object): def __init__(self, source, count, seq2go): self._count = count self._source = source self._seq2go = seq2go def __iter__(self): count = self._count source = self._source seq2go = self._seq2go for k, seq in UniprotCollectionLoader(source, count): if not MIN_LENGTH <= len(seq) <= MAX_LENGTH: continue x = [AA.aa2index[aa] for aa in seq] yield k, x, seq2go[k] def __len__(self): return self._count def step_decay(epoch): initial_lrate = LR drop = 0.5 epochs_drop = 1.0 lrate = max(0.0001, initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))) return lrate def OriginalIception(inpt, num_channels=64): # tower_0 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(num_channels, 3, 
padding='same', activation='relu')(tower_1) tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_2 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_2) # tower_3 = MaxPooling1D(3, padding='same')(inpt) # tower_3 = Conv1D(num_channels, 1, padding='same')(tower_3) return Concatenate(axis=2)([tower_1, tower_2,]) def LargeInception(inpt, num_channels=64): tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(inpt) tower_1 = BatchNormalization()(tower_1) tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(tower_1) tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(inpt) tower_2 = BatchNormalization()(tower_2) tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(tower_2) return Concatenate(axis=2)([tower_1, tower_2]) def SmallInception(inpt, num_channels=150): tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_1) # tower_1 = BatchNormalization()(tower_1) tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_2 = Conv1D(num_channels, 15, padding='same', activation='relu')(tower_2) # tower_2 = BatchNormalization()(tower_2) return Concatenate(axis=2)([tower_1, tower_2]) def Classifier(inp1d, classes): out = Dense(len(classes))(inp1d) out = BatchNormalization()(out) out = Activation('sigmoid')(out) return out def MotifNet(classes, opt): inpt = Input(shape=(None,)) out = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt) out = Conv1D(250, 15, activation='relu', padding='valid')(out) out = Dropout(0.2)(out) out = Conv1D(100, 15, activation='relu', padding='valid')(out) out = SmallInception(out) out = Dropout(0.2)(out) out = SmallInception(out) out = Dropout(0.2)(out) out = Conv1D(250, 5, activation='relu', padding='valid')(out) out = Dropout(0.2)(out) out = Classifier(GlobalMaxPooling1D()(out), classes) model = Model(inputs=[inpt], outputs=[out]) model.compile(loss='binary_crossentropy', optimizer=opt) return model def Inception(inpt, tower1=6, tower2=10): tower_1 = Conv1D(64, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(64, tower1, padding='same', activation='relu')(tower_1) tower_2 = Conv1D(64, 1, padding='same', activation='relu')(inpt) tower_2 = Conv1D(64, tower2, padding='same', activation='relu')(tower_2) # tower_3 = MaxPooling1D(3, strides=1, padding='same')(inpt) # tower_3 = Conv1D(64, 1, padding='same', activation='relu')(tower_3) return Concatenate(axis=2)([tower_1, tower_2]) def ProteinInception(classes, opt): inpt = Input(shape=(None,)) img = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt) feats = Inception(Inception(img)) out = Classifier(GlobalMaxPooling1D()(feats), classes) model = Model(inputs=[inpt], outputs=[out]) model.compile(loss='binary_crossentropy', optimizer=opt) return model def Features(inpt): feats = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt) feats = Conv1D(250, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = Conv1D(100, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = Conv1D(100, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = Conv1D(250, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = GlobalMaxPooling1D()(feats) return feats def DeeperSeq(classes, opt): inp = Input(shape=(None,)) out = 
Classifier(Features(inp), classes) model = Model(inputs=[inp], outputs=[out]) model.compile(loss='binary_crossentropy', optimizer=opt) return model def batch_generator(stream, onto, classes): s_cls = set(classes) data = dict() def labels2vec(lbl): y = np.zeros(len(classes)) for go in onto.propagate(lbl, include_root=False): if go not in s_cls: continue y[classes.index(go)] = 1 return y def pad_seq(seq, max_length=MAX_LENGTH): delta = max_length - len(seq) left = [PAD for _ in range(delta // 2)] right = [PAD for _ in range(delta - delta // 2)] seq = left + seq + right return np.asarray(seq) def prepare_batch(sequences, labels): b = max(map(len, sequences)) + 100 Y = np.asarray([labels2vec(lbl) for lbl in labels]) X = np.asarray([pad_seq(seq, b) for seq in sequences]) return X, Y for k, x, y in stream: lx = len(x) if lx in data: data[lx].append([k, x, y]) ids, seqs, lbls = zip(*data[lx]) if len(seqs) == BATCH_SIZE: yield ids, prepare_batch(seqs, lbls) del data[lx] else: data[lx] = [[k, x, y]] for packet in data.values(): ids, seqs, lbls = zip(*packet) yield ids, prepare_batch(seqs, lbls) class LossHistory(Callback): def __init__(self): self.losses = [] def on_batch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) def train(model, gen_xy, length_xy, epoch, num_epochs, history=LossHistory(), lrate=LearningRateScheduler(step_decay)): pbar = tqdm(total=length_xy) for _, (X, Y) in gen_xy: model.fit(x=X, y=Y, batch_size=BATCH_SIZE, epochs=num_epochs if LONG_EXPOSURE else epoch + 1, verbose=0, validation_data=None, initial_epoch=epoch, callbacks=[history]) pbar.set_description("Training Loss:%.5f" % np.mean(history.losses)) pbar.update(len(Y)) pbar.close() def zeroone2oneminusone(vec): return np.add(np.multiply(np.array(vec), 2), -1) def oneminusone2zeroone(vec): return np.divide(np.add(np.array(vec), 1), 2) def calc_loss(y_true, y_pred): return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)]) def predict(model, gen_xy, length_xy, classes): pbar = tqdm(total=length_xy, desc="Predicting...") i, m, n = 0, length_xy, len(classes) ids = list() y_pred, y_true = np.zeros((m, n)), np.zeros((m, n)) for i, (keys, (X, Y)) in enumerate(gen_xy): k = len(Y) ids.extend(keys) y_hat, y = model.predict(X), Y y_pred[i:i + k, ], y_true[i:i + k, ] = y_hat, y pbar.update(k) pbar.close() return ids, y_true, y_pred def evaluate(y_true, y_pred, classes): y_pred = y_pred[~np.all(y_pred == 0, axis=1)] y_true = y_true[~np.all(y_true == 0, axis=1)] prs, rcs, f1s = performance(y_pred, y_true, classes) return calc_loss(y_true, y_pred), prs, rcs, f1s def add_arguments(parser): parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/', help="Supply the URL of MongoDB"), parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'], default="F", help="Specify the ontology aspect.") parser.add_argument("--init_epoch", type=int, default=0, help="Which epoch to start training the model?") parser.add_argument("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'], default="deepseq", help="Specify the model arch.") parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') if __name__ == "__main__": parser = argparse.ArgumentParser() add_arguments(parser) args = parser.parse_args() ASPECT = args.aspect # default: Molecular Function client = MongoClient(args.mongo_url) db = client['prot2vec'] print("Loading Ontology...") onto = get_ontology(ASPECT) # classes = get_classes(db, onto) classes 
= onto.classes classes.remove(onto.root) assert onto.root not in classes opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8) if args.arch == 'inception': model = ProteinInception(classes, opt) LONG_EXPOSURE = False num_epochs = 200 elif args.arch == 'deepseq': model = DeeperSeq(classes, opt) LONG_EXPOSURE = True num_epochs = 20 elif args.arch == 'motifnet': model = MotifNet(classes, opt) LONG_EXPOSURE = False num_epochs = 200 else: print('Unknown model arch') exit(0) if args.resume: model.load_weights(args.resume) print("Loaded model from disk") model.summary() for epoch in range(args.init_epoch, num_epochs): trn_stream, tst_stream = get_training_and_validation_streams(db) train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs) _, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes) loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes) i = np.argmax(f1s) f_max = f1s[i] print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)" % (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i])) model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max) model.save_weights("checkpoints/%s.hdf5" % model_str) with open("checkpoints/%s.json" % model_str, "w+") as f: f.write(model.to_json()) np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
mit
dingocuster/scikit-learn
sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py
221
5517
""" Testing for the gradient boosting loss functions and initial estimators. """ import numpy as np from numpy.testing import assert_array_equal from numpy.testing import assert_almost_equal from numpy.testing import assert_equal from nose.tools import assert_raises from sklearn.utils import check_random_state from sklearn.ensemble.gradient_boosting import BinomialDeviance from sklearn.ensemble.gradient_boosting import LogOddsEstimator from sklearn.ensemble.gradient_boosting import LeastSquaresError from sklearn.ensemble.gradient_boosting import RegressionLossFunction from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS from sklearn.ensemble.gradient_boosting import _weighted_percentile def test_binomial_deviance(): # Check binomial deviance loss. # Check against alternative definitions in ESLII. bd = BinomialDeviance(2) # pred has the same BD for y in {0, 1} assert_equal(bd(np.array([0.0]), np.array([0.0])), bd(np.array([1.0]), np.array([0.0]))) assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])), 0.0) assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]), np.array([100.0, -100.0, -100.0])), 0) # check if same results as alternative definition of deviance (from ESLII) alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1) * pred)) test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])), (np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])), (np.array([0.0, 0.0, 0.0]), np.array([-100.0, -100.0, -100.0])), (np.array([1.0, 1.0, 1.0]), np.array([-100.0, -100.0, -100.0]))] for datum in test_data: assert_almost_equal(bd(*datum), alt_dev(*datum)) # check the gradient against the alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred)) for datum in test_data: assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum)) def test_log_odds_estimator(): # Check log odds estimator. est = LogOddsEstimator() assert_raises(ValueError, est.fit, None, np.array([1])) est.fit(None, np.array([1.0, 0.0])) assert_equal(est.prior, 0.0) assert_array_equal(est.predict(np.array([[1.0], [1.0]])), np.array([[0.0], [0.0]])) def test_sample_weight_smoke(): rng = check_random_state(13) y = rng.rand(100) pred = rng.rand(100) # least squares loss = LeastSquaresError(1) loss_wo_sw = loss(y, pred) loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32)) assert_almost_equal(loss_wo_sw, loss_w_sw) def test_sample_weight_init_estimators(): # Smoke test for init estimators with sample weights. 
rng = check_random_state(13) X = rng.rand(100, 2) sample_weight = np.ones(100) reg_y = rng.rand(100) clf_y = rng.randint(0, 2, size=100) for Loss in LOSS_FUNCTIONS.values(): if Loss is None: continue if issubclass(Loss, RegressionLossFunction): k = 1 y = reg_y else: k = 2 y = clf_y if Loss.is_multi_class: # skip multiclass continue loss = Loss(k) init_est = loss.init_estimator() init_est.fit(X, y) out = init_est.predict(X) assert_equal(out.shape, (y.shape[0], 1)) sw_init_est = loss.init_estimator() sw_init_est.fit(X, y, sample_weight=sample_weight) sw_out = init_est.predict(X) assert_equal(sw_out.shape, (y.shape[0], 1)) # check if predictions match assert_array_equal(out, sw_out) def test_weighted_percentile(): y = np.empty(102, dtype=np.float) y[:50] = 0 y[-51:] = 2 y[-1] = 100000 y[50] = 1 sw = np.ones(102, dtype=np.float) sw[-1] = 0.0 score = _weighted_percentile(y, sw, 50) assert score == 1 def test_weighted_percentile_equal(): y = np.empty(102, dtype=np.float) y.fill(0.0) sw = np.ones(102, dtype=np.float) sw[-1] = 0.0 score = _weighted_percentile(y, sw, 50) assert score == 0 def test_weighted_percentile_zero_weight(): y = np.empty(102, dtype=np.float) y.fill(1.0) sw = np.ones(102, dtype=np.float) sw.fill(0.0) score = _weighted_percentile(y, sw, 50) assert score == 1.0 def test_sample_weight_deviance(): # Test if deviance supports sample weights. rng = check_random_state(13) X = rng.rand(100, 2) sample_weight = np.ones(100) reg_y = rng.rand(100) clf_y = rng.randint(0, 2, size=100) mclf_y = rng.randint(0, 3, size=100) for Loss in LOSS_FUNCTIONS.values(): if Loss is None: continue if issubclass(Loss, RegressionLossFunction): k = 1 y = reg_y p = reg_y else: k = 2 y = clf_y p = clf_y if Loss.is_multi_class: k = 3 y = mclf_y # one-hot encoding p = np.zeros((y.shape[0], k), dtype=np.float64) for i in range(k): p[:, i] = y == i loss = Loss(k) deviance_w_w = loss(y, p, sample_weight) deviance_wo_w = loss(y, p) assert deviance_wo_w == deviance_w_w
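# --- Added sketch (not part of the original test module): one more check in the same style, with
# --- non-uniform weights, to make the behaviour of _weighted_percentile explicit. The values are
# --- chosen by hand: most of the weight sits on the last entry, so the weighted median is 2.0.
def test_weighted_percentile_nonuniform_weights_sketch():
    y = np.array([0.0, 1.0, 2.0])
    sw = np.array([1.0, 1.0, 10.0])
    assert _weighted_percentile(y, sw, 50) == 2.0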
bsd-3-clause
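The record above tests BinomialDeviance against the ESLII formula and pins down the behavior of _weighted_percentile. A minimal, numpy-only sketch of those two quantities follows; it uses no sklearn internals, the function names are made up for illustration, and the weighted-percentile rule is only assumed to match what the tests above assert (cumulative weight reaching half of the total weight).

import numpy as np

def binomial_deviance(y, pred):
    # mean of log(1 + exp(-2 * (2y - 1) * pred)); logaddexp keeps it stable for large |pred|
    return np.mean(np.logaddexp(0.0, -2.0 * (2.0 * y - 1.0) * pred))

def weighted_percentile(y, weights, q=50):
    # value at which the cumulative sample weight first reaches q% of the total weight
    order = np.argsort(y)
    cum_w = np.cumsum(weights[order])
    threshold = cum_w[-1] * q / 100.0
    return y[order][np.searchsorted(cum_w, threshold)]

y = np.array([1.0, 0.0, 1.0])
pred = np.array([2.0, -3.0, 0.5])
print(binomial_deviance(y, pred))
# zero-weight samples are ignored, as in the tests above
print(weighted_percentile(np.array([0.0, 1.0, 2.0, 100.0]),
                          np.array([1.0, 1.0, 1.0, 0.0])))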
tbenthompson/tectosaur
setup.py
1
1306
import setuptools

try:
    import pypandoc
    description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    description = open('README.md').read()

setuptools.setup(
    packages = setuptools.find_packages(),
    install_requires = ['matplotlib', 'numpy', 'scipy', 'mako', 'cppimport',
                        'attrs', 'taskloaf>=18.12.08', 'okada_wrapper', 'psutil'],
    zip_safe = False,
    include_package_data = True,
    name = 'tectosaur',
    version = '0.0.1',
    description = 'Boundary element methods for crustal deformation and earthquake science.',
    long_description = description,
    url = 'https://github.com/tbenthompson/tectosaur',
    author = 'T. Ben Thompson',
    author_email = 't.ben.thompson@gmail.com',
    license = 'MIT',
    platforms = ['any'],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
        'Operating System :: POSIX',
        'Topic :: Software Development',
        'Topic :: Scientific/Engineering :: Physics',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: C++'
    ]
)
mit
toastedcornflakes/scikit-learn
sklearn/preprocessing/tests/test_data.py
4
60847
# Authors: # # Giorgio Patrini # # License: BSD 3 clause import warnings import numpy as np import numpy.linalg as la from scipy import sparse from distutils.version import LooseVersion from sklearn.utils import gen_batches from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import clean_warning_registry from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_less from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import skip_if_32bit from sklearn.utils.sparsefuncs import mean_variance_axis from sklearn.preprocessing.data import _transform_selected from sklearn.preprocessing.data import _handle_zeros_in_scale from sklearn.preprocessing.data import Binarizer from sklearn.preprocessing.data import KernelCenterer from sklearn.preprocessing.data import Normalizer from sklearn.preprocessing.data import normalize from sklearn.preprocessing.data import OneHotEncoder from sklearn.preprocessing.data import StandardScaler from sklearn.preprocessing.data import scale from sklearn.preprocessing.data import MinMaxScaler from sklearn.preprocessing.data import minmax_scale from sklearn.preprocessing.data import MaxAbsScaler from sklearn.preprocessing.data import maxabs_scale from sklearn.preprocessing.data import RobustScaler from sklearn.preprocessing.data import robust_scale from sklearn.preprocessing.data import add_dummy_feature from sklearn.preprocessing.data import PolynomialFeatures from sklearn.exceptions import DataConversionWarning from sklearn.pipeline import Pipeline from sklearn.cross_validation import cross_val_predict from sklearn.svm import SVR from sklearn import datasets iris = datasets.load_iris() # Make some data to be used many times rng = np.random.RandomState(0) n_features = 30 n_samples = 1000 offsets = rng.uniform(-1, 1, size=n_features) scales = rng.uniform(1, 10, size=n_features) X_2d = rng.randn(n_samples, n_features) * scales + offsets X_1row = X_2d[0, :].reshape(1, n_features) X_1col = X_2d[:, 0].reshape(n_samples, 1) X_list_1row = X_1row.tolist() X_list_1col = X_1col.tolist() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def _check_dim_1axis(a): if isinstance(a, list): return np.array(a).shape[0] return a.shape[0] def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size, n_samples_seen): if batch_stop != n: assert_equal((i + 1) * chunk_size, n_samples_seen) else: assert_equal(i * chunk_size + (batch_stop - batch_start), n_samples_seen) def test_polynomial_features(): # Test Polynomial Features X1 = np.arange(6)[:, np.newaxis] P1 = np.hstack([np.ones_like(X1), X1, X1 ** 2, X1 ** 3]) deg1 = 3 X2 = np.arange(6).reshape((3, 2)) x1 = X2[:, :1] x2 = X2[:, 1:] P2 = np.hstack([x1 ** 0 * x2 ** 0, x1 ** 1 * x2 ** 0, x1 ** 0 * x2 ** 1, x1 ** 2 * x2 ** 0, x1 ** 1 * x2 ** 1, x1 ** 0 * x2 ** 2]) deg2 = 2 for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]: P_test = PolynomialFeatures(deg, 
include_bias=True).fit_transform(X) assert_array_almost_equal(P_test, P) P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X) assert_array_almost_equal(P_test, P[:, 1:]) interact = PolynomialFeatures(2, interaction_only=True, include_bias=True) X_poly = interact.fit_transform(X) assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]]) assert_equal(interact.powers_.shape, (interact.n_output_features_, interact.n_input_features_)) def test_polynomial_feature_names(): X = np.arange(30).reshape(10, 3) poly = PolynomialFeatures(degree=2, include_bias=True).fit(X) feature_names = poly.get_feature_names() assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1', 'x0 x2', 'x1^2', 'x1 x2', 'x2^2'], feature_names) poly = PolynomialFeatures(degree=3, include_bias=False).fit(X) feature_names = poly.get_feature_names(["a", "b", "c"]) assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2', 'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c', 'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c', 'b c^2', 'c^3'], feature_names) # test some unicode poly = PolynomialFeatures(degree=1, include_bias=True).fit(X) feature_names = poly.get_feature_names([u"\u0001F40D", u"\u262E", u"\u05D0"]) assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"], feature_names) def test_standard_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1row]: scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_almost_equal(scaler.mean_, X.ravel()) assert_almost_equal(scaler.scale_, np.ones(n_features)) assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) assert_array_almost_equal(X_scaled.std(axis=0), np.zeros_like(n_features)) else: assert_almost_equal(scaler.mean_, X.mean()) assert_almost_equal(scaler.scale_, X.std()) assert_array_almost_equal(X_scaled.mean(axis=0), np.zeros_like(n_features)) assert_array_almost_equal(X_scaled.mean(axis=0), .0) assert_array_almost_equal(X_scaled.std(axis=0), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones(5).reshape(5, 1) scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_almost_equal(scaler.mean_, 1.) assert_almost_equal(scaler.scale_, 1.) assert_array_almost_equal(X_scaled.mean(axis=0), .0) assert_array_almost_equal(X_scaled.std(axis=0), .0) assert_equal(scaler.n_samples_seen_, X.shape[0]) def test_scale_1d(): # 1-d inputs X_list = [1., 3., 5., 0.] X_arr = np.array(X_list) for X in [X_list, X_arr]: X_scaled = scale(X) assert_array_almost_equal(X_scaled.mean(), 0.0) assert_array_almost_equal(X_scaled.std(), 1.0) assert_array_equal(scale(X, with_mean=False, with_std=False), X) @skip_if_32bit def test_standard_scaler_numerical_stability(): """Test numerical stability of scaling""" # np.log(1e-5) is taken because of its floating point representation # was empirically found to cause numerical problems with np.mean & np.std. 
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64) if LooseVersion(np.__version__) >= LooseVersion('1.9'): # This does not raise a warning as the number of samples is too low # to trigger the problem in recent numpy x_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(scale(x), np.zeros(8)) else: w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(8)) # with 2 more samples, the std computation run into numerical issues: x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64) w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(10)) x = np.ones(10, dtype=np.float64) * 1e-100 x_small_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(x_small_scaled, np.zeros(10)) # Large values can cause (often recoverable) numerical stability issues: x_big = np.ones(10, dtype=np.float64) * 1e100 w = "Dataset may contain too large values" x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big) assert_array_almost_equal(x_big_scaled, np.zeros(10)) assert_array_almost_equal(x_big_scaled, x_small_scaled) x_big_centered = assert_warns_message(UserWarning, w, scale, x_big, with_std=False) assert_array_almost_equal(x_big_centered, np.zeros(10)) assert_array_almost_equal(x_big_centered, x_small_scaled) def test_scaler_2d_arrays(): # Test scaling of 2d array along first axis rng = np.random.RandomState(0) n_features = 5 n_samples = 4 X = rng.randn(n_samples, n_features) X[:, 0] = 0.0 # first feature is always of zero scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_equal(scaler.n_samples_seen_, n_samples) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied assert_true(X_scaled is not X) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) X_scaled = scale(X, axis=1, with_std=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0]) # Check that the data hasn't been modified assert_true(X_scaled is not X) X_scaled = scaler.fit(X).transform(X, copy=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is X) X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non zero feature scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) def test_handle_zeros_in_scale(): s1 = np.array([0, 1, 2, 3]) s2 = _handle_zeros_in_scale(s1, copy=True) assert_false(s1[0] == s2[0]) 
assert_array_equal(s1, np.array([0, 1, 2, 3])) assert_array_equal(s2, np.array([1, 1, 2, 3])) def test_minmax_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = MinMaxScaler().fit(X) scaler_incr = MinMaxScaler() for batch in gen_batches(n_samples, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_batch = MinMaxScaler().fit(X[batch0]) scaler_incr = MinMaxScaler().partial_fit(X[batch0]) assert_array_almost_equal(scaler_batch.data_min_, scaler_incr.data_min_) assert_array_almost_equal(scaler_batch.data_max_, scaler_incr.data_max_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.data_range_, scaler_incr.data_range_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_) # Test std until the end of partial fits, and scaler_batch = MinMaxScaler().fit(X) scaler_incr = MinMaxScaler() # Clean estimator for i, batch in enumerate(gen_batches(n_samples, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr(i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_) def test_standard_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = StandardScaler(with_std=False).fit(X) scaler_incr = StandardScaler(with_std=False) for batch in gen_batches(n_samples, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_) assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_incr = StandardScaler().partial_fit(X[batch0]) if chunk_size == 1: assert_array_almost_equal(np.zeros(n_features, dtype=np.float64), scaler_incr.var_) assert_array_almost_equal(np.ones(n_features, dtype=np.float64), scaler_incr.scale_) else: assert_array_almost_equal(np.var(X[batch0], axis=0), scaler_incr.var_) assert_array_almost_equal(np.std(X[batch0], axis=0), scaler_incr.scale_) # no constants # Test std until the end of partial fits, and scaler_batch = StandardScaler().fit(X) scaler_incr = StandardScaler() # Clean estimator for i, batch in enumerate(gen_batches(n_samples, chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr(i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) def test_standard_scaler_partial_fit_numerical_stability(): # Test 
if the incremental computation introduces significative errors # for large datasets with values of large magniture rng = np.random.RandomState(0) n_features = 2 n_samples = 100 offsets = rng.uniform(-1e15, 1e15, size=n_features) scales = rng.uniform(1e3, 1e6, size=n_features) X = rng.randn(n_samples, n_features) * scales + offsets scaler_batch = StandardScaler().fit(X) scaler_incr = StandardScaler() for chunk in X: scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features)) # Regardless of abs values, they must not be more diff 6 significant digits tol = 10 ** (-6) assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol) assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol) assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol) # NOTE Be aware that for much larger offsets std is very unstable (last # assert) while mean is OK. # Sparse input size = (100, 3) scale = 1e20 X = rng.randint(0, 2, size).astype(np.float64) * scale X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) for X in [X_csr, X_csc]: # with_mean=False is required with sparse input scaler = StandardScaler(with_mean=False).fit(X) scaler_incr = StandardScaler(with_mean=False) for chunk in X: # chunk = sparse.csr_matrix(data_chunks) scaler_incr = scaler_incr.partial_fit(chunk) # Regardless of magnitude, they must not differ more than of 6 digits tol = 10 ** (-6) assert_true(scaler.mean_ is not None) assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol) assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol) def test_partial_fit_sparse_input(): # Check that sparsity is not destroyed X = np.array([[1.], [0.], [0.], [5.]]) X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) for X in [X_csr, X_csc]: X_null = null_transform.partial_fit(X).transform(X) assert_array_equal(X_null.data, X.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_null.data) assert_array_equal(X_orig.data, X.data) def test_standard_scaler_trasform_with_partial_fit(): # Check some postconditions after applying partial_fit and transform X = X_2d[:100, :] scaler_incr = StandardScaler() for i, batch in enumerate(gen_batches(X.shape[0], 1)): X_sofar = X[:(i + 1), :] chunks_copy = X_sofar.copy() scaled_batch = StandardScaler().fit_transform(X_sofar) scaler_incr = scaler_incr.partial_fit(X[batch]) scaled_incr = scaler_incr.transform(X_sofar) assert_array_almost_equal(scaled_batch, scaled_incr) assert_array_almost_equal(X_sofar, chunks_copy) # No change right_input = scaler_incr.inverse_transform(scaled_incr) assert_array_almost_equal(X_sofar, right_input) zero = np.zeros(X.shape[1]) epsilon = np.nextafter(0, 1) assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal assert_array_less(zero, scaler_incr.scale_ + epsilon) # (i+1) because the Scaler has been already fitted assert_equal((i + 1), scaler_incr.n_samples_seen_) def test_min_max_scaler_iris(): X = iris.data scaler = MinMaxScaler() # default params X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.max(axis=0), 1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # not default params: min=1, max=2 scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 1) assert_array_almost_equal(X_trans.max(axis=0), 2) X_trans_inv = scaler.inverse_transform(X_trans) 
assert_array_almost_equal(X, X_trans_inv) # min=-.5, max=.6 scaler = MinMaxScaler(feature_range=(-.5, .6)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), -.5) assert_array_almost_equal(X_trans.max(axis=0), .6) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # raises on invalid range scaler = MinMaxScaler(feature_range=(2, 1)) assert_raises(ValueError, scaler.fit, X) def test_min_max_scaler_zero_variance_features(): # Check min max scaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] # default params scaler = MinMaxScaler() X_trans = scaler.fit_transform(X) X_expected_0_1 = [[0., 0., 0.5], [0., 0., 0.0], [0., 0., 1.0]] assert_array_almost_equal(X_trans, X_expected_0_1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) X_trans_new = scaler.transform(X_new) X_expected_0_1_new = [[+0., 1., 0.500], [-1., 0., 0.083], [+0., 0., 1.333]] assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) # not default params scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) X_expected_1_2 = [[1., 1., 1.5], [1., 1., 1.0], [1., 1., 2.0]] assert_array_almost_equal(X_trans, X_expected_1_2) # function interface X_trans = minmax_scale(X) assert_array_almost_equal(X_trans, X_expected_0_1) X_trans = minmax_scale(X, feature_range=(1, 2)) assert_array_almost_equal(X_trans, X_expected_1_2) def test_minmax_scale_axis1(): X = iris.data X_trans = minmax_scale(X, axis=1) assert_array_almost_equal(np.min(X_trans, axis=1), 0) assert_array_almost_equal(np.max(X_trans, axis=1), 1) def test_min_max_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1row]: scaler = MinMaxScaler(copy=True) X_scaled = scaler.fit(X).transform(X) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_array_almost_equal(X_scaled.min(axis=0), np.zeros(n_features)) assert_array_almost_equal(X_scaled.max(axis=0), np.zeros(n_features)) else: assert_array_almost_equal(X_scaled.min(axis=0), .0) assert_array_almost_equal(X_scaled.max(axis=0), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones(5).reshape(5, 1) scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_greater_equal(X_scaled.min(), 0.) assert_less_equal(X_scaled.max(), 1.) 
assert_equal(scaler.n_samples_seen_, X.shape[0]) # Function interface X_1d = X_1row.ravel() min_ = X_1d.min() max_ = X_1d.max() assert_array_almost_equal((X_1d - min_) / (max_ - min_), minmax_scale(X_1d, copy=True)) def test_scaler_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) assert_raises(ValueError, StandardScaler().fit, X_csr) assert_raises(ValueError, StandardScaler().fit, X_csc) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csc.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_array_almost_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.var_, scaler_csr.var_) assert_array_almost_equal(scaler.scale_, scaler_csr.scale_) assert_array_almost_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.var_, scaler_csc.var_) assert_array_almost_equal(scaler.scale_, scaler_csc.scale_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_int(): # test that scaler converts integer input to floating # for both sparse and dense matrices rng = np.random.RandomState(42) X = rng.randint(20, size=(4, 5)) X[:, 0] = 0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) clean_warning_registry() with warnings.catch_warnings(record=True): X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) clean_warning_registry() with warnings.catch_warnings(record=True): scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) clean_warning_registry() with warnings.catch_warnings(record=True): 
scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csc.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_array_almost_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.var_, scaler_csr.var_) assert_array_almost_equal(scaler.scale_, scaler_csr.scale_) assert_array_almost_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.var_, scaler_csc.var_) assert_array_almost_equal(scaler.scale_, scaler_csc.scale_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., 1.109, 1.856, 21., 1.559], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis( X_csr_scaled.astype(np.float), 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_without_copy(): # Check that StandardScaler.fit does not change input rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_copy = X.copy() StandardScaler(copy=False).fit(X) assert_array_equal(X, X_copy) X_csr_copy = X_csr.copy() StandardScaler(with_mean=False, copy=False).fit(X_csr) assert_array_equal(X_csr.toarray(), X_csr_copy.toarray()) X_csc_copy = X_csc.copy() StandardScaler(with_mean=False, copy=False).fit(X_csc) assert_array_equal(X_csc.toarray(), X_csc_copy.toarray()) def test_scale_sparse_with_mean_raise_exception(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) # check scaling and fit with direct calls on sparse data assert_raises(ValueError, scale, X_csr, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr) assert_raises(ValueError, scale, X_csc, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc) # check transform and inverse_transform after a fit on a dense array scaler = StandardScaler(with_mean=True).fit(X) assert_raises(ValueError, scaler.transform, X_csr) assert_raises(ValueError, scaler.transform, X_csc) X_transformed_csr = sparse.csr_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr) X_transformed_csc = sparse.csc_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc) def test_scale_input_finiteness_validation(): # Check if non finite inputs raise ValueError X = [np.nan, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input 
contains NaN, infinity or a value too large", scale, X) X = [np.inf, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) def test_robust_scaler_2d_arrays(): """Test robust scaling of 2d array along first axis""" rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero scaler = RobustScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0)[0], 0) def test_robust_scaler_transform_one_row_csr(): # Check RobustScaler on transforming csr matrix with one row rng = np.random.RandomState(0) X = rng.randn(4, 5) single_row = np.array([[0.1, 1., 2., 0., -1.]]) scaler = RobustScaler(with_centering=False) scaler = scaler.fit(X) row_trans = scaler.transform(sparse.csr_matrix(single_row)) row_expected = single_row / scaler.scale_ assert_array_almost_equal(row_trans.toarray(), row_expected) row_scaled_back = scaler.inverse_transform(row_trans) assert_array_almost_equal(single_row, row_scaled_back.toarray()) def test_robust_scaler_iris(): X = iris.data scaler = RobustScaler() X_trans = scaler.fit_transform(X) assert_array_almost_equal(np.median(X_trans, axis=0), 0) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) q = np.percentile(X_trans, q=(25, 75), axis=0) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_scale_function_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_scaled = scale(X, with_mean=False) assert_false(np.any(np.isnan(X_scaled))) X_csr_scaled = scale(X_csr, with_mean=False) assert_false(np.any(np.isnan(X_csr_scaled.data))) # test csc has same outcome X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) # raises value error on axis != 0 assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1) assert_array_almost_equal(X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # null scale X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True) assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray()) def test_robust_scale_axis1(): X = iris.data X_trans = robust_scale(X, axis=1) assert_array_almost_equal(np.median(X_trans, axis=1), 0) q = np.percentile(X_trans, q=(25, 75), axis=1) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scaler_zero_variance_features(): """Check RobustScaler on toy data with zero variance features""" X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] scaler = RobustScaler() X_trans = scaler.fit_transform(X) # NOTE: for such a small sample size, what we expect in the third column # depends HEAVILY on the method used to calculate quantiles. The values # here were calculated to fit the quantiles produces by np.percentile # using numpy 1.9 Calculating quantiles with # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles # would yield very different results! 
X_expected = [[0., 0., +0.0], [0., 0., -1.0], [0., 0., +1.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 1., +0.], [-1., 0., -0.83333], [+0., 0., +1.66667]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3) def test_maxabs_scaler_zero_variance_features(): """Check MaxAbsScaler on toy data with zero variance features""" X = [[0., 1., +0.5], [0., 1., -0.3], [0., 1., +1.5], [0., 0., +0.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 2.0, 1.0 / 3.0], [-1., 1.0, 0.0], [+0., 1.0, 1.0]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2) # function interface X_trans = maxabs_scale(X) assert_array_almost_equal(X_trans, X_expected) # sparse data X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_trans_csr = scaler.fit_transform(X_csr) X_trans_csc = scaler.fit_transform(X_csc) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans_csr.A, X_expected) assert_array_almost_equal(X_trans_csc.A, X_expected) X_trans_csr_inv = scaler.inverse_transform(X_trans_csr) X_trans_csc_inv = scaler.inverse_transform(X_trans_csc) assert_array_almost_equal(X, X_trans_csr_inv.A) assert_array_almost_equal(X, X_trans_csc_inv.A) def test_maxabs_scaler_large_negative_value(): # Check MaxAbsScaler on toy data with a large negative value X = [[0., 1., +0.5, -1.0], [0., 1., -0.3, -0.5], [0., 1., -100.0, 0.0], [0., 0., +0.0, -2.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 0.005, -0.5], [0., 1., -0.003, -0.25], [0., 1., -1.0, 0.0], [0., 0., 0.0, -1.0]] assert_array_almost_equal(X_trans, X_expected) def test_maxabs_scaler_transform_one_row_csr(): # Check MaxAbsScaler on transforming csr matrix with one row X = sparse.csr_matrix([[0.5, 1., 1.]]) scaler = MaxAbsScaler() scaler = scaler.fit(X) X_trans = scaler.transform(X) X_expected = sparse.csr_matrix([[1., 1., 1.]]) assert_array_almost_equal(X_trans.toarray(), X_expected.toarray()) X_scaled_back = scaler.inverse_transform(X_trans) assert_array_almost_equal(X.toarray(), X_scaled_back.toarray()) @ignore_warnings def test_deprecation_minmax_scaler(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) scaler = MinMaxScaler().fit(X) depr_message = ("Attribute data_range will be removed in " "0.19. Use ``data_range_`` instead") data_range = assert_warns_message(DeprecationWarning, depr_message, getattr, scaler, "data_range") assert_array_equal(data_range, scaler.data_range) depr_message = ("Attribute data_min will be removed in " "0.19. 
Use ``data_min_`` instead") data_min = assert_warns_message(DeprecationWarning, depr_message, getattr, scaler, "data_min") assert_array_equal(data_min, scaler.data_min) def test_warning_scaling_integers(): # Check warning when scaling integer data X = np.array([[1, 2, 0], [0, 0, 0]], dtype=np.uint8) w = "Data with input dtype uint8 was converted to float64" clean_warning_registry() assert_warns_message(DataConversionWarning, w, scale, X) assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X) assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X) def test_maxabs_scaler_1d(): # Test scaling of dataset along single axis for X in [X_1row, X_1col, X_list_1row, X_list_1row]: scaler = MaxAbsScaler(copy=True) X_scaled = scaler.fit(X).transform(X) if isinstance(X, list): X = np.array(X) # cast only after scaling done if _check_dim_1axis(X) == 1: assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), np.ones(n_features)) else: assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X) # Constant feature X = np.ones(5).reshape(5, 1) scaler = MaxAbsScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.) assert_equal(scaler.n_samples_seen_, X.shape[0]) # function interface X_1d = X_1row.ravel() max_abs = np.abs(X_1d).max() assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True)) def test_maxabs_scaler_partial_fit(): # Test if partial_fit run over many batches of size 1 and 50 # gives the same results as fit X = X_2d[:100, :] n = X.shape[0] for chunk_size in [1, 2, 50, n, n + 42]: # Test mean at the end of the process scaler_batch = MaxAbsScaler().fit(X) scaler_incr = MaxAbsScaler() scaler_incr_csr = MaxAbsScaler() scaler_incr_csc = MaxAbsScaler() for batch in gen_batches(n, chunk_size): scaler_incr = scaler_incr.partial_fit(X[batch]) X_csr = sparse.csr_matrix(X[batch]) scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr) X_csc = sparse.csc_matrix(X[batch]) scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csr.max_abs_) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr_csc.max_abs_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr_csr.n_samples_seen_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr_csc.n_samples_seen_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_) assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) # Test std after 1 step batch0 = slice(0, chunk_size) scaler_batch = MaxAbsScaler().fit(X[batch0]) scaler_incr = MaxAbsScaler().partial_fit(X[batch0]) assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_) assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_) assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_) assert_array_almost_equal(scaler_batch.transform(X), scaler_incr.transform(X)) # Test std until the end of partial fits, and scaler_batch = MaxAbsScaler().fit(X) scaler_incr = MaxAbsScaler() # Clean estimator for i, batch in enumerate(gen_batches(n, 
chunk_size)): scaler_incr = scaler_incr.partial_fit(X[batch]) assert_correct_incr(i, batch_start=batch.start, batch_stop=batch.stop, n=n, chunk_size=chunk_size, n_samples_seen=scaler_incr.n_samples_seen_) def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert_true(X_norm is not X) X_norm1 = toarray(X_norm) normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) assert_true(X_norm is X) X_norm2 = toarray(X_norm) for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_l2(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l2', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='l2', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_max(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # 
check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='max', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='max', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): row_maxs = X_norm.max(axis=1) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(row_maxs[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalize(): # Test normalize function # Only tests functionality not used by the tests for Normalizer. X = np.random.RandomState(37).randn(3, 2) assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) assert_raises(ValueError, normalize, [[0]], axis=2) assert_raises(ValueError, normalize, [[0]], norm='l3') rs = np.random.RandomState(0) X_dense = rs.randn(10, 5) X_sparse = sparse.csr_matrix(X_dense) ones = np.ones((10)) for X in (X_dense, X_sparse): for dtype in (np.float32, np.float64): for norm in ('l1', 'l2'): X = X.astype(dtype) X_norm = normalize(X, norm=norm) assert_equal(X_norm.dtype, dtype) X_norm = toarray(X_norm) if norm == 'l1': row_sums = np.abs(X_norm).sum(axis=1) else: X_norm_squared = X_norm**2 row_sums = X_norm_squared.sum(axis=1) assert_array_almost_equal(row_sums, ones) def test_binarizer(): X_ = np.array([[1, 0, 5], [2, 3, -1]]) for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix): X = init(X_.copy()) binarizer = Binarizer(threshold=2.0, copy=True) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 4) assert_equal(np.sum(X_bin == 1), 2) X_bin = binarizer.transform(X) assert_equal(sparse.issparse(X), sparse.issparse(X_bin)) binarizer = Binarizer(copy=True).fit(X) X_bin = toarray(binarizer.transform(X)) assert_true(X_bin is not X) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=True) X_bin = binarizer.transform(X) assert_true(X_bin is not X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=False) X_bin = binarizer.transform(X) if init is not list: assert_true(X_bin is X) binarizer = Binarizer(copy=False) X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64) X_bin = binarizer.transform(X_float) if init is not list: assert_true(X_bin is X_float) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(threshold=-0.5, copy=True) for init in (np.array, list): X = init(X_.copy()) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 1) assert_equal(np.sum(X_bin == 1), 5) X_bin = binarizer.transform(X) # Cannot use threshold < 0 for sparse assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X)) def test_center_kernel(): # Test that KernelCenterer is equivalent to StandardScaler # in feature space rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) scaler = StandardScaler(with_std=False) scaler.fit(X_fit) X_fit_centered = 
scaler.transform(X_fit) K_fit = np.dot(X_fit, X_fit.T) # center fit time matrix centerer = KernelCenterer() K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) K_fit_centered2 = centerer.fit_transform(K_fit) assert_array_almost_equal(K_fit_centered, K_fit_centered2) # center predict time matrix X_pred = rng.random_sample((2, 4)) K_pred = np.dot(X_pred, X_fit.T) X_pred_centered = scaler.transform(X_pred) K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) K_pred_centered2 = centerer.transform(K_pred) assert_array_almost_equal(K_pred_centered, K_pred_centered2) def test_cv_pipeline_precomputed(): """Cross-validate a regression on four coplanar points with the same value. Use precomputed kernel to ensure Pipeline with KernelCenterer is treated as a _pairwise operation.""" X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]]) y_true = np.ones((4,)) K = X.dot(X.T) kcent = KernelCenterer() pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())]) # did the pipeline set the _pairwise attribute? assert_true(pipeline._pairwise) # test cross-validation, score should be almost perfect # NB: this test is pretty vacuous -- it's mainly to test integration # of Pipeline and KernelCenterer y_pred = cross_val_predict(pipeline, K, y_true, cv=2) assert_array_almost_equal(y_true, y_pred) def test_fit_transform(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for obj in ((StandardScaler(), Normalizer(), Binarizer())): X_transformed = obj.fit(X).transform(X) X_transformed2 = obj.fit_transform(X) assert_array_equal(X_transformed, X_transformed2) def test_deprecation_standard_scaler(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) scaler = StandardScaler().fit(X) depr_message = ("Function std_ is deprecated; Attribute ``std_`` will be " "removed in 0.19. Use ``scale_`` instead") std_ = assert_warns_message(DeprecationWarning, depr_message, getattr, scaler, "std_") assert_array_equal(std_, scaler.scale_) def test_add_dummy_feature(): X = [[1, 0], [0, 1], [0, 1]] X = add_dummy_feature(X) assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_coo(): X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_coo(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csc(): X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csc(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csr(): X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csr(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_one_hot_encoder_sparse(): # Test OneHotEncoder's fit and transform. 
X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder() # discover max values automatically X_trans = enc.fit_transform(X).toarray() assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, [[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]]) # max value given as 3 enc = OneHotEncoder(n_values=4) X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 4 * 3)) assert_array_equal(enc.feature_indices_, [0, 4, 8, 12]) # max value given per feature enc = OneHotEncoder(n_values=[3, 2, 2]) X = [[1, 0, 1], [0, 1, 1]] X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 3 + 2 + 2)) assert_array_equal(enc.n_values_, [3, 2, 2]) # check that testing with larger feature works: X = np.array([[2, 0, 1], [0, 1, 1]]) enc.transform(X) # test that an error is raised when out of bounds: X_too_large = [[0, 2, 1], [0, 1, 1]] assert_raises(ValueError, enc.transform, X_too_large) error_msg = "unknown categorical feature present \[2\] during transform." assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large) assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X) # test that error is raised when wrong number of features assert_raises(ValueError, enc.transform, X[:, :-1]) # test that error is raised when wrong number of features in fit # with prespecified n_values assert_raises(ValueError, enc.fit, X[:, :-1]) # test exception on wrong init param assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X) enc = OneHotEncoder() # test negative input to fit assert_raises(ValueError, enc.fit, [[0], [-1]]) # test negative input to transform enc.fit([[0], [1]]) assert_raises(ValueError, enc.transform, [[0], [-1]]) def test_one_hot_encoder_dense(): # check for sparse=False X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder(sparse=False) # discover max values automatically X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, np.array([[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]])) def _check_transform_selected(X, X_expected, sel): for M in (X, sparse.csr_matrix(X)): Xtr = _transform_selected(M, Binarizer().transform, sel) assert_array_equal(toarray(Xtr), X_expected) def test_transform_selected(): X = [[3, 2, 1], [0, 1, 1]] X_expected = [[1, 2, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0]) _check_transform_selected(X, X_expected, [True, False, False]) X_expected = [[1, 1, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0, 1, 2]) _check_transform_selected(X, X_expected, [True, True, True]) _check_transform_selected(X, X_expected, "all") _check_transform_selected(X, X, []) _check_transform_selected(X, X, [False, False, False]) def _run_one_hot(X, X2, cat): enc = OneHotEncoder(categorical_features=cat) Xtr = enc.fit_transform(X) X2tr = enc.transform(X2) return Xtr, X2tr def _check_one_hot(X, X2, cat, n_features): ind = np.where(cat)[0] # With mask A, B = _run_one_hot(X, X2, cat) # With indices C, D = _run_one_hot(X, X2, ind) # Check shape assert_equal(A.shape, (2, n_features)) assert_equal(B.shape, (1, n_features)) assert_equal(C.shape, (2, n_features)) assert_equal(D.shape, (1, n_features)) # Check that mask and indices give the same results assert_array_equal(toarray(A), toarray(C)) assert_array_equal(toarray(B), 
toarray(D)) def test_one_hot_encoder_categorical_features(): X = np.array([[3, 2, 1], [0, 1, 1]]) X2 = np.array([[1, 1, 1]]) cat = [True, False, False] _check_one_hot(X, X2, cat, 4) # Edge case: all non-categorical cat = [False, False, False] _check_one_hot(X, X2, cat, 3) # Edge case: all categorical cat = [True, True, True] _check_one_hot(X, X2, cat, 5) def test_one_hot_encoder_unknown_transform(): X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) y = np.array([[4, 1, 1]]) # Test that one hot encoder raises error for unknown features # present during transform. oh = OneHotEncoder(handle_unknown='error') oh.fit(X) assert_raises(ValueError, oh.transform, y) # Test the ignore option, ignores unknown features. oh = OneHotEncoder(handle_unknown='ignore') oh.fit(X) assert_array_equal( oh.transform(y).toarray(), np.array([[0., 0., 0., 0., 1., 0., 0.]])) # Raise error if handle_unknown is neither ignore or error. oh = OneHotEncoder(handle_unknown='42') oh.fit(X) assert_raises(ValueError, oh.transform, y) def test_fit_cold_start(): X = iris.data X_2d = X[:, :2] # Scalers that have a partial_fit method scalers = [StandardScaler(with_mean=False, with_std=False), MinMaxScaler(), MaxAbsScaler()] for scaler in scalers: scaler.fit_transform(X) # with a different shape, this may break the scaler unless the internal # state is reset scaler.fit_transform(X_2d)
bsd-3-clause
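The preprocessing test file above repeatedly checks that partial_fit over chunks agrees with a single fit over the whole array. A small sketch of that incremental-scaling pattern on synthetic data follows; it is only a usage illustration and relies solely on estimators and helpers that appear in the tests (StandardScaler, gen_batches), not on any private test utilities.

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.utils import gen_batches

rng = np.random.RandomState(0)
# synthetic data with per-feature offsets and scales, as in the test fixtures
X = rng.randn(1000, 5) * rng.uniform(1, 10, size=5) + rng.uniform(-1, 1, size=5)

batch_scaler = StandardScaler().fit(X)          # one pass over the full array

incr_scaler = StandardScaler()
for batch in gen_batches(X.shape[0], 100):      # feed the data 100 rows at a time
    incr_scaler.partial_fit(X[batch])

print(np.allclose(batch_scaler.mean_, incr_scaler.mean_))    # expected: True
print(np.allclose(batch_scaler.scale_, incr_scaler.scale_))  # expected: True
print(incr_scaler.n_samples_seen_)                           # expected: 1000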
xesscorp/skidl
tests/spice_tests/test_spice_subckt.py
1
1407
import matplotlib.pyplot as plt

from skidl.pyspice import *

lib_search_paths[SPICE].append("../SpiceLib")

vin = V(ref="VIN", dc_value=8 @ u_V)  # Input power supply.
vreg = Part("NCP1117", "ncp1117_33-x")  # Voltage regulator from ON Semi part lib.
print(vreg)  # Print vreg pin names.
r = R(value=470 @ u_Ohm)  # Load resistor on regulator output.
vreg["IN", "OUT"] += (
    vin["p"],
    r[1],
)  # Connect vreg input to vin and output to load resistor.
gnd += vin["n"], r[2], vreg["GND"]  # Ground connections for everybody.

# Simulate the voltage regulator subcircuit.
# circ = generate_netlist(libs='SpiceLib')  # Pass-in the library where the voltage regulator subcircuit is stored.
circ = (
    generate_netlist()
)  # Pass-in the library where the voltage regulator subcircuit is stored.
print(circ)
sim = circ.simulator()
dc_vals = sim.dc(
    VIN=slice(0, 10, 0.1)
)  # Ramp vin from 0->10V and observe regulator output voltage.

# Get the input and output voltages.
inp = dc_vals[node(vin["p"])]
outp = dc_vals[node(vreg["OUT"])]

# Plot the regulator output voltage vs. the input supply voltage. Note that the regulator
# starts to operate once the input exceeds 4V and the output voltage clamps at 3.3V.
figure = plt.figure(1)
plt.title("NCP1117-3.3 Regulator Output Voltage vs. Input Voltage")
plt.xlabel("Input Voltage (V)")
plt.ylabel("Output Voltage (V)")
plt.plot(inp, outp)
plt.show()
mit
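The regulator script above follows a build-parts, generate_netlist, simulate, index-results-by-node flow. A smaller sketch of the same flow follows, using only primitives that appear in that script (V, R, gnd, generate_netlist, sim.dc, node); the 1 kOhm / 2 kOhm divider values and the "VS" reference are made up for illustration, and the sketch assumes the same skidl.pyspice environment.

from skidl.pyspice import *

vs = V(ref="VS", dc_value=5 @ u_V)      # DC supply to sweep.
r1 = R(value=1000 @ u_Ohm)              # Top resistor of the divider.
r2 = R(value=2000 @ u_Ohm)              # Bottom resistor of the divider.

vs["p"] += r1[1]                        # Supply feeds the top of the divider.
r1[2] += r2[1]                          # Middle node is the divider output.
gnd += vs["n"], r2[2]                   # Ground the supply return and bottom resistor.

circ = generate_netlist()
sim = circ.simulator()
dc_vals = sim.dc(VS=slice(0, 5, 0.5))   # Sweep the supply 0 -> 5 V.

v_in = dc_vals[node(vs["p"])]
v_out = dc_vals[node(r1[2])]            # Expect v_out ~ 2/3 * v_in for a 1k/2k divider.
print(v_in, v_out)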
DistrictDataLabs/yellowbrick
docs/api/model_selection/rfecv.py
1
1883
#!/usr/bin/env python3
# rfecv.py
# Generates RFECV visualizations for the documentation
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: rfecv.py [] $

import os
import matplotlib.pyplot as plt

from sklearn.svm import SVC
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold

from yellowbrick.model_selection import RFECV, rfecv
from yellowbrick.datasets import load_credit


CWD = os.path.dirname(__file__)
IMAGES = os.path.join(CWD, "images")


def rfecv_sklearn_example(image="rfecv_sklearn_example.png"):
    X, y = make_classification(
        n_samples=1000,
        n_features=25,
        n_informative=3,
        n_redundant=2,
        n_repeated=0,
        n_classes=8,
        n_clusters_per_class=1,
        random_state=0,
    )

    _, ax = plt.subplots()

    oz = RFECV(SVC(kernel="linear", C=1), ax=ax)
    oz.fit(X, y)
    oz.show(outpath=os.path.join(IMAGES, image))


def rfecv_credit_example(image="rfecv_credit.png"):
    X, y = load_credit()

    _, ax = plt.subplots()
    cv = StratifiedKFold(5)

    oz = RFECV(RandomForestClassifier(), ax=ax, cv=cv, scoring="f1_weighted")
    oz.fit(X, y)
    oz.show(outpath=os.path.join(IMAGES, image))


def rfecv_quick_method(image="rfecv_quick_method.png"):
    X, y = load_credit()

    _, ax = plt.subplots()
    cv = StratifiedKFold(5)

    visualizer = rfecv(RandomForestClassifier(), X=X, y=y, ax=ax, cv=cv,
                       scoring='f1_weighted')
    visualizer.show(outpath=os.path.join(IMAGES, image))


##########################################################################
## Main Method
##########################################################################

if __name__ == "__main__":
    rfecv_sklearn_example()
    rfecv_credit_example()
    rfecv_quick_method()
apache-2.0
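The docs script above always writes the RFECV plot to an image file. A minimal interactive variant follows, reusing only calls that appear in that script (make_classification, RFECV, fit, show); the attributes printed at the end (n_features_, support_) are an assumption, taken to be exposed by yellowbrick's RFECV the same way scikit-learn's RFECV exposes them.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from yellowbrick.model_selection import RFECV

X, y = make_classification(n_samples=500, n_features=15, n_informative=4, random_state=0)

oz = RFECV(RandomForestClassifier(n_estimators=50, random_state=0),
           cv=3, scoring="f1_weighted")
oz.fit(X, y)
oz.show()                      # render to screen; pass outpath=... to save a file instead

print(oz.n_features_)          # assumed attribute: number of features kept
print(oz.support_)             # assumed attribute: boolean mask of kept features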
YihaoLu/statsmodels
statsmodels/examples/tsa/ex_var.py
33
1280
from __future__ import print_function

import numpy as np

import statsmodels.api as sm
from statsmodels.tsa.api import VAR

# some example data
mdata = sm.datasets.macrodata.load().data
mdata = mdata[['realgdp','realcons','realinv']]
names = mdata.dtype.names
data = mdata.view((float,3))

use_growthrate = False #True #False
if use_growthrate:
    data = 100 * 4 * np.diff(np.log(data), axis=0)

model = VAR(data, names=names)
res = model.fit(4)

nobs_all = data.shape[0]

#in-sample 1-step ahead forecasts
fc_in = np.array([np.squeeze(res.forecast(model.y[t-20:t], 1))
                  for t in range(nobs_all-6, nobs_all)])

print(fc_in - res.fittedvalues[-6:])

#out-of-sample 1-step ahead forecasts
fc_out = np.array([np.squeeze(VAR(data[:t]).fit(2).forecast(data[t-20:t], 1))
                   for t in range(nobs_all-6, nobs_all)])

print(fc_out - data[nobs_all-6:nobs_all])
print(fc_out - res.fittedvalues[-6:])

#out-of-sample h-step ahead forecasts
h = 2
fc_out = np.array([VAR(data[:t]).fit(2).forecast(data[t-20:t], h)[-1]
                   for t in range(nobs_all-6-h+1, nobs_all-h+1)])

print(fc_out - data[nobs_all-6:nobs_all])
#out-of-sample forecast error
print(fc_out - res.fittedvalues[-6:])

import matplotlib.pyplot as plt
res.plot_forecast(20)
#plt.show()
bsd-3-clause
yonglehou/scikit-learn
sklearn/svm/classes.py
126
40114
import warnings import numpy as np from .base import _fit_liblinear, BaseSVC, BaseLibSVM from ..base import BaseEstimator, RegressorMixin from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \ LinearModel from ..feature_selection.from_model import _LearntSelectorMixin from ..utils import check_X_y from ..utils.validation import _num_samples class LinearSVC(BaseEstimator, LinearClassifierMixin, _LearntSelectorMixin, SparseCoefMixin): """Linear Support Vector Classification. Similar to SVC with parameter kernel='linear', but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples. This class supports both dense and sparse input and the multiclass support is handled according to a one-vs-the-rest scheme. Read more in the :ref:`User Guide <svm_classification>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge') Specifies the loss function. 'hinge' is the standard SVM loss (used e.g. by the SVC class) while 'squared_hinge' is the square of the hinge loss. penalty : string, 'l1' or 'l2' (default='l2') Specifies the norm used in the penalization. The 'l2' penalty is the standard used in SVC. The 'l1' leads to `coef_` vectors that are sparse. dual : bool, (default=True) Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features. tol : float, optional (default=1e-4) Tolerance for stopping criteria. multi_class: string, 'ovr' or 'crammer_singer' (default='ovr') Determines the multi-class strategy if `y` contains more than two classes. `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer` optimizes a joint objective over all classes. While `crammer_singer` is interesting from a theoretical perspective as it is consistent, it is seldom used in practice as it rarely leads to better accuracy and is more expensive to compute. If `crammer_singer` is chosen, the options loss, penalty and dual will be ignored. fit_intercept : boolean, optional (default=True) Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be already centered). intercept_scaling : float, optional (default=1) When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. class_weight : {dict, 'balanced'}, optional Set the parameter C of class i to class_weight[i]*C for SVC. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` verbose : int, (default=0) Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in liblinear that, if enabled, may not work properly in a multithreaded context. 
random_state : int seed, RandomState instance, or None (default=None) The seed of the pseudo random number generator to use when shuffling the data. max_iter : int, (default=1000) The maximum number of iterations to be run. Attributes ---------- coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `raw_coef_` that follows the internal memory layout of liblinear. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. Notes ----- The underlying C implementation uses a random number generator to select features when fitting the model. It is thus not uncommon to have slightly different results for the same input data. If that happens, try with a smaller ``tol`` parameter. The underlying implementation (liblinear) uses a sparse internal representation for the data that will incur a memory copy. Predict output may not match that of standalone liblinear in certain cases. See :ref:`differences from liblinear <liblinear_differences>` in the narrative documentation. **References:** `LIBLINEAR: A Library for Large Linear Classification <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__ See also -------- SVC Implementation of Support Vector Machine classifier using libsvm: the kernel can be non-linear but its SMO algorithm does not scale to large number of samples as LinearSVC does. Furthermore SVC multi-class mode is implemented using one vs one scheme while LinearSVC uses one vs the rest. It is possible to implement one vs the rest with SVC by using the :class:`sklearn.multiclass.OneVsRestClassifier` wrapper. Finally SVC can fit dense data without memory copy if the input is C-contiguous. Sparse data will still incur memory copy though. sklearn.linear_model.SGDClassifier SGDClassifier can optimize the same cost function as LinearSVC by adjusting the penalty and loss parameters. In addition it requires less memory, allows incremental (online) learning, and implements various loss functions and regularization regimes. """ def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4, C=1.0, multi_class='ovr', fit_intercept=True, intercept_scaling=1, class_weight=None, verbose=0, random_state=None, max_iter=1000): self.dual = dual self.tol = tol self.C = C self.multi_class = multi_class self.fit_intercept = fit_intercept self.intercept_scaling = intercept_scaling self.class_weight = class_weight self.verbose = verbose self.random_state = random_state self.max_iter = max_iter self.penalty = penalty self.loss = loss def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target vector relative to X Returns ------- self : object Returns self. """ # FIXME Remove l1/l2 support in 1.0 ----------------------------------- loss_l = self.loss.lower() msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. 
Backward compatibility" " for the loss='%s' will be removed in %s") # FIXME change loss_l --> self.loss after 0.18 if loss_l in ('l1', 'l2'): old_loss = self.loss self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l) warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'), DeprecationWarning) # --------------------------------------------------------------------- if self.C < 0: raise ValueError("Penalty term must be positive; got (C=%r)" % self.C) X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C") self.classes_ = np.unique(y) self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( X, y, self.C, self.fit_intercept, self.intercept_scaling, self.class_weight, self.penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, self.multi_class, self.loss) if self.multi_class == "crammer_singer" and len(self.classes_) == 2: self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1) if self.fit_intercept: intercept = self.intercept_[1] - self.intercept_[0] self.intercept_ = np.array([intercept]) return self class LinearSVR(LinearModel, RegressorMixin): """Linear Support Vector Regression. Similar to SVR with parameter kernel='linear', but implemented in terms of liblinear rather than libsvm, so it has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples. This class supports both dense and sparse input. Read more in the :ref:`User Guide <svm_regression>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. The penalty is a squared l2 penalty. The bigger this parameter, the less regularization is used. loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive') Specifies the loss function. 'l1' is the epsilon-insensitive loss (standard SVR) while 'l2' is the squared epsilon-insensitive loss. epsilon : float, optional (default=0.1) Epsilon parameter in the epsilon-insensitive loss function. Note that the value of this parameter depends on the scale of the target variable y. If unsure, set epsilon=0. dual : bool, (default=True) Select the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features. tol : float, optional (default=1e-4) Tolerance for stopping criteria. fit_intercept : boolean, optional (default=True) Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (i.e. data is expected to be already centered). intercept_scaling : float, optional (default=1) When self.fit_intercept is True, instance vector x becomes [x, self.intercept_scaling], i.e. a "synthetic" feature with constant value equals to intercept_scaling is appended to the instance vector. The intercept becomes intercept_scaling * synthetic feature weight Note! the synthetic feature weight is subject to l1/l2 regularization as all other features. To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased. verbose : int, (default=0) Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in liblinear that, if enabled, may not work properly in a multithreaded context. random_state : int seed, RandomState instance, or None (default=None) The seed of the pseudo random number generator to use when shuffling the data. max_iter : int, (default=1000) The maximum number of iterations to be run. 
Attributes ---------- coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `raw_coef_` that follows the internal memory layout of liblinear. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. See also -------- LinearSVC Implementation of Support Vector Machine classifier using the same library as this class (liblinear). SVR Implementation of Support Vector Machine regression using libsvm: the kernel can be non-linear but its SMO algorithm does not scale to large number of samples as LinearSVC does. sklearn.linear_model.SGDRegressor SGDRegressor can optimize the same cost function as LinearSVR by adjusting the penalty and loss parameters. In addition it requires less memory, allows incremental (online) learning, and implements various loss functions and regularization regimes. """ def __init__(self, epsilon=0.0, tol=1e-4, C=1.0, loss='epsilon_insensitive', fit_intercept=True, intercept_scaling=1., dual=True, verbose=0, random_state=None, max_iter=1000): self.tol = tol self.C = C self.epsilon = epsilon self.fit_intercept = fit_intercept self.intercept_scaling = intercept_scaling self.verbose = verbose self.random_state = random_state self.max_iter = max_iter self.dual = dual self.loss = loss def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. y : array-like, shape = [n_samples] Target vector relative to X Returns ------- self : object Returns self. """ # FIXME Remove l1/l2 support in 1.0 ----------------------------------- loss_l = self.loss.lower() msg = ("loss='%s' has been deprecated in favor of " "loss='%s' as of 0.16. Backward compatibility" " for the loss='%s' will be removed in %s") # FIXME change loss_l --> self.loss after 0.18 if loss_l in ('l1', 'l2'): old_loss = self.loss self.loss = {'l1': 'epsilon_insensitive', 'l2': 'squared_epsilon_insensitive' }.get(loss_l) warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'), DeprecationWarning) # --------------------------------------------------------------------- if self.C < 0: raise ValueError("Penalty term must be positive; got (C=%r)" % self.C) X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64, order="C") penalty = 'l2' # SVR only accepts l2 penalty self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( X, y, self.C, self.fit_intercept, self.intercept_scaling, None, penalty, self.dual, self.verbose, self.max_iter, self.tol, self.random_state, loss=self.loss, epsilon=self.epsilon) self.coef_ = self.coef_.ravel() return self class SVC(BaseSVC): """C-Support Vector Classification. The implementation is based on libsvm. The fit time complexity is more than quadratic with the number of samples which makes it hard to scale to dataset with more than a couple of 10000 samples. The multiclass support is handled according to a one-vs-one scheme. For details on the precise mathematical formulation of the provided kernel functions and how `gamma`, `coef0` and `degree` affect each other, see the corresponding section in the narrative documentation: :ref:`svm_kernels`. Read more in the :ref:`User Guide <svm_classification>`. 
Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to pre-compute the kernel matrix from data matrices; that matrix should be an array of shape ``(n_samples, n_samples)``. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. probability : boolean, optional (default=False) Whether to enable probability estimates. This must be enabled prior to calling `fit`, and will slow down that method. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). class_weight : {dict, 'balanced'}, optional Set the parameter C of class i to class_weight[i]*C for SVC. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. decision_function_shape : 'ovo', 'ovr' or None, default=None Whether to return a one-vs-rest ('ovr') ecision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one ('ovo') decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2). The default of None will currently behave as 'ovo' for backward compatibility and raise a deprecation warning, but will change 'ovr' in 0.18. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data for probability estimation. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [n_SV, n_features] Support vectors. n_support_ : array-like, dtype=int32, shape = [n_class] Number of support vectors for each class. dual_coef_ : array, shape = [n_class-1, n_SV] Coefficients of the support vector in the decision function. For multiclass, coefficient for all 1-vs-1 classifiers. The layout of the coefficients in the multiclass case is somewhat non-trivial. See the section about multi-class classification in the SVM section of the User Guide for details. coef_ : array, shape = [n_class-1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is a readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [n_class * (n_class-1) / 2] Constants in decision function. 
Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> from sklearn.svm import SVC >>> clf = SVC() >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape=None, degree=3, gamma='auto', kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- SVR Support Vector Machine for Regression implemented using libsvm. LinearSVC Scalable Linear Support Vector Machine for classification implemented using liblinear. Check the See also section of LinearSVC for more comparison element. """ def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=False, tol=1e-3, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape=None, random_state=None): super(SVC, self).__init__( impl='c_svc', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking, probability=probability, cache_size=cache_size, class_weight=class_weight, verbose=verbose, max_iter=max_iter, decision_function_shape=decision_function_shape, random_state=random_state) class NuSVC(BaseSVC): """Nu-Support Vector Classification. Similar to SVC but uses a parameter to control the number of support vectors. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_classification>`. Parameters ---------- nu : float, optional (default=0.5) An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to precompute the kernel matrix. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. probability : boolean, optional (default=False) Whether to enable probability estimates. This must be enabled prior to calling `fit`, and will slow down that method. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). class_weight : {dict, 'auto'}, optional Set the parameter C of class i to class_weight[i]*C for SVC. If not given, all classes are supposed to have weight one. The 'auto' mode uses the values of y to automatically adjust weights inversely proportional to class frequencies. verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. 
decision_function_shape : 'ovo', 'ovr' or None, default=None Whether to return a one-vs-rest ('ovr') ecision function of shape (n_samples, n_classes) as all other classifiers, or the original one-vs-one ('ovo') decision function of libsvm which has shape (n_samples, n_classes * (n_classes - 1) / 2). The default of None will currently behave as 'ovo' for backward compatibility and raise a deprecation warning, but will change 'ovr' in 0.18. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data for probability estimation. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [n_SV, n_features] Support vectors. n_support_ : array-like, dtype=int32, shape = [n_class] Number of support vectors for each class. dual_coef_ : array, shape = [n_class-1, n_SV] Coefficients of the support vector in the decision function. For multiclass, coefficient for all 1-vs-1 classifiers. The layout of the coefficients in the multiclass case is somewhat non-trivial. See the section about multi-class classification in the SVM section of the User Guide for details. coef_ : array, shape = [n_class-1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [n_class * (n_class-1) / 2] Constants in decision function. Examples -------- >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> from sklearn.svm import NuSVC >>> clf = NuSVC() >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE NuSVC(cache_size=200, class_weight=None, coef0=0.0, decision_function_shape=None, degree=3, gamma='auto', kernel='rbf', max_iter=-1, nu=0.5, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- SVC Support Vector Machine for classification using libsvm. LinearSVC Scalable linear Support Vector Machine for classification using liblinear. """ def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=False, tol=1e-3, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape=None, random_state=None): super(NuSVC, self).__init__( impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking, probability=probability, cache_size=cache_size, class_weight=class_weight, verbose=verbose, max_iter=max_iter, decision_function_shape=decision_function_shape, random_state=random_state) class SVR(BaseLibSVM, RegressorMixin): """Epsilon-Support Vector Regression. The free parameters in the model are C and epsilon. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_regression>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. epsilon : float, optional (default=0.1) Epsilon in the epsilon-SVR model. It specifies the epsilon-tube within which no penalty is associated in the training loss function with points predicted within a distance epsilon from the actual value. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. 
If a callable is given it is used to precompute the kernel matrix. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [nSV, n_features] Support vectors. dual_coef_ : array, shape = [1, n_SV] Coefficients of the support vector in the decision function. coef_ : array, shape = [1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [1] Constants in decision function. Examples -------- >>> from sklearn.svm import SVR >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = SVR(C=1.0, epsilon=0.2) >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto', kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False) See also -------- NuSVR Support Vector Machine for regression implemented using libsvm using a parameter to control the number of support vectors. LinearSVR Scalable Linear Support Vector Machine for regression implemented using liblinear. """ def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=1e-3, C=1.0, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=-1): super(SVR, self).__init__( 'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose, shrinking=shrinking, probability=False, cache_size=cache_size, class_weight=None, max_iter=max_iter, random_state=None) class NuSVR(BaseLibSVM, RegressorMixin): """Nu Support Vector Regression. Similar to NuSVC, for regression, uses a parameter nu to control the number of support vectors. However, unlike NuSVC, where nu replaces C, here nu replaces the parameter epsilon of epsilon-SVR. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_regression>`. Parameters ---------- C : float, optional (default=1.0) Penalty parameter C of the error term. nu : float, optional An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken. kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. 
If a callable is given it is used to precompute the kernel matrix. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. shrinking : boolean, optional (default=True) Whether to use the shrinking heuristic. tol : float, optional (default=1e-3) Tolerance for stopping criterion. cache_size : float, optional Specify the size of the kernel cache (in MB). verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [nSV, n_features] Support vectors. dual_coef_ : array, shape = [1, n_SV] Coefficients of the support vector in the decision function. coef_ : array, shape = [1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_`. intercept_ : array, shape = [1] Constants in decision function. Examples -------- >>> from sklearn.svm import NuSVR >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> y = np.random.randn(n_samples) >>> X = np.random.randn(n_samples, n_features) >>> clf = NuSVR(C=1.0, nu=0.1) >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto', kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001, verbose=False) See also -------- NuSVC Support Vector Machine for classification implemented with libsvm with a parameter to control the number of support vectors. SVR epsilon Support Vector Machine for regression implemented with libsvm. """ def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, tol=1e-3, cache_size=200, verbose=False, max_iter=-1): super(NuSVR, self).__init__( 'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking, probability=False, cache_size=cache_size, class_weight=None, verbose=verbose, max_iter=max_iter, random_state=None) class OneClassSVM(BaseLibSVM): """Unsupervised Outlier Detection. Estimate the support of a high-dimensional distribution. The implementation is based on libsvm. Read more in the :ref:`User Guide <svm_outlier_detection>`. Parameters ---------- kernel : string, optional (default='rbf') Specifies the kernel type to be used in the algorithm. It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable. If none is given, 'rbf' will be used. If a callable is given it is used to precompute the kernel matrix. nu : float, optional An upper bound on the fraction of training errors and a lower bound of the fraction of support vectors. Should be in the interval (0, 1]. By default 0.5 will be taken. degree : int, optional (default=3) Degree of the polynomial kernel function ('poly'). Ignored by all other kernels. 
gamma : float, optional (default='auto') Kernel coefficient for 'rbf', 'poly' and 'sigmoid'. If gamma is 'auto' then 1/n_features will be used instead. coef0 : float, optional (default=0.0) Independent term in kernel function. It is only significant in 'poly' and 'sigmoid'. tol : float, optional Tolerance for stopping criterion. shrinking : boolean, optional Whether to use the shrinking heuristic. cache_size : float, optional Specify the size of the kernel cache (in MB). verbose : bool, default: False Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm that, if enabled, may not work properly in a multithreaded context. max_iter : int, optional (default=-1) Hard limit on iterations within solver, or -1 for no limit. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data for probability estimation. Attributes ---------- support_ : array-like, shape = [n_SV] Indices of support vectors. support_vectors_ : array-like, shape = [nSV, n_features] Support vectors. dual_coef_ : array, shape = [n_classes-1, n_SV] Coefficients of the support vectors in the decision function. coef_ : array, shape = [n_classes-1, n_features] Weights assigned to the features (coefficients in the primal problem). This is only available in the case of a linear kernel. `coef_` is readonly property derived from `dual_coef_` and `support_vectors_` intercept_ : array, shape = [n_classes-1] Constants in decision function. """ def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0, tol=1e-3, nu=0.5, shrinking=True, cache_size=200, verbose=False, max_iter=-1, random_state=None): super(OneClassSVM, self).__init__( 'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking, False, cache_size, None, verbose, max_iter, random_state) def fit(self, X, y=None, sample_weight=None, **params): """ Detects the soft boundary of the set of samples X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Set of samples, where n_samples is the number of samples and n_features is the number of features. sample_weight : array-like, shape (n_samples,) Per-sample weights. Rescale C per sample. Higher weights force the classifier to put more emphasis on these points. Returns ------- self : object Returns self. Notes ----- If X is not a C-ordered contiguous array it is copied. """ super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight, **params) return self def decision_function(self, X): """Distance of the samples X to the separating hyperplane. Parameters ---------- X : array-like, shape (n_samples, n_features) Returns ------- X : array-like, shape (n_samples,) Returns the decision function of the samples. """ dec = self._decision_function(X) return dec
bsd-3-clause
bnaul/scikit-learn
sklearn/neighbors/_nca.py
10
20696
# coding: utf-8 """ Neighborhood Component Analysis """ # Authors: William de Vazelhes <wdevazelhes@gmail.com> # John Chiotellis <ioannis.chiotellis@in.tum.de> # License: BSD 3 clause from __future__ import print_function from warnings import warn import numpy as np import sys import time import numbers from scipy.optimize import minimize from ..utils.extmath import softmax from ..metrics import pairwise_distances from ..base import BaseEstimator, TransformerMixin from ..preprocessing import LabelEncoder from ..decomposition import PCA from ..utils.multiclass import check_classification_targets from ..utils.random import check_random_state from ..utils.validation import check_is_fitted, check_array, check_scalar from ..utils.validation import _deprecate_positional_args from ..exceptions import ConvergenceWarning class NeighborhoodComponentsAnalysis(TransformerMixin, BaseEstimator): """Neighborhood Components Analysis Neighborhood Component Analysis (NCA) is a machine learning algorithm for metric learning. It learns a linear transformation in a supervised fashion to improve the classification accuracy of a stochastic nearest neighbors rule in the transformed space. Read more in the :ref:`User Guide <nca>`. Parameters ---------- n_components : int, default=None Preferred dimensionality of the projected space. If None it will be set to ``n_features``. init : {'auto', 'pca', 'lda', 'identity', 'random'} or ndarray of shape \ (n_features_a, n_features_b), default='auto' Initialization of the linear transformation. Possible options are 'auto', 'pca', 'lda', 'identity', 'random', and a numpy array of shape (n_features_a, n_features_b). 'auto' Depending on ``n_components``, the most reasonable initialization will be chosen. If ``n_components <= n_classes`` we use 'lda', as it uses labels information. If not, but ``n_components < min(n_features, n_samples)``, we use 'pca', as it projects data in meaningful directions (those of higher variance). Otherwise, we just use 'identity'. 'pca' ``n_components`` principal components of the inputs passed to :meth:`fit` will be used to initialize the transformation. (See :class:`~sklearn.decomposition.PCA`) 'lda' ``min(n_components, n_classes)`` most discriminative components of the inputs passed to :meth:`fit` will be used to initialize the transformation. (If ``n_components > n_classes``, the rest of the components will be zero.) (See :class:`~sklearn.discriminant_analysis.LinearDiscriminantAnalysis`) 'identity' If ``n_components`` is strictly smaller than the dimensionality of the inputs passed to :meth:`fit`, the identity matrix will be truncated to the first ``n_components`` rows. 'random' The initial transformation will be a random array of shape `(n_components, n_features)`. Each value is sampled from the standard normal distribution. numpy array n_features_b must match the dimensionality of the inputs passed to :meth:`fit` and n_features_a must be less than or equal to that. If ``n_components`` is not None, n_features_a must match it. warm_start : bool, default=False If True and :meth:`fit` has been called before, the solution of the previous call to :meth:`fit` is used as the initial linear transformation (``n_components`` and ``init`` will be ignored). max_iter : int, default=50 Maximum number of iterations in the optimization. tol : float, default=1e-5 Convergence tolerance for the optimization. 
callback : callable, default=None If not None, this function is called after every iteration of the optimizer, taking as arguments the current solution (flattened transformation matrix) and the number of iterations. This might be useful in case one wants to examine or store the transformation found after each iteration. verbose : int, default=0 If 0, no progress messages will be printed. If 1, progress messages will be printed to stdout. If > 1, progress messages will be printed and the ``disp`` parameter of :func:`scipy.optimize.minimize` will be set to ``verbose - 2``. random_state : int or numpy.RandomState, default=None A pseudo random number generator object or a seed for it if int. If ``init='random'``, ``random_state`` is used to initialize the random transformation. If ``init='pca'``, ``random_state`` is passed as an argument to PCA when initializing the transformation. Pass an int for reproducible results across multiple function calls. See :term: `Glossary <random_state>`. Attributes ---------- components_ : ndarray of shape (n_components, n_features) The linear transformation learned during fitting. n_iter_ : int Counts the number of iterations performed by the optimizer. random_state_ : numpy.RandomState Pseudo random number generator object used during initialization. Examples -------- >>> from sklearn.neighbors import NeighborhoodComponentsAnalysis >>> from sklearn.neighbors import KNeighborsClassifier >>> from sklearn.datasets import load_iris >>> from sklearn.model_selection import train_test_split >>> X, y = load_iris(return_X_y=True) >>> X_train, X_test, y_train, y_test = train_test_split(X, y, ... stratify=y, test_size=0.7, random_state=42) >>> nca = NeighborhoodComponentsAnalysis(random_state=42) >>> nca.fit(X_train, y_train) NeighborhoodComponentsAnalysis(...) >>> knn = KNeighborsClassifier(n_neighbors=3) >>> knn.fit(X_train, y_train) KNeighborsClassifier(...) >>> print(knn.score(X_test, y_test)) 0.933333... >>> knn.fit(nca.transform(X_train), y_train) KNeighborsClassifier(...) >>> print(knn.score(nca.transform(X_test), y_test)) 0.961904... References ---------- .. [1] J. Goldberger, G. Hinton, S. Roweis, R. Salakhutdinov. "Neighbourhood Components Analysis". Advances in Neural Information Processing Systems. 17, 513-520, 2005. http://www.cs.nyu.edu/~roweis/papers/ncanips.pdf .. [2] Wikipedia entry on Neighborhood Components Analysis https://en.wikipedia.org/wiki/Neighbourhood_components_analysis """ @_deprecate_positional_args def __init__(self, n_components=None, *, init='auto', warm_start=False, max_iter=50, tol=1e-5, callback=None, verbose=0, random_state=None): self.n_components = n_components self.init = init self.warm_start = warm_start self.max_iter = max_iter self.tol = tol self.callback = callback self.verbose = verbose self.random_state = random_state def fit(self, X, y): """Fit the model according to the given training data. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- self : object returns a trained NeighborhoodComponentsAnalysis model. 
""" # Verify inputs X and y and NCA parameters, and transform a copy if # needed X, y, init = self._validate_params(X, y) # Initialize the random generator self.random_state_ = check_random_state(self.random_state) # Measure the total training time t_train = time.time() # Compute a mask that stays fixed during optimization: same_class_mask = y[:, np.newaxis] == y[np.newaxis, :] # (n_samples, n_samples) # Initialize the transformation transformation = self._initialize(X, y, init) # Create a dictionary of parameters to be passed to the optimizer disp = self.verbose - 2 if self.verbose > 1 else -1 optimizer_params = {'method': 'L-BFGS-B', 'fun': self._loss_grad_lbfgs, 'args': (X, same_class_mask, -1.0), 'jac': True, 'x0': transformation, 'tol': self.tol, 'options': dict(maxiter=self.max_iter, disp=disp), 'callback': self._callback } # Call the optimizer self.n_iter_ = 0 opt_result = minimize(**optimizer_params) # Reshape the solution found by the optimizer self.components_ = opt_result.x.reshape(-1, X.shape[1]) # Stop timer t_train = time.time() - t_train if self.verbose: cls_name = self.__class__.__name__ # Warn the user if the algorithm did not converge if not opt_result.success: warn('[{}] NCA did not converge: {}'.format( cls_name, opt_result.message), ConvergenceWarning) print('[{}] Training took {:8.2f}s.'.format(cls_name, t_train)) return self def transform(self, X): """Applies the learned transformation to the given data. Parameters ---------- X : array-like of shape (n_samples, n_features) Data samples. Returns ------- X_embedded: ndarray of shape (n_samples, n_components) The data samples transformed. Raises ------ NotFittedError If :meth:`fit` has not been called before. """ check_is_fitted(self) X = check_array(X) return np.dot(X, self.components_.T) def _validate_params(self, X, y): """Validate parameters as soon as :meth:`fit` is called. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The corresponding training labels. Returns ------- X : ndarray of shape (n_samples, n_features) The validated training samples. y : ndarray of shape (n_samples,) The validated training labels, encoded to be integers in the range(0, n_classes). init : str or ndarray of shape (n_features_a, n_features_b) The validated initialization of the linear transformation. Raises ------- TypeError If a parameter is not an instance of the desired type. ValueError If a parameter's value violates its legal value range or if the combination of two or more given parameters is incompatible. """ # Validate the inputs X and y, and converts y to numerical classes. X, y = self._validate_data(X, y, ensure_min_samples=2) check_classification_targets(y) y = LabelEncoder().fit_transform(y) # Check the preferred dimensionality of the projected space if self.n_components is not None: check_scalar( self.n_components, 'n_components', numbers.Integral, min_val=1) if self.n_components > X.shape[1]: raise ValueError('The preferred dimensionality of the ' 'projected space `n_components` ({}) cannot ' 'be greater than the given data ' 'dimensionality ({})!' .format(self.n_components, X.shape[1])) # If warm_start is enabled, check that the inputs are consistent check_scalar(self.warm_start, 'warm_start', bool) if self.warm_start and hasattr(self, 'components_'): if self.components_.shape[1] != X.shape[1]: raise ValueError('The new inputs dimensionality ({}) does not ' 'match the input dimensionality of the ' 'previously learned transformation ({}).' 
.format(X.shape[1], self.components_.shape[1])) check_scalar(self.max_iter, 'max_iter', numbers.Integral, min_val=1) check_scalar(self.tol, 'tol', numbers.Real, min_val=0.) check_scalar(self.verbose, 'verbose', numbers.Integral, min_val=0) if self.callback is not None: if not callable(self.callback): raise ValueError('`callback` is not callable.') # Check how the linear transformation should be initialized init = self.init if isinstance(init, np.ndarray): init = check_array(init) # Assert that init.shape[1] = X.shape[1] if init.shape[1] != X.shape[1]: raise ValueError( 'The input dimensionality ({}) of the given ' 'linear transformation `init` must match the ' 'dimensionality of the given inputs `X` ({}).' .format(init.shape[1], X.shape[1])) # Assert that init.shape[0] <= init.shape[1] if init.shape[0] > init.shape[1]: raise ValueError( 'The output dimensionality ({}) of the given ' 'linear transformation `init` cannot be ' 'greater than its input dimensionality ({}).' .format(init.shape[0], init.shape[1])) if self.n_components is not None: # Assert that self.n_components = init.shape[0] if self.n_components != init.shape[0]: raise ValueError('The preferred dimensionality of the ' 'projected space `n_components` ({}) does' ' not match the output dimensionality of ' 'the given linear transformation ' '`init` ({})!' .format(self.n_components, init.shape[0])) elif init in ['auto', 'pca', 'lda', 'identity', 'random']: pass else: raise ValueError( "`init` must be 'auto', 'pca', 'lda', 'identity', 'random' " "or a numpy array of shape (n_components, n_features).") return X, y, init def _initialize(self, X, y, init): """Initialize the transformation. Parameters ---------- X : array-like of shape (n_samples, n_features) The training samples. y : array-like of shape (n_samples,) The training labels. init : str or ndarray of shape (n_features_a, n_features_b) The validated initialization of the linear transformation. Returns ------- transformation : ndarray of shape (n_components, n_features) The initialized linear transformation. """ transformation = init if self.warm_start and hasattr(self, 'components_'): transformation = self.components_ elif isinstance(init, np.ndarray): pass else: n_samples, n_features = X.shape n_components = self.n_components or n_features if init == 'auto': n_classes = len(np.unique(y)) if n_components <= min(n_features, n_classes - 1): init = 'lda' elif n_components < min(n_features, n_samples): init = 'pca' else: init = 'identity' if init == 'identity': transformation = np.eye(n_components, X.shape[1]) elif init == 'random': transformation = self.random_state_.randn(n_components, X.shape[1]) elif init in {'pca', 'lda'}: init_time = time.time() if init == 'pca': pca = PCA(n_components=n_components, random_state=self.random_state_) if self.verbose: print('Finding principal components... ', end='') sys.stdout.flush() pca.fit(X) transformation = pca.components_ elif init == 'lda': from ..discriminant_analysis import ( LinearDiscriminantAnalysis) lda = LinearDiscriminantAnalysis(n_components=n_components) if self.verbose: print('Finding most discriminative components... ', end='') sys.stdout.flush() lda.fit(X, y) transformation = lda.scalings_.T[:n_components] if self.verbose: print('done in {:5.2f}s'.format(time.time() - init_time)) return transformation def _callback(self, transformation): """Called after each iteration of the optimizer. 
Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The solution computed by the optimizer in this iteration. """ if self.callback is not None: self.callback(transformation, self.n_iter_) self.n_iter_ += 1 def _loss_grad_lbfgs(self, transformation, X, same_class_mask, sign=1.0): """Compute the loss and the loss gradient w.r.t. ``transformation``. Parameters ---------- transformation : ndarray of shape (n_components * n_features,) The raveled linear transformation on which to compute loss and evaluate gradient. X : ndarray of shape (n_samples, n_features) The training samples. same_class_mask : ndarray of shape (n_samples, n_samples) A mask where ``mask[i, j] == 1`` if ``X[i]`` and ``X[j]`` belong to the same class, and ``0`` otherwise. Returns ------- loss : float The loss computed for the given transformation. gradient : ndarray of shape (n_components * n_features,) The new (flattened) gradient of the loss. """ if self.n_iter_ == 0: self.n_iter_ += 1 if self.verbose: header_fields = ['Iteration', 'Objective Value', 'Time(s)'] header_fmt = '{:>10} {:>20} {:>10}' header = header_fmt.format(*header_fields) cls_name = self.__class__.__name__ print('[{}]'.format(cls_name)) print('[{}] {}\n[{}] {}'.format(cls_name, header, cls_name, '-' * len(header))) t_funcall = time.time() transformation = transformation.reshape(-1, X.shape[1]) X_embedded = np.dot(X, transformation.T) # (n_samples, n_components) # Compute softmax distances p_ij = pairwise_distances(X_embedded, squared=True) np.fill_diagonal(p_ij, np.inf) p_ij = softmax(-p_ij) # (n_samples, n_samples) # Compute loss masked_p_ij = p_ij * same_class_mask p = np.sum(masked_p_ij, axis=1, keepdims=True) # (n_samples, 1) loss = np.sum(p) # Compute gradient of loss w.r.t. `transform` weighted_p_ij = masked_p_ij - p_ij * p weighted_p_ij_sym = weighted_p_ij + weighted_p_ij.T np.fill_diagonal(weighted_p_ij_sym, -weighted_p_ij.sum(axis=0)) gradient = 2 * X_embedded.T.dot(weighted_p_ij_sym).dot(X) # time complexity of the gradient: O(n_components x n_samples x ( # n_samples + n_features)) if self.verbose: t_funcall = time.time() - t_funcall values_fmt = '[{}] {:>10} {:>20.6e} {:>10.2f}' print(values_fmt.format(self.__class__.__name__, self.n_iter_, loss, t_funcall)) sys.stdout.flush() return sign * loss, sign * gradient.ravel() def _more_tags(self): return {'requires_y': True}
bsd-3-clause
nanr1987/gradient_descent
sgd_logreg.py
1
2936
# Topic - Logistic regression using Stochastic gradient descent
# Author - Nandhini
# Date - 10/27/2016

import sys
import datetime

'''
#Dataset - Linear regression
x= [1,2,3,4,5]
y= [1,3,2,3,5]
'''

#Dataset - Logistic regression
x= [0.1,0.7,2,2.4,2.5]
y= [0,0,1,1,1]

'''
#Plot the data once to visualize
import matplotlib.pyplot as plt
plt.plot(x,y)
plt.axis([0,3,-1,2])
plt.show()
'''

# The data points appear like they can be modelled using linear regression
# Consider the simple linear regression equation
# y=b0+b1*x
# where
# y is the dependent variable
# x is the independent variable
# b0 is the intercept
# b1 is the slope
# For clarity, let us denote the actual value of y as y and the predicted value of y as p throughout the code

# Problem definition
# Our aim here is to find the best possible combination of b0 & b1 so that the error between the actual and predicted value of y is as low as possible
# We will use the stochastic gradient descent approach that incrementally adjusts weights over every row
# The regular gradient descent runs through all the data before adjusting the weights every time

# Function definition
class SGD:
    def error_fn(self,p,y):
        err=p-y
        return err

    def predict(self,b0,b1,x):
        p=b0+(b1*x)
        return p

    def new_b0(self,b0,alpha,err):
        b0_new=b0-(alpha*err)
        return b0_new

    def new_b1(self,b1,alpha,err,x):
        b1_new=b1-(alpha*err*x)
        return b1_new

#Logic
# Begin by assigning random values for b0 & b1
b0=0
b1=0
# Assign learning rate alpha
alpha=0.02
# Create object for class SGD
sgd_obj=SGD()

# Compute predicted y -> p incrementally
print datetime.datetime.now()
for j in range(0,4): # 4 is the number of epochs required. An epoch is one pass through the entire dataset. This is the condition for termination
    print "epoch: ",j
    print "starts: ", b0,b1
    p=[]
    for i in range(0,len(x)):
        print b0,b1
        p.append(b0+(b1*x[i]))
        err=sgd_obj.error_fn(p[i],y[i])
        #Simultaneously update b0 & b1 before using them in the next iteration
        b0_new=sgd_obj.new_b0(b0,alpha,err)
        b1_new=sgd_obj.new_b1(b1,alpha,err,x[i])
        b0=b0_new
        b1=b1_new
    j+=1
print datetime.datetime.now()

# Solve linear equation using learned coefficients
final_p=[]
for i in range(0,len(x)):
    final_p.append(sgd_obj.predict(b0,b1,x[i]))

prob=[]
#Calculate probabilities using final_p in the sigmoid function 1/(1+e^-z)
import math
for i in range(0,len(x)):
    exp_minus_x=math.exp(-(round(final_p[i])))
    proba=1/(1+exp_minus_x)
    prob.append(proba)

#Create final array of class predictions 0 or 1
pred=[]
for i in range(0,len(x)):
    if final_p[i]>0.5:
        pred.append(1)
    else:
        pred.append(0)

# Plot original y against predicted probabilities and classes
import matplotlib.pyplot as plt
plt.plot(x,y)
plt.plot(x,prob)
plt.plot(x,pred)
plt.axis([0,6,0,6])
plt.show()
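# ---------------------------------------------------------------------------
# Illustrative sketch added by the editor (not part of the original script):
# the same per-sample update loop packaged as a small reusable function, under
# the same assumptions as above (alpha=0.02, 4 epochs). Names are illustrative.
def sgd_fit(x_vals, y_vals, alpha=0.02, epochs=4):
    b0, b1 = 0.0, 0.0
    for _ in range(epochs):
        for xi, yi in zip(x_vals, y_vals):
            err = (b0 + b1 * xi) - yi   # prediction error for this sample
            b0 -= alpha * err           # intercept update
            b1 -= alpha * err * xi      # slope update
    return b0, b1

# Example usage with the toy data defined above:
# print sgd_fit(x, y)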
gpl-3.0
tracierenea/gnuradio
gr-fec/python/fec/polar/decoder.py
24
10396
#!/usr/bin/env python # # Copyright 2015 Free Software Foundation, Inc. # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import numpy as np from common import PolarCommon # for dev from encoder import PolarEncoder from matplotlib import pyplot as plt class PolarDecoder(PolarCommon): def __init__(self, n, k, frozen_bit_position, frozenbits=None): PolarCommon.__init__(self, n, k, frozen_bit_position, frozenbits) self.error_probability = 0.1 # this is kind of a dummy value. usually chosen individually. self.lrs = ((1 - self.error_probability) / self.error_probability, self.error_probability / (1 - self.error_probability)) self.llrs = np.log(self.lrs) def _llr_bit(self, bit): return self.llrs[bit] def _llr_odd(self, la, lb): # this functions uses the min-sum approximation # exact formula: np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb))) return np.sign(la) * np.sign(lb) * np.minimum(np.abs(la), np.abs(lb)) _f_vals = np.array((1.0, -1.0), dtype=float) def _llr_even(self, la, lb, f): return (la * self._f_vals[f]) + lb def _llr_bit_decision(self, llr): if llr < 0.0: ui = int(1) else: ui = int(0) return ui def _retrieve_bit_from_llr(self, lr, pos): f_index = np.where(self.frozen_bit_position == pos)[0] if not f_index.size == 0: ui = self.frozenbits[f_index][0] else: ui = self._llr_bit_decision(lr) return ui def _lr_bit(self, bit): return self.lrs[bit] def _lr_odd(self, la, lb): # la is upper branch and lb is lower branch return (la * lb + 1) / (la + lb) def _lr_even(self, la, lb, f): # la is upper branch and lb is lower branch, f is last decoded bit. return (la ** (1 - (2 * f))) * lb def _lr_bit_decision(self, lr): if lr < 1: return int(1) return int(0) def _get_even_indices_values(self, u_hat): # looks like overkill for some indexing, but zero and one based indexing mix-up gives you haedaches. return u_hat[1::2] def _get_odd_indices_values(self, u_hat): return u_hat[0::2] def _calculate_lrs(self, y, u): ue = self._get_even_indices_values(u) uo = self._get_odd_indices_values(u) ya = y[0:y.size//2] yb = y[(y.size//2):] la = self._lr_decision_element(ya, (ue + uo) % 2) lb = self._lr_decision_element(yb, ue) return la, lb def _lr_decision_element(self, y, u): if y.size == 1: return self._llr_bit(y[0]) if u.size % 2 == 0: # use odd branch formula la, lb = self._calculate_lrs(y, u) return self._llr_odd(la, lb) else: ui = u[-1] la, lb = self._calculate_lrs(y, u[0:-1]) return self._llr_even(la, lb, ui) def _retrieve_bit_from_lr(self, lr, pos): f_index = np.where(self.frozen_bit_position == pos)[0] if not f_index.size == 0: ui = self.frozenbits[f_index][0] else: ui = self._lr_bit_decision(lr) return ui def _lr_sc_decoder(self, y): # this is the standard SC decoder as derived from the formulas. It sticks to natural bit order. 
u = np.array([], dtype=int) for i in range(y.size): lr = self._lr_decision_element(y, u) ui = self._retrieve_bit_from_llr(lr, i) u = np.append(u, ui) return u def _llr_retrieve_bit(self, llr, pos): f_index = np.where(self.frozen_bit_position == pos)[0] if not f_index.size == 0: ui = self.frozenbits[f_index][0] else: ui = self._llr_bit_decision(llr) return ui def _butterfly_decode_bits(self, pos, graph, u): bit_num = u.size llr = graph[pos][0] ui = self._llr_retrieve_bit(llr, bit_num) # ui = self._llr_bit_decision(llr) u = np.append(u, ui) lower_right = pos + (self.N // 2) la = graph[pos][1] lb = graph[lower_right][1] graph[lower_right][0] = self._llr_even(la, lb, ui) llr = graph[lower_right][0] # ui = self._llr_bit_decision(llr) ui = self._llr_retrieve_bit(llr, u.size) u = np.append(u, ui) return graph, u def _lr_sc_decoder_efficient(self, y): graph = np.full((self.N, self.power + 1), np.NaN, dtype=float) for i in range(self.N): graph[i][self.power] = self._llr_bit(y[i]) decode_order = self._vector_bit_reversed(np.arange(self.N), self.power) decode_order = np.delete(decode_order, np.where(decode_order >= self.N // 2)) u = np.array([], dtype=int) for pos in decode_order: graph = self._butterfly(pos, 0, graph, u) graph, u = self._butterfly_decode_bits(pos, graph, u) return u def _stop_propagation(self, bf_entry_row, stage): # calculate break condition modulus = 2 ** (self.power - stage) # stage_size = self.N // (2 ** stage) # half_stage_size = stage_size // 2 half_stage_size = self.N // (2 ** (stage + 1)) stage_pos = bf_entry_row % modulus return stage_pos >= half_stage_size def _butterfly(self, bf_entry_row, stage, graph, u): if not self.power > stage: return graph if self._stop_propagation(bf_entry_row, stage): upper_right = bf_entry_row - self.N // (2 ** (stage + 1)) la = graph[upper_right][stage + 1] lb = graph[bf_entry_row][stage + 1] ui = u[-1] graph[bf_entry_row][stage] = self._llr_even(la, lb, ui) return graph # activate right side butterflies u_even = self._get_even_indices_values(u) u_odd = self._get_odd_indices_values(u) graph = self._butterfly(bf_entry_row, stage + 1, graph, (u_even + u_odd) % 2) lower_right = bf_entry_row + self.N // (2 ** (stage + 1)) graph = self._butterfly(lower_right, stage + 1, graph, u_even) la = graph[bf_entry_row][stage + 1] lb = graph[lower_right][stage + 1] graph[bf_entry_row][stage] = self._llr_odd(la, lb) return graph def decode(self, data, is_packed=False): if not len(data) == self.N: raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N)) if is_packed: data = np.unpackbits(data) data = self._lr_sc_decoder_efficient(data) data = self._extract_info_bits(data) if is_packed: data = np.packbits(data) return data def _extract_info_bits_reversed(self, y): info_bit_positions_reversed = self._vector_bit_reversed(self.info_bit_position, self.power) return y[info_bit_positions_reversed] def decode_systematic(self, data): if not len(data) == self.N: raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N)) # data = self._reverse_bits(data) data = self._lr_sc_decoder_efficient(data) data = self._encode_natural_order(data) data = self._extract_info_bits_reversed(data) return data def test_systematic_decoder(): ntests = 1000 n = 16 k = 8 frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int) encoder = PolarEncoder(n, k, frozenbitposition) decoder = PolarDecoder(n, k, frozenbitposition) for i in range(ntests): bits = np.random.randint(2, size=k) y = encoder.encode_systematic(bits) u_hat = 
decoder.decode_systematic(y) assert (bits == u_hat).all() def test_reverse_enc_dec(): n = 16 k = 8 frozenbits = np.zeros(n - k) frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int) bits = np.random.randint(2, size=k) encoder = PolarEncoder(n, k, frozenbitposition, frozenbits) decoder = PolarDecoder(n, k, frozenbitposition, frozenbits) encoded = encoder.encode(bits) print 'encoded:', encoded rx = decoder.decode(encoded) print 'bits:', bits print 'rx :', rx print (bits == rx).all() def compare_decoder_impls(): print '\nthis is decoder test' n = 8 k = 4 frozenbits = np.zeros(n - k) # frozenbitposition16 = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int) frozenbitposition = np.array((0, 1, 2, 4), dtype=int) bits = np.random.randint(2, size=k) print 'bits:', bits encoder = PolarEncoder(n, k, frozenbitposition, frozenbits) decoder = PolarDecoder(n, k, frozenbitposition, frozenbits) encoded = encoder.encode(bits) print 'encoded:', encoded rx_st = decoder._lr_sc_decoder(encoded) rx_eff = decoder._lr_sc_decoder_efficient(encoded) print 'standard :', rx_st print 'efficient:', rx_eff print (rx_st == rx_eff).all() def main(): # power = 3 # n = 2 ** power # k = 4 # frozenbits = np.zeros(n - k, dtype=int) # frozenbitposition = np.array((0, 1, 2, 4), dtype=int) # frozenbitposition4 = np.array((0, 1), dtype=int) # # # encoder = PolarEncoder(n, k, frozenbitposition, frozenbits) # decoder = PolarDecoder(n, k, frozenbitposition, frozenbits) # # bits = np.ones(k, dtype=int) # print "bits: ", bits # evec = encoder.encode(bits) # print "froz: ", encoder._insert_frozen_bits(bits) # print "evec: ", evec # # evec[1] = 0 # deced = decoder._lr_sc_decoder(evec) # print 'SC decoded:', deced # # test_reverse_enc_dec() # compare_decoder_impls() test_systematic_decoder() if __name__ == '__main__': main()
gpl-3.0
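A minimal round-trip sketch for the polar code record above, mirroring its own test_reverse_enc_dec; the `decoder` module name is an assumption (the encoder import matches the file's own `from encoder import PolarEncoder`), and the decode is noise-free, so the assert is expected to hold.

import numpy as np
from encoder import PolarEncoder          # matches the import used in the file above
from decoder import PolarDecoder          # assumed module name for the decoder file

n, k = 16, 8
frozen_positions = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
frozen_bits = np.zeros(n - k)             # same frozen-bit layout as the tests

encoder = PolarEncoder(n, k, frozen_positions, frozen_bits)
decoder = PolarDecoder(n, k, frozen_positions, frozen_bits)

bits = np.random.randint(2, size=k)
codeword = encoder.encode(bits)
recovered = decoder.decode(codeword)      # efficient SC decoder under the hood
assert np.array_equal(bits, recovered)
print(recovered)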
sanketloke/scikit-learn
sklearn/utils/random.py
37
10511
# Author: Hamzeh Alsalhi <ha258@cornell.edu> # # License: BSD 3 clause from __future__ import division import numpy as np import scipy.sparse as sp import operator import array from sklearn.utils import check_random_state from sklearn.utils.fixes import astype from ._random import sample_without_replacement __all__ = ['sample_without_replacement', 'choice'] # This is a backport of np.random.choice from numpy 1.7 # The function can be removed when we bump the requirements to >=1.7 def choice(a, size=None, replace=True, p=None, random_state=None): """ choice(a, size=None, replace=True, p=None) Generates a random sample from a given 1-D array .. versionadded:: 1.7.0 Parameters ----------- a : 1-D array-like or int If an ndarray, a random sample is generated from its elements. If an int, the random sample is generated as if a was np.arange(n) size : int or tuple of ints, optional Output shape. Default is None, in which case a single value is returned. replace : boolean, optional Whether the sample is with or without replacement. p : 1-D array-like, optional The probabilities associated with each entry in a. If not given the sample assumes a uniform distribution over all entries in a. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns -------- samples : 1-D ndarray, shape (size,) The generated random samples Raises ------- ValueError If a is an int and less than zero, if a or p are not 1-dimensional, if a is an array-like of size 0, if p is not a vector of probabilities, if a and p have different lengths, or if replace=False and the sample size is greater than the population size See Also --------- randint, shuffle, permutation Examples --------- Generate a uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3) # doctest: +SKIP array([0, 3, 4]) >>> #This is equivalent to np.random.randint(0,5,3) Generate a non-uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP array([3, 3, 0]) Generate a uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False) # doctest: +SKIP array([3,1,0]) >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3] Generate a non-uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0]) ... # doctest: +SKIP array([2, 3, 0]) Any of the above can be repeated with an arbitrary array-like instead of just integers. For instance: >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher'] >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3]) ... # doctest: +SKIP array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], dtype='|S11') """ random_state = check_random_state(random_state) # Format and Verify input a = np.array(a, copy=False) if a.ndim == 0: try: # __index__ must return an integer by python rules. 
pop_size = operator.index(a.item()) except TypeError: raise ValueError("a must be 1-dimensional or an integer") if pop_size <= 0: raise ValueError("a must be greater than 0") elif a.ndim != 1: raise ValueError("a must be 1-dimensional") else: pop_size = a.shape[0] if pop_size is 0: raise ValueError("a must be non-empty") if None != p: p = np.array(p, dtype=np.double, ndmin=1, copy=False) if p.ndim != 1: raise ValueError("p must be 1-dimensional") if p.size != pop_size: raise ValueError("a and p must have same size") if np.any(p < 0): raise ValueError("probabilities are not non-negative") if not np.allclose(p.sum(), 1): raise ValueError("probabilities do not sum to 1") shape = size if shape is not None: size = np.prod(shape, dtype=np.intp) else: size = 1 # Actual sampling if replace: if None != p: cdf = p.cumsum() cdf /= cdf[-1] uniform_samples = random_state.random_sample(shape) idx = cdf.searchsorted(uniform_samples, side='right') # searchsorted returns a scalar idx = np.array(idx, copy=False) else: idx = random_state.randint(0, pop_size, size=shape) else: if size > pop_size: raise ValueError("Cannot take a larger sample than " "population when 'replace=False'") if None != p: if np.sum(p > 0) < size: raise ValueError("Fewer non-zero entries in p than size") n_uniq = 0 p = p.copy() found = np.zeros(shape, dtype=np.int) flat_found = found.ravel() while n_uniq < size: x = random_state.rand(size - n_uniq) if n_uniq > 0: p[flat_found[0:n_uniq]] = 0 cdf = np.cumsum(p) cdf /= cdf[-1] new = cdf.searchsorted(x, side='right') _, unique_indices = np.unique(new, return_index=True) unique_indices.sort() new = new.take(unique_indices) flat_found[n_uniq:n_uniq + new.size] = new n_uniq += new.size idx = found else: idx = random_state.permutation(pop_size)[:size] if shape is not None: idx.shape = shape if shape is None and isinstance(idx, np.ndarray): # In most cases a scalar will have been made an array idx = idx.item(0) # Use samples as indices for a if a is array-like if a.ndim == 0: return idx if shape is not None and idx.ndim == 0: # If size == () then the user requested a 0-d array as opposed to # a scalar object when size is None. However a[idx] is always a # scalar and not an array. So this makes sure the result is an # array, taking into account that np.array(item) may not work # for object arrays. res = np.empty((), dtype=a.dtype) res[()] = a[idx] return res return a[idx] def random_choice_csc(n_samples, classes, class_probability=None, random_state=None): """Generate a sparse random matrix given column class distributions Parameters ---------- n_samples : int, Number of samples to draw in each column. classes : list of size n_outputs of arrays of size (n_classes,) List of classes for each column. class_probability : list of size n_outputs of arrays of size (n_classes,) Optional (default=None). Class distribution of each column. If None the uniform distribution is assumed. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
Returns ------- random_matrix : sparse csc matrix of size (n_samples, n_outputs) """ data = array.array('i') indices = array.array('i') indptr = array.array('i', [0]) for j in range(len(classes)): classes[j] = np.asarray(classes[j]) if classes[j].dtype.kind != 'i': raise ValueError("class dtype %s is not supported" % classes[j].dtype) classes[j] = astype(classes[j], np.int64, copy=False) # use uniform distribution if no class_probability is given if class_probability is None: class_prob_j = np.empty(shape=classes[j].shape[0]) class_prob_j.fill(1 / classes[j].shape[0]) else: class_prob_j = np.asarray(class_probability[j]) if np.sum(class_prob_j) != 1.0: raise ValueError("Probability array at index {0} does not sum to " "one".format(j)) if class_prob_j.shape[0] != classes[j].shape[0]: raise ValueError("classes[{0}] (length {1}) and " "class_probability[{0}] (length {2}) have " "different length.".format(j, classes[j].shape[0], class_prob_j.shape[0])) # If 0 is not present in the classes insert it with a probability 0.0 if 0 not in classes[j]: classes[j] = np.insert(classes[j], 0, 0) class_prob_j = np.insert(class_prob_j, 0, 0.0) # If there are nonzero classes choose randomly using class_probability rng = check_random_state(random_state) if classes[j].shape[0] > 1: p_nonzero = 1 - class_prob_j[classes[j] == 0] nnz = int(n_samples * p_nonzero) ind_sample = sample_without_replacement(n_population=n_samples, n_samples=nnz, random_state=random_state) indices.extend(ind_sample) # Normalize probabilites for the nonzero elements classes_j_nonzero = classes[j] != 0 class_probability_nz = class_prob_j[classes_j_nonzero] class_probability_nz_norm = (class_probability_nz / np.sum(class_probability_nz)) classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(), rng.rand(nnz)) data.extend(classes[j][classes_j_nonzero][classes_ind]) indptr.append(len(indices)) return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
bsd-3-clause
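A hedged usage sketch for the two helpers in the record above (`choice` and `random_choice_csc`); it assumes a scikit-learn checkout of this vintage, and the concrete numbers are illustrative only.

import numpy as np
from sklearn.utils.random import choice, random_choice_csc

# Weighted sampling without replacement via the numpy 1.7 backport above.
draw = choice(5, size=3, replace=False,
              p=[0.1, 0.0, 0.3, 0.6, 0.0], random_state=0)
print(draw)

# One sparse output column whose nonzero labels follow the given class distribution.
mat = random_choice_csc(n_samples=10,
                        classes=[np.array([0, 1, 2])],
                        class_probability=[np.array([0.5, 0.25, 0.25])],
                        random_state=0)
print(mat.toarray().ravel())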
russel1237/scikit-learn
sklearn/neighbors/unsupervised.py
117
4755
"""Unsupervised nearest neighbors learner""" from .base import NeighborsBase from .base import KNeighborsMixin from .base import RadiusNeighborsMixin from .base import UnsupervisedMixin class NearestNeighbors(NeighborsBase, KNeighborsMixin, RadiusNeighborsMixin, UnsupervisedMixin): """Unsupervised learner for implementing neighbor searches. Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- n_neighbors : int, optional (default = 5) Number of neighbors to use by default for :meth:`k_neighbors` queries. radius : float, optional (default = 1.0) Range of parameter space to use by default for :meth`radius_neighbors` queries. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDtree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p: integer, optional (default = 2) Parameter for the Minkowski metric from sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric : string or callable, default 'minkowski' metric to use for distance computation. Any metric from scikit-learn or scipy.spatial.distance can be used. If metric is a callable function, it is called on each pair of instances (rows) and the resulting value recorded. The callable should take two arrays as input and return one value indicating the distance between them. This works for Scipy's metrics, but is less efficient than passing the metric name as a string. Distance matrices are not supported. Valid values for metric are: - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'] - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'] See the documentation for scipy.spatial.distance for details on these metrics. metric_params : dict, optional (default = None) Additional keyword arguments for the metric function. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods. Examples -------- >>> import numpy as np >>> from sklearn.neighbors import NearestNeighbors >>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]] >>> neigh = NearestNeighbors(2, 0.4) >>> neigh.fit(samples) #doctest: +ELLIPSIS NearestNeighbors(...) >>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False) ... #doctest: +ELLIPSIS array([[2, 0]]...) 
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False) >>> np.asarray(nbrs[0][0]) array(2) See also -------- KNeighborsClassifier RadiusNeighborsClassifier KNeighborsRegressor RadiusNeighborsRegressor BallTree Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, n_neighbors=5, radius=1.0, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, n_jobs=1, **kwargs): self._init_params(n_neighbors=n_neighbors, radius=radius, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs, **kwargs)
bsd-3-clause
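A hedged sketch of querying the estimator documented above with an explicit ball-tree backend; the sample points are made up for illustration.

import numpy as np
from sklearn.neighbors import NearestNeighbors

X = np.array([[0., 0.], [0., 1.], [1., 0.], [5., 5.]])
nn = NearestNeighbors(n_neighbors=2, radius=1.5, algorithm='ball_tree')
nn.fit(X)

dist, ind = nn.kneighbors([[0.5, 0.5]])                       # two closest samples
within = nn.radius_neighbors([[0.5, 0.5]], return_distance=False)
print(ind)          # indices of the two nearest rows of X
print(within[0])    # indices of all rows within radius 1.5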
CarterBain/AlephNull
alephnull/examples/dual_moving_average.py
1
3696
#!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import matplotlib.pyplot as plt

from alephnull.algorithm import TradingAlgorithm
from alephnull.finance import trading
from alephnull.transforms import MovingAverage
from alephnull.utils.factory import load_from_yahoo

from datetime import datetime
import pytz


class DualMovingAverage(TradingAlgorithm):
    """Dual Moving Average Crossover algorithm.

    This algorithm buys apple once its short moving average crosses
    its long moving average (indicating upwards momentum) and sells
    its shares once the averages cross again (indicating downwards
    momentum).
    """
    def initialize(self, short_window=20, long_window=40):
        # Add 2 mavg transforms, one with a long window, one
        # with a short window.
        self.add_transform(MovingAverage, 'short_mavg', ['price'],
                           window_length=short_window)
        self.add_transform(MovingAverage, 'long_mavg', ['price'],
                           window_length=long_window)

        # To keep track of whether we invested in the stock or not
        self.invested = False

    def handle_data(self, data):
        self.short_mavg = data['AAPL'].short_mavg['price']
        self.long_mavg = data['AAPL'].long_mavg['price']
        self.buy = False
        self.sell = False

        if self.short_mavg > self.long_mavg and not self.invested:
            self.order('AAPL', 5000)
            self.invested = True
            self.buy = True
        elif self.short_mavg < self.long_mavg and self.invested:
            self.order('AAPL', -5000)
            self.invested = False
            self.sell = True

        self.record(short_mavg=self.short_mavg,
                    long_mavg=self.long_mavg,
                    buy=self.buy,
                    sell=self.sell)


if __name__ == '__main__':
    start = datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
    end = datetime(1991, 1, 1, 0, 0, 0, 0, pytz.utc)
    data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start, end=end)

    dma = DualMovingAverage()
    results = dma.run(data)

    br = trading.environment.benchmark_returns
    bm_returns = br[(br.index >= start) & (br.index <= end)]
    results['benchmark_returns'] = (1 + bm_returns).cumprod().values
    results['algorithm_returns'] = (1 + results.returns).cumprod()
    fig = plt.figure()
    ax1 = fig.add_subplot(211, ylabel='cumulative returns')
    results[['algorithm_returns', 'benchmark_returns']].plot(ax=ax1,
                                                             sharex=True)
    ax2 = fig.add_subplot(212)
    data['AAPL'].plot(ax=ax2, color='r')
    results[['short_mavg', 'long_mavg']].plot(ax=ax2)

    ax2.plot(results.ix[results.buy].index, results.short_mavg[results.buy],
             '^', markersize=10, color='m')
    ax2.plot(results.ix[results.sell].index, results.short_mavg[results.sell],
             'v', markersize=10, color='k')
    plt.legend(loc=0)
    sharpe = [risk['sharpe'] for risk in dma.risk_report['one_month']]
    print "Monthly Sharpe ratios:", sharpe

    plt.gcf().set_size_inches(18, 8)
apache-2.0
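A hedged sketch of reusing the strategy above with tighter windows; the module name is hypothetical, and it assumes the TradingAlgorithm base class forwards constructor keywords to initialize() (as in zipline, from which AlephNull derives).

from datetime import datetime
import pytz

from alephnull.utils.factory import load_from_yahoo
from dual_moving_average import DualMovingAverage  # hypothetical module name

start = datetime(1992, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=['AAPL'], indexes={}, start=start, end=end)

# Shorter windows react faster to price swings at the cost of more trades.
dma = DualMovingAverage(short_window=10, long_window=30)
results = dma.run(data)
print(results[['short_mavg', 'long_mavg']].tail())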
ilo10/scikit-learn
sklearn/cluster/birch.py
207
22706
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com> # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Joel Nothman <joel.nothman@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy import sparse from math import sqrt from ..metrics.pairwise import euclidean_distances from ..base import TransformerMixin, ClusterMixin, BaseEstimator from ..externals.six.moves import xrange from ..utils import check_array from ..utils.extmath import row_norms, safe_sparse_dot from ..utils.validation import NotFittedError, check_is_fitted from .hierarchical import AgglomerativeClustering def _iterate_sparse_X(X): """This little hack returns a densified row when iterating over a sparse matrix, insted of constructing a sparse matrix for every row that is expensive. """ n_samples = X.shape[0] X_indices = X.indices X_data = X.data X_indptr = X.indptr for i in xrange(n_samples): row = np.zeros(X.shape[1]) startptr, endptr = X_indptr[i], X_indptr[i + 1] nonzero_indices = X_indices[startptr:endptr] row[nonzero_indices] = X_data[startptr:endptr] yield row def _split_node(node, threshold, branching_factor): """The node has to be split if there is no place for a new subcluster in the node. 1. Two empty nodes and two empty subclusters are initialized. 2. The pair of distant subclusters are found. 3. The properties of the empty subclusters and nodes are updated according to the nearest distance between the subclusters to the pair of distant subclusters. 4. The two nodes are set as children to the two subclusters. """ new_subcluster1 = _CFSubcluster() new_subcluster2 = _CFSubcluster() new_node1 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_node2 = _CFNode( threshold, branching_factor, is_leaf=node.is_leaf, n_features=node.n_features) new_subcluster1.child_ = new_node1 new_subcluster2.child_ = new_node2 if node.is_leaf: if node.prev_leaf_ is not None: node.prev_leaf_.next_leaf_ = new_node1 new_node1.prev_leaf_ = node.prev_leaf_ new_node1.next_leaf_ = new_node2 new_node2.prev_leaf_ = new_node1 new_node2.next_leaf_ = node.next_leaf_ if node.next_leaf_ is not None: node.next_leaf_.prev_leaf_ = new_node2 dist = euclidean_distances( node.centroids_, Y_norm_squared=node.squared_norm_, squared=True) n_clusters = dist.shape[0] farthest_idx = np.unravel_index( dist.argmax(), (n_clusters, n_clusters)) node1_dist, node2_dist = dist[[farthest_idx]] node1_closer = node1_dist < node2_dist for idx, subcluster in enumerate(node.subclusters_): if node1_closer[idx]: new_node1.append_subcluster(subcluster) new_subcluster1.update(subcluster) else: new_node2.append_subcluster(subcluster) new_subcluster2.update(subcluster) return new_subcluster1, new_subcluster2 class _CFNode(object): """Each node in a CFTree is called a CFNode. The CFNode can have a maximum of branching_factor number of CFSubclusters. Parameters ---------- threshold : float Threshold needed for a new subcluster to enter a CFSubcluster. branching_factor : int Maximum number of CF subclusters in each node. is_leaf : bool We need to know if the CFNode is a leaf or not, in order to retrieve the final subclusters. n_features : int The number of features. Attributes ---------- subclusters_ : array-like list of subclusters for a particular CFNode. prev_leaf_ : _CFNode prev_leaf. Useful only if is_leaf is True. next_leaf_ : _CFNode next_leaf. Useful only if is_leaf is True. the final subclusters. 
init_centroids_ : ndarray, shape (branching_factor + 1, n_features) manipulate ``init_centroids_`` throughout rather than centroids_ since the centroids are just a view of the ``init_centroids_`` . init_sq_norm_ : ndarray, shape (branching_factor + 1,) manipulate init_sq_norm_ throughout. similar to ``init_centroids_``. centroids_ : ndarray view of ``init_centroids_``. squared_norm_ : ndarray view of ``init_sq_norm_``. """ def __init__(self, threshold, branching_factor, is_leaf, n_features): self.threshold = threshold self.branching_factor = branching_factor self.is_leaf = is_leaf self.n_features = n_features # The list of subclusters, centroids and squared norms # to manipulate throughout. self.subclusters_ = [] self.init_centroids_ = np.zeros((branching_factor + 1, n_features)) self.init_sq_norm_ = np.zeros((branching_factor + 1)) self.squared_norm_ = [] self.prev_leaf_ = None self.next_leaf_ = None def append_subcluster(self, subcluster): n_samples = len(self.subclusters_) self.subclusters_.append(subcluster) self.init_centroids_[n_samples] = subcluster.centroid_ self.init_sq_norm_[n_samples] = subcluster.sq_norm_ # Keep centroids and squared norm as views. In this way # if we change init_centroids and init_sq_norm_, it is # sufficient, self.centroids_ = self.init_centroids_[:n_samples + 1, :] self.squared_norm_ = self.init_sq_norm_[:n_samples + 1] def update_split_subclusters(self, subcluster, new_subcluster1, new_subcluster2): """Remove a subcluster from a node and update it with the split subclusters. """ ind = self.subclusters_.index(subcluster) self.subclusters_[ind] = new_subcluster1 self.init_centroids_[ind] = new_subcluster1.centroid_ self.init_sq_norm_[ind] = new_subcluster1.sq_norm_ self.append_subcluster(new_subcluster2) def insert_cf_subcluster(self, subcluster): """Insert a new subcluster into the node.""" if not self.subclusters_: self.append_subcluster(subcluster) return False threshold = self.threshold branching_factor = self.branching_factor # We need to find the closest subcluster among all the # subclusters so that we can insert our new subcluster. dist_matrix = np.dot(self.centroids_, subcluster.centroid_) dist_matrix *= -2. dist_matrix += self.squared_norm_ closest_index = np.argmin(dist_matrix) closest_subcluster = self.subclusters_[closest_index] # If the subcluster has a child, we need a recursive strategy. if closest_subcluster.child_ is not None: split_child = closest_subcluster.child_.insert_cf_subcluster( subcluster) if not split_child: # If it is determined that the child need not be split, we # can just update the closest_subcluster closest_subcluster.update(subcluster) self.init_centroids_[closest_index] = \ self.subclusters_[closest_index].centroid_ self.init_sq_norm_[closest_index] = \ self.subclusters_[closest_index].sq_norm_ return False # things not too good. we need to redistribute the subclusters in # our child node, and add a new subcluster in the parent # subcluster to accomodate the new child. else: new_subcluster1, new_subcluster2 = _split_node( closest_subcluster.child_, threshold, branching_factor) self.update_split_subclusters( closest_subcluster, new_subcluster1, new_subcluster2) if len(self.subclusters_) > self.branching_factor: return True return False # good to go! 
else: merged = closest_subcluster.merge_subcluster( subcluster, self.threshold) if merged: self.init_centroids_[closest_index] = \ closest_subcluster.centroid_ self.init_sq_norm_[closest_index] = \ closest_subcluster.sq_norm_ return False # not close to any other subclusters, and we still # have space, so add. elif len(self.subclusters_) < self.branching_factor: self.append_subcluster(subcluster) return False # We do not have enough space nor is it closer to an # other subcluster. We need to split. else: self.append_subcluster(subcluster) return True class _CFSubcluster(object): """Each subcluster in a CFNode is called a CFSubcluster. A CFSubcluster can have a CFNode has its child. Parameters ---------- linear_sum : ndarray, shape (n_features,), optional Sample. This is kept optional to allow initialization of empty subclusters. Attributes ---------- n_samples_ : int Number of samples that belong to each subcluster. linear_sum_ : ndarray Linear sum of all the samples in a subcluster. Prevents holding all sample data in memory. squared_sum_ : float Sum of the squared l2 norms of all samples belonging to a subcluster. centroid_ : ndarray Centroid of the subcluster. Prevent recomputing of centroids when ``CFNode.centroids_`` is called. child_ : _CFNode Child Node of the subcluster. Once a given _CFNode is set as the child of the _CFNode, it is set to ``self.child_``. sq_norm_ : ndarray Squared norm of the subcluster. Used to prevent recomputing when pairwise minimum distances are computed. """ def __init__(self, linear_sum=None): if linear_sum is None: self.n_samples_ = 0 self.squared_sum_ = 0.0 self.linear_sum_ = 0 else: self.n_samples_ = 1 self.centroid_ = self.linear_sum_ = linear_sum self.squared_sum_ = self.sq_norm_ = np.dot( self.linear_sum_, self.linear_sum_) self.child_ = None def update(self, subcluster): self.n_samples_ += subcluster.n_samples_ self.linear_sum_ += subcluster.linear_sum_ self.squared_sum_ += subcluster.squared_sum_ self.centroid_ = self.linear_sum_ / self.n_samples_ self.sq_norm_ = np.dot(self.centroid_, self.centroid_) def merge_subcluster(self, nominee_cluster, threshold): """Check if a cluster is worthy enough to be merged. If yes then merge. """ new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ new_n = self.n_samples_ + nominee_cluster.n_samples_ new_centroid = (1 / new_n) * new_ls new_norm = np.dot(new_centroid, new_centroid) dot_product = (-2 * new_n) * new_norm sq_radius = (new_ss + dot_product) / new_n + new_norm if sq_radius <= threshold ** 2: (self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_) = \ new_n, new_ls, new_ss, new_centroid, new_norm return True return False @property def radius(self): """Return radius of the subcluster""" dot_product = -2 * np.dot(self.linear_sum_, self.centroid_) return sqrt( ((self.squared_sum_ + dot_product) / self.n_samples_) + self.sq_norm_) class Birch(BaseEstimator, TransformerMixin, ClusterMixin): """Implements the Birch clustering algorithm. Every new sample is inserted into the root of the Clustering Feature Tree. It is then clubbed together with the subcluster that has the centroid closest to the new sample. This is done recursively till it ends up at the subcluster of the leaf of the tree has the closest centroid. Read more in the :ref:`User Guide <birch>`. 
Parameters ---------- threshold : float, default 0.5 The radius of the subcluster obtained by merging a new sample and the closest subcluster should be lesser than the threshold. Otherwise a new subcluster is started. branching_factor : int, default 50 Maximum number of CF subclusters in each node. If a new samples enters such that the number of subclusters exceed the branching_factor then the node has to be split. The corresponding parent also has to be split and if the number of subclusters in the parent is greater than the branching factor, then it has to be split recursively. n_clusters : int, instance of sklearn.cluster model, default None Number of clusters after the final clustering step, which treats the subclusters from the leaves as new samples. By default, this final clustering step is not performed and the subclusters are returned as they are. If a model is provided, the model is fit treating the subclusters as new samples and the initial data is mapped to the label of the closest subcluster. If an int is provided, the model fit is AgglomerativeClustering with n_clusters set to the int. compute_labels : bool, default True Whether or not to compute labels for each fit. copy : bool, default True Whether or not to make a copy of the given data. If set to False, the initial data will be overwritten. Attributes ---------- root_ : _CFNode Root of the CFTree. dummy_leaf_ : _CFNode Start pointer to all the leaves. subcluster_centers_ : ndarray, Centroids of all subclusters read directly from the leaves. subcluster_labels_ : ndarray, Labels assigned to the centroids of the subclusters after they are clustered globally. labels_ : ndarray, shape (n_samples,) Array of labels assigned to the input data. if partial_fit is used instead of fit, they are assigned to the last batch of data. Examples -------- >>> from sklearn.cluster import Birch >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]] >>> brc = Birch(branching_factor=50, n_clusters=None, threshold=0.5, ... compute_labels=True) >>> brc.fit(X) Birch(branching_factor=50, compute_labels=True, copy=True, n_clusters=None, threshold=0.5) >>> brc.predict(X) array([0, 0, 0, 1, 1, 1]) References ---------- * Tian Zhang, Raghu Ramakrishnan, Maron Livny BIRCH: An efficient data clustering method for large databases. http://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf * Roberto Perdisci JBirch - Java implementation of BIRCH clustering algorithm https://code.google.com/p/jbirch/ """ def __init__(self, threshold=0.5, branching_factor=50, n_clusters=3, compute_labels=True, copy=True): self.threshold = threshold self.branching_factor = branching_factor self.n_clusters = n_clusters self.compute_labels = compute_labels self.copy = copy def fit(self, X, y=None): """ Build a CF Tree for the input data. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. """ self.fit_, self.partial_fit_ = True, False return self._fit(X) def _fit(self, X): X = check_array(X, accept_sparse='csr', copy=self.copy) threshold = self.threshold branching_factor = self.branching_factor if branching_factor <= 1: raise ValueError("Branching_factor should be greater than one.") n_samples, n_features = X.shape # If partial_fit is called for the first time or fit is called, we # start a new tree. partial_fit = getattr(self, 'partial_fit_') has_root = getattr(self, 'root_', None) if getattr(self, 'fit_') or (partial_fit and not has_root): # The first root is the leaf. Manipulate this object throughout. 
self.root_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) # To enable getting back subclusters. self.dummy_leaf_ = _CFNode(threshold, branching_factor, is_leaf=True, n_features=n_features) self.dummy_leaf_.next_leaf_ = self.root_ self.root_.prev_leaf_ = self.dummy_leaf_ # Cannot vectorize. Enough to convince to use cython. if not sparse.issparse(X): iter_func = iter else: iter_func = _iterate_sparse_X for sample in iter_func(X): subcluster = _CFSubcluster(linear_sum=sample) split = self.root_.insert_cf_subcluster(subcluster) if split: new_subcluster1, new_subcluster2 = _split_node( self.root_, threshold, branching_factor) del self.root_ self.root_ = _CFNode(threshold, branching_factor, is_leaf=False, n_features=n_features) self.root_.append_subcluster(new_subcluster1) self.root_.append_subcluster(new_subcluster2) centroids = np.concatenate([ leaf.centroids_ for leaf in self._get_leaves()]) self.subcluster_centers_ = centroids self._global_clustering(X) return self def _get_leaves(self): """ Retrieve the leaves of the CF Node. Returns ------- leaves: array-like List of the leaf nodes. """ leaf_ptr = self.dummy_leaf_.next_leaf_ leaves = [] while leaf_ptr is not None: leaves.append(leaf_ptr) leaf_ptr = leaf_ptr.next_leaf_ return leaves def partial_fit(self, X=None, y=None): """ Online learning. Prevents rebuilding of CFTree from scratch. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features), None Input data. If X is not provided, only the global clustering step is done. """ self.partial_fit_, self.fit_ = True, False if X is None: # Perform just the final global clustering step. self._global_clustering() return self else: self._check_fit(X) return self._fit(X) def _check_fit(self, X): is_fitted = hasattr(self, 'subcluster_centers_') # Called by partial_fit, before fitting. has_partial_fit = hasattr(self, 'partial_fit_') # Should raise an error if one does not fit before predicting. if not (is_fitted or has_partial_fit): raise NotFittedError("Fit training data before predicting") if is_fitted and X.shape[1] != self.subcluster_centers_.shape[1]: raise ValueError( "Training data and predicted data do " "not have same number of features.") def predict(self, X): """ Predict data using the ``centroids_`` of subclusters. Avoid computation of the row norms of X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- labels: ndarray, shape(n_samples) Labelled data. """ X = check_array(X, accept_sparse='csr') self._check_fit(X) reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T) reduced_distance *= -2 reduced_distance += self._subcluster_norms return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)] def transform(self, X, y=None): """ Transform X into subcluster centroids dimension. Each dimension represents the distance from the sample point to each cluster centroid. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Input data. Returns ------- X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters) Transformed data. """ check_is_fitted(self, 'subcluster_centers_') return euclidean_distances(X, self.subcluster_centers_) def _global_clustering(self, X=None): """ Global clustering for the subclusters obtained after fitting """ clusterer = self.n_clusters centroids = self.subcluster_centers_ compute_labels = (X is not None) and self.compute_labels # Preprocessing for the global clustering. 
not_enough_centroids = False if isinstance(clusterer, int): clusterer = AgglomerativeClustering( n_clusters=self.n_clusters) # There is no need to perform the global clustering step. if len(centroids) < self.n_clusters: not_enough_centroids = True elif (clusterer is not None and not hasattr(clusterer, 'fit_predict')): raise ValueError("n_clusters should be an instance of " "ClusterMixin or an int") # To use in predict to avoid recalculation. self._subcluster_norms = row_norms( self.subcluster_centers_, squared=True) if clusterer is None or not_enough_centroids: self.subcluster_labels_ = np.arange(len(centroids)) if not_enough_centroids: warnings.warn( "Number of subclusters found (%d) by Birch is less " "than (%d). Decrease the threshold." % (len(centroids), self.n_clusters)) else: # The global clustering step that clusters the subclusters of # the leaves. It assumes the centroids of the subclusters as # samples and finds the final centroids. self.subcluster_labels_ = clusterer.fit_predict( self.subcluster_centers_) if compute_labels: self.labels_ = self.predict(X)
bsd-3-clause
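A hedged sketch of the incremental (partial_fit) path described in the record above, including the data-free call that triggers only the global clustering step; the toy data is illustrative.

import numpy as np
from sklearn.cluster import Birch

rng = np.random.RandomState(0)
X = rng.rand(200, 2)

brc = Birch(threshold=0.2, branching_factor=50, n_clusters=3)
for chunk in np.array_split(X, 4):
    brc.partial_fit(chunk)          # grow the CF-tree batch by batch
brc.partial_fit(None)               # X=None -> only the global clustering step

labels = brc.predict(X)
print(labels[:10])
print(brc.subcluster_centers_.shape)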
mjgrav2001/scikit-learn
sklearn/tree/export.py
75
15670
""" This module defines export functions for decision trees. """ # Authors: Gilles Louppe <g.louppe@gmail.com> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Brian Holt <bdholt1@gmail.com> # Noel Dawe <noel@dawe.me> # Satrajit Gosh <satrajit.ghosh@gmail.com> # Trevor Stephens <trev.stephens@gmail.com> # Licence: BSD 3 clause import numpy as np from ..externals import six from . import _tree def _color_brew(n): """Generate n colors with equally spaced hues. Parameters ---------- n : int The number of colors required. Returns ------- color_list : list, length n List of n tuples of form (R, G, B) being the components of each color. """ color_list = [] # Initialize saturation & value; calculate chroma & value shift s, v = 0.75, 0.9 c = s * v m = v - c for h in np.arange(25, 385, 360. / n).astype(int): # Calculate some intermediate values h_bar = h / 60. x = c * (1 - abs((h_bar % 2) - 1)) # Initialize RGB with same hue & chroma as our color rgb = [(c, x, 0), (x, c, 0), (0, c, x), (0, x, c), (x, 0, c), (c, 0, x), (c, x, 0)] r, g, b = rgb[int(h_bar)] # Shift the initial RGB values to match value and store rgb = [(int(255 * (r + m))), (int(255 * (g + m))), (int(255 * (b + m)))] color_list.append(rgb) return color_list def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None, feature_names=None, class_names=None, label='all', filled=False, leaves_parallel=False, impurity=True, node_ids=False, proportion=False, rotate=False, rounded=False, special_characters=False): """Export a decision tree in DOT format. This function generates a GraphViz representation of the decision tree, which is then written into `out_file`. Once exported, graphical renderings can be generated using, for example:: $ dot -Tps tree.dot -o tree.ps (PostScript format) $ dot -Tpng tree.dot -o tree.png (PNG format) The sample counts that are shown are weighted with any sample_weights that might be present. Read more in the :ref:`User Guide <tree>`. Parameters ---------- decision_tree : decision tree classifier The decision tree to be exported to GraphViz. out_file : file object or string, optional (default="tree.dot") Handle or name of the output file. max_depth : int, optional (default=None) The maximum depth of the representation. If None, the tree is fully generated. feature_names : list of strings, optional (default=None) Names of each of the features. class_names : list of strings, bool or None, optional (default=None) Names of each of the target classes in ascending numerical order. Only relevant for classification and not supported for multi-output. If ``True``, shows a symbolic representation of the class name. label : {'all', 'root', 'none'}, optional (default='all') Whether to show informative labels for impurity, etc. Options include 'all' to show at every node, 'root' to show only at the top root node, or 'none' to not show at any node. filled : bool, optional (default=False) When set to ``True``, paint nodes to indicate majority class for classification, extremity of values for regression, or purity of node for multi-output. leaves_parallel : bool, optional (default=False) When set to ``True``, draw all leaf nodes at the bottom of the tree. impurity : bool, optional (default=True) When set to ``True``, show the impurity at each node. node_ids : bool, optional (default=False) When set to ``True``, show the ID number on each node. proportion : bool, optional (default=False) When set to ``True``, change the display of 'values' and/or 'samples' to be proportions and percentages respectively. 
rotate : bool, optional (default=False) When set to ``True``, orient tree left to right rather than top-down. rounded : bool, optional (default=False) When set to ``True``, draw node boxes with rounded corners and use Helvetica fonts instead of Times-Roman. special_characters : bool, optional (default=False) When set to ``False``, ignore special characters for PostScript compatibility. Examples -------- >>> from sklearn.datasets import load_iris >>> from sklearn import tree >>> clf = tree.DecisionTreeClassifier() >>> iris = load_iris() >>> clf = clf.fit(iris.data, iris.target) >>> tree.export_graphviz(clf, ... out_file='tree.dot') # doctest: +SKIP """ def get_color(value): # Find the appropriate color & intensity for a node if colors['bounds'] is None: # Classification tree color = list(colors['rgb'][np.argmax(value)]) sorted_values = sorted(value, reverse=True) alpha = int(255 * (sorted_values[0] - sorted_values[1]) / (1 - sorted_values[1])) else: # Regression tree or multi-output color = list(colors['rgb'][0]) alpha = int(255 * ((value - colors['bounds'][0]) / (colors['bounds'][1] - colors['bounds'][0]))) # Return html color code in #RRGGBBAA format color.append(alpha) hex_codes = [str(i) for i in range(10)] hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f']) color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color] return '#' + ''.join(color) def node_to_str(tree, node_id, criterion): # Generate the node content string if tree.n_outputs == 1: value = tree.value[node_id][0, :] else: value = tree.value[node_id] # Should labels be shown? labels = (label == 'root' and node_id == 0) or label == 'all' # PostScript compatibility for special characters if special_characters: characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>'] node_string = '<' else: characters = ['#', '[', ']', '<=', '\\n', '"'] node_string = '"' # Write node ID if node_ids: if labels: node_string += 'node ' node_string += characters[0] + str(node_id) + characters[4] # Write decision criteria if tree.children_left[node_id] != _tree.TREE_LEAF: # Always write node decision criteria, except for leaves if feature_names is not None: feature = feature_names[tree.feature[node_id]] else: feature = "X%s%s%s" % (characters[1], tree.feature[node_id], characters[2]) node_string += '%s %s %s%s' % (feature, characters[3], round(tree.threshold[node_id], 4), characters[4]) # Write impurity if impurity: if not isinstance(criterion, six.string_types): criterion = "impurity" if labels: node_string += '%s = ' % criterion node_string += (str(round(tree.impurity[node_id], 4)) + characters[4]) # Write node sample count if labels: node_string += 'samples = ' if proportion: percent = (100. 
* tree.n_node_samples[node_id] / float(tree.n_node_samples[0])) node_string += (str(round(percent, 1)) + '%' + characters[4]) else: node_string += (str(tree.n_node_samples[node_id]) + characters[4]) # Write node class distribution / regression value if proportion and tree.n_classes[0] != 1: # For classification this will show the proportion of samples value = value / tree.weighted_n_node_samples[node_id] if labels: node_string += 'value = ' if tree.n_classes[0] == 1: # Regression value_text = np.around(value, 4) elif proportion: # Classification value_text = np.around(value, 2) elif np.all(np.equal(np.mod(value, 1), 0)): # Classification without floating-point weights value_text = value.astype(int) else: # Classification with floating-point weights value_text = np.around(value, 4) # Strip whitespace value_text = str(value_text.astype('S32')).replace("b'", "'") value_text = value_text.replace("' '", ", ").replace("'", "") if tree.n_classes[0] == 1 and tree.n_outputs == 1: value_text = value_text.replace("[", "").replace("]", "") value_text = value_text.replace("\n ", characters[4]) node_string += value_text + characters[4] # Write node majority class if (class_names is not None and tree.n_classes[0] != 1 and tree.n_outputs == 1): # Only done for single-output classification trees if labels: node_string += 'class = ' if class_names is not True: class_name = class_names[np.argmax(value)] else: class_name = "y%s%s%s" % (characters[1], np.argmax(value), characters[2]) node_string += class_name # Clean up any trailing newlines if node_string[-2:] == '\\n': node_string = node_string[:-2] if node_string[-5:] == '<br/>': node_string = node_string[:-5] return node_string + characters[5] def recurse(tree, node_id, criterion, parent=None, depth=0): if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) left_child = tree.children_left[node_id] right_child = tree.children_right[node_id] # Add node with description if max_depth is None or depth <= max_depth: # Collect ranks for 'leaf' option in plot_options if left_child == _tree.TREE_LEAF: ranks['leaves'].append(str(node_id)) elif str(depth) not in ranks: ranks[str(depth)] = [str(node_id)] else: ranks[str(depth)].append(str(node_id)) out_file.write('%d [label=%s' % (node_id, node_to_str(tree, node_id, criterion))) if filled: # Fetch appropriate color for node if 'rgb' not in colors: # Initialize colors and bounds if required colors['rgb'] = _color_brew(tree.n_classes[0]) if tree.n_outputs != 1: # Find max and min impurities for multi-output colors['bounds'] = (np.min(-tree.impurity), np.max(-tree.impurity)) elif tree.n_classes[0] == 1: # Find max and min values in leaf nodes for regression colors['bounds'] = (np.min(tree.value), np.max(tree.value)) if tree.n_outputs == 1: node_val = (tree.value[node_id][0, :] / tree.weighted_n_node_samples[node_id]) if tree.n_classes[0] == 1: # Regression node_val = tree.value[node_id][0, :] else: # If multi-output color node by impurity node_val = -tree.impurity[node_id] out_file.write(', fillcolor="%s"' % get_color(node_val)) out_file.write('] ;\n') if parent is not None: # Add edge to parent out_file.write('%d -> %d' % (parent, node_id)) if parent == 0: # Draw True/False labels if parent is root node angles = np.array([45, -45]) * ((rotate - .5) * -2) out_file.write(' [labeldistance=2.5, labelangle=') if node_id == 1: out_file.write('%d, headlabel="True"]' % angles[0]) else: out_file.write('%d, headlabel="False"]' % angles[1]) out_file.write(' ;\n') if left_child != _tree.TREE_LEAF: 
recurse(tree, left_child, criterion=criterion, parent=node_id, depth=depth + 1) recurse(tree, right_child, criterion=criterion, parent=node_id, depth=depth + 1) else: ranks['leaves'].append(str(node_id)) out_file.write('%d [label="(...)"' % node_id) if filled: # color cropped nodes grey out_file.write(', fillcolor="#C0C0C0"') out_file.write('] ;\n' % node_id) if parent is not None: # Add edge to parent out_file.write('%d -> %d ;\n' % (parent, node_id)) own_file = False try: if isinstance(out_file, six.string_types): if six.PY3: out_file = open(out_file, "w", encoding="utf-8") else: out_file = open(out_file, "wb") own_file = True # The depth of each node for plotting with 'leaf' option ranks = {'leaves': []} # The colors to render each node with colors = {'bounds': None} out_file.write('digraph Tree {\n') # Specify node aesthetics out_file.write('node [shape=box') rounded_filled = [] if filled: rounded_filled.append('filled') if rounded: rounded_filled.append('rounded') if len(rounded_filled) > 0: out_file.write(', style="%s", color="black"' % ", ".join(rounded_filled)) if rounded: out_file.write(', fontname=helvetica') out_file.write('] ;\n') # Specify graph & edge aesthetics if leaves_parallel: out_file.write('graph [ranksep=equally, splines=polyline] ;\n') if rounded: out_file.write('edge [fontname=helvetica] ;\n') if rotate: out_file.write('rankdir=LR ;\n') # Now recurse the tree and add node & edge attributes if isinstance(decision_tree, _tree.Tree): recurse(decision_tree, 0, criterion="impurity") else: recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion) # If required, draw leaf nodes at same depth as each other if leaves_parallel: for rank in sorted(ranks): out_file.write("{rank=same ; " + "; ".join(r for r in ranks[rank]) + "} ;\n") out_file.write("}") finally: if own_file: out_file.close()
bsd-3-clause
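A hedged sketch of driving export_graphviz (above) with the options it documents; the output filename is arbitrary, and rendering the .dot file with graphviz is optional.

from sklearn.datasets import load_iris
from sklearn import tree

iris = load_iris()
clf = tree.DecisionTreeClassifier(max_depth=3)
clf = clf.fit(iris.data, iris.target)

# Named features/classes, colored and rounded boxes, leaves drawn on one rank.
# Render afterwards with, e.g.:  dot -Tpng iris_tree.dot -o iris_tree.png
tree.export_graphviz(clf, out_file='iris_tree.dot',
                     feature_names=iris.feature_names,
                     class_names=iris.target_names,
                     filled=True, rounded=True, leaves_parallel=True)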
conceptcreative/free_grants_community
adstats/analyze.py
1
1561
""" # Adstats Analysis Calculate and display statistics based on ad network data. """ import argparse import pandas as pd import matplotlib.pyplot as plt from StringIO import StringIO parser = argparse.ArgumentParser(description='Ad Network Statistics') parser.add_argument('--adwords', type=str, help='path to Google Adwords csv report') parser.add_argument('--adsense', type=str, help='path to Google Adsense csv report') parser.add_argument('--print-col-names', action='store_true', default=False) parser.add_argument('--interactive', action='store_true', default=False) args = parser.parse_args() adwords = pd.read_csv(args.adwords, skiprows=1, skipfooter=1, thousands=',', parse_dates=[0,], index_col=0) adwords['CTR'] = adwords['CTR'].map(lambda val: float(val[:-1]) / 100) adwords.rename(columns=lambda val: 'Adwords ' + val, inplace=True) text = open(args.adsense, 'rb').read().decode('utf-16').encode('utf-8') adsense = pd.read_csv(StringIO(text), sep=None, parse_dates=[0,], index_col=0) adsense['Page CTR'] = adsense['Page CTR'].map(lambda val: float(val[:-1]) / 100) adsense.rename(columns=lambda val: 'Adsense ' + val, inplace=True) data = pd.merge(adwords, adsense, left_index=True, right_index=True) data['Profit'] = data['Adsense Estimated earnings (USD)'] - data['Adwords Cost'] if args.print_col_names: for idx, name in enumerate(data.columns): print '{0} "{1}"'.format(idx, name) if args.interactive: import pylab pylab.ion() from IPython import embed embed()
mit
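A self-contained, hedged sketch of the merge/profit step performed by the script above, using tiny in-memory frames instead of the CSV exports; only the two column names taken from the script are real, the dates and values are invented.

import pandas as pd

dates = pd.to_datetime(['2014-01-01', '2014-01-02'])
adwords = pd.DataFrame({'Adwords Cost': [10.0, 12.5]}, index=dates)
adsense = pd.DataFrame({'Adsense Estimated earnings (USD)': [15.0, 11.0]},
                       index=dates)

data = pd.merge(adwords, adsense, left_index=True, right_index=True)
data['Profit'] = data['Adsense Estimated earnings (USD)'] - data['Adwords Cost']
print(data)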
HeraclesHX/scikit-learn
sklearn/neighbors/nearest_centroid.py
199
7249
# -*- coding: utf-8 -*- """ Nearest Centroid Classification """ # Author: Robert Layton <robertlayton@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # # License: BSD 3 clause import warnings import numpy as np from scipy import sparse as sp from ..base import BaseEstimator, ClassifierMixin from ..metrics.pairwise import pairwise_distances from ..preprocessing import LabelEncoder from ..utils.validation import check_array, check_X_y, check_is_fitted from ..utils.sparsefuncs import csc_median_axis_0 class NearestCentroid(BaseEstimator, ClassifierMixin): """Nearest centroid classifier. Each class is represented by its centroid, with test samples classified to the class with the nearest centroid. Read more in the :ref:`User Guide <nearest_centroid_classifier>`. Parameters ---------- metric: string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. The centroids for the samples corresponding to each class is the point from which the sum of the distances (according to the metric) of all samples that belong to that particular class are minimized. If the "manhattan" metric is provided, this centroid is the median and for all other metrics, the centroid is now set to be the mean. shrink_threshold : float, optional (default = None) Threshold for shrinking centroids to remove features. Attributes ---------- centroids_ : array-like, shape = [n_classes, n_features] Centroid of each class Examples -------- >>> from sklearn.neighbors.nearest_centroid import NearestCentroid >>> import numpy as np >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = NearestCentroid() >>> clf.fit(X, y) NearestCentroid(metric='euclidean', shrink_threshold=None) >>> print(clf.predict([[-0.8, -1]])) [1] See also -------- sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier Notes ----- When used for text classification with tf-idf vectors, this classifier is also known as the Rocchio classifier. References ---------- Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of multiple cancer types by shrunken centroids of gene expression. Proceedings of the National Academy of Sciences of the United States of America, 99(10), 6567-6572. The National Academy of Sciences. """ def __init__(self, metric='euclidean', shrink_threshold=None): self.metric = metric self.shrink_threshold = shrink_threshold def fit(self, X, y): """ Fit the NearestCentroid model according to the given training data. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training vector, where n_samples in the number of samples and n_features is the number of features. Note that centroid shrinking cannot be used with sparse matrices. y : array, shape = [n_samples] Target values (integers) """ # If X is sparse and the metric is "manhattan", store it in a csc # format is easier to calculate the median. 
if self.metric == 'manhattan': X, y = check_X_y(X, y, ['csc']) else: X, y = check_X_y(X, y, ['csr', 'csc']) is_X_sparse = sp.issparse(X) if is_X_sparse and self.shrink_threshold: raise ValueError("threshold shrinking not supported" " for sparse input") n_samples, n_features = X.shape le = LabelEncoder() y_ind = le.fit_transform(y) self.classes_ = classes = le.classes_ n_classes = classes.size if n_classes < 2: raise ValueError('y has less than 2 classes') # Mask mapping each class to it's members. self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64) # Number of clusters in each class. nk = np.zeros(n_classes) for cur_class in range(n_classes): center_mask = y_ind == cur_class nk[cur_class] = np.sum(center_mask) if is_X_sparse: center_mask = np.where(center_mask)[0] # XXX: Update other averaging methods according to the metrics. if self.metric == "manhattan": # NumPy does not calculate median of sparse matrices. if not is_X_sparse: self.centroids_[cur_class] = np.median(X[center_mask], axis=0) else: self.centroids_[cur_class] = csc_median_axis_0(X[center_mask]) else: if self.metric != 'euclidean': warnings.warn("Averaging for metrics other than " "euclidean and manhattan not supported. " "The average is set to be the mean." ) self.centroids_[cur_class] = X[center_mask].mean(axis=0) if self.shrink_threshold: dataset_centroid_ = np.mean(X, axis=0) # m parameter for determining deviation m = np.sqrt((1. / nk) + (1. / n_samples)) # Calculate deviation using the standard deviation of centroids. variance = (X - self.centroids_[y_ind]) ** 2 variance = variance.sum(axis=0) s = np.sqrt(variance / (n_samples - n_classes)) s += np.median(s) # To deter outliers from affecting the results. mm = m.reshape(len(m), 1) # Reshape to allow broadcasting. ms = mm * s deviation = ((self.centroids_ - dataset_centroid_) / ms) # Soft thresholding: if the deviation crosses 0 during shrinking, # it becomes zero. signs = np.sign(deviation) deviation = (np.abs(deviation) - self.shrink_threshold) deviation[deviation < 0] = 0 deviation *= signs # Now adjust the centroids using the deviation msd = ms * deviation self.centroids_ = dataset_centroid_[np.newaxis, :] + msd return self def predict(self, X): """Perform classification on an array of test vectors X. The predicted class C for each sample in X is returned. Parameters ---------- X : array-like, shape = [n_samples, n_features] Returns ------- C : array, shape = [n_samples] Notes ----- If the metric constructor parameter is "precomputed", X is assumed to be the distance matrix between the data to be predicted and ``self.centroids_``. """ check_is_fitted(self, 'centroids_') X = check_array(X, accept_sparse='csr') return self.classes_[pairwise_distances( X, self.centroids_, metric=self.metric).argmin(axis=1)]
bsd-3-clause
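A hedged sketch of the two behaviours called out in the record above: the manhattan metric (per-class medians) and centroid shrinkage; the data values are illustrative.

import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-2., -1.], [-1., -1.], [-3., -2.], [1., 1.], [2., 1.], [3., 2.]])
y = np.array([1, 1, 1, 2, 2, 2])

median_clf = NearestCentroid(metric='manhattan').fit(X, y)    # centroids are medians
shrunk_clf = NearestCentroid(shrink_threshold=0.1).fit(X, y)  # soft-thresholded centroids

print(median_clf.predict([[0.8, 1.0]]))
print(shrunk_clf.centroids_)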
wmvanvliet/mne-python
mne/externals/tqdm/_tqdm/gui.py
14
11601
""" GUI progressbar decorator for iterators. Includes a default (x)range iterator printing to stderr. Usage: >>> from tqdm.gui import trange[, tqdm] >>> for i in trange(10): #same as: for i in tqdm(xrange(10)) ... ... """ # future division is important to divide integers and get as # a result precise floating numbers (instead of truncated int) from __future__ import division, absolute_import # import compatibility functions and utilities from .utils import _range # to inherit from the tqdm class from .std import tqdm as std_tqdm from .std import TqdmExperimentalWarning from warnings import warn __author__ = {"github.com/": ["casperdcl", "lrq3000"]} __all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange'] class tqdm_gui(std_tqdm): # pragma: no cover """ Experimental GUI version of tqdm! """ # TODO: @classmethod: write() on GUI? def __init__(self, *args, **kwargs): import matplotlib as mpl import matplotlib.pyplot as plt from collections import deque kwargs['gui'] = True super(tqdm_gui, self).__init__(*args, **kwargs) # Initialize the GUI display if self.disable or not kwargs['gui']: return warn('GUI is experimental/alpha', TqdmExperimentalWarning, stacklevel=2) self.mpl = mpl self.plt = plt self.sp = None # Remember if external environment uses toolbars self.toolbar = self.mpl.rcParams['toolbar'] self.mpl.rcParams['toolbar'] = 'None' self.mininterval = max(self.mininterval, 0.5) self.fig, ax = plt.subplots(figsize=(9, 2.2)) # self.fig.subplots_adjust(bottom=0.2) total = len(self) if total is not None: self.xdata = [] self.ydata = [] self.zdata = [] else: self.xdata = deque([]) self.ydata = deque([]) self.zdata = deque([]) self.line1, = ax.plot(self.xdata, self.ydata, color='b') self.line2, = ax.plot(self.xdata, self.zdata, color='k') ax.set_ylim(0, 0.001) if total is not None: ax.set_xlim(0, 100) ax.set_xlabel('percent') self.fig.legend((self.line1, self.line2), ('cur', 'est'), loc='center right') # progressbar self.hspan = plt.axhspan(0, 0.001, xmin=0, xmax=0, color='g') else: # ax.set_xlim(-60, 0) ax.set_xlim(0, 60) ax.invert_xaxis() ax.set_xlabel('seconds') ax.legend(('cur', 'est'), loc='lower left') ax.grid() # ax.set_xlabel('seconds') ax.set_ylabel((self.unit if self.unit else 'it') + '/s') if self.unit_scale: plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) ax.yaxis.get_offset_text().set_x(-0.15) # Remember if external environment is interactive self.wasion = plt.isinteractive() plt.ion() self.ax = ax def __iter__(self): # TODO: somehow allow the following: # if not self.gui: # return super(tqdm_gui, self).__iter__() iterable = self.iterable if self.disable: for obj in iterable: yield obj return # ncols = self.ncols mininterval = self.mininterval maxinterval = self.maxinterval miniters = self.miniters dynamic_miniters = self.dynamic_miniters last_print_t = self.last_print_t last_print_n = self.last_print_n n = self.n # dynamic_ncols = self.dynamic_ncols smoothing = self.smoothing avg_time = self.avg_time time = self._time for obj in iterable: yield obj # Update and possibly print the progressbar. # Note: does not call self.update(1) for speed optimisation. 
n += 1 # check counter first to avoid calls to time() if n - last_print_n >= self.miniters: miniters = self.miniters # watch monitoring thread changes delta_t = time() - last_print_t if delta_t >= mininterval: cur_t = time() delta_it = n - last_print_n # EMA (not just overall average) if smoothing and delta_t and delta_it: rate = delta_t / delta_it avg_time = self.ema(rate, avg_time, smoothing) self.avg_time = avg_time self.n = n self.display() # If no `miniters` was specified, adjust automatically # to the max iteration rate seen so far between 2 prints if dynamic_miniters: if maxinterval and delta_t >= maxinterval: # Adjust miniters to time interval by rule of 3 if mininterval: # Set miniters to correspond to mininterval miniters = delta_it * mininterval / delta_t else: # Set miniters to correspond to maxinterval miniters = delta_it * maxinterval / delta_t elif smoothing: # EMA-weight miniters to converge # towards the timeframe of mininterval rate = delta_it if mininterval and delta_t: rate *= mininterval / delta_t miniters = self.ema(rate, miniters, smoothing) else: # Maximum nb of iterations between 2 prints miniters = max(miniters, delta_it) # Store old values for next call self.n = self.last_print_n = last_print_n = n self.last_print_t = last_print_t = cur_t self.miniters = miniters # Closing the progress bar. # Update some internal variables for close(). self.last_print_n = last_print_n self.n = n self.miniters = miniters self.close() def update(self, n=1): # if not self.gui: # return super(tqdm_gui, self).close() if self.disable: return if n < 0: self.last_print_n += n # for auto-refresh logic to work self.n += n # check counter first to reduce calls to time() if self.n - self.last_print_n >= self.miniters: delta_t = self._time() - self.last_print_t if delta_t >= self.mininterval: cur_t = self._time() delta_it = self.n - self.last_print_n # >= n # elapsed = cur_t - self.start_t # EMA (not just overall average) if self.smoothing and delta_t and delta_it: rate = delta_t / delta_it self.avg_time = self.ema( rate, self.avg_time, self.smoothing) self.display() # If no `miniters` was specified, adjust automatically to the # maximum iteration rate seen so far between two prints. # e.g.: After running `tqdm.update(5)`, subsequent # calls to `tqdm.update()` will only cause an update after # at least 5 more iterations. 
if self.dynamic_miniters: if self.maxinterval and delta_t >= self.maxinterval: if self.mininterval: self.miniters = delta_it * self.mininterval \ / delta_t else: self.miniters = delta_it * self.maxinterval \ / delta_t elif self.smoothing: self.miniters = self.smoothing * delta_it * \ (self.mininterval / delta_t if self.mininterval and delta_t else 1) + \ (1 - self.smoothing) * self.miniters else: self.miniters = max(self.miniters, delta_it) # Store old values for next call self.last_print_n = self.n self.last_print_t = cur_t def close(self): # if not self.gui: # return super(tqdm_gui, self).close() if self.disable: return self.disable = True with self.get_lock(): self._instances.remove(self) # Restore toolbars self.mpl.rcParams['toolbar'] = self.toolbar # Return to non-interactive mode if not self.wasion: self.plt.ioff() if not self.leave: self.plt.close(self.fig) def display(self): n = self.n cur_t = self._time() elapsed = cur_t - self.start_t delta_it = n - self.last_print_n delta_t = cur_t - self.last_print_t # Inline due to multiple calls total = self.total xdata = self.xdata ydata = self.ydata zdata = self.zdata ax = self.ax line1 = self.line1 line2 = self.line2 # instantaneous rate y = delta_it / delta_t # overall rate z = n / elapsed # update line data xdata.append(n * 100.0 / total if total else cur_t) ydata.append(y) zdata.append(z) # Discard old values # xmin, xmax = ax.get_xlim() # if (not total) and elapsed > xmin * 1.1: if (not total) and elapsed > 66: xdata.popleft() ydata.popleft() zdata.popleft() ymin, ymax = ax.get_ylim() if y > ymax or z > ymax: ymax = 1.1 * y ax.set_ylim(ymin, ymax) ax.figure.canvas.draw() if total: line1.set_data(xdata, ydata) line2.set_data(xdata, zdata) try: poly_lims = self.hspan.get_xy() except AttributeError: self.hspan = self.plt.axhspan( 0, 0.001, xmin=0, xmax=0, color='g') poly_lims = self.hspan.get_xy() poly_lims[0, 1] = ymin poly_lims[1, 1] = ymax poly_lims[2] = [n / total, ymax] poly_lims[3] = [poly_lims[2, 0], ymin] if len(poly_lims) > 4: poly_lims[4, 1] = ymin self.hspan.set_xy(poly_lims) else: t_ago = [cur_t - i for i in xdata] line1.set_data(t_ago, ydata) line2.set_data(t_ago, zdata) ax.set_title(self.format_meter( n, total, elapsed, 0, self.desc, self.ascii, self.unit, self.unit_scale, 1 / self.avg_time if self.avg_time else None, self.bar_format, self.postfix, self.unit_divisor), fontname="DejaVu Sans Mono", fontsize=11) self.plt.pause(1e-9) def tgrange(*args, **kwargs): """ A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`. On Python3+, `range` is used instead of `xrange`. """ return tqdm_gui(_range(*args), **kwargs) # Aliases tqdm = tqdm_gui trange = tgrange
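# A minimal usage sketch for the experimental GUI bar defined above, following
# the module docstring's own recommendation of importing trange/tqdm from
# tqdm.gui. matplotlib must be installed; sleep() simply stands in for real
# per-iteration work so the bar has something to animate.
from time import sleep
from tqdm.gui import trange

for _ in trange(100):
    sleep(0.01)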
bsd-3-clause
microsoft/EconML
econml/grf/_base_grf.py
1
49979
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # # This code contains snippets of code from # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/ensemble/_forest.py # published under the following license and copyright: # BSD 3-Clause License # # Copyright (c) 2007-2020 The scikit-learn developers. # All rights reserved. import numbers from warnings import catch_warnings, simplefilter, warn from abc import ABCMeta, abstractmethod import numpy as np import threading from .._ensemble import (BaseEnsemble, _partition_estimators, _get_n_samples_subsample, _accumulate_prediction, _accumulate_prediction_var, _accumulate_prediction_and_var, _accumulate_oob_preds) from ..utilities import check_inputs, cross_product from ..tree._tree import DTYPE, DOUBLE from ._base_grftree import GRFTree from joblib import Parallel, delayed from scipy.sparse import hstack as sparse_hstack from sklearn.utils import check_random_state, compute_sample_weight from sklearn.utils.validation import _check_sample_weight, check_is_fitted from sklearn.utils import check_X_y import scipy.stats from scipy.special import erfc __all__ = ["BaseGRF"] MAX_INT = np.iinfo(np.int32).max # ============================================================================= # Base Generalized Random Forest # ============================================================================= class BaseGRF(BaseEnsemble, metaclass=ABCMeta): """ Base class for Genearlized Random Forests for solving linear moment equations of the form:: E[J * theta(x) - A | X = x] = 0 where J is an (d, d) random matrix, A is an (d, 1) random vector and theta(x) is a local parameter to be estimated, which might contain both relevant and nuisance parameters. Warning: This class should not be used directly. Use derived classes instead. 
""" def __init__(self, n_estimators=100, *, criterion="mse", max_depth=None, min_samples_split=10, min_samples_leaf=5, min_weight_fraction_leaf=0., min_var_fraction_leaf=None, min_var_leaf_on_val=False, max_features="auto", min_impurity_decrease=0., max_samples=.45, min_balancedness_tol=.45, honest=True, inference=True, fit_intercept=True, subforest_size=4, n_jobs=-1, random_state=None, verbose=0, warm_start=False): super().__init__( base_estimator=GRFTree(), n_estimators=n_estimators, estimator_params=("criterion", "max_depth", "min_samples_split", "min_samples_leaf", "min_weight_fraction_leaf", "min_var_leaf", "min_var_leaf_on_val", "max_features", "min_impurity_decrease", "honest", "min_balancedness_tol", "random_state")) self.criterion = criterion self.max_depth = max_depth self.min_samples_split = min_samples_split self.min_samples_leaf = min_samples_leaf self.min_weight_fraction_leaf = min_weight_fraction_leaf self.min_var_fraction_leaf = min_var_fraction_leaf self.min_var_leaf_on_val = min_var_leaf_on_val self.max_features = max_features self.min_impurity_decrease = min_impurity_decrease self.min_balancedness_tol = min_balancedness_tol self.honest = honest self.inference = inference self.fit_intercept = fit_intercept self.subforest_size = subforest_size self.n_jobs = n_jobs self.random_state = random_state self.verbose = verbose self.warm_start = warm_start self.max_samples = max_samples def _get_alpha_and_pointJ(self, X, T, y, **kwargs): """ This function must be implemented by child class and given input variables X, T, y and any auxiliary variables passed as keyword only, should be calculating the point-wise random vector A and the point-wise jacobian random variable J of the linear moment equation for every sample in the input samples. Returns ------- A : array of shape (n_samples, n_outputs) The A part of the moment equation for each sample J : array of shape (n_samples, n_outputs * n_outputs) The J matrix part of the moment equation, flattened in Fortran-contiguous format. """ pass def _get_n_outputs_decomposition(self, X, T, y, **kwargs): """ This function must be implemented by child class and given input variables X, T, y and any auxiliary variables passed as keyword only, should return a tuple (n_outputs, n_relevant_outputs), which determines how many parameters is the moment estimating and what prefix of these parameters are the relevant ones that we care about. Returns ------- n_outputs : int The number of parameters we are estimating n_relevant_outputs : int The length of the prefix of parameters that we care about (remainder are nuisance) """ pass def apply(self, X): """ Apply trees in the forest to X, return leaf indices. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. Returns ------- X_leaves : ndarray of shape (n_samples, n_estimators) For each datapoint x in X and for each tree in the forest, return the index of the leaf x ends up in. """ X = self._validate_X_predict(X) results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")( delayed(tree.apply)(X, check_input=False) for tree in self.estimators_) return np.array(results).T def decision_path(self, X): """ Return the decision path in the forest. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. 
Returns ------- indicator : sparse matrix of shape (n_samples, n_nodes) Return a node indicator matrix where non zero elements indicates that the samples goes through the nodes. The matrix is of CSR format. n_nodes_ptr : ndarray of shape (n_estimators + 1,) The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]] gives the indicator value for the i-th estimator. """ X = self._validate_X_predict(X) indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading')( delayed(tree.decision_path)(X, check_input=False) for tree in self.estimators_) n_nodes = [0] n_nodes.extend([i.shape[1] for i in indicators]) n_nodes_ptr = np.array(n_nodes).cumsum() return sparse_hstack(indicators).tocsr(), n_nodes_ptr def fit(self, X, T, y, *, sample_weight=None, **kwargs): """ Build a forest of trees from the training set (X, T, y) and any other auxiliary variables. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. Internally, its dtype will be converted to ``dtype=np.float64``. T : array-like of shape (n_samples, n_treatments) The treatment vector for each sample y : array-like of shape (n_samples,) or (n_samples, n_outcomes) The outcome values for each sample. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. **kwargs : dictionary of array-like items of shape (n_samples, d_var) Auxiliary random variables that go into the moment function (e.g. instrument, censoring etc) Any of these variables will be passed on as is to the `get_pointJ` and `get_alpha` method of the children classes. Returns ------- self : object """ # TODO: support freq_weight and sample_var y, T, X, _ = check_inputs(y, T, X, W=None, multi_output_T=True, multi_output_Y=True) if sample_weight is not None: sample_weight = _check_sample_weight(sample_weight, X, DOUBLE) # Remap output n_samples, self.n_features_ = X.shape y = np.atleast_1d(y) if y.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. y = np.reshape(y, (-1, 1)) self.n_y_ = y.shape[1] T = np.atleast_1d(T) if T.ndim == 1: # reshape is necessary to preserve the data contiguity against vs # [:, np.newaxis] that does not. 
T = np.reshape(T, (-1, 1)) alpha, pointJ = self._get_alpha_and_pointJ(X, T, y, **kwargs) self.n_outputs_, self.n_relevant_outputs_ = self._get_n_outputs_decomposition(X, T, y, **kwargs) yaug = np.hstack([y, alpha, pointJ]) if getattr(yaug, "dtype", None) != DOUBLE or not yaug.flags.contiguous: yaug = np.ascontiguousarray(yaug, dtype=DOUBLE) if getattr(X, "dtype", None) != DTYPE: X = X.astype(DTYPE) # Get subsample sample size n_samples_subsample = _get_n_samples_subsample( n_samples=n_samples, max_samples=self.max_samples ) # Converting `min_var_fraction_leaf` to an absolute `min_var_leaf` that the GRFTree can handle if self.min_var_fraction_leaf is None: self.min_var_leaf = None elif (not isinstance(self.min_var_fraction_leaf, numbers.Real)) or (not (0 < self.min_var_fraction_leaf <= 1)): msg = "`min_var_fraction_leaf` must be in range (0, 1) but got value {}" raise ValueError(msg.format(self.min_var_fraction_leaf)) else: # We calculate the min eigenvalue proxy that each criterion is considering # on the overall mean jacobian, to determine the absolute level of `min_var_leaf` jac = np.mean(pointJ, axis=0).reshape((self.n_outputs_, self.n_outputs_)) min_var = np.min(np.abs(np.diag(jac))) if self.criterion == 'mse': for i in range(self.n_outputs_): for j in range(self.n_outputs_): if j != i: det = np.sqrt(np.abs(jac[i, i] * jac[j, j] - jac[i, j] * jac[j, i])) if det < min_var: min_var = det self.min_var_leaf = min_var * self.min_var_fraction_leaf # Check parameters self._validate_estimator() random_state = check_random_state(self.random_state) # We re-initialize the subsample_random_seed_ only if we are not in warm_start mode or # if this is the first `fit` call of the warm start mode. if (not self.warm_start) or (not hasattr(self, 'subsample_random_seed_')): self.subsample_random_seed_ = random_state.randint(MAX_INT) else: random_state.randint(MAX_INT) # just advance random_state subsample_random_state = check_random_state(self.subsample_random_seed_) if (self.warm_start and hasattr(self, 'inference_') and (self.inference != self.inference_)): raise ValueError("Parameter inference cannot be altered in between `fit` " "calls when `warm_start=True`.") self.inference_ = self.inference self.warm_start_ = self.warm_start if not self.warm_start or not hasattr(self, "estimators_"): # Free allocated memory, if any self.estimators_ = [] self.slices_ = [] # the below are needed to replicate randomness of subsampling when warm_start=True self.slices_n_samples_ = [] self.slices_n_samples_subsample_ = [] self.n_samples_ = [] self.n_samples_subsample_ = [] n_more_estimators = self.n_estimators - len(self.estimators_) if n_more_estimators < 0: raise ValueError('n_estimators=%d must be larger or equal to ' 'len(estimators_)=%d when warm_start==True' % (self.n_estimators, len(self.estimators_))) elif n_more_estimators == 0: warn("Warm-start fitting without increasing n_estimators does not " "fit new trees.") else: if self.inference: if not isinstance(self.subforest_size, numbers.Integral): raise ValueError("Parameter `subforest_size` must be " "an integer but got value {}.".format(self.subforest_size)) if self.subforest_size < 2: raise ValueError("Parameter `subforest_size` must be at least 2 if `inference=True`, " "but got value {}".format(self.subforest_size)) if not (n_more_estimators % self.subforest_size == 0): raise ValueError("The number of estimators to be constructed must be divisible " "the `subforest_size` parameter. 
Asked to build `n_estimators={}` " "with `subforest_size={}`.".format(n_more_estimators, self.subforest_size)) if n_samples_subsample > n_samples // 2: if isinstance(self.max_samples, numbers.Integral): raise ValueError("Parameter `max_samples` must be in [1, n_samples // 2], " "if `inference=True`. " "Got values n_samples={}, max_samples={}".format(n_samples, self.max_samples)) else: raise ValueError("Parameter `max_samples` must be in (0, .5], if `inference=True`. " "Got value {}".format(self.max_samples)) if self.warm_start and len(self.estimators_) > 0: # We draw from the random state to get the random state we # would have got if we hadn't used a warm_start. random_state.randint(MAX_INT, size=len(self.estimators_)) trees = [self._make_estimator(append=False, random_state=random_state).init() for i in range(n_more_estimators)] if self.inference: if self.warm_start: # Advancing subsample_random_state. Assumes each prior fit call has the same number of # samples at fit time. If not then this would not exactly replicate a single batch execution, # but would still advance randomness enough so that tree subsamples will be different. for sl, n_, ns_ in zip(self.slices_, self.slices_n_samples_, self.slices_n_samples_subsample_): subsample_random_state.choice(n_, n_ // 2, replace=False) for _ in range(len(sl)): subsample_random_state.choice(n_ // 2, ns_, replace=False) # Generating indices a priori before parallelism ended up being orders of magnitude # faster than how sklearn does it. The reason is that random samplers do not release the # gil it seems. n_groups = n_more_estimators // self.subforest_size new_slices = np.array_split(np.arange(len(self.estimators_), len(self.estimators_) + n_more_estimators), n_groups) s_inds = [] for sl in new_slices: half_sample_inds = subsample_random_state.choice(n_samples, n_samples // 2, replace=False) s_inds.extend([half_sample_inds[subsample_random_state.choice(n_samples // 2, n_samples_subsample, replace=False)] for _ in range(len(sl))]) else: if self.warm_start: # Advancing subsample_random_state. Assumes each prior fit call has the same number of # samples at fit time. If not then this would not exactly replicate a single batch execution, # but would still advance randomness enough so that tree subsamples will be different. for _, n_, ns_ in zip(range(len(self.estimators_)), self.n_samples_, self.n_samples_subsample_): subsample_random_state.choice(n_, ns_, replace=False) new_slices = [] s_inds = [subsample_random_state.choice(n_samples, n_samples_subsample, replace=False) for _ in range(n_more_estimators)] # Parallel loop: we prefer the threading backend as the Cython code # for fitting the trees is internally releasing the Python GIL # making threading more efficient than multiprocessing in # that case. However, for joblib 0.12+ we respect any # parallel_backend contexts set at a higher level, # since correctness does not rely on using threads. 
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading')( delayed(t.fit)(X[s], yaug[s], self.n_y_, self.n_outputs_, self.n_relevant_outputs_, sample_weight=sample_weight[s] if sample_weight is not None else None, check_input=False) for t, s in zip(trees, s_inds)) # Collect newly grown trees self.estimators_.extend(trees) self.n_samples_.extend([n_samples] * len(trees)) self.n_samples_subsample_.extend([n_samples_subsample] * len(trees)) self.slices_.extend(list(new_slices)) self.slices_n_samples_.extend([n_samples] * len(new_slices)) self.slices_n_samples_subsample_.extend([n_samples_subsample] * len(new_slices)) return self def get_subsample_inds(self,): """ Re-generate the example same sample indices as those at fit time using same pseudo-randomness. """ check_is_fitted(self) subsample_random_state = check_random_state(self.subsample_random_seed_) if self.inference_: s_inds = [] for sl, n_, ns_ in zip(self.slices_, self.slices_n_samples_, self.slices_n_samples_subsample_): half_sample_inds = subsample_random_state.choice(n_, n_ // 2, replace=False) s_inds.extend([half_sample_inds[subsample_random_state.choice(n_ // 2, ns_, replace=False)] for _ in range(len(sl))]) return s_inds else: return [subsample_random_state.choice(n_, ns_, replace=False) for n_, ns_ in zip(self.n_samples_, self.n_samples_subsample_)] def feature_importances(self, max_depth=4, depth_decay_exponent=2.0): """ The feature importances based on the amount of parameter heterogeneity they create. The higher, the more important the feature. The importance of a feature is computed as the (normalized) total heterogeneity that the feature creates. For each tree and for each split that the feature was chosen adds:: parent_weight * (left_weight * right_weight) * mean((value_left[k] - value_right[k])**2) / parent_weight**2 to the importance of the feature. Each such quantity is also weighted by the depth of the split. These importances are normalized at the tree level and then averaged across trees. Parameters ---------- max_depth : int, default=4 Splits of depth larger than `max_depth` are not used in this calculation depth_decay_exponent: double, default=2.0 The contribution of each split to the total score is re-weighted by 1 / (1 + `depth`)**2.0. Returns ------- feature_importances_ : ndarray of shape (n_features,) Normalized total parameter heterogeneity inducing importance of each feature """ check_is_fitted(self) all_importances = Parallel(n_jobs=self.n_jobs, backend='threading')( delayed(tree.feature_importances)( max_depth=max_depth, depth_decay_exponent=depth_decay_exponent) for tree in self.estimators_ if tree.tree_.node_count > 1) if not all_importances: return np.zeros(self.n_features_, dtype=np.float64) all_importances = np.mean(all_importances, axis=0, dtype=np.float64) return all_importances / np.sum(all_importances) @property def feature_importances_(self): return self.feature_importances() def _validate_X_predict(self, X): """ Validate X whenever one tries to predict, apply, and other predict methods.""" check_is_fitted(self) return self.estimators_[0]._validate_X_predict(X, check_input=True) def predict_tree_average_full(self, X): """ Return the fitted local parameters for each X, i.e. theta(X). This method simply returns the average of the parameters estimated by each tree. `predict_full` should be preferred over `pred_tree_average_full`, as it performs a more stable averaging across trees. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. 
Internally, it will be converted to ``dtype=np.float64``. Returns ------- theta(X) : array-like of shape (n_samples, n_outputs) The estimated relevant parameters for each row of X """ check_is_fitted(self) # Check data X = self._validate_X_predict(X) # Assign chunk of trees to jobs n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs) # avoid storing the output of every estimator by summing them here y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64) # Parallel loop lock = threading.Lock() Parallel(n_jobs=n_jobs, verbose=self.verbose, backend='threading', require="sharedmem")( delayed(_accumulate_prediction)(e.predict_full, X, [y_hat], lock) for e in self.estimators_) y_hat /= len(self.estimators_) return y_hat def predict_tree_average(self, X): """ Return the prefix of relevant fitted local parameters for each X, i.e. theta(X)[1..n_relevant_outputs]. This method simply returns the average of the parameters estimated by each tree. `predict` should be preferred over `pred_tree_average`, as it performs a more stable averaging across trees. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. Returns ------- theta(X)[1, .., n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs) The estimated relevant parameters for each row of X """ y_hat = self.predict_tree_average_full(X) if self.n_relevant_outputs_ == self.n_outputs_: return y_hat return y_hat[:, :self.n_relevant_outputs_] def predict_moment_and_var(self, X, parameter, slice=None, parallel=True): """ Return the value of the conditional expected moment vector at each sample and for the given parameter estimate for each sample:: M(x; theta(x)) := E[J | X=x] theta(x) - E[A | X=x] where conditional expectations are estimated based on the forest weights, i.e.:: M_tree(x; theta(x)) := (1/ |leaf(x)|) sum_{val sample i in leaf(x)} w[i] (J[i] theta(x) - A[i]) M(x; theta(x) = (1/n_trees) sum_{trees} M_tree(x; theta(x)) where w[i] is the sample weight (1.0 if sample_weight is None), as well as the variance of the local moment vector across trees:: Var(M_tree(x; theta(x))) = (1/n_trees) sum_{trees} M_tree(x; theta(x)) @ M_tree(x; theta(x)).T Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. parameter : array-like of shape (n_samples, n_outputs) An estimate of the parameter theta(x) for each sample x in X slice : list of int or None, default=None If not None, then only the trees with index in slice, will be used to calculate the mean and the variance. parallel : bool , default=True Whether the averaging should happen using parallelism or not. Parallelism adds some overhead but makes it faster with many trees. 
Returns ------- moment : array-like of shape (n_samples, n_outputs) The estimated conditional moment M(x; theta(x)) for each sample x in X moment_var : array-like of shape (n_samples, n_outputs) The variance of the conditional moment Var(M_tree(x; theta(x))) across trees for each sample x """ check_is_fitted(self) # Check data X = self._validate_X_predict(X) # Assign chunk of trees to jobs if slice is None: slice = np.arange(len(self.estimators_)) moment_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64) moment_var_hat = np.zeros((X.shape[0], self.n_outputs_, self.n_outputs_), dtype=np.float64) lock = threading.Lock() if parallel: n_jobs, _, _ = _partition_estimators(len(slice), self.n_jobs) verbose = self.verbose # Parallel loop Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading', require="sharedmem")( delayed(_accumulate_prediction_and_var)(self.estimators_[t].predict_moment, X, [moment_hat], [moment_var_hat], lock, parameter) for t in slice) else: [_accumulate_prediction_and_var(self.estimators_[t].predict_moment, X, [moment_hat], [moment_var_hat], lock, parameter) for t in slice] moment_hat /= len(slice) moment_var_hat /= len(slice) return moment_hat, moment_var_hat def predict_alpha_and_jac(self, X, slice=None, parallel=True): """ Return the value of the conditional jacobian E[J | X=x] and the conditional alpha E[A | X=x] using the forest as kernel weights, i.e.:: alpha(x) = (1/n_trees) sum_{trees} (1/ |leaf(x)|) sum_{val sample i in leaf(x)} w[i] A[i] jac(x) = (1/n_trees) sum_{trees} (1/ |leaf(x)|) sum_{val sample i in leaf(x)} w[i] J[i] where w[i] is the sample weight (1.0 if sample_weight is None). Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. slice : list of int or None, default=None If not None, then only the trees with index in slice, will be used to calculate the mean and the variance. parallel : bool , default=True Whether the averaging should happen using parallelism or not. Parallelism adds some overhead but makes it faster with many trees. Returns ------- alpha : array-like of shape (n_samples, n_outputs) The estimated conditional A, alpha(x) for each sample x in X jac : array-like of shape (n_samples, n_outputs, n_outputs) The estimated conditional J, jac(x) for each sample x in X """ check_is_fitted(self) # Check data X = self._validate_X_predict(X) # Assign chunk of trees to jobs if slice is None: slice = np.arange(len(self.estimators_)) n_jobs = 1 verbose = 0 if parallel: n_jobs, _, _ = _partition_estimators(len(slice), self.n_jobs) verbose = self.verbose alpha_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64) jac_hat = np.zeros((X.shape[0], self.n_outputs_**2), dtype=np.float64) # Parallel loop lock = threading.Lock() Parallel(n_jobs=n_jobs, verbose=verbose, backend='threading', require="sharedmem")( delayed(_accumulate_prediction)(self.estimators_[t].predict_alpha_and_jac, X, [alpha_hat, jac_hat], lock) for t in slice) alpha_hat /= len(slice) jac_hat /= len(slice) return alpha_hat, jac_hat.reshape((-1, self.n_outputs_, self.n_outputs_)) def _predict_point_and_var(self, X, full=False, point=True, var=False, project=False, projector=None): """ An internal private method that coordinates all prediction functionality and tries to share as much computation between different predict methods to avoid re-computation and re-spawining of parallel executions. 
Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. full : bool, default=False Whether to return the full estimated parameter or only the relevant part point : bool, default=True Whether to return the point estimate theta(x) var : bool, default=False Whether to return the co-variance of the point estimate V(theta(x)) project : bool, default=False Whether to project the point estimate using an inner product with a projector, and also return the variance of the projection projector : array-like of shape (n_samples, n_outputs) The projection vector for each sample. The point estimate theta(x) for each sample will be projected and return the inner produce <theta(x), projector(x)> for each sample x. Also the variance information will be about the inner product as opposed to the parameter theta(x). Returns ------- point : array-like of shape (n_samples, x) The point estimate of the parameter theta(x) or its inner product with projector(x) for each sample x in X. If `point=False`, this return value is omitted. If `project=True`, then `x=1`. If `project=False` and `full=True`, then `x=n_outputs`. If `project=False` and `full=False`, then `x=n_relevant_outputs`. var : array-like of shape (n_samples, x, x) or (n_samples, 1) The covariance of the parameter theta(x) or its inner product with projector(x) for each sample x in X. If `var=False`, this return value is omitted. If `project=True`, then return is of shape (n_samples, 1). If `project=False` and `full=True`, then `x=n_outputs`. If `project=False` and `full=False`, then `x=n_relevant_outputs`. """ alpha, jac = self.predict_alpha_and_jac(X) invjac = np.linalg.pinv(jac) parameter = np.einsum('ijk,ik->ij', invjac, alpha) if var: if not self.inference: raise AttributeError("Inference not available. Forest was initiated with `inference=False`.") slices = self.slices_ n_jobs, _, _ = _partition_estimators(len(slices), self.n_jobs) moment_bags, moment_var_bags = zip(*Parallel(n_jobs=n_jobs, verbose=self.verbose, backend='threading')( delayed(self.predict_moment_and_var)(X, parameter, slice=sl, parallel=False) for sl in slices)) moment = np.mean(moment_bags, axis=0) trans_moment_bags = np.moveaxis(moment_bags, 0, -1) sq_between = np.einsum('tij,tjk->tik', trans_moment_bags, np.transpose(trans_moment_bags, (0, 2, 1))) / len(slices) moment_sq = np.einsum('tij,tjk->tik', moment.reshape(moment.shape + (1,)), moment.reshape(moment.shape[:-1] + (1, moment.shape[-1]))) var_between = sq_between - moment_sq pred_cov = np.einsum('ijk,ikm->ijm', invjac, np.einsum('ijk,ikm->ijm', var_between, np.transpose(invjac, (0, 2, 1)))) if project: pred_var = np.einsum('ijk,ikm->ijm', projector.reshape((-1, 1, projector.shape[1])), np.einsum('ijk,ikm->ijm', pred_cov, projector.reshape((-1, projector.shape[1], 1))))[:, 0, 0] else: pred_var = np.diagonal(pred_cov, axis1=1, axis2=2) ##################### # Variance correction ##################### # Subtract the average within bag variance. This ends up being equal to the # overall (E_{all trees}[moment^2] - E_bags[ E[mean_bag_moment]^2 ]) / sizeof(bag). # The negative part is just sq_between. 
var_total = np.mean(moment_var_bags, axis=0) correction = (var_total - sq_between) / (len(slices[0]) - 1) pred_cov_correction = np.einsum('ijk,ikm->ijm', invjac, np.einsum('ijk,ikm->ijm', correction, np.transpose(invjac, (0, 2, 1)))) if project: pred_var_correction = np.einsum('ijk,ikm->ijm', projector.reshape((-1, 1, projector.shape[1])), np.einsum('ijk,ikm->ijm', pred_cov_correction, projector.reshape((-1, projector.shape[1], 1))))[:, 0, 0] else: pred_var_correction = np.diagonal(pred_cov_correction, axis1=1, axis2=2) # Objective bayes debiasing for the diagonals where we know a-prior they are positive # The off diagonals we have no objective prior, so no correction is applied. naive_estimate = pred_var - pred_var_correction se = np.maximum(pred_var, pred_var_correction) * np.sqrt(2.0 / len(slices)) zstat = naive_estimate / np.clip(se, 1e-10, np.inf) numerator = np.exp(- (zstat**2) / 2) / np.sqrt(2.0 * np.pi) denominator = 0.5 * erfc(-zstat / np.sqrt(2.0)) pred_var_corrected = naive_estimate + se * numerator / denominator # Finally correcting the pred_cov or pred_var if project: pred_var = pred_var_corrected else: pred_cov = pred_cov - pred_cov_correction for t in range(self.n_outputs_): pred_cov[:, t, t] = pred_var_corrected[:, t] if project: if point: pred = np.sum(parameter * projector, axis=1) if var: return pred, pred_var else: return pred else: return pred_var else: n_outputs = self.n_outputs_ if full else self.n_relevant_outputs_ if point and var: return (parameter[:, :n_outputs], pred_cov[:, :n_outputs, :n_outputs],) elif point: return parameter[:, :n_outputs] else: return pred_cov[:, :n_outputs, :n_outputs] def predict_full(self, X, interval=False, alpha=0.05): """ Return the fitted local parameters for each x in X, i.e. theta(x). Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. interval : bool, default=False Whether to return a confidence interval too alpha : float in (0, 1), default=0.05 The confidence level of the confidence interval. Returns a symmetric (alpha/2, 1-alpha/2) confidence interval. Returns ------- theta(x) : array-like of shape (n_samples, n_outputs) The estimated relevant parameters for each row x of X lb(x), ub(x) : array-like of shape (n_samples, n_outputs) The lower and upper end of the confidence interval for each parameter. Return value is omitted if `interval=False`. """ if interval: point, pred_var = self._predict_point_and_var(X, full=True, point=True, var=True) lb, ub = np.zeros(point.shape), np.zeros(point.shape) for t in range(self.n_outputs_): lb[:, t] = scipy.stats.norm.ppf(alpha / 2, loc=point[:, t], scale=np.sqrt(pred_var[:, t, t])) ub[:, t] = scipy.stats.norm.ppf(1 - alpha / 2, loc=point[:, t], scale=np.sqrt(pred_var[:, t, t])) return point, lb, ub return self._predict_point_and_var(X, full=True, point=True, var=False) def predict(self, X, interval=False, alpha=0.05): """ Return the prefix of relevant fitted local parameters for each x in X, i.e. theta(x)[1..n_relevant_outputs]. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. interval : bool, default=False Whether to return a confidence interval too alpha : float in (0, 1), default=0.05 The confidence level of the confidence interval. Returns a symmetric (alpha/2, 1-alpha/2) confidence interval. 
Returns ------- theta(X)[1, .., n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs) The estimated relevant parameters for each row of X lb(x), ub(x) : array-like of shape (n_samples, n_relevant_outputs) The lower and upper end of the confidence interval for each parameter. Return value is omitted if `interval=False`. """ if interval: y_hat, lb, ub = self.predict_full(X, interval=interval, alpha=alpha) if self.n_relevant_outputs_ == self.n_outputs_: return y_hat, lb, ub return (y_hat[:, :self.n_relevant_outputs_], lb[:, :self.n_relevant_outputs_], ub[:, :self.n_relevant_outputs_]) else: y_hat = self.predict_full(X, interval=False) if self.n_relevant_outputs_ == self.n_outputs_: return y_hat return y_hat[:, :self.n_relevant_outputs_] def predict_interval(self, X, alpha=0.05): """ Return the confidence interval for the relevant fitted local parameters for each x in X, i.e. theta(x)[1..n_relevant_outputs]. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. alpha : float in (0, 1), default=0.05 The confidence level of the confidence interval. Returns a symmetric (alpha/2, 1-alpha/2) confidence interval. Returns ------- lb(x), ub(x) : array-like of shape (n_samples, n_relevant_outputs) The lower and upper end of the confidence interval for each parameter. Return value is omitted if `interval=False`. """ _, lb, ub = self.predict(X, interval=True, alpha=alpha) return lb, ub def predict_and_var(self, X): """ Return the prefix of relevant fitted local parameters for each x in X, i.e. theta(x)[1..n_relevant_outputs] and their covariance matrix. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. Returns ------- theta(x)[1, .., n_relevant_outputs] : array-like of shape (n_samples, n_relevant_outputs) The estimated relevant parameters for each row of X var(theta(x)) : array-like of shape (n_samples, n_relevant_outputs, n_relevant_outputs) The covariance of theta(x)[1, .., n_relevant_outputs] """ return self._predict_point_and_var(X, full=False, point=True, var=True) def predict_var(self, X): """ Return the covariance matrix of the prefix of relevant fitted local parameters for each x in X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. Returns ------- var(theta(x)) : array-like of shape (n_samples, n_relevant_outputs, n_relevant_outputs) The covariance of theta(x)[1, .., n_relevant_outputs] """ return self._predict_point_and_var(X, full=False, point=False, var=True) def prediction_stderr(self, X): """ Return the standard deviation of each coordinate of the prefix of relevant fitted local parameters for each x in X. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. 
Returns ------- std(theta(x)) : array-like of shape (n_samples, n_relevant_outputs) The standard deviation of each theta(x)[i] for i in {1, .., n_relevant_outputs} """ return np.sqrt(np.diagonal(self.predict_var(X), axis1=1, axis2=2)) def _check_projector(self, X, projector): """ validate the projector parameter """ X, projector = check_X_y(X, projector, multi_output=True, y_numeric=True) if projector.ndim == 1: projector = projector.reshape((-1, 1)) if self.n_outputs_ > self.n_relevant_outputs_: projector = np.hstack([projector, np.zeros((projector.shape[0], self.n_outputs_ - self.n_relevant_outputs_))]) return X, projector def predict_projection_and_var(self, X, projector): """ Return the inner product of the prefix of relevant fitted local parameters for each x in X, i.e. theta(x)[1..n_relevant_outputs], with a projector vector projector(x), i.e.:: mu(x) := <theta(x)[1..n_relevant_outputs], projector(x)> as well as the variance of mu(x). Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. projector : array-like of shape (n_samples, n_relevant_outputs) The projector vector for each sample x in X Returns ------- mu(x) : array-like of shape (n_samples, 1) The estimated inner product of the relevant parameters with the projector for each row x of X var(mu(x)) : array-like of shape (n_samples, 1) The variance of the estimated inner product """ X, projector = self._check_projector(X, projector) return self._predict_point_and_var(X, full=False, point=True, var=True, project=True, projector=projector) def predict_projection(self, X, projector): """ Return the inner product of the prefix of relevant fitted local parameters for each x in X, i.e. theta(x)[1..n_relevant_outputs], with a projector vector projector(x), i.e.:: mu(x) := <theta(x)[1..n_relevant_outputs], projector(x)> Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. projector : array-like of shape (n_samples, n_relevant_outputs) The projector vector for each sample x in X Returns ------- mu(x) : array-like of shape (n_samples, 1) The estimated inner product of the relevant parameters with the projector for each row x of X """ X, projector = self._check_projector(X, projector) return self._predict_point_and_var(X, full=False, point=True, var=False, project=True, projector=projector) def predict_projection_var(self, X, projector): """ Return the variance of the inner product of the prefix of relevant fitted local parameters for each x in X, i.e. theta(x)[1..n_relevant_outputs], with a projector vector projector(x), i.e.:: Var(mu(x)) for mu(x) := <theta(x)[1..n_relevant_outputs], projector(x)> Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``dtype=np.float64``. projector : array-like of shape (n_samples, n_relevant_outputs) The projector vector for each sample x in X Returns ------- var(mu(x)) : array-like of shape (n_samples, 1) The variance of the estimated inner product """ X, projector = self._check_projector(X, projector) return self._predict_point_and_var(X, full=False, point=False, var=True, project=True, projector=projector) def oob_predict(self, Xtrain): """ Returns the relevant output predictions for each of the training data points, when only trees where that data point was not used are incorporated. 
This method is not available is the estimator was trained with `warm_start=True`. Parameters ---------- Xtrain : (n_training_samples, n_features) matrix Must be the same exact X matrix that was passed to the forest at fit time. Returns ------- oob_preds : (n_training_samples, n_relevant_outputs) matrix The out-of-bag predictions of the relevant output parameters for each of the training points """ if self.warm_start_: raise AttributeError("`oob_predict` is not available when " "the estimator was fitted with `warm_start=True`") # avoid storing the output of every estimator by summing them here alpha_hat = np.zeros((Xtrain.shape[0], self.n_outputs_), dtype=np.float64) jac_hat = np.zeros((Xtrain.shape[0], self.n_outputs_**2), dtype=np.float64) counts = np.zeros((Xtrain.shape[0],), dtype=np.intp) subsample_inds = self.get_subsample_inds() # Parallel loop lock = threading.Lock() Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend='threading', require="sharedmem")( delayed(_accumulate_oob_preds)(tree, Xtrain, sinds, alpha_hat, jac_hat, counts, lock) for tree, sinds in zip(self.estimators_, subsample_inds)) pos_count = (counts > 0) alpha_hat[pos_count] /= counts[pos_count].reshape((-1, 1)) jac_hat[pos_count] /= counts[pos_count].reshape((-1, 1)) invjac = np.linalg.pinv(jac_hat.reshape((-1, self.n_outputs_, self.n_outputs_))) oob_preds = np.einsum('ijk,ik->ij', invjac, alpha_hat)[:, :self.n_relevant_outputs_] oob_preds[~pos_count] = np.nan return oob_preds
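# A minimal usage sketch, assuming the concrete CausalForest subclass that the
# package builds on top of this abstract base (BaseGRF itself cannot be
# instantiated). It fits a forest on synthetic data whose treatment effect
# varies with X[:, 0] and queries the subforest-based confidence intervals
# implemented above via predict(..., interval=True).
import numpy as np
from econml.grf import CausalForest

rng = np.random.RandomState(0)
X = rng.normal(size=(1000, 4))
T = rng.binomial(1, 0.5, size=1000)
y = T * X[:, 0] + rng.normal(size=1000)  # effect depends on X[:, 0]

est = CausalForest(n_estimators=400, inference=True, random_state=0)
est.fit(X, T, y)
point, lb, ub = est.predict(X[:3], interval=True, alpha=0.05)
print(point.ravel(), lb.ravel(), ub.ravel())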
mit
ageron/tensorflow
tensorflow/contrib/gan/python/estimator/python/stargan_estimator_test.py
7
12092
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for TF-GAN's stargan_estimator.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile from absl.testing import parameterized import numpy as np from tensorflow.contrib import layers from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples from tensorflow.contrib.gan.python.estimator.python import stargan_estimator_impl as estimator from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import metrics as metrics_lib from tensorflow.python.ops import variable_scope from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import learning_rate_decay from tensorflow.python.training import training from tensorflow.python.training import training_util def dummy_generator_fn(input_data, input_data_domain_label, mode): del input_data_domain_label, mode return variable_scope.get_variable('dummy_g', initializer=0.5) * input_data def dummy_discriminator_fn(input_data, num_domains, mode): del mode hidden = layers.flatten(input_data) output_src = math_ops.reduce_mean(hidden, axis=1) output_cls = layers.fully_connected( inputs=hidden, num_outputs=num_domains, scope='debug') return output_src, output_cls class StarGetGANModelTest(test.TestCase, parameterized.TestCase): """Tests that `StarGetGANModel` produces the correct model.""" @parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN), ('eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.PREDICT)) def test_get_gan_model(self, mode): with ops.Graph().as_default(): input_data = array_ops.ones([6, 4, 4, 3]) input_data_domain_label = array_ops.one_hot([0] * 6, 5) gan_model = estimator._get_gan_model( mode, dummy_generator_fn, dummy_discriminator_fn, input_data, input_data_domain_label, add_summaries=False) self.assertEqual(input_data, gan_model.input_data) self.assertIsNotNone(gan_model.generated_data) self.assertIsNotNone(gan_model.generated_data_domain_target) self.assertLen(gan_model.generator_variables, 1) self.assertIsNotNone(gan_model.generator_scope) self.assertIsNotNone(gan_model.generator_fn) if mode == model_fn_lib.ModeKeys.PREDICT: self.assertIsNone(gan_model.input_data_domain_label) self.assertEqual(input_data_domain_label, gan_model.generated_data_domain_target) self.assertIsNone(gan_model.reconstructed_data) self.assertIsNone(gan_model.discriminator_input_data_source_predication) self.assertIsNone( gan_model.discriminator_generated_data_source_predication) self.assertIsNone(gan_model.discriminator_input_data_domain_predication) 
self.assertIsNone( gan_model.discriminator_generated_data_domain_predication) self.assertIsNone(gan_model.discriminator_variables) self.assertIsNone(gan_model.discriminator_scope) self.assertIsNone(gan_model.discriminator_fn) else: self.assertEqual(input_data_domain_label, gan_model.input_data_domain_label) self.assertIsNotNone(gan_model.reconstructed_data.shape) self.assertIsNotNone( gan_model.discriminator_input_data_source_predication) self.assertIsNotNone( gan_model.discriminator_generated_data_source_predication) self.assertIsNotNone( gan_model.discriminator_input_data_domain_predication) self.assertIsNotNone( gan_model.discriminator_generated_data_domain_predication) self.assertLen(gan_model.discriminator_variables, 2) # 1 FC layer self.assertIsNotNone(gan_model.discriminator_scope) self.assertIsNotNone(gan_model.discriminator_fn) def get_dummy_gan_model(): """Similar to get_gan_model().""" # TODO(joelshor): Find a better way of creating a variable scope. with variable_scope.variable_scope('generator') as gen_scope: gen_var = variable_scope.get_variable('dummy_var', initializer=0.0) with variable_scope.variable_scope('discriminator') as dis_scope: dis_var = variable_scope.get_variable('dummy_var', initializer=0.0) return tfgan_tuples.StarGANModel( input_data=array_ops.ones([1, 2, 2, 3]), input_data_domain_label=array_ops.ones([1, 2]), generated_data=array_ops.ones([1, 2, 2, 3]), generated_data_domain_target=array_ops.ones([1, 2]), reconstructed_data=array_ops.ones([1, 2, 2, 3]), discriminator_input_data_source_predication=array_ops.ones([1]) * dis_var, discriminator_generated_data_source_predication=array_ops.ones( [1]) * gen_var * dis_var, discriminator_input_data_domain_predication=array_ops.ones([1, 2 ]) * dis_var, discriminator_generated_data_domain_predication=array_ops.ones([1, 2]) * gen_var * dis_var, generator_variables=[gen_var], generator_scope=gen_scope, generator_fn=None, discriminator_variables=[dis_var], discriminator_scope=dis_scope, discriminator_fn=None) def dummy_loss_fn(gan_model): loss = math_ops.reduce_sum( gan_model.discriminator_input_data_domain_predication - gan_model.discriminator_generated_data_domain_predication) loss += math_ops.reduce_sum(gan_model.input_data - gan_model.generated_data) return tfgan_tuples.GANLoss(loss, loss) def get_metrics(gan_model): return { 'mse_custom_metric': metrics_lib.mean_squared_error(gan_model.input_data, gan_model.generated_data) } class GetEstimatorSpecTest(test.TestCase, parameterized.TestCase): """Tests that the EstimatorSpec is constructed appropriately.""" @classmethod def setUpClass(cls): super(GetEstimatorSpecTest, cls).setUpClass() cls._generator_optimizer = training.GradientDescentOptimizer(1.0) cls._discriminator_optimizer = training.GradientDescentOptimizer(1.0) @parameterized.named_parameters(('train', model_fn_lib.ModeKeys.TRAIN), ('eval', model_fn_lib.ModeKeys.EVAL), ('predict', model_fn_lib.ModeKeys.PREDICT)) def test_get_estimator_spec(self, mode): with ops.Graph().as_default(): self._gan_model = get_dummy_gan_model() spec = estimator._get_estimator_spec( mode, self._gan_model, loss_fn=dummy_loss_fn, get_eval_metric_ops_fn=get_metrics, generator_optimizer=self._generator_optimizer, discriminator_optimizer=self._discriminator_optimizer) self.assertEqual(mode, spec.mode) if mode == model_fn_lib.ModeKeys.PREDICT: self.assertEqual(self._gan_model.generated_data, spec.predictions) elif mode == model_fn_lib.ModeKeys.TRAIN: self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar 
self.assertIsNotNone(spec.train_op) self.assertIsNotNone(spec.training_hooks) elif mode == model_fn_lib.ModeKeys.EVAL: self.assertEqual(self._gan_model.generated_data, spec.predictions) self.assertShapeEqual(np.array(0), spec.loss) # must be a scalar self.assertIsNotNone(spec.eval_metric_ops) # TODO(joelshor): Add pandas test. class StarGANEstimatorIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn, prediction_size, lr_decay=False): def make_opt(): gstep = training_util.get_or_create_global_step() lr = learning_rate_decay.exponential_decay(1.0, gstep, 10, 0.9) return training.GradientDescentOptimizer(lr) gopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0) dopt = make_opt if lr_decay else training.GradientDescentOptimizer(1.0) est = estimator.StarGANEstimator( generator_fn=dummy_generator_fn, discriminator_fn=dummy_discriminator_fn, loss_fn=dummy_loss_fn, generator_optimizer=gopt, discriminator_optimizer=dopt, get_eval_metric_ops_fn=get_metrics, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', scores) self.assertEqual(scores['discriminator_loss'] + scores['generator_loss'], scores['loss']) self.assertIn('mse_custom_metric', scores) # PREDICT predictions = np.array([x for x in est.predict(predict_input_fn)]) self.assertAllEqual(prediction_size, predictions.shape) @staticmethod def _numpy_input_fn_wrapper(numpy_input_fn, batch_size, label_size): """Wrapper to remove the dictionary in numpy_input_fn. NOTE: We create the domain_label here because the model expect a fully define batch_size from the input. Args: numpy_input_fn: input_fn created from numpy_io batch_size: (int) number of items for each batch label_size: (int) number of domains Returns: a new input_fn """ def new_input_fn(): features = numpy_input_fn() return features['x'], array_ops.one_hot([0] * batch_size, label_size) return new_input_fn def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" batch_size = 5 img_size = 8 channel_size = 3 label_size = 3 image_data = np.zeros( [batch_size, img_size, img_size, channel_size], dtype=np.float32) train_input_fn = numpy_io.numpy_input_fn( x={'x': image_data}, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': image_data}, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': image_data}, shuffle=False) train_input_fn = self._numpy_input_fn_wrapper(train_input_fn, batch_size, label_size) eval_input_fn = self._numpy_input_fn_wrapper(eval_input_fn, batch_size, label_size) predict_input_fn = self._numpy_input_fn_wrapper(predict_input_fn, batch_size, label_size) predict_input_fn = estimator.stargan_prediction_input_fn_wrapper( predict_input_fn) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, prediction_size=[batch_size, img_size, img_size, channel_size]) if __name__ == '__main__': test.main()
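# A minimal standalone sketch mirroring _test_complete_flow above: it wires the
# module's own dummy networks and loss into a StarGANEstimator. Only names
# already defined or imported in this file are used; the helper function itself
# is hypothetical and exists purely for illustration.
def build_dummy_stargan_estimator(model_dir=None):
  return estimator.StarGANEstimator(
      generator_fn=dummy_generator_fn,
      discriminator_fn=dummy_discriminator_fn,
      loss_fn=dummy_loss_fn,
      generator_optimizer=training.GradientDescentOptimizer(1.0),
      discriminator_optimizer=training.GradientDescentOptimizer(1.0),
      get_eval_metric_ops_fn=get_metrics,
      model_dir=model_dir)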
apache-2.0
zorroblue/scikit-learn
sklearn/feature_selection/__init__.py
140
1302
""" The :mod:`sklearn.feature_selection` module implements feature selection algorithms. It currently includes univariate filter selection methods and the recursive feature elimination algorithm. """ from .univariate_selection import chi2 from .univariate_selection import f_classif from .univariate_selection import f_oneway from .univariate_selection import f_regression from .univariate_selection import SelectPercentile from .univariate_selection import SelectKBest from .univariate_selection import SelectFpr from .univariate_selection import SelectFdr from .univariate_selection import SelectFwe from .univariate_selection import GenericUnivariateSelect from .variance_threshold import VarianceThreshold from .rfe import RFE from .rfe import RFECV from .from_model import SelectFromModel from .mutual_info_ import mutual_info_regression, mutual_info_classif __all__ = ['GenericUnivariateSelect', 'RFE', 'RFECV', 'SelectFdr', 'SelectFpr', 'SelectFwe', 'SelectKBest', 'SelectFromModel', 'SelectPercentile', 'VarianceThreshold', 'chi2', 'f_classif', 'f_oneway', 'f_regression', 'mutual_info_classif', 'mutual_info_regression']
bsd-3-clause
idlead/scikit-learn
sklearn/svm/tests/test_sparse.py
22
13181
from nose.tools import assert_raises, assert_true, assert_false import numpy as np from scipy import sparse from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal) from sklearn import datasets, svm, linear_model, base from sklearn.datasets import make_classification, load_digits, make_blobs from sklearn.svm.tests import test_svm from sklearn.exceptions import ConvergenceWarning from sklearn.utils.extmath import safe_sparse_dot from sklearn.utils.testing import assert_warns, assert_raise_message # test sample 1 X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]) X_sp = sparse.lil_matrix(X) Y = [1, 1, 1, 2, 2, 2] T = np.array([[-1, -1], [2, 2], [3, 2]]) true_result = [1, 2, 2] # test sample 2 X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ], [0, 0, 2], [3, 3, 3]]) X2_sp = sparse.dok_matrix(X2) Y2 = [1, 2, 2, 2, 3] T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]]) true_result2 = [1, 2, 3] iris = datasets.load_iris() # permute rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # sparsify iris.data = sparse.csr_matrix(iris.data) def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test): dense_svm.fit(X_train.toarray(), y_train) if sparse.isspmatrix(X_test): X_test_dense = X_test.toarray() else: X_test_dense = X_test sparse_svm.fit(X_train, y_train) assert_true(sparse.issparse(sparse_svm.support_vectors_)) assert_true(sparse.issparse(sparse_svm.dual_coef_)) assert_array_almost_equal(dense_svm.support_vectors_, sparse_svm.support_vectors_.toarray()) assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray()) if dense_svm.kernel == "linear": assert_true(sparse.issparse(sparse_svm.coef_)) assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray()) assert_array_almost_equal(dense_svm.support_, sparse_svm.support_) assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test)) assert_array_almost_equal(dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test)) assert_array_almost_equal(dense_svm.decision_function(X_test_dense), sparse_svm.decision_function(X_test_dense)) if isinstance(dense_svm, svm.OneClassSVM): msg = "cannot use sparse input in 'OneClassSVM' trained on dense data" else: assert_array_almost_equal(dense_svm.predict_proba(X_test_dense), sparse_svm.predict_proba(X_test), 4) msg = "cannot use sparse input in 'SVC' trained on dense data" if sparse.isspmatrix(X_test): assert_raise_message(ValueError, msg, dense_svm.predict, X_test) def test_svc(): """Check that sparse SVC gives the same result as SVC""" # many class dataset: X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0) X_blobs = sparse.csr_matrix(X_blobs) datasets = [[X_sp, Y, T], [X2_sp, Y2, T2], [X_blobs[:80], y_blobs[:80], X_blobs[80:]], [iris.data, iris.target, iris.data]] kernels = ["linear", "poly", "rbf", "sigmoid"] for dataset in datasets: for kernel in kernels: clf = svm.SVC(kernel=kernel, probability=True, random_state=0, decision_function_shape='ovo') sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0, decision_function_shape='ovo') check_svm_model_equal(clf, sp_clf, *dataset) def test_unsorted_indices(): # test that the result with sorted and unsorted indices in csr is the same # we use a subset of digits as iris, blobs or make_classification didn't # show the problem digits = load_digits() X, y = digits.data[:50], digits.target[:50] X_test = 
sparse.csr_matrix(digits.data[50:100]) X_sparse = sparse.csr_matrix(X) coef_dense = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X, y).coef_ sparse_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse, y) coef_sorted = sparse_svc.coef_ # make sure dense and sparse SVM give the same result assert_array_almost_equal(coef_dense, coef_sorted.toarray()) X_sparse_unsorted = X_sparse[np.arange(X.shape[0])] X_test_unsorted = X_test[np.arange(X_test.shape[0])] # make sure we scramble the indices assert_false(X_sparse_unsorted.has_sorted_indices) assert_false(X_test_unsorted.has_sorted_indices) unsorted_svc = svm.SVC(kernel='linear', probability=True, random_state=0).fit(X_sparse_unsorted, y) coef_unsorted = unsorted_svc.coef_ # make sure unsorted indices give same result assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray()) assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted), sparse_svc.predict_proba(X_test)) def test_svc_with_custom_kernel(): kfunc = lambda x, y: safe_sparse_dot(x, y.T) clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y) clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y) assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp)) def test_svc_iris(): # Test the sparse SVC with the iris dataset for k in ('linear', 'poly', 'rbf'): sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target) clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target) assert_array_almost_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) assert_array_almost_equal( clf.predict(iris.data.toarray()), sp_clf.predict(iris.data)) if k == 'linear': assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray()) def test_sparse_decision_function(): #Test decision_function #Sanity check, test that decision_function implemented in python #returns the same as the one in libsvm # multi class: svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo') clf = svc.fit(iris.data, iris.target) dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_ assert_array_almost_equal(dec, clf.decision_function(iris.data)) # binary: clf.fit(X, Y) dec = np.dot(X, clf.coef_.T) + clf.intercept_ prediction = clf.predict(X) assert_array_almost_equal(dec.ravel(), clf.decision_function(X)) assert_array_almost_equal( prediction, clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()]) expected = np.array([-1., -0.66, -1., 0.66, 1., 1.]) assert_array_almost_equal(clf.decision_function(X), expected, 2) def test_error(): # Test that it gives proper exception on deficient input # impossible value of C assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y) # impossible value of nu clf = svm.NuSVC(nu=0.0) assert_raises(ValueError, clf.fit, X_sp, Y) Y2 = Y[:-1] # wrong dimensions for labels assert_raises(ValueError, clf.fit, X_sp, Y2) clf = svm.SVC() clf.fit(X_sp, Y) assert_array_equal(clf.predict(T), true_result) def test_linearsvc(): # Similar to test_SVC clf = svm.LinearSVC(random_state=0).fit(X, Y) sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y) assert_true(sp_clf.fit_intercept) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp)) clf.fit(X2, Y2) sp_clf.fit(X2_sp, Y2) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4) def 
test_linearsvc_iris(): # Test the sparse LinearSVC with the iris dataset sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target) clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target) assert_equal(clf.fit_intercept, sp_clf.fit_intercept) assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1) assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1) assert_array_almost_equal( clf.predict(iris.data.toarray()), sp_clf.predict(iris.data)) # check decision_function pred = np.argmax(sp_clf.decision_function(iris.data), 1) assert_array_almost_equal(pred, clf.predict(iris.data.toarray())) # sparsify the coefficients on both models and check that they still # produce the same results clf.sparsify() assert_array_equal(pred, clf.predict(iris.data)) sp_clf.sparsify() assert_array_equal(pred, sp_clf.predict(iris.data)) def test_weight(): # Test class weights X_, y_ = make_classification(n_samples=200, n_features=100, weights=[0.833, 0.167], random_state=0) X_ = sparse.csr_matrix(X_) for clf in (linear_model.LogisticRegression(), svm.LinearSVC(random_state=0), svm.SVC()): clf.set_params(class_weight={0: 5}) clf.fit(X_[:180], y_[:180]) y_pred = clf.predict(X_[180:]) assert_true(np.sum(y_pred == y_[180:]) >= 11) def test_sample_weights(): # Test weights on individual samples clf = svm.SVC() clf.fit(X_sp, Y) assert_array_equal(clf.predict([X[2]]), [1.]) sample_weight = [.1] * 3 + [10] * 3 clf.fit(X_sp, Y, sample_weight=sample_weight) assert_array_equal(clf.predict([X[2]]), [2.]) def test_sparse_liblinear_intercept_handling(): # Test that sparse liblinear honours intercept_scaling param test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC) def test_sparse_oneclasssvm(): """Check that sparse OneClassSVM gives the same result as dense OneClassSVM""" # many class dataset: X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0) X_blobs = sparse.csr_matrix(X_blobs) datasets = [[X_sp, None, T], [X2_sp, None, T2], [X_blobs[:80], None, X_blobs[80:]], [iris.data, None, iris.data]] kernels = ["linear", "poly", "rbf", "sigmoid"] for dataset in datasets: for kernel in kernels: clf = svm.OneClassSVM(kernel=kernel, random_state=0) sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0) check_svm_model_equal(clf, sp_clf, *dataset) def test_sparse_realdata(): # Test on a subset from the 20newsgroups dataset. # This catchs some bugs if input is not correctly converted into # sparse format or weights are not correctly initialized. 
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069]) indices = np.array([6, 5, 35, 31]) indptr = np.array( [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4]) X = sparse.csr_matrix((data, indices, indptr)) y = np.array( [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2., 0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2., 0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1., 3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2., 0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2., 3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1., 1., 3.]) clf = svm.SVC(kernel='linear').fit(X.toarray(), y) sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y) assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray()) assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray()) def test_sparse_svc_clone_with_callable_kernel(): # Test that the "dense_fit" is called even though we use sparse input # meaning that everything works fine. a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True, random_state=0) b = base.clone(a) b.fit(X_sp, Y) pred = b.predict(X_sp) b.predict_proba(X_sp) dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T), probability=True, random_state=0) pred_dense = dense_svm.fit(X, Y).predict(X) assert_array_equal(pred_dense, pred) # b.decision_function(X_sp) # XXX : should be supported def test_timeout(): sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True, random_state=0, max_iter=1) assert_warns(ConvergenceWarning, sp.fit, X_sp, Y) def test_consistent_proba(): a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_1 = a.fit(X, Y).predict_proba(X) a = svm.SVC(probability=True, max_iter=1, random_state=0) proba_2 = a.fit(X, Y).predict_proba(X) assert_array_almost_equal(proba_1, proba_2)
bsd-3-clause
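The record above stores scikit-learn's sparse-SVM test module with its line breaks collapsed. As a minimal, standalone sketch of the dense-versus-sparse equivalence it checks (assuming numpy, scipy and scikit-learn are installed; the toy data below is illustrative, not taken from the stored file):

# Illustrative sketch, not part of the dataset record above: fit the same
# linear SVC on dense and sparse versions of one toy dataset and confirm
# the two fits agree, which is the pattern test_sparse.py automates.
import numpy as np
from scipy import sparse
from sklearn import svm

X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
X_sp = sparse.csr_matrix(X)          # same data, sparse storage

dense_clf = svm.SVC(kernel="linear").fit(X, y)
sparse_clf = svm.SVC(kernel="linear").fit(X_sp, y)

# Support vectors and predictions should agree between the two fits.
np.testing.assert_array_almost_equal(dense_clf.support_vectors_,
                                     sparse_clf.support_vectors_.toarray())
np.testing.assert_array_almost_equal(dense_clf.predict(X),
                                     sparse_clf.predict(X_sp))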
verdverm/pypge
test/test_odeint.py
1
1852
#!/usr/bin/env python
"""
Program to plot the motion of a "springy pendulum".
(kindly taken from:
http://julianoliver.com/share/free-science-books/comp-phys-python.pdf
[page 102-103])

We actually have FOUR parameters to track, here:
L, L dot, theta, and theta dot. So instead of the usual Nx2 array,
make it Nx4. Each 4-element row will be used for the state of the
system at one instant, and each instant is separated by time dt.
I'll use the order given above.
"""
import numpy as np
import scipy
from scipy.integrate import odeint

## Nx4
N = 1000            # number of steps to take
y = np.zeros([4])
Lo = 1.0            # unstretched spring length
L = 1.0             # Initial stretch of spring
vo = 0.0            # initial velocity
thetao = 0.3        # radians
omegao = 0.0        # initial angular velocity

y[0] = L            # set initial state
y[1] = vo
y[2] = thetao
y[3] = omegao

time = np.linspace(0, 25, N)
k = 3.5             # spring constant, in N/m
m = 0.2             # mass, in kg
gravity = 9.8       # g, in m/s^2


def springpendulum(y, time):
    """
    This defines the set of differential equations we are solving.
    Note that there are more than just the usual two derivatives!
    """
    g0 = y[1]
    g1 = (Lo + y[0]) * y[3] * y[3] - k / m * y[0] + gravity * np.cos(y[2])
    g2 = y[3]
    g3 = -(gravity * np.sin(y[2]) + 2.0 * y[1] * y[3]) / (Lo + y[0])
    return np.array([g0, g1, g2, g3])

# Now we do the calculations.
answer = scipy.integrate.odeint(springpendulum, y, time)

# Now graph the results.
# Rather than graph in terms of t, I'm going to graph the track the
# mass takes in 2D. This will require that I change L,theta data
# to x,y data.
xdata = (Lo + answer[:, 0]) * np.sin(answer[:, 2])
ydata = -(Lo + answer[:, 0]) * np.cos(answer[:, 2])

import os
if os.getenv("TRAVIS", "false") != "true":
    import matplotlib.pyplot as plt
    plt.plot(xdata, ydata, 'r-')
    plt.xlabel("Horizontal position")
    plt.ylabel("Vertical position")
    # plt.show()
mit
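The springy-pendulum script above follows the standard odeint pattern: a flat state vector, a derivative function f(y, t), and a time grid. A minimal sketch of the same pattern on a simpler system (a damped oscillator; the constants are invented for illustration):

# Illustrative sketch, not from the repository above: integrate a damped
# harmonic oscillator with the same odeint call signature.
import numpy as np
from scipy.integrate import odeint

def oscillator(state, t, k=3.5, m=0.2, c=0.1):
    x, v = state
    return [v, -(k * x + c * v) / m]

t = np.linspace(0, 25, 1000)
trajectory = odeint(oscillator, [1.0, 0.0], t)   # shape (1000, 2)
print(trajectory[-1])                            # final position and velocity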
M-R-Houghton/euroscipy_2015
bokeh/bokeh/compat/mplexporter/tools.py
75
1732
""" Tools for matplotlib plot exporting """ def ipynb_vega_init(): """Initialize the IPython notebook display elements This function borrows heavily from the excellent vincent package: http://github.com/wrobstory/vincent """ try: from IPython.core.display import display, HTML except ImportError: print('IPython Notebook could not be loaded.') require_js = ''' if (window['d3'] === undefined) {{ require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }}); require(["d3"], function(d3) {{ window.d3 = d3; {0} }}); }}; if (window['topojson'] === undefined) {{ require.config( {{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }} ); require(["topojson"], function(topojson) {{ window.topojson = topojson; }}); }}; ''' d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js" d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/" "d3.layout.cloud.js") topojson_js_url = "http://d3js.org/topojson.v1.min.js" vega_js_url = 'http://trifacta.github.com/vega/vega.js' dep_libs = '''$.getScript("%s", function() { $.getScript("%s", function() { $.getScript("%s", function() { $.getScript("%s", function() { $([IPython.events]).trigger("vega_loaded.vincent"); }) }) }) });''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url, topojson_js_url, vega_js_url) load_js = require_js.format(dep_libs) html = '<script>'+load_js+'</script>' display(HTML(html))
mit
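The ipynb_vega_init function above relies on doubled braces to keep literal JavaScript braces intact through str.format. A tiny sketch of that escaping rule in isolation (the template and log message are invented for illustration):

# Illustrative sketch, not part of tools.py: {{ and }} survive str.format()
# as literal braces, while {0} is substituted.
template = '''
if (window['d3'] === undefined) {{
    require(["d3"], function(d3) {{
        window.d3 = d3;
        {0}
    }});
}};
'''
print(template.format('console.log("d3 ready");'))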
TomAugspurger/pandas
pandas/tests/series/methods/test_cov_corr.py
1
4778
import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import Series, isna import pandas._testing as tm class TestSeriesCov: def test_cov(self, datetime_series): # full overlap tm.assert_almost_equal( datetime_series.cov(datetime_series), datetime_series.std() ** 2 ) # partial overlap tm.assert_almost_equal( datetime_series[:15].cov(datetime_series[5:]), datetime_series[5:15].std() ** 2, ) # No overlap assert np.isnan(datetime_series[::2].cov(datetime_series[1::2])) # all NA cp = datetime_series[:10].copy() cp[:] = np.nan assert isna(cp.cov(cp)) # min_periods assert isna(datetime_series[:15].cov(datetime_series[5:], min_periods=12)) ts1 = datetime_series[:15].reindex(datetime_series.index) ts2 = datetime_series[5:].reindex(datetime_series.index) assert isna(ts1.cov(ts2, min_periods=12)) class TestSeriesCorr: @td.skip_if_no_scipy def test_corr(self, datetime_series): import scipy.stats as stats # full overlap tm.assert_almost_equal(datetime_series.corr(datetime_series), 1) # partial overlap tm.assert_almost_equal(datetime_series[:15].corr(datetime_series[5:]), 1) assert isna(datetime_series[:15].corr(datetime_series[5:], min_periods=12)) ts1 = datetime_series[:15].reindex(datetime_series.index) ts2 = datetime_series[5:].reindex(datetime_series.index) assert isna(ts1.corr(ts2, min_periods=12)) # No overlap assert np.isnan(datetime_series[::2].corr(datetime_series[1::2])) # all NA cp = datetime_series[:10].copy() cp[:] = np.nan assert isna(cp.corr(cp)) A = tm.makeTimeSeries() B = tm.makeTimeSeries() result = A.corr(B) expected, _ = stats.pearsonr(A, B) tm.assert_almost_equal(result, expected) @td.skip_if_no_scipy def test_corr_rank(self): import scipy.stats as stats # kendall and spearman A = tm.makeTimeSeries() B = tm.makeTimeSeries() A[-5:] = A[:5] result = A.corr(B, method="kendall") expected = stats.kendalltau(A, B)[0] tm.assert_almost_equal(result, expected) result = A.corr(B, method="spearman") expected = stats.spearmanr(A, B)[0] tm.assert_almost_equal(result, expected) # results from R A = Series( [ -0.89926396, 0.94209606, -1.03289164, -0.95445587, 0.76910310, -0.06430576, -2.09704447, 0.40660407, -0.89926396, 0.94209606, ] ) B = Series( [ -1.01270225, -0.62210117, -1.56895827, 0.59592943, -0.01680292, 1.17258718, -1.06009347, -0.10222060, -0.89076239, 0.89372375, ] ) kexp = 0.4319297 sexp = 0.5853767 tm.assert_almost_equal(A.corr(B, method="kendall"), kexp) tm.assert_almost_equal(A.corr(B, method="spearman"), sexp) def test_corr_invalid_method(self): # GH PR #22298 s1 = pd.Series(np.random.randn(10)) s2 = pd.Series(np.random.randn(10)) msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, " with pytest.raises(ValueError, match=msg): s1.corr(s2, method="____") def test_corr_callable_method(self, datetime_series): # simple correlation example # returns 1 if exact equality, 0 otherwise my_corr = lambda a, b: 1.0 if (a == b).all() else 0.0 # simple example s1 = Series([1, 2, 3, 4, 5]) s2 = Series([5, 4, 3, 2, 1]) expected = 0 tm.assert_almost_equal(s1.corr(s2, method=my_corr), expected) # full overlap tm.assert_almost_equal( datetime_series.corr(datetime_series, method=my_corr), 1.0 ) # partial overlap tm.assert_almost_equal( datetime_series[:15].corr(datetime_series[5:], method=my_corr), 1.0 ) # No overlap assert np.isnan( datetime_series[::2].corr(datetime_series[1::2], method=my_corr) ) # dataframe example df = pd.DataFrame([s1, s2]) expected = pd.DataFrame([{0: 1.0, 1: 0}, {0: 0, 1: 1.0}]) 
tm.assert_almost_equal(df.transpose().corr(method=my_corr), expected)
bsd-3-clause
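The test module above exercises Series.cov and Series.corr, including user-supplied correlation callables. A minimal sketch of those call forms on made-up data (the rank-based methods assume scipy is installed):

# Illustrative sketch, not part of the pandas test module above: the three
# built-in correlation methods plus a callable on a small pair of Series.
import pandas as pd

s1 = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
s2 = pd.Series([2.0, 1.0, 4.0, 3.0, 5.0])

print(s1.corr(s2))                      # Pearson (default)
print(s1.corr(s2, method="spearman"))   # rank correlation
print(s1.corr(s2, method="kendall"))

# A callable receives the two aligned value arrays and returns a scalar.
exact_match = lambda a, b: 1.0 if (a == b).all() else 0.0
print(s1.corr(s2, method=exact_match))  # 0.0 for this pair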
dandanvidi/effective-capacity
scripts/pFVA.py
3
1834
import pandas as pd
from cobra.core import Metabolite, Reaction
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible, revert_to_reversible
from cobra.flux_analysis.variability import flux_variability_analysis

gc = pd.DataFrame.from_csv('../data/growth_conditions.csv')
gc = gc[gc.media_key > 0]

m = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
convert_to_irreversible(m)

fake = Metabolite(id='fake')
m.add_metabolites(fake)
for r in m.reactions:
    r.add_metabolites({fake: 1})
flux_counter = Reaction(name='flux_counter')
flux_counter.add_metabolites(metabolites={fake: -1})
m.add_reaction(flux_counter)
m.change_objective(flux_counter)

m.reactions.get_by_id('EX_glc_e_reverse').upper_bound = 0
rxns = {r.id: r for r in m.reactions}

index = pd.MultiIndex.from_product([gc.index, ['maximum', 'minimum']])
fluxes = pd.DataFrame(index=index, columns=rxns.keys())
for i, c in enumerate(gc.index):
    rxns['EX_' + gc['media_key'][c] + '_e'].lower_bound = -1000
    rxns['Ec_biomass_iJO1366_WT_53p95M'].upper_bound = gc['growth rate [h-1]'][c]
    rxns['Ec_biomass_iJO1366_WT_53p95M'].lower_bound = gc['growth rate [h-1]'][c]
    for j, r in enumerate(m.reactions):
        fva_results = flux_variability_analysis(m, reaction_list=[r],
                                                objective_sense='minimize',
                                                fraction_of_optimum=1.0001)
        fva = pd.DataFrame.from_dict(fva_results)
        fluxes[r.id][c, 'maximum'] = fva.loc['maximum'][0]
        fluxes[r.id][c, 'minimum'] = fva.loc['minimum'][0]
        print c, i, j, r
    rxns['EX_' + gc['media_key'][c] + '_e'].lower_bound = 0

fluxes.dropna(how='all', inplace=True)
fluxes.T.to_csv('../data/flux_variability_[mmol_gCDW_h]_01%.csv')
mit
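The script above collects FVA results into a DataFrame indexed by (condition, bound) pairs. A minimal sketch of that MultiIndex bookkeeping with invented condition and reaction names, independent of cobra:

# Illustrative sketch, not part of pFVA.py: build a (condition, bound)
# MultiIndex frame and fill individual cells, as the FVA loop does.
import pandas as pd

conditions = ['glucose', 'acetate']
reactions = ['R1', 'R2', 'R3']

index = pd.MultiIndex.from_product([conditions, ['maximum', 'minimum']])
fluxes = pd.DataFrame(index=index, columns=reactions, dtype=float)

fluxes.loc[('glucose', 'maximum'), 'R1'] = 12.3
fluxes.loc[('glucose', 'minimum'), 'R1'] = 0.4
print(fluxes)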
RomainBrault/scikit-learn
sklearn/ensemble/tests/test_forest.py
9
43013
""" Testing for the forest module (sklearn.ensemble.forest). """ # Authors: Gilles Louppe, # Brian Holt, # Andreas Mueller, # Arnaud Joly # License: BSD 3 clause import pickle from collections import defaultdict from itertools import combinations from itertools import product import numpy as np from scipy.misc import comb from scipy.sparse import csr_matrix from scipy.sparse import csc_matrix from scipy.sparse import coo_matrix from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_less, assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import skip_if_32bit from sklearn import datasets from sklearn.decomposition import TruncatedSVD from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.ensemble import RandomTreesEmbedding from sklearn.model_selection import GridSearchCV from sklearn.svm import LinearSVC from sklearn.utils.fixes import bincount from sklearn.utils.validation import check_random_state from sklearn.tree.tree import SPARSE_SPLITTERS # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = check_random_state(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] # also make a hastie_10_2 dataset hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1) hastie_X = hastie_X.astype(np.float32) FOREST_CLASSIFIERS = { "ExtraTreesClassifier": ExtraTreesClassifier, "RandomForestClassifier": RandomForestClassifier, } FOREST_REGRESSORS = { "ExtraTreesRegressor": ExtraTreesRegressor, "RandomForestRegressor": RandomForestRegressor, } FOREST_TRANSFORMERS = { "RandomTreesEmbedding": RandomTreesEmbedding, } FOREST_ESTIMATORS = dict() FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS) FOREST_ESTIMATORS.update(FOREST_REGRESSORS) FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS) def check_classification_toy(name): """Check classification on a toy dataset.""" ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result) assert_equal(10, len(clf)) # also test apply leaf_indices = clf.apply(X) assert_equal(leaf_indices.shape, (len(X), clf.n_estimators)) def test_classification_toy(): for name in FOREST_CLASSIFIERS: yield check_classification_toy, name def check_iris_criterion(name, criterion): # Check consistency on dataset iris. 
ForestClassifier = FOREST_CLASSIFIERS[name] clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.9, "Failed with criterion %s and score = %f" % (criterion, score)) clf = ForestClassifier(n_estimators=10, criterion=criterion, max_features=2, random_state=1) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) assert_greater(score, 0.5, "Failed with criterion %s and score = %f" % (criterion, score)) def test_iris(): for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")): yield check_iris_criterion, name, criterion def check_boston_criterion(name, criterion): # Check consistency on dataset boston house prices. ForestRegressor = FOREST_REGRESSORS[name] clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.94, "Failed with max_features=None, criterion %s " "and score = %f" % (criterion, score)) clf = ForestRegressor(n_estimators=5, criterion=criterion, max_features=6, random_state=1) clf.fit(boston.data, boston.target) score = clf.score(boston.data, boston.target) assert_greater(score, 0.95, "Failed with max_features=6, criterion %s " "and score = %f" % (criterion, score)) def test_boston(): for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")): yield check_boston_criterion, name, criterion def check_regressor_attributes(name): # Regression models should not have a classes_ attribute. r = FOREST_REGRESSORS[name](random_state=0) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) r.fit([[1, 2, 3], [4, 5, 6]], [1, 2]) assert_false(hasattr(r, "classes_")) assert_false(hasattr(r, "n_classes_")) def test_regressor_attributes(): for name in FOREST_REGRESSORS: yield check_regressor_attributes, name def check_probability(name): # Predict probabilities. 
ForestClassifier = FOREST_CLASSIFIERS[name] with np.errstate(divide="ignore"): clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1, max_depth=1) clf.fit(iris.data, iris.target) assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])) assert_array_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))) def test_probability(): for name in FOREST_CLASSIFIERS: yield check_probability, name def check_importances(name, criterion, X, y): ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(n_estimators=20, criterion=criterion, random_state=0) est.fit(X, y) importances = est.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10) assert_equal(n_important, 3) # Check with parallel importances = est.feature_importances_ est.set_params(n_jobs=2) importances_parrallel = est.feature_importances_ assert_array_almost_equal(importances, importances_parrallel) # Check with sample weights sample_weight = check_random_state(0).randint(1, 10, len(X)) est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion) est.fit(X, y, sample_weight=sample_weight) importances = est.feature_importances_ assert_true(np.all(importances >= 0.0)) for scale in [0.5, 10, 100]: est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion) est.fit(X, y, sample_weight=scale * sample_weight) importances_bis = est.feature_importances_ assert_less(np.abs(importances - importances_bis).mean(), 0.001) @skip_if_32bit def test_importances(): X, y = datasets.make_classification(n_samples=500, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]): yield check_importances, name, criterion, X, y for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]): yield check_importances, name, criterion, X, y def test_importances_asymptotic(): # Check whether variable importances of totally randomized trees # converge towards their theoretical values (See Louppe et al, # Understanding variable importances in forests of randomized trees, 2013). def binomial(k, n): return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True) def entropy(samples): n_samples = len(samples) entropy = 0. for count in bincount(samples): p = 1. * count / n_samples if p > 0: entropy -= p * np.log2(p) return entropy def mdi_importance(X_m, X, y): n_samples, n_features = X.shape features = list(range(n_features)) features.pop(X_m) values = [np.unique(X[:, i]) for i in range(n_features)] imp = 0. for k in range(n_features): # Weight of each B of size k coef = 1. / (binomial(k, n_features) * (n_features - k)) # For all B of size k for B in combinations(features, k): # For all values B=b for b in product(*[values[B[j]] for j in range(k)]): mask_b = np.ones(n_samples, dtype=np.bool) for j in range(k): mask_b &= X[:, B[j]] == b[j] X_, y_ = X[mask_b, :], y[mask_b] n_samples_b = len(X_) if n_samples_b > 0: children = [] for xi in values[X_m]: mask_xi = X_[:, X_m] == xi children.append(y_[mask_xi]) imp += (coef * (1. 
* n_samples_b / n_samples) # P(B=b) * (entropy(y_) - sum([entropy(c) * len(c) / n_samples_b for c in children]))) return imp data = np.array([[0, 0, 1, 0, 0, 1, 0, 1], [1, 0, 1, 1, 1, 0, 1, 2], [1, 0, 1, 1, 0, 1, 1, 3], [0, 1, 1, 1, 0, 1, 0, 4], [1, 1, 0, 1, 0, 1, 1, 5], [1, 1, 0, 1, 1, 1, 1, 6], [1, 0, 1, 0, 0, 1, 0, 7], [1, 1, 1, 1, 1, 1, 1, 8], [1, 1, 1, 1, 0, 1, 1, 9], [1, 1, 1, 0, 1, 1, 1, 0]]) X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7] n_features = X.shape[1] # Compute true importances true_importances = np.zeros(n_features) for i in range(n_features): true_importances[i] = mdi_importance(i, X, y) # Estimate importances with totally randomized trees clf = ExtraTreesClassifier(n_estimators=500, max_features=1, criterion="entropy", random_state=0).fit(X, y) importances = sum(tree.tree_.compute_feature_importances(normalize=False) for tree in clf.estimators_) / clf.n_estimators # Check correctness assert_almost_equal(entropy(y), sum(importances)) assert_less(np.abs(true_importances - importances).mean(), 0.01) def check_unfitted_feature_importances(name): assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0), "feature_importances_") def test_unfitted_feature_importances(): for name in FOREST_ESTIMATORS: yield check_unfitted_feature_importances, name def check_oob_score(name, X, y, n_estimators=20): # Check that oob prediction is a good estimation of the generalization # error. # Proper behavior est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=n_estimators, bootstrap=True) n_samples = X.shape[0] est.fit(X[:n_samples // 2, :], y[:n_samples // 2]) test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:]) if name in FOREST_CLASSIFIERS: assert_less(abs(test_score - est.oob_score_), 0.1) else: assert_greater(test_score, est.oob_score_) assert_greater(est.oob_score_, .8) # Check warning if not enough estimators with np.errstate(divide="ignore", invalid="ignore"): est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0, n_estimators=1, bootstrap=True) assert_warns(UserWarning, est.fit, X, y) def test_oob_score(): for name in FOREST_CLASSIFIERS: yield check_oob_score, name, iris.data, iris.target # csc matrix yield check_oob_score, name, csc_matrix(iris.data), iris.target # non-contiguous targets in classification yield check_oob_score, name, iris.data, iris.target * 2 + 1 for name in FOREST_REGRESSORS: yield check_oob_score, name, boston.data, boston.target, 50 # csc matrix yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50 def check_oob_score_raise_error(name): ForestEstimator = FOREST_ESTIMATORS[name] if name in FOREST_TRANSFORMERS: for oob_score in [True, False]: assert_raises(TypeError, ForestEstimator, oob_score=oob_score) assert_raises(NotImplementedError, ForestEstimator()._set_oob_score, X, y) else: # Unfitted / no bootstrap / no oob_score for oob_score, bootstrap in [(True, False), (False, True), (False, False)]: est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap, random_state=0) assert_false(hasattr(est, "oob_score_")) # No bootstrap assert_raises(ValueError, ForestEstimator(oob_score=True, bootstrap=False).fit, X, y) def test_oob_score_raise_error(): for name in FOREST_ESTIMATORS: yield check_oob_score_raise_error, name def check_gridsearch(name): forest = FOREST_CLASSIFIERS[name]() clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)}) clf.fit(iris.data, iris.target) def test_gridsearch(): # Check that base trees can be grid-searched. 
for name in FOREST_CLASSIFIERS: yield check_gridsearch, name def check_parallel(name, X, y): """Check parallel computations in classification""" ForestEstimator = FOREST_ESTIMATORS[name] forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0) forest.fit(X, y) assert_equal(len(forest), 10) forest.set_params(n_jobs=1) y1 = forest.predict(X) forest.set_params(n_jobs=2) y2 = forest.predict(X) assert_array_almost_equal(y1, y2, 3) def test_parallel(): for name in FOREST_CLASSIFIERS: yield check_parallel, name, iris.data, iris.target for name in FOREST_REGRESSORS: yield check_parallel, name, boston.data, boston.target def check_pickle(name, X, y): # Check pickability. ForestEstimator = FOREST_ESTIMATORS[name] obj = ForestEstimator(random_state=0) obj.fit(X, y) score = obj.score(X, y) pickle_object = pickle.dumps(obj) obj2 = pickle.loads(pickle_object) assert_equal(type(obj2), obj.__class__) score2 = obj2.score(X, y) assert_equal(score, score2) def test_pickle(): for name in FOREST_CLASSIFIERS: yield check_pickle, name, iris.data[::2], iris.target[::2] for name in FOREST_REGRESSORS: yield check_pickle, name, boston.data[::2], boston.target[::2] def check_multioutput(name): # Check estimators on multi-output problems. X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]] est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) y_pred = est.fit(X_train, y_train).predict(X_test) assert_array_almost_equal(y_pred, y_test) if name in FOREST_CLASSIFIERS: with np.errstate(divide="ignore"): proba = est.predict_proba(X_test) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = est.predict_log_proba(X_test) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) def test_multioutput(): for name in FOREST_CLASSIFIERS: yield check_multioutput, name for name in FOREST_REGRESSORS: yield check_multioutput, name def check_classes_shape(name): # Test that n_classes_ and classes_ have proper shape. ForestClassifier = FOREST_CLASSIFIERS[name] # Classification, single output clf = ForestClassifier(random_state=0).fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(random_state=0).fit(X, _y) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_classes_shape(): for name in FOREST_CLASSIFIERS: yield check_classes_shape, name def test_random_trees_dense_type(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning a dense array. # Create the RTE with sparse=False hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # Assert that type is ndarray, not scipy.sparse.csr.csr_matrix assert_equal(type(X_transformed), np.ndarray) def test_random_trees_dense_equal(): # Test that the `sparse_output` parameter of RandomTreesEmbedding # works by returning the same array for both argument values. 
# Create the RTEs hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False, random_state=0) hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True, random_state=0) X, y = datasets.make_circles(factor=0.5) X_transformed_dense = hasher_dense.fit_transform(X) X_transformed_sparse = hasher_sparse.fit_transform(X) # Assert that dense and sparse hashers have same array. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense) # Ignore warnings from switching to more power iterations in randomized_svd @ignore_warnings def test_random_hasher(): # test random forest hashing on circles dataset # make sure that it is linearly separable. # even after projected to two SVD dimensions # Note: Not all random_states produce perfect results. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X, y = datasets.make_circles(factor=0.5) X_transformed = hasher.fit_transform(X) # test fit and transform: hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray()) # one leaf active per data point per forest assert_equal(X_transformed.shape[0], X.shape[0]) assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators) svd = TruncatedSVD(n_components=2) X_reduced = svd.fit_transform(X_transformed) linear_clf = LinearSVC() linear_clf.fit(X_reduced, y) assert_equal(linear_clf.score(X_reduced, y), 1.) def test_random_hasher_sparse_data(): X, y = datasets.make_multilabel_classification(random_state=0) hasher = RandomTreesEmbedding(n_estimators=30, random_state=1) X_transformed = hasher.fit_transform(X) X_transformed_sparse = hasher.fit_transform(csc_matrix(X)) assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray()) def test_parallel_train(): rng = check_random_state(12321) n_samples, n_features = 80, 30 X_train = rng.randn(n_samples, n_features) y_train = rng.randint(0, 2, n_samples) clfs = [ RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(X_train, y_train) for n_jobs in [1, 2, 3, 8, 16, 32] ] X_test = rng.randn(n_samples, n_features) probas = [clf.predict_proba(X_test) for clf in clfs] for proba1, proba2 in zip(probas, probas[1:]): assert_array_almost_equal(proba1, proba2) def test_distribution(): rng = check_random_state(12321) # Single variable with 4 values X = rng.randint(0, 4, size=(1000, 1)) y = rng.rand(1000) n_trees = 500 clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = sorted([(1. * count / n_trees, tree) for tree, count in uniques.items()]) # On a single variable problem where X_0 has 4 equiprobable values, there # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of # them has probability 1/3 while the 4 others have probability 1/6. assert_equal(len(uniques), 5) assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6. 
assert_greater(0.20, uniques[1][0]) assert_greater(0.20, uniques[2][0]) assert_greater(0.20, uniques[3][0]) assert_greater(uniques[4][0], 0.3) assert_equal(uniques[4][1], "0,1/0,0/--0,2/--") # Two variables, one with 2 values, one with 3 values X = np.empty((1000, 2)) X[:, 0] = np.random.randint(0, 2, 1000) X[:, 1] = np.random.randint(0, 3, 1000) y = rng.rand(1000) clf = ExtraTreesRegressor(n_estimators=100, max_features=1, random_state=1).fit(X, y) uniques = defaultdict(int) for tree in clf.estimators_: tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-") for f, t in zip(tree.tree_.feature, tree.tree_.threshold)) uniques[tree] += 1 uniques = [(count, tree) for tree, count in uniques.items()] assert_equal(len(uniques), 8) def check_max_leaf_nodes_max_depth(name): X, y = hastie_X, hastie_y # Test precedence of max_leaf_nodes over max_depth. ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0).fit(X, y) assert_greater(est.estimators_[0].tree_.max_depth, 1) est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y) assert_equal(est.estimators_[0].tree_.max_depth, 1) def test_max_leaf_nodes_max_depth(): for name in FOREST_ESTIMATORS: yield check_max_leaf_nodes_max_depth, name def check_min_samples_split(name): X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] # test boundary value assert_raises(ValueError, ForestEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, ForestEstimator(min_samples_split=0).fit, X, y) assert_raises(ValueError, ForestEstimator(min_samples_split=1.1).fit, X, y) est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0) est.fit(X, y) node_idx = est.estimators_[0].tree_.children_left != -1 node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] assert_greater(np.min(node_samples), len(X) * 0.5 - 1, "Failed with {0}".format(name)) est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0) est.fit(X, y) node_idx = est.estimators_[0].tree_.children_left != -1 node_samples = est.estimators_[0].tree_.n_node_samples[node_idx] assert_greater(np.min(node_samples), len(X) * 0.5 - 1, "Failed with {0}".format(name)) def test_min_samples_split(): for name in FOREST_ESTIMATORS: yield check_min_samples_split, name def check_min_samples_leaf(name): X, y = hastie_X, hastie_y # Test if leaves contain more than leaf_count training examples ForestEstimator = FOREST_ESTIMATORS[name] # test boundary value assert_raises(ValueError, ForestEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, ForestEstimator(min_samples_leaf=0).fit, X, y) est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0) est.fit(X, y) out = est.estimators_[0].tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), len(X) * 0.25 - 1, "Failed with {0}".format(name)) def test_min_samples_leaf(): for name in FOREST_ESTIMATORS: yield check_min_samples_leaf, name def check_min_weight_fraction_leaf(name): X, y = hastie_X, hastie_y # Test if leaves contain at least min_weight_fraction_leaf of the # training set ForestEstimator = FOREST_ESTIMATORS[name] 
rng = np.random.RandomState(0) weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for frac in np.linspace(0, 0.5, 6): est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1, random_state=0) if "RandomForest" in name: est.bootstrap = False est.fit(X, y, sample_weight=weights) out = est.estimators_[0].tree_.apply(X) node_weights = bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): for name in FOREST_ESTIMATORS: yield check_min_weight_fraction_leaf, name def check_sparse_input(name, X, X_sparse, y): ForestEstimator = FOREST_ESTIMATORS[name] dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y) sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y) assert_array_almost_equal(sparse.apply(X), dense.apply(X)) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_array_almost_equal(sparse.predict(X), dense.predict(X)) assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_) if name in FOREST_CLASSIFIERS: assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X)) assert_array_almost_equal(sparse.predict_log_proba(X), dense.predict_log_proba(X)) if name in FOREST_TRANSFORMERS: assert_array_almost_equal(sparse.transform(X).toarray(), dense.transform(X).toarray()) assert_array_almost_equal(sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()) def test_sparse_input(): X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50) for name, sparse_matrix in product(FOREST_ESTIMATORS, (csr_matrix, csc_matrix, coo_matrix)): yield check_sparse_input, name, X, sparse_matrix(X), y def check_memory_layout(name, dtype): # Check that it works no matter the memory layout est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.base_estimator.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # coo_matrix X = coo_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_memory_layout(): for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]): yield check_memory_layout, name, dtype for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]): yield check_memory_layout, name, dtype @ignore_warnings def check_1d_input(name, X, X_2d, y): ForestEstimator = FOREST_ESTIMATORS[name] assert_raises(ValueError, ForestEstimator(n_estimators=1, 
random_state=0).fit, X, y) est = ForestEstimator(random_state=0) est.fit(X_2d, y) if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS: assert_raises(ValueError, est.predict, X) @ignore_warnings def test_1d_input(): X = iris.data[:, 0] X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target for name in FOREST_ESTIMATORS: yield check_1d_input, name, X, X_2d, y def check_class_weights(name): # Check class_weights resemble sample_weights behavior. ForestClassifier = FOREST_CLASSIFIERS[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = ForestClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "balanced" which should also have no effect clf4 = ForestClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = ForestClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = ForestClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Using a Python 2.x list as the sample_weight parameter used to raise # an exception. This test makes sure such code will now run correctly. clf = ForestClassifier() sample_weight = [1.] * len(iris.data) clf.fit(iris.data, iris.target, sample_weight=sample_weight) def test_class_weights(): for name in FOREST_CLASSIFIERS: yield check_class_weights, name def check_class_weight_balanced_and_bootstrap_multi_output(name): # Test class_weight works for multi-output""" ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T clf = ForestClassifier(class_weight='balanced', random_state=0) clf.fit(X, _y) clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}], random_state=0) clf.fit(X, _y) # smoke test for balanced subsample clf = ForestClassifier(class_weight='balanced_subsample', random_state=0) clf.fit(X, _y) def test_class_weight_balanced_and_bootstrap_multi_output(): for name in FOREST_CLASSIFIERS: yield check_class_weight_balanced_and_bootstrap_multi_output, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. 
ForestClassifier = FOREST_CLASSIFIERS[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = ForestClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Warning warm_start with preset clf = ForestClassifier(class_weight='balanced', warm_start=True, random_state=0) assert_warns(UserWarning, clf.fit, X, y) assert_warns(UserWarning, clf.fit, X, _y) # Not a list or preset for multi-output clf = ForestClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in FOREST_CLASSIFIERS: yield check_class_weight_errors, name def check_warm_start(name, random_state=42): # Test if fitting incrementally with warm start gives a forest of the # right size and the same results as a normal fit. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf_ws = None for n_estimators in [5, 10]: if clf_ws is None: clf_ws = ForestEstimator(n_estimators=n_estimators, random_state=random_state, warm_start=True) else: clf_ws.set_params(n_estimators=n_estimators) clf_ws.fit(X, y) assert_equal(len(clf_ws), n_estimators) clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state, warm_start=False) clf_no_ws.fit(X, y) assert_equal(set([tree.random_state for tree in clf_ws]), set([tree.random_state for tree in clf_no_ws])) assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X), err_msg="Failed with {0}".format(name)) def test_warm_start(): for name in FOREST_ESTIMATORS: yield check_warm_start, name def check_warm_start_clear(name): # Test if fit clears state and grows a new forest when warm_start==False. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True, random_state=2) clf_2.fit(X, y) # inits state clf_2.set_params(warm_start=False, random_state=1) clf_2.fit(X, y) # clears old state and equals clf assert_array_almost_equal(clf_2.apply(X), clf.apply(X)) def test_warm_start_clear(): for name in FOREST_ESTIMATORS: yield check_warm_start_clear, name def check_warm_start_smaller_n_estimators(name): # Test if warm start second fit with smaller n_estimators raises error. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True) clf.fit(X, y) clf.set_params(n_estimators=4) assert_raises(ValueError, clf.fit, X, y) def test_warm_start_smaller_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_smaller_n_estimators, name def check_warm_start_equal_n_estimators(name): # Test if warm start with equal n_estimators does nothing and returns the # same forest and raises a warning. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1) clf_2.fit(X, y) # Now clf_2 equals clf. clf_2.set_params(random_state=2) assert_warns(UserWarning, clf_2.fit, X, y) # If we had fit the trees again we would have got a different forest as we # changed the random state. 
assert_array_equal(clf.apply(X), clf_2.apply(X)) def test_warm_start_equal_n_estimators(): for name in FOREST_ESTIMATORS: yield check_warm_start_equal_n_estimators, name def check_warm_start_oob(name): # Test that the warm start computes oob score when asked. X, y = hastie_X, hastie_y ForestEstimator = FOREST_ESTIMATORS[name] # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning. clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=True) clf.fit(X, y) clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False, random_state=1, bootstrap=True, oob_score=False) clf_2.fit(X, y) clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15) clf_2.fit(X, y) assert_true(hasattr(clf_2, 'oob_score_')) assert_equal(clf.oob_score_, clf_2.oob_score_) # Test that oob_score is computed even if we don't need to train # additional trees. clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True, random_state=1, bootstrap=True, oob_score=False) clf_3.fit(X, y) assert_true(not(hasattr(clf_3, 'oob_score_'))) clf_3.set_params(oob_score=True) ignore_warnings(clf_3.fit)(X, y) assert_equal(clf.oob_score_, clf_3.oob_score_) def test_warm_start_oob(): for name in FOREST_CLASSIFIERS: yield check_warm_start_oob, name for name in FOREST_REGRESSORS: yield check_warm_start_oob, name def test_dtype_convert(n_classes=15): classifier = RandomForestClassifier(random_state=0, bootstrap=False) X = np.eye(n_classes) y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]] result = classifier.fit(X, y).predict(X) assert_array_equal(classifier.classes_, y) assert_array_equal(result, y) def check_decision_path(name): X, y = hastie_X, hastie_y n_samples = X.shape[0] ForestEstimator = FOREST_ESTIMATORS[name] est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1) est.fit(X, y) indicator, n_nodes_ptr = est.decision_path(X) assert_equal(indicator.shape[1], n_nodes_ptr[-1]) assert_equal(indicator.shape[0], n_samples) assert_array_equal(np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_]) # Assert that leaves index are correct leaves = est.apply(X) for est_id in range(leaves.shape[1]): leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j] for i, j in enumerate(leaves[:, est_id])] assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples)) def test_decision_path(): for name in FOREST_CLASSIFIERS: yield check_decision_path, name for name in FOREST_REGRESSORS: yield check_decision_path, name def test_min_impurity_split(): # Test if min_impurity_split of base estimators is set # Regression test for #8006 X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor] for Estimator in all_estimators: est = Estimator(min_impurity_split=0.1) est = assert_warns_message(DeprecationWarning, "min_impurity_decrease", est.fit, X, y) for tree in est.estimators_: assert_equal(tree.min_impurity_split, 0.1) def test_min_impurity_decrease(): X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) all_estimators = [RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor] for Estimator in all_estimators: est = Estimator(min_impurity_decrease=0.1) est.fit(X, y) for tree in est.estimators_: # Simply check if the parameter is passed on correctly. Tree tests # will suffice for the actual working of this param assert_equal(tree.min_impurity_decrease, 0.1)
bsd-3-clause
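Among many behaviours, the forest test module above verifies warm_start, which appends trees to an already fitted ensemble instead of refitting from scratch. A minimal sketch of that usage on synthetic data:

# Illustrative sketch, not part of the test module above: grow a random
# forest incrementally with warm_start.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)

clf = RandomForestClassifier(n_estimators=5, warm_start=True, random_state=42)
clf.fit(X, y)                       # first 5 trees
clf.set_params(n_estimators=10)
clf.fit(X, y)                       # 5 more trees are appended, not refit
print(len(clf.estimators_))         # 10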
glennq/scikit-learn
sklearn/datasets/tests/test_svmlight_format.py
53
13398
from bz2 import BZ2File import gzip from io import BytesIO import numpy as np import scipy.sparse as sp import os import shutil from tempfile import NamedTemporaryFile from sklearn.externals.six import b from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import raises from sklearn.utils.testing import assert_in import sklearn from sklearn.datasets import (load_svmlight_file, load_svmlight_files, dump_svmlight_file) currdir = os.path.dirname(os.path.abspath(__file__)) datafile = os.path.join(currdir, "data", "svmlight_classification.txt") multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt") invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt") invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt") def test_load_svmlight_file(): X, y = load_svmlight_file(datafile) # test X's shape assert_equal(X.indptr.shape[0], 7) assert_equal(X.shape[0], 6) assert_equal(X.shape[1], 21) assert_equal(y.shape[0], 6) # test X's non-zero values for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5), (1, 5, 1.0), (1, 12, -3), (2, 20, 27)): assert_equal(X[i, j], val) # tests X's zero values assert_equal(X[0, 3], 0) assert_equal(X[0, 5], 0) assert_equal(X[1, 8], 0) assert_equal(X[1, 16], 0) assert_equal(X[2, 18], 0) # test can change X's values X[0, 2] *= 2 assert_equal(X[0, 2], 5) # test y assert_array_equal(y, [1, 2, 3, 4, 1, 2]) def test_load_svmlight_file_fd(): # test loading from file descriptor X1, y1 = load_svmlight_file(datafile) fd = os.open(datafile, os.O_RDONLY) try: X2, y2 = load_svmlight_file(fd) assert_array_equal(X1.data, X2.data) assert_array_equal(y1, y2) finally: os.close(fd) def test_load_svmlight_file_multilabel(): X, y = load_svmlight_file(multifile, multilabel=True) assert_equal(y, [(0, 1), (2,), (), (1, 2)]) def test_load_svmlight_files(): X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2, dtype=np.float32) assert_array_equal(X_train.toarray(), X_test.toarray()) assert_array_equal(y_train, y_test) assert_equal(X_train.dtype, np.float32) assert_equal(X_test.dtype, np.float32) X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3, dtype=np.float64) assert_equal(X1.dtype, X2.dtype) assert_equal(X2.dtype, X3.dtype) assert_equal(X3.dtype, np.float64) def test_load_svmlight_file_n_features(): X, y = load_svmlight_file(datafile, n_features=22) # test X'shape assert_equal(X.indptr.shape[0], 7) assert_equal(X.shape[0], 6) assert_equal(X.shape[1], 22) # test X's non-zero values for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (1, 5, 1.0), (1, 12, -3)): assert_equal(X[i, j], val) # 21 features in file assert_raises(ValueError, load_svmlight_file, datafile, n_features=20) def test_load_compressed(): X, y = load_svmlight_file(datafile) with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp: tmp.close() # necessary under windows with open(datafile, "rb") as f: shutil.copyfileobj(f, gzip.open(tmp.name, "wb")) Xgz, ygz = load_svmlight_file(tmp.name) # because we "close" it manually and write to it, # we need to remove it manually. 
os.remove(tmp.name) assert_array_equal(X.toarray(), Xgz.toarray()) assert_array_equal(y, ygz) with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp: tmp.close() # necessary under windows with open(datafile, "rb") as f: shutil.copyfileobj(f, BZ2File(tmp.name, "wb")) Xbz, ybz = load_svmlight_file(tmp.name) # because we "close" it manually and write to it, # we need to remove it manually. os.remove(tmp.name) assert_array_equal(X.toarray(), Xbz.toarray()) assert_array_equal(y, ybz) @raises(ValueError) def test_load_invalid_file(): load_svmlight_file(invalidfile) @raises(ValueError) def test_load_invalid_order_file(): load_svmlight_file(invalidfile2) @raises(ValueError) def test_load_zero_based(): f = BytesIO(b("-1 4:1.\n1 0:1\n")) load_svmlight_file(f, zero_based=False) def test_load_zero_based_auto(): data1 = b("-1 1:1 2:2 3:3\n") data2 = b("-1 0:0 1:1\n") f1 = BytesIO(data1) X, y = load_svmlight_file(f1, zero_based="auto") assert_equal(X.shape, (1, 3)) f1 = BytesIO(data1) f2 = BytesIO(data2) X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto") assert_equal(X1.shape, (1, 4)) assert_equal(X2.shape, (1, 4)) def test_load_with_qid(): # load svmfile with qid attribute data = b(""" 3 qid:1 1:0.53 2:0.12 2 qid:1 1:0.13 2:0.1 7 qid:2 1:0.87 2:0.12""") X, y = load_svmlight_file(BytesIO(data), query_id=False) assert_array_equal(y, [3, 2, 7]) assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]]) res1 = load_svmlight_files([BytesIO(data)], query_id=True) res2 = load_svmlight_file(BytesIO(data), query_id=True) for X, y, qid in (res1, res2): assert_array_equal(y, [3, 2, 7]) assert_array_equal(qid, [1, 1, 2]) assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]]) @raises(ValueError) def test_load_invalid_file2(): load_svmlight_files([datafile, invalidfile, datafile]) @raises(TypeError) def test_not_a_filename(): # in python 3 integers are valid file opening arguments (taken as unix # file descriptors) load_svmlight_file(.42) @raises(IOError) def test_invalid_filename(): load_svmlight_file("trou pic nic douille") def test_dump(): X_sparse, y_dense = load_svmlight_file(datafile) X_dense = X_sparse.toarray() y_sparse = sp.csr_matrix(y_dense) # slicing a csr_matrix can unsort its .indices, so test that we sort # those correctly X_sliced = X_sparse[np.arange(X_sparse.shape[0])] y_sliced = y_sparse[np.arange(y_sparse.shape[0])] for X in (X_sparse, X_dense, X_sliced): for y in (y_sparse, y_dense, y_sliced): for zero_based in (True, False): for dtype in [np.float32, np.float64, np.int32]: f = BytesIO() # we need to pass a comment to get the version info in; # LibSVM doesn't grok comments so they're not put in by # default anymore. 
if (sp.issparse(y) and y.shape[0] == 1): # make sure y's shape is: (n_samples, n_labels) # when it is sparse y = y.T dump_svmlight_file(X.astype(dtype), y, f, comment="test", zero_based=zero_based) f.seek(0) comment = f.readline() try: comment = str(comment, "utf-8") except TypeError: # fails in Python 2.x pass assert_in("scikit-learn %s" % sklearn.__version__, comment) comment = f.readline() try: comment = str(comment, "utf-8") except TypeError: # fails in Python 2.x pass assert_in(["one", "zero"][zero_based] + "-based", comment) X2, y2 = load_svmlight_file(f, dtype=dtype, zero_based=zero_based) assert_equal(X2.dtype, dtype) assert_array_equal(X2.sorted_indices().indices, X2.indices) X2_dense = X2.toarray() if dtype == np.float32: # allow a rounding error at the last decimal place assert_array_almost_equal( X_dense.astype(dtype), X2_dense, 4) assert_array_almost_equal( y_dense.astype(dtype), y2, 4) else: # allow a rounding error at the last decimal place assert_array_almost_equal( X_dense.astype(dtype), X2_dense, 15) assert_array_almost_equal( y_dense.astype(dtype), y2, 15) def test_dump_multilabel(): X = [[1, 0, 3, 0, 5], [0, 0, 0, 0, 0], [0, 5, 0, 1, 0]] y_dense = [[0, 1, 0], [1, 0, 1], [1, 1, 0]] y_sparse = sp.csr_matrix(y_dense) for y in [y_dense, y_sparse]: f = BytesIO() dump_svmlight_file(X, y, f, multilabel=True) f.seek(0) # make sure it dumps multilabel correctly assert_equal(f.readline(), b("1 0:1 2:3 4:5\n")) assert_equal(f.readline(), b("0,2 \n")) assert_equal(f.readline(), b("0,1 1:5 3:1\n")) def test_dump_concise(): one = 1 two = 2.1 three = 3.01 exact = 1.000000000000001 # loses the last decimal place almost = 1.0000000000000001 X = [[one, two, three, exact, almost], [1e9, 2e18, 3e27, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]] y = [one, two, three, exact, almost] f = BytesIO() dump_svmlight_file(X, y, f) f.seek(0) # make sure it's using the most concise format possible assert_equal(f.readline(), b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n")) assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n")) assert_equal(f.readline(), b("3.01 \n")) assert_equal(f.readline(), b("1.000000000000001 \n")) assert_equal(f.readline(), b("1 \n")) f.seek(0) # make sure it's correct too :) X2, y2 = load_svmlight_file(f) assert_array_almost_equal(X, X2.toarray()) assert_array_equal(y, y2) def test_dump_comment(): X, y = load_svmlight_file(datafile) X = X.toarray() f = BytesIO() ascii_comment = "This is a comment\nspanning multiple lines." 
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False) f.seek(0) X2, y2 = load_svmlight_file(f, zero_based=False) assert_array_almost_equal(X, X2.toarray()) assert_array_equal(y, y2) # XXX we have to update this to support Python 3.x utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc") f = BytesIO() assert_raises(UnicodeDecodeError, dump_svmlight_file, X, y, f, comment=utf8_comment) unicode_comment = utf8_comment.decode("utf-8") f = BytesIO() dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False) f.seek(0) X2, y2 = load_svmlight_file(f, zero_based=False) assert_array_almost_equal(X, X2.toarray()) assert_array_equal(y, y2) f = BytesIO() assert_raises(ValueError, dump_svmlight_file, X, y, f, comment="I've got a \0.") def test_dump_invalid(): X, y = load_svmlight_file(datafile) f = BytesIO() y2d = [y] assert_raises(ValueError, dump_svmlight_file, X, y2d, f) f = BytesIO() assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f) def test_dump_query_id(): # test dumping a file with query_id X, y = load_svmlight_file(datafile) X = X.toarray() query_id = np.arange(X.shape[0]) // 2 f = BytesIO() dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True) f.seek(0) X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True) assert_array_almost_equal(X, X1.toarray()) assert_array_almost_equal(y, y1) assert_array_almost_equal(query_id, query_id1) def test_load_with_long_qid(): # load svmfile with longint qid attribute data = b(""" 1 qid:0 0:1 1:2 2:3 0 qid:72048431380967004 0:1440446648 1:72048431380967004 2:236784985 0 qid:-9223372036854775807 0:1440446648 1:72048431380967004 2:236784985 3 qid:9223372036854775807 0:1440446648 1:72048431380967004 2:236784985""") X, y, qid = load_svmlight_file(BytesIO(data), query_id=True) true_X = [[1, 2, 3], [1440446648, 72048431380967004, 236784985], [1440446648, 72048431380967004, 236784985], [1440446648, 72048431380967004, 236784985]] true_y = [1, 0, 0, 3] trueQID = [0, 72048431380967004, -9223372036854775807, 9223372036854775807] assert_array_equal(y, true_y) assert_array_equal(X.toarray(), true_X) assert_array_equal(qid, trueQID) f = BytesIO() dump_svmlight_file(X, y, f, query_id=qid, zero_based=True) f.seek(0) X, y, qid = load_svmlight_file(f, query_id=True, zero_based=True) assert_array_equal(y, true_y) assert_array_equal(X.toarray(), true_X) assert_array_equal(qid, trueQID) f.seek(0) X, y = load_svmlight_file(f, query_id=False, zero_based=True) assert_array_equal(y, true_y) assert_array_equal(X.toarray(), true_X)
bsd-3-clause
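A minimal sketch of the dump/load round-trip that the tests above exercise, assuming only a scikit-learn installation; the toy matrix and labels are illustrative.

from io import BytesIO

from sklearn.datasets import dump_svmlight_file, load_svmlight_file

# toy data (illustrative values only)
X = [[1.0, 0.0, 2.5],
     [0.0, 3.0, 0.0]]
y = [1, 2]

f = BytesIO()
dump_svmlight_file(X, y, f, zero_based=True)
f.seek(0)

X2, y2 = load_svmlight_file(f, zero_based=True)
print(X2.toarray())  # same values back, as a sparse CSR matrix
print(y2)            # [ 1.  2.]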
lenovor/scikit-learn
examples/classification/plot_digits_classification.py
289
2397
""" ================================ Recognizing hand-written digits ================================ An example showing how the scikit-learn can be used to recognize images of hand-written digits. This example is commented in the :ref:`tutorial section of the user manual <introduction>`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, metrics # The digits dataset digits = datasets.load_digits() # The data that we are interested in is made of 8x8 images of digits, let's # have a look at the first 3 images, stored in the `images` attribute of the # dataset. If we were working from image files, we could load them using # pylab.imread. Note that each image must have the same size. For these # images, we know which digit they represent: it is given in the 'target' of # the dataset. images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i' % label) # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier classifier = svm.SVC(gamma=0.001) # We learn the digits on the first half of the digits classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2]) # Now predict the value of the digit on the second half: expected = digits.target[n_samples / 2:] predicted = classifier.predict(data[n_samples / 2:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2, 4, index + 5) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' % prediction) plt.show()
bsd-3-clause
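The example above slices with `n_samples / 2`, which relies on Python 2 integer division; below is a sketch of the same fit/predict flow with an explicit integer split, assuming only scikit-learn and its bundled digits data.

from sklearn import datasets, metrics, svm

digits = datasets.load_digits()
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

half = n_samples // 2            # explicit integer split, valid on Python 2 and 3
classifier = svm.SVC(gamma=0.001)
classifier.fit(data[:half], digits.target[:half])

predicted = classifier.predict(data[half:])
print(metrics.classification_report(digits.target[half:], predicted))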
mtth/hdfs
hdfs/util.py
1
4419
#!/usr/bin/env python # encoding: utf-8 """Common utilities.""" from contextlib import contextmanager from shutil import rmtree from six.moves.queue import Queue from tempfile import mkstemp from threading import Thread import logging as lg import os import os.path as osp _logger = lg.getLogger(__name__) class HdfsError(Exception): """Base error class. :param message: Error message. :param args: optional Message formatting arguments. """ def __init__(self, message, *args, **kwargs): self.message = message % args if args else message super(HdfsError, self).__init__(self.message) self.exception = kwargs.get("exception") class AsyncWriter(object): """Asynchronous publisher-consumer. :param consumer: Function which takes a single generator as argument. This class can be used to transform functions which expect a generator into file-like writer objects. This can make it possible to combine different APIs together more easily. For example, to send streaming requests: .. code-block:: python import requests as rq with AsyncWriter(lambda data: rq.post(URL, data=data)) as writer: writer.write('Hello, world!') """ # Expected by pandas to write csv files (https://github.com/mtth/hdfs/pull/130). __iter__ = None def __init__(self, consumer): self._consumer = consumer self._queue = None self._reader = None self._err = None _logger.debug('Instantiated %r.', self) def __repr__(self): return '<%s(consumer=%r)>' % (self.__class__.__name__, self._consumer) def __enter__(self): if self._queue: raise ValueError('Cannot nest contexts.') self._queue = Queue() self._err = None def consumer(data): """Wrapped consumer that lets us get a child's exception.""" try: _logger.debug('Starting consumer.') self._consumer(data) except Exception as err: # pylint: disable=broad-except _logger.exception('Exception in child.') self._err = err finally: _logger.debug('Finished consumer.') def reader(queue): """Generator read by the consumer.""" while True: chunk = queue.get() if chunk is None: break yield chunk self._reader = Thread(target=consumer, args=(reader(self._queue), )) self._reader.start() _logger.debug('Started child thread.') return self def __exit__(self, exc_type, exc_value, traceback): if exc_value: _logger.debug('Exception in parent.') if self._reader and self._reader.is_alive(): _logger.debug('Signaling child.') self._queue.put(None) self._reader.join() if self._err: raise self._err # pylint: disable=raising-bad-type else: _logger.debug('Child terminated without errors.') self._queue = None def flush(self): """Pass-through implementation.""" pass def seekable(self): """Implement file-like method expected by certain libraries. `fastavro` relies on it in python 3. """ return False def tell(self): """No-op implementation.""" return 0 def write(self, chunk): """Stream data to the underlying consumer. :param chunk: Bytes to write. These will be buffered in memory until the consumer reads them. """ if chunk: # We skip empty chunks, otherwise they cause request to terminate the # response stream. Note that these chunks can be produced by valid # upstream encoders (e.g. bzip2). self._queue.put(chunk) @contextmanager def temppath(dpath=None): """Create a temporary path. :param dpath: Explicit directory name where to create the temporary path. A system dependent default will be used otherwise (cf. `tempfile.mkstemp`). Usage:: with temppath() as path: pass # do stuff Any file or directory corresponding to the path will be automatically deleted afterwards. 
""" (desc, path) = mkstemp(dir=dpath) os.close(desc) os.remove(path) try: _logger.debug('Created temporary path at %s.', path) yield path finally: if osp.exists(path): if osp.isdir(path): rmtree(path) _logger.debug('Deleted temporary directory at %s.', path) else: os.remove(path) _logger.debug('Deleted temporary file at %s.', path) else: _logger.debug('No temporary file or directory to delete at %s.', path)
mit
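Two small local sketches of the helpers defined above, assuming the hdfs package is importable; no HDFS cluster is involved, and the consumer below is a stand-in for whatever function would normally receive the stream.

from hdfs.util import AsyncWriter, temppath

# AsyncWriter: the consumer receives a generator fed by writer.write() calls.
received = []

def consumer(data):
    for chunk in data:           # drains the internal queue until the context exits
        received.append(chunk)

with AsyncWriter(consumer) as writer:
    writer.write(b'hello, ')
    writer.write(b'world')
print(b''.join(received))        # b'hello, world'

# temppath: a scratch path that is removed on exit, whether or not it was used.
with temppath() as path:
    with open(path, 'w') as handle:
        handle.write('scratch data')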
foxtrotmike/pairpred
analyzeMouse.py
1
2283
# -*- coding: utf-8 -*- """ Created on Sun Feb 24 19:46:32 2013 Code for finding out the differences in prediction scores for the mouse ISG15 in case study of PAIRpred paper @author: root """ import numpy as np from analyzePredFile import * import matplotlib.pyplot as plt mppfile='./testcase/DBD4_IGS15_NS1B_PairPred/Mutagenesis/M6B1_D4.InterPRed.txt' hppfile='./testcase/DBD4_IGS15_NS1B_PairPred/HNS1_d4PN.InterPRed.txt'#HNS1.InterPRed200.txt' (_,hMv,_,hlseq,_,_,_)=readFile(hppfile) (_,mMv,_,mlseq,_,_,_)=readFile(mppfile) """ nz=np.zeros((1,mMv.shape[1])) nz.fill(np.nan) mMv=np.vstack((mMv[:48,:],nz,mMv[47:51,:],nz,mMv[51:,:]))[:hMv.shape[0],:] mMv[:9,:]=np.nan;mMv[-6:,:]=np.nan;mMv[:,:7]=np.nan;mMv[:,-6:]=np.nan; hMv[:9,:]=np.nan;hMv[-6:,:]=np.nan;hMv[:,:7]=np.nan;hMv[:,-6:]=np.nan; hv=np.nanmax(hMv,axis=1) mv=np.nanmax(mMv,axis=1) mv=(mv-np.mean(mv[~np.isnan(mv)]))/np.std(mv[~np.isnan(mv)]) hv=(hv-np.mean(hv[~np.isnan(hv)]))/np.std(hv[~np.isnan(hv)]) print np.nonzero((hv>1.3) & ((mv-hv)<-0.8)) plt.plot(hv);plt.plot(mv);plt.show() """ nz=np.zeros((1,mMv.shape[1])) nz.fill(np.nan) mMv=np.vstack((mMv[:48,:],nz,mMv[47:51,:],nz,mMv[51:,:]))[:hMv.shape[0],:] #mMv[:9,:]=np.nan;mMv[-5:,:]=np.nan;mMv[:,:6]=np.nan;mMv[:,-5:]=np.nan; #hMv[:9,:]=np.nan;hMv[-5:,:]=np.nan;hMv[:,:6]=np.nan;hMv[:,-5:]=np.nan; mMv=(mMv-np.mean(mMv[~np.isnan(mMv)]))/np.std(mMv[~np.isnan(mMv)]) hMv=(hMv-np.mean(hMv[~np.isnan(hMv)]))/np.std(hMv[~np.isnan(hMv)]) hMvf=hMv.flatten() hMvf=hMvf[~np.isnan(hMvf)] thr=-10.0 dthr=1.0 R=mMv-hMv Rf=R.flatten() Rf=Rf[~np.isnan(Rf)] R[hMv<=thr]=np.nan hMv[hMv<=thr]=np.nan R[R>-dthr]=np.nan #R[mMv>thr-dthr]=np.nan print np.unique(np.nonzero(~np.isnan(R))[0]) print np.nonzero(~np.isnan(np.nanmax(R,axis=1))) dd=dict(zip(*(np.nonzero(~np.isnan(np.nanmax(R,axis=1)))[0],np.nanmax(R,axis=1)[np.nonzero(~np.isnan(np.nanmax(R,axis=1)))[0]]))) print dd #plt.figure();plt.hist(hMvf,cumulative=True,normed=True,bins=200) #plt.figure();plt.hist(Rf,cumulative=True,normed=True,bins=50) p=(np.sum(hMv>thr)/float(hMv.size))*100 print p """ cmap =plt.cm.jet;# plt.get_cmap() cmap.set_bad(color = 'w', alpha = 1.) plt.figure();plt.imshow(R,cmap=cmap);plt.colorbar(); plt.title('Considering top %1.1f predictions with decrease in prediction score' %p) plt.show() """
gpl-3.0
dubourg/openturns
python/src/viewer.py
1
20628
""" Graph viewer. Graph or Drawable viewer using matplotlib Examples -------- >>> import openturns as ot >>> from openturns.viewer import View >>> graph = ot.Normal().drawPDF() >>> view = View(graph, plot_kwargs={'color':'blue'}) >>> view.save('curve.png', dpi=100) >>> view.show(block=False) """ import openturns as ot import numpy as np import matplotlib import matplotlib.pyplot as plt from distutils.version import LooseVersion import os import re import warnings import io import sys class View(object): """ Create the figure. Parameters ---------- graph : :class:`~openturns.Graph, :class:`~openturns.Drawable` A Graph or Drawable object. figure : :class:`matplotlib.figure.Figure` The figure to draw on. figure_kwargs : dict, optional Passed on to matplotlib.pyplot.figure kwargs axes : :class:`matplotlib.axes.Axes` The axes to draw on. plot_kwargs : dict, optional Used when drawing Cloud, Curve drawables Passed on as matplotlib.axes.Axes.plot kwargs axes_kwargs : dict, optional Passed on to matplotlib.figure.Figure.add_subplot kwargs bar_kwargs : dict, optional Used when drawing BarPlot drawables Passed on to matplotlib.pyplot.bar kwargs pie_kwargs : dict, optional Used when drawing Pie drawables Passed on to matplotlib.pyplot.pie kwargs polygon_kwargs : dict, optional Used when drawing Polygon drawables Passed on to matplotlib.patches.Polygon kwargs polygoncollection_kwargs : dict, optional Used when drawing PolygonArray drawables Passed on to matplotlib.collection.PolygonCollection kwargs contour_kwargs : dict, optional Used when drawing Contour drawables Passed on to matplotlib.pyplot.contour kwargs clabel_kwargs : dict, optional Used when drawing Contour drawables Passed on to matplotlib.pyplot.clabel kwargs step_kwargs : dict, optional Used when drawing Staircase drawables Passed on to matplotlib.pyplot.step kwargs text_kwargs : dict, optional Used when drawing Pairs drawables Passed on to matplotlib.axes.Axes.text kwargs legend_kwargs : dict, optional Passed on to matplotlib.axes.Axes.legend kwargs add_legend : bool, optional Adds a legend if True. Default is True. 
""" @staticmethod def CheckDict(arg): """Check that the argument is a python dictionary.""" result = arg if arg is None: result = dict() elif not isinstance(arg, dict): raise TypeError('Argument is not a dict') return result @staticmethod def ToUnicode(s): """Convert to unicode if necessary.""" if isinstance(s, bytes): s = s.decode('utf8') return s def __init__(self, graph, figure=None, figure_kwargs=None, axes=[], plot_kwargs=None, axes_kwargs=None, bar_kwargs=None, pie_kwargs=None, polygon_kwargs=None, polygoncollection_kwargs=None, contour_kwargs=None, step_kwargs=None, clabel_kwargs=None, text_kwargs=None, legend_kwargs=None, add_legend=True, **kwargs): # prevent Qt from stopping the interpreter, see matplotlib PR #1905 if LooseVersion(matplotlib.__version__) < LooseVersion('1.3'): # check for DISPLAY env variable on X11 build of Qt if plt.get_backend().startswith('Qt4'): from matplotlib.backends.qt4_compat import QtGui if hasattr(QtGui, 'QX11Info'): display = os.environ.get('DISPLAY') if display is None or not re.search(':\d', display): raise RuntimeError('Invalid DISPLAY variable') if not isinstance(graph, ot.Graph) and not isinstance(graph, ot.GraphImplementation): if not isinstance(graph, ot.Drawable) and not isinstance(graph, ot.DrawableImplementation): raise RuntimeError( '-- The given object cannot be converted into a Graph nor Drawable.') else: # convert Drawable => Graph drawable = graph graph = ot.Graph() graph.add(drawable) drawables = graph.getDrawables() size = len(drawables) if size == 0: warnings.warn('-- Nothing to draw.') return # check that arguments are dictionnaries figure_kwargs = self.CheckDict(figure_kwargs) axes_kwargs = self.CheckDict(axes_kwargs) plot_kwargs_default = self.CheckDict(plot_kwargs) bar_kwargs_default = self.CheckDict(bar_kwargs) pie_kwargs_default = self.CheckDict(pie_kwargs) polygon_kwargs_default = self.CheckDict(polygon_kwargs) polygoncollection_kwargs_default = self.CheckDict( polygoncollection_kwargs) contour_kwargs_default = self.CheckDict(contour_kwargs) step_kwargs_default = self.CheckDict(step_kwargs) clabel_kwargs_default = self.CheckDict(clabel_kwargs) text_kwargs_default = self.CheckDict(text_kwargs) legend_kwargs = self.CheckDict(legend_kwargs) # set step drawstyle step_kwargs_default.setdefault('where', 'post') # set title axes_kwargs.setdefault('title', self.ToUnicode(graph.getTitle())) # set scale if (graph.getLogScale() == ot.GraphImplementation.LOGX) or (graph.getLogScale() == ot.GraphImplementation.LOGXY): axes_kwargs.setdefault('xscale', 'log') if (graph.getLogScale() == ot.GraphImplementation.LOGY) or (graph.getLogScale() == ot.GraphImplementation.LOGXY): axes_kwargs.setdefault('yscale', 'log') # set bounding box axes_kwargs.setdefault( 'xlim', [graph.getBoundingBox()[0], graph.getBoundingBox()[1]]) axes_kwargs.setdefault( 'ylim', [graph.getBoundingBox()[2], graph.getBoundingBox()[3]]) # set figure if figure is None: if len(axes) == 0: self._fig = plt.figure(**figure_kwargs) else: self._fig = axes[0].get_figure() else: self._fig = figure if len(axes) == 0: axes = self._fig.axes # set axes if len(axes) == 0: self._ax = [self._fig.add_subplot(111, **axes_kwargs)] else: self._ax = axes has_labels = False self._ax[0].grid(b=graph.getGrid()) for drawable in drawables: # reset working dictionaries by excplicitely creating copies plot_kwargs = dict(plot_kwargs_default) bar_kwargs = dict(bar_kwargs_default) pie_kwargs = dict(pie_kwargs_default) polygon_kwargs = dict(polygon_kwargs_default) polygoncollection_kwargs = 
dict(polygoncollection_kwargs_default) contour_kwargs = dict(contour_kwargs_default) step_kwargs = dict(step_kwargs_default) clabel_kwargs = dict(clabel_kwargs_default) text_kwargs = dict(text_kwargs_default) # set color if ('color' not in plot_kwargs_default) and ('c' not in plot_kwargs_default): plot_kwargs['color'] = drawable.getColorCode() if ('color' not in bar_kwargs_default) and ('c' not in bar_kwargs_default): bar_kwargs['color'] = drawable.getColorCode() if ('color' not in step_kwargs_default) and ('c' not in step_kwargs_default): step_kwargs['color'] = drawable.getColorCode() # set marker pointStyleDict = {'square': 's', 'circle': 'o', 'triangleup': '2', 'plus': '+', 'times': '+', 'diamond': '+', 'triangledown': 'v', 'star': '*', 'fsquare': 's', 'fcircle': 'o', 'ftriangleup': '2', 'fdiamond': 'D', 'bullet': '.', 'dot': ',', 'none': 'None'} if 'marker' not in plot_kwargs_default: try: plot_kwargs['marker'] = pointStyleDict[ drawable.getPointStyle()] except: warnings.warn( '-- Unknown marker: ' + drawable.getPointStyle()) # set line style lineStyleDict = {'solid': '-', 'dashed': '--', 'dotted': ':', 'dotdash': '-.', 'longdash': '--', 'twodash': '--'} if ('linestyle' not in plot_kwargs_default) and ('ls' not in plot_kwargs_default): try: plot_kwargs['linestyle'] = lineStyleDict[ drawable.getLineStyle()] except: warnings.warn('-- Unknown line style') if ('linestyle' not in step_kwargs_default) and ('ls' not in step_kwargs_default): try: step_kwargs['linestyle'] = lineStyleDict[ drawable.getLineStyle()] except: warnings.warn('-- Unknown line style') # set line width if ('linewidth' not in plot_kwargs_default) and ('lw' not in plot_kwargs_default): plot_kwargs['linewidth'] = drawable.getLineWidth() if ('linewidth' not in step_kwargs_default) and ('lw' not in step_kwargs_default): step_kwargs['linewidth'] = drawable.getLineWidth() # retrieve data data = drawable.getData() x = data.getMarginal(0) if data.getDimension() > 1: y = data.getMarginal(1) # add label, title drawableKind = drawable.getImplementation().getClassName() if drawableKind != 'Pie': self._ax[0].set_xlabel(self.ToUnicode(graph.getXTitle())) self._ax[0].set_ylabel(self.ToUnicode(graph.getYTitle())) if (len(drawable.getLegend()) > 0) and ((drawableKind != 'Cloud') or (drawable.getPointStyle() != 'none')): label = self.ToUnicode(drawable.getLegend()) has_labels = True plot_kwargs.setdefault('label', label) bar_kwargs.setdefault('label', label) step_kwargs.setdefault('label', label) if drawableKind == 'BarPlot': # linestyle for bar() is different than the one for plot() if 'linestyle' in bar_kwargs_default: bar_kwargs.pop('linestyle') if ('linestyle' not in plot_kwargs_default) and ('ls' not in plot_kwargs_default): lineStyleDict = {'solid': 'solid', 'dashed': 'dashed', 'dotted': 'dotted', 'dotdash': 'dashdot', 'longdash': 'dashed', 'twodash': 'dashed'} if drawable.getLineStyle() in lineStyleDict: bar_kwargs['linestyle'] = lineStyleDict[ drawable.getLineStyle()] else: warnings.warn( '-- Unknown line style: ' + drawable.getLineStyle()) xi = drawable.getOrigin() for i in range(x.getSize()): # label only the first bar to avoid getting several legend # items if (i == 1) and ('label' in bar_kwargs): bar_kwargs.pop('label') self._ax[0].bar( xi, height=y[i][0], width=x[i][0], **bar_kwargs) xi += x[i][0] elif drawableKind == 'Cloud': plot_kwargs['linestyle'] = 'None' self._ax[0].plot(x, y, **plot_kwargs) elif drawableKind == 'Curve': self._ax[0].plot(x, y, **plot_kwargs) elif drawableKind == 'Polygon': if ('facecolor' not in 
polygon_kwargs_default) and ('fc' not in polygon_kwargs_default): polygon_kwargs['facecolor'] = drawable.getColorCode() if ('edgecolor' not in polygon_kwargs_default) and ('ec' not in polygon_kwargs_default): polygon_kwargs['edgecolor'] = drawable.ConvertFromName( drawable.getEdgeColor()) self._ax[0].add_patch( matplotlib.patches.Polygon(data, **polygon_kwargs)) elif drawableKind == 'PolygonArray': polygonsNumber = drawable.getPalette().getSize() verticesNumber = drawable.getData().getSize() // polygonsNumber colors = drawable.getPalette() colorsRGBA = [] for i in range(polygonsNumber): rgba = drawable.ConvertToRGBA(colors[i]) colorsRGBA.append( (rgba[0] / 255.0, rgba[1] / 255.0, rgba[2] / 255.0, rgba[3] / 255.0)) if 'facecolors' not in polygoncollection_kwargs_default: polygoncollection_kwargs['facecolors'] = colorsRGBA if 'edgecolors' not in polygon_kwargs_default: polygoncollection_kwargs['edgecolors'] = colorsRGBA self._ax[0].add_collection( matplotlib.collections.PolyCollection(np.array(data).reshape((polygonsNumber, verticesNumber, 2)), **polygoncollection_kwargs)) elif drawableKind == 'Pie': pie_kwargs.setdefault( 'labels', list(map(self.ToUnicode, drawable.getLabels()))) pie_kwargs.setdefault('colors', drawable.getPalette()) self._ax[0].set_aspect('equal') self._ax[0].pie(x, **pie_kwargs) elif drawableKind == 'Contour': X, Y = np.meshgrid(drawable.getX(), drawable.getY()) Z = np.reshape(drawable.getData(), ( drawable.getX().getSize(), drawable.getY().getSize())) if drawable.getDrawLabels(): contour_kwargs.setdefault('levels', drawable.getLevels()) clabel_kwargs.setdefault('fontsize', 8) clabel_kwargs.setdefault('fmt', '%g') if ('linestyles' not in contour_kwargs_default) and ('ls' not in contour_kwargs_default): try: contour_kwargs['linestyles'] = lineStyleDict[ drawable.getLineStyle()] except: warnings.warn('-- Unknown line style') if 'color' not in contour_kwargs_default: contour_kwargs['color'] = drawable.getColor() contourset = self._ax[0].contour(X, Y, Z, **contour_kwargs) plt.clabel(contourset, **clabel_kwargs) elif drawableKind == 'Staircase': self._ax[0].step(x, y, **step_kwargs) elif drawableKind == 'Pairs': # disable axis : grid, ticks, axis self._ax[0].axison = False if 'title' in axes_kwargs: axes_kwargs.pop('title') axes_kwargs['xticks'] = [] axes_kwargs['yticks'] = [] dim = drawable.getData().getDimension() labels = drawable.getLabels() # adjust font if ('fontsize' not in text_kwargs_default) and ('size' not in text_kwargs_default): text_kwargs['fontsize'] = max(16 - dim, 4) text_kwargs.setdefault('horizontalalignment', 'center') text_kwargs.setdefault('verticalalignment', 'center') for i in range(dim): for j in range(dim): if len(self._ax) <= dim * dim: self._ax.append(self._fig.add_subplot( dim, dim, 1 + i * dim + j, **axes_kwargs)) if i != j: x = drawable.getData().getMarginal(i) y = drawable.getData().getMarginal(j) x_min = x.getMin()[0] x_max = x.getMax()[0] x_margin = 0.1 * (x_max - x_min) y_min = y.getMin()[0] y_max = y.getMax()[0] y_margin = 0.1 * (y_max - y_min) plot_kwargs['linestyle'] = 'None' self._ax[1 + i * dim + j].plot(x, y, **plot_kwargs) self._ax[1 + i * dim + j].set_xlim( x_min - x_margin, x_max + x_margin) self._ax[1 + i * dim + j].set_ylim( y_min - y_margin, y_max + y_margin) else: text_kwargs['transform'] = self._ax[ 1 + i * dim + j].transAxes self._ax[1 + i * dim + j].text( 0.5, 0.5, labels[i], **text_kwargs) else: raise ValueError( 'Drawable type not implemented: ' + drawableKind) # Add legend if add_legend and has_labels and 
(graph.getLegendPosition() != ''): # set legend position if 'loc' not in legend_kwargs: try: legendPositionDict = {'bottomright': 'lower right', 'bottom': 'lower center', 'bottomleft': 'lower left', 'left': 'center left', 'topleft': 'upper left', 'topright': 'upper right', 'right': 'center right', 'center': 'center'} legend_kwargs['loc'] = legendPositionDict[ graph.getLegendPosition()] except: warnings.warn( '-- Unknown legend position: ' + graph.getLegendPosition()) # set a single legend point legend_kwargs.setdefault('numpoints', 1) # enable round box by default legend_kwargs.setdefault('fancybox', True) # enable shadow by default legend_kwargs.setdefault('shadow', True) self._ax[0].legend(**legend_kwargs) def show(self, **kwargs): """ Display the graph on screen. Parameters ---------- kwargs: block: bool, optional If true (default), block until the graph is closed. These parameters are passed to matplotlib.pyplot.show() """ plt.show(**kwargs) def save(self, fname, **kwargs): """ Save the graph as file. Parameters ---------- fname: bool, optional A string containing a path to a filename from which file format is deduced. kwargs: Refer to matplotlib.figure.Figure.savefig documentation for valid keyword arguments. """ self._fig.savefig(fname, **kwargs) def getFigure(self): """ Accessor to the underlying figure object. Refer to matplotlib.figure.Figure for further information. """ return self._fig def getAxes(self): """ Get the list of Axes objects. Refer to matplotlib.axes.Axes for further information. """ return self._ax def close(self): """Close the figure.""" plt.close(self._fig) def ToSVGString(graph): """ Convert as SVG file. Parameters ---------- graph : object A Graph or Drawable object. Returns a SVG representation as string """ if sys.version_info[0] >= 3: output = io.StringIO() else: output = io.BytesIO() # save interactive mode state ision = plt.isinteractive() plt.ioff() view = View(graph) view.save(output, format='svg') view.close() # restore interactive mode state if ision: plt.ion() return output.getvalue()
gpl-3.0
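A short sketch, assuming OpenTURNS and matplotlib are installed, of the keyword-dictionary pass-through documented by the View constructor above; the styling values are arbitrary.

import openturns as ot
from openturns.viewer import View

graph = ot.Normal().drawPDF()
view = View(graph,
            plot_kwargs={'color': 'blue', 'linestyle': '--'},   # forwarded to Axes.plot
            legend_kwargs={'loc': 'upper right'})               # forwarded to Axes.legend
view.save('normal_pdf.png', dpi=75)
view.close()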
kapteyn-astro/kapteyn
doc/source/EXAMPLES/mu_insidelabeldemo.py
1
1047
from kapteyn import maputils
from matplotlib import pylab as plt

header = {'NAXIS': 2, 'NAXIS1': 100, 'NAXIS2': 100,
          'CDELT1': -7.165998823000E-03, 'CRPIX1': 5.100000000000E+01,
          'CRVAL1': -5.128208479590E+01, 'CTYPE1': 'RA---NCP',
          'CUNIT1': 'DEGREE ',
          'CDELT2': 7.165998823000E-03, 'CRPIX2': 5.100000000000E+01,
          'CRVAL2': 6.015388802060E+01, 'CTYPE2': 'DEC--NCP ',
          'CUNIT2': 'DEGREE'
          }

fig = plt.figure()
frame = fig.add_axes([0.15, 0.15, 0.8, 0.8])
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule()
grat2 = annim.Graticule(skyout='Galactic')
grat2.setp_gratline(color='g')
grat2.setp_ticklabel(visible=False)
grat2.setp_axislabel(visible=False)
inswcs0 = grat2.Insidelabels(wcsaxis=0, deltapx=5, deltapy=5)
inswcs1 = grat2.Insidelabels(wcsaxis=1, constval='95d45m')
inswcs0.setp_label(color='r')
inswcs0.setp_label(position="96d0m", color='b', tex=False,
                   fontstyle='italic')
inswcs1.setp_label(position="12d0m", fontsize=14, color='m')
annim.plot()
annim.interact_toolbarinfo()
plt.show()
bsd-3-clause
DSLituiev/scikit-learn
sklearn/neighbors/tests/test_kde.py
80
5560
import numpy as np from sklearn.utils.testing import (assert_allclose, assert_raises, assert_equal) from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors from sklearn.neighbors.ball_tree import kernel_norm from sklearn.pipeline import make_pipeline from sklearn.datasets import make_blobs from sklearn.model_selection import GridSearchCV from sklearn.preprocessing import StandardScaler def compute_kernel_slow(Y, X, kernel, h): d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1)) norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0] if kernel == 'gaussian': return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1) elif kernel == 'tophat': return norm * (d < h).sum(-1) elif kernel == 'epanechnikov': return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1) elif kernel == 'exponential': return norm * (np.exp(-d / h)).sum(-1) elif kernel == 'linear': return norm * ((1 - d / h) * (d < h)).sum(-1) elif kernel == 'cosine': return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1) else: raise ValueError('kernel not recognized') def test_kernel_density(n_samples=100, n_features=3): rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) Y = rng.randn(n_samples, n_features) for kernel in ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']: for bandwidth in [0.01, 0.1, 1]: dens_true = compute_kernel_slow(Y, X, kernel, bandwidth) def check_results(kernel, bandwidth, atol, rtol): kde = KernelDensity(kernel=kernel, bandwidth=bandwidth, atol=atol, rtol=rtol) log_dens = kde.fit(X).score_samples(Y) assert_allclose(np.exp(log_dens), dens_true, atol=atol, rtol=max(1E-7, rtol)) assert_allclose(np.exp(kde.score(Y)), np.prod(dens_true), atol=atol, rtol=max(1E-7, rtol)) for rtol in [0, 1E-5]: for atol in [1E-6, 1E-2]: for breadth_first in (True, False): yield (check_results, kernel, bandwidth, atol, rtol) def test_kernel_density_sampling(n_samples=100, n_features=3): rng = np.random.RandomState(0) X = rng.randn(n_samples, n_features) bandwidth = 0.2 for kernel in ['gaussian', 'tophat']: # draw a tophat sample kde = KernelDensity(bandwidth, kernel=kernel).fit(X) samp = kde.sample(100) assert_equal(X.shape, samp.shape) # check that samples are in the right range nbrs = NearestNeighbors(n_neighbors=1).fit(X) dist, ind = nbrs.kneighbors(X, return_distance=True) if kernel == 'tophat': assert np.all(dist < bandwidth) elif kernel == 'gaussian': # 5 standard deviations is safe for 100 samples, but there's a # very small chance this test could fail. assert np.all(dist < 5 * bandwidth) # check unsupported kernels for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']: kde = KernelDensity(bandwidth, kernel=kernel).fit(X) assert_raises(NotImplementedError, kde.sample, 100) # non-regression test: used to return a scalar X = rng.randn(4, 1) kde = KernelDensity(kernel="gaussian").fit(X) assert_equal(kde.sample().shape, (1, 1)) def test_kde_algorithm_metric_choice(): # Smoke test for various metrics and algorithms rng = np.random.RandomState(0) X = rng.randn(10, 2) # 2 features required for haversine dist. 
Y = rng.randn(10, 2) for algorithm in ['auto', 'ball_tree', 'kd_tree']: for metric in ['euclidean', 'minkowski', 'manhattan', 'chebyshev', 'haversine']: if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics: assert_raises(ValueError, KernelDensity, algorithm=algorithm, metric=metric) else: kde = KernelDensity(algorithm=algorithm, metric=metric) kde.fit(X) y_dens = kde.score_samples(Y) assert_equal(y_dens.shape, Y.shape[:1]) def test_kde_score(n_samples=100, n_features=3): pass #FIXME #np.random.seed(0) #X = np.random.random((n_samples, n_features)) #Y = np.random.random((n_samples, n_features)) def test_kde_badargs(): assert_raises(ValueError, KernelDensity, algorithm='blah') assert_raises(ValueError, KernelDensity, bandwidth=0) assert_raises(ValueError, KernelDensity, kernel='blah') assert_raises(ValueError, KernelDensity, metric='blah') assert_raises(ValueError, KernelDensity, algorithm='kd_tree', metric='blah') def test_kde_pipeline_gridsearch(): # test that kde plays nice in pipelines and grid-searches X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False), KernelDensity(kernel="gaussian")) params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10]) search = GridSearchCV(pipe1, param_grid=params, cv=5) search.fit(X) assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
bsd-3-clause
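A small sketch, assuming scikit-learn and NumPy, mirroring what compute_kernel_slow above spells out for the Gaussian kernel: the tree-based KernelDensity estimate should agree with the brute-force sum up to numerical tolerance.

import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.RandomState(0)
X = rng.randn(100, 2)
h = 0.5

kde = KernelDensity(kernel='gaussian', bandwidth=h).fit(X)
log_dens = kde.score_samples(X[:5])

# brute-force Gaussian KDE, as in compute_kernel_slow
d = np.sqrt(((X[:5, None, :] - X) ** 2).sum(-1))
norm = 1.0 / (len(X) * (2 * np.pi * h ** 2) ** (X.shape[1] / 2.0))
dens = norm * np.exp(-0.5 * (d / h) ** 2).sum(-1)

print(np.allclose(np.exp(log_dens), dens))  # expected: True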
armadill-odyssey/aima-python
submissions/LaMartina/myNN.py
13
7078
import traceback from submissions.LaMartina import state_crime from sklearn.neural_network import MLPClassifier class DataFrame: data = [] feature_names = [] target = [] target_names = [] crimes = DataFrame() ''' Extract data from the CORGIS state_crime. ''' joint = {} crime = state_crime.get_all_crimes() for c in crime: try: #if c['State'] == 'Alabama': stateyear = c['State'] + str(c['Year']) pop = c['Data']['Population'] murder = c['Data']['Totals']['Violent']['Murder'] assault = c['Data']['Totals']['Violent']['Assault'] robbery = c['Data']['Totals']['Violent']['Robbery'] rape = c['Data']['Totals']['Violent']['Rape'] burg = c['Data']['Totals']['Property']['Burglary'] larceny = c['Data']['Totals']['Property']['Larceny'] motor = c['Data']['Totals']['Property']['Motor'] joint[stateyear] = {} joint[stateyear]['Population'] = pop joint[stateyear]['Murder Numbers'] = murder joint[stateyear]['Rape Numbers'] = rape joint[stateyear]['Burglary Numbers'] = burg joint[stateyear]['Robbery Numbers'] = robbery joint[stateyear]['Assault Numbers'] = assault joint[stateyear]['Larceny Numbers'] = larceny joint[stateyear]['Motor Crime Numbers'] = motor except: traceback.print_exc() crimes.data = [] ''' Build the input frame, row by row. ''' for port in joint: # choose the input values crimes.data.append([ #port, #joint[port]['Population'], joint[port]['Rape Numbers'], joint[port]['Robbery Numbers'], joint[port]['Assault Numbers'] #joint[port]['Burglary Numbers'], ]) crimes.feature_names = [ #'Population', 'Rape Numbers', #'Burglary Numbers', 'Robbery Numbers' 'Assault Numbers' ] ''' Build the target list, one entry for each row in the input frame. The Naive Bayesian network is a classifier, i.e. it sorts data points into bins. The best it can do to estimate a continuous variable is to break the domain into segments, and predict the segment into which the variable's value will fall. In this example, I'm breaking Trump's % into two arbitrary segments. ''' crimes.target = [] def murderTarget(murdernum): if murdernum > 800: return 1 return 0 for cri in joint: # choose the target c = murderTarget(joint[cri]['Murder Numbers']) crimes.target.append(c) crimes.target_names = [ 'Murders <= 800', 'Murders > 800', ] ''' Try scaling the data. 
''' crimesScaled = DataFrame() def setupScales(grid): global min, max min = list(grid[0]) max = list(grid[0]) for row in range(1, len(grid)): for col in range(len(grid[row])): cell = grid[row][col] if cell < min[col]: min[col] = cell if cell > max[col]: max[col] = cell def scaleGrid(grid): newGrid = [] for row in range(len(grid)): newRow = [] for col in range(len(grid[row])): try: cell = grid[row][col] scaled = (cell - min[col]) \ / (max[col] - min[col]) newRow.append(scaled) except: pass newGrid.append(newRow) return newGrid #The scaled data frame setupScales(crimes.data) crimesScaled.data = scaleGrid(crimes.data) crimesScaled.feature_names = crimes.feature_names crimesScaled.target = crimes.target crimesScaled.target_names = crimes.target_names #New MLPClassifier that adjusts learning rate and iterations mlp2 = MLPClassifier( # hidden_layer_sizes = (100,), # activation = 'relu', #solver='sgd', # 'adam', # alpha = 0.0001, # batch_size='auto', learning_rate = 'adaptive', # 'constant', # power_t = 0.5, max_iter = 1000, # 200, # shuffle = True, # random_state = None, # tol = 1e-4, # verbose = False, # warm_start = False, # momentum = 0.9, # nesterovs_momentum = True, # early_stopping = False, # validation_fraction = 0.1, # beta_1 = 0.9, # beta_2 = 0.999, # epsilon = 1e-8, ) #New MLPClassifier that only adjusts the iterations mlp3 = MLPClassifier( # hidden_layer_sizes = (100,), # activation = 'relu', #solver='sgd', # 'adam', # alpha = 0.0001, # batch_size='auto', #learning_rate = 'adaptive', # 'constant', # power_t = 0.5, max_iter = 2000, # 200, # shuffle = True, # random_state = None, # tol = 1e-4, # verbose = False, # warm_start = False, # momentum = 0.9, # nesterovs_momentum = True, # early_stopping = False, # validation_fraction = 0.1, # beta_1 = 0.9, # beta_2 = 0.999, # epsilon = 1e-8, ) #New classifier that messes with mulitple dials to get the best results mlp4 = MLPClassifier( hidden_layer_sizes = (100,100,), #activation = 'logistic', #solver='lbfgs', # 'adam', #alpha = 1, #batch_size=1000, #learning_rate = 'adaptive', # 'constant', # power_t = 0.5, max_iter = 1000, # 200, # shuffle = True, # random_state = None, # tol = 1e-4, # verbose = False, # warm_start = False, # momentum = 0.9, # nesterovs_momentum = True, # early_stopping = False, # validation_fraction = 0.1, # beta_1 = 0.9, # beta_2 = 0.999, # epsilon = 1e-8, ) #data frame for nonviolent crimes and using them to predict murder nonVicrimes = DataFrame() nonVicrimes.data = [] ''' Build the input frame, row by row. 
''' for port in joint: # choose the input values nonVicrimes.data.append([ #port, #joint[port]['Population'], joint[port]['Burglary Numbers'], joint[port]['Larceny Numbers'], joint[port]['Motor Crime Numbers'] #joint[port]['Burglary Numbers'], ]) nonVicrimes.feature_names = [ #'Population', 'Burglary Numbers', #'Burglary Numbers', 'Larceny Numbers' 'Motor Crime Numbers' ] nonVicrimes.target = crimes.target nonVicrimes.target_names = crimes.target_names #Scaled non-violent crimes data frame nonVicrimesScaled = DataFrame() setupScales(nonVicrimes.data) nonVicrimesScaled.data = scaleGrid(nonVicrimes.data) nonVicrimesScaled.feature_names = crimes.feature_names nonVicrimesScaled.target = crimes.target nonVicrimesScaled.target_names = crimes.target_names Examples = { 'Crimes': { 'frame': crimes, }, 'CrimesScaled': { 'frame': crimesScaled, }, 'CrimesMLP2': { 'frame': crimes, 'mlp2': mlp2 }, 'CrimesMLP2Scaled': { 'frame': crimesScaled, 'mlp2': mlp2 }, 'CrimesMLP3': { 'frame': crimes, 'mlp3': mlp3 }, 'CrimesMLP4': { 'frame': crimes, 'mlp4': mlp4 }, # 'CrimesMLP4Scaled': { # 'frame': crimesScaled, # 'mlp4': mlp4 # }, 'Non-Violent Crimes': { 'frame': nonVicrimes, }, 'Non-Violent Crimes Scaled': { 'frame': nonVicrimesScaled, }, }
mit
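The file above rolls its own min-max scaling in setupScales/scaleGrid; here is a sketch of the same transformation with scikit-learn's MinMaxScaler on a made-up stand-in for crimes.data (the numbers below are not real CORGIS values).

import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler

# made-up stand-in for crimes.data / crimes.target
X = np.array([[1200.0, 300.0, 4500.0],
              [80.0, 20.0, 150.0],
              [640.0, 90.0, 2100.0],
              [30.0, 5.0, 90.0]])
y = [1, 0, 1, 0]

X_scaled = MinMaxScaler().fit_transform(X)   # column-wise (x - min) / (max - min)

clf = MLPClassifier(max_iter=1000)
clf.fit(X_scaled, y)
print(clf.predict(X_scaled))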
spallavolu/scikit-learn
examples/svm/plot_separating_hyperplane_unbalanced.py
329
1850
""" ================================================= SVM: Separating hyperplane for unbalanced classes ================================================= Find the optimal separating hyperplane using an SVC for classes that are unbalanced. We first find the separating plane with a plain SVC and then plot (dashed) the separating hyperplane with automatically correction for unbalanced classes. .. currentmodule:: sklearn.linear_model .. note:: This example will also work by replacing ``SVC(kernel="linear")`` with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour such as that of a SVC with a linear kernel. For example try instead of the ``SVC``:: clf = SGDClassifier(n_iter=100, alpha=0.01) """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm #from sklearn.linear_model import SGDClassifier # we create 40 separable points rng = np.random.RandomState(0) n_samples_1 = 1000 n_samples_2 = 100 X = np.r_[1.5 * rng.randn(n_samples_1, 2), 0.5 * rng.randn(n_samples_2, 2) + [2, 2]] y = [0] * (n_samples_1) + [1] * (n_samples_2) # fit the model and get the separating hyperplane clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, y) w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - clf.intercept_[0] / w[1] # get the separating hyperplane using weighted classes wclf = svm.SVC(kernel='linear', class_weight={1: 10}) wclf.fit(X, y) ww = wclf.coef_[0] wa = -ww[0] / ww[1] wyy = wa * xx - wclf.intercept_[0] / ww[1] # plot separating hyperplanes and samples h0 = plt.plot(xx, yy, 'k-', label='no weights') h1 = plt.plot(xx, wyy, 'k--', label='with weights') plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.legend() plt.axis('tight') plt.show()
bsd-3-clause
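A companion sketch to the example above, assuming a recent scikit-learn: instead of the hand-picked class_weight={1: 10}, class_weight='balanced' reweights each class by the inverse of its frequency.

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X = np.r_[1.5 * rng.randn(1000, 2), 0.5 * rng.randn(100, 2) + [2, 2]]
y = [0] * 1000 + [1] * 100

# 'balanced' derives the class weights from the label frequencies in y
wclf = svm.SVC(kernel='linear', class_weight='balanced')
wclf.fit(X, y)
print(wclf.coef_, wclf.intercept_)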
lenovor/BDA_py_demos
demos_ch2/demo2_3.py
19
1931
"""Bayesian Data Analysis, 3rd ed Chapter 2, demo 3 Simulate samples from Beta(438,544), draw a histogram with quantiles, and do the same for a transformed variable. """ import numpy as np from scipy.stats import beta import matplotlib.pyplot as plt # Edit default plot settings (colours from colorbrewer2.org) plt.rc('font', size=14) plt.rc('lines', color='#377eb8', linewidth=2) plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a', '#984ea3','#ff7f00','#ffff33')) # Plotting grid x = np.linspace(0.36, 0.54, 150) # Draw n random samples from Beta(438,544) n = 10000 th = beta.rvs(438, 544, size=n) # rvs comes from `random variates` # Plot 2 subplots fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(8, 10)) # Plot histogram axes[0].hist(th, bins=30) # Compute 2.5% and 97.5% quantile approximation using samples th25, th975 = np.percentile(th, [2.5, 97.5]) # Draw lines for these axes[0].axvline(th25, color='#e41a1c', linewidth=1.5) axes[0].axvline(th975, color='#e41a1c', linewidth=1.5) axes[0].text(th25, axes[0].get_ylim()[1]+15, '2.5%', horizontalalignment='center') axes[0].text(th975, axes[0].get_ylim()[1]+15, '97.5%', horizontalalignment='center') axes[0].set_xlabel(r'$\theta$', fontsize=18) axes[0].set_yticks(()) # Plot histogram for the transformed variable phi = (1-th)/th axes[1].hist(phi, bins=30) # Compute 2.5% and 97.5% quantile approximation using samples phi25, phi975 = np.percentile(phi, [2.5, 97.5]) # Draw lines for these axes[1].axvline(phi25, color='#e41a1c', linewidth=1.5) axes[1].axvline(phi975, color='#e41a1c', linewidth=1.5) axes[1].text(phi25, axes[1].get_ylim()[1]+15, '2.5%', horizontalalignment='center') axes[1].text(phi975, axes[1].get_ylim()[1]+15, '97.5%', horizontalalignment='center') axes[1].set_xlabel(r'$\phi$', fontsize=18) axes[1].set_yticks(()) # Display the figure plt.show()
gpl-3.0
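A quick check, assuming SciPy, that the sample-based quantiles used above approximate the exact Beta quantiles; with 10000 draws the two typically agree to two or three decimal places.

import numpy as np
from scipy.stats import beta

th = beta.rvs(438, 544, size=10000, random_state=0)
print(np.percentile(th, [2.5, 97.5]))       # Monte Carlo approximation
print(beta.ppf([0.025, 0.975], 438, 544))   # exact quantiles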
mtmarsh2/vislab
vislab/results_viz.py
4
1963
import numpy as np import matplotlib.pyplot as plt import vislab.gg def plot_df_bar(df, columns=None, figsize=(16, 4), fontsize=13): """ Used to plot AP vs MCC for a single feature, or AP between features. """ fig = plt.figure(figsize=figsize) ax = fig.add_subplot(111) if columns is not None: df = df[columns] df.plot(ax=ax, kind='bar') ax.set_ylim([0, 1]) ax.set_yticks(np.arange(11) / 10.) fig.autofmt_xdate() ax.legend( loc='upper center', bbox_to_anchor=(0.5, 1.2), ncol=3, fancybox=True, shadow=True, prop={'size': fontsize}) for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()): item.set_fontsize(fontsize) vislab.gg.rstyle(ax) return fig def plot_top_k_accuracies(accuracies_df, top_k=5, font_size=13): fig = plt.figure() ax = fig.add_subplot(111) accuracies_df.ix[range(top_k + 1)].plot(ax=ax, style='s--') ax.set_xlim([1, top_k]) ax.set_xticks(range(1, top_k + 1)) ax.set_xlabel('K') ax.set_ylim([0, 1]) ax.set_ylabel('Top-K Accuracy') ax.legend( loc='upper center', bbox_to_anchor=(0.5, 1.35), ncol=2, fancybox=True, shadow=True, prop={'size': font_size}) for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()): item.set_fontsize(font_size) vislab.gg.rstyle(ax) return fig def plot_curve_with_area(x, y, area, xlabel, ylabel, area_label, title=None): fig = plt.figure(figsize=(5, 5)) ax = fig.add_subplot(111) ax.plot(x, y, 'k-') ax.bar(0, area, 1, alpha=0.2) ax.text(.05, area - 0.05, '{}: {:.3f}'.format(area_label, area)) ax.set_xticks([0, .25, .5, .75, 1]) ax.set_yticks([0, .25, .5, .75, 1]) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) if title is not None: ax.set_title(title) return fig
bsd-2-clause
ishank08/scikit-learn
benchmarks/bench_plot_lasso_path.py
84
4005
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) # ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) # ax.legend() i += 1 plt.show()
bsd-3-clause
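A minimal sketch, assuming scikit-learn, of the two lars_path calls being timed above; with and without the precomputed Gram matrix the returned coefficient paths should match up to floating-point noise.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import lars_path

X, y = make_regression(n_samples=200, n_features=50, random_state=0)

G = np.dot(X.T, X)    # precomputed Gram matrix
Xy = np.dot(X.T, y)
alphas_g, _, coefs_g = lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
alphas, _, coefs = lars_path(X, y, method='lasso')

print(np.allclose(coefs_g, coefs))  # expected: True, up to numerical precision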
ominux/scikit-learn
examples/manifold/plot_compare_methods.py
4
2211
""" ========================================= Comparison of Manifold Learning methods ========================================= An illustration of dimensionality reduction on the S-curve dataset with various manifold learning methods. For a discussion and comparison of these algorithms, see the :ref:`manifold module page <manifold>` """ # Author: Jake Vanderplas -- <vanderplas@astro.washington.edu> print __doc__ from time import time import pylab as pl from mpl_toolkits.mplot3d import Axes3D from matplotlib.ticker import NullFormatter from sklearn import manifold, datasets n_points = 1000 X, color = datasets.samples_generator.make_s_curve(n_points) n_neighbors = 10 out_dim = 2 fig = pl.figure(figsize=(12, 8)) pl.suptitle("Manifold Learning with %i points, %i neighbors" % (1000, n_neighbors), fontsize=14) try: # compatibility matplotlib < 1.0 ax = fig.add_subplot(231, projection='3d') ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral) ax.view_init(4, -72) except: ax = fig.add_subplot(231, projection='3d') pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral) methods = ['standard', 'ltsa', 'hessian', 'modified'] labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE'] for i, method in enumerate(methods): t0 = time() Y = manifold.LocallyLinearEmbedding(n_neighbors, out_dim, eigen_solver='auto', method=method).fit_transform(X) t1 = time() print "%s: %.2g sec" % (methods[i], t1 - t0) ax = fig.add_subplot(232 + i) pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral) pl.title("%s (%.2g sec)" % (labels[i], t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) pl.axis('tight') t0 = time() Y = manifold.Isomap(n_neighbors, out_dim).fit_transform(X) t1 = time() print "Isomap: %.2g sec" % (t1 - t0) ax = fig.add_subplot(236) pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral) pl.title("Isomap (%.2g sec)" % (t1 - t0)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_major_formatter(NullFormatter()) pl.axis('tight') pl.show()
bsd-3-clause
adamgreenhall/scikit-learn
examples/linear_model/plot_lasso_and_elasticnet.py
249
1982
""" ======================================== Lasso and Elastic Net for Sparse Signals ======================================== Estimates Lasso and Elastic-Net regression models on a manually generated sparse signal corrupted with an additive noise. Estimated coefficients are compared with the ground-truth. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import r2_score ############################################################################### # generate some sparse data to play with np.random.seed(42) n_samples, n_features = 50, 200 X = np.random.randn(n_samples, n_features) coef = 3 * np.random.randn(n_features) inds = np.arange(n_features) np.random.shuffle(inds) coef[inds[10:]] = 0 # sparsify coef y = np.dot(X, coef) # add noise y += 0.01 * np.random.normal((n_samples,)) # Split data in train set and test set n_samples = X.shape[0] X_train, y_train = X[:n_samples / 2], y[:n_samples / 2] X_test, y_test = X[n_samples / 2:], y[n_samples / 2:] ############################################################################### # Lasso from sklearn.linear_model import Lasso alpha = 0.1 lasso = Lasso(alpha=alpha) y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test) r2_score_lasso = r2_score(y_test, y_pred_lasso) print(lasso) print("r^2 on test data : %f" % r2_score_lasso) ############################################################################### # ElasticNet from sklearn.linear_model import ElasticNet enet = ElasticNet(alpha=alpha, l1_ratio=0.7) y_pred_enet = enet.fit(X_train, y_train).predict(X_test) r2_score_enet = r2_score(y_test, y_pred_enet) print(enet) print("r^2 on test data : %f" % r2_score_enet) plt.plot(enet.coef_, label='Elastic net coefficients') plt.plot(lasso.coef_, label='Lasso coefficients') plt.plot(coef, '--', label='original coefficients') plt.legend(loc='best') plt.title("Lasso R^2: %f, Elastic Net R^2: %f" % (r2_score_lasso, r2_score_enet)) plt.show()
bsd-3-clause
Djabbz/scikit-learn
examples/linear_model/plot_sparse_recovery.py
27
7466
""" ============================================================ Sparse recovery: feature selection for sparse linear models ============================================================ Given a small number of observations, we want to recover which features of X are relevant to explain y. For this :ref:`sparse linear models <l1_feature_selection>` can outperform standard statistical tests if the true model is sparse, i.e. if a small fraction of the features are relevant. As detailed in :ref:`the compressive sensing notes <compressive_sensing>`, the ability of L1-based approach to identify the relevant variables depends on the sparsity of the ground truth, the number of samples, the number of features, the conditioning of the design matrix on the signal subspace, the amount of noise, and the absolute value of the smallest non-zero coefficient [Wainwright2006] (http://statistics.berkeley.edu/tech-reports/709.pdf). Here we keep all parameters constant and vary the conditioning of the design matrix. For a well-conditioned design matrix (small mutual incoherence) we are exactly in compressive sensing conditions (i.i.d Gaussian sensing matrix), and L1-recovery with the Lasso performs very well. For an ill-conditioned matrix (high mutual incoherence), regressors are very correlated, and the Lasso randomly selects one. However, randomized-Lasso can recover the ground truth well. In each situation, we first vary the alpha parameter setting the sparsity of the estimated model and look at the stability scores of the randomized Lasso. This analysis, knowing the ground truth, shows an optimal regime in which relevant features stand out from the irrelevant ones. If alpha is chosen too small, non-relevant variables enter the model. On the opposite, if alpha is selected too large, the Lasso is equivalent to stepwise regression, and thus brings no advantage over a univariate F-test. In a second time, we set alpha and compare the performance of different feature selection methods, using the area under curve (AUC) of the precision-recall. """ print(__doc__) # Author: Alexandre Gramfort and Gael Varoquaux # License: BSD 3 clause import warnings import matplotlib.pyplot as plt import numpy as np from scipy import linalg from sklearn.linear_model import (RandomizedLasso, lasso_stability_path, LassoLarsCV) from sklearn.feature_selection import f_regression from sklearn.preprocessing import StandardScaler from sklearn.metrics import auc, precision_recall_curve from sklearn.ensemble import ExtraTreesRegressor from sklearn.utils.extmath import pinvh from sklearn.exceptions import ConvergenceWarning def mutual_incoherence(X_relevant, X_irelevant): """Mutual incoherence, as defined by formula (26a) of [Wainwright2006]. 
""" projector = np.dot(np.dot(X_irelevant.T, X_relevant), pinvh(np.dot(X_relevant.T, X_relevant))) return np.max(np.abs(projector).sum(axis=1)) for conditioning in (1, 1e-4): ########################################################################### # Simulate regression data with a correlated design n_features = 501 n_relevant_features = 3 noise_level = .2 coef_min = .2 # The Donoho-Tanner phase transition is around n_samples=25: below we # will completely fail to recover in the well-conditioned case n_samples = 25 block_size = n_relevant_features rng = np.random.RandomState(42) # The coefficients of our model coef = np.zeros(n_features) coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features) # The correlation of our design: variables correlated by blocs of 3 corr = np.zeros((n_features, n_features)) for i in range(0, n_features, block_size): corr[i:i + block_size, i:i + block_size] = 1 - conditioning corr.flat[::n_features + 1] = 1 corr = linalg.cholesky(corr) # Our design X = rng.normal(size=(n_samples, n_features)) X = np.dot(X, corr) # Keep [Wainwright2006] (26c) constant X[:n_relevant_features] /= np.abs( linalg.svdvals(X[:n_relevant_features])).max() X = StandardScaler().fit_transform(X.copy()) # The output variable y = np.dot(X, coef) y /= np.std(y) # We scale the added noise as a function of the average correlation # between the design and the output variable y += noise_level * rng.normal(size=n_samples) mi = mutual_incoherence(X[:, :n_relevant_features], X[:, n_relevant_features:]) ########################################################################### # Plot stability selection path, using a high eps for early stopping # of the path, to save computation time alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42, eps=0.05) plt.figure() # We plot the path as a function of alpha/alpha_max to the power 1/3: the # power 1/3 scales the path less brutally than the log, and enables to # see the progression along the path hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r') hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k') ymin, ymax = plt.ylim() plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$') plt.ylabel('Stability score: proportion of times selected') plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi) plt.axis('tight') plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'), loc='best') ########################################################################### # Plot the estimated stability scores for a given alpha # Use 6-fold cross-validation rather than the default 3-fold: it leads to # a better choice of alpha: # Stop the user warnings outputs- they are not necessary for the example # as it is specifically set up to be challenging. 
with warnings.catch_warnings(): warnings.simplefilter('ignore', UserWarning) warnings.simplefilter('ignore', ConvergenceWarning) lars_cv = LassoLarsCV(cv=6).fit(X, y) # Run the RandomizedLasso: we use a paths going down to .1*alpha_max # to avoid exploring the regime in which very noisy variables enter # the model alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6) clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y) trees = ExtraTreesRegressor(100).fit(X, y) # Compare with F-score F, _ = f_regression(X, y) plt.figure() for name, score in [('F-test', F), ('Stability selection', clf.scores_), ('Lasso coefs', np.abs(lars_cv.coef_)), ('Trees', trees.feature_importances_), ]: precision, recall, thresholds = precision_recall_curve(coef != 0, score) plt.semilogy(np.maximum(score / np.max(score), 1e-4), label="%s. AUC: %.3f" % (name, auc(recall, precision))) plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo', label="Ground truth") plt.xlabel("Features") plt.ylabel("Score") # Plot only the 100 first coefficients plt.xlim(0, 100) plt.legend(loc='best') plt.title('Feature selection scores - Mutual incoherence: %.1f' % mi) plt.show()
bsd-3-clause
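The example above exercises stability selection end to end; for readers who only want the call pattern, a minimal sketch follows. It assumes a pre-0.21 scikit-learn, where RandomizedLasso and lasso_stability_path still exist (both were later removed), and uses a toy problem rather than the correlated design built in the example.

import numpy as np
from sklearn.linear_model import RandomizedLasso, lasso_stability_path

# Toy sparse problem: only the first 3 of 50 features carry signal.
rng = np.random.RandomState(0)
X = rng.randn(100, 50)
y = X[:, :3].sum(axis=1) + 0.1 * rng.randn(100)

# Stability scores along a path of alpha values (early-stopped with eps).
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0, eps=0.05)

# Stability selection at a fixed set of alphas; scores_ holds per-feature
# selection frequencies across the random subsamples.
feat_sel = RandomizedLasso(alpha=[0.1, 0.05, 0.01], random_state=0).fit(X, y)
print(feat_sel.scores_[:5])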
dingocuster/scikit-learn
sklearn/svm/setup.py
321
3157
import os
from os.path import join

import numpy

from sklearn._build_utils import get_blas_info


def configuration(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration

    config = Configuration('svm', parent_package, top_path)

    config.add_subpackage('tests')

    # Section LibSVM

    # we compile both libsvm and libsvm_sparse
    config.add_library('libsvm-skl',
                       sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
                       depends=[join('src', 'libsvm', 'svm.cpp'),
                                join('src', 'libsvm', 'svm.h')],
                       # Force C++ linking in case gcc is picked up instead
                       # of g++ under windows with some versions of MinGW
                       extra_link_args=['-lstdc++'],
                       )

    libsvm_sources = ['libsvm.c']
    libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
                      join('src', 'libsvm', 'libsvm_template.cpp'),
                      join('src', 'libsvm', 'svm.cpp'),
                      join('src', 'libsvm', 'svm.h')]

    config.add_extension('libsvm',
                         sources=libsvm_sources,
                         include_dirs=[numpy.get_include(),
                                       join('src', 'libsvm')],
                         libraries=['libsvm-skl'],
                         depends=libsvm_depends,
                         )

    ### liblinear module
    cblas_libs, blas_info = get_blas_info()
    if os.name == 'posix':
        cblas_libs.append('m')

    liblinear_sources = ['liblinear.c',
                         join('src', 'liblinear', '*.cpp')]
    liblinear_depends = [join('src', 'liblinear', '*.h'),
                         join('src', 'liblinear', 'liblinear_helper.c')]

    config.add_extension('liblinear',
                         sources=liblinear_sources,
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         depends=liblinear_depends,
                         # extra_compile_args=['-O0 -fno-inline'],
                         **blas_info)

    ## end liblinear module

    # this should go *after* libsvm-skl
    libsvm_sparse_sources = ['libsvm_sparse.c']
    config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
                         sources=libsvm_sparse_sources,
                         include_dirs=[numpy.get_include(),
                                       join("src", "libsvm")],
                         depends=[join("src", "libsvm", "svm.h"),
                                  join("src", "libsvm",
                                       "libsvm_sparse_helper.c")])

    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
bsd-3-clause
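The configuration() above is meant to be collected by a parent numpy.distutils setup; as a rough sketch of how such a sub-configuration gets pulled in (the package name 'mypkg' is purely illustrative, not taken from the repository):

from numpy.distutils.misc_util import Configuration
from numpy.distutils.core import setup


def configuration(parent_package='', top_path=None):
    config = Configuration('mypkg', parent_package, top_path)
    # each subpackage ships its own setup.py with a configuration(),
    # exactly like the svm module above
    config.add_subpackage('svm')
    return config


if __name__ == '__main__':
    setup(**configuration(top_path='').todict())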
amolkahat/pandas
pandas/core/groupby/base.py
1
5185
""" Provide basic components for groupby. These defintiions hold the whitelist of methods that are exposed on the SeriesGroupBy and the DataFrameGroupBy objects. """ import types from pandas.core.dtypes.common import is_list_like, is_scalar from pandas.util._decorators import make_signature class GroupByMixin(object): """ provide the groupby facilities to the mixed object """ @staticmethod def _dispatch(name, *args, **kwargs): """ dispatch to apply """ def outer(self, *args, **kwargs): def f(x): x = self._shallow_copy(x, groupby=self._groupby) return getattr(x, name)(*args, **kwargs) return self._groupby.apply(f) outer.__name__ = name return outer def _gotitem(self, key, ndim, subset=None): """ sub-classes to define return a sliced object Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ # create a new object to prevent aliasing if subset is None: subset = self.obj # we need to make a shallow copy of ourselves # with the same groupby kwargs = {attr: getattr(self, attr) for attr in self._attributes} # Try to select from a DataFrame, falling back to a Series try: groupby = self._groupby[key] except IndexError: groupby = self._groupby self = self.__class__(subset, groupby=groupby, parent=self, **kwargs) self._reset_cache() if subset.ndim == 2: if is_scalar(key) and key in subset or is_list_like(key): self._selection = key return self # special case to prevent duplicate plots when catching exceptions when # forwarding methods from NDFrames plotting_methods = frozenset(['plot', 'boxplot', 'hist']) common_apply_whitelist = frozenset([ 'last', 'first', 'head', 'tail', 'median', 'mean', 'sum', 'min', 'max', 'cumcount', 'ngroup', 'resample', 'rank', 'quantile', 'fillna', 'mad', 'any', 'all', 'take', 'idxmax', 'idxmin', 'shift', 'tshift', 'ffill', 'bfill', 'pct_change', 'skew', 'corr', 'cov', 'diff', ]) | plotting_methods series_apply_whitelist = ((common_apply_whitelist | {'nlargest', 'nsmallest', 'is_monotonic_increasing', 'is_monotonic_decreasing'}) - {'boxplot'}) | frozenset(['dtype', 'unique']) dataframe_apply_whitelist = ((common_apply_whitelist | frozenset(['dtypes', 'corrwith'])) - {'boxplot'}) cython_transforms = frozenset(['cumprod', 'cumsum', 'shift', 'cummin', 'cummax']) cython_cast_blacklist = frozenset(['rank', 'count', 'size']) def whitelist_method_generator(base, klass, whitelist): """ Yields all GroupBy member defs for DataFrame/Series names in whitelist. Parameters ---------- base : class base class klass : class class where members are defined. Should be Series or DataFrame whitelist : list list of names of klass methods to be constructed Returns ------- The generator yields a sequence of strings, each suitable for exec'ing, that define implementations of the named methods for DataFrameGroupBy or SeriesGroupBy. Since we don't want to override methods explicitly defined in the base class, any such name is skipped. """ method_wrapper_template = \ """def %(name)s(%(sig)s) : \""" %(doc)s \""" f = %(self)s.__getattr__('%(name)s') return f(%(args)s)""" property_wrapper_template = \ """@property def %(name)s(self) : \""" %(doc)s \""" return self.__getattr__('%(name)s')""" for name in whitelist: # don't override anything that was explicitly defined # in the base class if hasattr(base, name): continue # ugly, but we need the name string itself in the method. 
        f = getattr(klass, name)
        doc = f.__doc__
        doc = doc if type(doc) == str else ''
        if isinstance(f, types.MethodType):
            wrapper_template = method_wrapper_template
            decl, args = make_signature(f)
            # pass args by name to f because otherwise
            # GroupBy._make_wrapper won't know whether
            # we passed in an axis parameter.
            args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
            params = {'name': name,
                      'doc': doc,
                      'sig': ','.join(decl),
                      'self': args[0],
                      'args': ','.join(args_by_name)}
        else:
            wrapper_template = property_wrapper_template
            params = {'name': name, 'doc': doc}

        yield wrapper_template % params
bsd-3-clause
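The whitelists and the method generator above exist so that grouped objects expose these names; a quick sketch of what that buys the end user, using only public pandas API:

import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1.0, 2.0, 3.0]})
g = df.groupby('key')['val']

# 'mean', 'cumcount' and 'nlargest' all come from the whitelists above
# ('nlargest' is in the Series-only whitelist).
print(g.mean())
print(g.cumcount())
print(g.nlargest(1))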
stephenliu1989/HK_DataMiner
hkdataminer/cluster/mr_dbscan_.py
1
21171
# -*- coding: utf-8 -*- """ DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <robertlayton@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # Lars Buitinck # # License: BSD 3 clause import numpy as np from scipy import sparse from sklearn.base import BaseEstimator, ClusterMixin from sklearn.utils import check_array, check_consistent_length #from sklearn.utils.fixes import astype from sklearn.neighbors import NearestNeighbors from sklearn.cluster._dbscan_inner import dbscan_inner # Local imports #import knn as knnn # imports the shared library knn.so outliers = -1 def merge_assignments(new_assignments, old_assignments): # Number of clusters in assignments, ignoring noise if present. # clusters_size = len(set(old_assignments)) - (1 if -1 in old_assignments else 0) clusters_size = np.max(old_assignments) + 1 max_clust_id = clusters_size print "max_clust_id:", max_clust_id count_first = [0] * clusters_size count_second = [0] * clusters_size old_assignments_size = len(old_assignments) # new_assignments_size = len(new_assignments) for i in xrange(0, old_assignments_size): if old_assignments[i] != outliers: if new_assignments[i] != outliers: count_first[old_assignments[i]] += 1 count_second[old_assignments[i]] += 1 # Percentage percentage = [0.0] * clusters_size for i in xrange(0, clusters_size): if count_second[i] is 0: percentage[i] = 0.0 else: percentage[i] = float(count_first[i])/float(count_second[i]) # Starting assignment for i in xrange(0, old_assignments_size): if old_assignments[i] != outliers and percentage[old_assignments[i]] < 0.7: if new_assignments[i] != outliers: old_assignments[i] = new_assignments[i] + max_clust_id # print old_assignments[i] # else: # old_assignments[i] = outliers return old_assignments def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None, algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. metric_params : dict, optional Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. 
The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- core_samples : array [n_core_samples] Indices of core samples. labels : array [n_samples] Cluster labels for each point. Noisy samples are given the label -1. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ if not eps > 0.0: raise ValueError("eps must be positive.") X = check_array(X, accept_sparse='csr') if sample_weight is not None: sample_weight = np.asarray(sample_weight) check_consistent_length(X, sample_weight) # Calculate neighborhood for all samples. This leaves the original point # in, which needs to be considered later (i.e. point i is in the # neighborhood of point i. While True, its useless information) if metric == 'precomputed' and sparse.issparse(X): neighborhoods = np.empty(X.shape[0], dtype=object) X.sum_duplicates() # XXX: modifies X's internals in-place X_mask = X.data <= eps masked_indices = X.indices.astype(np.intp, copy=False)[X_mask] masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]] # insert the diagonal: a point is its own neighbor, but 0 distance # means absence from sparse matrix data masked_indices = np.insert(masked_indices, masked_indptr, np.arange(X.shape[0])) masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0]) # split into rows neighborhoods[:] = np.split(masked_indices, masked_indptr) else: neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm, leaf_size=leaf_size, metric=metric, metric_params=metric_params, p=p, n_jobs=n_jobs) neighbors_model.fit(X) # This has worst case O(n^2) memory complexity neighborhoods = neighbors_model.radius_neighbors(X, eps, return_distance=False) if sample_weight is None: n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods]) else: n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]) # Initially, all samples are noise. labels = -np.ones(X.shape[0], dtype=np.intp) # A list of all core samples found. 
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8) dbscan_inner(core_samples, neighborhoods, labels) return np.where(core_samples)[0], labels ''' def dbscan(X, eps=0.5, min_samples=5, metric='minkowski', metric_params=None, algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. metric_params : dict, optional Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Returns ------- core_samples : array [n_core_samples] Indices of core samples. labels : array [n_samples] Cluster labels for each point. Noisy samples are given the label -1. Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 
1996 """ if not eps > 0.0: raise ValueError("eps must be positive.") X = check_array(X, accept_sparse='csr') if sample_weight is not None: sample_weight = np.asarray(sample_weight) check_consistent_length(X, sample_weight) # Calculate neighborhood for all samples. This leaves the original point # in, which needs to be considered later (i.e. point i is in the # neighborhood of point i. While True, its useless information) if metric == 'precomputed' and sparse.issparse(X): neighborhoods = np.empty(X.shape[0], dtype=object) X.sum_duplicates() # XXX: modifies X's internals in-place X_mask = X.data <= eps masked_indices = astype(X.indices, np.intp, copy=False)[X_mask] masked_indptr = np.concatenate(([0], np.cumsum(X_mask)))[X.indptr[1:]] # insert the diagonal: a point is its own neighbor, but 0 distance # means absence from sparse matrix data masked_indices = np.insert(masked_indices, masked_indptr, np.arange(X.shape[0])) masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0]) # split into rows neighborhoods[:] = np.split(masked_indices, masked_indptr) elif algorithm != 'vp_tree': neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm, leaf_size=leaf_size, metric=metric, metric_params=metric_params, p=p, n_jobs=n_jobs) neighbors_model.fit(X) # This has worst case O(n^2) memory complexity neighborhoods = neighbors_model.radius_neighbors(X, eps, return_distance=False) #else: # Creates a parallel CPU VP-Tree based search #print "Calculating knn..." #t0 = time.time() if metric is 'rmsd': shape_x = np.shape(X.xyz) knn = knnn.vp_tree_parallel(np.reshape(X.xyz, (shape_x[0] * shape_x[1] * shape_x[2])), shape_x[1] * 3, "rmsd_serial") # This has worst case O(n^2) memory complexity queries = np.linspace(0, len(X.xyz) - 1, len(X.xyz), dtype='int') distances, neighborhoods = knn.query_radius(queries, eps) if sample_weight is None: n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods]) else: n_neighbors = np.array([np.sum(sample_weight[neighbors]) for neighbors in neighborhoods]) # Initially, all samples are noise. if metric is "rmsd:": labels = -np.ones(X.xyz.shape[0], dtype=np.intp) else: labels = -np.ones(X.shape[0], dtype=np.intp) #labels = -np.ones(X.shape[0], dtype=np.intp) # A list of all core samples found. core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8) dbscan_inner(core_samples, neighborhoods, labels) return np.where(core_samples)[0], labels ''' class MR_DBSCAN(BaseEstimator, ClusterMixin): """Perform DBSCAN clustering from vector array or distance matrix. DBSCAN - Density-Based Spatial Clustering of Applications with Noise. Finds core samples of high density and expands clusters from them. Good for data which contains clusters of similar density. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- eps : float, optional The maximum distance between two samples for them to be considered as in the same neighborhood. min_samples : int, optional The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.calculate_distance for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a sparse matrix, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. 
versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. metric_params : dict, optional Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, optional The power of the Minkowski metric to be used to calculate distance between points. n_jobs : int, optional (default = 1) The number of parallel jobs to run. If ``-1``, then the number of jobs is set to the number of CPU cores. Attributes ---------- core_sample_indices_ : array, shape = [n_core_samples] Indices of core samples. components_ : array, shape = [n_core_samples, n_features] Copy of each core sample found by training. labels_ : array, shape = [n_samples] Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. Notes ----- See examples/cluster/plot_dbscan.py for an example. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). Sparse neighborhoods can be precomputed using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 """ def __init__(self, eps=0.5, min_samples=5, metric='euclidean', metric_params=None, algorithm='auto', leaf_size=30, p=None, n_jobs=1): self.eps = eps self.min_samples = min_samples self.metric = metric self.metric_params = metric_params self.algorithm = algorithm self.leaf_size = leaf_size self.p = p self.n_jobs = n_jobs def fit(self, X, y=None, sample_weight=None): """Perform DBSCAN clustering from features or distance matrix. Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. """ X = check_array(X, accept_sparse='csr') clust = dbscan(X, sample_weight=sample_weight, **self.get_params()) self.core_sample_indices_, self.labels_ = clust if len(self.core_sample_indices_): # fix for scipy sparse indexing issue self.components_ = X[self.core_sample_indices_].copy() else: # no core samples self.components_ = np.empty((0, X.shape[1])) return self def fit_predict(self, X, y=None, sample_weight=None): """Performs clustering on X and returns cluster labels. 
Parameters ---------- X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \ array of shape (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. sample_weight : array, shape (n_samples,), optional Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. Returns ------- y : ndarray, shape (n_samples,) cluster labels """ self.fit(X, sample_weight=sample_weight) return self.labels_
apache-2.0
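The module above adapts scikit-learn's DBSCAN and adds an assignment-merging step; for reference, the stock estimator it wraps can be driven like this (plain scikit-learn, nothing from the repository's parallel VP-tree path):

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs

X, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.4, random_state=0)
labels = DBSCAN(eps=0.3, min_samples=5).fit_predict(X)

n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
print("clusters:", n_clusters, "noise points:", int(np.sum(labels == -1)))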
jpn--/larch
tests/test_model_group.py
1
2941
import larch import pandas as pd from larch import P,X,PX from pytest import approx from larch.data_warehouse import example_file def test_simple_model_group(): df = pd.read_csv(example_file("MTCwork.csv.gz")) df.set_index(['casenum','altnum'], inplace=True) d = larch.DataFrames(df, ch='chose', crack=True) d.set_alternative_names({ 1: 'DA', 2: 'SR2', 3: 'SR3+', 4: 'Transit', 5: 'Bike', 6: 'Walk', }) m0 = larch.Model(dataservice=d) m0.utility_co[2] = P("ASC_SR2") + P("hhinc#2") * X("hhinc") m0.utility_co[3] = P("ASC_SR3P") + P("hhinc#3") * X("hhinc") m0.utility_co[4] = P("ASC_TRAN") + P("hhinc#4") * X("hhinc") m0.utility_co[5] = P("ASC_BIKE") + P("hhinc#5") * X("hhinc") m0.utility_co[6] = P("ASC_WALK") + P("hhinc#6") * X("hhinc") m0.utility_ca = ( (P("tottime_m")*X("tottime") + P("totcost_m")*X("totcost"))*X("femdum == 0") + (P("tottime_f")*X("tottime") + P("totcost_f")*X("totcost"))*X("femdum == 1") ) m1 = larch.Model(dataservice=d.selector_co("femdum == 0")) m1.utility_co[2] = P("ASC_SR2") + P("hhinc#2") * X("hhinc") m1.utility_co[3] = P("ASC_SR3P") + P("hhinc#3") * X("hhinc") m1.utility_co[4] = P("ASC_TRAN") + P("hhinc#4") * X("hhinc") m1.utility_co[5] = P("ASC_BIKE") + P("hhinc#5") * X("hhinc") m1.utility_co[6] = P("ASC_WALK") + P("hhinc#6") * X("hhinc") m1.utility_ca = P("tottime_m")*X("tottime") + P("totcost_m")*X("totcost") m2 = larch.Model(dataservice=d.selector_co("femdum == 1")) m2.utility_co[2] = P("ASC_SR2") + P("hhinc#2") * X("hhinc") m2.utility_co[3] = P("ASC_SR3P") + P("hhinc#3") * X("hhinc") m2.utility_co[4] = P("ASC_TRAN") + P("hhinc#4") * X("hhinc") m2.utility_co[5] = P("ASC_BIKE") + P("hhinc#5") * X("hhinc") m2.utility_co[6] = P("ASC_WALK") + P("hhinc#6") * X("hhinc") m2.utility_ca = P("tottime_f")*X("tottime") + P("totcost_f")*X("totcost") m0.load_data() assert m0.loglike2().ll == approx(-7309.600971749625) m1.load_data() assert m1.loglike2().ll == approx(-4068.8091617468717) m2.load_data() assert m2.loglike2().ll == approx(-3240.7918100027578) from larch.model.model_group import ModelGroup mg = ModelGroup([m1,m2]) assert mg.loglike2().ll == approx(-7309.600971749625) assert mg.loglike() == approx(-7309.600971749625) pd.testing.assert_series_equal( mg.loglike2().dll.sort_index(), m0.loglike2().dll.sort_index() ) m0.simple_step_bhhh() mg.set_values(**m0.pf.value) pd.testing.assert_series_equal( mg.loglike2().dll.sort_index(), m0.loglike2().dll.sort_index() ) assert mg.loglike2().ll == approx(-4926.4822036792275) assert mg.check_d_loglike().data.similarity.min() > 4 result = mg.maximize_loglike(method='slsqp') assert result.loglike == approx(-3620.697668335103) mg2 = ModelGroup([]) mg2.append(m1) mg2.append(m2) assert mg2.loglike() == approx(-3620.697667552756) mg3 = ModelGroup([]) mg3.append(m1) mg3.append(m2) mg3.doctor() assert mg3.loglike() == approx(-3620.697667552756)
gpl-3.0
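For a single-model version of the grouped test above, the same larch calls can be trimmed down as follows; this is only a sketch assembled from calls already used in the test and still assumes larch's bundled MTC example file is available.

import larch
import pandas as pd
from larch import P, X
from larch.data_warehouse import example_file

df = pd.read_csv(example_file("MTCwork.csv.gz"))
df.set_index(['casenum', 'altnum'], inplace=True)
d = larch.DataFrames(df, ch='chose', crack=True)

m = larch.Model(dataservice=d)
m.utility_co[2] = P("ASC_SR2") + P("hhinc#2") * X("hhinc")
m.utility_ca = P("tottime") * X("tottime") + P("totcost") * X("totcost")
m.load_data()
print(m.loglike())
print(m.maximize_loglike(method='slsqp').loglike)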
sinhrks/scikit-learn
sklearn/manifold/tests/test_isomap.py
121
4301
from itertools import product import numpy as np from numpy.testing import (assert_almost_equal, assert_array_almost_equal, assert_equal) from sklearn import datasets from sklearn import manifold from sklearn import neighbors from sklearn import pipeline from sklearn import preprocessing from sklearn.utils.testing import assert_less eigen_solvers = ['auto', 'dense', 'arpack'] path_methods = ['auto', 'FW', 'D'] def test_isomap_simple_grid(): # Isomap should preserve distances when all neighbors are used N_per_side = 5 Npts = N_per_side ** 2 n_neighbors = Npts - 1 # grid of equidistant points in 2D, n_components = n_dim X = np.array(list(product(range(N_per_side), repeat=2))) # distances from each point to all others G = neighbors.kneighbors_graph(X, n_neighbors, mode='distance').toarray() for eigen_solver in eigen_solvers: for path_method in path_methods: clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2, eigen_solver=eigen_solver, path_method=path_method) clf.fit(X) G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode='distance').toarray() assert_array_almost_equal(G, G_iso) def test_isomap_reconstruction_error(): # Same setup as in test_isomap_simple_grid, with an added dimension N_per_side = 5 Npts = N_per_side ** 2 n_neighbors = Npts - 1 # grid of equidistant points in 2D, n_components = n_dim X = np.array(list(product(range(N_per_side), repeat=2))) # add noise in a third dimension rng = np.random.RandomState(0) noise = 0.1 * rng.randn(Npts, 1) X = np.concatenate((X, noise), 1) # compute input kernel G = neighbors.kneighbors_graph(X, n_neighbors, mode='distance').toarray() centerer = preprocessing.KernelCenterer() K = centerer.fit_transform(-0.5 * G ** 2) for eigen_solver in eigen_solvers: for path_method in path_methods: clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2, eigen_solver=eigen_solver, path_method=path_method) clf.fit(X) # compute output kernel G_iso = neighbors.kneighbors_graph(clf.embedding_, n_neighbors, mode='distance').toarray() K_iso = centerer.fit_transform(-0.5 * G_iso ** 2) # make sure error agrees reconstruction_error = np.linalg.norm(K - K_iso) / Npts assert_almost_equal(reconstruction_error, clf.reconstruction_error()) def test_transform(): n_samples = 200 n_components = 10 noise_scale = 0.01 # Create S-curve dataset X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0) # Compute isomap embedding iso = manifold.Isomap(n_components, 2) X_iso = iso.fit_transform(X) # Re-embed a noisy version of the points rng = np.random.RandomState(0) noise = noise_scale * rng.randn(*X.shape) X_iso2 = iso.transform(X + noise) # Make sure the rms error on re-embedding is comparable to noise_scale assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale) def test_pipeline(): # check that Isomap works fine as a transformer in a Pipeline # only checks that no error is raised. # TODO check that it actually does something useful X, y = datasets.make_blobs(random_state=0) clf = pipeline.Pipeline( [('isomap', manifold.Isomap()), ('clf', neighbors.KNeighborsClassifier())]) clf.fit(X, y) assert_less(.9, clf.score(X, y)) def test_isomap_clone_bug(): # regression test for bug reported in #6062 model = manifold.Isomap() for n_neighbors in [10, 15, 20]: model.set_params(n_neighbors=n_neighbors) model.fit(np.random.rand(50, 2)) assert_equal(model.nbrs_.n_neighbors, n_neighbors)
bsd-3-clause
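Outside the test harness, the estimator exercised above is used in a couple of lines; a minimal sketch with keyword arguments rather than the positional form used in test_transform:

from sklearn import datasets, manifold

X, _ = datasets.make_s_curve(n_samples=500, random_state=0)
iso = manifold.Isomap(n_neighbors=10, n_components=2)
X_2d = iso.fit_transform(X)
print(X_2d.shape, iso.reconstruction_error())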
lparsons/tools-iuc
tools/repmatch_gff3/repmatch_gff3_util.py
22
17958
import bisect import csv import os import shutil import sys import tempfile import matplotlib matplotlib.use('Agg') from matplotlib import pyplot # noqa: I202,E402 # Graph settings Y_LABEL = 'Counts' X_LABEL = 'Number of matched replicates' TICK_WIDTH = 3 # Amount to shift the graph to make labels fit, [left, right, top, bottom] ADJUST = [0.180, 0.9, 0.9, 0.1] # Length of tick marks, use TICK_WIDTH for width pyplot.rc('xtick.major', size=10.00) pyplot.rc('ytick.major', size=10.00) pyplot.rc('lines', linewidth=4.00) pyplot.rc('axes', linewidth=3.00) pyplot.rc('font', family='Bitstream Vera Sans', size=32.0) COLORS = 'krb' ISPY2 = sys.version_info[0] == 2 class Replicate(object): def __init__(self, id, dataset_path): self.id = id self.dataset_path = dataset_path if ISPY2: fh = open(dataset_path, 'rb') else: fh = open(dataset_path, 'r', newline='') self.parse(csv.reader(fh, delimiter='\t')) def parse(self, reader): self.chromosomes = {} for line in reader: if line[0].startswith("#") or line[0].startswith('"'): continue cname, junk, junk, mid, midplus, value, strand, junk, attrs = line attrs = parse_gff_attrs(attrs) distance = int(attrs['cw_distance']) mid = int(mid) midplus = int(midplus) value = float(value) if cname not in self.chromosomes: self.chromosomes[cname] = Chromosome(cname) chrom = self.chromosomes[cname] chrom.add_peak(Peak(cname, mid, value, distance, self)) for chrom in self.chromosomes.values(): chrom.sort_by_index() def filter(self, up_limit, low_limit): for chrom in self.chromosomes.values(): chrom.filter(up_limit, low_limit) def size(self): return sum([len(c.peaks) for c in self.chromosomes.values()]) class Chromosome(object): def __init__(self, name): self.name = name self.peaks = [] def add_peak(self, peak): self.peaks.append(peak) def sort_by_index(self): self.peaks.sort(key=lambda peak: peak.midpoint) self.keys = make_keys(self.peaks) def remove_peak(self, peak): i = bisect.bisect_left(self.keys, peak.midpoint) # If the peak was actually found if i < len(self.peaks) and self.peaks[i].midpoint == peak.midpoint: del self.keys[i] del self.peaks[i] def filter(self, up_limit, low_limit): self.peaks = [p for p in self.peaks if low_limit <= p.distance <= up_limit] self.keys = make_keys(self.peaks) class Peak(object): def __init__(self, chrom, midpoint, value, distance, replicate): self.chrom = chrom self.value = value self.midpoint = midpoint self.distance = distance self.replicate = replicate def normalized_value(self, med): return self.value * med / self.replicate.median class PeakGroup(object): def __init__(self): self.peaks = {} def add_peak(self, repid, peak): self.peaks[repid] = peak @property def chrom(self): return list(self.peaks.values())[0].chrom @property def midpoint(self): return int(median([peak.midpoint for peak in self.peaks.values()])) @property def num_replicates(self): return len(self.peaks) @property def median_distance(self): return int(median([peak.distance for peak in self.peaks.values()])) @property def value_sum(self): return sum([peak.value for peak in self.peaks.values()]) def normalized_value(self, med): values = [] for peak in self.peaks.values(): values.append(peak.normalized_value(med)) return median(values) @property def peakpeak_distance(self): keys = list(self.peaks.keys()) return abs(self.peaks[keys[0]].midpoint - self.peaks[keys[1]].midpoint) class FrequencyDistribution(object): def __init__(self, d=None): self.dist = d or {} def add(self, x): self.dist[x] = self.dist.get(x, 0) + 1 def graph_series(self): x = [] y = [] for key, val in 
self.dist.items(): x.append(key) y.append(val) return x, y def mode(self): return max(self.dist.items(), key=lambda data: data[1])[0] def size(self): return sum(self.dist.values()) def stop_err(msg): sys.stderr.write(msg) sys.exit(1) def median(data): """ Find the integer median of the data set. """ if not data: return 0 sdata = sorted(data) if len(data) % 2 == 0: return (sdata[len(data) // 2] + sdata[len(data) // 2 - 1]) / 2 else: return sdata[len(data) // 2] def make_keys(peaks): return [data.midpoint for data in peaks] def get_window(chromosome, target_peaks, distance): """ Returns a window of all peaks from a replicate within a certain distance of a peak from another replicate. """ lower = list(target_peaks)[0].midpoint upper = list(target_peaks)[0].midpoint for peak in target_peaks: lower = min(lower, peak.midpoint - distance) upper = max(upper, peak.midpoint + distance) start_index = bisect.bisect_left(chromosome.keys, lower) end_index = bisect.bisect_right(chromosome.keys, upper) return (chromosome.peaks[start_index: end_index], chromosome.name) def match_largest(window, peak, chrum): if not window: return None if peak.chrom != chrum: return None return max(window, key=lambda cpeak: cpeak.value) def match_closest(window, peak, chrum): if not window: return None if peak.chrom != chrum: return None return min(window, key=lambda match: abs(match.midpoint - peak.midpoint)) def frequency_histogram(freqs, dataset_path, labels=[], title=''): pyplot.clf() pyplot.figure(figsize=(10, 10)) for i, freq in enumerate(freqs): xvals, yvals = freq.graph_series() # Go from high to low xvals.reverse() pyplot.bar([x - 0.4 + 0.8 / len(freqs) * i for x in xvals], yvals, width=0.8 / len(freqs), color=COLORS[i]) pyplot.xticks(range(min(xvals), max(xvals) + 1), map(str, reversed(range(min(xvals), max(xvals) + 1)))) pyplot.xlabel(X_LABEL) pyplot.ylabel(Y_LABEL) pyplot.subplots_adjust(left=ADJUST[0], right=ADJUST[1], top=ADJUST[2], bottom=ADJUST[3]) ax = pyplot.gca() for l in ax.get_xticklines() + ax.get_yticklines(): l.set_markeredgewidth(TICK_WIDTH) pyplot.savefig(dataset_path) METHODS = {'closest': match_closest, 'largest': match_largest} def gff_attrs(l): if len(l) == 0: return '.' return ';'.join('%s=%s' % (tup[0], tup[1]) for tup in l) def parse_gff_attrs(s): d = {} if s == '.': return d for item in s.split(';'): key, val = item.split('=') d[key] = val return d def gff_row(cname, start, end, score, source, stype='.', strand='.', phase='.', attrs=None): return (cname, source, stype, start, end, score, strand, phase, gff_attrs(attrs or [])) def get_temporary_plot_path(): """ Return the path to a temporary file with a valid image format file extension that can be used with bioformats. 
""" tmp_dir = tempfile.mkdtemp(prefix='tmp-repmatch-') fd, name = tempfile.mkstemp(suffix='.pdf', dir=tmp_dir) os.close(fd) return name def process_files(dataset_paths, galaxy_hids, method, distance, step, replicates, up_limit, low_limit, output_files, output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram): output_statistics_histogram_file = output_files in ["all"] and method in ["all"] if len(dataset_paths) < 2: return if method == 'all': match_methods = METHODS.keys() else: match_methods = [method] for match_method in match_methods: statistics = perform_process(dataset_paths, galaxy_hids, match_method, distance, step, replicates, up_limit, low_limit, output_files, output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram) if output_statistics_histogram_file: tmp_statistics_histogram_path = get_temporary_plot_path() frequency_histogram([stat['distribution'] for stat in [statistics]], tmp_statistics_histogram_path, METHODS.keys()) shutil.move(tmp_statistics_histogram_path, output_statistics_histogram) def perform_process(dataset_paths, galaxy_hids, method, distance, step, num_required, up_limit, low_limit, output_files, output_matched_peaks, output_unmatched_peaks, output_detail, output_statistics_table, output_statistics_histogram): output_detail_file = output_files in ["all"] and output_detail is not None output_statistics_table_file = output_files in ["all"] and output_statistics_table is not None output_unmatched_peaks_file = output_files in ["all", "matched_peaks_unmatched_peaks"] and output_unmatched_peaks is not None output_statistics_histogram_file = output_files in ["all"] and output_statistics_histogram is not None replicates = [] for i, dataset_path in enumerate(dataset_paths): try: galaxy_hid = galaxy_hids[i] r = Replicate(galaxy_hid, dataset_path) replicates.append(r) except Exception as e: stop_err('Unable to parse file "%s", exception: %s' % (dataset_path, str(e))) attrs = 'd%sr%s' % (distance, num_required) if up_limit != 1000: attrs += 'u%d' % up_limit if low_limit != -1000: attrs += 'l%d' % low_limit if step != 0: attrs += 's%d' % step def td_writer(file_path): # Returns a tab-delimited writer for a certain output if ISPY2: fh = open(file_path, 'wb') return csv.writer(fh, delimiter='\t') else: fh = open(file_path, 'w', newline='') return csv.writer(fh, delimiter='\t', quoting=csv.QUOTE_NONE) labels = ('chrom', 'median midpoint', 'median midpoint+1', 'median normalized reads', 'replicates', 'median c-w distance', 'reads sum') for replicate in replicates: labels += ('chrom', 'median midpoint', 'median midpoint+1', 'c-w sum', 'c-w distance', 'replicate id') matched_peaks_output = td_writer(output_matched_peaks) if output_statistics_table_file: statistics_table_output = td_writer(output_statistics_table) statistics_table_output.writerow(('data', 'median read count')) if output_detail_file: detail_output = td_writer(output_detail) detail_output.writerow(labels) if output_unmatched_peaks_file: unmatched_peaks_output = td_writer(output_unmatched_peaks) unmatched_peaks_output.writerow(('chrom', 'midpoint', 'midpoint+1', 'c-w sum', 'c-w distance', 'replicate id')) # Perform filtering if up_limit < 1000 or low_limit > -1000: for replicate in replicates: replicate.filter(up_limit, low_limit) # Actually merge the peaks peak_groups = [] unmatched_peaks = [] freq = FrequencyDistribution() def do_match(reps, distance): # Copy list because we will mutate it, but keep 
replicate references. reps = reps[:] while len(reps) > 1: # Iterate over each replicate as "main" main = reps[0] reps.remove(main) for chromosome in list(main.chromosomes.values()): peaks_by_value = chromosome.peaks[:] # Sort main replicate by value peaks_by_value.sort(key=lambda peak: -peak.value) def search_for_matches(group): # Here we use multiple passes, expanding the window to be # +- distance from any previously matched peak. while True: new_match = False for replicate in reps: if replicate.id in group.peaks: # Stop if match already found for this replicate continue try: # Lines changed to remove a major bug by Rohit Reja. window, chrum = get_window(replicate.chromosomes[chromosome.name], list(group.peaks.values()), distance) match = METHODS[method](window, peak, chrum) except KeyError: continue if match: group.add_peak(replicate.id, match) new_match = True if not new_match: break # Attempt to enlarge existing peak groups for group in peak_groups: old_peaks = list(group.peaks.values()) search_for_matches(group) for peak in list(group.peaks.values()): if peak not in old_peaks: peak.replicate.chromosomes[chromosome.name].remove_peak(peak) # Attempt to find new peaks groups. For each peak in the # main replicate, search for matches in the other replicates for peak in peaks_by_value: matches = PeakGroup() matches.add_peak(main.id, peak) search_for_matches(matches) # Were enough replicates matched? if matches.num_replicates >= num_required: for peak in list(matches.peaks.values()): peak.replicate.chromosomes[chromosome.name].remove_peak(peak) peak_groups.append(matches) # Zero or less = no stepping if step <= 0: do_match(replicates, distance) else: for d in range(0, distance, step): do_match(replicates, d) for group in peak_groups: freq.add(group.num_replicates) # Collect together the remaining unmatched_peaks for replicate in replicates: for chromosome in replicate.chromosomes.values(): for peak in chromosome.peaks: freq.add(1) unmatched_peaks.append(peak) # Average the unmatched_peaks count in the graph by # replicates med = median([peak.value for group in peak_groups for peak in group.peaks.values()]) for replicate in replicates: replicate.median = median([peak.value for group in peak_groups for peak in group.peaks.values() if peak.replicate == replicate]) statistics_table_output.writerow((replicate.id, replicate.median)) for group in peak_groups: # Output matched_peaks (matched pairs). 
matched_peaks_output.writerow(gff_row(cname=group.chrom, start=group.midpoint, end=group.midpoint + 1, score=group.normalized_value(med), source='repmatch', stype='.', strand='.', phase='.', attrs=[('median_distance', group.median_distance), ('value_sum', group.value_sum), ('replicates', group.num_replicates)])) if output_detail_file: matched_peaks = (group.chrom, group.midpoint, group.midpoint + 1, group.normalized_value(med), group.num_replicates, group.median_distance, group.value_sum) for peak in group.peaks.values(): matched_peaks += (peak.chrom, peak.midpoint, peak.midpoint + 1, peak.value, peak.distance, peak.replicate.id) detail_output.writerow(matched_peaks) if output_unmatched_peaks_file: for unmatched_peak in unmatched_peaks: unmatched_peaks_output.writerow((unmatched_peak.chrom, unmatched_peak.midpoint, unmatched_peak.midpoint + 1, unmatched_peak.value, unmatched_peak.distance, unmatched_peak.replicate.id)) if output_statistics_histogram_file: tmp_statistics_histogram_path = get_temporary_plot_path() frequency_histogram([freq], tmp_statistics_histogram_path) shutil.move(tmp_statistics_histogram_path, output_statistics_histogram) return {'distribution': freq}
mit
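The replicate matching above ultimately rests on one trick: keep each chromosome's peak midpoints sorted and carve out a window with bisect, as get_window does. The idea in isolation (toy numbers, not the module's own classes):

import bisect

midpoints = [100, 250, 400, 405, 900]   # sorted peak midpoints, one chromosome
target, distance = 402, 10

lo = bisect.bisect_left(midpoints, target - distance)
hi = bisect.bisect_right(midpoints, target + distance)
print(midpoints[lo:hi])                 # -> [400, 405]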
ljdursi/parallel-IO-tutorial
adios/plots.py
2
4707
#!/bin/env python """ ./plots.py [--help] file Plots the supplied hdf5 file, assuming it consists of a 2d density array and a 2d array of 2d velocities. """ import tables import netCDF4 import numpy import matplotlib import pylab import sys import getopt def readHDF5file(filename): densData = None velData = None ydims = None xdims = None h5file=tables.openFile(filename,mode="r") # densData = h5file.root.ArrayData.dens.read() # velData = h5file.root.ArrayData.vel.read() for node in h5file.walkNodes('/',classname='Array'): if (node.name == "dens"): densData = node.read() if (node.name == "vel"): velData = node.read() print 'veldata shape = ', velData.shape if ((velData.shape)[2] <= 3): velData = numpy.transpose(velData,(2,1,0)) densData = numpy.transpose(densData,(1,0)) print 'Transposing...' return ("HDF5", densData,velData,ydims,xdims) def readNetCDF4file(filename): densData = None velData = None ydims = None xdims = None file=netCDF4.Dataset(filename,"r") ycoordname = 'Y coordinate' xcoordname = 'X coordinate' densname = 'Density' velname = 'Velocity' if ycoordname in file.variables: ydims=file.variables['Y coordinate'][:] if xcoordname in file.variables: xdims=file.variables['X coordinate'][:] if densname in file.variables: densData = file.variables['Density'][:,:] print 'densData shape = ', densData.shape if velname in file.variables: velData = file.variables['Velocity'][:,:,:] if not velData is None: print 'veldata shape = ', velData.shape if ((velData.shape)[2] <= 3): velData = numpy.transpose(velData,(2,1,0)) densData = numpy.transpose(densData,(1,0)) print 'Transposing...' file.close() return ("NetCDF4", densData,velData,xdims,ydims) def getData(filename="data.h5"): densData = None velData = None ydims = None xdims = None ext = ((filename.split("."))[-1]).lower() if ext == "h5" or ext == "hdf5" or ext == "hdf" : (filetype, densData, velData, xdims, ydims) = readHDF5file(filename) elif ext == "nc" or ext == "nc4" or ext == "netcdf" or ext=="ncdf": (filetype, densData, velData, xdims, ydims) = readNetCDF4file(filename) return (filetype,densData, velData, xdims, ydims) def plot2darray(filename="data.h5"): (filetype,densData, velData, xdims, ydims) = getData(filename) if densData is None and velData is None: print "No Data in file "+filename return print "Plotting ",filename if not densData is None: densDataT = numpy.transpose(densData) if not velData is None: vx = numpy.transpose(velData[0,:,:]) vy = numpy.transpose(velData[1,:,:]) nx = None ny = None if not densData is None: size = densData.shape nx = size[0] ny = size[1] elif not velData is None: size = vx.shape nx = size[1] ny = size[0] if xdims is None: hdims = numpy.arange(0,nx) else: hdims = xdims if ydims is None: vdims = numpy.arange(0,ny) else: vdims = ydims X,Y = pylab.meshgrid(hdims,vdims) # want about 20 arrows per dim narrows = 20 everyX = nx/narrows everyY = nx/narrows fig = pylab.figure(1) ax = fig.add_subplot(111,aspect='equal') ax.set_xlabel('X') ax.set_ylabel('Y') ax.set_title(filetype+' 2D Arrays Output: '+filename) ax.set_xlim(min(hdims), max(hdims)) ax.set_ylim(min(vdims), max(vdims)) if not densData is None: ax.contourf(hdims,vdims,densDataT) if not velData is None: maxv = numpy.max(numpy.sqrt(vx*vx + vy*vy)) ax.quiver(X[::everyX,::everyY], Y[::everyX, ::everyY], vx[::everyX,::everyY],vy[::everyX,::everyY], scale=4.*maxv) pylab.show() class Usage(Exception): def __init__(self, msg): self.msg = msg def main(argv=None): if argv is None: argv = sys.argv try: try: opts, args = getopt.getopt(argv[1:], "h", ["help"]) 
        except getopt.error, msg:
            raise Usage(__doc__)
        for o,a in opts:
            if o in ("-h", "--help"):
                print __doc__
                sys.exit(0)
        if args == []:
            plot2darray()
        for arg in args:
            plot2darray(arg)
        sys.exit(0)
    except Usage, err:
        print >>sys.stderr, err.msg
        print >>sys.stderr, "for help use --help"
        return 2


if __name__ == "__main__":
    sys.exit(main())
gpl-2.0
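The density/velocity figure that plots.py builds boils down to a contourf plus a thinned quiver; a self-contained, Python 3 sketch of that plotting pattern on synthetic data:

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0.0, 1.0, 40)
X, Y = np.meshgrid(x, x)
dens = np.sin(np.pi * X) * np.cos(np.pi * Y)
vy, vx = np.gradient(dens)              # rough stand-in for a velocity field

fig, ax = plt.subplots()
ax.set_aspect('equal')
ax.contourf(X, Y, dens)
every = 4                               # thin the arrows, as plots.py does
ax.quiver(X[::every, ::every], Y[::every, ::every],
          vx[::every, ::every], vy[::every, ::every])
plt.show()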
wathen/PhD
MHD/FEniCS/MHD/Stabilised/SaddlePointForm/Test/ExactSchur/MHDfluid.py
2
10620
#!/usr/bin/python # interpolate scalar gradient onto nedelec space from dolfin import * import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc Print = PETSc.Sys.Print # from MatrixOperations import * import numpy as np #import matplotlib.pylab as plt import PETScIO as IO import common import scipy import scipy.io import time import BiLinear as forms import IterOperations as Iter import MatrixOperations as MO import CheckPetsc4py as CP import ExactSol import Solver as S import MHDmatrixPrecondSetup as PrecondSetup import NSprecondSetup import MHDallatonce as MHDpreconditioner m = 5 errL2u =np.zeros((m-1,1)) errH1u =np.zeros((m-1,1)) errL2p =np.zeros((m-1,1)) errL2b =np.zeros((m-1,1)) errCurlb =np.zeros((m-1,1)) errL2r =np.zeros((m-1,1)) errH1r =np.zeros((m-1,1)) l2uorder = np.zeros((m-1,1)) H1uorder =np.zeros((m-1,1)) l2porder = np.zeros((m-1,1)) l2border = np.zeros((m-1,1)) Curlborder =np.zeros((m-1,1)) l2rorder = np.zeros((m-1,1)) H1rorder = np.zeros((m-1,1)) NN = np.zeros((m-1,1)) DoF = np.zeros((m-1,1)) Velocitydim = np.zeros((m-1,1)) Magneticdim = np.zeros((m-1,1)) Pressuredim = np.zeros((m-1,1)) Lagrangedim = np.zeros((m-1,1)) Wdim = np.zeros((m-1,1)) iterations = np.zeros((m-1,1)) SolTime = np.zeros((m-1,1)) udiv = np.zeros((m-1,1)) MU = np.zeros((m-1,1)) level = np.zeros((m-1,1)) NSave = np.zeros((m-1,1)) Mave = np.zeros((m-1,1)) TotalTime = np.zeros((m-1,1)) nn = 2 dim = 2 ShowResultPlots = 'yes' split = 'Linear' MU[0]= 1e0 for xx in xrange(1,m): print xx level[xx-1] = xx+0 nn = 2**(level[xx-1]) # Create mesh and define function space nn = int(nn) NN[xx-1] = nn/2 # parameters["form_compiler"]["quadrature_degree"] = 6 # parameters = CP.ParameterSetup() mesh = UnitSquareMesh(nn,nn) order = 1 parameters['reorder_dofs_serial'] = False Velocity = VectorFunctionSpace(mesh, "CG", order) Pressure = FunctionSpace(mesh, "DG", order-1) Magnetic = FunctionSpace(mesh, "N1curl", order) Lagrange = FunctionSpace(mesh, "CG", order) W = MixedFunctionSpace([Velocity,Magnetic, Pressure, Lagrange]) # W = Velocity*Pressure*Magnetic*Lagrange Velocitydim[xx-1] = Velocity.dim() Pressuredim[xx-1] = Pressure.dim() Magneticdim[xx-1] = Magnetic.dim() Lagrangedim[xx-1] = Lagrange.dim() Wdim[xx-1] = W.dim() print "\n\nW: ",Wdim[xx-1],"Velocity: ",Velocitydim[xx-1],"Pressure: ",Pressuredim[xx-1],"Magnetic: ",Magneticdim[xx-1],"Lagrange: ",Lagrangedim[xx-1],"\n\n" dim = [Velocity.dim(), Magnetic.dim(), Pressure.dim(), Lagrange.dim()] def boundary(x, on_boundary): return on_boundary u0, p0,b0, r0, Laplacian, Advection, gradPres,CurlCurl, gradR, NS_Couple, M_Couple = ExactSol.MHD2D(4,1) bcu = DirichletBC(W.sub(0),u0, boundary) bcb = DirichletBC(W.sub(1),b0, boundary) bcr = DirichletBC(W.sub(3),r0, boundary) # bc = [u0,p0,b0,r0] bcs = [bcu,bcb,bcr] FSpaces = [Velocity,Pressure,Magnetic,Lagrange] (u, b, p, r) = TrialFunctions(W) (v, c, q, s) = TestFunctions(W) kappa = 1.0 Mu_m =1e1 MU = 1.0/1 IterType = 'Full' F_NS = -MU*Laplacian+Advection+gradPres-kappa*NS_Couple if kappa == 0: F_M = Mu_m*CurlCurl+gradR -kappa*M_Couple else: F_M = Mu_m*kappa*CurlCurl+gradR -kappa*M_Couple params = [kappa,Mu_m,MU] # MO.PrintStr("Preconditioning MHD setup",5,"+","\n\n","\n\n") HiptmairMatrices = PrecondSetup.MagneticSetup(Magnetic, Lagrange, b0, r0, 1e-4, params) MO.PrintStr("Setting up MHD initial guess",5,"+","\n\n","\n\n") u_k,p_k,b_k,r_k = common.InitialGuess(FSpaces,[u0,p0,b0,r0],[F_NS,F_M],params,HiptmairMatrices,1e-6,Neumann=Expression(("0","0")),options ="New", FS = "DG") plot(p_k, interactive = True) 
b_t = TrialFunction(Velocity) c_t = TestFunction(Velocity) #print assemble(inner(b,c)*dx).array().shape #print mat #ShiftedMass = assemble(inner(mat*b,c)*dx) #as_vector([inner(b,c)[0]*b_k[0],inner(b,c)[1]*(-b_k[1])]) ones = Function(Pressure) ones.vector()[:]=(0*ones.vector().array()+1) # pConst = - assemble(p_k*dx)/assemble(ones*dx) p_k.vector()[:] += - assemble(p_k*dx)/assemble(ones*dx) x = Iter.u_prev(u_k,b_k,p_k,r_k) KSPlinearfluids, MatrixLinearFluids = PrecondSetup.FluidLinearSetup(Pressure, MU) kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k) plot(b_k) ns,maxwell,CoupleTerm,Lmaxwell,Lns = forms.MHD2D(mesh, W,F_M,F_NS, u_k,b_k,params,IterType,"DG", SaddlePoint = "Yes") RHSform = forms.PicardRHS(mesh, W, u_k, p_k, b_k, r_k, params,"DG",SaddlePoint = "Yes") bcu = DirichletBC(W.sub(0),Expression(("0.0","0.0")), boundary) bcb = DirichletBC(W.sub(1),Expression(("0.0","0.0")), boundary) bcr = DirichletBC(W.sub(3),Expression(("0.0")), boundary) bcs = [bcu,bcb,bcr] eps = 1.0 # error measure ||u-u_k|| tol = 1.0E-4 # tolerance iter = 0 # iteration counter maxiter = 40 # max no of iterations allowed SolutionTime = 0 outer = 0 # parameters['linear_algebra_backend'] = 'uBLAS' # FSpaces = [Velocity,Magnetic,Pressure,Lagrange] if IterType == "CD": AA, bb = assemble_system(maxwell+ns, (Lmaxwell + Lns) - RHSform, bcs) A,b = CP.Assemble(AA,bb) # u = b.duplicate() # P = CP.Assemble(PP) u_is = PETSc.IS().createGeneral(range(Velocity.dim())) NS_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim())) M_is = PETSc.IS().createGeneral(range(Velocity.dim()+Pressure.dim(),W.dim())) OuterTol = 1e-5 InnerTol = 1e-3 NSits =0 Mits =0 TotalStart =time.time() SolutionTime = 0 while eps > tol and iter < maxiter: iter += 1 MO.PrintStr("Iter "+str(iter),7,"=","\n\n","\n\n") tic() if IterType == "CD": bb = assemble((Lmaxwell + Lns) - RHSform) for bc in bcs: bc.apply(bb) FF = AA.sparray()[0:dim[0],0:dim[0]] A,b = CP.Assemble(AA,bb) # if iter == 1 if iter == 1: u = b.duplicate() F = A.getSubMatrix(u_is,u_is) kspF = NSprecondSetup.LSCKSPnonlinear(F) else: AA, bb = assemble_system(maxwell+ns+CoupleTerm, (Lmaxwell + Lns) - RHSform, bcs) A,b = CP.Assemble(AA,bb) F = A.getSubMatrix(u_is,u_is) n = FacetNormal(mesh) mat = as_matrix([[b_k[1]*b_k[1],-b_k[1]*b_k[0]],[-b_k[1]*b_k[0],b_k[0]*b_k[0]]]) a = params[2]*inner(grad(b_t), grad(c_t))*dx(W.mesh()) + inner((grad(b_t)*u_k),c_t)*dx(W.mesh()) +(1/2)*div(u_k)*inner(c_t,b_t)*dx(W.mesh()) - (1/2)*inner(u_k,n)*inner(c_t,b_t)*ds(W.mesh())+kappa*kappa/Mu_m*inner(mat*b_t,c_t)*dx(W.mesh()) ShiftedMass = assemble(a) bcu.apply(ShiftedMass) #MO.StoreMatrix(AA.sparray()[0:dim[0],0:dim[0]]+ShiftedMass.sparray(),"A") FF = CP.Assemble(ShiftedMass) kspF = NSprecondSetup.LSCKSPnonlinear(FF) # if iter == 1: if iter == 1: u = b.duplicate() print ("{:40}").format("MHD assemble, time: "), " ==> ",("{:4f}").format(toc()), ("{:9}").format(" time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5]) kspFp, Fp = PrecondSetup.FluidNonLinearSetup(Pressure, MU, u_k) print "Inititial guess norm: ", u.norm() ksp = PETSc.KSP() ksp.create(comm=PETSc.COMM_WORLD) pc = ksp.getPC() ksp.setType('gmres') pc.setType('python') pc.setType(PETSc.PC.Type.PYTHON) # FSpace = [Velocity,Magnetic,Pressure,Lagrange] reshist = {} def monitor(ksp, its, fgnorm): reshist[its] = fgnorm print its," OUTER:", fgnorm # ksp.setMonitor(monitor) ksp.max_it = 1000 FFSS = [Velocity,Magnetic,Pressure,Lagrange] pc.setPythonContext(MHDpreconditioner.InnerOuterMAGNETICinverse(FFSS,kspF, KSPlinearfluids[0], 
KSPlinearfluids[1],Fp, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],1e-6,FF)) # OptDB = PETSc.Options() # OptDB['pc_factor_mat_solver_package'] = "mumps" # OptDB['pc_factor_mat_ordering_type'] = "rcm" # ksp.setFromOptions() scale = b.norm() b = b/scale ksp.setOperators(A,A) stime = time.time() ksp.solve(b,u) Soltime = time.time()- stime NSits += ksp.its # Mits +=dodim u = u*scale SolutionTime = SolutionTime +Soltime MO.PrintStr("Number of iterations ="+str(ksp.its),60,"+","\n\n","\n\n") u1, p1, b1, r1, eps= Iter.PicardToleranceDecouple(u,x,FSpaces,dim,"2",iter, SaddlePoint = "Yes") p1.vector()[:] += - assemble(p1*dx)/assemble(ones*dx) u_k.assign(u1) p_k.assign(p1) b_k.assign(b1) r_k.assign(r1) uOld= np.concatenate((u_k.vector().array(),b_k.vector().array(),p_k.vector().array(),r_k.vector().array()), axis=0) x = IO.arrayToVec(uOld) XX= np.concatenate((u_k.vector().array(),p_k.vector().array(),b_k.vector().array(),r_k.vector().array()), axis=0) SolTime[xx-1] = SolutionTime/iter NSave[xx-1] = (float(NSits)/iter) Mave[xx-1] = (float(Mits)/iter) iterations[xx-1] = iter TotalTime[xx-1] = time.time() - TotalStart print SolTime import pandas as pd print "\n\n Iteration table" if IterType == "Full": IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av Outer its","Av Inner its",] else: IterTitles = ["l","DoF","AV solve Time","Total picard time","picard iterations","Av NS iters","Av M iters"] IterValues = np.concatenate((level,Wdim,SolTime,TotalTime,iterations,NSave,Mave),axis=1) IterTable= pd.DataFrame(IterValues, columns = IterTitles) if IterType == "Full": IterTable = MO.PandasFormat(IterTable,'Av Outer its',"%2.1f") IterTable = MO.PandasFormat(IterTable,'Av Inner its',"%2.1f") else: IterTable = MO.PandasFormat(IterTable,'Av NS iters',"%2.1f") IterTable = MO.PandasFormat(IterTable,'Av M iters',"%2.1f") print IterTable.to_latex() print " \n Outer Tol: ",OuterTol, "Inner Tol: ", InnerTol # # # if (ShowResultPlots == 'yes'): # plot(u_k) # plot(interpolate(ue,Velocity)) # plot(p_k) # plot(interpolate(pe,Pressure)) # plot(b_k) # plot(interpolate(be,Magnetic)) # plot(r_k) # plot(interpolate(re,Lagrange)) # interactive() interactive()
mit
xzh86/scikit-learn
sklearn/cluster/spectral.py
233
18153
# -*- coding: utf-8 -*- """Algorithms for spectral clustering""" # Author: Gael Varoquaux gael.varoquaux@normalesup.org # Brian Cheung # Wei LI <kuantkid@gmail.com> # License: BSD 3 clause import warnings import numpy as np from ..base import BaseEstimator, ClusterMixin from ..utils import check_random_state, as_float_array from ..utils.validation import check_array from ..utils.extmath import norm from ..metrics.pairwise import pairwise_kernels from ..neighbors import kneighbors_graph from ..manifold import spectral_embedding from .k_means_ import k_means def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20, random_state=None): """Search for a partition matrix (clustering) which is closest to the eigenvector embedding. Parameters ---------- vectors : array-like, shape: (n_samples, n_clusters) The embedding space of the samples. copy : boolean, optional, default: True Whether to copy vectors, or perform in-place normalization. max_svd_restarts : int, optional, default: 30 Maximum number of attempts to restart SVD if convergence fails n_iter_max : int, optional, default: 30 Maximum number of iterations to attempt in rotation and partition matrix search if machine precision convergence is not reached random_state: int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the of the rotation matrix Returns ------- labels : array of integers, shape: n_samples The labels of the clusters. References ---------- - Multiclass spectral clustering, 2003 Stella X. Yu, Jianbo Shi http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf Notes ----- The eigenvector embedding is used to iteratively search for the closest discrete partition. First, the eigenvector embedding is normalized to the space of partition matrices. An optimal discrete partition matrix closest to this normalized embedding multiplied by an initial rotation is calculated. Fixing this discrete partition matrix, an optimal rotation matrix is calculated. These two calculations are performed until convergence. The discrete partition matrix is returned as the clustering solution. Used in spectral clustering, this method tends to be faster and more robust to random initialization than k-means. """ from scipy.sparse import csc_matrix from scipy.linalg import LinAlgError random_state = check_random_state(random_state) vectors = as_float_array(vectors, copy=copy) eps = np.finfo(float).eps n_samples, n_components = vectors.shape # Normalize the eigenvectors to an equal length of a vector of ones. # Reorient the eigenvectors to point in the negative direction with respect # to the first element. This may have to do with constraining the # eigenvectors to lie in a specific quadrant to make the discretization # search easier. norm_ones = np.sqrt(n_samples) for i in range(vectors.shape[1]): vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \ * norm_ones if vectors[0, i] != 0: vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i]) # Normalize the rows of the eigenvectors. Samples should lie on the unit # hypersphere centered at the origin. This transforms the samples in the # embedding space to the space of partition matrices. vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis] svd_restarts = 0 has_converged = False # If there is an exception we try to randomize and rerun SVD again # do this max_svd_restarts times. 
while (svd_restarts < max_svd_restarts) and not has_converged: # Initialize first column of rotation matrix with a row of the # eigenvectors rotation = np.zeros((n_components, n_components)) rotation[:, 0] = vectors[random_state.randint(n_samples), :].T # To initialize the rest of the rotation matrix, find the rows # of the eigenvectors that are as orthogonal to each other as # possible c = np.zeros(n_samples) for j in range(1, n_components): # Accumulate c to ensure row is as orthogonal as possible to # previous picks as well as current one c += np.abs(np.dot(vectors, rotation[:, j - 1])) rotation[:, j] = vectors[c.argmin(), :].T last_objective_value = 0.0 n_iter = 0 while not has_converged: n_iter += 1 t_discrete = np.dot(vectors, rotation) labels = t_discrete.argmax(axis=1) vectors_discrete = csc_matrix( (np.ones(len(labels)), (np.arange(0, n_samples), labels)), shape=(n_samples, n_components)) t_svd = vectors_discrete.T * vectors try: U, S, Vh = np.linalg.svd(t_svd) svd_restarts += 1 except LinAlgError: print("SVD did not converge, randomizing and trying again") break ncut_value = 2.0 * (n_samples - S.sum()) if ((abs(ncut_value - last_objective_value) < eps) or (n_iter > n_iter_max)): has_converged = True else: # otherwise calculate rotation and continue last_objective_value = ncut_value rotation = np.dot(Vh.T, U.T) if not has_converged: raise LinAlgError('SVD did not converge') return labels def spectral_clustering(affinity, n_clusters=8, n_components=None, eigen_solver=None, random_state=None, n_init=10, eigen_tol=0.0, assign_labels='kmeans'): """Apply clustering to a projection to the normalized laplacian. In practice Spectral Clustering is very useful when the structure of the individual clusters is highly non-convex or more generally when a measure of the center and spread of the cluster is not a suitable description of the complete cluster. For instance when clusters are nested circles on the 2D plan. If affinity is the adjacency matrix of a graph, this method can be used to find normalized graph cuts. Read more in the :ref:`User Guide <spectral_clustering>`. Parameters ----------- affinity : array-like or sparse matrix, shape: (n_samples, n_samples) The affinity matrix describing the relationship of the samples to embed. **Must be symmetric**. Possible examples: - adjacency matrix of a graph, - heat kernel of the pairwise distance matrix of the samples, - symmetric k-nearest neighbours connectivity matrix of the samples. n_clusters : integer, optional Number of clusters to extract. n_components : integer, optional, default is n_clusters Number of eigen vectors to use for the spectral embedding eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities random_state : int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by the K-Means initialization. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. eigen_tol : float, optional, default: 0.0 Stopping criterion for eigendecomposition of the Laplacian matrix when using arpack eigen_solver. 
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans' The strategy to use to assign labels in the embedding space. There are two ways to assign labels after the laplacian embedding. k-means can be applied and is a popular choice. But it can also be sensitive to initialization. Discretization is another approach which is less sensitive to random initialization. See the 'Multiclass spectral clustering' paper referenced below for more details on the discretization approach. Returns ------- labels : array of integers, shape: n_samples The labels of the clusters. References ---------- - Normalized cuts and image segmentation, 2000 Jianbo Shi, Jitendra Malik http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324 - A Tutorial on Spectral Clustering, 2007 Ulrike von Luxburg http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323 - Multiclass spectral clustering, 2003 Stella X. Yu, Jianbo Shi http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf Notes ------ The graph should contain only one connect component, elsewhere the results make little sense. This algorithm solves the normalized cut for k=2: it is a normalized spectral clustering. """ if assign_labels not in ('kmeans', 'discretize'): raise ValueError("The 'assign_labels' parameter should be " "'kmeans' or 'discretize', but '%s' was given" % assign_labels) random_state = check_random_state(random_state) n_components = n_clusters if n_components is None else n_components maps = spectral_embedding(affinity, n_components=n_components, eigen_solver=eigen_solver, random_state=random_state, eigen_tol=eigen_tol, drop_first=False) if assign_labels == 'kmeans': _, labels, _ = k_means(maps, n_clusters, random_state=random_state, n_init=n_init) else: labels = discretize(maps, random_state=random_state) return labels class SpectralClustering(BaseEstimator, ClusterMixin): """Apply clustering to a projection to the normalized laplacian. In practice Spectral Clustering is very useful when the structure of the individual clusters is highly non-convex or more generally when a measure of the center and spread of the cluster is not a suitable description of the complete cluster. For instance when clusters are nested circles on the 2D plan. If affinity is the adjacency matrix of a graph, this method can be used to find normalized graph cuts. When calling ``fit``, an affinity matrix is constructed using either kernel function such the Gaussian (aka RBF) kernel of the euclidean distanced ``d(X, X)``:: np.exp(-gamma * d(X,X) ** 2) or a k-nearest neighbors connectivity matrix. Alternatively, using ``precomputed``, a user-provided affinity matrix can be used. Read more in the :ref:`User Guide <spectral_clustering>`. Parameters ----------- n_clusters : integer, optional The dimension of the projection subspace. affinity : string, array-like or callable, default 'rbf' If a string, this may be one of 'nearest_neighbors', 'precomputed', 'rbf' or one of the kernels supported by `sklearn.metrics.pairwise_kernels`. Only kernels that produce similarity scores (non-negative values that increase with similarity) should be used. This property is not checked by the clustering algorithm. gamma : float Scaling factor of RBF, polynomial, exponential chi^2 and sigmoid affinity kernel. Ignored for ``affinity='nearest_neighbors'``. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. 
n_neighbors : integer Number of neighbors to use when constructing the affinity matrix using the nearest neighbors method. Ignored for ``affinity='rbf'``. eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'} The eigenvalue decomposition strategy to use. AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities random_state : int seed, RandomState instance, or None (default) A pseudo random number generator used for the initialization of the lobpcg eigen vectors decomposition when eigen_solver == 'amg' and by the K-Means initialization. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. eigen_tol : float, optional, default: 0.0 Stopping criterion for eigendecomposition of the Laplacian matrix when using arpack eigen_solver. assign_labels : {'kmeans', 'discretize'}, default: 'kmeans' The strategy to use to assign labels in the embedding space. There are two ways to assign labels after the laplacian embedding. k-means can be applied and is a popular choice. But it can also be sensitive to initialization. Discretization is another approach which is less sensitive to random initialization. kernel_params : dictionary of string to any, optional Parameters (keyword arguments) and values for kernel passed as callable object. Ignored by other kernels. Attributes ---------- affinity_matrix_ : array-like, shape (n_samples, n_samples) Affinity matrix used for clustering. Available only if after calling ``fit``. labels_ : Labels of each point Notes ----- If you have an affinity matrix, such as a distance matrix, for which 0 means identical elements, and high values means very dissimilar elements, it can be transformed in a similarity matrix that is well suited for the algorithm by applying the Gaussian (RBF, heat) kernel:: np.exp(- X ** 2 / (2. * delta ** 2)) Another alternative is to take a symmetric version of the k nearest neighbors connectivity matrix of the points. If the pyamg package is installed, it is used: this greatly speeds up computation. References ---------- - Normalized cuts and image segmentation, 2000 Jianbo Shi, Jitendra Malik http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324 - A Tutorial on Spectral Clustering, 2007 Ulrike von Luxburg http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323 - Multiclass spectral clustering, 2003 Stella X. Yu, Jianbo Shi http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf """ def __init__(self, n_clusters=8, eigen_solver=None, random_state=None, n_init=10, gamma=1., affinity='rbf', n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1, kernel_params=None): self.n_clusters = n_clusters self.eigen_solver = eigen_solver self.random_state = random_state self.n_init = n_init self.gamma = gamma self.affinity = affinity self.n_neighbors = n_neighbors self.eigen_tol = eigen_tol self.assign_labels = assign_labels self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def fit(self, X, y=None): """Creates an affinity matrix for X using the selected affinity, then applies spectral clustering to this affinity matrix. 
Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) OR, if affinity==`precomputed`, a precomputed affinity matrix of shape (n_samples, n_samples) """ X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) if X.shape[0] == X.shape[1] and self.affinity != "precomputed": warnings.warn("The spectral clustering API has changed. ``fit``" "now constructs an affinity matrix from data. To use" " a custom affinity matrix, " "set ``affinity=precomputed``.") if self.affinity == 'nearest_neighbors': connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True) self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T) elif self.affinity == 'precomputed': self.affinity_matrix_ = X else: params = self.kernel_params if params is None: params = {} if not callable(self.affinity): params['gamma'] = self.gamma params['degree'] = self.degree params['coef0'] = self.coef0 self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity, filter_params=True, **params) random_state = check_random_state(self.random_state) self.labels_ = spectral_clustering(self.affinity_matrix_, n_clusters=self.n_clusters, eigen_solver=self.eigen_solver, random_state=random_state, n_init=self.n_init, eigen_tol=self.eigen_tol, assign_labels=self.assign_labels) return self @property def _pairwise(self): return self.affinity == "precomputed"
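# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# Spectral clustering is most useful for non-convex clusters, e.g. two nested
# circles; the dataset and parameter choices below are arbitrary.
# ---------------------------------------------------------------------------
def _spectral_clustering_demo():
    from sklearn.datasets import make_circles
    X, _ = make_circles(n_samples=400, factor=0.5, noise=0.05, random_state=0)
    model = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                               n_neighbors=10, assign_labels='discretize',
                               random_state=0)
    # fit_predict comes from ClusterMixin and returns one label per sample
    return model.fit_predict(X)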
bsd-3-clause
UDST/synthpop
synthpop/categorizer.py
2
4463
import itertools

import numpy as np
import pandas as pd

# TODO DOCSTRINGS!!


def categorize(df, eval_d, index_cols=None):
    cat_df = pd.DataFrame(index=df.index)

    for index, expr in eval_d.items():
        cat_df[index] = df.eval(expr)

    if index_cols is not None:
        cat_df[index_cols] = df[index_cols]
        cat_df = cat_df.set_index(index_cols)

    cat_df.columns = pd.MultiIndex.from_tuples(cat_df.columns,
                                               names=['cat_name', 'cat_value'])
    cat_df = cat_df.sort_index(axis=1)

    return cat_df


def sum_accross_category(df, subtract_mean=True):
    """
    This is a convenience function to sum the categorical values for each
    category - the mean across each category is then subtracted so all the
    cells in the table should be close to zero.  The reason why it's not
    exactly zero is because of rounding errors in the scaling of any tract
    variables down to block group variables
    """
    df = df.stack(level=1).fillna(0).groupby(level=0).sum()
    if subtract_mean:
        df = df.sub(df.mean(axis=1), axis="rows")
    return df


def category_combinations(index):
    """
    This method converts a hierarchical multiindex of category names and
    category values into the cross-product of all possible category
    combinations.
    """
    d = {}
    for cat_name, cat_value in index:
        d.setdefault(cat_name, [])
        d[cat_name].append(cat_value)
    for cat_name in list(d):
        if len(d[cat_name]) == 1:
            del d[cat_name]
    df = pd.DataFrame(list(itertools.product(*list(d.values()))))
    df.columns = cols = list(d.keys())
    df.index.name = "cat_id"
    df = df.reset_index().set_index(cols)
    return df


def joint_distribution(sample_df, category_df, mapping_functions=None):

    # set counts to zero
    category_df["frequency"] = 0

    category_names = list(category_df.index.names)
    if mapping_functions:
        for name in category_names:
            assert name in mapping_functions, "Every category needs to have " \
                                              "a mapping function with the " \
                                              "same name to define that " \
                                              "category for the pums sample " \
                                              "records"
            sample_df[name] = sample_df.apply(mapping_functions[name],
                                              axis=1).astype('category')

    category_df["frequency"] = sample_df.groupby(category_names).size()
    category_df["frequency"] = category_df["frequency"].fillna(0)

    # do the merge to add the category id
    sample_df = pd.merge(sample_df, category_df[["cat_id"]],
                         left_on=category_names, right_index=True)

    return sample_df, category_df


def _frequency_table(sample_df, category_ids):
    """
    Take the result that comes out of the method above and turn it into the
    frequency table format used by the ipu
    """
    df = sample_df.groupby(['hh_id', 'cat_id']).size().unstack().fillna(0)

    # need to manually add in case we missed a whole cat_id in the sample
    missing_ids = list(set(category_ids) - set(df.columns))
    if missing_ids:
        missing_df = pd.DataFrame(
            data=np.zeros((len(df), len(missing_ids))),
            index=df.index,
            columns=missing_ids)
        df = df.merge(missing_df, left_index=True, right_index=True)

    assert len(df.columns) == len(category_ids)
    assert df.sum().sum() == len(sample_df)

    return df


def frequency_tables(persons_sample_df, households_sample_df,
                     person_cat_ids, household_cat_ids):

    households_sample_df.index.name = "hh_id"
    households_sample_df = households_sample_df.reset_index().\
        set_index("serialno")

    h_freq_table = _frequency_table(households_sample_df,
                                    household_cat_ids)

    persons_sample_df = pd.merge(persons_sample_df,
                                 households_sample_df[["hh_id"]],
                                 left_on=["serialno"], right_index=True)

    p_freq_table = _frequency_table(persons_sample_df,
                                    person_cat_ids)
    p_freq_table = p_freq_table.reindex(h_freq_table.index).fillna(0)

    assert len(h_freq_table) == len(p_freq_table)

    h_freq_table = h_freq_table.sort_index(axis=1)
    p_freq_table = p_freq_table.sort_index(axis=1)

    return h_freq_table, p_freq_table
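# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# `categorize` maps marginal columns onto (category name, category value)
# pairs through pandas ``eval`` expressions.  The column names below are
# invented for the example; they are not real census variable codes.
# ---------------------------------------------------------------------------
def _categorize_demo():
    df = pd.DataFrame({
        "hh_size_1": [10, 20],
        "hh_size_2plus": [30, 40],
        "income_low": [25, 35],
        "income_high": [15, 25],
        "tract": ["000100", "000200"],
    })
    eval_d = {
        ("persons", "one"): "hh_size_1",
        ("persons", "two or more"): "hh_size_2plus",
        ("income", "low"): "income_low",
        ("income", "high"): "income_high",
    }
    # result is indexed by tract with a (cat_name, cat_value) column MultiIndex
    return categorize(df, eval_d, index_cols=["tract"])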
bsd-3-clause
caspar/PhysicsLab
00_PythonPrimer/least_squares_fit.py
1
1059
# Lab 0
# Linear Least Squares Fit
# Author Caspar Lant

import numpy as np
import matplotlib.pyplot as plt

# load csv file
DATA = "SampleData-1.csv";
measurement, temperature, pressure, uncertainty, error = np.loadtxt(DATA, skiprows=5, unpack=True, delimiter=',');

# plot temperature vs. pressure + error bars
plt.xlabel("Temperature ($^\circ$C)");
plt.ylabel("Pressure (lb/in$ ^2$)");
plt.errorbar(temperature, pressure, error, linestyle = 'None', marker='o', mfc='orange', mec='r', ms=14, mew=1, ecolor = "k");

# linear least squares fit line
def least_squares_fit (x, y):
    xavg = x.mean()
    slope = (y*(x-xavg)).sum()/(x*(x-xavg)).sum()
    intercept = y.mean()-slope*xavg
    return slope, intercept

slope, intercept = least_squares_fit(temperature, pressure);

# create arrays to plot
y1 = slope * 150 + intercept;   # y1 = m(x1) + b
y2 = slope * -250 + intercept;  # y2 = m(x2) + b
x_range = [-250, 150];          # array of x values
y_range = [y2 , y1 ];           # array of y values

# show the graph
plt.plot(x_range,y_range);
plt.show();
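# cross-check (added for illustration; not part of the original lab script):
# numpy's polyfit with degree 1 returns [slope, intercept] and should
# reproduce the hand-rolled least-squares fit above
fit_slope, fit_intercept = np.polyfit(temperature, pressure, 1)
print("hand-rolled fit:", slope, intercept)
print("np.polyfit fit: ", fit_slope, fit_intercept)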
mit
Yvictor/TradingGym
trading_env/envs/training_v0.py
1
27856
import os import logging import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.patches as patches class trading_env: def __init__(self, env_id, obs_data_len, step_len, df, fee, max_position=5, deal_col_name='price', feature_names=['price', 'volume'], fluc_div=100.0, gameover_limit=5, *args, **kwargs): """ #assert df # need deal price as essential and specified the df format # obs_data_leng -> observation data length # step_len -> when call step rolling windows will + step_len # df -> dataframe that contain data for trading(format as...) # price # datetime # serial_number -> serial num of deal at each day recalculating # fee -> when each deal will pay the fee, set with your product # max_position -> the max market position for you trading share # deal_col_name -> the column name for cucalate reward used. # feature_names -> list contain the feature columns to use in trading status. # ?day trade option set as default if don't use this need modify """ logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s') self.logger = logging.getLogger(env_id) #self.file_loc_path = os.environ.get('FILEPATH', '') self.df = df self.action_space = np.array([3,]) self.gym_actions = range(3) self.obs_len = obs_data_len self.feature_len = len(feature_names) self.observation_space = np.array([self.obs_len*self.feature_len,]) self.using_feature = feature_names self.price_name = deal_col_name self.step_len = step_len self.fee = fee self.max_position = max_position self.fluc_div = fluc_div self.gameover = gameover_limit self.begin_fs = self.df[self.df['serial_number']==0] self.date_leng = len(self.begin_fs) self.render_on = 0 self.buy_color, self.sell_color = (1, 2) self.new_rotation, self.cover_rotation = (1, 2) self.transaction_details = pd.DataFrame() self.logger.info('Making new env: {}'.format(env_id)) def reset(self): random_int = np.random.randint(self.date_leng) if random_int == self.date_leng - 1: begin_point = self.begin_fs.index[random_int] end_point = None else: begin_point, end_point = self.begin_fs.index[random_int: random_int+2] self.df_sample = self.df.iloc[begin_point: end_point] self.step_st = 0 self.price = self.df_sample[self.price_name].as_matrix() self.obs_features = self.df_sample[self.using_feature].as_matrix() self.obs_res = self.obs_features[self.step_st: self.step_st+self.obs_len] #maybe make market position feature in final feature, set as option self.posi_l = [0]*self.obs_len # self.position_feature = np.array(self.posi_l[self.step_st:self.step_st+self.obs_len])/(self.max_position*2)+0.5 self.reward_sum = 0 self.reward_fluctuant = 0 self.reward_ret = 0 self.transaction_details = pd.DataFrame() self.reward_curve = [] self.t_index = 0 self.buy_color, self.sell_color = (1, 2) self.new_rotation, self.cover_rotation = (1, 2) return self.obs_res def step(self, action): #price_current can be change next one to some avg of next_N or simulate with slippage next_index = self.step_st+self.obs_len+1 self.price_current = self.price[next_index] self.make_real = 0 reward = 0.0 if action == 1 and self.max_position > self.posi_l[-1] >= 0: self.buy_price = self.price_current if self.posi_l[-1] > 0: self.position_share = self.transaction_details.iloc[-1].loc['position'] abs_pos = abs(self.position_share) self.reward_fluctuant = self.price_current*self.position_share - self.transaction_details.iloc[-1]['price_mean']*self.position_share - self.fee*abs_pos self.price_mean = 
(self.transaction_details.iloc[-1]['price_mean']*self.position_share + self.buy_price)/(self.position_share+1.0) self.position_share += 1 else: self.reward_fluctuant = 0.0 self.position_share = 1.0 self.price_mean = self.buy_price self.posi_l += ([self.posi_l[-1] + 1 ]*self.step_len) self.t_index += 1 transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact': 'Buy', 'transact_type': 'new', 'price': self.buy_price, 'share': 1, 'price_mean': self.price_mean, 'position': self.position_share, 'reward_fluc': self.reward_fluctuant, 'reward': reward, 'reward_sum': self.reward_sum, 'color': self.buy_color, 'rotation': self.new_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) elif action == 2 and -self.max_position < self.posi_l[-1] <= 0: self.sell_price = self.price_current if self.posi_l[-1] < 0: self.position_share = self.transaction_details.iloc[-1].loc['position'] abs_pos = abs(self.position_share) self.reward_fluctuant = self.price_current*self.position_share - self.transaction_details.iloc[-1]['price_mean']*self.position_share - self.fee*abs_pos self.price_mean = (-self.transaction_details.iloc[-1]['price_mean']*self.position_share + self.sell_price)/-(self.position_share-1.0) self.position_share -= 1 else: self.reward_fluctuant = 0.0 self.position_share = -1.0 self.price_mean = self.sell_price self.posi_l+=([self.posi_l[-1] - 1 ]*self.step_len) self.t_index += 1 transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact': 'Sell', 'transact_type': 'new', 'price': self.sell_price, 'share':-1, 'price_mean': self.price_mean, 'position': self.position_share, 'reward_fluc': self.reward_fluctuant, 'reward': reward, 'reward_sum': self.reward_sum, 'color': self.sell_color,'rotation': self.new_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) elif action == 1 and self.posi_l[-1]<0: self.buy_price = self.price_current self.position_share = self.transaction_details.iloc[-1].loc['position'] abs_pos = abs(self.position_share) self.reward_fluctuant = self.price_current*self.position_share - self.transaction_details.iloc[-1]['price_mean']*self.position_share - self.fee*abs_pos self.position_share +=1 reward = self.transaction_details.iloc[-1]['price_mean'] - self.buy_price - self.fee self.reward_sum += reward self.make_real = 1 self.posi_l += ([self.posi_l[-1] + 1 ]*self.step_len) self.t_index += 1 transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact':'Buy', 'transact_type':'cover', 'price':self.buy_price,'share':1, 'price_mean':self.price_mean, 'position':self.position_share, 'reward_fluc': self.reward_fluctuant, 'reward':reward,'reward_sum':self.reward_sum, 'color':self.buy_color,'rotation':self.cover_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) elif action == 2 and self.posi_l[-1]>0: self.sell_price = 
self.price_current self.position_share = self.transaction_details.iloc[-1].loc['position'] abs_pos = abs(self.position_share) self.reward_fluctuant = self.price_current*self.position_share - self.transaction_details.iloc[-1]['price_mean']*self.position_share - self.fee*abs_pos self.position_share -=1 reward = self.sell_price - self.transaction_details.iloc[-1]['price_mean'] - self.fee self.reward_sum += reward self.make_real = 1 self.posi_l+=([self.posi_l[-1] - 1 ]*self.step_len) self.t_index +=1 transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact':'Sell', 'transact_type':'cover', 'price':self.sell_price,'share':-1, 'price_mean':self.price_mean, 'position':self.position_share, 'reward_fluc': self.reward_fluctuant, 'reward':reward,'reward_sum':self.reward_sum, 'color':self.sell_color,'rotation':self.cover_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) elif action ==1 and self.posi_l[-1]==self.max_position: action = 0 elif action == 2 and self.posi_l[-1]== -self.max_position: action = 0 if action ==0: if self.posi_l[-1] != 0: self.posi_l+=([self.posi_l[-1]]*self.step_len) self.t_index +=1 self.position_share = self.transaction_details.iloc[-1].loc['position'] abs_pos = abs(self.position_share) self.reward_fluctuant = self.price_current*self.position_share - self.transaction_details.iloc[-1]['price_mean']*self.position_share - self.fee*abs_pos else: self.posi_l+=([self.posi_l[-1]]*self.step_len) self.t_index +=1 self.reward_fluctuant = 0.0 self.reward_curve.append((self.step_st+self.obs_len, self.reward_fluctuant+self.reward_sum)) self.step_st += self.step_len done = False if self.step_st+self.obs_len+self.step_len >= len(self.price): done = True if self.posi_l[-1] < 0: self.make_real = 1 self.buy_price = self.price_current self.posi_l+=([0]*self.step_len) self.t_index += 1 self.reward_fluctuant = 0.0 self.position_share = self.transaction_details.iloc[-1].loc['position'] reward = (self.transaction_details.iloc[-1]['price_mean'] - self.buy_price - self.fee)*(-self.position_share) self.reward_sum +=reward transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact':'Buy', 'transact_type':'cover', 'price':self.buy_price,'share':-self.position_share, 'price_mean':self.price_mean, 'position':0, 'reward_fluc': self.reward_fluctuant, 'reward':reward,'reward_sum':self.reward_sum, 'color':self.buy_color,'rotation':self.cover_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) if self.posi_l[-1] > 0: self.make_real = 1 self.sell_price = self.price_current self.posi_l+=([0]*self.step_len) self.t_index += 1 self.reward_fluctuant = 0.0 self.position_share = self.transaction_details.iloc[-1].loc['position'] reward = (self.sell_price - self.transaction_details.iloc[-1]['price_mean'] - self.fee)*self.position_share self.reward_sum +=reward transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact':'Sell', 'transact_type':'cover', 'price':self.sell_price,'share':-self.position_share, 'price_mean':self.price_mean, 'position':0, 
'reward_fluc': self.reward_fluctuant, 'reward':reward,'reward_sum':self.reward_sum, 'color':self.sell_color,'rotation':self.cover_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) elif self.gameover and self.reward_sum+self.reward_fluctuant < -self.gameover:#3.5: done = True if self.posi_l[-1] < 0: self.make_real = 1 self.buy_price = self.price_current self.posi_l+=([0]*self.step_len) self.t_index += 1 self.reward_fluctuant = 0.0 self.position_share = self.transaction_details.iloc[-1].loc['position'] reward = (self.transaction_details.iloc[-1]['price_mean'] - self.buy_price - self.fee)*(-self.position_share) self.reward_sum +=reward transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact':'Buy', 'transact_type':'cover', 'price':self.buy_price,'share':-self.position_share, 'price_mean':self.price_mean, 'position':0, 'reward_fluc': self.reward_fluctuant, 'reward':reward,'reward_sum':self.reward_sum, 'color':self.buy_color,'rotation':self.cover_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) if self.posi_l[-1] > 0: self.make_real = 1 self.sell_price = self.price_current self.posi_l+=([0]*self.step_len) self.t_index += 1 self.reward_fluctuant = 0.0 self.position_share = self.transaction_details.iloc[-1].loc['position'] reward = (self.sell_price - self.transaction_details.iloc[-1]['price_mean'] - self.fee)*self.position_share self.reward_sum +=reward transact_n = pd.DataFrame({'step': next_index, 'datetime': self.df_sample.iloc[next_index].datetime, 'transact':'Sell', 'transact_type':'cover', 'price':self.sell_price,'share':-self.position_share, 'price_mean':self.price_mean, 'position':0, 'reward_fluc': self.reward_fluctuant, 'reward':reward,'reward_sum':self.reward_sum, 'color':self.sell_color,'rotation':self.cover_rotation} ,index=[self.t_index],columns=['step','datetime','transact','transact_type','price','share','price_mean','position','reward_fluc', 'reward','reward_sum','color','rotation']) self.transaction_details = pd.concat([self.transaction_details,transact_n]) #self.logger.debug('Setp %d : make action %d'%(self.t_index,action)) self.obs_res = self.obs_features[self.step_st: self.step_st+self.obs_len] # position feature #self.position_feature = np.array(self.posi_l[self.step_st:self.step_st+self.obs_len])/(self.max_position*2)+0.5 #self.obs_res = np.array([self.price_feature,self.up_down_feature,self.ask_bid_feature,self.vol_feature,self.position_feature]).reshape(1,self.obs_len,self.feature_len) #self.obs_pv = np.concatenate([self.price_feature,self.up_down_feature,self.ask_bid_feature,self.vol_feature]) self.reward_ret = 0.0 if self.make_real == 0: self.reward_ret = self.reward_fluctuant/self.fluc_div elif self.make_real ==1: self.reward_ret = reward #if self.reward_ret < 0: #self.reward_ret = -0.005 #self.obs_res = np.concatenate((self.obs_pv.reshape(self.obs_len*self.feature_len),self.position))#.astype(float) info = None return self.obs_res, self.reward_ret, done, info def render(self, save=False): if self.render_on == 0: matplotlib.style.use('dark_background') self.render_on = 1 left, width = 0.1, 0.8 rect1 = 
[left, 0.4, width, 0.55] rect2 = [left, 0.2, width, 0.2] rect3 = [left, 0.05, width, 0.15] self.fig = plt.figure(figsize=(15,8)) self.fig.suptitle('%s'%self.df_sample['datetime'].iloc[0].date(), fontsize=14, fontweight='bold') #self.ax = self.fig.add_subplot(1,1,1) self.ax = self.fig.add_axes(rect1) # left, bottom, width, height self.ax2 = self.fig.add_axes(rect2, sharex=self.ax) self.ax3 = self.fig.add_axes(rect3, sharex=self.ax) #fig, ax = plt.subplots() price_x = list(range(len(self.price[:self.step_st+self.obs_len]))) self.price_plot = self.ax.plot(price_x, self.price[:self.step_st+self.obs_len], 'dodgerblue',zorder=1) self.vol_plot = self.ax3.plot(price_x, self.obs_features[:self.step_st+self.obs_len, 1], 'cyan') rect_high = self.price[self.step_st:self.step_st+self.obs_len].max() - self.price[self.step_st:self.step_st+self.obs_len].min() self.target_box = self.ax.add_patch( patches.Rectangle( (self.step_st, self.price[self.step_st:self.step_st+self.obs_len].min()),self.obs_len,rect_high, label='observation',edgecolor=(1,1,1),facecolor=(0.95,1,0.1,0.8),linestyle=':',linewidth=2, fill=True) ) # remove background) self.fluc_reward_plot = self.ax2.fill_between([x[0] for x in self.reward_curve],0,[y[1] for y in self.reward_curve],facecolor='yellow',alpha=0.8) if len(self.transaction_details)!=0: self.reward_plot = self.ax2.fill_between(self.transaction_details.step,0,self.transaction_details.reward_sum,facecolor='cyan', alpha=0.5) self.share_plot = self.ax2.fill_between(self.transaction_details.step,0,self.transaction_details.position,facecolor='r', alpha=0.5) buy_record = self.transaction_details[self.transaction_details['transact']=='Buy'] if len(buy_record)!=0: trade_x = buy_record.step trade_y = [self.price[i] for i in trade_x] trade_color = [(1,0,0) if i =='new' else (1,0.7,0.7) for i in buy_record.transact_type] #trade_marker = ['v' if i =='Sell' else '^' for i in self.transaction_details.transact] self.trade_plot = self.ax.scatter(x=trade_x,y=trade_y,s=100,marker='^',c=trade_color,edgecolors='none', zorder=2) sell_record = self.transaction_details[self.transaction_details['transact']=='Sell'] if len(sell_record)!=0: trade_x = sell_record.step trade_y = [self.price[i] for i in trade_x] trade_color = [(0,1,0) if i =='new' else (0.7,1,0.7) for i in sell_record.transact_type] self.trade_plot = self.ax.scatter(x=trade_x,y=trade_y,s=100,marker='v',c=trade_color,edgecolors='none',zorder=2) self.ax.set_xlim(0,len(self.price[:self.step_st+self.obs_len])+200) plt.ion() #self.fig.tight_layout() plt.show() if save: self.fig.savefig('fig/%s.png' % str(self.t_index)) elif self.render_on == 1: self.ax.lines.remove(self.price_plot[0]) self.ax3.lines.remove(self.vol_plot[0]) price_x = list(range(len(self.price[:self.step_st+self.obs_len]))) self.price_plot = self.ax.plot(price_x, self.price[:self.step_st+self.obs_len], 'dodgerblue',zorder=1) self.vol_plot = self.ax3.plot(price_x, self.obs_features[:self.step_st+self.obs_len, 1], 'cyan') self.fluc_reward_plot.remove() self.target_box.remove() try: self.reward_plot.remove() self.share_plot.remove() except: pass self.fluc_reward_plot = self.ax2.fill_between([x[0] for x in self.reward_curve],0,[y[1] for y in self.reward_curve],facecolor='yellow',alpha=0.8) rect_high = self.price[self.step_st:self.step_st+self.obs_len].max() - self.price[self.step_st:self.step_st+self.obs_len].min() self.target_box = self.ax.add_patch( patches.Rectangle( (self.step_st, self.price[self.step_st:self.step_st+self.obs_len].min()),self.obs_len,rect_high, 
label='observation',edgecolor=(1,1,1),facecolor=(0.95,1,0.1,0.75),linestyle=':',linewidth=2, fill=True) ) if len(self.transaction_details)!=0: try: self.trade_plot.remove() except: pass self.reward_plot = self.ax2.fill_between(self.transaction_details.step,0,self.transaction_details.reward_sum,edgecolors='cyan',facecolor='cyan') self.share_plot = self.ax2.fill_between(self.transaction_details.step,0,self.transaction_details.position,facecolor='r', alpha=0.5) buy_record = self.transaction_details[self.transaction_details['transact']=='Buy'] if len(buy_record)!=0: trade_x = buy_record.step trade_y = [self.price[i] for i in trade_x] trade_color = [(0.8,0,0) if i =='new' else (1,0.7,0.7) for i in buy_record.transact_type] #trade_marker = ['v' if i =='Sell' else '^' for i in self.transaction_details.transact] self.trade_plot = self.ax.scatter(x=trade_x,y=trade_y,s=100,marker='^',c=trade_color,edgecolors='none', zorder=2) sell_record = self.transaction_details[self.transaction_details['transact']=='Sell'] if len(sell_record)!=0: trade_x = sell_record.step trade_y = [self.price[i] for i in trade_x] trade_color = [(0,1,0) if i =='new' else (0.7,1,0.7) for i in sell_record.transact_type] self.trade_plot = self.ax.scatter(x=trade_x,y=trade_y,s=100,marker='v',c=trade_color,edgecolors='none',zorder=2) self.ax.set_xlim(0,len(self.price[:self.step_st+self.obs_len])+200) if save: self.fig.savefig('fig/%s.png' % str(self.t_index)) plt.pause(0.0001) def backtest(self): self.gameover = None self.df_sample = self.df self.step_st = 0 self.price = self.df_sample[self.price_name].as_matrix() self.obs_features = self.df_sample[self.using_feature].as_matrix() self.obs_res = self.obs_features[self.step_st: self.step_st+self.obs_len] #maybe make market position feature in final feature, set as option self.posi_l = [0]*self.obs_len # self.position_feature = np.array(self.posi_l[self.step_st:self.step_st+self.obs_len])/(self.max_position*2)+0.5 self.reward_sum = 0 self.reward_fluctuant = 0 self.reward_ret = 0 self.transaction_details = pd.DataFrame() self.reward_curve = [] self.t_index = 0 self.buy_color, self.sell_color = (1, 2) self.new_rotation, self.cover_rotation = (1, 2) return self.obs_res def show_pattern(self, transact_index): record_index = self.transaction_details.loc[transact_index]['step'] return self.df_sample.iloc[record_index-self.obs_len-1:record_index] def show_future(self, transact_index): record_index = self.transaction_details.loc[transact_index]['step'] nextdf = self.df_sample.iloc[record_index:] next_sess_index = nextdf[nextdf['serial_number']==0].iloc[0].name return nextdf.loc[:next_sess_index]
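# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# The environment expects a dataframe with the deal-price column, the feature
# columns, a 'serial_number' column that restarts at 0 each session and a
# 'datetime' column.  Everything below is synthetic, and it assumes an older
# pandas in which DataFrame.as_matrix exists, as the class above does.
# ---------------------------------------------------------------------------
def _random_agent_demo():
    n = 2000
    df = pd.DataFrame({
        'price': 100 + np.cumsum(np.random.randn(n)),
        'volume': np.random.randint(1, 100, size=n).astype(float),
        'serial_number': np.tile(np.arange(n // 2), 2),   # two mock sessions
        'datetime': pd.date_range('2017-01-01', periods=n, freq='S'),
    })
    env = trading_env(env_id='demo', obs_data_len=256, step_len=16,
                      df=df, fee=0.1, max_position=5,
                      deal_col_name='price', feature_names=['price', 'volume'])
    state = env.reset()
    done = False
    while not done:
        action = np.random.randint(3)   # 0: hold, 1: buy, 2: sell
        state, reward, done, info = env.step(action)
    return env.transaction_details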
mit
dsm054/pandas
pandas/tests/frame/test_repr_info.py
1
17663
# -*- coding: utf-8 -*- from __future__ import print_function from datetime import datetime, timedelta import re import sys import textwrap import numpy as np import pytest from pandas import (DataFrame, Series, compat, option_context, date_range, period_range, Categorical) from pandas.compat import StringIO, lrange, u, PYPY import pandas.io.formats.format as fmt import pandas as pd import pandas.util.testing as tm from pandas.tests.frame.common import TestData # Segregated collection of methods that require the BlockManager internal data # structure class TestDataFrameReprInfoEtc(TestData): def test_repr_empty(self): # empty foo = repr(self.empty) # noqa # empty with index frame = DataFrame(index=np.arange(1000)) foo = repr(frame) # noqa def test_repr_mixed(self): buf = StringIO() # mixed foo = repr(self.mixed_frame) # noqa self.mixed_frame.info(verbose=False, buf=buf) @pytest.mark.slow def test_repr_mixed_big(self): # big mixed biggie = DataFrame({'A': np.random.randn(200), 'B': tm.makeStringIndex(200)}, index=lrange(200)) biggie.loc[:20, 'A'] = np.nan biggie.loc[:20, 'B'] = np.nan foo = repr(biggie) # noqa def test_repr(self): buf = StringIO() # small one foo = repr(self.frame) self.frame.info(verbose=False, buf=buf) # even smaller self.frame.reindex(columns=['A']).info(verbose=False, buf=buf) self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf) # exhausting cases in DataFrame.info # columns but no index no_index = DataFrame(columns=[0, 1, 3]) foo = repr(no_index) # noqa # no columns or index self.empty.info(buf=buf) df = DataFrame(["a\n\r\tb"], columns=["a\n\r\td"], index=["a\n\r\tf"]) assert "\t" not in repr(df) assert "\r" not in repr(df) assert "a\n" not in repr(df) def test_repr_dimensions(self): df = DataFrame([[1, 2, ], [3, 4]]) with option_context('display.show_dimensions', True): assert "2 rows x 2 columns" in repr(df) with option_context('display.show_dimensions', False): assert "2 rows x 2 columns" not in repr(df) with option_context('display.show_dimensions', 'truncate'): assert "2 rows x 2 columns" not in repr(df) @pytest.mark.slow def test_repr_big(self): # big one biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4), index=lrange(200)) repr(biggie) def test_repr_unsortable(self): # columns are not sortable import warnings warn_filters = warnings.filters warnings.filterwarnings('ignore', category=FutureWarning, module=".*format") unsortable = DataFrame({'foo': [1] * 50, datetime.today(): [1] * 50, 'bar': ['bar'] * 50, datetime.today() + timedelta(1): ['bar'] * 50}, index=np.arange(50)) repr(unsortable) fmt.set_option('display.precision', 3, 'display.column_space', 10) repr(self.frame) fmt.set_option('display.max_rows', 10, 'display.max_columns', 2) repr(self.frame) fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000) repr(self.frame) tm.reset_display_options() warnings.filters = warn_filters def test_repr_unicode(self): uval = u('\u03c3\u03c3\u03c3\u03c3') # TODO(wesm): is this supposed to be used? 
bval = uval.encode('utf-8') # noqa df = DataFrame({'A': [uval, uval]}) result = repr(df) ex_top = ' A' assert result.split('\n')[0].rstrip() == ex_top df = DataFrame({'A': [uval, uval]}) result = repr(df) assert result.split('\n')[0].rstrip() == ex_top def test_unicode_string_with_unicode(self): df = DataFrame({'A': [u("\u05d0")]}) if compat.PY3: str(df) else: compat.text_type(df) def test_bytestring_with_unicode(self): df = DataFrame({'A': [u("\u05d0")]}) if compat.PY3: bytes(df) else: str(df) def test_very_wide_info_repr(self): df = DataFrame(np.random.randn(10, 20), columns=tm.rands_array(10, 20)) repr(df) def test_repr_column_name_unicode_truncation_bug(self): # #1906 df = DataFrame({'Id': [7117434], 'StringCol': ('Is it possible to modify drop plot code' ' so that the output graph is displayed ' 'in iphone simulator, Is it possible to ' 'modify drop plot code so that the ' 'output graph is \xe2\x80\xa8displayed ' 'in iphone simulator.Now we are adding ' 'the CSV file externally. I want to Call' ' the File through the code..')}) with option_context('display.max_columns', 20): assert 'StringCol' in repr(df) def test_latex_repr(self): result = r"""\begin{tabular}{llll} \toprule {} & 0 & 1 & 2 \\ \midrule 0 & $\alpha$ & b & c \\ 1 & 1 & 2 & 3 \\ \bottomrule \end{tabular} """ with option_context("display.latex.escape", False, 'display.latex.repr', True): df = DataFrame([[r'$\alpha$', 'b', 'c'], [1, 2, 3]]) assert result == df._repr_latex_() # GH 12182 assert df._repr_latex_() is None @tm.capture_stdout def test_info(self): io = StringIO() self.frame.info(buf=io) self.tsframe.info(buf=io) frame = DataFrame(np.random.randn(5, 3)) frame.info() frame.info(verbose=False) def test_info_memory(self): # https://github.com/pandas-dev/pandas/issues/21056 df = pd.DataFrame({'a': pd.Series([1, 2], dtype='i8')}) buf = StringIO() df.info(buf=buf) result = buf.getvalue() bytes = float(df.memory_usage().sum()) expected = textwrap.dedent("""\ <class 'pandas.core.frame.DataFrame'> RangeIndex: 2 entries, 0 to 1 Data columns (total 1 columns): a 2 non-null int64 dtypes: int64(1) memory usage: {} bytes """.format(bytes)) assert result == expected def test_info_wide(self): from pandas import set_option, reset_option io = StringIO() df = DataFrame(np.random.randn(5, 101)) df.info(buf=io) io = StringIO() df.info(buf=io, max_cols=101) rs = io.getvalue() assert len(rs.splitlines()) > 100 xp = rs set_option('display.max_info_columns', 101) io = StringIO() df.info(buf=io) assert rs == xp reset_option('display.max_info_columns') def test_info_duplicate_columns(self): io = StringIO() # it works! 
frame = DataFrame(np.random.randn(1500, 4), columns=['a', 'a', 'b', 'b']) frame.info(buf=io) def test_info_duplicate_columns_shows_correct_dtypes(self): # GH11761 io = StringIO() frame = DataFrame([[1, 2.0]], columns=['a', 'a']) frame.info(buf=io) io.seek(0) lines = io.readlines() assert 'a 1 non-null int64\n' == lines[3] assert 'a 1 non-null float64\n' == lines[4] def test_info_shows_column_dtypes(self): dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', 'complex128', 'object', 'bool'] data = {} n = 10 for i, dtype in enumerate(dtypes): data[i] = np.random.randint(2, size=n).astype(dtype) df = DataFrame(data) buf = StringIO() df.info(buf=buf) res = buf.getvalue() for i, dtype in enumerate(dtypes): name = '%d %d non-null %s' % (i, n, dtype) assert name in res def test_info_max_cols(self): df = DataFrame(np.random.randn(10, 5)) for len_, verbose in [(5, None), (5, False), (10, True)]: # For verbose always ^ setting ^ summarize ^ full output with option_context('max_info_columns', 4): buf = StringIO() df.info(buf=buf, verbose=verbose) res = buf.getvalue() assert len(res.strip().split('\n')) == len_ for len_, verbose in [(10, None), (5, False), (10, True)]: # max_cols no exceeded with option_context('max_info_columns', 5): buf = StringIO() df.info(buf=buf, verbose=verbose) res = buf.getvalue() assert len(res.strip().split('\n')) == len_ for len_, max_cols in [(10, 5), (5, 4)]: # setting truncates with option_context('max_info_columns', 4): buf = StringIO() df.info(buf=buf, max_cols=max_cols) res = buf.getvalue() assert len(res.strip().split('\n')) == len_ # setting wouldn't truncate with option_context('max_info_columns', 5): buf = StringIO() df.info(buf=buf, max_cols=max_cols) res = buf.getvalue() assert len(res.strip().split('\n')) == len_ def test_info_memory_usage(self): # Ensure memory usage is displayed, when asserted, on the last line dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]', 'complex128', 'object', 'bool'] data = {} n = 10 for i, dtype in enumerate(dtypes): data[i] = np.random.randint(2, size=n).astype(dtype) df = DataFrame(data) buf = StringIO() # display memory usage case df.info(buf=buf, memory_usage=True) res = buf.getvalue().splitlines() assert "memory usage: " in res[-1] # do not display memory usage case df.info(buf=buf, memory_usage=False) res = buf.getvalue().splitlines() assert "memory usage: " not in res[-1] df.info(buf=buf, memory_usage=True) res = buf.getvalue().splitlines() # memory usage is a lower bound, so print it as XYZ+ MB assert re.match(r"memory usage: [^+]+\+", res[-1]) df.iloc[:, :5].info(buf=buf, memory_usage=True) res = buf.getvalue().splitlines() # excluded column with object dtype, so estimate is accurate assert not re.match(r"memory usage: [^+]+\+", res[-1]) # Test a DataFrame with duplicate columns dtypes = ['int64', 'int64', 'int64', 'float64'] data = {} n = 100 for i, dtype in enumerate(dtypes): data[i] = np.random.randint(2, size=n).astype(dtype) df = DataFrame(data) df.columns = dtypes df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo']) df_with_object_index.info(buf=buf, memory_usage=True) res = buf.getvalue().splitlines() assert re.match(r"memory usage: [^+]+\+", res[-1]) df_with_object_index.info(buf=buf, memory_usage='deep') res = buf.getvalue().splitlines() assert re.match(r"memory usage: [^+]+$", res[-1]) # Ensure df size is as expected # (cols * rows * bytes) + index size df_size = df.memory_usage().sum() exp_size = len(dtypes) * n * 8 + df.index.nbytes assert df_size == exp_size # Ensure 
number of cols in memory_usage is the same as df size_df = np.size(df.columns.values) + 1 # index=True; default assert size_df == np.size(df.memory_usage()) # assert deep works only on object assert df.memory_usage().sum() == df.memory_usage(deep=True).sum() # test for validity DataFrame(1, index=['a'], columns=['A'] ).memory_usage(index=True) DataFrame(1, index=['a'], columns=['A'] ).index.nbytes df = DataFrame( data=1, index=pd.MultiIndex.from_product( [['a'], range(1000)]), columns=['A'] ) df.index.nbytes df.memory_usage(index=True) df.index.values.nbytes mem = df.memory_usage(deep=True).sum() assert mem > 0 @pytest.mark.skipif(PYPY, reason="on PyPy deep=True doesn't change result") def test_info_memory_usage_deep_not_pypy(self): df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo']) assert (df_with_object_index.memory_usage( index=True, deep=True).sum() > df_with_object_index.memory_usage( index=True).sum()) df_object = pd.DataFrame({'a': ['a']}) assert (df_object.memory_usage(deep=True).sum() > df_object.memory_usage().sum()) @pytest.mark.skipif(not PYPY, reason="on PyPy deep=True does not change result") def test_info_memory_usage_deep_pypy(self): df_with_object_index = pd.DataFrame({'a': [1]}, index=['foo']) assert (df_with_object_index.memory_usage( index=True, deep=True).sum() == df_with_object_index.memory_usage( index=True).sum()) df_object = pd.DataFrame({'a': ['a']}) assert (df_object.memory_usage(deep=True).sum() == df_object.memory_usage().sum()) @pytest.mark.skipif(PYPY, reason="PyPy getsizeof() fails by design") def test_usage_via_getsizeof(self): df = DataFrame( data=1, index=pd.MultiIndex.from_product( [['a'], range(1000)]), columns=['A'] ) mem = df.memory_usage(deep=True).sum() # sys.getsizeof will call the .memory_usage with # deep=True, and add on some GC overhead diff = mem - sys.getsizeof(df) assert abs(diff) < 100 def test_info_memory_usage_qualified(self): buf = StringIO() df = DataFrame(1, columns=list('ab'), index=[1, 2, 3]) df.info(buf=buf) assert '+' not in buf.getvalue() buf = StringIO() df = DataFrame(1, columns=list('ab'), index=list('ABC')) df.info(buf=buf) assert '+' in buf.getvalue() buf = StringIO() df = DataFrame(1, columns=list('ab'), index=pd.MultiIndex.from_product( [range(3), range(3)])) df.info(buf=buf) assert '+' not in buf.getvalue() buf = StringIO() df = DataFrame(1, columns=list('ab'), index=pd.MultiIndex.from_product( [range(3), ['foo', 'bar']])) df.info(buf=buf) assert '+' in buf.getvalue() def test_info_memory_usage_bug_on_multiindex(self): # GH 14308 # memory usage introspection should not materialize .values from string import ascii_uppercase as uppercase def memory_usage(f): return f.memory_usage(deep=True).sum() N = 100 M = len(uppercase) index = pd.MultiIndex.from_product([list(uppercase), pd.date_range('20160101', periods=N)], names=['id', 'date']) df = DataFrame({'value': np.random.randn(N * M)}, index=index) unstacked = df.unstack('id') assert df.values.nbytes == unstacked.values.nbytes assert memory_usage(df) > memory_usage(unstacked) # high upper bound assert memory_usage(unstacked) - memory_usage(df) < 2000 def test_info_categorical(self): # GH14298 idx = pd.CategoricalIndex(['a', 'b']) df = pd.DataFrame(np.zeros((2, 2)), index=idx, columns=idx) buf = StringIO() df.info(buf=buf) def test_info_categorical_column(self): # make sure it works n = 2500 df = DataFrame({'int64': np.random.randint(100, size=n)}) df['category'] = Series(np.array(list('abcdefghij')).take( np.random.randint(0, 10, size=n))).astype('category') 
df.isna() buf = StringIO() df.info(buf=buf) df2 = df[df['category'] == 'd'] buf = compat.StringIO() df2.info(buf=buf) def test_repr_categorical_dates_periods(self): # normal DataFrame dt = date_range('2011-01-01 09:00', freq='H', periods=5, tz='US/Eastern') p = period_range('2011-01', freq='M', periods=5) df = DataFrame({'dt': dt, 'p': p}) exp = """ dt p 0 2011-01-01 09:00:00-05:00 2011-01 1 2011-01-01 10:00:00-05:00 2011-02 2 2011-01-01 11:00:00-05:00 2011-03 3 2011-01-01 12:00:00-05:00 2011-04 4 2011-01-01 13:00:00-05:00 2011-05""" df = DataFrame({'dt': Categorical(dt), 'p': Categorical(p)}) assert repr(df) == exp
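# ---------------------------------------------------------------------------
# Standalone illustration (added; not part of the pandas test suite).  The
# recurring pattern in the tests above is that DataFrame.info writes to any
# file-like ``buf``, which makes its output easy to assert on.
# ---------------------------------------------------------------------------
def _info_capture_demo():
    buf = StringIO()
    df = DataFrame({'a': [1, 2], 'b': [1.5, 2.5]})
    df.info(buf=buf, memory_usage=True)
    text = buf.getvalue()
    assert 'memory usage:' in text
    return text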
bsd-3-clause
pcm17/tensorflow
tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
1
35604
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Estimator.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools import json import os import tempfile import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib import learn from tensorflow.contrib import lookup from tensorflow.contrib.framework.python.ops import variables from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib from tensorflow.contrib.layers.python.layers import optimizers from tensorflow.contrib.learn.python.learn import experiment from tensorflow.contrib.learn.python.learn import models from tensorflow.contrib.learn.python.learn import monitors as monitors_lib from tensorflow.contrib.learn.python.learn.datasets import base from tensorflow.contrib.learn.python.learn.estimators import _sklearn from tensorflow.contrib.learn.python.learn.estimators import estimator from tensorflow.contrib.learn.python.learn.estimators import linear from tensorflow.contrib.learn.python.learn.estimators import model_fn from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.utils import input_fn_utils from tensorflow.contrib.metrics.python.ops import metric_ops from tensorflow.contrib.testing.python.framework import util_test from tensorflow.python.client import session as session_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.saved_model import loader from tensorflow.python.saved_model import tag_constants from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import input as input_lib from tensorflow.python.training import monitored_session from tensorflow.python.training import queue_runner_impl from tensorflow.python.training import session_run_hook from tensorflow.python.util import compat _BOSTON_INPUT_DIM = 13 _IRIS_INPUT_DIM = 4 def boston_input_fn(num_epochs=None): boston = base.load_boston() features = input_lib.limit_epochs( array_ops.reshape( constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]), num_epochs=num_epochs) labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1]) return features, labels def boston_input_fn_with_queue(num_epochs=None): features, labels = boston_input_fn(num_epochs=num_epochs) # Create a minimal queue runner. 
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32) queue_runner = queue_runner_impl.QueueRunner(fake_queue, [constant_op.constant(0)]) queue_runner_impl.add_queue_runner(queue_runner) return features, labels def iris_input_fn(): iris = base.load_iris() features = array_ops.reshape( constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM]) labels = array_ops.reshape(constant_op.constant(iris.target), [-1]) return features, labels def iris_input_fn_labels_dict(): iris = base.load_iris() features = array_ops.reshape( constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM]) labels = { 'labels': array_ops.reshape(constant_op.constant(iris.target), [-1]) } return features, labels def boston_eval_fn(): boston = base.load_boston() n_examples = len(boston.target) features = array_ops.reshape( constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM]) labels = array_ops.reshape( constant_op.constant(boston.target), [n_examples, 1]) return array_ops.concat([features, features], 0), array_ops.concat( [labels, labels], 0) def extract(data, key): if isinstance(data, dict): assert key in data return data[key] else: return data def linear_model_params_fn(features, labels, mode, params): features = extract(features, 'input') labels = extract(labels, 'labels') assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=params['learning_rate']) return prediction, loss, train_op def linear_model_fn(features, labels, mode): features = extract(features, 'input') labels = extract(labels, 'labels') assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) if isinstance(features, dict): (_, features), = features.items() prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return prediction, loss, train_op def linear_model_fn_with_model_fn_ops(features, labels, mode): """Same as linear_model_fn, but returns `ModelFnOps`.""" assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL, model_fn.ModeKeys.INFER) prediction, loss = (models.linear_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return model_fn.ModelFnOps( mode=mode, predictions=prediction, loss=loss, train_op=train_op) def logistic_model_no_mode_fn(features, labels): features = extract(features, 'input') labels = extract(labels, 'labels') labels = array_ops.one_hot(labels, 3, 1, 0) prediction, loss = (models.logistic_regression_zero_init(features, labels)) train_op = optimizers.optimize_loss( loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1) return { 'class': math_ops.argmax(prediction, 1), 'prob': prediction }, loss, train_op VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n' EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n' def _build_estimator_for_export_tests(tmpdir): def _input_fn(): iris = base.load_iris() return { 'feature': constant_op.constant( iris.data, dtype=dtypes.float32) }, constant_op.constant( iris.target, shape=[150], dtype=dtypes.int32) feature_columns = [ feature_column_lib.real_valued_column( 'feature', dimension=4) ] est = linear.LinearRegressor(feature_columns) est.fit(input_fn=_input_fn, steps=20) feature_spec = 
feature_column_lib.create_feature_spec_for_parsing( feature_columns) serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec) # hack in an op that uses an asset, in order to test asset export. # this is not actually valid, of course. def serving_input_fn_with_asset(): features, labels, inputs = serving_input_fn() vocab_file_name = os.path.join(tmpdir, 'my_vocab_file') vocab_file = gfile.GFile(vocab_file_name, mode='w') vocab_file.write(VOCAB_FILE_CONTENT) vocab_file.close() hashtable = lookup.HashTable( lookup.TextFileStringTableInitializer(vocab_file_name), 'x') features['bogus_lookup'] = hashtable.lookup( math_ops.to_int64(features['feature'])) return input_fn_utils.InputFnOps(features, labels, inputs) return est, serving_input_fn_with_asset class CheckCallsMonitor(monitors_lib.BaseMonitor): def __init__(self, expect_calls): super(CheckCallsMonitor, self).__init__() self.begin_calls = None self.end_calls = None self.expect_calls = expect_calls def begin(self, max_steps): self.begin_calls = 0 self.end_calls = 0 def step_begin(self, step): self.begin_calls += 1 return {} def step_end(self, step, outputs): self.end_calls += 1 return False def end(self): assert (self.end_calls == self.expect_calls and self.begin_calls == self.expect_calls) class EstimatorTest(test.TestCase): def testExperimentIntegration(self): exp = experiment.Experiment( estimator=estimator.Estimator(model_fn=linear_model_fn), train_input_fn=boston_input_fn, eval_input_fn=boston_input_fn) exp.test() def testModelFnArgs(self): expected_param = {'some_param': 'some_value'} expected_config = run_config.RunConfig() expected_config.i_am_test = True def _argument_checker(features, labels, mode, params, config): _, _ = features, labels self.assertEqual(model_fn.ModeKeys.TRAIN, mode) self.assertEqual(expected_param, params) self.assertTrue(config.i_am_test) return constant_op.constant(0.), constant_op.constant( 0.), constant_op.constant(0.) est = estimator.Estimator( model_fn=_argument_checker, params=expected_param, config=expected_config) est.fit(input_fn=boston_input_fn, steps=1) def testModelFnWithModelDir(self): expected_param = {'some_param': 'some_value'} expected_model_dir = tempfile.mkdtemp() def _argument_checker(features, labels, mode, params, config=None, model_dir=None): _, _, _ = features, labels, config self.assertEqual(model_fn.ModeKeys.TRAIN, mode) self.assertEqual(expected_param, params) self.assertEqual(model_dir, expected_model_dir) return constant_op.constant(0.), constant_op.constant( 0.), constant_op.constant(0.) 
est = estimator.Estimator(model_fn=_argument_checker, params=expected_param, model_dir=expected_model_dir) est.fit(input_fn=boston_input_fn, steps=1) def testInvalidModelFn_no_train_op(self): def _invalid_model_fn(features, labels): # pylint: disable=unused-argument w = variables_lib.Variable(42.0, 'weight') loss = 100.0 - w return None, loss, None est = estimator.Estimator(model_fn=_invalid_model_fn) with self.assertRaisesRegexp(ValueError, 'Missing training_op'): est.fit(input_fn=boston_input_fn, steps=1) def testInvalidModelFn_no_loss(self): def _invalid_model_fn(features, labels, mode): # pylint: disable=unused-argument w = variables_lib.Variable(42.0, 'weight') loss = 100.0 - w train_op = w.assign_add(loss / 100.0) predictions = loss if mode == model_fn.ModeKeys.EVAL: loss = None return predictions, loss, train_op est = estimator.Estimator(model_fn=_invalid_model_fn) est.fit(input_fn=boston_input_fn, steps=1) with self.assertRaisesRegexp(ValueError, 'Missing loss'): est.evaluate(input_fn=boston_eval_fn, steps=1) def testInvalidModelFn_no_prediction(self): def _invalid_model_fn(features, labels): # pylint: disable=unused-argument w = variables_lib.Variable(42.0, 'weight') loss = 100.0 - w train_op = w.assign_add(loss / 100.0) return None, loss, train_op est = estimator.Estimator(model_fn=_invalid_model_fn) est.fit(input_fn=boston_input_fn, steps=1) with self.assertRaisesRegexp(ValueError, 'Missing prediction'): est.evaluate(input_fn=boston_eval_fn, steps=1) with self.assertRaisesRegexp(ValueError, 'Missing prediction'): est.predict(input_fn=boston_input_fn) with self.assertRaisesRegexp(ValueError, 'Missing prediction'): est.predict( input_fn=functools.partial( boston_input_fn, num_epochs=1), as_iterable=True) def testModelFnScaffold(self): self.is_init_fn_called = False def _init_fn(scaffold, session): _, _ = scaffold, session self.is_init_fn_called = True def _model_fn_scaffold(features, labels, mode): _, _ = features, labels return model_fn.ModelFnOps( mode=mode, predictions=constant_op.constant(0.), loss=constant_op.constant(0.), train_op=constant_op.constant(0.), scaffold=monitored_session.Scaffold(init_fn=_init_fn)) est = estimator.Estimator(model_fn=_model_fn_scaffold) est.fit(input_fn=boston_input_fn, steps=1) self.assertTrue(self.is_init_fn_called) def testCheckpointSaverHookSuppressesTheDefaultOne(self): saver_hook = test.mock.Mock( spec=basic_session_run_hooks.CheckpointSaverHook) saver_hook.before_run.return_value = None est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook]) # test nothing is saved, due to suppressing default saver with self.assertRaises(learn.NotFittedError): est.evaluate(input_fn=boston_input_fn, steps=1) def testCustomConfig(self): test_random_seed = 5783452 class TestInput(object): def __init__(self): self.random_seed = 0 def config_test_input_fn(self): self.random_seed = ops.get_default_graph().seed return constant_op.constant([[1.]]), constant_op.constant([1.]) config = run_config.RunConfig(tf_random_seed=test_random_seed) test_input = TestInput() est = estimator.Estimator(model_fn=linear_model_fn, config=config) est.fit(input_fn=test_input.config_test_input_fn, steps=1) # If input_fn ran, it will have given us the random seed set on the graph. 
self.assertEquals(test_random_seed, test_input.random_seed) def testCheckInputs(self): est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn)) # Lambdas so we have to different objects to compare right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32) right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32) est.fit(right_features(), right_labels(), steps=1) # TODO(wicke): This does not fail for np.int32 because of data_feeder magic. wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64) wrong_size_features = np.ones(shape=[7, 10]) wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32) wrong_size_labels = np.ones(shape=[7, 11]) est.fit(x=right_features(), y=right_labels(), steps=1) with self.assertRaises(ValueError): est.fit(x=wrong_type_features, y=right_labels(), steps=1) with self.assertRaises(ValueError): est.fit(x=wrong_size_features, y=right_labels(), steps=1) with self.assertRaises(ValueError): est.fit(x=right_features(), y=wrong_type_labels, steps=1) with self.assertRaises(ValueError): est.fit(x=right_features(), y=wrong_size_labels, steps=1) def testBadInput(self): est = estimator.Estimator(model_fn=linear_model_fn) self.assertRaisesRegexp( ValueError, 'Either x or input_fn must be provided.', est.fit, x=None, input_fn=None, steps=1) self.assertRaisesRegexp( ValueError, 'Can not provide both input_fn and x or y', est.fit, x='X', input_fn=iris_input_fn, steps=1) self.assertRaisesRegexp( ValueError, 'Can not provide both input_fn and x or y', est.fit, y='Y', input_fn=iris_input_fn, steps=1) self.assertRaisesRegexp( ValueError, 'Can not provide both input_fn and batch_size', est.fit, input_fn=iris_input_fn, batch_size=100, steps=1) self.assertRaisesRegexp( ValueError, 'Inputs cannot be tensors. Please provide input_fn.', est.fit, x=constant_op.constant(1.), steps=1) def testUntrained(self): boston = base.load_boston() est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn)) with self.assertRaises(learn.NotFittedError): _ = est.score(x=boston.data, y=boston.target.astype(np.float64)) with self.assertRaises(learn.NotFittedError): est.predict(x=boston.data) def testContinueTraining(self): boston = base.load_boston() output_dir = tempfile.mkdtemp() est = estimator.SKCompat( estimator.Estimator( model_fn=linear_model_fn, model_dir=output_dir)) float64_labels = boston.target.astype(np.float64) est.fit(x=boston.data, y=float64_labels, steps=50) scores = est.score( x=boston.data, y=float64_labels, metrics={'MSE': metric_ops.streaming_mean_squared_error}) del est # Create another estimator object with the same output dir. est2 = estimator.SKCompat( estimator.Estimator( model_fn=linear_model_fn, model_dir=output_dir)) # Check we can evaluate and predict. scores2 = est2.score( x=boston.data, y=float64_labels, metrics={'MSE': metric_ops.streaming_mean_squared_error}) self.assertAllClose(scores['MSE'], scores2['MSE']) predictions = np.array(list(est2.predict(x=boston.data))) other_score = _sklearn.mean_squared_error(predictions, float64_labels) self.assertAllClose(scores['MSE'], other_score) # Check we can keep training. 
est2.fit(x=boston.data, y=float64_labels, steps=100) scores3 = est2.score( x=boston.data, y=float64_labels, metrics={'MSE': metric_ops.streaming_mean_squared_error}) self.assertLess(scores3['MSE'], scores['MSE']) def testEstimatorParams(self): boston = base.load_boston() est = estimator.SKCompat( estimator.Estimator( model_fn=linear_model_params_fn, params={'learning_rate': 0.01})) est.fit(x=boston.data, y=boston.target, steps=100) def testHooksNotChanged(self): est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) # We pass empty array and expect it to remain empty after calling # fit and evaluate. Requires inside to copy this array if any hooks were # added. my_array = [] est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array) _ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array) self.assertEqual(my_array, []) def testIrisIterator(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) x_iter = itertools.islice(iris.data, 100) y_iter = itertools.islice(iris.target, 100) estimator.SKCompat(est).fit(x_iter, y_iter, steps=20) eval_result = est.evaluate(input_fn=iris_input_fn, steps=1) x_iter_eval = itertools.islice(iris.data, 100) y_iter_eval = itertools.islice(iris.target, 100) score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval) print(score_result) self.assertItemsEqual(eval_result.keys(), score_result.keys()) self.assertItemsEqual(['global_step', 'loss'], score_result.keys()) predictions = estimator.SKCompat(est).predict(x=iris.data)['class'] self.assertEqual(len(predictions), iris.target.shape[0]) def testIrisIteratorArray(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) x_iter = itertools.islice(iris.data, 100) y_iter = (np.array(x) for x in iris.target) est.fit(x_iter, y_iter, steps=100) _ = est.evaluate(input_fn=iris_input_fn, steps=1) _ = six.next(est.predict(x=iris.data))['class'] def testIrisIteratorPlainInt(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) x_iter = itertools.islice(iris.data, 100) y_iter = (v for v in iris.target) est.fit(x_iter, y_iter, steps=100) _ = est.evaluate(input_fn=iris_input_fn, steps=1) _ = six.next(est.predict(x=iris.data))['class'] def testIrisTruncatedIterator(self): iris = base.load_iris() est = estimator.Estimator(model_fn=logistic_model_no_mode_fn) x_iter = itertools.islice(iris.data, 50) y_iter = ([np.int32(v)] for v in iris.target) est.fit(x_iter, y_iter, steps=100) def testTrainStepsIsIncremental(self): est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=10) self.assertEqual(10, est.get_variable_value('global_step')) est.fit(input_fn=boston_input_fn, steps=15) self.assertEqual(25, est.get_variable_value('global_step')) def testTrainMaxStepsIsNotIncremental(self): est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, max_steps=10) self.assertEqual(10, est.get_variable_value('global_step')) est.fit(input_fn=boston_input_fn, max_steps=15) self.assertEqual(15, est.get_variable_value('global_step')) def testPredict(self): est = estimator.Estimator(model_fn=linear_model_fn) boston = base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) output = list(est.predict(x=boston.data, batch_size=10)) self.assertEqual(len(output), boston.target.shape[0]) def testWithModelFnOps(self): """Test for model_fn that returns `ModelFnOps`.""" est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops) boston = 
base.load_boston() est.fit(input_fn=boston_input_fn, steps=1) input_fn = functools.partial(boston_input_fn, num_epochs=1) scores = est.evaluate(input_fn=input_fn, steps=1) self.assertIn('loss', scores.keys()) output = list(est.predict(input_fn=input_fn)) self.assertEqual(len(output), boston.target.shape[0]) def testWrongInput(self): def other_input_fn(): return { 'other': constant_op.constant([0, 0, 0]) }, constant_op.constant([0, 0, 0]) est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=1) with self.assertRaises(ValueError): est.fit(input_fn=other_input_fn, steps=1) def testMonitorsForFit(self): est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=21, monitors=[CheckCallsMonitor(expect_calls=21)]) def testHooksForEvaluate(self): class CheckCallHook(session_run_hook.SessionRunHook): def __init__(self): self.run_count = 0 def after_run(self, run_context, run_values): self.run_count += 1 est = learn.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=1) hook = CheckCallHook() est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook]) self.assertEqual(3, hook.run_count) def testSummaryWriting(self): est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=200) est.evaluate(input_fn=boston_input_fn, steps=200) loss_summary = util_test.simple_values_from_events( util_test.latest_events(est.model_dir), ['OptimizeLoss/loss']) self.assertEqual(1, len(loss_summary)) def testLossInGraphCollection(self): class _LossCheckerHook(session_run_hook.SessionRunHook): def begin(self): self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES) hook = _LossCheckerHook() est = estimator.Estimator(model_fn=linear_model_fn) est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook]) self.assertTrue(hook.loss_collection) def test_export_returns_exported_dirname(self): expected = '/path/to/some_dir' with test.mock.patch.object(estimator, 'export') as mock_export_module: mock_export_module._export_estimator.return_value = expected est = estimator.Estimator(model_fn=linear_model_fn) actual = est.export('/path/to') self.assertEquals(expected, actual) def test_export_savedmodel(self): tmpdir = tempfile.mkdtemp() est, serving_input_fn = _build_estimator_for_export_tests(tmpdir) extra_file_name = os.path.join( compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file')) extra_file = gfile.GFile(extra_file_name, mode='w') extra_file.write(EXTRA_FILE_CONTENT) extra_file.close() assets_extra = {'some/sub/directory/my_extra_file': extra_file_name} export_dir_base = os.path.join( compat.as_bytes(tmpdir), compat.as_bytes('export')) export_dir = est.export_savedmodel( export_dir_base, serving_input_fn, assets_extra=assets_extra) self.assertTrue(gfile.Exists(export_dir_base)) self.assertTrue(gfile.Exists(export_dir)) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), compat.as_bytes( 'saved_model.pb')))) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), compat.as_bytes('variables')))) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), compat.as_bytes('variables/variables.index')))) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), compat.as_bytes('variables/variables.data-00000-of-00001')))) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), compat.as_bytes('assets')))) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), 
compat.as_bytes('assets/my_vocab_file')))) self.assertEqual( compat.as_bytes(VOCAB_FILE_CONTENT), compat.as_bytes( gfile.GFile( os.path.join( compat.as_bytes(export_dir), compat.as_bytes('assets/my_vocab_file'))).read())) expected_extra_path = os.path.join( compat.as_bytes(export_dir), compat.as_bytes('assets.extra/some/sub/directory/my_extra_file')) self.assertTrue( gfile.Exists( os.path.join( compat.as_bytes(export_dir), compat.as_bytes('assets.extra')))) self.assertTrue(gfile.Exists(expected_extra_path)) self.assertEqual( compat.as_bytes(EXTRA_FILE_CONTENT), compat.as_bytes(gfile.GFile(expected_extra_path).read())) expected_vocab_file = os.path.join( compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file')) # Restore, to validate that the export was well-formed. with ops.Graph().as_default() as graph: with session_lib.Session(graph=graph) as sess: loader.load(sess, [tag_constants.SERVING], export_dir) assets = [ x.eval() for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS) ] self.assertItemsEqual([expected_vocab_file], assets) graph_ops = [x.name for x in graph.get_operations()] self.assertTrue('input_example_tensor' in graph_ops) self.assertTrue('ParseExample/ParseExample' in graph_ops) self.assertTrue('linear/linear/feature/matmul' in graph_ops) # cleanup gfile.DeleteRecursively(tmpdir) class InferRealValuedColumnsTest(test.TestCase): def testInvalidArgs(self): with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'): estimator.infer_real_valued_columns_from_input(None) with self.assertRaisesRegexp(ValueError, 'cannot be tensors'): estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0)) def _assert_single_feature_column(self, expected_shape, expected_dtype, feature_columns): self.assertEqual(1, len(feature_columns)) feature_column = feature_columns[0] self.assertEqual('', feature_column.name) self.assertEqual( { '': parsing_ops.FixedLenFeature( shape=expected_shape, dtype=expected_dtype) }, feature_column.config) def testInt32Input(self): feature_columns = estimator.infer_real_valued_columns_from_input( np.ones( shape=[7, 8], dtype=np.int32)) self._assert_single_feature_column([8], dtypes.int32, feature_columns) def testInt32InputFn(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None)) self._assert_single_feature_column([8], dtypes.int32, feature_columns) def testInt64Input(self): feature_columns = estimator.infer_real_valued_columns_from_input( np.ones( shape=[7, 8], dtype=np.int64)) self._assert_single_feature_column([8], dtypes.int64, feature_columns) def testInt64InputFn(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None)) self._assert_single_feature_column([8], dtypes.int64, feature_columns) def testFloat32Input(self): feature_columns = estimator.infer_real_valued_columns_from_input( np.ones( shape=[7, 8], dtype=np.float32)) self._assert_single_feature_column([8], dtypes.float32, feature_columns) def testFloat32InputFn(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None)) self._assert_single_feature_column([8], dtypes.float32, feature_columns) def testFloat64Input(self): feature_columns = estimator.infer_real_valued_columns_from_input( np.ones( shape=[7, 8], dtype=np.float64)) self._assert_single_feature_column([8], dtypes.float64, feature_columns) def 
testFloat64InputFn(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None)) self._assert_single_feature_column([8], dtypes.float64, feature_columns) def testBoolInput(self): with self.assertRaisesRegexp( ValueError, 'on integer or non floating types are not supported'): estimator.infer_real_valued_columns_from_input( np.array([[False for _ in xrange(8)] for _ in xrange(7)])) def testBoolInputFn(self): with self.assertRaisesRegexp( ValueError, 'on integer or non floating types are not supported'): # pylint: disable=g-long-lambda estimator.infer_real_valued_columns_from_input_fn( lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None)) def testStringInput(self): with self.assertRaisesRegexp( ValueError, 'on integer or non floating types are not supported'): # pylint: disable=g-long-lambda estimator.infer_real_valued_columns_from_input( np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)])) def testStringInputFn(self): with self.assertRaisesRegexp( ValueError, 'on integer or non floating types are not supported'): # pylint: disable=g-long-lambda estimator.infer_real_valued_columns_from_input_fn( lambda: ( constant_op.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]), None)) def testBostonInputFn(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( boston_input_fn) self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64, feature_columns) def testIrisInputFn(self): feature_columns = estimator.infer_real_valued_columns_from_input_fn( iris_input_fn) self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64, feature_columns) class ReplicaDeviceSetterTest(test.TestCase): def testVariablesAreOnPs(self): tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}} with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() with ops.device(estimator._get_replica_device_setter(config)): v = variables_lib.Variable([1, 2]) w = variables_lib.Variable([2, 1]) a = v + w self.assertDeviceEqual('/job:ps/task:0', v.device) self.assertDeviceEqual('/job:ps/task:0', v.initializer.device) self.assertDeviceEqual('/job:ps/task:0', w.device) self.assertDeviceEqual('/job:ps/task:0', w.initializer.device) self.assertDeviceEqual('/job:worker', a.device) def testVariablesAreLocal(self): with ops.device( estimator._get_replica_device_setter(run_config.RunConfig())): v = variables_lib.Variable([1, 2]) w = variables_lib.Variable([2, 1]) a = v + w self.assertDeviceEqual('', v.device) self.assertDeviceEqual('', v.initializer.device) self.assertDeviceEqual('', w.device) self.assertDeviceEqual('', w.initializer.device) self.assertDeviceEqual('', a.device) def testMutableHashTableIsOnPs(self): tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}} with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() with ops.device(estimator._get_replica_device_setter(config)): default_val = constant_op.constant([-1, -1], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) input_string = constant_op.constant(['brain', 'salad', 'tank']) output = table.lookup(input_string) self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device) self.assertDeviceEqual('/job:ps/task:0', output.device) def testMutableHashTableIsLocal(self): with ops.device( estimator._get_replica_device_setter(run_config.RunConfig())): 
default_val = constant_op.constant([-1, -1], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) input_string = constant_op.constant(['brain', 'salad', 'tank']) output = table.lookup(input_string) self.assertDeviceEqual('', table._table_ref.device) self.assertDeviceEqual('', output.device) def testTaskIsSetOnWorkerWhenJobNameIsSet(self): tf_config = { 'cluster': { run_config.TaskType.PS: ['fake_ps_0'] }, 'task': { 'type': run_config.TaskType.WORKER, 'index': 3 } } with test.mock.patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}): config = run_config.RunConfig() with ops.device(estimator._get_replica_device_setter(config)): v = variables_lib.Variable([1, 2]) w = variables_lib.Variable([2, 1]) a = v + w self.assertDeviceEqual('/job:ps/task:0', v.device) self.assertDeviceEqual('/job:ps/task:0', v.initializer.device) self.assertDeviceEqual('/job:ps/task:0', w.device) self.assertDeviceEqual('/job:ps/task:0', w.initializer.device) self.assertDeviceEqual('/job:worker/task:3', a.device) if __name__ == '__main__': test.main()
apache-2.0
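For orientation, a minimal sketch of the tf.contrib.learn pattern the estimator tests above exercise: a model_fn returning (predictions, loss, train_op) and an Estimator driven through fit/evaluate. The names toy_input_fn, toy_model_fn and the toy arrays are illustrative and not part of the record; only calls that appear in the test file itself are used.

import numpy as np

from tensorflow.contrib import learn
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import models
from tensorflow.python.framework import constant_op


def toy_input_fn():
    # eight fixed examples with four features and a scalar target
    x = constant_op.constant(np.arange(32, dtype=np.float32).reshape(8, 4))
    y = constant_op.constant(np.arange(8, dtype=np.float32).reshape(8, 1))
    return x, y


def toy_model_fn(features, labels, mode):
    # zero-initialised linear regression, mirroring linear_model_fn above
    predictions, loss = models.linear_regression_zero_init(features, labels)
    train_op = optimizers.optimize_loss(
        loss, variables.get_global_step(), optimizer='Adagrad',
        learning_rate=0.1)
    return predictions, loss, train_op


est = learn.Estimator(model_fn=toy_model_fn)
est.fit(input_fn=toy_input_fn, steps=5)
print(est.evaluate(input_fn=toy_input_fn, steps=1))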
aalmah/pylearn2
pylearn2/scripts/tests/test_print_monitor_cv.py
48
1927
""" Test print_monitor_cv.py by training on a short TrainCV YAML file and analyzing the output pickle. """ import os import tempfile from pylearn2.config import yaml_parse from pylearn2.scripts import print_monitor_cv from pylearn2.testing.skip import skip_if_no_sklearn def test_print_monitor_cv(): """Test print_monitor_cv.py.""" skip_if_no_sklearn() handle, filename = tempfile.mkstemp() trainer = yaml_parse.load(test_print_monitor_cv_yaml % {'filename': filename}) trainer.main_loop() # run print_monitor_cv.py main print_monitor_cv.main(filename) # run print_monitor_cv.py main with all=True print_monitor_cv.main(filename, all=True) # cleanup os.remove(filename) test_print_monitor_cv_yaml = """ !obj:pylearn2.cross_validation.TrainCV { dataset_iterator: !obj:pylearn2.cross_validation.dataset_iterators.DatasetKFold { dataset: !obj:pylearn2.testing.datasets.random_one_hot_dense_design_matrix { rng: !obj:numpy.random.RandomState { seed: 1 }, num_examples: 10, dim: 10, num_classes: 2, }, }, model: !obj:pylearn2.models.mlp.MLP { layers: [ !obj:pylearn2.models.mlp.Sigmoid { layer_name: h0, dim: 8, irange: 0.05, }, !obj:pylearn2.models.mlp.Softmax { layer_name: y, n_classes: 2, irange: 0.05, }, ], nvis: 10, }, algorithm: !obj:pylearn2.training_algorithms.bgd.BGD { batch_size: 5, line_search_mode: 'exhaustive', conjugate: 1, termination_criterion: !obj:pylearn2.termination_criteria.EpochCounter { max_epochs: 1, }, }, save_path: %(filename)s, } """
bsd-3-clause
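A hedged sketch of what print_monitor_cv consumes, assuming the pickle written to TrainCV's save_path holds one trained model per cross-validation fold, each carrying its monitor channels; the filename is hypothetical.

from pylearn2.utils import serial

# 'traincv_output.pkl' is a hypothetical save_path from a TrainCV run
models = serial.load('traincv_output.pkl')
for fold, model in enumerate(models):
    for name, channel in sorted(model.monitor.channels.items()):
        # final recorded value of each monitor channel for this fold
        print(fold, name, float(channel.val_record[-1]))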
0x0all/scikit-learn
sklearn/feature_extraction/tests/test_dict_vectorizer.py
276
3790
# Authors: Lars Buitinck <L.J.Buitinck@uva.nl>
#          Dan Blanchard <dblanchard@ets.org>
# License: BSD 3 clause

from random import Random

import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal

from sklearn.utils.testing import (assert_equal, assert_in, assert_false,
                                   assert_true)

from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2


def test_dictvectorizer():
    D = [{"foo": 1, "bar": 3},
         {"bar": 4, "baz": 2},
         {"bar": 1, "quux": 1, "quuux": 2}]

    for sparse in (True, False):
        for dtype in (int, np.float32, np.int16):
            for sort in (True, False):
                for iterable in (True, False):
                    v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
                    X = v.fit_transform(iter(D) if iterable else D)

                    assert_equal(sp.issparse(X), sparse)
                    assert_equal(X.shape, (3, 5))
                    assert_equal(X.sum(), 14)
                    assert_equal(v.inverse_transform(X), D)

                    if sparse:
                        # CSR matrices can't be compared for equality
                        assert_array_equal(X.A,
                                           v.transform(iter(D) if iterable
                                                       else D).A)
                    else:
                        assert_array_equal(X,
                                           v.transform(iter(D) if iterable
                                                       else D))

                    if sort:
                        assert_equal(v.feature_names_,
                                     sorted(v.feature_names_))


def test_feature_selection():
    # make two feature dicts with two useful features and a bunch of useless
    # ones, in terms of chi2
    d1 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=1, useful2=20)
    d2 = dict([("useless%d" % i, 10) for i in range(20)],
              useful1=20, useful2=1)

    for indices in (True, False):
        v = DictVectorizer().fit([d1, d2])
        X = v.transform([d1, d2])
        sel = SelectKBest(chi2, k=2).fit(X, [0, 1])

        v.restrict(sel.get_support(indices=indices), indices=indices)
        assert_equal(v.get_feature_names(), ["useful1", "useful2"])


def test_one_of_k():
    D_in = [{"version": "1", "ham": 2},
            {"version": "2", "spam": .3},
            {"version=3": True, "spam": -1}]
    v = DictVectorizer()
    X = v.fit_transform(D_in)
    assert_equal(X.shape, (3, 5))

    D_out = v.inverse_transform(X)
    assert_equal(D_out[0], {"version=1": 1, "ham": 2})

    names = v.get_feature_names()
    assert_true("version=2" in names)
    assert_false("version" in names)


def test_unseen_or_no_features():
    D = [{"camelot": 0, "spamalot": 1}]
    for sparse in [True, False]:
        v = DictVectorizer(sparse=sparse).fit(D)

        X = v.transform({"push the pram a lot": 2})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        X = v.transform({})
        if sparse:
            X = X.toarray()
        assert_array_equal(X, np.zeros((1, 2)))

        try:
            v.transform([])
        except ValueError as e:
            assert_in("empty", str(e))


def test_deterministic_vocabulary():
    # Generate equal dictionaries with different memory layouts
    items = [("%03d" % i, i) for i in range(1000)]
    rng = Random(42)
    d_sorted = dict(items)
    rng.shuffle(items)
    d_shuffled = dict(items)

    # check that the memory layout does not impact the resulting vocabulary
    v_1 = DictVectorizer().fit([d_sorted])
    v_2 = DictVectorizer().fit([d_shuffled])
    assert_equal(v_1.vocabulary_, v_2.vocabulary_)
bsd-3-clause
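A self-contained sketch of the DictVectorizer behaviour the tests above check: numeric values pass through as single columns, string values expand one-of-K, and inverse_transform maps rows back to dicts. The measurements data is illustrative.

from sklearn.feature_extraction import DictVectorizer

measurements = [
    {"city": "Dubai", "temperature": 33.},
    {"city": "London", "temperature": 12.},
    {"city": "San Francisco", "temperature": 18.},
]

vec = DictVectorizer(sparse=False)
X = vec.fit_transform(measurements)
print(vec.get_feature_names())
# ['city=Dubai', 'city=London', 'city=San Francisco', 'temperature']
print(X.shape)                   # (3, 4)
print(vec.inverse_transform(X)[0])
# {'city=Dubai': 1.0, 'temperature': 33.0}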
mumuwoyou/vnpy-dev
vn.trader/ctaStrategy/strategy/strategyTmm2ag.py
1
18603
# encoding: UTF-8 from ctaBase import * from ctaTemplate import CtaTemplate import talib import numpy as np import math import copy from datetime import datetime ######################################################################## class Tmm2agStrategy(CtaTemplate): className = 'Tmm2agStrategy' author = u'用Python的交易员' barDbName = MINUTE_5_DB_NAME # 策略参数 initDays = 10 # 初始化数据所用的天数 fixedSize = 1 # 开仓 # 策略变量 bar = None # K线对象 m5bar = None barMinute = EMPTY_STRING # K线当前的分钟 bufferSize = 200 # 需要缓存的数据的大小 bufferCount = 0 # 目前已经缓存了的数据的计数 UporDownCount = 0 # 上升或下降的计数 highArray = np.zeros(bufferSize) # K线最高价的数组 lowArray = np.zeros(bufferSize) # K线最低价的数组 closeArray = np.zeros(bufferSize) # K线收盘价的数组 openArray = np.zeros(bufferSize) # K线开盘价的数组 # Tmm K线 H1Array = np.zeros(bufferSize) # K线最高价的数组 L1Array = np.zeros(bufferSize) # K线最低价的数组 C1Array = np.zeros(bufferSize) # K线收盘价的数组 O1Array = np.zeros(bufferSize) # K线开盘价的数组 UPorDOWNArray = np.zeros(bufferSize) H1Value = 0 L1Value = 0 C1Value = 0 O1Value = 0 UPorDOWNValue = 0 orderList = [] # 保存委托代码的列表 # 参数列表,保存了参数的名称 paramList = ['name', 'className', 'author', 'vtSymbol'] # 变量列表,保存了变量的名称 varList = ['inited', 'trading', 'pos', 'ydPos', 'lastEntryPrice', 'H1Value', 'L1Value', 'O1Value', 'C1Value', 'UPorDOWNValue'] #---------------------------------------------------------------------- def __init__(self, ctaEngine, setting): """Constructor""" super(Tmm2agStrategy, self).__init__(ctaEngine, setting) # 注意策略类中的可变对象属性(通常是list和dict等),在策略初始化时需要重新创建, # 否则会出现多个策略实例之间数据共享的情况,有可能导致潜在的策略逻辑错误风险, # 策略类中的这些可变对象属性可以选择不写,全都放在__init__下面,写主要是为了阅读 # 策略时方便(更多是个编程习惯的选择) self.isPrePosHaved = False self.isAlreadyTraded = False #---------------------------------------------------------------------- def onInit(self): """初始化策略(必须由用户继承实现)""" self.writeCtaLog(u'%s策略初始化' %self.name) # 载入历史数据,并采用回放计算的方式初始化策略数值 initData = self.loadBar(self.initDays) for bar in initData: self.onBar(bar) self.putEvent() #---------------------------------------------------------------------- def onStart(self): """启动策略(必须由用户继承实现)""" self.writeCtaLog(u'%s策略启动' %self.name) self.putEvent() #---------------------------------------------------------------------- def onStop(self): """停止策略(必须由用户继承实现)""" self.writeCtaLog(u'%s策略停止' %self.name) self.putEvent() #---------------------------------------------------------------------- def onTick(self, tick): """收到行情TICK推送(必须由用户继承实现)""" # 计算K线 tickMinute = tick.datetime.minute if tickMinute != self.barMinute: if self.bar: self.bar.datetime = tick.datetime.replace(second=0, microsecond=0) self.bar.date = tick.date self.bar.time = tick.time # self.writeCtaLog(u'记录分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s' # % (self.bar.vtSymbol, self.bar.time, self.bar.open, self.bar.high, # # self.bar.low, self.bar.close)) if self.barInTime(tick): self.procecssBar(self.bar) bar = CtaBarData() bar.vtSymbol = tick.vtSymbol bar.symbol = tick.symbol bar.exchange = tick.exchange bar.open = tick.lastPrice bar.high = tick.lastPrice bar.low = tick.lastPrice bar.close = tick.lastPrice bar.date = tick.date bar.time = tick.time bar.datetime = tick.datetime # K线的时间设为第一个Tick的时间 self.bar = bar # 这种写法为了减少一层访问,加快速度 self.barMinute = tickMinute # 更新当前的分钟 else: # 否则继续累加新的K线 bar = self.bar # 写法同样为了加快速度 bar.high = max(bar.high, tick.lastPrice) bar.low = min(bar.low, tick.lastPrice) bar.close = tick.lastPrice #---------------------------------------------------------------------- def procecssBar(self,bar): if not self.m5bar or bar.datetime.minute % 5 == 1: m5bar = CtaBarData() m5bar.vtSymbol = bar.vtSymbol 
m5bar.symbol = bar.vtSymbol m5bar.exchange = bar.exchange m5bar.open = bar.open m5bar.high = bar.high m5bar.low = bar.low m5bar.close = bar.close m5bar.date = bar.date m5bar.time = bar.time m5bar.datetime = bar.datetime m5bar.volume = bar.volume m5bar.openInterest = bar.openInterest self.m5bar = m5bar else: m5bar = self.m5bar m5bar.high = max(m5bar.high, bar.high) m5bar.low = min(m5bar.low, bar.low) m5bar.close = bar.close m5bar.volume = m5bar.volume + bar.volume m5bar.openInterest = bar.openInterest if bar.datetime.minute % 5 == 0: newBar = copy.copy(m5bar) newBar.datetime = bar.datetime.replace(second=0,microsecond=0) newBar.date = bar.date newBar.time = bar.time self.onBar(newBar) # self.writeCtaLog(u'记录3分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s' # % (newBar.vtSymbol, newBar.time, newBar.open, newBar.high, # newBar.low, newBar.close)) #---------------------------------------------------------------------- def onBar(self, bar): """收到Bar推送(必须由用户继承实现)""" # 撤销之前发出的尚未成交的委托(包括限价单和停止单) for orderID in self.orderList: self.cancelOrder(orderID) self.orderList = [] # 保存K线数据 self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize] self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize] self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize] self.openArray[0:self.bufferSize - 1] = self.openArray[1:self.bufferSize] self.C1Array[0:self.bufferSize - 1] = self.C1Array[1:self.bufferSize] self.H1Array[0:self.bufferSize - 1] = self.H1Array[1:self.bufferSize] self.L1Array[0:self.bufferSize - 1] = self.L1Array[1:self.bufferSize] self.O1Array[0:self.bufferSize - 1] = self.O1Array[1:self.bufferSize] self.UPorDOWNArray[0:self.bufferSize - 1] = self.UPorDOWNArray[1:self.bufferSize] self.closeArray[-1] = bar.close self.highArray[-1] = bar.high self.lowArray[-1] = bar.low self.openArray[-1] = bar.open self.bufferCount += 1 if self.bufferCount == 1: #第一天特殊处理 if self.closeArray[-1] >= self.openArray[-1]: #上涨 self.O1Array[-1] = self.openArray[-1] self.L1Array[-1] = self.openArray[-1] self.H1Array[-1] = self.closeArray[-1] self.C1Array[-1] = self.closeArray[-1] self.UPorDOWNArray[-1] = 1 else: #下跌 self.O1Array[-1] = self.openArray[-1] self.H1Array[-1] = self.openArray[-1] self.L1Array[-1] = self.closeArray[-1] self.C1Array[-1] = self.closeArray[-1] self.UPorDOWNArray[-1] = 0 self.UporDownCount = 1 return if self.bufferCount > 1: if self.UPorDOWNArray[-2] == 1: #昨天是上涨 if self.closeArray[-1] > self.H1Array[-2]: #第一种情况,上涨:今天的收盘价超过前一个柱子的最高点 self.O1Array[-1] = self.H1Array[-2] self.L1Array[-1] = self.H1Array[-2] self.H1Array[-1] = self.closeArray[-1] self.C1Array[-1] = self.closeArray[-1] self.UPorDOWNArray[-1] = 1 #表示上涨 self.UporDownCount += 1 if self.closeArray[-1] < self.L1Array[-2]: #第二种情况,下跌:今天的收盘价,下跌超过前三个柱子的最低价 #低过前一个柱子的最低价,才开始计算 hh = self.L1Array[-2] n = 1 if self.bufferCount > 2: for a in range(3,self.bufferSize): if self.UPorDOWNArray[-a] == 0: break if self.H1Array[-a] != self.H1Array[-a + 1] and self.H1Array[-a] != 0: n = n + 1 hh = self.L1Array[-a] if n == 3: break if self.closeArray[-1] < hh: self.O1Array[-1] = self.L1Array[-2] self.H1Array[-1] = self.L1Array[-2] self.L1Array[-1] = self.closeArray[-1] self.C1Array[-1] = self.closeArray[-1] self.UPorDOWNArray[-1] = 0 self.UporDownCount = 1 if self.UPorDOWNArray[-2] == 0: #昨天是下跌 if self.closeArray[-1] < self.L1Array[-2]: #第一种情况,下跌:今天的收盘价超过前一个柱子的最低点 self.O1Array[-1] = self.L1Array[-2] self.H1Array[-1] = self.L1Array[-2] self.L1Array[-1] = self.closeArray[-1] self.C1Array[-1] = self.closeArray[-1] self.UPorDOWNArray[-1] = 
0 #表示下跌 self.UporDownCount += 1 if self.closeArray[-1] > self.H1Array[-2]: #第二种情况,上涨:今天的收盘价,下跌超过前三个柱子的最高价 #高过前一个柱子的最高价,才开始计算 hh = self.H1Array[-2] n = 1 if self.bufferCount > 2: for a in range(3,self.bufferSize): if self.UPorDOWNArray[-a] == 1: break if self.H1Array[-a] != self.H1Array[-a + 1] and self.H1Array[-a] != 0: n = n + 1 hh = self.H1Array[-a] if n == 3: break if self.closeArray[-1] > hh: self.O1Array[-1] = self.H1Array[-2] self.L1Array[-1] = self.H1Array[-2] self.H1Array[-1] = self.closeArray[-1] self.C1Array[-1] = self.closeArray[-1] self.UPorDOWNArray[-1] = 1 self.UporDownCount = 1 self.O1Value = self.O1Array[-1] self.L1Value = self.L1Array[-1] self.H1Value = self.H1Array[-1] self.C1Value = self.C1Array[-1] self.UPorDOWNValue = self.UPorDOWNArray[-1] self.writeCtaLog(u'记录5分钟线数据%s,时间:%s, O:%s, H:%s, L:%s, C:%s, PD:%s, Count:%s' % (bar.vtSymbol, bar.time,self.O1Value, self.H1Value, self.L1Value, self.C1Value, self.UPorDOWNValue, self.UporDownCount)) # 判断是否要进行交易 # 当前无仓位 if self.pos == 0: #self.intraTradeHigh = bar.high #self.intraTradeLow = bar.low # 当前K线上涨前一K线下跌买入开仓 if self.UPorDOWNArray[-1] == 1 and self.UPorDOWNArray[-2] == 0: # 这里为了保证成交,选择超价5个整指数点下单 orderID = self.buy(bar.close + 5, self.fixedSize * 2) self.orderList.append(orderID) self.lastEntryPrice = bar.close # 当前K线下跌前一K线上涨卖出开仓 elif self.UPorDOWNArray[-1] == 0 and self.UPorDOWNArray[-2] == 1: orderID = self.short(bar.close - 5, self.fixedSize * 2) self.orderList.append(orderID) self.lastEntryPrice = bar.close # 持有多头仓位 elif self.pos > 0: # 为上涨,上涨计数为4时加仓 if self.UPorDOWNValue == 1 and self.UporDownCount >= 6 and self.pos == 2 * self.fixedSize: orderID = self.sell(bar.close - 5, self.fixedSize) self.orderList.append(orderID) if self.UPorDOWNValue == 1 and self.UporDownCount >= 4: orderID = self.sell(self.lastEntryPrice, abs(self.pos), True) self.orderList.append(orderID) else: orderID = self.sell(self.lastEntryPrice - 20, abs(self.pos), True) self.orderList.append(orderID) # 当前K线下跌前一K线上涨卖出开仓 if self.UPorDOWNArray[-1] == 0: orderID = self.sell(bar.close - 5, abs(self.pos)) self.orderList.append(orderID) orderID = self.short(bar.close - 5, self.fixedSize * 2) self.orderList.append(orderID) self.lastEntryPrice = bar.close # 持有空头仓位 elif self.pos < 0: # 为下跌,下跌计数为4时加仓 if self.UPorDOWNValue == 0 and self.UporDownCount >= 6 and self.pos == -2 * self.fixedSize: orderID = self.cover(bar.close + 5, self.fixedSize) self.orderList.append(orderID) if self.UPorDOWNValue == 0 and self.UporDownCount >= 4: orderID = self.cover(self.lastEntryPrice, abs(self.pos), True) self.orderList.append(orderID) else: orderID = self.cover(self.lastEntryPrice + 20, abs(self.pos), True) self.orderList.append(orderID) # 当前K线上涨前一K线下跌买入开仓 if self.UPorDOWNArray[-1] == 1: # 这里为了保证成交,选择超价5个整指数点下单 orderID = self.cover(bar.close + 5, abs(self.pos)) self.orderList.append(orderID) orderID = self.buy(bar.close + 5, self.fixedSize * 2) self.orderList.append(orderID) self.lastEntryPrice = bar.close # 发出状态更新事件 self.putEvent() #---------------------------------------------------------------------- def onOrder(self, order): """收到委托变化推送(必须由用户继承实现)""" pass #---------------------------------------------------------------------- def onTrade(self, trade): pass #----------------------------------------------------------------------- def onTimer(self): pass if __name__ == '__main__': # 提供直接双击回测的功能 # 导入PyQt4的包是为了保证matplotlib使用PyQt4而不是PySide,防止初始化出错 from ctaBacktesting import * from PyQt4 import QtCore, QtGui # 创建回测引擎 engine = BacktestingEngine() # 设置引擎的回测模式为K线 
engine.setBacktestingMode(engine.BAR_MODE) # 设置回测用的数据起始日期 engine.setStartDate('20161010') # 设置产品相关参数 engine.setSlippage(0.2) # 股指1跳 engine.setRate(0.3 / 10000) # 万0.3 engine.setSize(15) # 股指合约大小 # 设置使用的历史数据库 engine.setDatabase(MINUTE_DB_NAME, 'ag1612') ## 在引擎中创建策略对象 # d = {'atrLength': 11} # engine.initStrategy(AtrRsiStrategy, d) ## 开始跑回测 ##engine.runBacktesting() ## 显示回测结果 ##engine.showBacktestingResult() # 跑优化 setting = OptimizationSetting() # 新建一个优化任务设置对象 setting.setOptimizeTarget('capital') # 设置优化排序的目标是策略净盈利 setting.addParameter('atrLength', 11, 20, 1) # 增加第一个优化参数atrLength,起始11,结束12,步进1 setting.addParameter('atrMaLength', 20, 30, 5) # 增加第二个优化参数atrMa,起始20,结束30,步进1 # 性能测试环境:I7-3770,主频3.4G, 8核心,内存16G,Windows 7 专业版 # 测试时还跑着一堆其他的程序,性能仅供参考 import time start = time.time() # 运行单进程优化函数,自动输出结果,耗时:359秒 # engine.runOptimization(AtrRsiStrategy, setting) # 多进程优化,耗时:89秒 engine.runParallelOptimization(AtrRsiStrategy, setting) print u'耗时:%s' % (time.time() - start)
mit
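A minimal sketch of running the strategy above through a plain backtest, instead of the parameter optimisation shown in its __main__ block; the import paths and the empty setting dict assume the vn.py ctaStrategy package layout and may need adjusting to a local installation.

from ctaBacktesting import BacktestingEngine, MINUTE_DB_NAME
from strategyTmm2ag import Tmm2agStrategy

engine = BacktestingEngine()
engine.setBacktestingMode(engine.BAR_MODE)   # bar-level replay
engine.setStartDate('20161010')
engine.setSlippage(0.2)                      # one tick of slippage
engine.setRate(0.3 / 10000)                  # commission rate
engine.setSize(15)                           # contract multiplier
engine.setDatabase(MINUTE_DB_NAME, 'ag1612')

engine.initStrategy(Tmm2agStrategy, {})      # empty setting dict
engine.runBacktesting()
engine.showBacktestingResult()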