code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## ENSC 813 Project
#
# # Classifying car images in the TCC dataset
#
# ## <NAME> (301400489)
#
# ### Spring 2020. Simon Fraser University
# Parameters used:
#
# image_dim = dimension of images resized
#
# name_brand_1 = name of first brand of car
#
# name_brand_2 = name of second brand of car
# Select two brands for binary classification. Two of [Audi, BMW, Honda, Lexus, Mercedes-Benz, Toyota]
name_brand_1 = 'Honda';
name_brand_2 = 'Toyota';
# +
# Import the necessary packages
# numpy for linear algebra, cv2 for image processing
# glob and os to navigate directories
import numpy as np
import random
import glob
import os
import sys
# matplotlib for plotting
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['DejaVu Sans']})
rc('text', usetex=True)
params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params);
# pandas for excel sheet wrangling
import pandas as pd
#import json
# %matplotlib inline
# + language="javascript"
# IPython.notebook.kernel.execute(`notebookName = '${IPython.notebook.notebook_name}'`);
# +
# Sort out utilities for file naming
# for Jupyter notebook:
# https://stackoverflow.com/questions/52691468/can-a-jupyter-notebook-find-its-own-filename
script_name = notebookName[:-6];
# All files created by this script will be named according to this:
full_name = script_name+'_'+name_brand_1+'_'+name_brand_2+ "_undersampl";
print('The full name is %s' %full_name);
# -
path_base = 'TCC_dataset/'
print('Available classes in the dataset are: ');
classes_list = os.listdir(path_base)
print(classes_list);
# +
# file type of interest
file_extension = "jpg";
classes_count = np.zeros([len(classes_list)],dtype=int);
# count how many examples there are for each class
for i in range(len(classes_list)):
classes_count[i] = len(glob.glob1(path_base + classes_list[i]+"/","*."+file_extension));
classes_count_total = np.sum(classes_count);
print('Our dataset comprises of %d images.' %classes_count_total);
# calculate statistics of this dataset
classes_prob = classes_count*(1/np.sum(classes_count));
classes_mean = np.mean(classes_count);
classes_std = np.std(classes_count);
print("The mean number of examples is %.3f \n" %classes_mean);
print("The standard deviation is %.3f examples. \n" %classes_std);
# +
# Choose brands for classification
chosen_classes = [name_brand_1,name_brand_2];
print('We will classify images between the following classes:');
print(chosen_classes);
# Count number of examples for each class
chosen_classes_num = np.zeros([len(chosen_classes)],dtype=int);
for i in range(len(chosen_classes)):
chosen_classes_num[i] = classes_count[classes_list.index(chosen_classes[i])];
chosen_classes_total = np.sum(chosen_classes_num);
print('This subset consists of %d images.' %chosen_classes_total);
# +
fig = plt.figure(1);
pos = np.arange(len(classes_list));
color_list = ['limegreen','indianred','teal','darkorange','cornflowerblue','lightsalmon'];
for index in pos:
plt.bar(index,classes_count[index],color=color_list[index],label=r"%.3f" %(classes_prob[index]));
plt.xticks(pos,classes_list);
plt.title(r"\textbf{Distribution of classes in the} \textit{TCC dataset}",fontsize=12)
plt.xlabel(r"\textbf{Classes}")
plt.ylabel(r"\textbf{Count}")
plt.legend(loc='upper left');
plt.savefig(full_name+'_full_dataset.png');
#plt.savefig(full_name+'full_dataset.pdf');
plt.show();
# -
# The above bar graph shows that the dataset is unbalanced.
#
# Ideally, we would prefer to have a balanced dataset, that is, each brand should be present 1/6th of the time.
#
# Clearly, some brands, such as *Toyota* with 26.4%, are overrepresented while others are underrepresented, such as *Mercedes-Benz* with 11.1%.
# We balance the dataset by undersampling the overrepresented classes.
#
# We randomly choose which particular example from the overrepresented classes will be chosen and which will be excluded from the dataset used for learning.
# +
# Find the least represented class and undersample the other class
smallest_count_chosen = np.min(chosen_classes_num);
smallest_count_chosen_index = np.argmin(chosen_classes_num);
smallest_count_chosen_id = chosen_classes[smallest_count_chosen_index];
print('The least represented class is %s which has %d examples.' %(smallest_count_chosen_id,smallest_count_chosen));
print('We will undersample the other class so that we end up with a balanced dataset')
# +
# Create list of file names for each class to undersample
# Choose randomly in this list to obtain the required number of examples
overall_files_list = [];
for i in range(0,len(chosen_classes)):
files_list = [];
for file in glob.glob(path_base+"/"+chosen_classes[i]+"/*."+file_extension):
index_for_filename = file.index('\\');
files_list.append(file[index_for_filename+1:]);
random.shuffle(files_list);
overall_files_list.extend(files_list[:smallest_count_chosen]);
df_list = pd.DataFrame(overall_files_list);
df_list.to_excel(full_name+'.xlsx', engine='xlsxwriter')
print('Examples per class:')
print(len(overall_files_list)/len(chosen_classes));
# -
# We have created an Excel worksheet to save the names of all files which will be used for learning.
#
# We now verify that we created this worksheet correctly.
# Load excel sheet and verify the distribution of classes
# Read the excel file and pick out the images which are relevant to this script
worksheet_name = 'Sheet1';
list_file = full_name+'.xlsx';
data_frames = pd.read_excel(list_file, sheet_name=worksheet_name);
curated_file_list = np.asarray(data_frames.values.tolist());
# +
curated_file_list_cleaned = [None]*len(curated_file_list);
curated_file_list_classes = [None]*len(curated_file_list);
for k in range(len(curated_file_list)):
filename = str(curated_file_list[k]);
curated_file_list_cleaned[k] = filename[2:-2];
curated_file_list_classes[k] = filename[2:].split("_")[0];
# Find unique classes and their frequencies
curated_brands, curated_brands_freq = np.unique(curated_file_list_classes,return_counts=True);
# Compute stats for the undersampled dataset
curated_brands_prob = np.asarray(curated_brands_freq,dtype=np.float64)*(1/np.sum(np.asarray(curated_brands_freq,dtype=np.float64)));
curated_brands_mean = np.mean(np.asarray(curated_brands_freq,dtype=np.float64));
curated_brands_std = np.std(np.asarray(curated_brands_freq,dtype=np.float64));
print('For the undersampled dataset:')
print("The mean number of examples is %.3f " %curated_brands_mean);
print("The standard deviation is %.3f examples." %curated_brands_std);
# +
# Plot the selected dataset (after undersampling)
fig = plt.figure(2);
pos = np.arange(len(curated_brands));
color_list = ['limegreen','indianred','teal','darkorange','cornflowerblue','lightsalmon'];
for index in pos:
plt.bar(index,curated_brands_freq[index],color=color_list[index],edgecolor='dimgray',label=r"%.3f" %(curated_brands_prob[index]));
plt.xticks(pos,curated_brands);
plt.title(r"\textbf{Distribution of classes in the curated} \textit{TCC dataset}",fontsize=12)
plt.xlabel(r"\textbf{Classes}")
plt.ylabel(r"\textbf{Count}")
plt.legend(loc='upper left');
plt.savefig(full_name+'_balanced_dataset.png');
#plt.savefig(full_name+'balanced_dataset.pdf');
plt.show();
# -
# As can be seen in the above figure, the two brands are equally distributed in the curated dataset.
#
# This helps to avoid issues when working on the learning task.
| notebooks/nb_02_binary_classification_00.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os
import numpy as np
import pandas as pd
import subprocess
import matplotlib as mpl
import matplotlib.pyplot as plt
from IPython.display import display
from ipywidgets import IntProgress
# -
print(sys.version)
mpl.rcParams['figure.facecolor'] = '#BBBBBB'
# +
GENOME = "AgamP4.11"
GENE_PREFIX = "AGAP"
TRANSFN = "../datafiles/transcript_list_{}".format(GENOME)
CHOPCHOP_OUT_DIR = "../all_transcripts_run/".format(GENOME)
MIN_GC = 30 # minimum allowable GC percentage (inclusive)
MAX_GC = 70 # maximum allowable GC percentage (inclusive)
MAX_OFFTARGET_HITS = 0 # maximum allowable total offtarget hits
# -
tlist = pd.read_csv(TRANSFN, header=None, names=['transcript_id'])
t = tlist['transcript_id'].str.rsplit('-', n=1, expand=True)
t.columns = ['gene', 'splice_id']
tlist = pd.concat((tlist,t), axis=1)
tlist
t['gene'].unique().shape
# histogram of number of splice variants by gene
t = tlist['gene'].value_counts()
fig,ax = plt.subplots()
ax.hist(t, bins=np.arange(t.min()-.5,t.max()+1.5), edgecolor='k');
ax.set_yscale('log')
ax.set_xlim((t.min()-.5, t.max()+.5))
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
ax.set_title('All Genes')
ax.set_xlabel('# of splice variants')
ax.set_ylabel('# of genes (log scale)')
fig.tight_layout()
# +
# use awk to append the transcript_ids and merge all the chopchop outputs
# stream than into pandas.read_csv
cmd = ("/usr/bin/awk -F "+r"'\t' "
"'BEGIN{OFS=FS}{if(FNR==1){if(NR==FNR){print $0, "+'"transcript_id"'+"}}else{print $0, FILENAME}}' "+
"{}/{}*".format(GENOME, GENE_PREFIX))
print(cmd)
with subprocess.Popen(cmd, cwd=CHOPCHOP_OUT_DIR, shell=True, stdout=subprocess.PIPE) as p:
d_orig = pd.read_csv(p.stdout, sep='\t', index_col=0)
# -
# +
d = d_orig.copy(deep=True) # so we don't munge the original data
d.reset_index(inplace=True)
# set the >=555 sort of entries in the MM columns to 999
d["sumMM"] = d.loc[:,("MM0","MM1","MM2","MM3")].\
apply(pd.to_numeric, errors='coerce').fillna(999).sum(axis=1)
# filter
flt = ((d["GC content (%)"] >= MIN_GC) &
(d["GC content (%)"] <= MAX_GC) &
(d["sumMM"] <= MAX_OFFTARGET_HITS))
if True: # apply filter
d = d.loc[flt,:]
else:
d['flt'] = flt
# add a gene column
d['gene'] = d['transcript_id'].str.rsplit('-',1).str.get(0)
print("{} of {} unique transcript_ids left after flitering".format(
d['transcript_id'].unique().shape[0],
d_orig['transcript_id'].unique().shape[0]))
# +
# unique target+gene
t = d.set_index('Genomic location').loc[:,('Target sequence','gene')]
single_target_different_genes = t[t.index.duplicated() & ~t.duplicated()]
print(single_target_different_genes.shape[0], "targets hitting more than one gene")
print(single_target_different_genes['gene'].unique().shape[0], "genes affected")
# filter to targets which are unique or hit different genes
t = t[~t.duplicated(keep='first')]
print(t.shape[0], 'unique target+gene combinations')
# number of genes with 0 targets
total_num_genes = tlist['gene'].unique().shape[0]
num_0_target_genes =total_num_genes - t['gene'].unique().shape[0]
print(num_0_target_genes, "genes of", total_num_genes, "with no targets")
# -
# +
cnt = t['gene'].value_counts().values
# add entries for genes with 0 targets
cnt = np.append(cnt, [0]*num_0_target_genes)
# histogram of number of splice variants by gene
fig,ax = plt.subplots()
ax.hist(cnt, edgecolor='k', bins=cnt.max()+1, align='mid')#, bins=np.arange(cnt.min()-.5,cnt.max()+1.5));
ax.set_yscale('log')
# ax.set_xlim((-1,10))
# ax.set_xlim((cnt.min()-.5, cnt.max()+.5))
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
ax.set_title('{} all genes'.format(GENOME))
ax.set_xlabel('# of Cas9 targets ({} have 0 targets)'.format(num_0_target_genes))
ax.set_ylabel('# of genes (log scale)')
fig.tight_layout()
# +
cnt = t['gene'].value_counts().values
# add entries for genes with 0 targets
cnt = np.append(cnt, [0]*num_0_target_genes)
fig,ax = plt.subplots()
# inverse emperical cdf
x = np.sort(cnt)
y = np.arange(1,len(x)+1)/float(len(x))
plt.plot(x, 1-y)
ax.set_yscale('log')
ax.set_title('AgamP4.11 all genes')
ax.set_xlabel('# of Cas9 targets ({:.2f}% have 0 targets)'.format(
100*num_0_target_genes/total_num_genes))
ax.set_ylabel('proportion of genes with >= # targets')
fig.tight_layout()
# -
cnt = t['gene'].value_counts().values
cnt = np.append(cnt, [0]*num_0_target_genes)
cnt = pd.Series(cnt)
cnt.describe()
| parsing/parse_all_transcripts_run-AgamP4.11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
pip install emoji
import emoji
import pandas as pd
import numpy as np
emoji.EMOJI_UNICODE
import matplotlib.pyplot as plt
import tensorflow as tf
tf.__version__
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import tensorflow.keras.utils as ku
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import Embedding
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
emoji_dict = { "0": "\u2764\uFE0F",
"1": ":baseball:",
"2": ":beaming_face_with_smiling_eyes:",
"3": ":downcast_face_with_sweat:",
"4": ":fork_and_knife:",
}
for e in emoji_dict.values():
print(emoji.emojize(e))
df1=pd.read_csv('train_emoji.csv')
df1.head(132)
df1.shape
df1['Labels'].value_counts()
df2=pd.read_csv('emojify_data.csv')
df2.head(132)
df2.shape
train_df=pd.concat([df1,df2],ignore_index=True)
train_df
test_df=pd.read_csv('test_emoji.csv')
test_df.head(56)
x_train=train_df['Text']
x_test=test_df['Text']
y_train=train_df['Labels']
y_train.value_counts()
y_test=test_df['Labels']
y_test.value_counts()
y_train_categorical=ku.to_categorical(y_train,num_classes=5)
y_test_categorical=ku.to_categorical(y_test,num_classes=5)
tokenizer=Tokenizer()
tokenizer.fit_on_texts(x_train)
word_index=tokenizer.word_index
word_index
vocab_size=len(word_index)+1
vocab_size
train_sequence=tokenizer.texts_to_sequences(x_train)
train_padded=pad_sequences(train_sequence,maxlen=10)
test_sequence=tokenizer.texts_to_sequences(x_test)
test_padded=pad_sequences(test_sequence,maxlen=10)
# +
# This is the 100 dimension version of GloVe from Stanford
# I am using a api for faster access
'''!wget --no-check-certificate \
https://storage.googleapis.com/laurencemoroney-blog.appspot.com/glove.6B.100d.txt \
-O /tmp/glove.6B.100d.txt'''
embeddings_index = {};
with open('glove.6B.100d.txt',errors="ignore") as f:
for line in f:
values = line.split();
word = values[0];
coefs = np.asarray(values[1:], dtype='str');
embeddings_index[word] = coefs;
embeddings_matrix = np.zeros((vocab_size,100));
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word);
if embedding_vector is not None:
embeddings_matrix[i] = embedding_vector;
# -
model1=Sequential()
model1.add(Embedding(vocab_size,100,input_length=10,weights=[embeddings_matrix],trainable=False))
model1.add(Dropout(0.3))
model1.add(Bidirectional(LSTM(32)))
model1.add(Dense(5,activation='softmax'))
model1.summary()
model1.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
history=model1.fit(train_padded,y_train_categorical,epochs=30,validation_data=(test_padded,y_test_categorical))
fig,(ax1,ax2)=plt.subplots(nrows=1,ncols=2,figsize=(20,6))
ax1.plot(history.history['accuracy'],label='train_accuracy')
ax1.plot(history.history['val_accuracy'],label='test_accuracy')
ax1.legend()
ax2.plot(history.history['loss'],label='train_loss')
ax2.plot(history.history['val_loss'],label='test_loss')
ax2.legend()
plt.show()
model2=Sequential()
model2.add(Embedding(vocab_size,100,input_length=10,weights=[embeddings_matrix],trainable=False))
model2.add(Dropout(0.3))
model2.add(Bidirectional(LSTM(128,return_sequences=True)))
model2.add(Dropout(0.3))
model2.add(Bidirectional(LSTM(128)))
model2.add(Dense(5,activation='softmax'))
model2.summary()
model2.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])
history=model2.fit(train_padded,y_train_categorical,epochs=30,validation_data=(test_padded,y_test_categorical))
fig,(ax1,ax2)=plt.subplots(nrows=1,ncols=2,figsize=(20,6))
ax1.plot(history.history['accuracy'],label='train_accuracy')
ax1.plot(history.history['val_accuracy'],label='test_accuracy')
ax1.legend()
ax2.plot(history.history['loss'],label='train_loss')
ax2.plot(history.history['val_loss'],label='test_loss')
ax2.legend()
plt.show()
prediction=model2.predict_classes(test_padded)
print(classification_report(y_test,prediction))
confusion_matrix(y_test,prediction)
emoji_dict={0:['heart',':heart:'],1:['baseball',':baseball:'],2:['smile',':smile:'],3:['disappoined',':disappointed:'],4:['fork and knife',':fork_and_knife:']}
text=x_test[8]
text
y_test[8]
def predict(text):
text_sequence=tokenizer.texts_to_sequences([text])
text_padded=pad_sequences(text_sequence,maxlen=30)
prediction=model2.predict_classes(text_padded)
x=prediction[0]
print(emoji_dict[x][0])
print(emoji.emojize(emoji_dict[x][1], use_aliases=True))
ans=predict(text)
ans
msg='you brighten my day'
ans=predict(msg)
ans
msg='she got me a nice present'
ans=predict(msg)
ans
msg='This girl is messing with me'
ans=predict(msg)
ans
msg='will you be my valentine'
ans=predict(msg)
ans
msg='Honey lets go out for a date'
ans=predict(msg)
ans
msg='I am happy'
ans=predict(msg)
ans
| emoji prediction 14032021.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1millionwomentotech SummerOfCode
#
# # Week 3 Intro to NLP - Day 2
#
# http://www.nltk.org/book/ch02.html
# %matplotlib inline
import nltk
import matplotlib
import matplotlib.pyplot as plt
from nltk.corpus import gutenberg
from nltk.corpus import webtext
from nltk.corpus import brown
nltk.corpus.gutenberg.fileids()
emma = nltk.corpus.gutenberg.words('austen-emma.txt')
len(emma)
gutenberg.fileids()
for fileid in gutenberg.fileids():
# homework
print()
# NLTK Corpus Documentation
#
# https://www.nltk.org/api/nltk.corpus.html
for fileid in webtext.fileids():
print(fileid, webtext.raw(fileid)[:65], '...')
# # Brown Corpus
brown
brown.categories()
brown.words()
brown.words(categories='humor')
brown.fileids()
brown.sents(categories=['adventure','humor','mystery'])
# +
humor_text = brown.words(categories='humor')
fdist = nltk.FreqDist(w.lower() for w in humor_text)
print(fdist)
modals = ['can','could','may','must']
for m in modals:
print(m + ":", fdist[m], end= ' ')
# +
cfd = nltk.ConditionalFreqDist(
(genre, word)
for genre in brown.categories()
for word in brown.words(categories=genre)
)
genres = ['humor', 'news', 'hobbies']
pronouns = ['she', 'her', 'hers', 'he', 'him', 'his', 'it', 'its', 'they', 'them', 'theirs']
cfd.tabulate(conditions=genres, samples=pronouns)
# -
def my_for(list):
result = []
for element in list:
if len(element) >= 6:
result.append(element)
return result
# # Homework
#
# - type up the whole Chapter 1
# - exercises 8 and 10
# - ◑ Define a conditional frequency distribution over the Names corpus that allows you to see which initial letters are more frequent for males vs. females (cf. 4.4).
# - ◑ Read the BBC News article: UK's Vicky Pollards 'left behind' http://news.bbc.co.uk/1/hi/education/6173441.stm. The article gives the following statistic about teen language: "the top 20 words used, including yeah, no, but and like, account for around a third of all words." How many word types account for a third of all word tokens, for a variety of text sources? What do you conclude about this statistic? Read more about this on LanguageLog, at http://itre.cis.upenn.edu/~myl/languagelog/archives/003993.html.
#
# The rest of the exercises are optional, of course they provide fabulous practice.
| summer-of-code/week-03/.ipynb_checkpoints/day2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# As we discovered in the [Introduction](Introduction.ipynb), HoloViews allows plotting a variety of data types. Here we will use the sample data module and load the pandas and dask hvPlot API:
import intake
import numpy as np
import hvplot.pandas
import hvplot.dask
# As we learned The hvPlot API closely mirrors the [Pandas plotting API](https://pandas.pydata.org/pandas-docs/stable/visualization.html), but instead of generating static images when used in a notebook, it uses HoloViews to generate either static or dynamically streaming Bokeh plots. Static plots can be used in any context, while streaming plots require a live [Jupyter notebook](http://jupyter.org) or a deployed [Bokeh Server app](https://bokeh.pydata.org/en/latest/docs/user_guide/server.html).
#
# HoloViews provides an extensive, very rich set of objects along with a powerful set of operations to apply, as you can find out in the [HoloViews User Guide](http://holoviews.org/user_guide/index.html). But here we will focus on the most essential mechanisms needed to make your data visualizable, without having to worry about the mechanics going on behind the scenes.
#
# We will be focusing on two different datasets:
#
# - A small CSV file of US crime data, broken down by state
# - A larger Parquet-format file of airline flight data
#
# The ``hvplot.sample_data`` module makes these datasets Intake data catalogue, which we can load either using pandas:
# +
from hvplot.sample_data import us_crime, airline_flights
crime = us_crime.read()
print(type(crime))
crime.head()
# -
# Or using dask as a ``dask.DataFrame``:
flights = airline_flights.to_dask().persist()
print(type(flights))
flights.head()
# ## The plot interface
# The ``dask.dataframe.DataFrame.hvplot``, ``pandas.DataFrame.hvplot`` and ``intake.DataSource.plot`` interfaces (and Series equivalents) from HvPlot provide a powerful high-level API to generate complex plots. The ``.hvplot`` API can be called directly or used as a namespace to generate specific plot types.
# ### The plot method
# The most explicit way to use the plotting API is to specify the names of columns to plot on the ``x``- and ``y``-axis respectively:
crime.hvplot.line(x='Year', y='Violent Crime rate')
# As you'll see in more detail below, you can choose which kind of plot you want to use for the data:
crime.hvplot(x='Year', y='Violent Crime rate', kind='scatter')
# An additional convenience on top of this explicit API is to specify an additional ``by`` variable, which groups the data by one or more additional columns. As an example here we will plot the departure delay ('depdelay') as a function of 'distance', grouping the data by the 'carrier'. There are many available carriers, so we will select only two of them so that the plot is readable:
flight_subset = flights[flights.carrier.isin([b'OH', b'F9'])]
flight_subset.hvplot(x='distance', y='depdelay', by='carrier', kind='scatter', alpha=0.2, persist=True)
# Here we have specified the `x` axis explicitly, which can be omitted if the Pandas index column is already set to what you want on the x axis. Similarly, here we specified the `y` axis; by default all of the non-index columns would be plotted (which would be a lot of data in this case). If you don't specify the 'y' axis, it will have a default label named 'value', but you can then provide a y axis label explicitly using the ``value_label`` option.
#
# Putting all of this together we will plot violent crime, robbery, and burglary rates on the y-axis, specifying 'Year' as the x, and relabel the y-axis to display the 'Rate'.
crime.hvplot(x='Year', y=['Violent Crime rate', 'Robbery rate', 'Burglary rate'],
value_label='Rate (per 100k people)')
# ### The hvplot namespace
# Instead of using the ``kind`` argument to the plot call, we can use the ``hvplot`` namespace, which lets us easily discover the range of plot types that are supported. Plot types available include:
#
# * <a href="#Area">``.area()``</a>: Plots a area chart similar to a line chart except for filling the area under the curve and optionally stacking
# * <a href="#Bars">``.bar()``</a>: Plots a bar chart that can be stacked or grouped
# * <a href="#Bivariate">``.bivariate()``</a>: Plots 2D density of a set of points
# * <a href="#Box-Whisker-Plots">``.box()``</a>: Plots a box-whisker chart comparing the distribution of one or more variables
# * <a href="#HeatMap">``.heatmap()``</a>: Plots a heatmap to visualizing a variable across two independent dimensions
# * <a href="#HexBins">``.hexbins()``</a>: Plots hex bins
# * <a href="#Histogram">``.histogram()``</a>: Plots the distribution of one or histograms as a set of bins
# * <a href="#KDE">``.kde()``</a>: Plots the kernel density estimate of one or more variables.
# * <a href="#The-plot-method">``.line()``</a>: Plots a line chart (such as for a time series)
# * <a href="#Scatter">``.scatter()``</a>: Plots a scatter chart comparing two variables
# * <a href="#Step">``.step()``</a>: Plots a step chart akin to a line plot
# * <a href="#Tables">``.table()``</a>: Generates a SlickGrid DataTable
# * <a href="#Violin-Plots">``.violin()``</a>: Plots a violin plot comparing the distribution of one or more variables using the kernel density estimate
# #### Area
#
# Like most other plot types the ``area`` chart supports the three ways of defining a plot outlined above. An area chart is most useful when plotting multiple variables in a stacked chart. This can be achieve by specifying ``x``, ``y``, and ``by`` columns or using the ``columns`` and ``index``/``use_index`` (equivalent to ``x``) options:
crime.hvplot.area(x='Year', y=['Robbery', 'Aggravated assault'], stacked=True)
# We can also explicitly set ``stacked`` to False and define an ``alpha`` value to compare the values directly:
crime.hvplot.area(x='Year', y=['Aggravated assault', 'Robbery'], stacked=False, alpha=0.4)
# Another use for an area plot is to visualize the spread of a value. For instance using the flights dataset we may want to see the spread in mean delay values across carriers. For that purpose we compute the mean delay by day and carrier and then the min/max mean delay for across all carriers:
flights.groupby(['day', 'carrier'])['carrier_delay'].mean().groupby('day').agg([np.min, np.max]).hvplot.area('day', 'amin', 'amax', alpha=0.2) *\
flights.groupby('day')['carrier_delay'].mean().hvplot()
# #### Bars
#
# In the simplest case we can use ``source.plot.bar`` to plot ``x`` against ``y``:
crime.hvplot.bar('Year', 'Violent Crime rate', rot=90)
# If we want to compare multiple columns instead we can again use the ``index`` option to treat the 'Year' column as the index and then compare the specific columns. Using the ``stacked`` option we can then compare the column values more easily:
crime.hvplot.bar('Year', ['Violent crime total', 'Property crime total'],
stacked=True, rot=90, width=800)
# #### Scatter
#
# The scatter plot supports all the same features as the other chart types we have seen so far but can also be colored by another variable using the ``c`` option and allows declaring a ``cmap``.
crime.hvplot.scatter('Violent Crime rate', 'Burglary rate', c='Year', cmap='viridis', size=12, colorbar=True)
# #### Step
#
# A step chart is very similar to a line chart but instead of linearly interpolating between samples the step chart visualizes discrete steps. The point at which to step can be controlled via the ``where`` keyword allowing 'pre', 'mid' (default) and 'post' values:
crime.hvplot.step(x='Year', y=['Robbery', 'Aggravated assault'], stacked=True)
# #### HexBins
#
# You can create hexagonal bin plots with the ``hexbin`` method. Hexbin plots can be a useful alternative to scatter plots if your data are too dense to plot each point individually.
flights.hvplot.hexbin(x='airtime', y='arrdelay', width=600, height=500)
# #### Bivariate
#
# You can create a 2D density plot with the ``bivariate`` method. Bivariate plots can be a useful alternative to scatter plots if your data are too dense to plot each point individually.
crime.hvplot.bivariate('Violent Crime rate', 'Burglary rate', colorbar=True, width=600, height=500)
# #### HeatMap
#
# A ``HeatMap`` lets us view the relationship between three variables, so we specify the 'x' and 'y' variables and an additional 'C' variable. Additionally we can define a ``reduce_function`` that computes the values for each bin from the samples that fall into it. Here we plot the 'depdelay' (i.e. departure delay) for each day of the month and carrier in the dataset:
flights.hvplot.heatmap(x='day', y='carrier', C='depdelay', reduce_function=np.mean, colorbar=True)
# #### Tables
#
# Unlike all other plot types, a table only supports one signature: either all columns are plotted, or a subset of columns can be selected by defining the ``columns`` explicitly:
crime.hvplot.table(columns=['Year', 'Population', 'Violent Crime rate'], width=400)
# ### Distributions
#
# Plotting distributions differs slightly from other plots since they plot only one variable in the simple case rather than plotting two or more variables against each other. Therefore when plotting these plot types no ``index`` or ``x`` value needs to be supplied. Instead:
#
# 1. Declare a single ``y`` variable, e.g. ``source.plot.hist(variable)``, or
# 2. Declare a ``y`` variable and ``by`` variable, e.g. ``source.plot.hist(variable, by='Group')``, or
# 3. Declare columns or plot all columns, e.g. ``source.plot.hist()`` or ``source.plot.hist(columns=['A', 'B', 'C'])``
#
# #### Histogram
#
# The Histogram is the simplest example of a distribution; often we simply plot the distribution of a single variable, in this case the 'Violent Crime rate'. Additionally we can define a range over which to compute the histogram and the number of bins using the ``bin_range`` and ``bins`` arguments respectively:
crime.hvplot.hist('Violent Crime rate')
# Or we can plot the distribution of multiple columns:
columns = ['Violent Crime rate', 'Property crime rate', 'Burglary rate']
crime.hvplot.hist(y=columns, bins=50, alpha=0.5)
# We can also group the data by another variable:
flights[flights.carrier.isin([b'AA', b'US', b'OH'])].hvplot.hist(
'depdelay', by='carrier', bins=20, bin_range=(-20, 100), alpha=0.3)
# #### KDE
#
# You can also create density plots using ``hvplot.kde()`` method:
crime.hvplot.kde('Violent Crime rate')
# Comparing the distribution of multiple columns is also possible:
columns=['Violent Crime rate', 'Property crime rate', 'Burglary rate']
crime.hvplot.kde(y=columns, alpha=0.5, value_label='Rate')
# The ``DataSource.plot.kde`` also supports the ``by`` keyword:
flights[flights.carrier.isin([b'AA', b'US', b'OH'])].hvplot.kde('depdelay', by='carrier', alpha=0.3, xlim=(-20, 70))
# #### Box-Whisker Plots
#
# Just like the other distribution-based plot types, the box-whisker plot supports plotting a single column:
crime.hvplot.box('Violent Crime rate')
# It also supports multiple columns:
columns=['Burglary rate', 'Larceny-theft rate', 'Motor vehicle theft rate',
'Property crime rate', 'Violent Crime rate']
crime.hvplot.box(y=columns, group_label='Crime', legend=False, value_label='Rate (per 100k)', invert=True)
# Lastly, it also supports using the ``by`` keyword to split the data into multiple subsets:
flights[flights.carrier.isin([b'AA', b'US', b'OH'])].hvplot.box('depdelay', by='carrier', ylim=(-10, 70))
# ## Composing Plots
#
# One of the core strengths of HoloViews is the ease of composing
# different plots. Individual plots can be composed using the ``*`` and
# ``+`` operators, which overlay and compose plots into layouts
# respectively. For more information on composing objects, see the
# HoloViews [User Guide](http://holoviews.org/user_guide/Composing_Elements.html).
#
# By using these operators we can combine multiple plots into composite plots. A simple example is overlaying two plot types:
crime.hvplot('Year', 'Violent Crime rate') * crime.hvplot.scatter('Year', 'Violent Crime rate', size=30)
# We can also lay out different plots and tables together:
(crime.hvplot.bar('Year', 'Violent Crime rate', rot=90, width=550) +
crime.hvplot.table(['Year', 'Population', 'Violent Crime rate'], width=420))
# ## Large data
#
# The previous examples summarized the fairly large airline dataset using statistical plot types that aggregate the data into a feasible subset for plotting. We can instead aggregate the data directly into the viewable image using [datashader](http://datashader.org), which provides a rendering of the entire set of raw data available (as far as the resolution of the screen allows). Here we plot the 'airtime' against the 'distance':
flights.hvplot.scatter('distance', 'airtime', datashade=True)
# ## Groupby
#
# Thanks to the ability of HoloViews to explore a parameter space with a set of widgets we can apply a groupby along a particular column or dimension. For example we can view the distribution of departure delays by carrier grouped by day, allowing the user to choose which day to display:
flights.hvplot.violin('depdelay', by='carrier', groupby='dayofweek', ylim=(-20, 60), height=500)
# This user guide merely provided an overview over the available plot types; to see a detailed description on how to customize plots see the [Customization](Customization.ipynb) user guide.
| examples/user_guide/Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MNIST Image Classification with TensorFlow on Vertex AI
#
# This notebook demonstrates how to implement different image models on MNIST using the [tf.keras API](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras).
#
# ## Learning Objectives
# 1. Understand how to build a Dense Neural Network (DNN) for image classification
# 2. Understand how to use dropout (DNN) for image classification
# 3. Understand how to use Convolutional Neural Networks (CNN)
# 4. Know how to deploy and use an image classifcation model using Google Cloud's [Vertex AI](https://cloud.google.com/vertex-ai/)
#
# First things first. Configure the parameters below to match your own Google Cloud project details.
# +
from datetime import datetime
import os
REGION = 'us-central1'
PROJECT = !(gcloud config get-value core/project)
PROJECT = PROJECT[0]
BUCKET = PROJECT
MODEL_TYPE = "cnn" # "linear", "dnn", "dnn_dropout", or "dnn"
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
os.environ["MODEL_TYPE"] = MODEL_TYPE
# -
# ## Building a dynamic model
#
# In the previous notebook, <a href="mnist_linear.ipynb">mnist_linear.ipynb</a>, we ran our code directly from the notebook. In order to run it on Vertex AI, it needs to be packaged as a python module.
#
# The boilerplate structure for this module has already been set up in the folder `mnist_models`. The module lives in the sub-folder, `trainer`, and is designated as a python package with the empty `__init__.py` (`mnist_models/trainer/__init__.py`) file. It still needs the model and a trainer to run it, so let's make them.
#
# Let's start with the trainer file first. This file parses command line arguments to feed into the model.
# +
# %%writefile mnist_models/trainer/task.py
import argparse
import json
import os
import sys
from . import model
def _parse_arguments(argv):
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
help='Which model type to use',
type=str, default='linear')
parser.add_argument(
'--epochs',
help='The number of epochs to train',
type=int, default=10)
parser.add_argument(
'--steps_per_epoch',
help='The number of steps per epoch to train',
type=int, default=100)
parser.add_argument(
'--job-dir',
help='Directory where to save the given model',
type=str, default='mnist_models/')
return parser.parse_known_args(argv)
def main():
"""Parses command line arguments and kicks off model training."""
args = _parse_arguments(sys.argv[1:])[0]
# Configure path for hyperparameter tuning.
trial_id = json.loads(
os.environ.get('TF_CONFIG', '{}')).get('task', {}).get('trial', '')
output_path = args.job_dir if not trial_id else args.job_dir + '/'
model_layers = model.get_layers(args.model_type)
image_model = model.build_model(model_layers, args.job_dir)
model_history = model.train_and_evaluate(
image_model, args.epochs, args.steps_per_epoch, args.job_dir)
if __name__ == '__main__':
main()
# -
# Next, let's group non-model functions into a util file to keep the model file simple. We'll copy over the `scale` and `load_dataset` functions from the previous lab.
# +
# %%writefile mnist_models/trainer/util.py
import tensorflow as tf
def scale(image, label):
"""Scales images from a 0-255 int range to a 0-1 float range"""
image = tf.cast(image, tf.float32)
image /= 255
image = tf.expand_dims(image, -1)
return image, label
def load_dataset(
data, training=True, buffer_size=5000, batch_size=100, nclasses=10):
"""Loads MNIST dataset into a tf.data.Dataset"""
(x_train, y_train), (x_test, y_test) = data
x = x_train if training else x_test
y = y_train if training else y_test
# One-hot encode the classes
y = tf.keras.utils.to_categorical(y, nclasses)
dataset = tf.data.Dataset.from_tensor_slices((x, y))
dataset = dataset.map(scale).batch(batch_size)
if training:
dataset = dataset.shuffle(buffer_size).repeat()
return dataset
# -
# Finally, let's code the models! The [tf.keras API](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras) accepts an array of [layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers) into a [model object](https://www.tensorflow.org/api_docs/python/tf/keras/Model), so we can create a dictionary of layers based on the different model types we want to use. The below file has two functions: `get_layers` and `create_and_train_model`. We will build the structure of our model in `get_layers`. Last but not least, we'll copy over the training code from the previous lab into `train_and_evaluate`.
#
# **TODO 1**: Define the Keras layers for a DNN model
# **TODO 2**: Define the Keras layers for a dropout model
# **TODO 3**: Define the Keras layers for a CNN model
#
# Hint: These models progressively build on each other. Look at the imported `tensorflow.keras.layers` modules and the default values for the variables defined in `get_layers` for guidance.
# +
# %%writefile mnist_models/trainer/model.py
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import (
Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Softmax)
from . import util
# Image Variables
WIDTH = 28
HEIGHT = 28
def get_layers(
model_type,
nclasses=10,
hidden_layer_1_neurons=400,
hidden_layer_2_neurons=100,
dropout_rate=0.25,
num_filters_1=64,
kernel_size_1=3,
pooling_size_1=2,
num_filters_2=32,
kernel_size_2=3,
pooling_size_2=2):
"""Constructs layers for a keras model based on a dict of model types."""
model_layers = {
'linear': [
Flatten(),
Dense(nclasses),
Softmax()
],
'dnn': [
Flatten(),
Dense(hidden_layer_1_neurons, activation='relu'),
Dense(hidden_layer_2_neurons, activation='relu'),
Dense(nclasses),
Softmax()
],
'dnn_dropout': [
# TODO
Flatten(),
Dense(hidden_layer_1_neurons, activation='relu'),
Dense(hidden_layer_2_neurons, activation='relu'),
Dropout(dropout_rate),
Dense(nclasses),
Softmax()
],
'cnn': [
# TODO
Conv2D(num_filters_1, kernel_size=kernel_size_1,
activation='relu', input_shape=(WIDTH, HEIGHT, 1)),
MaxPooling2D(pooling_size_1),
Conv2D(num_filters_2, kernel_size=kernel_size_2,
activation='relu'),
MaxPooling2D(pooling_size_2),
Flatten(),
Dense(hidden_layer_1_neurons, activation='relu'),
Dense(hidden_layer_2_neurons, activation='relu'),
Dropout(dropout_rate),
Dense(nclasses),
Softmax()
]
}
return model_layers[model_type]
def build_model(layers, output_dir):
"""Compiles keras model for image classification."""
model = Sequential(layers)
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def train_and_evaluate(model, num_epochs, steps_per_epoch, output_dir):
"""Compiles keras model and loads data into it for training."""
mnist = tf.keras.datasets.mnist.load_data()
train_data = util.load_dataset(mnist)
validation_data = util.load_dataset(mnist, training=False)
callbacks = []
if output_dir:
tensorboard_callback = TensorBoard(log_dir=output_dir)
callbacks = [tensorboard_callback]
history = model.fit(
train_data,
validation_data=validation_data,
epochs=num_epochs,
steps_per_epoch=steps_per_epoch,
verbose=2,
callbacks=callbacks)
if output_dir:
export_path = os.path.join(output_dir, 'keras_export')
model.save(export_path, save_format='tf')
return history
# -
# ## Local Training
#
# With everything set up, let's run locally to test the code. Some of the previous tests have been copied over into a testing script `mnist_models/trainer/test.py` to make sure the model still passes our previous checks. On `line 13`, you can specify which model types you would like to check. `line 14` and `line 15` has the number of epochs and steps per epoch respectively.
#
# Moment of truth! Run the code below to check your models against the unit tests. If you see "OK" at the end when it's finished running, congrats! You've passed the tests!
# !python3 -m mnist_models.trainer.test
# Now that we know that our models are working as expected, let's run it on Google Cloud within Vertex AI. We can run it as a python module locally first using the command line.
#
# The below cell transfers some of our variables to the command line as well as create a job directory including a timestamp.
# +
current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
model_type = 'cnn'
os.environ["MODEL_TYPE"] = model_type
os.environ["JOB_DIR"] = "mnist_models/models/{}_{}/".format(
model_type, current_time)
# -
# The cell below runs the local version of the code. The epochs and steps_per_epoch flag can be changed to run for longer or shorther, as defined in our `mnist_models/trainer/task.py` file.
# + language="bash"
# python3 -m mnist_models.trainer.task \
# --job-dir=$JOB_DIR \
# --epochs=5 \
# --steps_per_epoch=50 \
# --model_type=$MODEL_TYPE
# -
# ## Training on the cloud
#
# For this model, we will be able to use a Tensorflow pre-built container on Vertex AI, as we do not have any particular additional prerequisites. As before, we use `setuptools` for this, and store the created source distribution on Cloud Storage.
# +
# %%writefile mnist_models/setup.py
from setuptools import find_packages
from setuptools import setup
setup(
name='mnist_trainer',
version='0.1',
packages=find_packages(),
include_package_data=True,
description='MNIST model training application.'
)
# + language="bash"
# cd mnist_models
# python ./setup.py sdist --formats=gztar
# cd ..
# gsutil cp mnist_models/dist/mnist_trainer-0.1.tar.gz gs://${BUCKET}/mnist/
# -
# Then, we can kickoff the Vertex AI Custom Job using the pre-built container. We can pass our source distribution URI using the `--python-package-uris` flag.
# +
current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
model_type = 'cnn'
os.environ["MODEL_TYPE"] = model_type
os.environ["JOB_DIR"] = "gs://{}/mnist_{}_{}/".format(
BUCKET, model_type, current_time)
os.environ["JOB_NAME"] = "mnist_{}_{}".format(
model_type, current_time)
# + language="bash"
# echo $JOB_DIR $REGION $JOB_NAME
#
# PYTHON_PACKAGE_URIS=gs://${BUCKET}/mnist/mnist_trainer-0.1.tar.gz
# MACHINE_TYPE=n1-standard-4
# REPLICA_COUNT=1
# PYTHON_PACKAGE_EXECUTOR_IMAGE_URI="us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-3:latest"
# PYTHON_MODULE=trainer.task
#
# WORKER_POOL_SPEC="machine-type=$MACHINE_TYPE,\
# replica-count=$REPLICA_COUNT,\
# executor-image-uri=$PYTHON_PACKAGE_EXECUTOR_IMAGE_URI,\
# python-module=$PYTHON_MODULE"
#
# gcloud ai custom-jobs create \
# --region=${REGION} \
# --display-name=$JOB_NAME \
# --python-package-uris=$PYTHON_PACKAGE_URIS \
# --worker-pool-spec=$WORKER_POOL_SPEC \
# --args="--job-dir=$JOB_DIR,--model_type=$MODEL_TYPE"
# + language="bash"
# SAVEDMODEL_DIR=${JOB_DIR}keras_export
# echo $SAVEDMODEL_DIR
# gsutil ls $SAVEDMODEL_DIR
# -
# ## Deploying and predicting with model
#
# Once you have a model you're proud of, let's deploy it! All we need to do is to upload the created model artifact from Cloud Storage to Vertex AI as a model, create a new endpoint, and deploy the model to the endpoint.
# + language="bash"
# TIMESTAMP=$(date -u +%Y%m%d_%H%M%S)
# MODEL_DISPLAYNAME=mnist_$TIMESTAMP
# ENDPOINT_DISPLAYNAME=mnist_endpoint_$TIMESTAMP
# IMAGE_URI="us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest"
# SAVEDMODEL_DIR=${JOB_DIR}keras_export
# echo $SAVEDMODEL_DIR
#
# # Model
# MODEL_RESOURCENAME=$(gcloud ai models upload \
# --region=$REGION \
# --display-name=$MODEL_DISPLAYNAME \
# --container-image-uri=$IMAGE_URI \
# --artifact-uri=$SAVEDMODEL_DIR \
# --format="value(model)")
#
# echo "MODEL_DISPLAYNAME=${MODEL_DISPLAYNAME}"
# echo "MODEL_RESOURCENAME=${MODEL_RESOURCENAME}"
#
# # Endpoint
# ENDPOINT_RESOURCENAME=$(gcloud ai endpoints create \
# --region=$REGION \
# --display-name=$ENDPOINT_DISPLAYNAME \
# --format="value(name)")
#
# echo "ENDPOINT_DISPLAYNAME=${ENDPOINT_DISPLAYNAME}"
# echo "ENDPOINT_RESOURCENAME=${ENDPOINT_RESOURCENAME}"
#
# # Deployment
# DEPLOYED_MODEL_DISPLAYNAME=${MODEL_DISPLAYNAME}_deployment
# MACHINE_TYPE=n1-standard-2
#
# gcloud ai endpoints deploy-model $ENDPOINT_RESOURCENAME \
# --region=$REGION \
# --model=$MODEL_RESOURCENAME \
# --display-name=$DEPLOYED_MODEL_DISPLAYNAME \
# --machine-type=$MACHINE_TYPE \
# --min-replica-count=1 \
# --max-replica-count=1 \
# --traffic-split=0=100
# -
# To predict with the model, let's take one of the example images.
#
# **TODO 4**: Write a `.json` file with image data to send to a Vertex AI deployed model
# +
import json, codecs
import tensorflow as tf
import matplotlib.pyplot as plt
HEIGHT = 28
WIDTH = 28
IMGNO = 12
mnist = tf.keras.datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = mnist
test_image = x_test[IMGNO]
jsondata = {"instances": [
test_image.reshape(HEIGHT, WIDTH, 1).tolist()
]}
json.dump(jsondata, codecs.open("test.json", "w", encoding = "utf-8"))
plt.imshow(test_image.reshape(HEIGHT, WIDTH));
# -
# !cat test.json
# Finally, we can send it to the prediction service. The output will have a 1 in the index of the corresponding digit it is predicting. Congrats! You've completed the lab!
# + language="bash"
# ENDPOINT_RESOURCENAME="projects/432069008306/locations/us-central1/endpoints/7342002088613773312" # TODO: insert ENDPOINT_RESOURCENAME from above
#
# gcloud ai endpoints predict $ENDPOINT_RESOURCENAME \
# --region=$REGION \
# --json-request=test.json
# -
# Copyright 2021 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| notebooks/image_models/labs/2_mnist_models_vertex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import json
import pickle
import datetime
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler
from sklearn.preprocessing import Normalizer
import numpy as np
import pandas as pd
from pprint import pprint
from bisect import bisect_left as find_prev
import matplotlib.pyplot as plt
import xml.etree.ElementTree as xml
strptime = datetime.datetime.strptime
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', None)
# -
# # READING
#
# +
#df_kept.to_csv('chosen_segments_'+str(cut)+'.csv')
cut=0.5
df_kept=pd.read_csv('chosen_segments_'+str(cut)+'.csv')
df_kept['unique_id']=df_kept['shot_id'].map(str)+'_'+df_kept['file'].map(str)
df_kept['unique_id']=df_kept['unique_id'].str.replace('.mp4','')
# -
df_kept.character.unique()
soap_opera_scale = {'extramarital affair': 1/1.98, 'get divorced': 1/1.96,'illegitimate child': 1/1.45,'institutionalized for emotional problem': 1/1.43,'happily married': 1/4.05,'serious accident': 1/2.96,'murdered': 1/1.81,'attempt suicide': 1/1.26,'blackmailed': 1/1.86,'unfaithful spouse': 1/2.23,'sexually assaulted': 1/2.60,'abortion': 1/1.41}
life_events_scale = {'death relative': 100, 'Divorce': 73, 'Marital separation': 65, 'Imprisonment': 63, 'injury illness': 53, 'Marriage': 50, 'Fired': 47, 'Marital reconciliation': 45, 'Retirement': 45, 'Pregnancy': 40, 'Sexual difficulties': 39, 'New family member': 39, 'Business readjustment': 39, 'Money change': 38, 'Work change': 36, 'Arguing': 35, 'Mortgage': 32, 'Child leaving home': 29, 'Trouble with in-laws': 29, 'Achievement': 28}
# +
# which one is what?
# original soap opera scale
soap_1_path ='../other_experiments/Hugging_Face_classification/soap_opera_scale_Max_Jack_Tanya.csv'
soap_2_path ='../other_experiments/Hugging_Face_classification/soap_opera_Peggy_Archie.csv'
# on keyword "event"
events_1_path ='../other_experiments/Hugging_Face_classification/events_ranking_Max_Jack_Tanya.csv'
# original life scale ranking (concatenate the first and second)
life_1_path ='../other_experiments/Hugging_Face_classification/life_scale_ranking_Max_Jack_Tanya.csv'
life_end_path ='../other_experiments/Hugging_Face_classification/life_scale_ranking_end_Max_Jack_Tanya.csv'
# on keyword "daily life"
classification_results_path='../other_experiments/Hugging_Face_classification/daily_life_Max_Jack_Tanya.csv'
# life scale with improved labels (top 20)
reduced_life_clean ='../other_experiments/Hugging_Face_classification/reduced_life_scale_Max_Jack_Tanya _clean.csv'
extra_columns = ['index', 'Unnamed: 0.1', 'Unnamed: 0']
hf_results=pd.read_csv(reduced_life_clean)
hf_results=pd.read_csv(soap_2_path)
hf_results=pd.read_csv(soap_1_path)
hf_results=hf_results.drop(columns=[c for c in extra_columns if c in hf_results.columns])
hf_results['filename']=hf_results['filename'].str.replace('.xml','')
hf_results['unique_id']=hf_results['shot_id'].map(str)+'_'+hf_results['filename'].map(str)
hf_results.head()
# -
hf_results.filename.unique()
hf_results.describe()
if 'extramarital affair' in hf_results.columns:
for c in hf_results.columns:
if c in soap_opera_scale:
hf_results[c] = hf_results[c] / soap_opera_scale[c]
elif 'death relative' in hf_results.columns:
for c in hf_results.columns:
if c in life_events_scale:
hf_results[c] = hf_results[c] / life_events_scale[c]
hf_results.describe()
# +
# hf_results.sort_values(by=['extramarital affair'],ascending=False).head(3)
# +
apply_coefficients = True
transform = np.log
cols_to_drop=[ 'shot_id', 'filename', 'transcript','score',
'Marital separation','New family member','Business readjustment',
'Money change', 'Work change', 'Arguing','Mortgage', 'Child leaving home',
'Trouble with in-laws', 'Achievement', 'happily married']#
min_max_scaler = MinMaxScaler()
standard_scaler = StandardScaler()
robust_scaler = RobustScaler()
chosen_cols = [c for c in hf_results.columns if c in ['death relative', 'Divorce', 'Imprisonment', 'injury illness', 'Marriage','Fired', 'Marital reconciliation', 'Retirement', 'Pregnancy','Sexual difficulties',
'extramarital affair', 'get divorced', 'illegitimate child', 'institutionalized for emotional problem', 'serious accident', 'murdered', 'attempt suicide', 'blackmailed', 'unfaithful spouse', 'sexually assaulte', 'abortion']]
final_results = hf_results.copy()
final_results[chosen_cols] = robust_scaler.fit_transform(final_results[chosen_cols])
if apply_coefficients:
for c in hf_results.columns:
if 'extramarital affair' in hf_results.columns:
if c in soap_opera_scale:
hf_results[c] = hf_results[c] * transform(soap_opera_scale[c])
elif 'death relative' in hf_results.columns:
if c in life_events_scale:
hf_results[c] = hf_results[c] * transform(life_events_scale[c])
# +
final_results['score_max'] = final_results.drop([c for c in cols_to_drop if c in final_results.columns], axis=1).max(axis=1)
final_results = final_results.sort_values(by=['score_max'],ascending=False)
filtered_results = pd.merge(final_results,df_kept,on='unique_id')
filtered_result_char=filtered_results[filtered_results.character == '<NAME>']
filtered_result_char=filtered_result_char[filtered_result_char.transcript.str.split().apply(len)>5]
filtered_result_char[['transcript', 'unique_id'] + chosen_cols + ['score_max']].head(20)
# -
n_words = 10
for character in ['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']:
filtered_result_char=filtered_results[filtered_results.character == character]
# filtered_result_char=filtered_result_char[filtered_result_char.transcript.str.split().apply(len)>n_words]
print(len(filtered_result_char), 'lines for', character)
top20 = filtered_result_char[['transcript', 'filename', 'unique_id', 'begin', 'end',] + chosen_cols + ['score_max']]
top20.to_csv('final_'+character+'_nominwords.csv')
final_results[chosen_cols].describe()
hf_results[chosen_cols].describe()
hf_results[chosen_cols].describe()
| facerec_segment/scores_selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, HTML, IFrame
from ipywidgets import interact
import pandas as pd
from numpy import cos,sin,pi,dot,arccos
from numpy.linalg import norm # this is the magnitude function
from mpl_toolkits.mplot3d import axes3d
from itertools import combinations
plt.rcParams["figure.figsize"] = [8, 8]
# Uncomment the one that corresponds to your Jupyter theme
plt.style.use('dark_background')
# plt.style.use('fivethirtyeight')
# plt.style.use('Solarize_Light2')
# + [markdown] slideshow={"slide_type": "skip"}
# <style>
# td {
# font-size: 20px;
# }
# </style>
# + [markdown] slideshow={"slide_type": "notes"}
# $\newcommand{\RR}{\mathbb{R}}$
# $\newcommand{\bv}[1]{\begin{bmatrix} #1 \end{bmatrix}}$
# $\renewcommand{\vec}{\mathbf}$
#
# + [markdown] slideshow={"slide_type": "slide"}
#
#
# ### Example
#
# 1. Find a unit vector perpendicular to $\langle 1,2,-1\rangle$ and $\langle 3,0,1\rangle$. Is there only one?
# -
v,w = np.array(((1,2,-1),(3,0,1)))
u = np.cross(w,v)
uu = u/norm(u)
norm(uu)
np.dot(uu,w)
#
# ### Quick exercise
#
# Write a parametric form for a line containing position vectors $\vec p$ and $\vec q$.
#
# $$\vec p + t (\vec q - \vec p) = (1 - t)\vec p + t \vec q$$
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Exercises
#
# 1. Where does the line through $(2,4,-2)$ and $(3,1,-1)$ cross the $xy$-plane?
# -
# **Solution**
#
# Use the vector connecting the positions as the direction $\vec v = \langle 3 - 2, 1 - 4, -1 - (-2) \rangle = \langle 1, -3, 1 \rangle$. Use either point as the initial position, so one possibility is
# $$ \vec r(t) = \langle 2,4,-2 \rangle + t \langle 1, -3, 1 \rangle$$
#
# To find the intersection with the $xy$-plane, set the $z$-coordinate to $0$ and solve for $t$.
#
# $$ -2 + t = 0$$
#
# at $t =2$, so the line intersects the $xy$-plane at $\vec r(t) = \langle 4, -2, 0\rangle$.
# + [markdown] slideshow={"slide_type": "subslide"}
# 2. Is the line $(2,4,0)$ and $(1,1,1)$ perpendicular to the line through $(3,3,4)$ and $(3,-1,-8)$?
# -
# **Solution**
#
# CAREFUL. This depends on what we mean by lines being perpendicular in $\mathbb{R}^3$.
#
# First, we compare directions.
#
# $$\vec v_1 = \langle 2-1, 4 - 1, 0 - 1 \rangle = \langle 1, 3, -1 \rangle$$
# $$\vec v_2 = \langle 3 - 3, 3 - -1, 4 - -8 \rangle = \langle 0, 4, 12 \rangle$$
#
# Thus, $\vec v_1 \cdot \vec v_2 = 0$, so the _directions_ are perpendicular, but we will require further that for lines to be perpendicular they **must intersect**.
#
# For this, we must check if there is a solution to the system of equations
#
# $$\bv{2 \\ 4 \\ 0} + t \bv{1 \\ 3 \\ -1 } = \bv{3 \\ 3 \\ 4} + s \bv{0 \\ 4 \\ 12}$$
# where each side is the parametric form of one of the lines above. This has 3 equations and 2 unknowns. The first (top) equation says $2 + t = 0$ so $t = 1$.
#
# The second component thus says $7 = 3 + 4s $ so $s = 1$ as well, which on the third line yields
# $$ -(1) = 4 + 12$$
# so this system has no solution. The lines do **not** intersect, and thus they are **not** perpendicular.
#
# We say they are **skew lines**.
# + jupyter={"source_hidden": true}
@interact
def _(angle = (-96,108,6)):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(30,angle)
plt.plot([2,1],[4,1],[0,1])
plt.plot([3,3],[3,-1],[4,-8])
# -
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# #### Quick exercise
#
# What is a normal vector to the plane given by $$ x+2y = 16-8z?$$
# Find a point on this plane.
# -
# **Solution**
#
# Don't forget to move the $z$ term over. $$\vec n = \langle 1, 2, 8 \rangle$$
#
# A point on this plane is $(16, 0, 0)$ or $(0, 8, 0)$ or $(0,0,2)$.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Exercises
#
# 1. Find an equation of the plane through $(1,0,0)$, $(0,1,0)$, and $(0,0,1)$.
# -
# **Solution**
#
# Find two directions in the plane and cross them.
#
# $$\vec n = (\vec j - \vec i)\times(\vec k - \vec i) = \vec j \times \vec k - \vec j\times \vec i - \vec i \times \vec k = \vec i + \vec j +\vec k$$
#
# Put into eqution of plane $\vec n \cdot \vec x = \vec v \cdot \vec p$ to get $$x + y + z = 1$$
# + [markdown] slideshow={"slide_type": "slide"}
# 2. Find a parametric form for the line of intersection of the planes given by $x+y-z = 2$ and $2x - y + 3z = 1$.
# -
# **Solution**
#
# We find a point of intersection (i.e., solve a system of the two equations). Start by just adding the two equations to get $$ 3x + 2z = 3$$ which has solutions $x = 1, z = 0$. Plug these back into either of the original equations to get $y = 1$, so a point is $(1,1,0)$.
#
# More interstingly, the direction of the line is parallel to both planes, so it is orthogonal to both normals, thus we use a cross product
# $$\vec v = \vec n_1 \times \vec n_2 = \langle 1, 1, -1 \rangle \times \langle 2, -1, 3 \rangle$$
np.cross([1,1,-1],[2,-1,3])
# Thus, a parametric form of the line is $$\vec r(t) = \langle 1,1,0\rangle + t\langle 2, -5, -3\rangle $$
# + jupyter={"source_hidden": true}
t = np.array([-2,2])
p = np.array([1,1,0])
v = np.array([2, -5, -3])
x = y = np.linspace(-2,2,10)
x,y = np.meshgrid(x,y)
@interact
def _(angle = (-96,108,6)):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.view_init(30,angle)
plt.plot(p[0] + t*v[0], p[1] + t*v[1], p[2] + t*v[2])
ax.plot_surface(x, y, x + y - 2,alpha=.5)
ax.plot_surface(x, y, (1 - 2*x + y)/3,alpha=.8)
# -
| exercises/L03-Exercises-Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Importing necessary packages in Python
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np ; np.random.seed(sum(map(ord, "aesthetics")))
import pandas as pd
from sklearn.datasets import make_classification
#from sklearn.learning_curve import learning_curve
#from sklearn.cross_validation import train_test_split
#from sklearn.grid_search import GridSearchCV
#from sklearn.cross_validation import ShuffleSplit
from sklearn.metrics import classification_report,confusion_matrix, roc_curve, roc_auc_score, auc, accuracy_score
from sklearn.model_selection import ShuffleSplit,train_test_split, cross_val_score, GridSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize, StandardScaler, MinMaxScaler
import seaborn
seaborn.set_context('notebook')
seaborn.set_style(style='darkgrid')
from pprint import pprint
# +
file = 'C:\\Users\\Forex1\\Desktop\\AI\\ml-german-credit\\data\\german.data'
url = "http://archive.ics.uci.edu/ml/machine-learning-databases/statlog/german/german.data"
names = ['existingchecking', 'duration', 'credithistory', 'purpose', 'creditamount',
'savings', 'employmentsince', 'installmentrate', 'statussex', 'otherdebtors',
'residencesince', 'property', 'age', 'otherinstallmentplans', 'housing',
'existingcredits', 'job', 'peopleliable', 'telephone', 'foreignworker', 'classification']
data = pd.read_csv(file,names = names, delimiter=' ')
print(data.shape)
print (data.columns)
data.head(10)
# -
# Binarize the y output for easier use of e.g. ROC curves -> 0 = 'bad' credit; 1 = 'good' credit
data.classification.replace([1,2], [1,0], inplace=True)
# Print number of 'good' credits (should be 700) and 'bad credits (should be 300)
data.classification.value_counts()
# +
#numerical variables labels
numvars = ['creditamount', 'duration', 'installmentrate', 'residencesince', 'age',
'existingcredits', 'peopleliable','classification']
# Standardization
numdata_std = pd.DataFrame(StandardScaler().fit_transform(data[numvars].drop(['classification'], axis=1)))
# +
from collections import defaultdict
#categorical variables labels
catvars = ['existingchecking', 'credithistory', 'purpose', 'savings', 'employmentsince',
'statussex', 'otherdebtors', 'property', 'otherinstallmentplans', 'housing', 'job',
'telephone', 'foreignworker']
d = defaultdict(LabelEncoder)
# Encoding the variable
lecatdata = data[catvars].apply(lambda x: d[x.name].fit_transform(x))
# print transformations
for x in range(len(catvars)):
print(catvars[x],": ", data[catvars[x]].unique())
print(catvars[x],": ", lecatdata[catvars[x]].unique())
#One hot encoding, create dummy variables for every category of every categorical variable
dummyvars = pd.get_dummies(data[catvars])
data[catvars]
lecatdata[catvars]
# +
data_clean = pd.concat([data[numvars], lecatdata], axis = 1)
print(data_clean.shape)
# -
data_clean.head()
# + active=""
# [('creditamount', 0.12923313611726128),
# ('existingchecking', 0.11268644840782993),
# ('age', 0.10597935702235378),
# ('duration', 0.09604917366613606),
# ('purpose', 0.0640848806635587),
# ('credithistory', 0.05887893200955861),
# ('savings', 0.05249104875800478),
# ('employmentsince', 0.05073817605704535)]
# -
data[data['classification']==0]['creditamount'].hist(alpha=0.9,color='red')
data[data['classification']==1]['creditamount'].hist(alpha=0.3,color='blue')
data[data['classification']==0]['existingchecking'].hist(alpha=0.9,color='red')
data[data['classification']==1]['existingchecking'].hist(alpha=0.3,color='blue')
# + active=""
# Status of existing checking account
# A11 : ... < 0 DM
# A12 : 0 <= ... < 200 DM
# A13 : ... >= 200 DM /salary assignments for at least 1 year
# A14 : no checking account
# -
data[data['classification']==0]['purpose'].hist(alpha=0.9,color='red')
data[data['classification']==1]['purpose'].hist(alpha=0.3,color='blue')
# + active=""
# Attribute 4: (qualitative)
# Purpose
# A40 : car (new)
# A41 : car (used)
# A42 : furniture/equipment
# A43 : radio/television
# A44 : domestic appliances
# A45 : repairs
# A46 : education
# A47 : (vacation - does not exist?)
# A48 : retraining
# A49 : busines
# -
data[data['classification']==0]['duration'].hist(alpha=0.9,color='red')
data[data['classification']==1]['duration'].hist(alpha=0.3,color='blue')
data[data['classification']==0]['otherdebtors'].hist(alpha=0.9,color='red')
data[data['classification']==1]['otherdebtors'].hist(alpha=0.3,color='blue')
# + active=""
# Attribute 10: (qualitative)
# Other debtors / guarantors
# A101 : none
# A102 : co-applicant
# A103 : guarantor
#
# + active=""
#
#
# -
data[data['classification']==0]['credithistory'].hist(alpha=0.9,color='red')
data[data['classification']==1]['credithistory'].hist(alpha=0.3,color='blue')
# + active=""
# Credit history
# A30 : no credits taken/ all credits paid back duly
# A31 : all credits at this bank paid back duly
# A32 : existing credits paid back duly till now
# A33 : delay in paying off in the past
# A34 : critical account/other credits existing (not at this bank)
# -
np.arange(0,1500,500)
# +
plt.figure(figsize=(16,10))
plt.xticks(np.arange(0, 20000, 1000))
data[data['classification']==0]['creditamount'].hist(alpha=0.9,color='red',bins=50)
data[data['classification']==1]['creditamount'].hist(alpha=0.3,color='blue',bins=50)
# -
data[data['classification']==0]['savings'].hist(alpha=0.9,color='red')
data[data['classification']==1]['savings'].hist(alpha=0.3,color='blue')
# + active=""
# Savings account/bonds
# A61 : ... < 100 DM
# A62 : 100 <= ... < 500 DM
# A63 : 500 <= ... < 1000 DM
# A64 : .. >= 1000 DM
# A65 : unknown/ no savings account
# + active=""
# [('purpose', 0.13633222521756402),
# ('duration', 0.1227859527093547),
# ('otherdebtors', 0.11975289574821513),
# ('existingchecking', 0.10922995722331769),
# ('credithistory', 0.06905427152316855),
# ('creditamount', 0.0616691028016846),
# ('savings', 0.05725744585817834),
# -
data[data['classification']==0]['age'].hist(alpha=0.9,color='red')
data[data['classification']==1]['age'].hist(alpha=0.3,color='blue')
data[data['classification']==0]['employmentsince'].hist(alpha=0.9,color='red')
data[data['classification']==1]['employmentsince'].hist(alpha=0.3,color='blue')
# + active=""
# Present employment since
# A71 : unemployed
# A72 : ... < 1 year
# A73 : 1 <= ... < 4 years
# A74 : 4 <= ... < 7 years
# A75 : .. >= 7 years
# -
data_clean['creditamount'] = data_clean['creditamount']//100
data_clean
# Unscaled, unnormalized data
X_clean = data_clean.drop('classification', axis=1)
y_clean = data_clean['classification']
X_train_clean, X_test_clean, y_train_clean, y_test_clean = train_test_split(X_clean,y_clean,test_size=0.2,random_state=111)
# +
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train_clean)
X_train_clean = scaler.transform(X_train_clean)
X_test_clean = scaler.transform(X_test_clean)
# -
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(100,40,100), max_iter=1000,verbose=True)
mlp.fit(X_train_clean, y_train_clean.values.reshape(-1))
predictions = mlp.predict(X_test_clean)
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test_clean,predictions))
print(classification_report(y_test_clean,predictions))
#a,b,c,d,=confusion_matrix(y_test,predictions)
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(n_estimators=500)
rfc.fit(X_train_clean,y_train_clean)
predForest=rfc.predict(X_test_clean)
X_clean.columns
print(confusion_matrix(y_test_clean,predForest))
print(classification_report(y_test_clean,predForest))
roc_auc_score(y_test_clean, predForest)
np.mean(rfc.feature_importances_)
b=list(zip(X_clean.columns,rfc.feature_importances_))
b
b.sort(key=lambda b: b[1],reverse=True)
print(sum(x[1] for x in b if x[1]>np.mean(rfc.feature_importances_)))
[x for x in b if x[1]>np.mean(rfc.feature_importances_)]
[x for x in b if x[1]<np.mean(rfc.feature_importances_)]
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train_clean, y_train_clean)
y_pred = gnb.predict(X_test_clean)
# +
print(confusion_matrix(y_test_clean,y_pred))
print(classification_report(y_test_clean,y_pred))
# -
roc_auc_score(y_test_clean, y_pred)
110/133
[x[0] for x in b if x[1]>np.mean(rfc.feature_importances_)]
# Unscaled, unnormalized data
X_clean_less = data_clean[[x[0] for x in b if x[1]>np.mean(rfc.feature_importances_)]]
y_clean_less = data_clean['classification']
X_train_clean_less, X_test_clean_less, y_train_clean_less, y_test_clean_less = train_test_split(X_clean_less,y_clean_less,test_size=0.2,random_state=111)
X_clean_less
# +
scaler = StandardScaler()
scaler.fit(X_train_clean_less)
X_train_clean_less = scaler.transform(X_train_clean_less)
X_test_clean_less = scaler.transform(X_test_clean_less)
# -
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(hidden_layer_sizes=(100,40,100), max_iter=1000,verbose=True)
mlp.fit(X_train_clean_less, y_train_clean_less.values.reshape(-1))
predictions = mlp.predict(X_test_clean_less)
# +
from sklearn.metrics import classification_report, confusion_matrix
print(confusion_matrix(y_test_clean_less,predictions))
print(classification_report(y_test_clean_less,predictions))
#a,b,c,d,=confusion_matrix(y_test,predictions)
pd.DataFrame(confusion_matrix(y_test_clean_less, predictions),
columns=['Predicted Negative', 'Predicted Positive'],
index=['Actual Negative', 'Actual Positive'])
# -
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(n_estimators=500)
rfc.fit(X_train_clean_less,y_train_clean_less)
predForest=rfc.predict(X_test_clean_less)
print(confusion_matrix(y_test_clean_less,predForest))
print(classification_report(y_test_clean_less,predForest))
predForest
y_test_clean_less
# +
pd.DataFrame(confusion_matrix(y_test_clean_less, predictions),
columns=['Predicted Negative', 'Predicted Positive'],
index=['Actual Negative', 'Actual Positive'])
# -
probs = rfc.predict_proba(X_test_clean_less)
probs=probs[:,1]
auc = roc_auc_score(y_test_clean_less, probs)
print('AUC: %.2f' % auc)
fpr, tpr, thresholds = roc_curve(y_test_clean_less, probs)
# +
def plot_roc_curve(fpr, tpr):
plt.plot(fpr, tpr, color='orange', label='ROC')
plt.plot([0, 1], [0, 1], color='darkblue', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver Operating Characteristic (ROC) Curve')
plt.legend()
plt.show()
plot_roc_curve(fpr, tpr)
# -
| German-data-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.1** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
#
# ---
# # Applied Machine Learning: Module 2 (Supervised Learning, Part I)
# ## Preamble and Review
# +
# %matplotlib notebook
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
np.set_printoptions(precision=2)
fruits = pd.read_table('readonly/fruit_data_with_colors.txt')
feature_names_fruits = ['height', 'width', 'mass', 'color_score']
X_fruits = fruits[feature_names_fruits]
y_fruits = fruits['fruit_label']
target_names_fruits = ['apple', 'mandarin', 'orange', 'lemon']
X_fruits_2d = fruits[['height', 'width']]
y_fruits_2d = fruits['fruit_label']
X_train, X_test, y_train, y_test = train_test_split(X_fruits, y_fruits, random_state=0)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
# we must apply the scaling to the test set that we computed for the training set
X_test_scaled = scaler.transform(X_test)
knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train_scaled, y_train)
print('Accuracy of K-NN classifier on training set: {:.2f}'
.format(knn.score(X_train_scaled, y_train)))
print('Accuracy of K-NN classifier on test set: {:.2f}'
.format(knn.score(X_test_scaled, y_test)))
example_fruit = [[5.5, 2.2, 10, 0.70]]
example_fruit_scaled = scaler.transform(example_fruit)
print('Predicted fruit type for ', example_fruit, ' is ',
target_names_fruits[knn.predict(example_fruit_scaled)[0]-1])
# -
# ## Datasets
# +
from sklearn.datasets import make_classification, make_blobs
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_breast_cancer
from adspy_shared_utilities import load_crime_dataset
cmap_bold = ListedColormap(['#FFFF00', '#00FF00', '#0000FF','#000000'])
# synthetic dataset for simple regression
from sklearn.datasets import make_regression
plt.figure()
plt.title('Sample regression problem with one input variable')
X_R1, y_R1 = make_regression(n_samples = 100, n_features=1,
n_informative=1, bias = 150.0,
noise = 30, random_state=0)
plt.scatter(X_R1, y_R1, marker= 'o', s=50)
plt.show()
# synthetic dataset for more complex regression
from sklearn.datasets import make_friedman1
plt.figure()
plt.title('Complex regression problem with one input variable')
X_F1, y_F1 = make_friedman1(n_samples = 100,
n_features = 7, random_state=0)
plt.scatter(X_F1[:, 2], y_F1, marker= 'o', s=50)
plt.show()
# synthetic dataset for classification (binary)
plt.figure()
plt.title('Sample binary classification problem with two informative features')
X_C2, y_C2 = make_classification(n_samples = 100, n_features=2,
n_redundant=0, n_informative=2,
n_clusters_per_class=1, flip_y = 0.1,
class_sep = 0.5, random_state=0)
plt.scatter(X_C2[:, 0], X_C2[:, 1], c=y_C2,
marker= 'o', s=50, cmap=cmap_bold)
plt.show()
# more difficult synthetic dataset for classification (binary)
# with classes that are not linearly separable
X_D2, y_D2 = make_blobs(n_samples = 100, n_features = 2, centers = 8,
cluster_std = 1.3, random_state = 4)
y_D2 = y_D2 % 2
plt.figure()
plt.title('Sample binary classification problem with non-linearly separable classes')
plt.scatter(X_D2[:,0], X_D2[:,1], c=y_D2,
marker= 'o', s=50, cmap=cmap_bold)
plt.show()
# Breast cancer dataset for classification
cancer = load_breast_cancer()
(X_cancer, y_cancer) = load_breast_cancer(return_X_y = True)
# Communities and Crime dataset
(X_crime, y_crime) = load_crime_dataset()
# -
# ## K-Nearest Neighbors
# ### Classification
# +
from adspy_shared_utilities import plot_two_class_knn
X_train, X_test, y_train, y_test = train_test_split(X_C2, y_C2,
random_state=0)
plot_two_class_knn(X_train, y_train, 1, 'uniform', X_test, y_test)
plot_two_class_knn(X_train, y_train, 3, 'uniform', X_test, y_test)
plot_two_class_knn(X_train, y_train, 11, 'uniform', X_test, y_test)
# -
# ### Regression
# +
from sklearn.neighbors import KNeighborsRegressor
X_train, X_test, y_train, y_test = train_test_split(X_R1, y_R1, random_state = 0)
knnreg = KNeighborsRegressor(n_neighbors = 5).fit(X_train, y_train)
print(knnreg.predict(X_test))
print('R-squared test score: {:.3f}'
.format(knnreg.score(X_test, y_test)))
# +
fig, subaxes = plt.subplots(1, 2, figsize=(8,4))
X_predict_input = np.linspace(-3, 3, 50).reshape(-1,1)
X_train, X_test, y_train, y_test = train_test_split(X_R1[0::5], y_R1[0::5], random_state = 0)
for thisaxis, K in zip(subaxes, [1, 3]):
knnreg = KNeighborsRegressor(n_neighbors = K).fit(X_train, y_train)
y_predict_output = knnreg.predict(X_predict_input)
thisaxis.set_xlim([-2.5, 0.75])
thisaxis.plot(X_predict_input, y_predict_output, '^', markersize = 10,
label='Predicted', alpha=0.8)
thisaxis.plot(X_train, y_train, 'o', label='True Value', alpha=0.8)
thisaxis.set_xlabel('Input feature')
thisaxis.set_ylabel('Target value')
thisaxis.set_title('KNN regression (K={})'.format(K))
thisaxis.legend()
plt.tight_layout()
# -
# ### Regression model complexity as a function of K
# +
# plot k-NN regression on sample dataset for different values of K
fig, subaxes = plt.subplots(5, 1, figsize=(5,20))
X_predict_input = np.linspace(-3, 3, 500).reshape(-1,1)
X_train, X_test, y_train, y_test = train_test_split(X_R1, y_R1,
random_state = 0)
for thisaxis, K in zip(subaxes, [1, 3, 7, 15, 55]):
knnreg = KNeighborsRegressor(n_neighbors = K).fit(X_train, y_train)
y_predict_output = knnreg.predict(X_predict_input)
train_score = knnreg.score(X_train, y_train)
test_score = knnreg.score(X_test, y_test)
thisaxis.plot(X_predict_input, y_predict_output)
thisaxis.plot(X_train, y_train, 'o', alpha=0.9, label='Train')
thisaxis.plot(X_test, y_test, '^', alpha=0.9, label='Test')
thisaxis.set_xlabel('Input feature')
thisaxis.set_ylabel('Target value')
thisaxis.set_title('KNN Regression (K={})\n\
Train $R^2 = {:.3f}$, Test $R^2 = {:.3f}$'
.format(K, train_score, test_score))
thisaxis.legend()
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
# -
# ## Linear models for regression
# ### Linear regression
# +
from sklearn.linear_model import LinearRegression
X_train, X_test, y_train, y_test = train_test_split(X_R1, y_R1,
random_state = 0)
linreg = LinearRegression().fit(X_train, y_train)
print('linear model coeff (w): {}'
.format(linreg.coef_))
print('linear model intercept (b): {:.3f}'
.format(linreg.intercept_))
print('R-squared score (training): {:.3f}'
.format(linreg.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'
.format(linreg.score(X_test, y_test)))
# -
# ### Linear regression: example plot
plt.figure(figsize=(5,4))
plt.scatter(X_R1, y_R1, marker= 'o', s=50, alpha=0.8)
plt.plot(X_R1, linreg.coef_ * X_R1 + linreg.intercept_, 'r-')
plt.title('Least-squares linear regression')
plt.xlabel('Feature value (x)')
plt.ylabel('Target value (y)')
plt.show()
# +
X_train, X_test, y_train, y_test = train_test_split(X_crime, y_crime,
random_state = 0)
linreg = LinearRegression().fit(X_train, y_train)
print('Crime dataset')
print('linear model intercept: {}'
.format(linreg.intercept_))
print('linear model coeff:\n{}'
.format(linreg.coef_))
print('R-squared score (training): {:.3f}'
.format(linreg.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'
.format(linreg.score(X_test, y_test)))
# -
# ### Ridge regression
# +
from sklearn.linear_model import Ridge
X_train, X_test, y_train, y_test = train_test_split(X_crime, y_crime,
random_state = 0)
linridge = Ridge(alpha=20.0).fit(X_train, y_train)
print('Crime dataset')
print('ridge regression linear model intercept: {}'
.format(linridge.intercept_))
print('ridge regression linear model coeff:\n{}'
.format(linridge.coef_))
print('R-squared score (training): {:.3f}'
.format(linridge.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'
.format(linridge.score(X_test, y_test)))
print('Number of non-zero features: {}'
.format(np.sum(linridge.coef_ != 0)))
# -
# #### Ridge regression with feature normalization
# +
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
from sklearn.linear_model import Ridge
X_train, X_test, y_train, y_test = train_test_split(X_crime, y_crime,
random_state = 0)
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
linridge = Ridge(alpha=20.0).fit(X_train_scaled, y_train)
print('Crime dataset')
print('ridge regression linear model intercept: {}'
.format(linridge.intercept_))
print('ridge regression linear model coeff:\n{}'
.format(linridge.coef_))
print('R-squared score (training): {:.3f}'
.format(linridge.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}'
.format(linridge.score(X_test_scaled, y_test)))
print('Number of non-zero features: {}'
.format(np.sum(linridge.coef_ != 0)))
# -
# #### Ridge regression with regularization parameter: alpha
print('Ridge regression: effect of alpha regularization parameter\n')
for this_alpha in [0, 1, 10, 20, 50, 100, 1000]:
linridge = Ridge(alpha = this_alpha).fit(X_train_scaled, y_train)
r2_train = linridge.score(X_train_scaled, y_train)
r2_test = linridge.score(X_test_scaled, y_test)
num_coeff_bigger = np.sum(abs(linridge.coef_) > 1.0)
print('Alpha = {:.2f}\nnum abs(coeff) > 1.0: {}, \
r-squared training: {:.2f}, r-squared test: {:.2f}\n'
.format(this_alpha, num_coeff_bigger, r2_train, r2_test))
# ### Lasso regression
# +
from sklearn.linear_model import Lasso
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train, X_test, y_train, y_test = train_test_split(X_crime, y_crime,
random_state = 0)
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
linlasso = Lasso(alpha=2.0, max_iter = 10000).fit(X_train_scaled, y_train)
print('Crime dataset')
print('lasso regression linear model intercept: {}'
.format(linlasso.intercept_))
print('lasso regression linear model coeff:\n{}'
.format(linlasso.coef_))
print('Non-zero features: {}'
.format(np.sum(linlasso.coef_ != 0)))
print('R-squared score (training): {:.3f}'
.format(linlasso.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}\n'
.format(linlasso.score(X_test_scaled, y_test)))
print('Features with non-zero weight (sorted by absolute magnitude):')
for e in sorted (list(zip(list(X_crime), linlasso.coef_)),
key = lambda e: -abs(e[1])):
if e[1] != 0:
print('\t{}, {:.3f}'.format(e[0], e[1]))
# -
# #### Lasso regression with regularization parameter: alpha
# +
print('Lasso regression: effect of alpha regularization\n\
parameter on number of features kept in final model\n')
for alpha in [0.5, 1, 2, 3, 5, 10, 20, 50]:
linlasso = Lasso(alpha, max_iter = 10000).fit(X_train_scaled, y_train)
r2_train = linlasso.score(X_train_scaled, y_train)
r2_test = linlasso.score(X_test_scaled, y_test)
print('Alpha = {:.2f}\nFeatures kept: {}, r-squared training: {:.2f}, \
r-squared test: {:.2f}\n'
.format(alpha, np.sum(linlasso.coef_ != 0), r2_train, r2_test))
# -
# ### Polynomial regression
# +
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
X_train, X_test, y_train, y_test = train_test_split(X_F1, y_F1,
random_state = 0)
linreg = LinearRegression().fit(X_train, y_train)
print('linear model coeff (w): {}'
.format(linreg.coef_))
print('linear model intercept (b): {:.3f}'
.format(linreg.intercept_))
print('R-squared score (training): {:.3f}'
.format(linreg.score(X_train, y_train)))
print('R-squared score (test): {:.3f}'
.format(linreg.score(X_test, y_test)))
print('\nNow we transform the original input data to add\n\
polynomial features up to degree 2 (quadratic)\n')
poly = PolynomialFeatures(degree=2)
X_F1_poly = poly.fit_transform(X_F1)
X_train, X_test, y_train, y_test = train_test_split(X_F1_poly, y_F1,
random_state = 0)
linreg = LinearRegression().fit(X_train, y_train)
print('(poly deg 2) linear model coeff (w):\n{}'
.format(linreg.coef_))
print('(poly deg 2) linear model intercept (b): {:.3f}'
.format(linreg.intercept_))
print('(poly deg 2) R-squared score (training): {:.3f}'
.format(linreg.score(X_train, y_train)))
print('(poly deg 2) R-squared score (test): {:.3f}\n'
.format(linreg.score(X_test, y_test)))
print('\nAddition of many polynomial features often leads to\n\
overfitting, so we often use polynomial features in combination\n\
with regression that has a regularization penalty, like ridge\n\
regression.\n')
X_train, X_test, y_train, y_test = train_test_split(X_F1_poly, y_F1,
random_state = 0)
linreg = Ridge().fit(X_train, y_train)
print('(poly deg 2 + ridge) linear model coeff (w):\n{}'
.format(linreg.coef_))
print('(poly deg 2 + ridge) linear model intercept (b): {:.3f}'
.format(linreg.intercept_))
print('(poly deg 2 + ridge) R-squared score (training): {:.3f}'
.format(linreg.score(X_train, y_train)))
print('(poly deg 2 + ridge) R-squared score (test): {:.3f}'
.format(linreg.score(X_test, y_test)))
# -
# ## Linear models for classification
# ### Logistic regression
# #### Logistic regression for binary classification on fruits dataset using height, width features (positive class: apple, negative class: others)
# +
from sklearn.linear_model import LogisticRegression
from adspy_shared_utilities import (
plot_class_regions_for_classifier_subplot)
fig, subaxes = plt.subplots(1, 1, figsize=(7, 5))
y_fruits_apple = y_fruits_2d == 1 # make into a binary problem: apples vs everything else
X_train, X_test, y_train, y_test = (
train_test_split(X_fruits_2d.as_matrix(),
y_fruits_apple.as_matrix(),
random_state = 0))
clf = LogisticRegression(C=100).fit(X_train, y_train)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train, None,
None, 'Logistic regression \
for binary classification\nFruit dataset: Apple vs others',
subaxes)
h = 6
w = 8
print('A fruit with height {} and width {} is predicted to be: {}'
.format(h,w, ['not an apple', 'an apple'][clf.predict([[h,w]])[0]]))
h = 10
w = 7
print('A fruit with height {} and width {} is predicted to be: {}'
.format(h,w, ['not an apple', 'an apple'][clf.predict([[h,w]])[0]]))
subaxes.set_xlabel('height')
subaxes.set_ylabel('width')
print('Accuracy of Logistic regression classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Logistic regression classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# -
# #### Logistic regression on simple synthetic dataset
# +
from sklearn.linear_model import LogisticRegression
from adspy_shared_utilities import (
plot_class_regions_for_classifier_subplot)
X_train, X_test, y_train, y_test = train_test_split(X_C2, y_C2,
random_state = 0)
fig, subaxes = plt.subplots(1, 1, figsize=(7, 5))
clf = LogisticRegression().fit(X_train, y_train)
title = 'Logistic regression, simple synthetic dataset C = {:.3f}'.format(1.0)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train,
None, None, title, subaxes)
print('Accuracy of Logistic regression classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Logistic regression classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# -
# #### Logistic regression regularization: C parameter
# +
X_train, X_test, y_train, y_test = (
train_test_split(X_fruits_2d.as_matrix(),
y_fruits_apple.as_matrix(),
random_state=0))
fig, subaxes = plt.subplots(3, 1, figsize=(4, 10))
for this_C, subplot in zip([0.1, 1, 100], subaxes):
clf = LogisticRegression(C=this_C).fit(X_train, y_train)
title ='Logistic regression (apple vs rest), C = {:.3f}'.format(this_C)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train,
X_test, y_test, title,
subplot)
plt.tight_layout()
# -
# #### Application to real dataset
# +
from sklearn.linear_model import LogisticRegression
X_train, X_test, y_train, y_test = train_test_split(X_cancer, y_cancer, random_state = 0)
clf = LogisticRegression().fit(X_train, y_train)
print('Breast cancer dataset')
print('Accuracy of Logistic regression classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Logistic regression classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# -
# ### Support Vector Machines
# #### Linear Support Vector Machine
# +
from sklearn.svm import SVC
from adspy_shared_utilities import plot_class_regions_for_classifier_subplot
X_train, X_test, y_train, y_test = train_test_split(X_C2, y_C2, random_state = 0)
fig, subaxes = plt.subplots(1, 1, figsize=(7, 5))
this_C = 1.0
clf = SVC(kernel = 'linear', C=this_C).fit(X_train, y_train)
title = 'Linear SVC, C = {:.3f}'.format(this_C)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train, None, None, title, subaxes)
# -
# #### Linear Support Vector Machine: C parameter
# +
from sklearn.svm import LinearSVC
from adspy_shared_utilities import plot_class_regions_for_classifier
X_train, X_test, y_train, y_test = train_test_split(X_C2, y_C2, random_state = 0)
fig, subaxes = plt.subplots(1, 2, figsize=(8, 4))
for this_C, subplot in zip([0.00001, 100], subaxes):
clf = LinearSVC(C=this_C).fit(X_train, y_train)
title = 'Linear SVC, C = {:.5f}'.format(this_C)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train,
None, None, title, subplot)
plt.tight_layout()
# -
# #### Application to real dataset
# +
from sklearn.svm import LinearSVC
X_train, X_test, y_train, y_test = train_test_split(X_cancer, y_cancer, random_state = 0)
clf = LinearSVC().fit(X_train, y_train)
print('Breast cancer dataset')
print('Accuracy of Linear SVC classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Linear SVC classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# -
# ### Multi-class classification with linear models
# #### LinearSVC with M classes generates M one vs rest classifiers.
# +
from sklearn.svm import LinearSVC
X_train, X_test, y_train, y_test = train_test_split(X_fruits_2d, y_fruits_2d, random_state = 0)
clf = LinearSVC(C=5, random_state = 67).fit(X_train, y_train)
print('Coefficients:\n', clf.coef_)
print('Intercepts:\n', clf.intercept_)
# -
# #### Multi-class results on the fruit dataset
# +
plt.figure(figsize=(6,6))
colors = ['r', 'g', 'b', 'y']
cmap_fruits = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FFFF00'])
plt.scatter(X_fruits_2d[['height']], X_fruits_2d[['width']],
c=y_fruits_2d, cmap=cmap_fruits, edgecolor = 'black', alpha=.7)
x_0_range = np.linspace(-10, 15)
for w, b, color in zip(clf.coef_, clf.intercept_, ['r', 'g', 'b', 'y']):
# Since class prediction with a linear model uses the formula y = w_0 x_0 + w_1 x_1 + b,
# and the decision boundary is defined as being all points with y = 0, to plot x_1 as a
# function of x_0 we just solve w_0 x_0 + w_1 x_1 + b = 0 for x_1:
plt.plot(x_0_range, -(x_0_range * w[0] + b) / w[1], c=color, alpha=.8)
plt.legend(target_names_fruits)
plt.xlabel('height')
plt.ylabel('width')
plt.xlim(-2, 12)
plt.ylim(-2, 15)
plt.show()
# -
# ## Kernelized Support Vector Machines
# ### Classification
# +
from sklearn.svm import SVC
from adspy_shared_utilities import plot_class_regions_for_classifier
X_train, X_test, y_train, y_test = train_test_split(X_D2, y_D2, random_state = 0)
# The default SVC kernel is radial basis function (RBF)
plot_class_regions_for_classifier(SVC().fit(X_train, y_train),
X_train, y_train, None, None,
'Support Vector Classifier: RBF kernel')
# Compare decision boundries with polynomial kernel, degree = 3
plot_class_regions_for_classifier(SVC(kernel = 'poly', degree = 3)
.fit(X_train, y_train), X_train,
y_train, None, None,
'Support Vector Classifier: Polynomial kernel, degree = 3')
# -
# #### Support Vector Machine with RBF kernel: gamma parameter
# +
from adspy_shared_utilities import plot_class_regions_for_classifier
X_train, X_test, y_train, y_test = train_test_split(X_D2, y_D2, random_state = 0)
fig, subaxes = plt.subplots(3, 1, figsize=(4, 11))
for this_gamma, subplot in zip([0.01, 1.0, 10.0], subaxes):
clf = SVC(kernel = 'rbf', gamma=this_gamma).fit(X_train, y_train)
title = 'Support Vector Classifier: \nRBF kernel, gamma = {:.2f}'.format(this_gamma)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train,
None, None, title, subplot)
plt.tight_layout()
# -
# #### Support Vector Machine with RBF kernel: using both C and gamma parameter
# +
from sklearn.svm import SVC
from adspy_shared_utilities import plot_class_regions_for_classifier_subplot
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_D2, y_D2, random_state = 0)
fig, subaxes = plt.subplots(3, 4, figsize=(15, 10), dpi=50)
for this_gamma, this_axis in zip([0.01, 1, 5], subaxes):
for this_C, subplot in zip([0.1, 1, 15, 250], this_axis):
title = 'gamma = {:.2f}, C = {:.2f}'.format(this_gamma, this_C)
clf = SVC(kernel = 'rbf', gamma = this_gamma,
C = this_C).fit(X_train, y_train)
plot_class_regions_for_classifier_subplot(clf, X_train, y_train,
X_test, y_test, title,
subplot)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
# -
# ### Application of SVMs to a real dataset: unnormalized data
# +
from sklearn.svm import SVC
X_train, X_test, y_train, y_test = train_test_split(X_cancer, y_cancer,
random_state = 0)
clf = SVC(C=10).fit(X_train, y_train)
print('Breast cancer dataset (unnormalized features)')
print('Accuracy of RBF-kernel SVC on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of RBF-kernel SVC on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# -
# ### Application of SVMs to a real dataset: normalized data with feature preprocessing using minmax scaling
# +
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
clf = SVC(C=10).fit(X_train_scaled, y_train)
print('Breast cancer dataset (normalized with MinMax scaling)')
print('RBF-kernel SVC (with MinMax scaling) training set accuracy: {:.2f}'
.format(clf.score(X_train_scaled, y_train)))
print('RBF-kernel SVC (with MinMax scaling) test set accuracy: {:.2f}'
.format(clf.score(X_test_scaled, y_test)))
# -
# ## Cross-validation
# ### Example based on k-NN classifier with fruit dataset (2 features)
# +
from sklearn.model_selection import cross_val_score
clf = KNeighborsClassifier(n_neighbors = 5)
X = X_fruits_2d.as_matrix()
y = y_fruits_2d.as_matrix()
cv_scores = cross_val_score(clf, X, y)
print('Cross-validation scores (3-fold):', cv_scores)
print('Mean cross-validation score (3-fold): {:.3f}'
.format(np.mean(cv_scores)))
# -
# ### A note on performing cross-validation for more advanced scenarios.
#
# In some cases (e.g. when feature values have very different ranges), we've seen the need to scale or normalize the training and test sets before use with a classifier. The proper way to do cross-validation when you need to scale the data is *not* to scale the entire dataset with a single transform, since this will indirectly leak information into the training data about the whole dataset, including the test data (see the lecture on data leakage later in the course). Instead, scaling/normalizing must be computed and applied for each cross-validation fold separately. To do this, the easiest way in scikit-learn is to use *pipelines*. While these are beyond the scope of this course, further information is available in the scikit-learn documentation here:
#
# http://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html
#
# or the Pipeline section in the recommended textbook: Introduction to Machine Learning with Python by <NAME> and <NAME> (O'Reilly Media).
# ## Validation curve example
# +
from sklearn.svm import SVC
from sklearn.model_selection import validation_curve
param_range = np.logspace(-3, 3, 4)
train_scores, test_scores = validation_curve(SVC(), X, y,
param_name='gamma',
param_range=param_range, cv=3)
# -
print(train_scores)
print(test_scores)
# +
# This code based on scikit-learn validation_plot example
# See: http://scikit-learn.org/stable/auto_examples/model_selection/plot_validation_curve.html
plt.figure()
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title('Validation Curve with SVM')
plt.xlabel('$\gamma$ (gamma)')
plt.ylabel('Score')
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, train_scores_mean, label='Training score',
color='darkorange', lw=lw)
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2,
color='darkorange', lw=lw)
plt.semilogx(param_range, test_scores_mean, label='Cross-validation score',
color='navy', lw=lw)
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2,
color='navy', lw=lw)
plt.legend(loc='best')
plt.show()
# -
# ## Decision Trees
# +
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from adspy_shared_utilities import plot_decision_tree
from sklearn.model_selection import train_test_split
iris = load_iris()
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state = 3)
clf = DecisionTreeClassifier().fit(X_train, y_train)
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
# -
# #### Setting max decision tree depth to help avoid overfitting
# +
clf2 = DecisionTreeClassifier(max_depth = 3).fit(X_train, y_train)
print('Accuracy of Decision Tree classifier on training set: {:.2f}'
.format(clf2.score(X_train, y_train)))
print('Accuracy of Decision Tree classifier on test set: {:.2f}'
.format(clf2.score(X_test, y_test)))
# -
# #### Visualizing decision trees
plot_decision_tree(clf, iris.feature_names, iris.target_names)
# #### Pre-pruned version (max_depth = 3)
plot_decision_tree(clf2, iris.feature_names, iris.target_names)
# #### Feature importance
# +
from adspy_shared_utilities import plot_feature_importances
plt.figure(figsize=(10,4), dpi=80)
plot_feature_importances(clf, iris.feature_names)
plt.show()
print('Feature importances: {}'.format(clf.feature_importances_))
# +
from sklearn.tree import DecisionTreeClassifier
from adspy_shared_utilities import plot_class_regions_for_classifier_subplot
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, random_state = 0)
fig, subaxes = plt.subplots(6, 1, figsize=(6, 32))
pair_list = [[0,1], [0,2], [0,3], [1,2], [1,3], [2,3]]
tree_max_depth = 4
for pair, axis in zip(pair_list, subaxes):
X = X_train[:, pair]
y = y_train
clf = DecisionTreeClassifier(max_depth=tree_max_depth).fit(X, y)
title = 'Decision Tree, max_depth = {:d}'.format(tree_max_depth)
plot_class_regions_for_classifier_subplot(clf, X, y, None,
None, title, axis,
iris.target_names)
axis.set_xlabel(iris.feature_names[pair[0]])
axis.set_ylabel(iris.feature_names[pair[1]])
plt.tight_layout()
plt.show()
# -
# #### Decision Trees on a real-world dataset
# +
from sklearn.tree import DecisionTreeClassifier
from adspy_shared_utilities import plot_decision_tree
from adspy_shared_utilities import plot_feature_importances
X_train, X_test, y_train, y_test = train_test_split(X_cancer, y_cancer, random_state = 0)
clf = DecisionTreeClassifier(max_depth = 4, min_samples_leaf = 8,
random_state = 0).fit(X_train, y_train)
plot_decision_tree(clf, cancer.feature_names, cancer.target_names)
# +
print('Breast cancer dataset: decision tree')
print('Accuracy of DT classifier on training set: {:.2f}'
.format(clf.score(X_train, y_train)))
print('Accuracy of DT classifier on test set: {:.2f}'
.format(clf.score(X_test, y_test)))
plt.figure(figsize=(10,6),dpi=80)
plot_feature_importances(clf, cancer.feature_names)
plt.tight_layout()
plt.show()
| Applied_ML_with_Python/Module_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import csv
import logging
import requests
from bs4 import BeautifulSoup
import time
import re
import json
logging.basicConfig(format='%(message)s')
def normalize_query(q):
q = q.replace('\n', '')
q = q.replace('(', '(')
q = q.replace(')', ')')
q = re.sub(r"\(.+\)$", "", q)
q = re.sub('(!|\u3000|/|\\s|>|<|\\.)+', " ", q)
return q
def search(q):
url_search = 'https://eiga.com/search/{}' .format(requests.utils.quote(normalize_query(q), safe=''))
res_search = requests.get(url_search )
res_search.encoding = res_search.apparent_encoding
soup_search = BeautifulSoup(res_search.content, "lxml")
result = soup_search.find('section', attrs={"id": "rslt-movie"})
if result != None:
path = result.find('li', attrs={"class": "col-s-3"}).find('a')["href"]
return path
else:
return None
def scrape(query, movie_id, year):
data = {
"id": -1,
"rating": -1,
"check-in":-1,
"review-count":-1
}
print("START : " + query)
path = search(query)
if path is None:
logging.warning("**************************************************")
logging.warning(query + " HAS NO RESULT")
logging.warning("**************************************************")
with open('./eigacom_id_table_{}.txt'.format(year), 'a') as f:
print(str(movie_id), "-1")
f.write(str(movie_id) + '\t' + '-1' + '\n')
return None
url_review = 'https://eiga.com' + path
eigacom_id = re.sub("\\D", "", path)
with open('./eigacom_id_table_{}.txt'.format(year), 'a') as f:
print(str(movie_id), eigacom_id)
f.write(str(movie_id) + '\t' + eigacom_id + '\n')
res = requests.get(url_review)
res.encoding = res.apparent_encoding
soup = BeautifulSoup(res.content, "lxml")
rating = soup.find('span', attrs={"class": "rating-star"})
review_count = soup.find('span', attrs={"itemprop": "reviewCount"})
check_in = soup.find('a', attrs={"class": "icon-movie-checkin"}).find('strong')
data["rating"] = 0 if rating.text == '-' else float(rating.text)
data["review-count"] = 0 if review_count is None else int(review_count.text)
data["check-in"] = 0 if check_in is None else int(check_in.text)
return data
def main():
# years = ['2017', '2016', '2015', '2014', '2013']
years = ['2014', '2013']
for y in years:
print(y)
with open( '../{}_movie_clean'.format(y), 'r') as movie_clean:
for line in csv.reader(movie_clean, delimiter='\t'):
movie_id, title, *_ = line
if y == "2014" and int(movie_id) <= 540: #あとで消す
continue
output_file = './{0}/{1}.json'.format(y, movie_id)
with open(output_file, 'w') as f:
print(movie_id)
data = scrape(title,movie_id, y)
if data == None:
continue
data["id"] = int(movie_id)
json.dump(data, f, ensure_ascii=False, indent=2)
time.sleep(1)
if __name__ == '__main__':
main()
| eigacom_star_rating/eigacom_star_rating_scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Import required libraries
import os
import argparse
import networkx as nx
import math
import numpy as np
import helper
import astar
import tensorflow.compat.v1 as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
import csv
from random import randint, random, seed, uniform
import time
# (restrict tensorflow memory growth)
tf.disable_v2_behavior()
os.environ["CUDA_VISIBLE_DEVICES"]="1"
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
# neural network parameters
mb_size = 256
h_Q_dim = 512
h_P_dim = 512
c = 0
# learning rate
lr = 1e-4
# problem dimensions
dim = 2
dataElements = dim*3 + 2500 # sample (2D), init(2D), goal(2D), occup_grid(100)
z_dim = 4 # latent
X_dim = dim # samples
y_dim = dim # reconstruction of the original point
c_dim = dataElements - dim # dimension of conditioning variable
# +
# define networks
print("X_dim = ",X_dim)
print("c_dim = ",c_dim)
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=[None, X_dim])
c = tf.placeholder(tf.float32, shape=[None, c_dim])
# Q
inputs_Q = tf.concat(axis=1, values=[X,c])
dense_Q1 = tf.layers.dense(inputs=inputs_Q, units=h_Q_dim, activation=tf.nn.relu)
dropout_Q1 = tf.layers.dropout(inputs=dense_Q1, rate=0.5)
dense_Q2 = tf.layers.dense(inputs=dropout_Q1, units=h_Q_dim, activation=tf.nn.relu)
z_mu = tf.layers.dense(inputs=dense_Q2, units=z_dim) # output here is z_mu
z_logvar = tf.layers.dense(inputs=dense_Q2, units=z_dim) # output here is z_logvar
# P
eps = tf.random_normal(shape=tf.shape(z_mu))
z = z_mu + tf.exp(z_logvar / 2) * eps
inputs_P = tf.concat(axis=1, values=[z,c])
dense_P1 = tf.layers.dense(inputs=inputs_P, units=h_P_dim, activation=tf.nn.relu)
dropout_P1 = tf.layers.dropout(inputs=dense_P1, rate=0.5)
dense_P2 = tf.layers.dense(inputs=dropout_P1, units=h_P_dim, activation=tf.nn.relu)
y = tf.layers.dense(inputs=dense_P2, units=X_dim) # fix to also output y
w = [[1, 1]];
recon_loss = tf.losses.mean_squared_error(labels=X, predictions=y, weights=w)
kl_loss = 10**-4 * 2 * tf.reduce_sum(tf.exp(z_logvar) + z_mu**2 - 1. - z_logvar, 1)
cvae_loss = tf.reduce_mean(kl_loss + recon_loss)
train_step = tf.train.AdamOptimizer(lr).minimize(cvae_loss)
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
it = 0;
# -
saver = tf.train.Saver()
path_ = os.getcwd() + "/../Models/checkpoints_NS_50x50/model.ckpt"
print("path = ",path_)
try:
saver.restore(sess, path_)
print("Model Restored!!")
except Exception as e:
print("Could not restore checkpoint!")
print(e)
class LEGO:
def __init__(self, start, goal, occ_grid,
path_resolution=0.005,
max_samples=1000,
graph_file="../Sparse_Graph/sparse_graph.graphml"):
self.start = np.array([start[0], start[1]])
self.goal = np.array([goal[0], goal[1]])
self.occ_grid = occ_grid
self.path_resolution = path_resolution
self.max_samples = max_samples
self.uniformG = nx.read_graphml(graph_file)
self.dim = self.occ_grid.shape[0]
self.G = self.uniformG.copy()
self.node_id = 0
self.nodes_to_index = dict()
def planning(self):
num_samples = 0
while(num_samples < self.max_samples):
num_viz = 100
num_samples += num_viz
c_sample_seed = np.concatenate((self.start, self.goal, self.occ_grid.reshape(2500)))
c_sample = np.repeat([c_sample_seed],num_viz,axis=0)
b_nodes, z_viz = sess.run([y, z], feed_dict={z: np.random.randn(num_viz, z_dim), c: c_sample})
new_nodes_ind = self.uniformG.number_of_nodes()
if num_samples == num_viz:
self.uniformG.add_node(str(self.uniformG.number_of_nodes()),state = helper.numpy_to_state(self.start))
self.uniformG.add_node(str(self.uniformG.number_of_nodes()),state = helper.numpy_to_state(self.goal))
for node in b_nodes:
if helper.is_point_free(node, self.occ_grid):
self.uniformG.add_node(str(self.uniformG.number_of_nodes()),state = helper.numpy_to_state(node))
for i in range(new_nodes_ind, self.uniformG.number_of_nodes()):
i_str = str(i)
for j in self.uniformG.nodes():
if i_str!=j:
s = helper.state_to_numpy(self.uniformG.nodes[i_str]['state'])
t = helper.state_to_numpy(self.uniformG.nodes[j]['state'])
if self.calc_dist(s,t) < 0.08:
self.uniformG.add_edge(i_str, j)
self.uniformG[i_str][j]['weight'] = self.calc_dist(s, t)
path, dis = astar.astar1(self.uniformG, str(200), str(201), self.occ_grid, inc = 0, h_weight=1)
end_time = time.time()
if dis != None:
return path, dis, end_time
return None, None, end_time
def draw_graph(self, graph, rnd=None):
plt.clf()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
occ_grid_file_addr = "../dataset/occ_grid.txt"
fig1 = plt.figure(figsize=(10,6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
for i in range(self.dim):
for j in range(self.dim):
if(self.occ_grid[i,j]==0):
ax1.add_patch(patches.Rectangle(
(i/self.dim,j/self.dim), # (x,y)
1.0/self.dim, # width
1.0/self.dim, # height
alpha=6,
color = "#676767"
))
if rnd is not None:
plt.scatter(rnd[0], rnd[1], color = "black", s = 30)
for index in graph:
s = helper.state_to_numpy(graph.nodes[index]['state'])
plt.scatter(s[0], s[1], color = "green", s = 30)
for (u,v,d) in graph.edges(data='weight'):
u_state = helper.state_to_numpy(graph.nodes[u]['state'])
v_state = helper.state_to_numpy(graph.nodes[v]['state'])
plt.plot([u_state[0],v_state[0]],[u_state[1],v_state[1]],"-y")
for sources in self.source_nodes:
plt.scatter(sources[0], sources[1], color = "magenta", s = 100, edgecolors="black")
plt.scatter(self.start[0], self.start[1], color = "red", s = 100, edgecolors="black")
plt.scatter(self.goal[0], self.goal[1], color = "blue", s = 100, edgecolors="black")
plt.xlim(0,1)
plt.ylim(0,1)
plt.xticks([])
plt.yticks([])
plt.show()
plt.pause(0.01)
def draw_nodes(self, graph, nearest_node,rnd_node):
plt.clf()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
occ_grid_file_addr = "../dataset/occ_grid.txt"
fig1 = plt.figure(figsize=(10,6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
for i in range(50):
for j in range(50):
if(self.occ_grid[i,j]==0):
ax1.add_patch(patches.Rectangle(
(i/50.0,j/50.0), # (x,y)
0.02, # width
0.02, # height
alpha=0.6,
color = "blue"
))
plt.scatter(rnd_node[0], rnd_node[1], color = "black", s = 100)
plt.scatter(nearest_node[0], nearest_node[1], color = "red", s = 100)
for index in graph:
s = helper.state_to_numpy(graph.nodes[index]['state'])
plt.scatter(s[0], s[1], color = "green", s = 30)
for (u,v,d) in graph.edges(data='weight'):
u_state = helper.state_to_numpy(graph.nodes[u]['state'])
v_state = helper.state_to_numpy(graph.nodes[v]['state'])
plt.plot([u_state[0],v_state[0]],[u_state[1],v_state[1]],"-y")
for sources in self.source_nodes:
plt.scatter(sources[0], sources[1], color = "magenta", s = 100, edgecolors="black",alpha = 0.5)
plt.scatter(self.start[0], self.start[1], color = "blue", s = 50, edgecolors="black")
plt.scatter(self.goal[0], self.goal[1], color = "blue", s = 50, edgecolors="black")
plt.xlim(0,1)
plt.ylim(0,1)
plt.xticks([])
plt.yticks([])
plt.show()
plt.pause(0.01)
@staticmethod
def get_nearest_node_index(node_list, rnd_node):
# print(node_list)
dlist = [(node[0] - rnd_node[0]) ** 2 + (node[1] - rnd_node[1])
** 2 for node in node_list]
minind = dlist.index(min(dlist))
return minind
@staticmethod
def calc_distance_and_angle(from_node, to_node):
dx = to_node[0] - from_node[0]
dy = to_node[1] - from_node[1]
d = math.hypot(dx, dy)
theta = math.atan2(dy, dx)
return d, theta
@staticmethod
def calc_dist(from_node, to_node):
return np.linalg.norm(to_node-from_node)
def draw_lego_samples(self, b_nodes):
plt.clf()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
occ_grid_file_addr = "dataset/occ_grid.txt"
fig1 = plt.figure(figsize=(10,6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
for i in range(self.dim):
for j in range(self.dim):
if(self.occ_grid[i,j]==0):
ax1.add_patch(patches.Rectangle(
(i/self.dim,j/self.dim), # (x,y)
1/self.dim, # width
1/self.dim, # height
alpha=6,
color = "#676767"
))
plt.scatter(self.start[0], self.start[1], color = "red", s = 100, edgecolors="black")
plt.scatter(self.goal[0], self.goal[1], color = "blue", s = 100, edgecolors="black")
plt.scatter(b_nodes[:,0],b_nodes[:,1], color="green", s=20)
plt.xlim(0,1)
plt.ylim(0,1)
plt.xticks([])
plt.yticks([])
plt.show()
plt.pause(0.01)
def draw_path(self, path):
plt.clf()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
occ_grid_file_addr = "dataset/occ_grid.txt"
fig1 = plt.figure(figsize=(10,6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
for i in range(self.dim):
for j in range(self.dim):
if(self.occ_grid[i,j]==0):
ax1.add_patch(patches.Rectangle(
(i/self.dim,j/self.dim), # (x,y)
1/self.dim, # width
1/self.dim, # height
alpha=6,
color = "#676767",
zorder=3
))
for index in range(self.uniformG.number_of_nodes()):
s = helper.state_to_numpy(self.uniformG.nodes[str(index)]['state'])
plt.scatter(s[0], s[1], color = "indigo", s = 50,zorder=2)
for index in range(self.G.number_of_nodes()):
s = helper.state_to_numpy(self.G.nodes[str(index)]['state'])
plt.scatter(s[0], s[1], color = "forestgreen", s = 30,zorder=2)
for index in range(len(path)-1):
u_state = helper.state_to_numpy(self.uniformG.nodes[path[index]]['state'])
v_state = helper.state_to_numpy(self.uniformG.nodes[path[index+1]]['state'])
plt.plot([u_state[0],v_state[0]],[u_state[1],v_state[1]],"-r",zorder=1)
plt.scatter(self.start[0], self.start[1], color = "red", s = 100, edgecolors="black",zorder=2)
plt.scatter(self.goal[0], self.goal[1], color = "blue", s = 100, edgecolors="black",zorder=2)
plt.xlim(0,1)
plt.ylim(0,1)
plt.xticks([])
plt.yticks([])
plt.show()
plt.pause(0.01)
#Load Examples
start_pos = np.loadtxt("../Examples/start_pos.txt")
goal_pos = np.loadtxt("../Examples/goal_pos.txt")
occ_grids = np.loadtxt("../Examples/occ_grid.txt")
def decrease_resolution(occ_grid):
new_occ_grid = np.empty([10, 10],dtype=int)
dim = occ_grid.shape[0]
scale = int(dim/10)
# print(scale)
i = 0
j = 0
while i < dim:
j=0
while j< dim:
flag=0
for m in range(scale):
for n in range(scale):
if occ_grid[i+m][j+n] == 1: #is free
flag = 1
if flag == 1:
new_occ_grid[int(i/scale)][int(j/scale)]=1
else:
new_occ_grid[int(i/scale)][int(j/scale)]=0
# print("index:",int(i/5),int(j/5)," Val:",new_occ_grid[int(i/5)][int(j/5)])
j+=scale
i+=scale
return new_occ_grid
#choose example from test_example
i = randint(0,10)
print(i)
occ_g = occ_grids[i]
occ_g_50 = occ_g.reshape(50,50)
new_occ_g = decrease_resolution(occ_g_50)
start = start_pos[i]
goal = goal_pos[i]
# +
#Show the 10x10 figure formed after application of kernel
fig1 = plt.figure(figsize=(10,6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
for i in range(10):
for j in range(10):
if(new_occ_g[i,j]==0):
ax1.add_patch(patches.Rectangle(
(i/10.0, j/10.0), # (x,y)
0.1, # width
0.1, # height
alpha=6,
color = "#676767"
))
plt.scatter(start[0], start[1], color = "red", s = 100, edgecolors="black")
plt.scatter(goal[0], goal[1], color = "blue", s = 100, edgecolors="black")
plt.xlim(0,1)
plt.ylim(0,1)
plt.show()
# +
#Show the 50x50 figure
fig1 = plt.figure(figsize=(10,6), dpi=80)
ax1 = fig1.add_subplot(111, aspect='equal')
for i in range(50):
for j in range(50):
if(occ_g_50[i,j]==0):
ax1.add_patch(patches.Rectangle(
(i/50.0, j/50.0), # (x,y)
1/50.0, # width
1/50.0, # height
alpha=6,
color = "#676767"
))
plt.scatter(start[0], start[1], color = "red", s = 100, edgecolors="black")
plt.scatter(goal[0], goal[1], color = "blue", s = 100, edgecolors="black")
plt.xlim(0,1)
plt.ylim(0,1)
plt.show()
# -
#Run LEGO-CVAE
planner = LEGO(start, goal, occ_g_50, max_samples = 1000)
start_time = time.time()
path, dis, end_time = planner.planning()
if dis is None:
print("Cannot find path")
planner.draw_path([])
else:
print("found path!!")
planner.draw_path(path)
| LEGO/LEGO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from agent import *
from environment import *
from controller import Controller
# +
agent = ClassifierDataCollectionAgent()
environment = MTurkSurveyUser(filePaths=[
'survey/ver2_mturk/results/01_1st_Batch_3137574_batch_results.csv',
'survey/ver2_mturk/results/02_Batch_3148398_batch_results.csv',
'survey/ver2_mturk/results/03_Batch_3149214_batch_results.csv',
], filterFunc=(lambda r: ord(r['rawWorkerID'][-1]) % 3 == 1))
simulationWeek = 10
controller = Controller(agent, environment, simulationWeek=simulationWeek, verbose=False)
controller.execute()
agent.saveModel("agent/pretrained_models/classifiers/mturk_3000_m3_r1.txt")
# -
| HumanModeling/main_get_classifier_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture 2: Story Proofs, Axioms of Probability
#
#
# ## Stat 110, Prof. <NAME>, Harvard University
#
# ----
# ## Sampling, continued
#
# Choose $k$ objects out of $n$
#
# | | ordered | unordered |
# |-----------|:---------:|:-----------:|
# | __w/ replacement__ | $n^k$ | ??? |
# | __w/o replacement__ | $n(n-1)(n-2) \ldots (n-k+1)$ | $\binom{n}{k}$ |
#
#
# * __ordered, w/ replacement__: there are $n$ choices for each $k$, so this follows from the multiplication rule.
# * __ordered, w/out replacement__: there are $n$ choices for the 1<sup>st</sup> position; $n-1$ for the 2<sup>nd</sup>; $n-2$ for the 3<sup>rd</sup>; and $n-k+1$ for the $k$<sup>th</sup>.
# * __unordered, w/ replacement__: _we will get to this shortly..._
# * __unordered, w/out replacement__: the binomial coefficient; think of choosing a hand from a deck of cards.
#
# To complete our discussion of sampling, recall that of the four ways of sampling as shown above, all except the case of __unordered, with replacement__ follow immediately from the multiplication rule.
#
# Now the solution is $\binom{n+k-1}{k}$, but let's see if we can prove this.
# ### A simple proof
#
# We start off with some simple edge cases.
#
# If we let $k=0$, then we are not choosing anything, and so there is only one solution to this case: the empty set.
# \begin\{align\}
# \text{let }k = 0 \Rightarrow \binom{n+0-1}{0} &= \binom{n-1}{0} \\\\
# &= 1
# \end\{align\}
#
# If we let $k=1$, then there are $n$ ways we could select a single item out of a total of $n$.
# \begin\{align\}
# \text{let }k = 1 \Rightarrow \binom{n+1-1}{1} &= \binom{n}{1} \\\\
# &= n
# \end\{align\}
#
# Now let's consider a simple but non-trivial case. If we let $n=2$, then
# \begin\{align\}
# \text{let }n = 2 \Rightarrow \binom{2+k-1}{k} &= \binom{k+1}{k} \\\\
# &= \binom{k+1}{1} \\\\
# &= k+1
# \end\{align\}
#
# Here's an example of $n=2, k=7$:
#
# ![title](images/L0201.png)
#
# But notice that we are really doing here is placing $n-1$ dividers between $k$ elements. Or in other words, we are choosing $k$ slots for the elements out of $n+k-1$ slots in total.
#
# ![title](images/L0202.png)
#
# And we can easily build on this understanding to other values of $n$ and $k$.
#
# ![title](images/L0203.png)
#
# And the number of ways to select $k$ items out of $n$, unordered and with replacement, is:
#
# \begin\{align\}
# \text{choose k out n items, unordered, with replacement} &= \binom{n+k-1}{k} \\\\
# &= \binom{n+k-1}{n-1}
# \end\{align\}
# ## Story Proof
# A story proof is a proof by _interpretation_. No algebra needed, just intuition.
#
# Here are some examples that we have already come across.
#
# ### Ex. 1
# $$ \binom{n}{k} = \binom{n}{n-k} $$
#
# Choosing $k$ elements out of $n$ is the same as choosing $n-k$ elements out of $n$. We've just seen this above!
#
# ### Ex. 2
# $$ n \binom{n-1}{k-1} = k \binom{n}{k} $$
#
# Imagine picking $k$ people out of $n$, and then designating of the $k$ as president. You can either select all $k$ people, and then choose 1 from among those $k$. Or, you can select a president, and then choose the remaining $k-1$ out of the $n-1$ people.
#
# ### Ex. 3
# $$ \binom{m+n}{k} = \sum_{j=0}^{k} \binom{m}{j} \binom{n}{k-j} $$
#
# Suppose you had $m$ boys and $n$ girls, and you needed to select $k$ children out of them all. You could do this by first choosing $j$ out of the $m$ boys, and then choosing $k-j$ of the girls. You would have to apply the multiplication rule to get the total number of combinations, and then sum them all up. This is known as [Vandermonde's identity](https://en.wikipedia.org/wiki/Vandermonde%27s_identity).
#
# ----
# ## Non-naïve Definition of Probability
#
# Now we move from the naïve definition of probability into the more abstract and general.
#
# #### Definition: non-naïve definition of probability
# > Let $S$ be a sample space, the set of all possible outcomes of some experiment. $S$ might not be _finite_ anymore, and all outcomes might not be _equally probable_, either.
# >
# > Let $A$ be an event in, or a subset of, $S$.
# >
# > Let $P$ be a function that maps an event $A$ to some value from $0$ to $1$.
#
# And we have the following axioms:
#
# ### Axiom 1
#
# > \begin\{align\}
# > P(\emptyset) = 0 \\\\
# > P(\Omega) = 1
# > \end\{align\}
#
# The probability of the empty set, or a null event, is by definition $0$.
#
# The probability of the entire space is by definition $1$.
#
# These are the 2 extremes, and this is why <NAME> lumps them together in one rule.
#
# ### Axiom 2
#
# > $$ P(\bigcup_{n=1}^{\infty} A_{n}) = \sum_{n=1}^{\infty} P(A_{n}) \iff A_1, A_2, ... A_n \text{ are disjoint (non-overlapping)} $$
#
# Every theorem about probability follows from these 2 rules. You might want to have a look at [Kolmogorov's axioms](http://mathworld.wolfram.com/KolmogorovsAxioms.html).
#
# ----
# View [Lecture 2: Story Proofs, Axioms of Probability | Statistics 110](http://bit.ly/2nOw0JV) on YouTube.
| Lecture_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
import pickle
def compute_crr(loc, date):
CRRCoefficients = [9.0e+08,
-8.28687176e-02,
1.90003955e-01,
-4.6e+01,
3.99996036e+00,
-2.3e+02,
3.99997842e+00,
2.99966504e+00]
ir = np.load(f"/data/GPM_HIM8/exp_paper/X_B14_{loc}_{date}.npy")[:,::4,::4]
wv = np.load(f"/data/GPM_HIM8/exp_paper/X_B8_{loc}_{date}.npy")[:,::4,::4]
dif = ir-wv
H_IR = CRRCoefficients[0] * np.exp(ir * CRRCoefficients[1])
C_IR = CRRCoefficients[2] * ir + CRRCoefficients[3]
W_IR = CRRCoefficients[4] * \
np.exp( -0.5 * ((ir + CRRCoefficients[5]) / CRRCoefficients[6]) ** 2.0) + \
CRRCoefficients[7]
return H_IR * np.exp(-0.5 * ((((dif) - C_IR) / W_IR) ** 2.0))
def gpm_prec(loc, date):
return np.clip(np.load(f"/data/GPM_HIM8/exp_paper/Y_{loc}_{date}.npy"),0,40)
def cnn_prec(loc, date, b1, b2, seed):
i = ["201811","201812","201901","201902"].index(date)
return np.clip(np.load(f"/data/GPM_HIM8/models/yhat_{loc}_v{i}_b{b1}_{b2}_s{seed}.npy"),0,40)
def rf_prec(loc, date):
rf = pickle.load(open(f'rf_{loc}_{date}.pkl', 'rb'))
b11 = np.load(f"/data/GPM_HIM8/exp_paper/X_B11_{loc}_{date}.npy")[:,::4,::4]
b16 = np.load(f"/data/GPM_HIM8/exp_paper/X_B16_{loc}_{date}.npy")[:,::4,::4]
x = np.stack((b11.flatten(),b16.flatten()),axis=1)
return rf.predict(x).reshape((-1,128,128))
# +
def mse_cmp(loc, date):
gpm = gpm_prec(loc, date)
crr = compute_crr(loc, date)
rf = rf_prec(loc, date)
cnn = cnn_prec(loc, date, 11, 16, 1)[:,:,:,0]
print(gpm.shape, crr.shape, rf.shape, cnn.shape)
plt.clf()
plt.title("MSE Comparison")
plt.bar(['CRR','RF','CNN'], [np.mean(np.square(gpm-crr)), np.mean(np.square(gpm-rf)), np.mean(np.square(cnn-crr))])
plt.savefig(f'MSE_{loc}_{date}.png')
for loc in ["SYD","NT","WA"]:
for date in ["201811","201812","201901","201902"]:
print(loc, date)
mse_cmp(loc, date)
# +
# %matplotlib inline
from sklearn.metrics import f1_score
import pandas as pd
import numpy as np
import pickle
def compute_crr(loc, date):
CRRCoefficients = [9.0e+08,
-8.28687176e-02,
1.90003955e-01,
-4.6e+01,
3.99996036e+00,
-2.3e+02,
3.99997842e+00,
2.99966504e+00]
ir = np.load(f"/data/GPM_HIM8/exp_paper/X_B14_{loc}_{date}.npy")[:,::4,::4]
wv = np.load(f"/data/GPM_HIM8/exp_paper/X_B8_{loc}_{date}.npy")[:,::4,::4]
dif = ir-wv
H_IR = CRRCoefficients[0] * np.exp(ir * CRRCoefficients[1])
C_IR = CRRCoefficients[2] * ir + CRRCoefficients[3]
W_IR = CRRCoefficients[4] * \
np.exp( -0.5 * ((ir + CRRCoefficients[5]) / CRRCoefficients[6]) ** 2.0) + \
CRRCoefficients[7]
return H_IR * np.exp(-0.5 * ((((dif) - C_IR) / W_IR) ** 2.0))
def gpm_prec(loc, date):
return np.clip(np.load(f"/data/GPM_HIM8/exp_paper/Y_{loc}_{date}.npy"),0,40)
def cnn_prec(loc, date, b1, b2, seed):
i = ["201811","201812","201901","201902"].index(date)
return np.clip(np.load(f"/data/GPM_HIM8/models/yhat_{loc}_v{i}_b{b1}_{b2}_s{seed}.npy"),0,40)
def rf_prec(loc, date):
rf = pickle.load(open(f'rf_{loc}_{date}.pkl', 'rb'))
b11 = np.load(f"/data/GPM_HIM8/exp_paper/X_B11_{loc}_{date}.npy")[:,::4,::4]
b16 = np.load(f"/data/GPM_HIM8/exp_paper/X_B16_{loc}_{date}.npy")[:,::4,::4]
x = np.stack((b11.flatten(),b16.flatten()),axis=1)
return rf.predict(x).reshape((-1,128,128))
data = []
for loc in ["SYD","NT","WA"]:
for date in ["201811","201812","201901","201902"]:
print(loc, date)
gpm = gpm_prec(loc, date)
crr = compute_crr(loc, date)
rf = rf_prec(loc, date)
cnn = cnn_prec(loc, date, 11, 16, 1)[:,:,:,0]
for t in [0.2,1.0,5.0]:
data.append({'Loc': loc,
'score': f1_score(gpm.flatten()>t,crr.flatten()>t),
'model': "CRR",
'threshold [mm/h]': t})
data.append({'Loc': loc,
'score': f1_score(gpm.flatten()>t,rf.flatten()>t),
'model': "RF",
'threshold [mm/h]': t})
data.append({'Loc': loc,
'score': f1_score(gpm.flatten()>t,cnn.flatten()>t),
'model': "CNN",
'threshold [mm/h]': t})
df = pd.DataFrame(data)
df
# +
import seaborn as sns
sns.set(style="whitegrid")
plt.clf()
plt.figure(figsize=(16,8))
ax = sns.barplot(data=df[df['Loc']=="SYD"], x='model', y='score', hue="threshold [mm/h]")
ax.set_title(f"Comparison F1 scores at different thresholds")
plt.plot()
# +
from sklearn.metrics import precision_score
def prec_cmp(loc, date, threshold):
gpm = gpm_prec(loc, date)
crr = compute_crr(loc, date)
rf = rf_prec(loc, date)
cnn = cnn_prec(loc, date, 11, 16, 1)[:,:,:,0]
print(gpm.shape, crr.shape, rf.shape, cnn.shape)
plt.clf()
plt.title("Precision Comparison")
plt.bar(['CRR','RF','CNN'], [precision_score(gpm.flatten()>threshold,crr.flatten()>threshold), precision_score(gpm.flatten()>threshold,rf.flatten()>threshold), precision_score(gpm.flatten()>threshold,cnn.flatten()>threshold)])
plt.savefig(f'Precision_{loc}_{date}.png')
for loc in ["SYD","NT","WA"]:
for date in ["201811","201812","201901","201902"]:
print(loc, date)
prec_cmp(loc, date, 1)
# +
from sklearn.metrics import recall_score
def rec_cmp(loc, date, threshold):
gpm = gpm_prec(loc, date)
crr = compute_crr(loc, date)
rf = rf_prec(loc, date)
cnn = cnn_prec(loc, date, 11, 16, 1)[:,:,:,0]
print(gpm.shape, crr.shape, rf.shape, cnn.shape)
plt.clf()
plt.title("Recall Comparison")
plt.bar(['CRR','RF','CNN'], [recall_score(gpm.flatten()>threshold,crr.flatten()>threshold), recall_score(gpm.flatten()>threshold,rf.flatten()>threshold), recall_score(gpm.flatten()>threshold,cnn.flatten()>threshold)])
plt.savefig(f'Recall_{loc}_{date}.png')
for loc in ["SYD","NT","WA"]:
for date in ["201811","201812","201901","201902"]:
print(loc, date)
rec_cmp(loc, date, 0.2)
# -
np.sum(pr>0.1)/pr.size
# +
#Histogram Prec
_ = plt.hist(prp, bins=100)
# +
import numpy as np
from matplotlib import pyplot as plt
from ipywidgets import interactive
def f(t):
plt.figure(figsize=(13,6))
_ = plt.hist(ir[prp>t], bins=100)
_ = plt.hist(dif[prp>t], bins=100)
interactive(f, t=(0., 10), continuous_update=False)
# -
plt.scatter(ir[prp>0.2],prp[prp>0.2])
plt.scatter(dif[prp>0.2],prp[prp>0.2])
plt.figure(figsize=(13,6))
plt.scatter(wv[prp>0.1],ir[prp>0.1])
plt.scatter(wv[prp>1],ir[prp>1])
plt.scatter(wv[prp>5],ir[prp>5])
plt.scatter(wv[prp>10],ir[prp>10])
plt.scatter(wv[prp>20],ir[prp>20])
plt.figure(figsize=(13,6))
plt.scatter(dif[prp>0.1],ir[prp>0.1])
plt.scatter(dif[prp>1],ir[prp>1])
plt.scatter(dif[prp>5],ir[prp>5])
plt.scatter(dif[prp>10],ir[prp>10])
plt.scatter(dif[prp>20],ir[prp>20])
# +
def f(t):
plt.figure(figsize=(13,6))
plt.scatter(wv[prp>0.1],ir[prp>0.1])
plt.scatter(wv[prp>t],ir[prp>t])
interactive(f, t=(0.5, 20), continuous_update=False)
# +
def calc_crr2(ir, dif):
CRRCoefficients = [9.0e+08,
-8.28687176e-02,
1.90003955e-01,
-4.6e+01,
3.99996036e+00,
-2.3e+02,
3.99997842e+00,
2.99966504e+00]
H_IR = CRRCoefficients[0] * np.exp(ir * CRRCoefficients[1])
C_IR = CRRCoefficients[2] * ir + CRRCoefficients[3]
W_IR = CRRCoefficients[4] * \
np.exp( -0.5 * ((ir + CRRCoefficients[5]) / CRRCoefficients[6]) ** 2.0) + \
CRRCoefficients[7]
return H_IR * np.exp(-0.5 * ((((dif) - C_IR) / W_IR) ** 2.0))
crr2 = calc_crr2(ir, dif)
plt.scatter(prp[prp>2],crr2[prp>2])
# +
def calc_crr(ir, dif):
a = 8e8
b = -0.082
c = 0.2
d = -45.0
f = 1.5
g = -215.0
h = 3.0
j = 2.0
return a*np.exp(b*ir)*np.exp( -0.5*np.square( (dif-(c*ir+d)) / (f*np.exp(-0.5*np.square((ir+g)/h))+j) ) )
crr = calc_crr(ir, dif)
plt.scatter(prp[prp>2],crr[prp>2])
# +
from sklearn.ensemble import RandomForestRegressor
x = np.stack((dif[prp>.2],ir[prp>.2]),axis=1)
rf = RandomForestRegressor(max_depth=6, random_state=0)
rf.fit(x, prp[prp>.2])
rf.score(x, prp[prp>.2])
# +
rfp = rf.predict(x)
plt.scatter(prp[prp>.2],rfp)
# +
x = np.stack((b11[prp>.2],b16[prp>.2]), axis=1)
rf2 = RandomForestRegressor(max_depth=6, random_state=0)
rf2.fit(x, prp[prp>.2])
rf2.score(x, prp[prp>.2])
# +
rfp2 = rf2.predict(x)
plt.scatter(prp[prp>.2],rfp2)
# +
prp_hat = np.clip(np.load("/data/GPM_HIM8/models/yhat_SYD_v0_b11_16_s1.npy").flatten(),0,40)
plt.scatter(prp[prp>.2],prp_hat[prp>.2])
# -
np.mean(np.square(prp[prp>.2]-prp_hat[prp>.2])),\
np.mean(np.square(prp[prp>.2]-rfp)),\
np.mean(np.square(prp[prp>.2]-rfp2)),\
np.mean(np.square(prp[prp>.2]-crr[prp>.2])),\
np.mean(np.square(prp[prp>.2]-crr2[prp>.2]))
# +
ir12 = np.load("/data/GPM_HIM8/exp_paper/X_B14_SYD_201812.npy")[:,::4,::4].flatten()
wv12 = np.load("/data/GPM_HIM8/exp_paper/X_B8_SYD_201812.npy")[:,::4,::4].flatten()
ir01 = np.load("/data/GPM_HIM8/exp_paper/X_B14_SYD_201901.npy")[:,::4,::4].flatten()
wv01 = np.load("/data/GPM_HIM8/exp_paper/X_B8_SYD_201901.npy")[:,::4,::4].flatten()
ir02 = np.load("/data/GPM_HIM8/exp_paper/X_B14_SYD_201902.npy")[:,::4,::4].flatten()
wv02 = np.load("/data/GPM_HIM8/exp_paper/X_B8_SYD_201902.npy")[:,::4,::4].flatten()
prp12 = np.clip(np.load("/data/GPM_HIM8/exp_paper/Y_SYD_201812.npy").flatten(),0,40)
prp01 = np.clip(np.load("/data/GPM_HIM8/exp_paper/Y_SYD_201901.npy").flatten(),0,40)
prp02 = np.clip(np.load("/data/GPM_HIM8/exp_paper/Y_SYD_201902.npy").flatten(),0,40)
irt = np.concatenate((ir12,ir01,ir02))
wvt = np.concatenate((wv12,wv01,wv02))
prpt = np.concatenate((prp12,prp01,prp02))
dift = irt-wvt
x = np.stack((dift,irt),axis=1)
rf = RandomForestRegressor(max_depth=6, random_state=0)
rf.fit(x, prpt)
rfp = rf.predict(np.stack((dif,ir), axis=1))
plt.scatter(prp[prp>.2],rfp)
# -
| baseline/Comparisons.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import itertools
import seaborn
import pandas as pd
from pathlib import Path
from matplotlib import pyplot as plt
seaborn.set_context("poster")
palette = itertools.cycle(seaborn.color_palette())
seaborn.color_palette()
inputpath = Path("/Volumes/GoogleDrive/My Drive/2019-EPSCoR_Postdoc/Spectral library/")
# +
grass_hyspexDF = pd.read_csv(inputpath / 'BC_05/envi_grass_hyspex.txt',
skiprows=12,
delimiter=r"\s+",
header=None,
names=[
'wavelength (nm)', 'sample01', 'sample02', 'sample03', 'sample04',
'sample05', 'sample06', 'sample07', 'sample08', 'sample09', 'sample10'])
grass_avirisDF = pd.read_csv(inputpath / 'BC_05/envi_grass_aviris.txt',
skiprows=7,
delimiter=r"\s+",
header=None,
names=[
'wavelength (nm)', 'sample01', 'sample02', 'sample03', 'sample04',
'sample05',])
grass_psrDF = pd.read_csv(inputpath / 'for_Edward_SpectralEvoluation/1596061_00031.sed',
skiprows=27, delimiter=r"\s+",
header=None, names=['wavelength (nm)', 'sample01'],
usecols=[0,1])
for ii in range(1,11):
colname = f'sample{str(ii).zfill(2)}'
grass_hyspexDF[colname] = grass_hyspexDF[colname] / 10000
grass_psrDF['sample01'] = grass_psrDF['sample01'] / 100
# +
picea_g_hyspexDF = pd.read_csv(inputpath / 'BC_05/envi_picea_g.txt',
skiprows=14,
delimiter=r"\s+",
header=None,
names=[
'wavelength (nm)', 'sample01', 'sample02', 'sample03', 'sample04',
'sample05', 'sample06', 'sample07', 'sample08', 'sample09', 'sample10',
'sample11', 'sample12'])
picea_m_hyspexDF = pd.read_csv(inputpath / 'BC_05/envi_picea_m.txt',
skiprows=14,
delimiter=r"\s+",
header=None,
names=[
'wavelength (nm)', 'sample01', 'sample02', 'sample03', 'sample04',
'sample05', 'sample06', 'sample07', 'sample08', 'sample09', 'sample10',
'sample11', 'sample12'])
betula_hyspexDF = pd.read_csv(inputpath / 'BC_05/envi_betula.txt',
skiprows=12,
delimiter=r"\s+",
header=None,
names=[
'wavelength (nm)', 'sample01', 'sample02', 'sample03', 'sample04',
'sample05', 'sample06', 'sample07', 'sample08', 'sample09', 'sample10'])
pop_trem_hyspexDF = pd.read_csv(inputpath / 'BC_05/envi_pop_trem.txt',
skiprows=14,
delimiter=r"\s+",
header=None,
names=[
'wavelength (nm)', 'sample01', 'sample02', 'sample03', 'sample04',
'sample05', 'sample06', 'sample07', 'sample08', 'sample09', 'sample10',
'sample11', 'sample12'])
for dframe in [betula_hyspexDF, pop_trem_hyspexDF, picea_m_hyspexDF, picea_g_hyspexDF]:
maximum = dframe.shape[1]
for ii in range(1, maximum):
colname = f'sample{str(ii).zfill(2)}'
dframe[colname] = dframe[colname] / 10000
# -
pop_trem_hyspexDF
with open(inputpath / 'BC_05/envi_pop_trem.txt', 'r') as src:
print(src.read(1500))
def get_mask(mydf):
mask = (((mydf['wavelength (nm)'] > 1340) &
(mydf['wavelength (nm)'] < 1480)) |
((mydf['wavelength (nm)'] > 1760) &
(mydf['wavelength (nm)'] < 1960)) |
(mydf['wavelength (nm)'] > 2350))
return mask
# +
fig, ax = plt.subplots(figsize=(15, 10))
grass_hyspexDF.mask(get_mask(grass_hyspexDF)).plot(
x='wavelength (nm)', y='sample01',
ax=ax,
label='HySpex', ylabel='reflectance',
ylim=(0, .75), title="Tussock Grass", lw=4)
grass_avirisDF.mask(get_mask(grass_avirisDF)).plot(x='wavelength (nm)', y='sample03',
ax=ax, label='AVIRIS-NG', lw=4)
grass_psrDF.mask(get_mask(grass_psrDF)).plot(x='wavelength (nm)', y='sample01', ax=ax, label='PSR+', lw=4)
plt.axvline(x=690, color='grey', ls=':', lw=3)
plt.axvline(x=1400, color='dimgrey', ls=':', lw=3)
plt.text(380, 0.66, "VIS", fontsize=36, color='grey', fontweight="bold")
plt.text(950, 0.66, "NIR", fontsize=36, color='grey', fontweight="bold")
plt.text(1550, 0.66, "SWIR", fontsize=36, color='grey', fontweight="bold")
# -
graph = ax.get_figure()
graph.savefig("tussock_grass_comp.png", dpi=150, bbox_inches="tight")
picea_g_hyspexDF.drop(columns=['sample10'], inplace=True)
# +
fig, ax = plt.subplots(figsize=(15, 10))
picea_g_hyspexDF.mask(get_mask(grass_hyspexDF)).plot(
x='wavelength (nm)',
ax=ax,
label='HySpex', ylabel='reflectance',
ylim=(0, .75), title="White Spruce",
# color=seaborn.color_palette()[2], lw=4)
)
plt.axvline(x=690, color='grey', ls=':', lw=3)
plt.axvline(x=1400, color='dimgrey', ls=':', lw=3)
plt.text(380, 0.66, "VIS", fontsize=36, color='grey', fontweight="bold")
plt.text(950, 0.66, "NIR", fontsize=36, color='grey', fontweight="bold")
plt.text(1750, 0.66, "SWIR", fontsize=36, color='grey', fontweight="bold")
ax.get_legend().remove()
# +
fig, ax = plt.subplots(figsize=(15, 10))
picea_m_hyspexDF.mask(get_mask(picea_m_hyspexDF)).plot(
x='wavelength (nm)',
ax=ax,
label='HySpex', ylabel='reflectance',
ylim=(0, .75), title="Black Spruce",
# color=seaborn.color_palette()[0], lw=4)
)
plt.axvline(x=690, color='grey', ls=':', lw=3)
plt.axvline(x=1400, color='dimgrey', ls=':', lw=3)
plt.text(380, 0.66, "VIS", fontsize=36, color='grey', fontweight="bold")
plt.text(950, 0.66, "NIR", fontsize=36, color='grey', fontweight="bold")
plt.text(1750, 0.66, "SWIR", fontsize=36, color='grey', fontweight="bold")
ax.get_legend().remove()
# +
fig, ax = plt.subplots(figsize=(15, 10))
betula_hyspexDF.mask(get_mask(betula_hyspexDF)).plot(
x='wavelength (nm)',
ax=ax,
label='HySpex', ylabel='reflectance',
ylim=(0, .75), title="Paper Birch",
# color=seaborn.color_palette()[5], lw=4)
)
plt.axvline(x=690, color='grey', ls=':', lw=3)
plt.axvline(x=1400, color='dimgrey', ls=':', lw=3)
plt.text(380, 0.66, "VIS", fontsize=36, color='grey', fontweight="bold")
plt.text(950, 0.66, "NIR", fontsize=36, color='grey', fontweight="bold")
plt.text(1750, 0.66, "SWIR", fontsize=36, color='grey', fontweight="bold")
ax.get_legend().remove()
# +
fig, ax = plt.subplots(figsize=(15, 10))
pop_trem_hyspexDF.mask(get_mask(pop_trem_hyspexDF)).plot(
x='wavelength (nm)',
ax=ax,
label='HySpex', ylabel='reflectance',
ylim=(0, .75), title="Quaking Aspen",
color=seaborn.color_palette()[8], lw=4)
plt.axvline(x=690, color='grey', ls=':', lw=3)
plt.axvline(x=1400, color='dimgrey', ls=':', lw=3)
plt.text(380, 0.66, "VIS", fontsize=36, color='grey', fontweight="bold")
plt.text(950, 0.66, "NIR", fontsize=36, color='grey', fontweight="bold")
plt.text(1750, 0.66, "SWIR", fontsize=36, color='grey', fontweight="bold")
ax.get_legend().remove()
# -
| notebooks/plot_spectra_hyspex.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BokieProject/daa_2021_1/blob/master/Tarea6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="B4StbjhS3Tul" outputId="8baff310-b3b6-46cb-fef1-8ada517803b8"
cadenaPalabras= "el lema que anima a la universidad nacional por mi raza hablara el espiritu revela la vocacion humanistica con la que fue concebida el autor de esta celebre frase jose vasconcelos, asumio la rectoria en 1920 en una epoca en que las esperanzas de la revolucion aun estaban vivas habia una gran fe en la patria y el animo redentor se extendia en el ambiente."
listaPalabras = cadenaPalabras.split()
frecuenciaPalab = []
for w in listaPalabras:
frecuenciaPalab.append(listaPalabras.count(w))
print ("Cadena\n" + (cadenaPalabras) + "\n")
print ("Listas\n" + str(listaPalabras) + "\n")
print ("Frecuencias\n" + str (frecuenciaPalab) + "\n")
print ("Pares\n" + str(list(zip(listaPalabras, frecuenciaPalab))))
| Tarea6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow 2.4 on Python 3.8 & CUDA 11.1
# language: python
# name: python3
# ---
# **Equations**
#
# *This notebook lists all the equations in the book. If you decide to print them on a T-Shirt, I definitely want a copy! ;-)*
#
# **Warning**: GitHub's notebook viewer does not render equations properly. You should either view this notebook within Jupyter itself or use [Jupyter's online viewer](http://nbviewer.jupyter.org/github/ageron/handson-ml/blob/master/book_equations.ipynb).
# # Chapter 1
# **Equation 1-1: A simple linear model**
#
# $
# \text{life_satisfaction} = \theta_0 + \theta_1 \times \text{GDP_per_capita}
# $
#
#
# # Chapter 2
# **Equation 2-1: Root Mean Square Error (RMSE)**
#
# $
# \text{RMSE}(\mathbf{X}, h) = \sqrt{\frac{1}{m}\sum\limits_{i=1}^{m}\left(h(\mathbf{x}^{(i)}) - y^{(i)}\right)^2}
# $
#
#
# **Notations (page 38):**
#
# $
# \mathbf{x}^{(1)} = \begin{pmatrix}
# -118.29 \\
# 33.91 \\
# 1,416 \\
# 38,372
# \end{pmatrix}
# $
#
#
# $
# y^{(1)}=156,400
# $
#
#
# $
# \mathbf{X} = \begin{pmatrix}
# (\mathbf{x}^{(1)})^T \\
# (\mathbf{x}^{(2)})^T\\
# \vdots \\
# (\mathbf{x}^{(1999)})^T \\
# (\mathbf{x}^{(2000)})^T
# \end{pmatrix} = \begin{pmatrix}
# -118.29 & 33.91 & 1,416 & 38,372 \\
# \vdots & \vdots & \vdots & \vdots \\
# \end{pmatrix}
# $
#
#
# **Equation 2-2: Mean Absolute Error**
#
# $
# \text{MAE}(\mathbf{X}, h) = \frac{1}{m}\sum\limits_{i=1}^{m}\left| h(\mathbf{x}^{(i)}) - y^{(i)} \right|
# $
#
# **$\ell_k$ norms (page 39):**
#
# $ \left\| \mathbf{v} \right\| _k = (\left| v_0 \right|^k + \left| v_1 \right|^k + \dots + \left| v_n \right|^k)^{\frac{1}{k}} $
#
# # Chapter 3
# **Equation 3-1: Precision**
#
# $
# \text{precision} = \cfrac{TP}{TP + FP}
# $
#
#
# **Equation 3-2: Recall**
#
# $
# \text{recall} = \cfrac{TP}{TP + FN}
# $
#
#
# **Equation 3-3: $F_1$ score**
#
# $
# F_1 = \cfrac{2}{\cfrac{1}{\text{precision}} + \cfrac{1}{\text{recall}}} = 2 \times \cfrac{\text{precision}\, \times \, \text{recall}}{\text{precision}\, + \, \text{recall}} = \cfrac{TP}{TP + \cfrac{FN + FP}{2}}
# $
#
#
# # Chapter 4
# **Equation 4-1: Linear Regression model prediction**
#
# $
# \hat{y} = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \dots + \theta_n x_n
# $
#
#
# **Equation 4-2: Linear Regression model prediction (vectorized form)**
#
# $
# \hat{y} = h_{\boldsymbol{\theta}}(\mathbf{x}) = \boldsymbol{\theta} \cdot \mathbf{x}
# $
#
#
# **Equation 4-3: MSE cost function for a Linear Regression model**
#
# $
# \text{MSE}(\mathbf{X}, h_{\boldsymbol{\theta}}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{(\boldsymbol{\theta}^T \mathbf{x}^{(i)} - y^{(i)})^2}
# $
#
#
# **Equation 4-4: Normal Equation**
#
# $
# \hat{\boldsymbol{\theta}} = (\mathbf{X}^T \mathbf{X})^{-1} \mathbf{X}^T \mathbf{y}
# $
#
#
# ** Partial derivatives notation (page 114):**
#
# $\frac{\partial}{\partial \theta_j} \text{MSE}(\boldsymbol{\theta})$
#
#
# **Equation 4-5: Partial derivatives of the cost function**
#
# $
# \dfrac{\partial}{\partial \theta_j} \text{MSE}(\boldsymbol{\theta}) = \dfrac{2}{m}\sum\limits_{i=1}^{m}(\boldsymbol{\theta}^T \mathbf{x}^{(i)} - y^{(i)})\, x_j^{(i)}
# $
#
#
# **Equation 4-6: Gradient vector of the cost function**
#
# $
# \nabla_{\boldsymbol{\theta}}\, \text{MSE}(\boldsymbol{\theta}) =
# \begin{pmatrix}
# \frac{\partial}{\partial \theta_0} \text{MSE}(\boldsymbol{\theta}) \\
# \frac{\partial}{\partial \theta_1} \text{MSE}(\boldsymbol{\theta}) \\
# \vdots \\
# \frac{\partial}{\partial \theta_n} \text{MSE}(\boldsymbol{\theta})
# \end{pmatrix}
# = \dfrac{2}{m} \mathbf{X}^T (\mathbf{X} \boldsymbol{\theta} - \mathbf{y})
# $
#
#
# **Equation 4-7: Gradient Descent step**
#
# $
# \boldsymbol{\theta}^{(\text{next step})} = \boldsymbol{\theta} - \eta \nabla_{\boldsymbol{\theta}}\, \text{MSE}(\boldsymbol{\theta})
# $
#
#
# $ O(\frac{1}{\text{iterations}}) $
#
#
# $ \hat{y} = 0.56 x_1^2 + 0.93 x_1 + 1.78 $
#
#
# $ y = 0.5 x_1^2 + 1.0 x_1 + 2.0 + \text{Gaussian noise} $
#
#
# $ \dfrac{(n+d)!}{d!\,n!} $
#
#
# $ \alpha \sum_{i=1}^{n}{{\theta_i}^2}$
#
#
# **Equation 4-8: Ridge Regression cost function**
#
# $
# J(\boldsymbol{\theta}) = \text{MSE}(\boldsymbol{\theta}) + \alpha \dfrac{1}{2}\sum\limits_{i=1}^{n}{\theta_i}^2
# $
#
#
# **Equation 4-9: Ridge Regression closed-form solution**
#
# $
# \hat{\boldsymbol{\theta}} = (\mathbf{X}^T \mathbf{X} + \alpha \mathbf{A})^{-1} \mathbf{X}^T \mathbf{y}
# $
#
#
# **Equation 4-10: Lasso Regression cost function**
#
# $
# J(\boldsymbol{\theta}) = \text{MSE}(\boldsymbol{\theta}) + \alpha \sum\limits_{i=1}^{n}\left| \theta_i \right|
# $
#
#
# **Equation 4-11: Lasso Regression subgradient vector**
#
# $
# g(\boldsymbol{\theta}, J) = \nabla_{\boldsymbol{\theta}}\, \text{MSE}(\boldsymbol{\theta}) + \alpha
# \begin{pmatrix}
# \operatorname{sign}(\theta_1) \\
# \operatorname{sign}(\theta_2) \\
# \vdots \\
# \operatorname{sign}(\theta_n) \\
# \end{pmatrix} \quad \text{where } \operatorname{sign}(\theta_i) =
# \begin{cases}
# -1 & \text{if } \theta_i < 0 \\
# 0 & \text{if } \theta_i = 0 \\
# +1 & \text{if } \theta_i > 0
# \end{cases}
# $
#
#
# **Equation 4-12: Elastic Net cost function**
#
# $
# J(\boldsymbol{\theta}) = \text{MSE}(\boldsymbol{\theta}) + r \alpha \sum\limits_{i=1}^{n}\left| \theta_i \right| + \dfrac{1 - r}{2} \alpha \sum\limits_{i=1}^{n}{{\theta_i}^2}
# $
#
#
# **Equation 4-13: Logistic Regression model estimated probability (vectorized form)**
#
# $
# \hat{p} = h_{\boldsymbol{\theta}}(\mathbf{x}) = \sigma(\boldsymbol{\theta}^T \mathbf{x})
# $
#
#
# **Equation 4-14: Logistic function**
#
# $
# \sigma(t) = \dfrac{1}{1 + \exp(-t)}
# $
#
#
# **Equation 4-15: Logistic Regression model prediction**
#
# $
# \hat{y} =
# \begin{cases}
# 0 & \text{if } \hat{p} < 0.5, \\
# 1 & \text{if } \hat{p} \geq 0.5.
# \end{cases}
# $
#
#
# **Equation 4-16: Cost function of a single training instance**
#
# $
# c(\boldsymbol{\theta}) =
# \begin{cases}
# -\log(\hat{p}) & \text{if } y = 1, \\
# -\log(1 - \hat{p}) & \text{if } y = 0.
# \end{cases}
# $
#
#
# **Equation 4-17: Logistic Regression cost function (log loss)**
#
# $
# J(\boldsymbol{\theta}) = -\dfrac{1}{m} \sum\limits_{i=1}^{m}{\left[ y^{(i)} log\left(\hat{p}^{(i)}\right) + (1 - y^{(i)}) log\left(1 - \hat{p}^{(i)}\right)\right]}
# $
#
#
# **Equation 4-18: Logistic cost function partial derivatives**
#
# $
# \dfrac{\partial}{\partial \theta_j} \text{J}(\boldsymbol{\theta}) = \dfrac{1}{m}\sum\limits_{i=1}^{m}\left(\mathbf{\sigma(\boldsymbol{\theta}}^T \mathbf{x}^{(i)}) - y^{(i)}\right)\, x_j^{(i)}
# $
#
#
# **Equation 4-19: Softmax score for class k**
#
# $
# s_k(\mathbf{x}) = ({\boldsymbol{\theta}^{(k)}})^T \mathbf{x}
# $
#
#
# **Equation 4-20: Softmax function**
#
# $
# \hat{p}_k = \sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \dfrac{\exp\left(s_k(\mathbf{x})\right)}{\sum\limits_{j=1}^{K}{\exp\left(s_j(\mathbf{x})\right)}}
# $
#
#
# **Equation 4-21: Softmax Regression classifier prediction**
#
# $
# \hat{y} = \underset{k}{\operatorname{argmax}} \, \sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \underset{k}{\operatorname{argmax}} \, s_k(\mathbf{x}) = \underset{k}{\operatorname{argmax}} \, \left( ({\boldsymbol{\theta}^{(k)}})^T \mathbf{x} \right)
# $
#
#
# **Equation 4-22: Cross entropy cost function**
#
# $
# J(\boldsymbol{\Theta}) = - \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)}
# $
#
# **Cross entropy between two discrete probability distributions $p$ and $q$ (page 141):**
# $ H(p, q) = -\sum\limits_{x}p(x) \log q(x) $
#
#
# **Equation 4-23: Cross entropy gradient vector for class _k_**
#
# $
# \nabla_{\boldsymbol{\theta}^{(k)}} \, J(\boldsymbol{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}}
# $
#
# # Chapter 5
# **Equation 5-1: Gaussian RBF**
#
# $
# {\displaystyle \phi_{\gamma}(\mathbf{x}, \boldsymbol{\ell})} = {\displaystyle \exp({\displaystyle -\gamma \left\| \mathbf{x} - \boldsymbol{\ell} \right\|^2})}
# $
#
#
# **Equation 5-2: Linear SVM classifier prediction**
#
# $
# \hat{y} = \begin{cases}
# 0 & \text{if } \mathbf{w}^T \mathbf{x} + b < 0, \\
# 1 & \text{if } \mathbf{w}^T \mathbf{x} + b \geq 0
# \end{cases}
# $
#
#
# **Equation 5-3: Hard margin linear SVM classifier objective**
#
# $
# \begin{split}
# &\underset{\mathbf{w}, b}{\operatorname{minimize}}\quad{\frac{1}{2}\mathbf{w}^T \mathbf{w}} \\
# &\text{subject to} \quad t^{(i)}(\mathbf{w}^T \mathbf{x}^{(i)} + b) \ge 1 \quad \text{for } i = 1, 2, \dots, m
# \end{split}
# $
#
#
# **Equation 5-4: Soft margin linear SVM classifier objective**
#
# $
# \begin{split}
# &\underset{\mathbf{w}, b, \mathbf{\zeta}}{\operatorname{minimize}}\quad{\dfrac{1}{2}\mathbf{w}^T \mathbf{w} + C \sum\limits_{i=1}^m{\zeta^{(i)}}}\\
# &\text{subject to} \quad t^{(i)}(\mathbf{w}^T \mathbf{x}^{(i)} + b) \ge 1 - \zeta^{(i)} \quad \text{and} \quad \zeta^{(i)} \ge 0 \quad \text{for } i = 1, 2, \dots, m
# \end{split}
# $
#
#
# **Equation 5-5: Quadratic Programming problem**
#
# $
# \begin{split}
# \underset{\mathbf{p}}{\text{Minimize}} \quad & \dfrac{1}{2} \mathbf{p}^T \mathbf{H} \mathbf{p} \quad + \quad \mathbf{f}^T \mathbf{p} \\
# \text{subject to} \quad & \mathbf{A} \mathbf{p} \le \mathbf{b} \\
# \text{where } &
# \begin{cases}
# \mathbf{p} & \text{ is an }n_p\text{-dimensional vector (} n_p = \text{number of parameters),}\\
# \mathbf{H} & \text{ is an }n_p \times n_p \text{ matrix,}\\
# \mathbf{f} & \text{ is an }n_p\text{-dimensional vector,}\\
# \mathbf{A} & \text{ is an } n_c \times n_p \text{ matrix (}n_c = \text{number of constraints),}\\
# \mathbf{b} & \text{ is an }n_c\text{-dimensional vector.}
# \end{cases}
# \end{split}
# $
#
#
# **Equation 5-6: Dual form of the linear SVM objective**
#
# $
# \begin{split}
# \underset{\mathbf{\alpha}}{\operatorname{minimize}}
# \dfrac{1}{2}\sum\limits_{i=1}^{m}{
# \sum\limits_{j=1}^{m}{
# \alpha^{(i)} \alpha^{(j)} t^{(i)} t^{(j)} {\mathbf{x}^{(i)}}^T \mathbf{x}^{(j)}
# }
# } \quad - \quad \sum\limits_{i=1}^{m}{\alpha^{(i)}}\\
# \text{subject to}\quad \alpha^{(i)} \ge 0 \quad \text{for }i = 1, 2, \dots, m
# \end{split}
# $
#
#
# **Equation 5-7: From the dual solution to the primal solution**
#
# $
# \begin{split}
# &\hat{\mathbf{w}} = \sum_{i=1}^{m}{\hat{\alpha}}^{(i)}t^{(i)}\mathbf{x}^{(i)}\\
# &\hat{b} = \dfrac{1}{n_s}\sum\limits_{\scriptstyle i=1 \atop {\scriptstyle {\hat{\alpha}}^{(i)} > 0}}^{m}{\left(t^{(i)} - ({\hat{\mathbf{w}}}^T \mathbf{x}^{(i)})\right)}
# \end{split}
# $
#
#
# **Equation 5-8: Second-degree polynomial mapping**
#
# $
# \phi\left(\mathbf{x}\right) = \phi\left( \begin{pmatrix}
# x_1 \\
# x_2
# \end{pmatrix} \right) = \begin{pmatrix}
# {x_1}^2 \\
# \sqrt{2} \, x_1 x_2 \\
# {x_2}^2
# \end{pmatrix}
# $
#
#
# **Equation 5-9: Kernel trick for a 2^nd^-degree polynomial mapping**
#
# $
# \begin{split}
# \phi(\mathbf{a})^T \phi(\mathbf{b}) & \quad = \begin{pmatrix}
# {a_1}^2 \\
# \sqrt{2} \, a_1 a_2 \\
# {a_2}^2
# \end{pmatrix}^T \begin{pmatrix}
# {b_1}^2 \\
# \sqrt{2} \, b_1 b_2 \\
# {b_2}^2
# \end{pmatrix} = {a_1}^2 {b_1}^2 + 2 a_1 b_1 a_2 b_2 + {a_2}^2 {b_2}^2 \\
# & \quad = \left( a_1 b_1 + a_2 b_2 \right)^2 = \left( \begin{pmatrix}
# a_1 \\
# a_2
# \end{pmatrix}^T \begin{pmatrix}
# b_1 \\
# b_2
# \end{pmatrix} \right)^2 = (\mathbf{a}^T \mathbf{b})^2
# \end{split}
# $
#
# **In the text about the kernel trick (page 162):**
# [...], then you can replace this dot product of transformed vectors simply by $ ({\mathbf{x}^{(i)}}^T \mathbf{x}^{(j)})^2 $
#
#
# **Equation 5-10: Common kernels**
#
# $
# \begin{split}
# \text{Linear:} & \quad K(\mathbf{a}, \mathbf{b}) = \mathbf{a}^T \mathbf{b} \\
# \text{Polynomial:} & \quad K(\mathbf{a}, \mathbf{b}) = \left(\gamma \mathbf{a}^T \mathbf{b} + r \right)^d \\
# \text{Gaussian RBF:} & \quad K(\mathbf{a}, \mathbf{b}) = \exp({\displaystyle -\gamma \left\| \mathbf{a} - \mathbf{b} \right\|^2}) \\
# \text{Sigmoid:} & \quad K(\mathbf{a}, \mathbf{b}) = \tanh\left(\gamma \mathbf{a}^T \mathbf{b} + r\right)
# \end{split}
# $
#
# **Equation 5-11: Making predictions with a kernelized SVM**
#
# $
# \begin{split}
# h_{\hat{\mathbf{w}}, \hat{b}}\left(\phi(\mathbf{x}^{(n)})\right) & = \,\hat{\mathbf{w}}^T \phi(\mathbf{x}^{(n)}) + \hat{b} = \left(\sum_{i=1}^{m}{\hat{\alpha}}^{(i)}t^{(i)}\phi(\mathbf{x}^{(i)})\right)^T \phi(\mathbf{x}^{(n)}) + \hat{b}\\
# & = \, \sum_{i=1}^{m}{\hat{\alpha}}^{(i)}t^{(i)}\left(\phi(\mathbf{x}^{(i)})^T \phi(\mathbf{x}^{(n)})\right) + \hat{b}\\
# & = \sum\limits_{\scriptstyle i=1 \atop {\scriptstyle {\hat{\alpha}}^{(i)} > 0}}^{m}{\hat{\alpha}}^{(i)}t^{(i)} K(\mathbf{x}^{(i)}, \mathbf{x}^{(n)}) + \hat{b}
# \end{split}
# $
#
#
# **Equation 5-12: Computing the bias term using the kernel trick**
#
# $
# \begin{split}
# \hat{b} & = \dfrac{1}{n_s}\sum\limits_{\scriptstyle i=1 \atop {\scriptstyle {\hat{\alpha}}^{(i)} > 0}}^{m}{\left(t^{(i)} - {\hat{\mathbf{w}}}^T \phi(\mathbf{x}^{(i)})\right)} = \dfrac{1}{n_s}\sum\limits_{\scriptstyle i=1 \atop {\scriptstyle {\hat{\alpha}}^{(i)} > 0}}^{m}{\left(t^{(i)} - {
# \left(\sum_{j=1}^{m}{\hat{\alpha}}^{(j)}t^{(j)}\phi(\mathbf{x}^{(j)})\right)
# }^T \phi(\mathbf{x}^{(i)})\right)}\\
# & = \dfrac{1}{n_s}\sum\limits_{\scriptstyle i=1 \atop {\scriptstyle {\hat{\alpha}}^{(i)} > 0}}^{m}{\left(t^{(i)} -
# \sum\limits_{\scriptstyle j=1 \atop {\scriptstyle {\hat{\alpha}}^{(j)} > 0}}^{m}{
# {\hat{\alpha}}^{(j)} t^{(j)} K(\mathbf{x}^{(i)},\mathbf{x}^{(j)})
# }
# \right)}
# \end{split}
# $
#
#
# **Equation 5-13: Linear SVM classifier cost function**
#
# $
# J(\mathbf{w}, b) = \dfrac{1}{2} \mathbf{w}^T \mathbf{w} \quad + \quad C {\displaystyle \sum\limits_{i=1}^{m}max\left(0, t^{(i)} - (\mathbf{w}^T \mathbf{x}^{(i)} + b) \right)}
# $
#
#
#
# # Chapter 6
# **Equation 6-1: Gini impurity**
#
# $
# G_i = 1 - \sum\limits_{k=1}^{n}{{p_{i,k}}^2}
# $
#
#
# **Equation 6-2: CART cost function for classification**
#
# $
# \begin{split}
# &J(k, t_k) = \dfrac{m_{\text{left}}}{m}G_\text{left} + \dfrac{m_{\text{right}}}{m}G_{\text{right}}\\
# &\text{where }\begin{cases}
# G_\text{left/right} \text{ measures the impurity of the left/right subset,}\\
# m_\text{left/right} \text{ is the number of instances in the left/right subset.}
# \end{cases}
# \end{split}
# $
#
# **Entropy computation example (page 173):**
#
# $ -\frac{49}{54}\log_2(\frac{49}{54}) - \frac{5}{54}\log_2(\frac{5}{54}) $
#
#
# **Equation 6-3: Entropy**
#
# $
# H_i = -\sum\limits_{k=1 \atop p_{i,k} \ne 0}^{n}{{p_{i,k}}\log_2(p_{i,k})}
# $
#
#
# **Equation 6-4: CART cost function for regression**
#
# $
# J(k, t_k) = \dfrac{m_{\text{left}}}{m}\text{MSE}_\text{left} + \dfrac{m_{\text{right}}}{m}\text{MSE}_{\text{right}} \quad
# \text{where }
# \begin{cases}
# \text{MSE}_{\text{node}} = \dfrac{1}{m_{\text{node}}} \sum\limits_{\scriptstyle i \in \text{node}}(\hat{y}_{\text{node}} - y^{(i)})^2\\
# \hat{y}_\text{node} = \dfrac{1}{m_{\text{node}}}\sum\limits_{\scriptstyle i \in \text{node}}y^{(i)}
# \end{cases}
# $
#
# # Chapter 7
#
# **Equation 7-1: Weighted error rate of the $j^\text{th}$ predictor**
#
# $
# r_j = \dfrac{\displaystyle \sum\limits_{\textstyle {i=1 \atop \hat{y}_j^{(i)} \ne y^{(i)}}}^{m}{w^{(i)}}}{\displaystyle \sum\limits_{i=1}^{m}{w^{(i)}}} \quad
# \text{where }\hat{y}_j^{(i)}\text{ is the }j^{\text{th}}\text{ predictor's prediction for the }i^{\text{th}}\text{ instance.}
# $
#
# **Equation 7-2: Predictor weight**
#
# $
# \begin{split}
# \alpha_j = \eta \log{\dfrac{1 - r_j}{r_j}}
# \end{split}
# $
#
#
# **Equation 7-3: Weight update rule**
#
# $
# \begin{split}
# & \text{ for } i = 1, 2, \dots, m \\
# & w^{(i)} \leftarrow
# \begin{cases}
# w^{(i)} & \text{if }\hat{y_j}^{(i)} = y^{(i)}\\
# w^{(i)} \exp(\alpha_j) & \text{if }\hat{y_j}^{(i)} \ne y^{(i)}
# \end{cases}
# \end{split}
# $
#
# **In the text page 194:**
#
# Then all the instance weights are normalized (i.e., divided by $ \sum_{i=1}^{m}{w^{(i)}} $).
#
#
# **Equation 7-4: AdaBoost predictions**
#
# $
# \hat{y}(\mathbf{x}) = \underset{k}{\operatorname{argmax}}{\sum\limits_{\scriptstyle j=1 \atop \scriptstyle \hat{y}_j(\mathbf{x}) = k}^{N}{\alpha_j}} \quad \text{where }N\text{ is the number of predictors.}
# $
#
#
#
# # Chapter 8
#
# **Equation 8-1: Principal components matrix**
#
# $
# \mathbf{V}^T =
# \begin{pmatrix}
# \mid & \mid & & \mid \\
# \mathbf{c_1} & \mathbf{c_2} & \cdots & \mathbf{c_n} \\
# \mid & \mid & & \mid
# \end{pmatrix}
# $
#
#
# **Equation 8-2: Projecting the training set down to _d_ dimensions**
#
# $
# \mathbf{X}_{d\text{-proj}} = \mathbf{X} \mathbf{W}_d
# $
#
#
# **Equation 8-3: PCA inverse transformation, back to the original number of dimensions**
#
# $
# \mathbf{X}_{\text{recovered}} = \mathbf{X}_{d\text{-proj}} {\mathbf{W}_d}^T
# $
#
#
# $ \sum_{j=1}^{m}{w_{i,j}\mathbf{x}^{(j)}} $
#
#
# **Equation 8-4: LLE step 1: linearly modeling local relationships**
#
# $
# \begin{split}
# & \hat{\mathbf{W}} = \underset{\mathbf{W}}{\operatorname{argmin}}{\displaystyle \sum\limits_{i=1}^{m}} \left\|\mathbf{x}^{(i)} - \sum\limits_{j=1}^{m}{w_{i,j}}\mathbf{x}^{(j)}\right\|^2\\
# & \text{subject to }
# \begin{cases}
# w_{i,j}=0 & \text{if }\mathbf{x}^{(j)} \text{ is not one of the }k\text{ c.n. of }\mathbf{x}^{(i)}\\
# \sum\limits_{j=1}^{m}w_{i,j} = 1 & \text{for }i=1, 2, \dots, m
# \end{cases}
# \end{split}
# $
#
# **In the text page 223:**
#
# [...] then we want the squared distance between $\mathbf{z}^{(i)}$ and $ \sum_{j=1}^{m}{\hat{w}_{i,j}\mathbf{z}^{(j)}} $ to be as small as possible.
#
#
# **Equation 8-5: LLE step 2: reducing dimensionality while preserving relationships**
#
# $
# \hat{\mathbf{Z}} = \underset{\mathbf{Z}}{\operatorname{argmin}}{\displaystyle \sum\limits_{i=1}^{m}} \left\|\mathbf{z}^{(i)} - \sum\limits_{j=1}^{m}{\hat{w}_{i,j}}\mathbf{z}^{(j)}\right\|^2
# $
#
# # Chapter 9
#
# **Equation 9-1: Rectified linear unit**
#
# $
# h_{\mathbf{w}, b}(\mathbf{X}) = \max(\mathbf{X} \mathbf{w} + b, 0)
# $
# # Chapter 10
#
# **Equation 10-1: Common step functions used in Perceptrons**
#
# $
# \begin{split}
# \operatorname{heaviside}(z) =
# \begin{cases}
# 0 & \text{if }z < 0\\
# 1 & \text{if }z \ge 0
# \end{cases} & \quad\quad
# \operatorname{sgn}(z) =
# \begin{cases}
# -1 & \text{if }z < 0\\
# 0 & \text{if }z = 0\\
# +1 & \text{if }z > 0
# \end{cases}
# \end{split}
# $
#
#
# **Equation 10-2: Perceptron learning rule (weight update)**
#
# $
# {w_{i,j}}^{(\text{next step})} = w_{i,j} + \eta (y_j - \hat{y}_j) x_i
# $
#
#
# **In the text page 266:**
#
# It will be initialized randomly, using a truncated normal (Gaussian) distribution with a standard deviation of $ 2 / \sqrt{\text{n}_\text{inputs}} $.
#
# # Chapter 11
# **Equation 11-1: Xavier initialization (when using the logistic activation function)**
#
# $
# \begin{split}
# & \text{Normal distribution with mean 0 and standard deviation }
# \sigma = \sqrt{\dfrac{2}{n_\text{inputs} + n_\text{outputs}}}\\
# & \text{Or a uniform distribution between -r and +r, with }
# r = \sqrt{\dfrac{6}{n_\text{inputs} + n_\text{outputs}}}
# \end{split}
# $
#
# **In the text page 278:**
#
# When the number of input connections is roughly equal to the number of output
# connections, you get simpler equations (e.g., $ \sigma = 1 / \sqrt{n_\text{inputs}} $ or $ r = \sqrt{3} / \sqrt{n_\text{inputs}} $).
#
# **Table 11-1: Initialization parameters for each type of activation function**
#
# * Logistic uniform: $ r = \sqrt{\dfrac{6}{n_\text{inputs} + n_\text{outputs}}} $
# * Logistic normal: $ \sigma = \sqrt{\dfrac{2}{n_\text{inputs} + n_\text{outputs}}} $
# * Hyperbolic tangent uniform: $ r = 4 \sqrt{\dfrac{6}{n_\text{inputs} + n_\text{outputs}}} $
# * Hyperbolic tangent normal: $ \sigma = 4 \sqrt{\dfrac{2}{n_\text{inputs} + n_\text{outputs}}} $
# * ReLU (and its variants) uniform: $ r = \sqrt{2} \sqrt{\dfrac{6}{n_\text{inputs} + n_\text{outputs}}} $
# * ReLU (and its variants) normal: $ \sigma = \sqrt{2} \sqrt{\dfrac{2}{n_\text{inputs} + n_\text{outputs}}} $
#
# **Equation 11-2: ELU activation function**
#
# $
# \operatorname{ELU}_\alpha(z) =
# \begin{cases}
# \alpha(\exp(z) - 1) & \text{if } z < 0\\
# z & if z \ge 0
# \end{cases}
# $
#
#
# **Equation 11-3: Batch Normalization algorithm**
#
# $
# \begin{split}
# 1.\quad & \mathbf{\mu}_B = \dfrac{1}{m_B}\sum\limits_{i=1}^{m_B}{\mathbf{x}^{(i)}}\\
# 2.\quad & {\mathbf{\sigma}_B}^2 = \dfrac{1}{m_B}\sum\limits_{i=1}^{m_B}{(\mathbf{x}^{(i)} - \mathbf{\mu}_B)^2}\\
# 3.\quad & \hat{\mathbf{x}}^{(i)} = \dfrac{\mathbf{x}^{(i)} - \mathbf{\mu}_B}{\sqrt{{\mathbf{\sigma}_B}^2 + \epsilon}}\\
# 4.\quad & \mathbf{z}^{(i)} = \gamma \hat{\mathbf{x}}^{(i)} + \beta
# \end{split}
# $
#
# **In the text page 285:**
#
# [...] given a new value $v$, the running average $v$ is updated through the equation:
#
# $ \hat{v} \gets \hat{v} \times \text{momentum} + v \times (1 - \text{momentum}) $
#
# **Equation 11-4: Momentum algorithm**
#
# 1. $\mathbf{m} \gets \beta \mathbf{m} - \eta \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta})$
# 2. $\boldsymbol{\theta} \gets \boldsymbol{\theta} + \mathbf{m}$
#
# **In the text page 296:**
#
# You can easily verify that if the gradient remains constant, the terminal velocity (i.e., the maximum size of the weight updates) is equal to that gradient multiplied by the learning rate η multiplied by $ \frac{1}{1 - \beta} $.
#
#
# **Equation 11-5: Nesterov Accelerated Gradient algorithm**
#
# 1. $\mathbf{m} \gets \beta \mathbf{m} - \eta \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta} + \beta \mathbf{m})$
# 2. $\boldsymbol{\theta} \gets \boldsymbol{\theta} + \mathbf{m}$
#
# **Equation 11-6: AdaGrad algorithm**
#
# 1. $\mathbf{s} \gets \mathbf{s} + \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta}) \otimes \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta})$
# 2. $\boldsymbol{\theta} \gets \boldsymbol{\theta} - \eta \, \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta}) \oslash {\sqrt{\mathbf{s} + \epsilon}}$
#
# **In the text page 298-299:**
#
# This vectorized form is equivalent to computing $s_i \gets s_i + \left( \dfrac{\partial J(\boldsymbol{\theta})}{\partial \theta_i} \right)^2$ for each element $s_i$ of the vector $\mathbf{s}$.
#
# **In the text page 299:**
#
# This vectorized form is equivalent to computing $ \theta_i \gets \theta_i - \eta \, \dfrac{\partial J(\boldsymbol{\theta})}{\partial \theta_i} \dfrac{1}{\sqrt{s_i + \epsilon}} $ for all parameters $\theta_i$ (simultaneously).
#
#
# **Equation 11-7: RMSProp algorithm**
#
# 1. $\mathbf{s} \gets \beta \mathbf{s} + (1 - \beta ) \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta}) \otimes \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta})$
# 2. $\boldsymbol{\theta} \gets \boldsymbol{\theta} - \eta \, \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta}) \oslash {\sqrt{\mathbf{s} + \epsilon}}$
#
#
# **Equation 11-8: Adam algorithm**
#
# 1. $\mathbf{m} \gets \beta_1 \mathbf{m} - (1 - \beta_1) \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta})$
# 2. $\mathbf{s} \gets \beta_2 \mathbf{s} + (1 - \beta_2) \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta}) \otimes \nabla_\boldsymbol{\theta}J(\boldsymbol{\theta})$
# 3. $\hat{\mathbf{m}} \gets \left(\dfrac{\mathbf{m}}{1 - {\beta_1}^T}\right)$
# 4. $\hat{\mathbf{s}} \gets \left(\dfrac{\mathbf{s}}{1 - {\beta_2}^T}\right)$
# 5. $\boldsymbol{\theta} \gets \boldsymbol{\theta} + \eta \, \hat{\mathbf{m}} \oslash {\sqrt{\hat{\mathbf{s}} + \epsilon}}$
#
# **In the text page 309:**
#
# We typically implement this constraint by computing $\left\| \mathbf{w} \right\|_2$ after each training step
# and clipping $\mathbf{w}$ if needed $ \left( \mathbf{w} \gets \mathbf{w} \dfrac{r}{\left\| \mathbf{w} \right\|_2} \right) $.
#
#
#
# # Chapter 13
#
# **Equation 13-1: Computing the output of a neuron in a convolutional layer**
#
# $
# z_{i,j,k} = b_k + \sum\limits_{u = 0}^{f_h - 1} \, \, \sum\limits_{v = 0}^{f_w - 1} \, \, \sum\limits_{k' = 0}^{f_{n'} - 1} \, \, x_{i', j', k'} \times w_{u, v, k', k}
# \quad \text{with }
# \begin{cases}
# i' = i \times s_h + u \\
# j' = j \times s_w + v
# \end{cases}
# $
#
# **Equation 13-2: Local response normalization**
#
# $
# b_i = a_i \left(k + \alpha \sum\limits_{j=j_\text{low}}^{j_\text{high}}{{a_j}^2} \right)^{-\beta} \quad \text{with }
# \begin{cases}
# j_\text{high} = \min\left(i + \dfrac{r}{2}, f_n-1\right) \\
# j_\text{low} = \max\left(0, i - \dfrac{r}{2}\right)
# \end{cases}
# $
#
#
#
# # Chapter 14
#
# **Equation 14-1: Output of a recurrent layer for a single instance**
#
# $
# \mathbf{y}_{(t)} = \phi\left({\mathbf{W}_x}^T{\mathbf{x}_{(t)}} + {{\mathbf{W}_y}^T\mathbf{y}_{(t-1)}} + \mathbf{b} \right)
# $
#
#
# **Equation 14-2: Outputs of a layer of recurrent neurons for all instances in a mini-batch**
#
# $
# \begin{split}
# \mathbf{Y}_{(t)} & = \phi\left(\mathbf{X}_{(t)} \mathbf{W}_{x} + \mathbf{Y}_{(t-1)} \mathbf{W}_{y} + \mathbf{b} \right) \\
# & = \phi\left(
# \left[\mathbf{X}_{(t)} \quad \mathbf{Y}_{(t-1)} \right]
# \mathbf{W} + \mathbf{b} \right) \text{ with } \mathbf{W}=
# \left[ \begin{matrix}
# \mathbf{W}_x\\
# \mathbf{W}_y
# \end{matrix} \right]
# \end{split}
# $
#
# **In the text page 391:**
#
# Just like in regular backpropagation, there is a first forward pass through the unrolled network (represented by the dashed arrows); then the output sequence is evaluated using a cost function $ C(\mathbf{Y}_{(t_\text{min})}, \mathbf{Y}_{(t_\text{min}+1)}, \dots, \mathbf{Y}_{(t_\text{max})}) $ (where $t_\text{min}$ and $t_\text{max}$ are the first and last output time steps, not counting the ignored outputs)[...]
#
#
# **Equation 14-3: LSTM computations**
#
# $
# \begin{split}
# \mathbf{i}_{(t)}&=\sigma({\mathbf{W}_{xi}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{hi}}^T \mathbf{h}_{(t-1)} + \mathbf{b}_i)\\
# \mathbf{f}_{(t)}&=\sigma({\mathbf{W}_{xf}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{hf}}^T \mathbf{h}_{(t-1)} + \mathbf{b}_f)\\
# \mathbf{o}_{(t)}&=\sigma({\mathbf{W}_{xo}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{ho}}^T \mathbf{h}_{(t-1)} + \mathbf{b}_o)\\
# \mathbf{g}_{(t)}&=\operatorname{tanh}({\mathbf{W}_{xg}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{hg}}^T \mathbf{h}_{(t-1)} + \mathbf{b}_g)\\
# \mathbf{c}_{(t)}&=\mathbf{f}_{(t)} \otimes \mathbf{c}_{(t-1)} \, + \, \mathbf{i}_{(t)} \otimes \mathbf{g}_{(t)}\\
# \mathbf{y}_{(t)}&=\mathbf{h}_{(t)} = \mathbf{o}_{(t)} \otimes \operatorname{tanh}(\mathbf{c}_{(t)})
# \end{split}
# $
#
#
# **Equation 14-4: GRU computations**
#
# $
# \begin{split}
# \mathbf{z}_{(t)}&=\sigma({\mathbf{W}_{xz}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{hz}}^T \mathbf{h}_{(t-1)}) \\
# \mathbf{r}_{(t)}&=\sigma({\mathbf{W}_{xr}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{hr}}^T \mathbf{h}_{(t-1)}) \\
# \mathbf{g}_{(t)}&=\operatorname{tanh}\left({\mathbf{W}_{xg}}^T \mathbf{x}_{(t)} + {\mathbf{W}_{hg}}^T (\mathbf{r}_{(t)} \otimes \mathbf{h}_{(t-1)})\right) \\
# \mathbf{h}_{(t)}&=(1-\mathbf{z}_{(t)}) \otimes \mathbf{h}_{(t-1)} + \mathbf{z}_{(t)} \otimes \mathbf{g}_{(t)}
# \end{split}
# $
#
#
#
# # Chapter 15
#
# **Equation 15-1: Kullback–Leibler divergence**
#
# $
# D_{\mathrm{KL}}(P\|Q) = \sum\limits_{i} P(i) \log \dfrac{P(i)}{Q(i)}
# $
#
#
# **Equation: KL divergence between the target sparsity _p_ and the actual sparsity _q_**
#
# $
# D_{\mathrm{KL}}(p\|q) = p \, \log \dfrac{p}{q} + (1-p) \log \dfrac{1-p}{1-q}
# $
#
# **In the text page 433:**
#
# One common variant is to train the encoder to output $\gamma = \log\left(\sigma^2\right)$ rather than $\sigma$.
# Wherever we need $\sigma$ we can just compute $ \sigma = \exp\left(\dfrac{\gamma}{2}\right) $.
#
#
#
# # Chapter 16
#
# **Equation 16-1: Bellman Optimality Equation**
#
# $
# V^*(s) = \underset{a}{\max}\sum\limits_{s'}{T(s, a, s') [R(s, a, s') + \gamma . V^*(s')]} \quad \text{for all }s
# $
#
# **Equation 16-2: Value Iteration algorithm**
#
# $
# V_{k+1}(s) \gets \underset{a}{\max}\sum\limits_{s'}{T(s, a, s') [R(s, a, s') + \gamma . V_k(s')]} \quad \text{for all }s
# $
#
#
# **Equation 16-3: Q-Value Iteration algorithm**
#
# $
# Q_{k+1}(s, a) \gets \sum\limits_{s'}{T(s, a, s') [R(s, a, s') + \gamma . \underset{a'}{\max}\,{Q_k(s',a')}]} \quad \text{for all } (s,a)
# $
#
# **In the text page 458:**
#
# Once you have the optimal Q-Values, defining the optimal policy, noted $\pi^{*}(s)$, is trivial: when the agent is in state $s$, it should choose the action with the highest Q-Value for that state: $ \pi^{*}(s) = \underset{a}{\operatorname{argmax}} \, Q^*(s, a) $.
#
#
# **Equation 16-4: TD Learning algorithm**
#
# $
# V_{k+1}(s) \gets (1-\alpha)V_k(s) + \alpha\left(r + \gamma . V_k(s')\right)
# $
#
#
# **Equation 16-5: Q-Learning algorithm**
#
# $
# Q_{k+1}(s, a) \gets (1-\alpha)Q_k(s,a) + \alpha\left(r + \gamma . \underset{a'}{\max} \, Q_k(s', a')\right)
# $
#
#
# **Equation 16-6: Q-Learning using an exploration function**
#
# $
# Q(s, a) \gets (1-\alpha)Q(s,a) + \alpha\left(r + \gamma \, \underset{a'}{\max}f(Q(s', a'), N(s', a'))\right)
# $
#
# **Equation 16-7: Target Q-Value**
#
# $
# y(s,a)=r+\gamma\,\max_{a'}\,Q_\boldsymbol\theta(s',a')
# $
# # Appendix A
#
# Equations that appear in the text:
#
# $
# \mathbf{H} =
# \begin{pmatrix}
# \mathbf{H'} & 0 & \cdots\\
# 0 & 0 & \\
# \vdots & & \ddots
# \end{pmatrix}
# $
#
#
# $
# \mathbf{A} =
# \begin{pmatrix}
# \mathbf{A'} & \mathbf{I}_m \\
# \mathbf{0} & -\mathbf{I}_m
# \end{pmatrix}
# $
#
#
# $ 1 - \frac{1}{5}^2 - \frac{4}{5}^2 $
#
#
# $ 1 - \frac{1}{2}^2 - \frac{1}{2}^2 $
#
#
# $ \frac{2}{5} \times $
#
#
# $ \frac{3}{5} \times 0 $
# # Appendix C
# Equations that appear in the text:
#
# $ (\hat{x}, \hat{y}) $
#
#
# $ \hat{\alpha} $
#
#
# $ (\hat{x}, \hat{y}, \hat{\alpha}) $
#
#
# $
# \begin{cases}
# \frac{\partial}{\partial x}g(x, y, \alpha) = 2x - 3\alpha\\
# \frac{\partial}{\partial y}g(x, y, \alpha) = 2 - 2\alpha\\
# \frac{\partial}{\partial \alpha}g(x, y, \alpha) = -3x - 2y - 1\\
# \end{cases}
# $
#
#
# $ 2\hat{x} - 3\hat{\alpha} = 2 - 2\hat{\alpha} = -3\hat{x} - 2\hat{y} - 1 = 0 $
#
#
# $ \hat{x} = \frac{3}{2} $
#
#
# $ \hat{y} = -\frac{11}{4} $
#
#
# $ \hat{\alpha} = 1 $
#
#
# **Equation C-1: Generalized Lagrangian for the hard margin problem**
#
# $
# \begin{split}
# \mathcal{L}(\mathbf{w}, b, \mathbf{\alpha}) = \frac{1}{2}\mathbf{w}^T \mathbf{w} - \sum\limits_{i=1}^{m}{\alpha^{(i)} \left(t^{(i)}(\mathbf{w}^T \mathbf{x}^{(i)} + b) - 1\right)} \\
# \text{with}\quad \alpha^{(i)} \ge 0 \quad \text{for }i = 1, 2, \dots, m
# \end{split}
# $
#
# **More equations in the text:**
#
# $ (\hat{\mathbf{w}}, \hat{b}, \hat{\mathbf{\alpha}}) $
#
#
# $ t^{(i)}(\hat{\mathbf{w}}^T \mathbf{x}^{(i)} + \hat{b}) \ge 1 \quad \text{for } i = 1, 2, \dots, m $
#
#
# $ {\hat{\alpha}}^{(i)} \ge 0 \quad \text{for } i = 1, 2, \dots, m $
#
#
# $ {\hat{\alpha}}^{(i)} = 0 $
#
#
# $ t^{(i)}((\hat{\mathbf{w}})^T \mathbf{x}^{(i)} + \hat{b}) = 1 $
#
#
# $ {\hat{\alpha}}^{(i)} = 0 $
#
#
# **Equation C-2: Partial derivatives of the generalized Lagrangian**
#
# $
# \begin{split}
# \nabla_{\mathbf{w}}\mathcal{L}(\mathbf{w}, b, \mathbf{\alpha}) = \mathbf{w} - \sum\limits_{i=1}^{m}\alpha^{(i)}t^{(i)}\mathbf{x}^{(i)}\\
# \dfrac{\partial}{\partial b}\mathcal{L}(\mathbf{w}, b, \mathbf{\alpha}) = -\sum\limits_{i=1}^{m}\alpha^{(i)}t^{(i)}
# \end{split}
# $
#
#
# **Equation C-3: Properties of the stationary points**
#
# $
# \begin{split}
# \hat{\mathbf{w}} = \sum_{i=1}^{m}{\hat{\alpha}}^{(i)}t^{(i)}\mathbf{x}^{(i)}\\
# \sum_{i=1}^{m}{\hat{\alpha}}^{(i)}t^{(i)} = 0
# \end{split}
# $
#
#
# **Equation C-4: Dual form of the SVM problem**
#
# $
# \begin{split}
# \mathcal{L}(\hat{\mathbf{w}}, \hat{b}, \mathbf{\alpha}) = \dfrac{1}{2}\sum\limits_{i=1}^{m}{
# \sum\limits_{j=1}^{m}{
# \alpha^{(i)} \alpha^{(j)} t^{(i)} t^{(j)} {\mathbf{x}^{(i)}}^T \mathbf{x}^{(j)}
# }
# } \quad - \quad \sum\limits_{i=1}^{m}{\alpha^{(i)}}\\
# \text{with}\quad \alpha^{(i)} \ge 0 \quad \text{for }i = 1, 2, \dots, m
# \end{split}
# $
#
# **Some more equations in the text:**
#
# $ \hat{\mathbf{\alpha}} $
#
#
# $ {\hat{\alpha}}^{(i)} \ge 0 $
#
#
# $ \hat{\mathbf{\alpha}} $
#
#
# $ \hat{\mathbf{w}} $
#
#
# $ \hat{b} $
#
#
# $ \hat{b} = t^{(k)} - {\hat{\mathbf{w}}}^T \mathbf{x}^{(k)} $
#
#
# **Equation C-5: Bias term estimation using the dual form**
#
# $
# \hat{b} = \dfrac{1}{n_s}\sum\limits_{\scriptstyle i=1 \atop {\scriptstyle {\hat{\alpha}}^{(i)} > 0}}^{m}{\left[t^{(i)} - {\hat{\mathbf{w}}}^T \mathbf{x}^{(i)}\right]}
# $
# # Appendix D
# **Equation D-1: Partial derivatives of $f(x,y)$**
#
# $
# \begin{split}
# \dfrac{\partial f}{\partial x} & = \dfrac{\partial(x^2y)}{\partial x} + \dfrac{\partial y}{\partial x} + \dfrac{\partial 2}{\partial x} = y \dfrac{\partial(x^2)}{\partial x} + 0 + 0 = 2xy \\
# \dfrac{\partial f}{\partial y} & = \dfrac{\partial(x^2y)}{\partial y} + \dfrac{\partial y}{\partial y} + \dfrac{\partial 2}{\partial y} = x^2 + 1 + 0 = x^2 + 1 \\
# \end{split}
# $
#
# **In the text:**
#
# $ \frac{\partial g}{\partial x} = 0 + (0 \times x + y \times 1) = y $
#
#
# $ \frac{\partial x}{\partial x} = 1 $
#
#
# $ \frac{\partial y}{\partial x} = 0 $
#
#
# $ \frac{\partial (u \times v)}{\partial x} = \frac{\partial v}{\partial x} \times u + \frac{\partial u}{\partial x} \times u $
#
#
# $ \frac{\partial g}{\partial x} = 0 + (0 \times x + y \times 1) $
#
#
# $ \frac{\partial g}{\partial x} = y $
#
#
# **Equation D-2: Derivative of a function _h_(_x_) at point _x_~0~**
#
# $
# \begin{split}
# h'(x) & = \underset{\textstyle x \to x_0}{\lim}\dfrac{h(x) - h(x_0)}{x - x_0}\\
# & = \underset{\textstyle \epsilon \to 0}{\lim}\dfrac{h(x_0 + \epsilon) - h(x_0)}{\epsilon}
# \end{split}
# $
#
#
# **Equation D-3: A few operations with dual numbers**
#
# $
# \begin{split}
# &\lambda(a + b\epsilon) = \lambda a + \lambda b \epsilon\\
# &(a + b\epsilon) + (c + d\epsilon) = (a + c) + (b + d)\epsilon \\
# &(a + b\epsilon) \times (c + d\epsilon) = ac + (ad + bc)\epsilon + (bd)\epsilon^2 = ac + (ad + bc)\epsilon\\
# \end{split}
# $
#
# **In the text:**
#
# $ \frac{\partial f}{\partial x}(3, 4) $
#
#
# $ \frac{\partial f}{\partial y}(3, 4) $
#
#
# **Equation D-4: Chain rule**
#
# $
# \dfrac{\partial f}{\partial x} = \dfrac{\partial f}{\partial n_i} \times \dfrac{\partial n_i}{\partial x}
# $
#
# **In the text:**
#
# $ \frac{\partial f}{\partial n_7} = 1 $
#
#
# $ \frac{\partial f}{\partial n_5} = \frac{\partial f}{\partial n_7} \times \frac{\partial n_7}{\partial n_5} $
#
#
# $ \frac{\partial f}{\partial n_7} = 1 $
#
#
# $ \frac{\partial n_7}{\partial n_5} $
#
#
# $ \frac{\partial n_7}{\partial n_5} = 1 $
#
#
# $ \frac{\partial f}{\partial n_5} = 1 \times 1 = 1 $
#
#
# $ \frac{\partial f}{\partial n_4} = \frac{\partial f}{\partial n_5} \times \frac{\partial n_5}{\partial n_4} $
#
#
# $ \frac{\partial n_5}{\partial n_4} = n_2 $
#
#
# $ \frac{\partial f}{\partial n_4} = 1 \times n_2 = 4 $
#
#
# $ \frac{\partial f}{\partial x} = 24 $
#
#
# $ \frac{\partial f}{\partial y} = 10 $
# # Appendix E
# **Equation E-1: Probability that the i^th^ neuron will output 1**
#
# $
# p\left(s_i^{(\text{next step})} = 1\right) \, = \, \sigma\left(\frac{\textstyle \sum\limits_{j = 1}^N{w_{i,j}s_j + b_i}}{\textstyle T}\right)
# $
#
# **In the text:**
#
# $ \dot{\mathbf{x}} $
#
#
# $ \dot{\mathbf{h}} $
#
#
# **Equation E-2: Contrastive divergence weight update**
#
# $
# w_{i,j}^{(\text{next step})} = w_{i,j} + \eta(\mathbf{x}\mathbf{h}^T - \dot{\mathbf{x}} \dot {\mathbf{h}}^T)
# $
# # Glossary
#
# In the text:
#
# $\ell _1$
#
#
# $\ell _2$
#
#
# $\ell _k$
#
#
# $ \chi^2 $
#
# Just in case your eyes hurt after all these equations, let's finish with the single most beautiful equation in the world. No, it's not $E = mc²$, it's obviously Euler's identity:
# $e^{i\pi}+1=0$
| book_equations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Lpno_zUJT8ms"
# # Cryptographically secure pseudorandom number generators for PyTorch
#
# The torchcsprng API is available in `torchcsprng` module:
#
# + id="db4YYky-PDI_"
# !pip install torchcsprng==0.2.0 torch==1.8.0 -f https://download.pytorch.org/whl/cu101/torch_stable.html
# + id="O1s_j8CPPHSn"
import torch
import torchcsprng as csprng
# + id="o1Kz25IoS9m-"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# + [markdown] id="HLlLxkDIUWCG"
# Create crypto-secure PRNG from /dev/urandom:
# + id="yyyYlq5kUQss"
urandom_gen = csprng.create_random_device_generator('/dev/urandom')
# + [markdown] id="xbUCnJfkUdUI"
# Create empty boolean tensor on the `device` and initialize it with random values from `urandom_gen`:
#
# + id="zmj_VlIzUYIO"
torch.empty(10, dtype=torch.bool, device=device).random_(generator=urandom_gen)
# + [markdown] id="ycODsYhtUud9"
# Create empty int16 tensor on the `device` and initialize it with random values in range [0, 100) from `urandom_gen`:
# + id="uel-jbW9UlZH"
torch.empty(10, dtype=torch.int16, device=device).random_(100, generator=urandom_gen)
# + [markdown] id="1jXW1FEmVMW_"
# Create non-crypto-secure MT19937 PRNG:
# + id="sL-cwFGfVOrp"
mt19937_gen = csprng.create_mt19937_generator()
torch.empty(10, dtype=torch.int64, device=device).random_(torch.iinfo(torch.int64).min, to=None, generator=mt19937_gen)
# + [markdown] id="KW96wT4UVXBm"
# Create crypto-secure PRNG from default random device:
# + id="tjwbuE6FVRgm"
default_device_gen = csprng.create_random_device_generator()
torch.randn(10, device=device, generator=default_device_gen)
# + [markdown] id="qYgdkZAYVfZT"
# Create non-crypto-secure MT19937 PRNG with seed:
# + id="xjOsYOxxVbzg"
mt19937_gen = csprng.create_mt19937_generator(42)
first = torch.empty(10, device=device).geometric_(p=0.2, generator=mt19937_gen)
# + [markdown] id="cV77v7tHVlRd"
# Recreate MT19937 PRNG with the same seed:
# + id="i0O2lC0hVjAg"
mt19937_gen = csprng.create_mt19937_generator(42)
second = torch.empty(10, device=device).geometric_(p=0.2, generator=mt19937_gen)
# + [markdown] id="OcgSK0mejcef"
# Check that `first` equals to `second`:
# + id="vMx1BRO3jh7L"
assert (first == second).all()
| examples/csprng.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import h5py
import scipy.io
np.random.seed(7) # for reproducibility
import keras.backend as K
from keras.models import Model, load_model
import matplotlib.pyplot as plt
import matplotlib
#from concise.utils.plot import seqlogo, seqlogo_fig
import sys
sys.path.append("../Scripts/")
from IntegratedGradients import *
from util_funcs import *
from plotseqlogo import seqlogo, seqlogo_fig
# -
### library to use R
# %load_ext rpy2.ipython
# %load_ext rmagic
# add the absolute path of Data Directory to path_to_data
path_to_data = "/Users/storm/git/Multi-resBind/Data/"
path_to_data_low = path_to_data + "data_RBPslow.h5"
X_test_seq_low, X_test_region_low, y_test_RBP_low, y_test_name_low, y_train_low = load_data(path_to_data_low)
concat_low= np.concatenate((X_test_seq_low, X_test_region_low[:, 50:200, :]), axis=2)
print (concat_low.shape)
#load models and obtain prediction and integrated_gradients
# add the absolute path of results Directory to path_to_model
path_to_model = "/Users/storm/git/Multi-resBind/results/multi_resBind_vs_DeepRiPe/"
path_to_model_resbind_low= path_to_model + "m_resbind_low_model.h5"
model_resbind_low=load_model(path_to_model_resbind_low, custom_objects={'precision': precision,'recall': recall })
pred_resbind_low=model_resbind_low.predict(concat_low)
#RBPnames for each model
RBPnames_low=np.array(['MBNL1', 'P53_NONO', 'PUM2', 'QKI', 'AGO3', 'FUS', 'TAF15', 'ZFP36', 'DICER1', 'EIF3A', 'EIF3D', 'EIF3G', 'SSB', 'PAPD5', 'CPSF4', 'CPSF3', 'RTCB', 'FXR1', 'NOP58', 'NOP56', 'FBL', 'LIN28A', 'LIN28B', 'UPF1', 'G35', 'G45', 'XPO5'])
#number of tasks for each model
num_task_low=len(RBPnames_low)
# +
# code for plot Fig.S4
# -
igres_resbind= integrated_gradients(model_resbind_low)
RBPnames = RBPnames_low
pred= pred_resbind_low
igres = igres_resbind
X_test_seq = X_test_seq_low
X_test_region = X_test_region_low
y_test_RBP = y_test_RBP_low
e = concat_low
RBPname = "MBNL1"
RBP_index = np.where(RBPnames == RBPname)[0][0]
#print (RBP_index)
ind = [i[0] for i in sorted(enumerate(pred[:, RBP_index]), key=lambda x: x[1],
reverse=True) if y_test_RBP[i[0], RBP_index] == 1 and pred[i[0], RBP_index] > 0.50][0:3]
ex_seq = np.array([igres.explain(e[i], outc=RBP_index, reference=False) for i in ind])
#print (ex_seq.shape)
seq_log = np.multiply(ex_seq, e[ind])
#print (seq_log.shape)
plt.close("all")
seqlogo_fig(np.transpose(seq_log[:,50:100,:4],axes=(1,2,0)), vocab="RNA", figsize=(8,3), ncol=1)
plt.show()
#plt.savefig('MBNL1_contribution_map_zero.eps', format='eps',bbox_inches='tight')
RBPname = "PUM2"
RBP_index = np.where(RBPnames == RBPname)[0][0]
#print (RBP_index)
ind = [i[0] for i in sorted(enumerate(pred[:, RBP_index]), key=lambda x: x[1],
reverse=True) if y_test_RBP[i[0], RBP_index] == 1 and pred[i[0], RBP_index] > 0.50][0:3]
ex_seq = np.array([igres.explain(e[i], outc=RBP_index, reference=False) for i in ind])
#print (ex_seq.shape)
seq_log = np.multiply(ex_seq, e[ind])
#print (seq_log.shape)
plt.close("all")
seqlogo_fig(np.transpose(seq_log[:,50:100,:4],axes=(1,2,0)), vocab="RNA", figsize=(8,3), ncol=1)
plt.show()
#plt.savefig('PUM2_contribution_map_zero.eps', format='eps',bbox_inches='tight')
RBPname = "QKI"
RBP_index = np.where(RBPnames == RBPname)[0][0]
#print (RBP_index)
ind = [i[0] for i in sorted(enumerate(pred[:, RBP_index]), key=lambda x: x[1],
reverse=True) if y_test_RBP[i[0], RBP_index] == 1 and pred[i[0], RBP_index] > 0.50][0:3]
ex_seq = np.array([igres.explain(e[i], outc=RBP_index, reference=False) for i in ind])
#print (ex_seq.shape)
seq_log = np.multiply(ex_seq, e[ind])
#print (seq_log.shape)
plt.close("all")
seqlogo_fig(np.transpose(seq_log[:,50:100,:4],axes=(1,2,0)), vocab="RNA", figsize=(8,3), ncol=1)
plt.show()
#plt.savefig('QKI_contribution_map_zero.eps', format='eps',bbox_inches='tight')
RBPname = "SSB"
RBP_index = np.where(RBPnames == RBPname)[0][0]
#print (RBP_index)
ind = [i[0] for i in sorted(enumerate(pred[:, RBP_index]), key=lambda x: x[1],
reverse=True) if y_test_RBP[i[0], RBP_index] == 1 and pred[i[0], RBP_index] > 0.50][0:3]
ex_seq = np.array([igres.explain(e[i], outc=RBP_index, reference=False) for i in ind])
#print (ex_seq.shape)
seq_log = np.multiply(ex_seq, e[ind])
#print (seq_log.shape)
plt.close("all")
seqlogo_fig(np.transpose(seq_log[:,50:100,:4],axes=(1,2,0)), vocab="RNA", figsize=(8,3), ncol=1)
plt.show()
#plt.savefig('SSB_contribution_map_zero.eps', format='eps',bbox_inches='tight')
RBPname = "NOP58"
RBP_index = np.where(RBPnames == RBPname)[0][0]
#print (RBP_index)
ind = [i[0] for i in sorted(enumerate(pred[:, RBP_index]), key=lambda x: x[1],
reverse=True) if y_test_RBP[i[0], RBP_index] == 1 and pred[i[0], RBP_index] > 0.50][0:3]
ex_seq = np.array([igres.explain(e[i], outc=RBP_index, reference=False) for i in ind])
#print (ex_seq.shape)
seq_log = np.multiply(ex_seq, e[ind])
#print (seq_log.shape)
plt.close("all")
seqlogo_fig(np.transpose(seq_log[:,50:100,:4],axes=(1,2,0)), vocab="RNA", figsize=(8,3), ncol=1)
plt.show()
#plt.savefig('NOP58_contribution_map_zero.eps', format='eps',bbox_inches='tight')
RBPname = "NOP56"
RBP_index = np.where(RBPnames == RBPname)[0][0]
#print (RBP_index)
ind = [i[0] for i in sorted(enumerate(pred[:, RBP_index]), key=lambda x: x[1],
reverse=True) if y_test_RBP[i[0], RBP_index] == 1 and pred[i[0], RBP_index] > 0.50][0:3]
ex_seq = np.array([igres.explain(e[i], outc=RBP_index, reference=False) for i in ind])
#print (ex_seq.shape)
seq_log = np.multiply(ex_seq, e[ind])
#print (seq_log.shape)
plt.close("all")
seqlogo_fig(np.transpose(seq_log[:,50:100,:4],axes=(1,2,0)), vocab="RNA", figsize=(8,3), ncol=1)
plt.show()
#plt.savefig('NOP56_contribution_map_zero.eps', format='eps',bbox_inches='tight')
| jupyter_notebooks/FigS4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Annotating Your Data
import holoviews as hv
import holoviews.util
hv.extension('bokeh')
# As introduced in the [Getting Started guide](../getting_started/1-Introduction.ipynb), HoloViews relies heavily on semantic *annotations*, i.e., metadata you declare that lets HoloViews interpret what your data represents. With these annotations, HoloViews can perform complex tasks like visualization automatically.
#
# There are three main kinds of annotation that can be associated with each element:
# 1. **Type**, used to declare the sort of data you have, which is required before it can be visualized,
# 2. **Dimensions**, used to specify the abstract space in which the data resides, allowing axis labeling and indexing, and
# 3. **Group/Label**, used to declare a meaningful category and human-readable description of the element, allowing plot labeling and selecting related sets of elements.
#
# This user guide explains each of these three types of annotation, describing why you would need or want to use them.
# ## 1. Specifying element type
#
# Basic Python data structures like dataframes, arrays, lists, and dictionaries can be used to represent an infinite variety of different types of data, and thus they cannot be visualized as any particular type of graphical representation without some additional information from the user that says what sort of data it is meant to be. The user can declare this information by selecting a suitable HoloViews element type from the many different ones available (see the [Reference Gallery](http://holoviews.org/reference/index.html)).
#
# For instance, let's say you have two lists of numbers:
xs = range(-10,11)
ys = [100-x**2 for x in xs]
# As far as Python is concerned, ``xs`` and ``ys`` are just two arbitrary lists, which could represent nearly anything imaginable. But we as humans can see that each of the ``ys`` is a value computed from one of the ``xs`` by evaluating the function $y=100-x^2$. We can convey some of that information to HoloViews by choosing a ``Curve`` element type, which is a convenient shorthand for "a discrete set of real-valued samples from a continuous function of one real-valued variable":
curve = hv.Curve((xs, ys))
curve
# As you can see, declaring the element type is the only *required* bit of annotation, instantly making your data visualizable. However, this initial visualization relies on various defaults that may not be appropriate for your data, and you can override these defaults by declaring additional annotations as described below.
# ## 2. Specifying element dimensionality
#
# Each element type can process a certain number and type of *dimensions*, i.e., ways in which the data can vary. For instance, the ``Curve`` object above has two dimensions, $x$ and $y$. If you look at how we generated the data, you can see that these two dimensions are semantically different -- we chose an arbitrary set of values for the ``xs``, and then calculated a corresponding value to make each of the ``ys``. In mathematical terms, $x$ is thus an independent variable (selected by the creator of the data), and $y$ is a dependent variable (typically measured or calculated from the independent variable(s)).
#
# HoloViews elements call these two different types of variables *key dimensions* (``kdims``) and *value dimensions* (``vdims``). The *key dimensions* are the dimensions you can index *by* to get the values corresponding to the *value* dimensions. You can learn more about indexing data in the later [Indexing and Selecting Data](./09-Indexing_and_Selecting_Data.ipynb) user guide.
#
# Different elements have different numbers of required key dimensions and value dimensions. For instance, a ``Curve`` always has one key dimension and one value dimension. As we did not explicitly specify anything regarding dimensions when declaring the curve above, the ``kdims`` and ``vidms`` use their default names 'x' and 'y':
"Object 'curve' has kdims {kdims} and vdims {vdims}".format(kdims=curve.kdims, vdims=curve.vdims)
# The easiest way to override the default dimension names is to provide strings for the dimensions, where the second argument in the Element constructor will always be the ``kdims``, and the third will always be the ``vdims``:
trajectory = hv.Curve((xs, ys), 'distance', 'height')
trajectory
"Object 'trajectory' has kdims {kdims} and vdims {vdims} ".format(kdims=trajectory.kdims, vdims=trajectory.vdims)
# We can see that the strings we provided have been 'promoted' to dimension objects. The ``kdims`` and ``vdims`` *always* contain instances of the ``Dimension`` class, described in the following section. Here, the immediate effect is to use the new names for the displayed axis labels.
# ### Dimension parameters
#
# ``Dimensions`` are not just names, they are rich objects with numerous parameters that can be used to describe the space in which the data resides. Only two of these are considered *core* parameters that uniquely identify the dimension object; the rest are auxilliary metadata. The most important parameters are:
#
# <br>
# <dl class="dl-horizontal">
# <dt>``name``</dt><dd>(core) A concise name for the dimension, which for convenient usage as a keyword argument should usually be a legal Python identifier.</dd>
# <dt>``label`` <dd>(core) A optional longer description of the dimension, which is convenient if you want the displayed label to contain arbitrary spaces, symbols, or unicode.</dd>
# <dt>``range`` <dd>The minimum and maximum allowable values for the dimension, for error checking and generating widgets when needed.</dd>
# <dt>``soft_range`` <dd>Suggested minimum and maximum values within the allowed range, used to specify a useful portion of the range for widgets and animations.</dd>
# <dt>``step`` <dd>Suggested interval for sampling a continuous range, if needed for a widget or animation.</dd>
# <dt>``unit`` <dd>The name of the unit to be associated with the dimension, if any, for labelling.</dd>
# <dt>``values`` <dd>Explicit list of allowed dimension values, for error checking, widgets, and animations.</dd>
# </dl>
#
#
# For the full list of parameters, you can call ``hv.help(hv.Dimension)``.
#
# Similar to how you can just use a string if all you want to specify is the name, you can provide a ``(name,label)`` tuple if you want to specify the ``name`` and the ``label`` to ``kdims`` and ``vdims`` without building an explicit ``Dimension``:
# +
wo_unit = hv.Curve((xs, ys),
('distance','Horizontal distance'),
('height','Height above sea level'))
distance = hv.Dimension('distance', label='Horizontal distance', unit='m')
height = hv.Dimension(('height','Height above sea level'), unit='m')
with_unit = hv.Curve((xs, ys), distance, height)
# (using + to compose elements is described in the next guide)
wo_unit + with_unit
# -
# Note that after supplying the longer labels, you can still use the short name to specify the dimension in keyword arguments. For instance, try using ``with_unit.select(distance=(5,8))`` in the cell above.
# ### Setting properties with redim
#
# Declaring dimension objects with appropriate parameters can be awkward and verbose if you only want to set a few specific parameters. You can often avoid declaring explicit dimension objects using the ``redim`` method, which returns a *clone* of the element: the same data, wrapped in a new instance of the same element type with the new dimension settings.
#
# Let's use ``redim`` to swap out the 'height' dimension for an 'altitude' dimension:
renamed_height = trajectory.redim(height='altitude')
renamed_height
# The ``redim`` "method" is actually a utility that can be used to set any of the dimension parameters, such as the label, unit, range, or values. For instance, the label can be updated on an existing object by specifying the dimension name and then the new value for that parameter:
renamed_height.redim.label(altitude='Altitude above sea-level', distance='Horizontal distance')
# ## 3. Organizing your elements with groups and labels
#
# A complex visualization you build with HoloViews may include many instances of the same element type, each built from different bits of data and potentially representing categorically distinct types of information to you. To help you keep track of these distinctions when you need to, HoloViews provides a ``group`` parameter you can use to declare semantically distinct categories for elements, and a ``label`` parameter you can use to identify which specific item the element represents within that category:
low_ys = [25-(0.5*el)**2 for el in xs]
hv.Curve((xs, low_ys), group='Trajectory', label='Shallow') + \
hv.Curve((xs, ys), group='Trajectory', label='Medium')
# As you can see, the ``group`` and ``label`` information will be used to generate sensible titles, here indicating that both sets of data represent trajectories, and that there are two different specific trajectories being shown. Once the group and/or label have been specified, they can be used for [Customizing Plots](./03-Customizing_Plots.ipynb) (e.g. to make all trajectories have the same line width and style, or to customize one particular plot out of many of the same type). The group and label are also used for indexing, as we will see in the following [Composing_Elements](./02-Composing_Elements.ipynb) guide.
| examples/user_guide/01-Annotating_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
# # Importing Library
import matplotlib.pyplot as plt
import seaborn as sns
# # Visualize the Data
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
# Load Data
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
# -
#top five row of data
train.head()
test.head()
#dimension of train data
train.shape
#dimension of test data
test.shape
# As you can see dimension of test and train data is different, you can not merge the data, data exploration should be done indivisually
train.info()
# # Analyse the target variable (Univariate Analysis)
sns.distplot(train['SalePrice']);
# From the above you can see that Sales Price has Positive Skewness
#skewness
print("Skewness: %f" % train['SalePrice'].skew())
#To remove the skewness we use the log function
SalePriceLog = np.log(train['SalePrice'])
SalePriceLog.skew()
#Plot after adjusted skewness
sns.distplot(SalePriceLog);
SalePrice = SalePriceLog
# # Bivariate Analysis
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(12,9))
sns.heatmap(corrmat, vmax=0.9, square=True)
#Correlation map to see how features are correlated with SalePrice
corrmat = train.corr()
plt.subplots(figsize=(50,20))
sns.heatmap(corrmat, vmax=0.9, square=True, annot=True)
# +
Num=corrmat['SalePrice'].sort_values(ascending=False).head(10).to_frame()
Num
# -
# # Missing Variable Treatment
#missing data
total = train.isnull().sum().sort_values(ascending=False)
missing_data = pd.concat([total], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
#missing data
total = test.isnull().sum().sort_values(ascending=False)
missing_data = pd.concat([total], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
#visulize missing value using sns plot
f, ax = plt.subplots(figsize=(15, 12))
plt.xticks(rotation='90')
sns.barplot(x=total.index, y=total)
plt.xlabel('Features', fontsize=15)
plt.ylabel('Percent of missing values', fontsize=15)
plt.title('Percent missing data by feature', fontsize=15)
# More than 50% of data are missing for PoolQC, MiscFeature, Alley, Fence. So, we can drop the dataframe.
# Few data frame is uncorrelated, so we can drop them too.
#
train.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence','FireplaceQu','GarageType','GarageFinish','GarageQual','GarageCond','MasVnrType'], axis=1 ,inplace=True)
test.drop(['PoolQC', 'MiscFeature', 'Alley', 'Fence','FireplaceQu','GarageType','GarageFinish','GarageQual','GarageCond','MasVnrType'], axis=1 ,inplace=True)
# Replacing other missing data frame into median for continous variable and mode for categorical variable
# missing value treatment for continuous variable
for col in ('LotFrontage','GarageYrBlt','GarageCars','BsmtFinSF1','TotalBsmtSF','GarageArea','BsmtFinSF2','BsmtUnfSF','LotFrontage','GarageYrBlt','BsmtFullBath','BsmtHalfBath'):
train[col]=train[col].fillna(train[col].mean())
test[col]=test[col].fillna(test[col].mean())
# missing value treatment for categorical variable
for col in ('BsmtQual','BsmtCond','BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'MasVnrArea', 'Electrical','Exterior2nd','Exterior1st','KitchenQual','Functional','SaleType','Utilities','MSZoning','BsmtQual','BsmtCond','BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'MasVnrArea', 'Electrical'):
test[col]=test[col].fillna(test[col].mode()[0])
train[col]=train[col].fillna(train[col].mode()[0])
# checking if is there any missing variable left
train.isnull().sum().max()
# checking if is there any missing variable left
test.isnull().sum().max()
# # Outlier
# +
list_of_numerics=train.select_dtypes(include=['float','int']).columns
types= train.dtypes
outliers= train.apply(lambda x: sum(
(x<(x.quantile(0.25)-1.5*(x.quantile(0.75)-x.quantile(0.25))))|
(x>(x.quantile(0.75)+1.5*(x.quantile(0.75)-x.quantile(0.25))))
if x.name in list_of_numerics else ''))
explo = pd.DataFrame({'Types': types,
'Outliers': outliers}).sort_values(by=['Types'],ascending=False)
explo.transpose()
# +
fig, axes = plt.subplots(1,2, figsize=(12,5))
ax1= sns.scatterplot(x='GrLivArea', y='SalePrice', data= train,ax=axes[0])
ax2= sns.boxplot(x='GrLivArea', data= train,ax=axes[1])
# -
#removing outliers recomended by author
train= train[train['GrLivArea']<4000]
#test= test[test['GrLivArea']<4000]
# # Variable Transformation
# As you might know by now, we can’t have text in our data if we’re going to run any kind of model on it. So before we can run a model, we need to make this data ready for the model.
#
# Numerical variable which are actually categorical
# +
train['MSSubClass'] = train['MSSubClass'].apply(str)
train['YrSold'] = train['YrSold'].astype(str)
test['MSSubClass'] = test['MSSubClass'].apply(str)
test['YrSold'] = test['YrSold'].astype(str)
# -
# Extracting the categorical column from train and test data.
categorial_features_train = train.select_dtypes(include=[np.object])
categorial_features_train.head(2)
categorial_features_test = test.select_dtypes(include=[np.object])
categorial_features_test.head(2)
# Label Encoding
# +
##Label Encoding
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
label_encoders = {}
for column in categorial_features_train:
label_encoders[column] = LabelEncoder()
train[column] = label_encoders[column].fit_transform(train[column])
# +
##Label Encoding
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
label_encoders = {}
for column in categorial_features_test:
label_encoders[column] = LabelEncoder()
test[column] = label_encoders[column].fit_transform(test[column])
# -
# dividing into dependent and independent variable data set
xtrain = train.drop('SalePrice', axis = 1)
ytrain = train['SalePrice']
# # Modelling
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import xgboost as xgb
# 1. **Linear Regression**
model1 = LinearRegression()
model1.fit(xtrain, ytrain)
# score the model
model1.score(xtrain,ytrain)
# 2. **Gradient Boost**
# Hyper Tunnning
model2 = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
model2.fit(xtrain,ytrain)
model2.score(xtrain,ytrain)
# 3. **XGBoost**
# Hyper Tunning
model3 = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1)
model3.fit(xtrain, ytrain)
model3.score(xtrain, ytrain)
# Prediction on test data of different model
pred_1=model1.predict(test)
pred_2=model2.predict(test)
pred_3=model3.predict(test)
# we take an average of predictions from all the models and use it to make the final prediction
final_pred = (pred_1+pred_2+pred_3)/3
final_pred
# Creating Submission file
sample_sub = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv')
sample_sub.head()
sample_sub['SalePrice'] = final_pred
sample_sub.to_csv('final_submission1.csv', index=False)
# # *If you like my kernel please upvote :) *
| dataset_0/notebook/house-price-prediction-for-beginners.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Taxicab (tramcar) Problem --- Using PyMC3
#
# (From Kevin Murphy: Machine Learning: A Probabalistic Perspective; Chapter 3: Question: 3.10)
#
# By <NAME>
#
# Using example code from:
# ### Solving the Bayesian German Tank problem with PyMC and PyStan19 Dec 2015
# Source: http://isaacslavitt.com/2015/12/19/german-tank-problem-with-pymc-and-pystan/
# PG: Using Anaconda Python 3.5 on Ubuntu 16.04
# Solving it:
# The Wikipedia article explains both the frequentist and Bayesian approaches, and the example in the Sampyl docs does a good job of explaining the setup for the Bayesian version. Here's the gist: we want to know the posterior probability of the number of tanks
# N
# , given data
# D
# on which tank serial numbers have been observed so far:
#
# P
# (
# N
# ∣
# D
# )
# ∝
# P
# (
# D
# ∣
# N
# )
# P
# (
# N
# )
# The right-hand side breaks down into two parts. The likelihood of observing all the serial numbers we saw is the product of the individual probabilities
# D
# i
# given the actual number of tanks in existence
# N
# :
#
# P
# (
# D
# ∣
# N
# )
# =
# ∏
# i
#
# P
# (
# D
# i
# ∣
# N
# )
# P
# (
# D
# i
# ∣
# N
# )
# ∼
# D
# i
# s
# c
# r
# e
# t
# e
# U
# n
# i
# f
# o
# r
# m
# (
# D
# ,
# m
# i
# n
# =
# 0
# ,
# m
# a
# x
# =
# N
# )
# The prior over
# N
# , which we know must be at least as high as the highest serial number we observed (call that
# m
# ) but could be much higher:
#
# P
# (
# N
# )
# ∼
# D
# i
# s
# c
# r
# e
# t
# e
# U
# n
# i
# f
# o
# r
# m
# (
# N
# ,
# m
# i
# n
# =
# m
# ,
# m
# a
# x
# =
# s
# o
# m
# e
# b
# i
# g
# n
# u
# m
# b
# e
# r
# )
# The MCMC strategy will be to try a bunch of different values for
# N
# , see how likely they are (compared to one another), and assemble the results of the sampling into a distribution over
# N
# . The solution implementation with Sampyl was interesting, but the project is still early so I wanted to give it a shot using two other popular MCMC libraries.
# ---
# PG: First repeat his work:
# +
import numpy as np
import pymc3 as pm
# D: the data
y = np.array([10, 256, 202, 97])
model = pm.Model()
with model:
# prior - P(N): N ~ uniform(max(y), 10000)
# note: we use a large-ish number for the upper bound
N = pm.DiscreteUniform("N", lower=y.max(), upper=10000)
# likelihood - P(D|N): y ~ uniform(0, N)
y_obs = pm.DiscreteUniform("y_obs", lower=0, upper=N, observed=y)
# choose the sampling method - we have to use Metropolis-Hastings because
# the variables are discrete
step = pm.Metropolis()
# we'll use four chains, and parallelize to four cores
start = {"N": y.max()} # the highest number is a reasonable starting point
trace = pm.sample(100000, step, start, chain=4, njobs=4)
# -
# PG: I think the warning above is a concern statistically. For further review.
# -----
# PG: You might run into some Python packaging issues.
# (I am using Python3 in Anaconda, on Ubuntu 16.04)
#
# Conda update conda seems to have solved them for me.
# But here are some other resources in case you need them:
#
# https://github.com/Theano/Theano/issues/6568
# -----
# summarize the trace
pm.summary(trace)
# His output also had:
# + active=""
# Posterior quantiles:
# 2.5 25 50 75 97.5
# |--------------|==============|==============|--------------|
#
# 258.000 281.000 322.000 406.000 862.000
# -
# I did not get the same output that he did here: http://isaacslavitt.com/2015/12/19/german-tank-problem-with-pymc-and-pystan/
# (That might be due to differences in python/Anaconda/PYMC3 between what he used and what I am using)
# So I tried to find the information this way:
pm.quantiles(trace)
# I guess this is similar; but perhaps I am getting 4 different outputs from the 4 jobs specified.
# My median is 321-323.
# (His output might average them.)
# But
# 873
# 913
# 816
# 837
# averages to :
# 859.75, not 862
#
# (maybe his average is weighted?)
# ##### Plot with a burn in--- throwing away the first 10,000 samples
# Why do we do a burn in?
#
# "You create the parameter trace plots to make sure that your a priori distribution is well calibrated which is indicated by your parameters having sufficient state changes as the MCMC algorithm runs..."
#
# Source: https://stats.stackexchange.com/questions/120936/why-we-need-trace-plot-for-mcmc-results
# +
# %matplotlib inline
from matplotlib import pyplot as plt
# plot the trace
burn_in = 10000 # throw away the first 10,000 samples
pm.traceplot(trace[burn_in:])
plt.show()
# -
# So the mean here was 384, and the median was 322.
# ## PG: Now let's try to apply the above advice to Exercise 3.10 Taxicab (tramcar) Problem
# Exercise 3.10 Taxicab (tramcar) problem
# Suppose you arrive in a new city and see a taxi numbered 100. How many taxis are there in this city? Let
# us assume taxis are numbered sequentially as integers starting from 0, up to some unknown upper bound
# θ. (We number taxis from 0 for simplicity; we can also count from 1 without changing the analysis.) Hence
# the likelihood function is
# p(x) = U(0, θ)
# , the uniform distribution. The goal is to estimate θ. We will use
# the Bayesian analysis from Exercise 3.9.
#
# a. Suppose we see one taxi numbered 100, so D = {100}, m = 100, N = 1. Using an (improper)
# non-informative prior on θ of the form p(θ) = P a(θ|0, 0) ∝ 1/θ, what is the posterior p(θ|D)?
# b. Compute the posterior mean, mode and median number of taxis in the city, if such quantities exist.
# c. Rather than trying to compute a point estimate of the number of taxis, we can compute the predictive
# density over the next taxicab number using
# p(D
# |D, α) =
# p(D
# |θ)p(θ|D, α)dθ = p(D
# |β) (3.96)
# where α = (b, K)
# are the hyper-parameters,
# β = (c, N + K)
# are the updated hyper-parameters.
# Now
# consider the case
# D = {m}, and D = {x}.
# Using Equation 3.95, write down an expression for
# p(x|D, α) (3.97)
#
# As above, use a non-informative prior b = K = 0.
#
# d. Use the predictive density formula to compute the probability that the next taxi you will see (say,
# the next day) has number 100, 50 or 150, i.e., compute p(x = 100|D, α), p(x = 50|D, α), p(x =
# 150|D, α).
#
# e. Briefly describe (1-2 sentences) some ways we might make the model more accurate at prediction.
# ##### a. what is the posterior p(θ|D)?
# We are just seeing the number 100.
# ##### b. Compute the posterior mean, mode and median number of taxis in the city, if such quantities exist.
# +
# D: the data
y = np.array([100])
model = pm.Model()
with model:
# prior - P(N): N ~ uniform(max(y), 10000)
# note: we use a large-ish number for the upper bound
N = pm.DiscreteUniform("N", lower=y.max(), upper=10000)
# likelihood - P(D|N): y ~ uniform(0, N)
y_obs = pm.DiscreteUniform("y_obs", lower=0, upper=N, observed=y)
# choose the sampling method - we have to use Metropolis-Hastings because
# the variables are discrete
step = pm.Metropolis()
# we'll use four chains, and parallelize to four cores
start = {"N": y.max()} # the highest number is a reasonable starting point
trace = pm.sample(100000, step, start, chain=4, njobs=4)
# -
# There is some information on chaings versus job here:
# fonnesbeck (PYMC orginator)
# PyMC_devs
# Nov '17
# I’ve submitted a PR to improve the docstring for chains. It will select the higher of njobs or 2. Most of the time you will want to sample in parallel to accomodate Gelman-Rubin diagnostic calculation. So, when you set njobs to 1 there will still be 2 chains sampled, it will just occur in serial (unless you set chains to 1 as well).
#
# Note, however, when you ask for 1000 samples (by setting iterations=1000, you will get 1000 samples, it will just be broken out over however many chains are specified.
#
# https://discourse.pymc.io/t/specifying-the-number-of-chains-chains-vs-njobs/595/7
# summarize the trace
pm.summary(trace)
# The mean is 2,191 with an upper bound of 10,000
# With this largare upper bound of 10,000, versus the 1,000 used by <NAME>, we have a much higher estimated mean.
pm.quantiles(trace)
# Median ranges from 962 to 1054
# The mode should be 100, since there is only one observation.
# ---
# ##### Let's try it with 1000, as <NAME> did:
# +
# D: the data
y = np.array([100])
model = pm.Model()
with model:
# prior - P(N): N ~ uniform(max(y), 1000)
# note: we use a large-ish number for the upper bound
N = pm.DiscreteUniform("N", lower=y.max(), upper=1000)
# likelihood - P(D|N): y ~ uniform(0, N)
y_obs = pm.DiscreteUniform("y_obs", lower=0, upper=N, observed=y)
# choose the sampling method - we have to use Metropolis-Hastings because
# the variables are discrete
step = pm.Metropolis()
# we'll use four chains, and parallelize to four cores
start = {"N": y.max()} # the highest number is a reasonable starting point
trace = pm.sample(100000, step, start, chain=4, njobs=4)
# -
# PG: Im not sure if I should be changing these specifications with only 1 sample.
# trace = pm.sample(100000, step, start, chain=4, njobs=4)
# In particular, can I reduce this from 100,000 samples to make it run faster and still get a good result?
# Need to do further reading on PYMC3, etc.
# summarize the trace
pm.summary(trace)
# The mean is 389, which is in the same ballpark; but higher than the 333, using Allen Downey's code.
pm.quantiles(trace)
# The median is 311 to 315
# Again, the mode should 100, for the one sample.
# ##### c. Rather than trying to compute a point estimate of the number of taxis, we can compute the predictive density over the next taxicab number using
# p(D |D, α) = p(D |θ)p(θ|D, α)dθ = p(D |β) (3.96)
# where α = (b, K)
# are the hyper-parameters,
# β = (c, N + K) are the updated hyper-parameters.
#
# Now consider the case
# D = {m}, and D = {x}.
# Using Equation 3.95, write down an expression for
# p(x|D, α) (3.97)
#
# As above, use a non-informative prior b = K = 0.
# Start with this...
# plot the trace
burn_in = 10000 # throw away the first 10,000 samples
pm.traceplot(trace[burn_in:])
plt.show()
# Then...?
# ##### d. Use the predictive density formula to compute the probability that the next taxi you will see (say, the next day) has number 100, 50 or 150, i.e., compute
# p(x = 100|D, α), p(x = 50|D, α), p(x = 150|D, α).
# ##### e. Briefly describe (1-2 sentences) some ways we might make the model more accurate at prediction.
| Taxicab (tramcar) Problem (from K. Murphy ML Book) Using PyMC3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Fetch the necessary data
# +
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + "/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
# -
fetch_housing_data()
# +
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
# -
housing = load_housing_data()
housing.head()
housing.info()
housing["ocean_proximity"].value_counts()
housing.describe()
# %matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
plt.show()
# +
import numpy as np
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
# -
train_set, test_set = split_train_test(housing, 0.2) #page 49
print(len(train_set), "train +", len(test_set), "test")
# +
import hashlib
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash))
return data.loc[~in_test_set], data.loc[in_test_set]
# -
housing_with_id = housing.reset_index() # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
train_set.head()
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
train_set.head()
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
train_set.head()
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5) # pg 52
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
| GetTheData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Alabama updates daily at 4:30pm CDT
from selenium import webdriver
import time
import pandas as pd
import pendulum
import re
import yaml
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
#chrome_options.add_argument("--disable-extensions")
#chrome_options.add_argument("--disable-gpu")
#chrome_options.add_argument("--no-sandbox) # linux only
chrome_options.add_argument("--start-maximized")
# chrome_options.add_argument("--headless")
chrome_options.add_argument("user-agent=[Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:73.0) Gecko/20100101 Firefox/73.0]")
with open('config.yaml', 'r') as f:
config = yaml.safe_load(f.read())
state = 'AL'
scrape_timestamp = pendulum.now().strftime('%Y%m%d%H%M%S')
url = 'https://www.alabamapublichealth.gov/infectiousdiseases/2019-coronavirus.html'
def fetch():
driver = webdriver.Chrome('../20190611 - Parts recommendation/chromedriver', options=chrome_options)
driver.get(url)
time.sleep(5)
datatbl = driver.find_element_by_css_selector('table')
data = []
for row in datatbl.find_elements_by_css_selector('tr'):
data.append([cell.text for cell in row.find_elements_by_css_selector('td')])
page_source = driver.page_source
driver.close()
return pd.DataFrame(data, columns=['county','positive_cases']), page_source
def save(df, source):
df.to_csv(f"{config['data_folder']}/{state}_county_{scrape_timestamp}.txt", sep='|', index=False)
with open(f"{config['data_source_backup_folder']}/{state}_county_{scrape_timestamp}.html", 'w') as f:
f.write(source)
def run():
df, source = fetch()
save(df, source)
| AL by county.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
si_list = []
with open('dictionary.csv') as f:
reader = csv.reader(f)
for row in reader:
if row[0][0] == "し":
si_list.append(row[1][0])
# -
len(si_list)
si_list = list(set(si_list))
# +
text = "鹿児島県志布志市"
res = ""
for item in list(text):
if item in si_list:
res +="ピッピ"
else:
res += item
print(res)
# +
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://hms.harvard.edu/themes/harvardmedical/logo.svg" width= "250px">
#
# ---
# # <img src="https://hail.is/docs/devel/hail-logo-cropped.png" width= "50px"> **Workshop**
# This notebook is designed to provide a broad overview of Hail's functionality, with emphasis on the functionality to manipulate and query a genetic dataset. Please refer to <https://hail.is/docs/0.2/index.html> for additional information. This sample notebook was generated based on the following: <https://hail.is/docs/0.2/tutorials/01-genome-wide-association-study.html>. Note the additional functionality of library `plotting.py`, also part of the development tools from the Hail team. Additional information on `Jupyter Lab`: <https://jupyterlab.readthedocs.io/en/stable/getting_started/overview.html>
# # **Module 1**
#
# ## Introduction to `Hail`
# Load HAIL and packages
import hail as hl
import hail.expr.aggregators as agg
hl.init()
from pprint import pprint
from bokeh.io import output_notebook, show
from bokeh.layouts import gridplot
from bokeh.models import Span
from bokeh.plotting import figure, show, output_file
import pandas as pd
import os , sys, time
import numpy as np
output_notebook()
# To learn more about bokeh, look at https://bokeh.pydata.org/en/latest/
local_path=os.getcwd()
sys.path.append(local_path)
import plotting
# ---
# Load data from the 1K-Genome project
#
hl.utils.get_1kg('data/')
# Read genetic data into a matrix table.
mt = hl.read_matrix_table('data/1kg.mt/')
# Hail has its own internal data representation, called a [MatrixTable](https://hail.is/docs/0.2/tutorials/09-matrixtable.html)
#
# Dividing mt into partitions. See additional details: <https://hail.is/docs/0.2/hail.MatrixTable.html?highlight=partition#hail.MatrixTable.repartition>
CPU = 4
nodes = 1 # 1 node = 1 machine
mt = mt.repartition( 4 * CPU * nodes)
type(mt)
# The `MatrixTable.describe()` method prints all fields in the table and their types, as well as the keys.
mt.describe()
list(mt.row)
print('Samples: %d Variants: %d' % (mt.count_cols(), mt.count_rows()))
# To know exactly the number of variants per chromosome and the nature of our SNPs, we can use `summarize_variants()`.
hl.summarize_variants(mt)
mt.qual.show()
# The [rows](https://hail.is/docs/devel/hail.MatrixTable.html#hail.MatrixTable.rows) method can be used to get a table with all the row fields in our MatrixTable.
# You can use the `show` method to display the variants.
mt.AD.show()
# To look at the first few genotype calls, we can use [entries](https://hail.is/docs/devel/hail.MatrixTable.html#hail.MatrixTable.entries) along with `select` and `take`. The `take` method collects the first n rows into a list. Alternatively, we can use the `show` method, which prints the first n rows to the console in a table format.
#
# Try changing `take` to `show` in the cell below.
mt.entry.show(5)
mt.aggregate_rows(hl.agg.count_where(mt.alleles==['A','T']))
snp_counts = mt.aggregate_rows(
hl.array(hl.agg.counter(mt.alleles)))
snp_counts
type(snp_counts)
sorted(snp_counts, key=lambda x: x[1])
mt.aggregate_entries(hl.agg.stats(mt.GQ))
mt.aggregate_entries(
hl.agg.filter(mt.GT.is_hom_ref(),hl.agg.stats(mt.GQ)))
# +
# hl.agg.stats?
# -
mt.aggregate_entries(
hl.agg.filter(~mt.GT.is_hom_ref(),hl.agg.stats(mt.GQ)))
mt.aggregate_entries(
hl.agg.filter(mt.GT.is_het(),hl.agg.stats(mt.GQ)))
p=hl.plot.histogram(mt.GQ, bins=100)
show(p)
p=hl.plot.histogram(mt.filter_entries(mt.GT.is_hom_ref()).GQ, bins=100)
show(p)
p=hl.plot.histogram(
mt.filter_entries(mt.GT.is_het_ref()).GQ,
bins=100)
show(p)
p=hl.plot.histogram(
mt.filter_entries((mt.DP == 10 ) & mt.GT.is_het_ref()).GQ,
bins=100)
show(p)
# # **Module 2**
#
# ## GWAS in 5 steps
# Load phenotypic data as table
table = (hl.import_table('data/1kg_annotations.txt', impute=True)
.key_by('Sample'))
# Annotations are important in any genetic study. Column fields are where you will store information about sample like phenotypes, ancestry, sex, and covariates. Let's annotate the columns in our MatrixTable.
# Show the first 10 rows of the table
table.show(10)
# Notice that the show command only works this way in tables. In matrix tables it is necessary to specify which of the 3 tables we want to show: rows, columns or entries:
#
# `table.show()` --> Table
#
# `mt.row.alles.show()` --> Matrix Table
# # This is a magic function from Python. Not very common, but one can preview local data using a shell command
# %%sh
head plotting.py
# We use the `annotate_cols` method to join the table with the MatrixTable containing our dataset.
mt = mt.annotate_cols(pheno = table[mt.s])
# The information from the table is added to the column field of the matrixtable under "pheno".
# ### 1. QC:
mt = hl.variant_qc(mt)
mt.row.describe()
# The hardy-weinberg equilibrium (HWE) states that the allele frequency should remain unchanged within a population.
# Outliers from hwe are identified by a p-value larger than 1e-6.
mt = mt.filter_rows(mt.variant_qc.p_value_hwe > 1e-6)
mt = hl.sample_qc(mt)
mt.col.describe()
# Control for allele depth (dp_stats) and missingness (call_rate).
mt = mt.filter_cols((mt.sample_qc.dp_stats.mean >= 4) & (mt.sample_qc.call_rate >= 0.97))
# Check whether the labels homozygous to reference(hom_ref), heterozygous (het), or homozygous variants (hom_var) are indeed correct.
# Calculate number of alternates :
ab = mt.AD[1] / hl.sum(mt.AD)
# +
filter_condition_ab = ((mt.GT.is_hom_ref() & (ab <= 0.1)) |
(mt.GT.is_het() & (ab >= 0.25) & (ab <= 0.75)) |
(mt.GT.is_hom_var() & (ab >= 0.9)))
mt = mt.filter_entries(filter_condition_ab)
# -
# For each of the statistics you can filter for outliers. In this example number of singletons (variants that occur once in the dataset).
stats_singleton = mt.aggregate_cols(hl.agg.stats(mt.sample_qc.n_singleton))
mt = mt.filter_cols(mt.sample_qc.n_singleton < (stats_singleton.mean + (3 * stats_singleton.stdev)))
mt = mt.filter_cols(mt.sample_qc.n_singleton > (stats_singleton.mean - (3 * stats_singleton.stdev)))
# ### 2. Population stratification by genetic ancestry
# The primary confounder of single‐nucleotide poly- morphism (SNP) to phenotype associations is genetic ancestry. To control for this, we estimate the principal components (PCs) that summarize genetic ancestry to include as covariates in all analyses.
# Filter for common variants to preserve power in the principal component analysis.
mt_common = mt.filter_rows(mt.variant_qc.AF[1] > 0.05)
# The `pca` method produces eigenvalues as a list and sample PCs as a Table, and can also produce variant loadings when asked. The `hwe_normalized_pca` method does the same, using HWE-normalized genotypes for the PCA.
eigenvalues, scores, loadings = hl.hwe_normalized_pca(mt_common.GT, k = 10, compute_loadings = True)
pprint(eigenvalues)
scores.show(5, width = 100)
# Project the scores from the common variants onto the rare variants. The scores will be used to correct for the population stratification in the following analyses.
mt = mt.annotate_cols(scores = scores[mt.s].scores)
mt.scores.dtype
# Plot the first two PCs
# After plotting the PCA, try to click on the population labels on the left. The plot is interactive, this is done through the `plotting.py` library.
# +
pca = plotting.scatter_plot(mt.scores[0], mt.scores[1],
label_fields={
'Population': mt.pheno.SuperPopulation,
'Caffeine': mt.pheno.CaffeineConsumption},
title='PCA, first two principal components',
xlabel='PC1', ylabel='PC2')
show(pca)
# -
# ### 3. Linear regression
# Perform linear regression on caffeine consumption and the variants (that are not equal to reference, thus alternates) with covariates:
# - 1.0 is input variable number of alternate alleles, with input variable the genotype dosage derived from the PL field.
# - Gender
# - Population stratification (population structure) with 10 PCs for genetic ancestry.
gwas = hl.linear_regression_rows(
y = mt.pheno.CaffeineConsumption,
x = mt.GT.n_alt_alleles(),
covariates = [1, mt.pheno.isFemale,
mt.scores[0], mt.scores[1], mt.scores[2],
mt.scores[3], mt.scores[4], mt.scores[5],
mt.scores[6], mt.scores[7], mt.scores[8],
mt.scores[9]])
# Idenitify your top hits:
gwas_ordered = gwas.order_by(gwas.p_value)
gwas_ordered.show(10)
# ### 4. Visualization
# Quantile-quantile plot:
#
# Observed against expected p-value to assess inflation. A successful correction for population stratification should bring the observed p-values closer to expected p-values, visualized as a diagonal line.
qqplot = hl.plot.qq(gwas.p_value)
show(qqplot)
# Manhattan Plot
manh = hl.plot.manhattan(gwas.p_value,
title = "Manhattan Plot",
size = 4)
show(manh)
# ### 5. Multiple testing correction (Bonferroni)
# Calculate the Bonferroni corrected P-value cut off.
signlevel = 0.05
N = mt.count_rows()
Bonferroni_line = -np.log10(signlevel / N)
line = Span(location = Bonferroni_line,
dimension = "width",
line_color = "red",
line_width = 1)
manh.renderers.extend([line])
show(manh)
# ---
# # **Module 3**
#
# ## Variant discovery
# The `aggregate` method can be used to aggregate over rows of the table.
# `counter` is an aggregation function that counts the number of occurrences of each unique element.
pprint(mt.aggregate_cols(hl.agg.counter(mt.pheno.SuperPopulation)))
mt.aggregate_cols(hl.agg.count_where(hl.is_missing(mt.pheno)))
# `stats` is an aggregation function that produces some useful statistics about numeric collections.
# Extract entries table
entries = mt.entries()
# Group by supper population and chromosome, then count heteregeneous variants
results = (entries.group_by(pop = entries.pheno.SuperPopulation, chromosome = entries.locus.contig)
.aggregate(n_het = hl.agg.count_where(entries.GT.is_het())))
results.show(40)
# ### Rare variants
# Compute minor allele frequency and generate an annotation column for rare, low frequency and common variants
entries = entries.annotate(maf = hl.cond(entries.info.AF[0]<0.01, "<1%",
hl.cond(entries.info.AF[0]<0.05, "1%-5%", ">5%")))
# Group by minor allele frequency and hair color
results2 = (entries.group_by(af_bin = entries.maf, purple_hair = entries.pheno.PurpleHair)
.aggregate(mean_gq = hl.agg.stats(entries.GQ).mean,
mean_dp = hl.agg.stats(entries.DP).mean))
results2.show()
# Filter rare variants only
rare_vars = entries.filter(entries.maf=="<1%")
rare_vars.count()
# why this instruction works
rare_vars.aggregate(hl.agg.stats(rare_vars.DP))
# but this one does not work
rare_vars.aggregate(hl.agg.stats(rare_vars.s))
# answer below
rare_count_per_sample = rare_vars.aggregate((hl.agg.counter(rare_vars.s)))
rare_count_per_sample
count_per_sample = entries.aggregate((hl.agg.counter(entries.s)))
print(type(count_per_sample))
print(str(len(count_per_sample)) + " samples")
count_per_sample
| notebook/GWAS_tutorial_with_HMS_additions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../Pierian-Data-Logo.PNG">
# <br>
# <strong><center>Copyright 2019. Created by <NAME>.</center></strong>
# # PyTorch Basics Exercises - SOLUTIONS
# For these exercises we'll create a tensor and perform several operations on it.
#
# <div class="alert alert-danger" style="margin: 10px"><strong>IMPORTANT NOTE!</strong> Make sure you don't run the cells directly above the example output shown, <br>otherwise you will end up writing over the example output!</div>
# ### 1. Perform standard imports
# Import torch and NumPy
# CODE HERE
import torch
import numpy as np
# ### 2. Set the random seed for NumPy and PyTorch both to "42"
# This allows us to share the same "random" results.
# CODE HERE
np.random.seed(42)
torch.manual_seed(42); # the semicolon suppresses the jupyter output line
# ### 3. Create a NumPy array called "arr" that contains 6 random integers between 0 (inclusive) and 5 (exclusive)
# +
# CODE HERE
# -
# DON'T WRITE HERE
arr = np.random.randint(0,5,6)
print(arr)
# ### 4. Create a tensor "x" from the array above
# +
# CODE HERE
# -
# DON'T WRITE HERE
x = torch.from_numpy(arr)
print(x)
# ### 5. Change the dtype of x from 'int32' to 'int64'
# Note: 'int64' is also called 'LongTensor'
# +
# CODE HERE
# -
# DON'T WRITE HERE
x = x.type(torch.int64)
# x = x.type(torch.LongTensor)
print(x.type())
# ### 6. Reshape x into a 3x2 tensor
# There are several ways to do this.
# +
# CODE HERE
# -
# DON'T WRITE HERE
x = x.view(3,2)
# x = x.reshape(3,2)
# x.resize_(3,2)
print(x)
# ### 7. Return the right-hand column of tensor x
# +
# CODE HERE
# -
# DON'T WRITE HERE
print(x[:,1:])
# print(x[:,1])
# ### 8. Without changing x, return a tensor of square values of x
# There are several ways to do this.
# +
# CODE HERE
# -
# DON'T WRITE HERE
print(x*x)
# print(x**2)
# print(x.mul(x))
# print(x.pow(2))
# print(torch.mul(x,x))
# ### 9. Create a tensor "y" with the same number of elements as x, that can be matrix-multiplied with x
# Use PyTorch directly (not NumPy) to create a tensor of random integers between 0 (inclusive) and 5 (exclusive).<br>
# Think about what shape it should have to permit matrix multiplication.
# +
# CODE HERE
# -
# DON'T WRITE HERE
y = torch.randint(0,5,(2,3))
print(y)
# ### 10. Find the matrix product of x and y
# +
# CODE HERE
# -
# DON'T WRITE HERE
print(x.mm(y))
# ## Great job!
| PYTORCH_NOTEBOOKS/01-PyTorch-Basics/03-PyTorch-Basics-Exercises-Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ML
# language: python
# name: ml
# ---
import sys
sys.path.append('../')
from tqdm import tqdm
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import cv2
import random
import numpy as np
from utils import *
# %matplotlib inline
# !ls ../data
train_df = pd.read_csv('../data/train.csv')
mkdir('../data/train_images/npy/')
train_df.head()
names = train_df.id_code.values
images_folder = '../data/train_images/'
IMG_SIZE = 224
for idx in tqdm(range(len(train_df))):
fname = names[idx]
img_path = os.path.join(images_folder, fname + ".png")
image = cv2.imread(img_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
image = cv2.resize(image, (IMG_SIZE, IMG_SIZE))
image = cv2.addWeighted(
image, 4, cv2.GaussianBlur(image, (0, 0), IMG_SIZE / 10), -4, 128
) # Ben Graham's preprocessing method [1]
## (IMG_SIZE, IMG_SIZE) -> (IMG_SIZE, IMG_SIZE, 3)
#IMG_SIZE1, IMG_SIZE2 = image.shape
image = image.reshape(IMG_SIZE, IMG_SIZE, 1)
image = np.repeat(image, 3, axis=-1)
#np.save(os.path.join(image_folder, 'npy', fname + '.npy'), image)
image.dtype
| notebooks/prepare_npy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Continuous Control
#
# ---
#
# Congratulations for completing the second project of the [Deep Reinforcement Learning Nanodegree](https://www.udacity.com/course/deep-reinforcement-learning-nanodegree--nd893) program! In this notebook, you will learn how to control an agent in a more challenging environment, where the goal is to train a creature with four arms to walk forward. **Note that this exercise is optional!**
#
# ### 1. Start the Environment
#
# We begin by importing the necessary packages. If the code cell below returns an error, please revisit the project instructions to double-check that you have installed [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents/blob/master/docs/Installation.md) and [NumPy](http://www.numpy.org/).
import torch
device = torch.cuda.set_device(0)
print(device)
from unityagents import UnityEnvironment
import numpy as np
# Next, we will start the environment! **_Before running the code cell below_**, change the `file_name` parameter to match the location of the Unity environment that you downloaded.
#
# - **Mac**: `"path/to/Crawler.app"`
# - **Windows** (x86): `"path/to/Crawler_Windows_x86/Crawler.exe"`
# - **Windows** (x86_64): `"path/to/Crawler_Windows_x86_64/Crawler.exe"`
# - **Linux** (x86): `"path/to/Crawler_Linux/Crawler.x86"`
# - **Linux** (x86_64): `"path/to/Crawler_Linux/Crawler.x86_64"`
# - **Linux** (x86, headless): `"path/to/Crawler_Linux_NoVis/Crawler.x86"`
# - **Linux** (x86_64, headless): `"path/to/Crawler_Linux_NoVis/Crawler.x86_64"`
#
# For instance, if you are using a Mac, then you downloaded `Crawler.app`. If this file is in the same folder as the notebook, then the line below should appear as follows:
# ```
# env = UnityEnvironment(file_name="Crawler.app")
# ```
env = UnityEnvironment(file_name='../../crawler/Crawler.app')
# Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# ### 2. Examine the State and Action Spaces
#
# Run the code cell below to print some information about the environment.
# +
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
# -
# ### 3. Take Random Actions in the Environment
#
# In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
#
# Once this cell is executed, you will watch the agent's performance, if it selects an action at random with each time step. A window should pop up that allows you to observe the agent, as it moves through the environment.
#
# Of course, as part of the project, you'll have to change the code so that the agent is able to use its experience to gradually choose better actions when interacting with the environment!
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
states = env_info.vector_observations # get the current state (for each agent)
scores = np.zeros(num_agents) # initialize the score (for each agent)
while True:
actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
actions = np.clip(actions, -1, 1) # all actions between -1 and 1
env_info = env.step(actions)[brain_name] # send all actions to tne environment
next_states = env_info.vector_observations # get next state (for each agent)
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
scores += env_info.rewards # update the score (for each agent)
states = next_states # roll over states to next time step
if np.any(dones): # exit loop if episode finished
break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
# When finished, you can close the environment.
env.close()
# ### 4. It's Your Turn!
#
# Now it's your turn to train your own agent to solve the environment! When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
# ```python
# env_info = env.reset(train_mode=True)[brain_name]
# ```
| Exercise_II_Continuous_Control/.ipynb_checkpoints/Crawler-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="qFdPvlXBOdUN"
# # Title
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/not_a_real_link"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/tools/templates/notebook.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/tools/templates/notebook.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/tools/templates/notebook.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="r6P32iYYV27b"
# [Update button links]
# + [markdown] colab_type="text" id="xHxb-dlhMIzW"
# ## Overview
#
# [Include a paragraph or two explaining what this example demonstrates, who should be interested in it, and what you need to know before you get started.]
# + [markdown] colab_type="text" id="MUXex9ctTuDB"
# ## Setup
# + [markdown] colab_type="text" id="1Eh-iCRVBm0p"
# [Put all your imports and installs up into a setup section.]
# + colab={} colab_type="code" id="IqR2PQG4ZaZ0"
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import numpy as np
# + [markdown] colab_type="text" id="UhNtHfuxCGVy"
# ## Resources
#
# * [TensorFlow documentation contributor guide](https://www.tensorflow.org/community/contribute/docs)
# * [TensorFlow documentation style guide](https://www.tensorflow.org/community/contribute/docs_style)
# * [Google developer documentation style guide](https://developers.google.com/style/highlights)
# + [markdown] colab_type="text" id="2V22fKegUtF9"
# ## Notebook style
#
# * Include the collapsed license at the top (uses the Colab "Form" mode to hide the cells).
# * Save the notebook with the table of contents open.
# * Use one `H1` header for the title.
# * Include the button-bar immediately after the `H1`.
# * Avoid using `H1` headers for section titles. Use `H2` and `H3` instead.
# * Include an overview section before any code.
# * Put all your installs and imports in a setup section.
# * Always include the `__future__` imports.
# * Write Python 3 compatible code.
# * Keep code and text cells as brief as possible.
# * Avoid leaving an empty cell at the end of the notebook.
# + [markdown] colab_type="text" id="QKp40qS-DGEZ"
# ### Code style
#
# * Notebooks are for people. Write code optimized for clarity.
# * Use the [Google Python Style Guide](http://google.github.io/styleguide/pyguide.html), where applicable.
# * tensorflow.org doesn't support interactive plots.
# * Keep examples quick. Use small datasets, or small slices of datasets. Don't train to convergence, train until it's obvious it's making progress.
# * Demonstrate small parts before combining them into something more complex, like this:
# + colab={} colab_type="code" id="KtylpxOmceaC"
# Build the model
model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation='relu', input_shape=(None, 5)),
tf.keras.layers.Dense(3)
])
# + [markdown] colab_type="text" id="pwdM2pl3RSPb"
# Run the model on a single batch of data, and inspect the output:
# + colab={} colab_type="code" id="mMOeXVmbdilM"
result = model(tf.constant(np.random.randn(10,5), dtype = tf.float32)).numpy()
print("min:", result.min())
print("max:", result.max())
print("mean:", result.mean())
print("shape:", result.shape)
# + [markdown] colab_type="text" id="uabQmjMtRtzs"
# Compile the model for training:
# + colab={} colab_type="code" id="U82B_tH2d294"
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.categorical_crossentropy)
# + [markdown] colab_type="text" id="TJdqBNBbS78n"
# ### Code content
#
# * Use the highest level API that gets the job done (unless the goal is to demonstrate the low level API).
# * Use `keras.Sequential` > keras functional api > keras model subclassing > ...
# * Use `model.fit` > `model.train_on_batch` > manual `GradientTapes`.
# * Use eager-style code.
# * Use `tensorflow_datasets` and `tf.data` where possible.
# * Avoid `compat.v1`.
# + [markdown] colab_type="text" id="78HBT9cQXJko"
# ### Text
#
# * Use an imperative style. "Run a batch of images through the model."
# * Use sentence case in titles/headings.
# * Use short titles/headings: "Download the data", "Build the model", "Train the model".
# * Use the [Google developer documentation style guide](https://developers.google.com/style/highlights).
#
# + [markdown] colab_type="text" id="YrsKXcPRUvK9"
# ## GitHub workflow
#
# * Be consistent about how you save your notebooks, otherwise the JSON diffs are messy.
# * This notebook has the "Omit code cell output when saving this notebook" option set. GitHub refuses to diff notebooks with large diffs (inline images).
# * [ReviewNB.com](http://reviewnb.com) can help with diffs. This is linked in a comment on a notebook pull request.
# * Use the [Open in Colab](https://chrome.google.com/webstore/detail/open-in-colab/iogfkhleblhcpcekbiedikdehleodpjo) extension to open a GitHub notebook in Colab.
# * The easiest way to edit a notebook in GitHub is to open it with Colab from the branch you want to edit. Then use File --> Save a copy in GitHub, which will save it back to the branch you opened it from.
# * For PRs it's helpful to post a direct Colab link to the PR head: https://colab.research.google.com/github/{USER}/{REPO}/blob/{BRANCH}/{PATH}.ipynb
| tools/templates/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Transform UNIONS multi-band catalogue
# - Extract ra and dec for clustering redshifts.
# - Write bands in gazpar format (https://gazpar.lam.fr)
# +
# %matplotlib inline
# %config Completer.use_jedi = False
import numpy as np
from astropy.io import ascii, fits
from astropy.table import Table
# +
# Load the multi-band catalogue, matched with spectroscopic DEEP2+3 measurements
multi_band_v2_data_base = './multi-band_UNIONS/w3.ugriz.spec'
data = ascii.read(f'{multi_band_v2_data_base}.txt')
# -
print(data[0:5])
print(data.dtype)
# ## Write coordinates
# +
primary_hdu = fits.PrimaryHDU()
col_ra = fits.Column(array=data['RA'], name='RA', format='D')
col_dec = fits.Column(array=data['Dec'], name='Dec', format='D')
secondary_hdu = fits.BinTableHDU.from_columns([col_ra, col_dec])
hdu_list_out = fits.HDUList(hdus=[primary_hdu, secondary_hdu])
hdu_list_out.writeto(f'{multi_band_v2_data_base}_radec.fits', overwrite=True)
# -
# ## Write gazpar output format
# Catalogue format
#
# Your catalogue must be an ASCII file containing the following columns in this order:
#
# [id] [band1] [band1_err] [band2] [band2_err] ... [redshift] [z_flag] [ra] [dec] [mask_flag]
#
# If the column names are present, the line must be commented out with #;
#
# The identifiers (first column) must be integers;
#
# Missing values must be indicated with -9999.;
#
# Upper limits must be indicated with the value of the upper limit in the flux / magnitude column and a negative number for the error (note that the tools deals differently with upper limits);
#
# Each flux must be associated to a valid error.
# ### Mark missing data
bands = ['u', 'u_err', 'g', 'g_err', 'r', 'r_err', 'i', 'i_err', 'z', 'z_err']
for band in bands:
missing = (data[band] == -99)
data[band][missing] = -9999
print(data[0:5])
# +
keys = ['CFIS_ID', 'u', 'u_err', 'g', 'g_err', 'r', 'r_err', 'i', 'i_err', 'z', 'z_err',
'z_spec', 'z_flag', 'RA', 'Dec', 'mask_flag']
cols = []
for key in keys:
if key in ['z_flag', 'mask_flag']:
dat = np.zeros_like(data['CFIS_ID'])
elif key == 'CFIS_ID':
dat = data[key] % 100000
else:
dat = data[key]
cols.append(dat)
t = Table(cols, names=keys)
with open(f'{multi_band_v2_data_base}_gazpar.txt', 'w') as f:
ascii.write(t, f, Writer=ascii.CommentedHeader, delimiter=' ')
# -
| CFIS-W3/transform_mb_other.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="UyOeoIVvDS-I"
# # **Membership Operator in Python🐍**
# + [markdown] id="TNxg6_-IDeSO"
# Python **Membership operators** are used to validate the membership of a value. It tests for memberships in sequences, for example, strings, lists and tuples.
#
# **In simple words**, *Membership operators are used to test if a sequence is present in an object*.
#
#
#
# * `in` operator checks whether an element is present in a sequence or not. It evaluates to `True` if the element is present, else it evaluates to `False`.
#
# * Whereas, the `not in` operator evaluates to `True` if the element is not present in the sequence, else it evaluates to `False`.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="YEZRDjG0CVaY" outputId="0715e897-6599-4254-83ab-ab1c1c0307bf"
# Example of in operator
x = 'Participant'
gwoc_list = ['Contributor','Mentor','Supervisor']
print(x in gwoc_list)
# + colab={"base_uri": "https://localhost:8080/"} id="earHgoGyGfGX" outputId="edadd055-1488-4f8b-f8e9-6d05e42a7251"
# Example of not in operator
x = 'Participant'
gwoc_list = ['Contributor','Mentor','Supervisor']
print(x not in gwoc_list)
# + [markdown] id="d0eb-IC4G10G"
# # **Identity Operators in Python**🐍
#
# + [markdown] id="-id1PavqLkD7"
# Before learning about **Identity Operators** in Python, we should keep something in mind.
#
# ***Everything in Python is an object, and each object is stored in a specific memory location.***
#
#
# * `is` operator checks whether two variables refer to the same object in memory or not. It evaluates to `True` if both operands refer to the same object, else it evaluates to `False`.
#
# * Whereas, the `is not` operator evaluates to `False` if both operands refer to the same object, else it evaluates to `True`
#
# The `is` and `is not` operators can be used to
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="kmjnD52bIxAK" outputId="db51c7fd-79ef-42d0-cf1f-f52b7d5a89ba"
#Example of is operator
a = 5
if type(a) is int:
print("Yes")
else:
print("No")
# + colab={"base_uri": "https://localhost:8080/"} id="TnopwDYWNzwt" outputId="fc453384-18ed-4417-8215-7edc2ee93fca"
#Example of is not operator
a = 5.5
if type(a) is not int:
print("not int")
else:
print("int")
# + [markdown] id="QsUI-UAFOiCP"
#
# **What is the difference between `==` , `!=` and `is` and `is not` operator?**
#
#
#
# * When do we use `==` and `!=`?
#
# In this case, we are checking for the **equality** of two objects.
# We are not concerned about the memory addresses of the objects in question.
# * When do we use `is` and `is not`?
#
# In this case, we check whether or not two variables point to the same object in **memory**.
#
# *The main use case for these operators is when you’re comparing to `None`.*
#
| Python/Operators/Python_3_3_What_are_Membership_and_Identity_Operators_.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md:myst
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="kORMl5KmfByI"
# # Advanced Auomatic Differentiation in JAX
#
# [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/google/jax/blob/master/docs/jax-101/04-advanced-autodiff.ipynb)
#
# *Authors: <NAME> & <NAME>*
#
# Computing gradients is a critical part of modern machine learning methods. This section considers a few advanced topics in the areas of automatic differentiation as it relates to modern machine learning.
#
# While understanding how automatic differentiation works under the hood isn't crucial for using JAX in most contexts, we encourage the reader to check out this quite accessible [video](https://www.youtube.com/watch?v=wG_nF1awSSY) to get a deeper sense of what's going on.
#
# [The Autodiff Cookbook](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html) is a more advanced and more detailed explanation of how these ideas are implemented in the JAX backend. It's not necessary to understand this to do most things in JAX. However, some features (like defining [custom derivatives](https://jax.readthedocs.io/en/latest/notebooks/Custom_derivative_rules_for_Python_code.html)) depend on understanding this, so it's worth knowing this explanation exists if you ever need to use them.
# + [markdown] id="bYLCEa0Jt-n3"
# ## Imports
# + id="-JJ8sMDcelto"
import jax
import jax.numpy as jnp
# + [markdown] id="qx50CO1IorCc"
# ## Higher-order derivatives
#
# JAX's autodiff makes it easy to compute higher-order derivatives, because the functions that compute derivatives are themselves differentiable. Thus, higher-order derivatives are as easy as stacking transformations.
#
# We illustrate this in the single-variable case:
#
# The derivative of $f(x) = x^3 + 2x^2 - 3x + 1$ can be computed as:
# + id="Kqsbj98UTVdi"
f = lambda x: x**3 + 2*x**2 - 3*x + 1
dfdx = jax.grad(f)
# + [markdown] id="ItEt15OGiiAF"
# The higher-order derivatives of $f$ are:
#
# $$
# \begin{array}{l}
# f'(x) = 3x^2 + 4x -3\\
# f''(x) = 6x + 4\\
# f'''(x) = 6\\
# f^{iv}(x) = 0
# \end{array}
# $$
#
# Computing any of these in JAX is as easy as chaining the `grad` function:
# + id="5X3yQqLgimqH"
d2fdx = jax.grad(dfdx)
d3fdx = jax.grad(d2fdx)
d4fdx = jax.grad(d3fdx)
# + [markdown] id="fVL2P_pcj8T1"
# Evaluating the above in $x=1$ would give us:
#
# $$
# \begin{array}{l}
# f'(1) = 4\\
# f''(1) = 10\\
# f'''(1) = 6\\
# f^{iv}(1) = 0
# \end{array}
# $$
#
# Using JAX:
# + id="tJkIp9wFjxL3" outputId="581ecf87-2d20-4c83-9443-5befc1baf51d"
print(dfdx(1.))
print(d2fdx(1.))
print(d3fdx(1.))
print(d4fdx(1.))
# + [markdown] id="3-fTelU7LHRr"
# In the multivariable case, higher-order derivatives are more complicated. The second-order derivative of a function is represented by its [Hessian matrix](https://en.wikipedia.org/wiki/Hessian_matrix), defined according to
#
# $$(\mathbf{H}f)_{i,j} = \frac{\partial^2 f}{\partial_i\partial_j}.$$
#
# The Hessian of a real-valued function of several variables, $f: \mathbb R^n\to\mathbb R$, can be identified with the Jacobian of its gradient. JAX provides two transformations for computing the Jacobian of a function, `jax.jacfwd` and `jax.jacrev`, corresponding to forward- and reverse-mode autodiff. They give the same answer, but one can be more efficient than the other in different circumstances – see the [video about autodiff](https://www.youtube.com/watch?v=wG_nF1awSSY) linked above for an explanation.
# + id="ILhkef1rOB6_"
def hessian(f):
return jax.jacfwd(jax.grad(f))
# + [markdown] id="xaENwADXOGf_"
# Let's double check this is correct on the dot-product $f: \mathbf{x} \mapsto \mathbf{x} ^\top \mathbf{x}$.
#
# if $i=j$, $\frac{\partial^2 f}{\partial_i\partial_j}(\mathbf{x}) = 2$. Otherwise, $\frac{\partial^2 f}{\partial_i\partial_j}(\mathbf{x}) = 0$.
# + id="Xm3A0QdWRdJl" outputId="e1e8cba9-b567-439b-b8fc-34b21497e67f"
def f(x):
return jnp.dot(x, x)
hessian(f)(jnp.array([1., 2., 3.]))
# + [markdown] id="7_gbi34WSUsD"
# Often, however, we aren't interested in computing the full Hessian itself, and doing so can be very inefficient. [The Autodiff Cookbook](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html) explains some tricks, like the Hessian-vector product, that allow to use it without materialising the whole matrix.
#
# If you plan to work with higher-order derivatives in JAX, we strongly recommend reading the Autodiff Cookbook.
# + [markdown] id="zMT2qAi-SvcK"
# ## Higher order optimization
#
# Some meta-learning techniques, such as Model-Agnostic Meta-Learning ([MAML](https://arxiv.org/abs/1703.03400)), require differentiating through gradient updates. In other frameworks this can be quite cumbersome, but in JAX it's much easier:
#
# ```python
# def meta_loss_fn(params, data):
# """Computes the loss after one step of SGD."""
# grads = jax.grad(loss_fn)(params, data)
# return loss_fn(params - lr * grads, data)
#
# meta_grads = jax.grad(meta_loss_fn)(params, data)
# ```
# + [markdown] id="3h9Aj3YyuL6P"
# ## Stopping gradients
#
# Auto-diff enables automatic computation of the gradient of a function with respect to its inputs. Sometimes, however, we might want some additional control: for instance, we might want to avoid back-propagating gradients through some subset of the computational graph.
#
# Consider for instance the TD(0) ([temporal difference](https://en.wikipedia.org/wiki/Temporal_difference_learning)) reinforcement learning update. This is used to learn to estimate the *value* of a state in an environment from experience of interacting with the environment. Let's assume the value estimate $v_{\theta}(s_{t-1}$) in a state $s_{t-1}$ is parameterised by a linear function.
# + id="fjLqbCb6SiOm"
# Value function and initial parameters
value_fn = lambda theta, state: jnp.dot(theta, state)
theta = jnp.array([0.1, -0.1, 0.])
# + [markdown] id="85S7HBo1tBzt"
# Consider a transition from a state $s_{t-1}$ to a state $s_t$ during which we observed the reward $r_t$
# + id="T6cRPau6tCSE"
# An example transition.
s_tm1 = jnp.array([1., 2., -1.])
r_t = jnp.array(1.)
s_t = jnp.array([2., 1., 0.])
# + [markdown] id="QO5CHA9_Sk01"
# The TD(0) update to the network parameters is:
#
# $$
# \Delta \theta = (r_t + v_{\theta}(s_t) - v_{\theta}(s_{t-1})) \nabla v_{\theta}(s_{t-1})
# $$
#
# This update is not the gradient of any loss function.
#
# However it can be **written** as the gradient of the pseudo loss function
#
# $$
# L(\theta) = [r_t + v_{\theta}(s_t) - v_{\theta}(s_{t-1})]^2
# $$
#
# if the dependency of the target $r_t + v_{\theta}(s_t)$ on the parameter $\theta$ is ignored.
#
# How can we implement this in JAX? If we write the pseudo loss naively we get:
# + id="uMcFny2xuOwz" outputId="79c10af9-10b8-4e18-9753-a53918b9d72d"
def td_loss(theta, s_tm1, r_t, s_t):
v_tm1 = value_fn(theta, s_tm1)
target = r_t + value_fn(theta, s_t)
return (target - v_tm1) ** 2
td_update = jax.grad(td_loss)
delta_theta = td_update(theta, s_tm1, r_t, s_t)
delta_theta
# + [markdown] id="CPnjm59GG4Gq"
# But `td_update` will **not** compute a TD(0) update, because the gradient computation will include the dependency of `target` on $\theta$.
#
# We can use `jax.lax.stop_gradient` to force JAX to ignore the dependency of the target on $\theta$:
# + id="WCeq7trKPS4V" outputId="0f38d754-a871-4c47-8e3a-a961418a24cc"
def td_loss(theta, s_tm1, r_t, s_t):
v_tm1 = value_fn(theta, s_tm1)
target = r_t + value_fn(theta, s_t)
return (jax.lax.stop_gradient(target) - v_tm1) ** 2
td_update = jax.grad(td_loss)
delta_theta = td_update(theta, s_tm1, r_t, s_t)
delta_theta
# + [markdown] id="TNF0CkwOTKpD"
# This will treat `target` as if it did **not** depend on the parameters $\theta$ and compute the correct update to the parameters.
#
# The `jax.lax.stop_gradient` may also be useful in other settings, for instance if you want the gradient from some loss to only affect a subset of the parameters of the neural network (because, for instance, the other parameters are trained using a different loss).
#
# ## Straight-through estimator using `stop_gradient`
#
# The straight-through estimator is a trick for defining a 'gradient' of a function that is otherwise non-differentiable. Given a non-differentiable function $f : \mathbb{R}^n \to \mathbb{R}^n$ that is used as part of a larger function that we wish to find a gradient of, we simply pretend during the backward pass that $f$ is the identity function. This can be implemented neatly using `jax.lax.stop_gradient`:
# + id="hdORJENmVHvX" outputId="f0839541-46a4-45a9-fce7-ead08f20046b"
def f(x):
return jnp.round(x) # non-differentiable
def straight_through_f(x):
return x + jax.lax.stop_gradient(f(x) - x)
print("f(x): ", f(3.2))
print("straight_through_f(x):", straight_through_f(3.2))
print("grad(f)(x):", jax.grad(f)(3.2))
print("grad(straight_through_f)(x):", jax.grad(straight_through_f)(3.2))
# + [markdown] id="Wx3RNE0Sw5mn"
# ## Per-example gradients
#
# While most ML systems compute gradients and updates from batches of data, for reasons of computational efficiency and/or variance reduction, it is sometimes necessary to have access to the gradient/update associated with each specific sample in the batch.
#
# For instance, this is needed to prioritise data based on gradient magnitude, or to apply clipping / normalisations on a sample by sample basis.
#
# In many frameworks (PyTorch, TF, Theano) it is often not trivial to compute per-example gradients, because the library directly accumulates the gradient over the batch. Naive workarounds, such as computing a separate loss per example and then aggregating the resulting gradients are typically very inefficient.
#
# In JAX we can define the code to compute the gradient per-sample in an easy but efficient way.
#
# Just combine the `jit`, `vmap` and `grad` transformations together:
# + id="tFLyd9ifw4GG" outputId="bf3ad4a3-102d-47a6-ece0-f4a8c9e5d434"
perex_grads = jax.jit(jax.vmap(jax.grad(td_loss), in_axes=(None, 0, 0, 0)))
# Test it:
batched_s_tm1 = jnp.stack([s_tm1, s_tm1])
batched_r_t = jnp.stack([r_t, r_t])
batched_s_t = jnp.stack([s_t, s_t])
perex_grads(theta, batched_s_tm1, batched_r_t, batched_s_t)
# + [markdown] id="VxvYVEYQYiS_"
# Let's walk through this one transformation at a time.
#
# First, we apply `jax.grad` to `td_loss` to obtain a function that computes the gradient of the loss w.r.t. the parameters on single (unbatched) inputs:
# + id="rPO67QQrY5Bk" outputId="fbb45b98-2dbf-4865-e6e5-87dc3eef5560"
dtdloss_dtheta = jax.grad(td_loss)
dtdloss_dtheta(theta, s_tm1, r_t, s_t)
# + [markdown] id="cU36nVAlcnJ0"
# This function computes one row of the array above.
# + [markdown] id="c6DQF0b3ZA5u"
# Then, we vectorise this function using `jax.vmap`. This adds a batch dimension to all inputs and outputs. Now, given a batch of inputs, we produce a batch of outputs -- each output in the batch corresponds to the gradient for the corresponding member of the input batch.
# + id="5agbNKavaNDM" outputId="ab081012-88ab-4904-a367-68e9f81445f0"
almost_perex_grads = jax.vmap(dtdloss_dtheta)
batched_theta = jnp.stack([theta, theta])
almost_perex_grads(batched_theta, batched_s_tm1, batched_r_t, batched_s_t)
# + [markdown] id="K-v34yLuan7k"
# This isn't quite what we want, because we have to manually feed this function a batch of `theta`s, whereas we actually want to use a single `theta`. We fix this by adding `in_axes` to the `jax.vmap`, specifying theta as `None`, and the other args as `0`. This makes the resulting function add an extra axis only to the other arguments, leaving `theta` unbatched, as we want:
# + id="S6kd5MujbGrr" outputId="d3d731ef-3f7d-4a0a-ce91-7df57627ddbd"
inefficient_perex_grads = jax.vmap(dtdloss_dtheta, in_axes=(None, 0, 0, 0))
inefficient_perex_grads(theta, batched_s_tm1, batched_r_t, batched_s_t)
# + [markdown] id="O0hbsm70be5T"
# Almost there! This does what we want, but is slower than it has to be. Now, we wrap the whole thing in a `jax.jit` to get the compiled, efficient version of the same function:
# + id="Fvr709FcbrSW" outputId="627db899-5620-4bed-8d34-cd1364d3d187"
perex_grads = jax.jit(inefficient_perex_grads)
perex_grads(theta, batched_s_tm1, batched_r_t, batched_s_t)
# + id="FH42yzbHcNs2" outputId="c8e52f93-615a-4ce7-d8ab-fb6215995a39"
# %timeit inefficient_perex_grads(theta, batched_s_tm1, batched_r_t, batched_s_t).block_until_ready()
# %timeit perex_grads(theta, batched_s_tm1, batched_r_t, batched_s_t).block_until_ready()
| docs/jax-101/04-advanced-autodiff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
from rdkit import Chem
import pyrfume
from pyrfume import odorants
from rickpy import ProgressBar
# -
# ## Create a dictionary of mol files
file_path = os.path.join(pyrfume.DATA, 'all_cids.sdf')
f = Chem.SDMolSupplier(file_path)
result = {}
for mol in f:
x = mol.GetProp('_Name')
cid, smiles = x.split(':')
cid = int(cid)
smiles = smiles.strip()
result[cid] = {'smiles': smiles, 'mol': mol}
| notebooks/sdf_to_cids_smiles_mol.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project: Part of Speech Tagging with Hidden Markov Models
# ---
# ### Introduction
#
# Part of speech tagging is the process of determining the syntactic category of a word from the words in its surrounding context. It is often used to help disambiguate natural language phrases because it can be done quickly with high accuracy. Tagging can be used for many NLP tasks like determining correct pronunciation during speech synthesis (for example, _dis_-count as a noun vs dis-_count_ as a verb), for information retrieval, and for word sense disambiguation.
#
# In this notebook, you'll use the [Pomegranate](http://pomegranate.readthedocs.io/) library to build a hidden Markov model for part of speech tagging using a "universal" tagset. Hidden Markov models have been able to achieve [>96% tag accuracy with larger tagsets on realistic text corpora](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf). Hidden Markov models have also been used for speech recognition and speech generation, machine translation, gene recognition for bioinformatics, and human gesture recognition for computer vision, and more.
#
# ![](_post-hmm.png)
#
# The notebook already contains some code to get you started. You only need to add some new functionality in the areas indicated to complete the project; you will not need to modify the included code beyond what is requested. Sections that begin with **'IMPLEMENTATION'** in the header indicate that you must provide code in the block that follows. Instructions will be provided for each section, and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
# <div class="alert alert-block alert-info">
# **Note:** Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You must then **export the notebook** by running the last cell in the notebook, or by using the menu above and navigating to **File -> Download as -> HTML (.html)** Your submissions should include both the `html` and `ipynb` files.
# </div>
# <div class="alert alert-block alert-info">
# **Note:** Code and Markdown cells can be executed using the `Shift + Enter` keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.
# </div>
# ### The Road Ahead
# You must complete Steps 1-3 below to pass the project. The section on Step 4 includes references & resources you can use to further explore HMM taggers.
#
# - [Step 1](#Step-1:-Read-and-preprocess-the-dataset): Review the provided interface to load and access the text corpus
# - [Step 2](#Step-2:-Build-a-Most-Frequent-Class-tagger): Build a Most Frequent Class tagger to use as a baseline
# - [Step 3](#Step-3:-Build-an-HMM-tagger): Build an HMM Part of Speech tagger and compare to the MFC baseline
# - [Step 4](#Step-4:-[Optional]-Improving-model-performance): (Optional) Improve the HMM tagger
# <div class="alert alert-block alert-warning">
# **Note:** Make sure you have selected a **Python 3** kernel in Workspaces or the hmm-tagger conda environment if you are running the Jupyter server on your own machine.
# </div>
# Jupyter "magic methods" -- only need to be run once per kernel restart
# %load_ext autoreload
# %aimport helpers, tests
# %autoreload 1
# +
# import python modules -- this cell needs to be run again if you make changes to any of the files
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.display import HTML
from itertools import chain
from collections import Counter, defaultdict
from helpers import show_model, Dataset
from pomegranate import State, HiddenMarkovModel, DiscreteDistribution
# -
# ## Step 1: Read and preprocess the dataset
# ---
# We'll start by reading in a text corpus and splitting it into a training and testing dataset. The data set is a copy of the [Brown corpus](https://en.wikipedia.org/wiki/Brown_Corpus) (originally from the [NLTK](https://www.nltk.org/) library) that has already been pre-processed to only include the [universal tagset](https://arxiv.org/pdf/1104.2086.pdf). You should expect to get slightly higher accuracy using this simplified tagset than the same model would achieve on a larger tagset like the full [Penn treebank tagset](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html), but the process you'll follow would be the same.
#
# The `Dataset` class provided in helpers.py will read and parse the corpus. You can generate your own datasets compatible with the reader by writing them to the following format. The dataset is stored in plaintext as a collection of words and corresponding tags. Each sentence starts with a unique identifier on the first line, followed by one tab-separated word/tag pair on each following line. Sentences are separated by a single blank line.
#
# Example from the Brown corpus.
# ```
# b100-38532
# Perhaps ADV
# it PRON
# was VERB
# right ADJ
# ; .
# ; .
#
# b100-35577
# ...
# ```
# +
data = Dataset("tags-universal.txt", "brown-universal.txt", train_test_split=0.8)
print("There are {} sentences in the corpus.".format(len(data)))
print("There are {} sentences in the training set.".format(len(data.training_set)))
print("There are {} sentences in the testing set.".format(len(data.testing_set)))
assert len(data) == len(data.training_set) + len(data.testing_set), \
"The number of sentences in the training set + testing set should sum to the number of sentences in the corpus"
# -
# ### The Dataset Interface
#
# You can access (mostly) immutable references to the dataset through a simple interface provided through the `Dataset` class, which represents an iterable collection of sentences along with easy access to partitions of the data for training & testing. Review the reference below, then run and review the next few cells to make sure you understand the interface before moving on to the next step.
#
# ```
# Dataset-only Attributes:
# training_set - reference to a Subset object containing the samples for training
# testing_set - reference to a Subset object containing the samples for testing
#
# Dataset & Subset Attributes:
# sentences - a dictionary with an entry {sentence_key: Sentence()} for each sentence in the corpus
# keys - an immutable ordered (not sorted) collection of the sentence_keys for the corpus
# vocab - an immutable collection of the unique words in the corpus
# tagset - an immutable collection of the unique tags in the corpus
# X - returns an array of words grouped by sentences ((w11, w12, w13, ...), (w21, w22, w23, ...), ...)
# Y - returns an array of tags grouped by sentences ((t11, t12, t13, ...), (t21, t22, t23, ...), ...)
# N - returns the number of distinct samples (individual words or tags) in the dataset
#
# Methods:
# stream() - returns an flat iterable over all (word, tag) pairs across all sentences in the corpus
# __iter__() - returns an iterable over the data as (sentence_key, Sentence()) pairs
# __len__() - returns the nubmer of sentences in the dataset
# ```
#
# For example, consider a Subset, `subset`, of the sentences `{"s0": Sentence(("See", "Spot", "run"), ("VERB", "NOUN", "VERB")), "s1": Sentence(("Spot", "ran"), ("NOUN", "VERB"))}`. The subset will have these attributes:
#
# ```
# subset.keys == {"s1", "s0"} # unordered
# subset.vocab == {"See", "run", "ran", "Spot"} # unordered
# subset.tagset == {"VERB", "NOUN"} # unordered
# subset.X == (("Spot", "ran"), ("See", "Spot", "run")) # order matches .keys
# subset.Y == (("NOUN", "VERB"), ("VERB", "NOUN", "VERB")) # order matches .keys
# subset.N == 7 # there are a total of seven observations over all sentences
# len(subset) == 2 # because there are two sentences
# ```
#
# <div class="alert alert-block alert-info">
# **Note:** The `Dataset` class is _convenient_, but it is **not** efficient. It is not suitable for huge datasets because it stores multiple redundant copies of the same data.
# </div>
# #### Sentences
#
# `Dataset.sentences` is a dictionary of all sentences in the training corpus, each keyed to a unique sentence identifier. Each `Sentence` is itself an object with two attributes: a tuple of the words in the sentence named `words` and a tuple of the tag corresponding to each word named `tags`.
key = 'b100-38532'
print("Sentence: {}".format(key))
print("words:\n\t{!s}".format(data.sentences[key].words))
print("tags:\n\t{!s}".format(data.sentences[key].tags))
# <div class="alert alert-block alert-info">
# **Note:** The underlying iterable sequence is **unordered** over the sentences in the corpus; it is not guaranteed to return the sentences in a consistent order between calls. Use `Dataset.stream()`, `Dataset.keys`, `Dataset.X`, or `Dataset.Y` attributes if you need ordered access to the data.
# </div>
#
# #### Counting Unique Elements
#
# You can access the list of unique words (the dataset vocabulary) via `Dataset.vocab` and the unique list of tags via `Dataset.tagset`.
# +
print("There are a total of {} samples of {} unique words in the corpus."
.format(data.N, len(data.vocab)))
print("There are {} samples of {} unique words in the training set."
.format(data.training_set.N, len(data.training_set.vocab)))
print("There are {} samples of {} unique words in the testing set."
.format(data.testing_set.N, len(data.testing_set.vocab)))
print("There are {} words in the test set that are missing in the training set."
.format(len(data.testing_set.vocab - data.training_set.vocab)))
assert data.N == data.training_set.N + data.testing_set.N, \
"The number of training + test samples should sum to the total number of samples"
# -
# #### Accessing word and tag Sequences
# The `Dataset.X` and `Dataset.Y` attributes provide access to ordered collections of matching word and tag sequences for each sentence in the dataset.
# accessing words with Dataset.X and tags with Dataset.Y
for i in range(2):
print("Sentence {}:".format(i + 1), data.X[i])
print()
print("Labels {}:".format(i + 1), data.Y[i])
print()
# #### Accessing (word, tag) Samples
# The `Dataset.stream()` method returns an iterator that chains together every pair of (word, tag) entries across all sentences in the entire corpus.
# use Dataset.stream() (word, tag) samples for the entire corpus
print("\nStream (word, tag) pairs:\n")
for i, pair in enumerate(data.stream()):
print("\t", pair)
if i > 5: break
#
# For both our baseline tagger and the HMM model we'll build, we need to estimate the frequency of tags & words from the frequency counts of observations in the training corpus. In the next several cells you will complete functions to compute the counts of several sets of counts.
# ## Step 2: Build a Most Frequent Class tagger
# ---
#
# Perhaps the simplest tagger (and a good baseline for tagger performance) is to simply choose the tag most frequently assigned to each word. This "most frequent class" tagger inspects each observed word in the sequence and assigns it the label that was most often assigned to that word in the corpus.
# ### IMPLEMENTATION: Pair Counts
#
# Complete the function below that computes the joint frequency counts for two input sequences.
# +
def pair_counts(sequence_A, sequence_B):
"""Return a dictionary keyed to each unique value in the first sequence list
that counts the number of occurrences of the corresponding value from the
second sequences list.
For example, if sequences_A is tags and sequences_B is the corresponding
words, then if 1244 sequences contain the word "time" tagged as a NOUN, then
you should return a dictionary such that pair_counts[NOUN][time] == 1244
"""
# TODO: Finish this function!
# tag_dic = {}
# count_seq = len(sequence_A)
# print(count_seq)
# for i in range(count_seq):
# for j in range(len(sequence_A[i])):
# if sequence_A[i][j] in tag_dic.keys():
# if sequence_B[i][j] in tag_dic[sequence_A[i][j]].keys():
# tag_dic[sequence_A[i][j]][sequence_B[i][j]] = tag_dic[sequence_A[i][j]][sequence_B[i][j]] + 1
# else:
# tag_dic[sequence_A[i][j]][sequence_B[i][j]] = 0
# else:
# tag_dic[sequence_A[i][j]] = {sequence_B[i][j]:0}
# return tag_dic
d = defaultdict(Counter)
# Loop all over the sentences
for i in range(len(sequence_A)):
# Loop all over the 2 sequences
for a, b in zip(sequence_A[i],sequence_B[i]):
# Insert the tag and word pair if needed + increment their counter
d[a][b] += 1
return d
raise NotImplementedError
# Calculate C(t_i, w_i)
emission_counts = pair_counts(data.Y, data.X)
assert len(emission_counts) == 12, \
"Uh oh. There should be 12 tags in your dictionary."
assert max(emission_counts["NOUN"], key=emission_counts["NOUN"].get) == 'time', \
"Hmmm...'time' is expected to be the most common NOUN."
HTML('<div class="alert alert-block alert-success">Your emission counts look good!</div>')
# -
test_counts = pair_counts(data.X, data.Y)
test_counts
emission_counts
data.Y
# ### IMPLEMENTATION: Most Frequent Class Tagger
#
# Use the `pair_counts()` function and the training dataset to find the most frequent class label for each word in the training data, and populate the `mfc_table` below. The table keys should be words, and the values should be the appropriate tag string.
#
# The `MFCTagger` class is provided to mock the interface of Pomegranite HMM models so that they can be used interchangeably.
# +
# Create a lookup table mfc_table where mfc_table[word] contains the tag label most frequently assigned to that word
from collections import namedtuple
FakeState = namedtuple("FakeState", "name")
class MFCTagger:
# NOTE: You should not need to modify this class or any of its methods
missing = FakeState(name="<MISSING>")
def __init__(self, table):
self.table = defaultdict(lambda: MFCTagger.missing)
self.table.update({word: FakeState(name=tag) for word, tag in table.items()})
def viterbi(self, seq):
"""This method simplifies predictions by matching the Pomegranate viterbi() interface"""
return 0., list(enumerate(["<start>"] + [self.table[w] for w in seq] + ["<end>"]))
#def getKeys(dict):
# return "".join([*dict])
# TODO: calculate the frequency of each tag being assigned to each word (hint: similar, but not
# the same as the emission probabilities) and use it to fill the mfc_table
word_counts = pair_counts(data.training_set.X, data.training_set.Y)
# def get_word_counts():
# word_counts = {}
# for k in data.training_set.vocab
mfc_table = {k:list(word_counts[k].keys())[0] for k in word_counts.keys()}
#mfc_table = {k:getKeys(word_counts[k].keys()) for k in word_counts.keys()}
# mfc_table = defaultdict()
# # Loop over the words
# for word, tags in word_counts.items():
# # Select the corresponding tag with highest count value
# tag, _ = max(tags.items(), key=lambda item: item[1])
# mfc_table[word]=tag
# DO NOT MODIFY BELOW THIS LINE
mfc_model = MFCTagger(mfc_table) # Create a Most Frequent Class tagger instance
assert len(mfc_table) == len(data.training_set.vocab), ""
assert all(k in data.training_set.vocab for k in mfc_table.keys()), ""
assert sum(int(k not in mfc_table) for k in data.testing_set.vocab) == 5521, ""
HTML('<div class="alert alert-block alert-success">Your MFC tagger has all the correct words!</div>')
# -
# ### Making Predictions with a Model
# The helper functions provided below interface with Pomegranate network models & the mocked MFCTagger to take advantage of the [missing value](http://pomegranate.readthedocs.io/en/latest/nan.html) functionality in Pomegranate through a simple sequence decoding function. Run these functions, then run the next cell to see some of the predictions made by the MFC tagger.
# +
def replace_unknown(sequence):
"""Return a copy of the input sequence where each unknown word is replaced
by the literal string value 'nan'. Pomegranate will ignore these values
during computation.
"""
return [w if w in data.training_set.vocab else 'nan' for w in sequence]
def simplify_decoding(X, model):
"""X should be a 1-D sequence of observations for the model to predict"""
_, state_path = model.viterbi(replace_unknown(X))
return [state[1].name for state in state_path[1:-1]] # do not show the start/end state predictions
# -
# ### Example Decoding Sequences with MFC Tagger
for key in data.testing_set.keys[:3]:
print("Sentence Key: {}\n".format(key))
print("Predicted labels:\n-----------------")
print(simplify_decoding(data.sentences[key].words, mfc_model))
print()
print("Actual labels:\n--------------")
print(data.sentences[key].tags)
print("\n")
# ### Evaluating Model Accuracy
#
# The function below will evaluate the accuracy of the MFC tagger on the collection of all sentences from a text corpus.
def accuracy(X, Y, model):
"""Calculate the prediction accuracy by using the model to decode each sequence
in the input X and comparing the prediction with the true labels in Y.
The X should be an array whose first dimension is the number of sentences to test,
and each element of the array should be an iterable of the words in the sequence.
The arrays X and Y should have the exact same shape.
X = [("See", "Spot", "run"), ("Run", "Spot", "run", "fast"), ...]
Y = [(), (), ...]
"""
correct = total_predictions = 0
for observations, actual_tags in zip(X, Y):
# The model.viterbi call in simplify_decoding will return None if the HMM
# raises an error (for example, if a test sentence contains a word that
# is out of vocabulary for the training set). Any exception counts the
# full sentence as an error (which makes this a conservative estimate).
try:
most_likely_tags = simplify_decoding(observations, model)
correct += sum(p == t for p, t in zip(most_likely_tags, actual_tags))
except:
pass
total_predictions += len(observations)
return correct / total_predictions
# #### Evaluate the accuracy of the MFC tagger
# Run the next cell to evaluate the accuracy of the tagger on the training and test corpus.
# +
mfc_training_acc = accuracy(data.training_set.X, data.training_set.Y, mfc_model)
print("training accuracy mfc_model: {:.2f}%".format(100 * mfc_training_acc))
mfc_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, mfc_model)
print("testing accuracy mfc_model: {:.2f}%".format(100 * mfc_testing_acc))
assert mfc_training_acc >= 0.9, "Uh oh. Your MFC accuracy on the training set doesn't look right."
assert mfc_testing_acc >= 0.9, "Uh oh. Your MFC accuracy on the testing set doesn't look right."
HTML('<div class="alert alert-block alert-success">Your MFC tagger accuracy looks correct!</div>')
# -
# ## Step 3: Build an HMM tagger
# ---
# The HMM tagger has one hidden state for each possible tag, and parameterized by two distributions: the emission probabilties giving the conditional probability of observing a given **word** from each hidden state, and the transition probabilities giving the conditional probability of moving between **tags** during the sequence.
#
# We will also estimate the starting probability distribution (the probability of each **tag** being the first tag in a sequence), and the terminal probability distribution (the probability of each **tag** being the last tag in a sequence).
#
# The maximum likelihood estimate of these distributions can be calculated from the frequency counts as described in the following sections where you'll implement functions to count the frequencies, and finally build the model. The HMM model will make predictions according to the formula:
#
# $$t_i^n = \underset{t_i^n}{\mathrm{argmax}} \prod_{i=1}^n P(w_i|t_i) P(t_i|t_{i-1})$$
#
# Refer to Speech & Language Processing [Chapter 10](https://web.stanford.edu/~jurafsky/slp3/10.pdf) for more information.
# ### IMPLEMENTATION: Unigram Counts
#
# Complete the function below to estimate the co-occurrence frequency of each symbol over all of the input sequences. The unigram probabilities in our HMM model are estimated from the formula below, where N is the total number of samples in the input. (You only need to compute the counts for now.)
#
# $$P(tag_1) = \frac{C(tag_1)}{N}$$
# +
def unigram_counts(sequences):
"""
Return a dictionary keyed to each unique value in the input sequence list that
counts the number of occurrences of the value in the sequences list. The sequences
collection should be a 2-dimensional array.
For example, if the tag NOUN appears 275558 times over all the input sequences,
then you should return a dictionary such that your_unigram_counts[NOUN] == 275558.
"""
uni_counts = {}
for tag in data.training_set.tagset:
uni_counts[tag] = 0
for tag in data.training_set.tagset:
for sequence in sequences:
for i in sequence:
uni_counts[i] = uni_counts[i]+1
return uni_counts
# TODO: Finish this function!
raise NotImplementedError
# TODO: call unigram_counts with a list of tag sequences from the training set
tag_unigrams = unigram_counts(data.training_set.Y)
assert set(tag_unigrams.keys()) == data.training_set.tagset, \
"Uh oh. It looks like your tag counts doesn't include all the tags!"
assert min(tag_unigrams, key=tag_unigrams.get) == 'X', \
"Hmmm...'X' is expected to be the least common class"
assert max(tag_unigrams, key=tag_unigrams.get) == 'NOUN', \
"Hmmm...'NOUN' is expected to be the most common class"
HTML('<div class="alert alert-block alert-success">Your tag unigrams look good!</div>')
# -
# ### IMPLEMENTATION: Bigram Counts
#
# Complete the function below to estimate the co-occurrence frequency of each pair of symbols in each of the input sequences. These counts are used in the HMM model to estimate the bigram probability of two tags from the frequency counts according to the formula: $$P(tag_2|tag_1) = \frac{C(tag_2|tag_1)}{C(tag_2)}$$
#
# +
def bigram_counts(sequences):
"""Return a dictionary keyed to each unique PAIR of values in the input sequences
list that counts the number of occurrences of pair in the sequences list. The input
should be a 2-dimensional array.
For example, if the pair of tags (NOUN, VERB) appear 61582 times, then you should
return a dictionary such that your_bigram_counts[(NOUN, VERB)] == 61582
"""
bigram_counts = {}
l = len(sequences)
for tag_tuple in (sequences):
for i in range (len(tag_tuple)):
if i > len(tag_tuple)-2:
break
key = (tag_tuple[i], tag_tuple[i+1])
if key in bigram_counts.keys():
bigram_counts[key] = bigram_counts[key] + 1
else:
bigram_counts[key] = 1
# TODO: Finish this function!
return bigram_counts
raise NotImplementedError
# TODO: call bigram_counts with a list of tag sequences from the training set
tag_bigrams = bigram_counts(data.training_set.Y)
assert len(tag_bigrams) == 144, \
"Uh oh. There should be 144 pairs of bigrams (12 tags x 12 tags)"
assert min(tag_bigrams, key=tag_bigrams.get) in [('X', 'NUM'), ('PRON', 'X')], \
"Hmmm...The least common bigram should be one of ('X', 'NUM') or ('PRON', 'X')."
assert max(tag_bigrams, key=tag_bigrams.get) in [('DET', 'NOUN')], \
"Hmmm...('DET', 'NOUN') is expected to be the most common bigram."
HTML('<div class="alert alert-block alert-success">Your tag bigrams look good!</div>')
# -
# ### IMPLEMENTATION: Sequence Starting Counts
# Complete the code below to estimate the bigram probabilities of a sequence starting with each tag.
# +
def starting_counts(sequences):
"""Return a dictionary keyed to each unique value in the input sequences list
that counts the number of occurrences where that value is at the beginning of
a sequence.
For example, if 8093 sequences start with NOUN, then you should return a
dictionary such that your_starting_counts[NOUN] == 8093
"""
starting_count = {}
for tag_tuple in sequences:
if tag_tuple[0] in starting_count.keys():
starting_count[tag_tuple[0]] = starting_count[tag_tuple[0]] + 1
else:
starting_count[tag_tuple[0]] = 1
# TODO: Finish this function!
return starting_count
raise NotImplementedError
# TODO: Calculate the count of each tag starting a sequence
tag_starts = starting_counts(data.training_set.Y)
assert len(tag_starts) == 12, "Uh oh. There should be 12 tags in your dictionary."
assert min(tag_starts, key=tag_starts.get) == 'X', "Hmmm...'X' is expected to be the least common starting bigram."
assert max(tag_starts, key=tag_starts.get) == 'DET', "Hmmm...'DET' is expected to be the most common starting bigram."
HTML('<div class="alert alert-block alert-success">Your starting tag counts look good!</div>')
# -
# ### IMPLEMENTATION: Sequence Ending Counts
# Complete the function below to estimate the bigram probabilities of a sequence ending with each tag.
# +
def ending_counts(sequences):
"""Return a dictionary keyed to each unique value in the input sequences list
that counts the number of occurrences where that value is at the end of
a sequence.
For example, if 18 sequences end with DET, then you should return a
dictionary such that your_starting_counts[DET] == 18
"""
ending_count = {}
for tag_tuple in sequences:
if tag_tuple[-1] in ending_count.keys():
ending_count[tag_tuple[-1]] = ending_count[tag_tuple[-1]] + 1
else:
ending_count[tag_tuple[-1]] = 1
# TODO: Finish this function!
return ending_count
# TODO: Finish this function!
raise NotImplementedError
# TODO: Calculate the count of each tag ending a sequence
tag_ends = ending_counts(data.training_set.Y)
assert len(tag_ends) == 12, "Uh oh. There should be 12 tags in your dictionary."
assert min(tag_ends, key=tag_ends.get) in ['X', 'CONJ'], "Hmmm...'X' or 'CONJ' should be the least common ending bigram."
assert max(tag_ends, key=tag_ends.get) == '.', "Hmmm...'.' is expected to be the most common ending bigram."
HTML('<div class="alert alert-block alert-success">Your ending tag counts look good!</div>')
# -
# ### IMPLEMENTATION: Basic HMM Tagger
# Use the tag unigrams and bigrams calculated above to construct a hidden Markov tagger.
#
# - Add one state per tag
# - The emission distribution at each state should be estimated with the formula: $P(w|t) = \frac{C(t, w)}{C(t)}$
# - Add an edge from the starting state `basic_model.start` to each tag
# - The transition probability should be estimated with the formula: $P(t|start) = \frac{C(start, t)}{C(start)}$
# - Add an edge from each tag to the end state `basic_model.end`
# - The transition probability should be estimated with the formula: $P(end|t) = \frac{C(t, end)}{C(t)}$
# - Add an edge between _every_ pair of tags
# - The transition probability should be estimated with the formula: $P(t_2|t_1) = \frac{C(t_1, t_2)}{C(t_1)}$
# +
basic_model = HiddenMarkovModel(name="base-hmm-tagger")
# TODO: create states with emission probability distributions P(word | tag) and add to the model
# (Hint: you may need to loop & create/add new states)
states = {}
# for tag in data.training_set.tagset:
# emission = DiscreteDistribution(p_dist(tag))
# state = State(emission, name=tag)
# states[tag] = state
# basic_model.add_state(states[tag])
# for tag in emission_counts:
# total = tag_unigrams[tag]
# dist_distr = {key: v/total for k, v in emission_counts[tag].items()}
# emission = DiscreteDistribution(dist_distr)
# state = State(emission, name=tag)
# states[tag] = state
# basic_model.add_state(states[tag])
# # TODO: add edges between states for the observed transition frequencies P(tag_i | tag_i-1)
# # (Hint: you may need to loop & add transitions
# total_tags = len(data.training_set.Y)
# def get_tag_seq_prob(bigram_tuple):
# bigram_count = bigram_counts(bigram_tuple)
# return tag_bigrams[bigram_tuple]/total_tags
# for tag_tuple in tag_bigrams.keys():
# t0 = tag_tuple[0]
# t1 = tag_tuple[1]
# prob_start = tag_starts[tag]/total_tags
# prob_end = tag_ends[tag]/total_tags
# basic_model.add_transition(basic_model.start, states[t0], prob_start)
# tag_seq_prob = get_tag_seq_prob((t0, t1))
# basic_model.add_transition(states[t0], states[t1], get_tag_seq_prob((t0, t1)))
# basic_model.add_transition(states[t0], basic_model.end, prob_end)
# # NOTE: YOU SHOULD NOT NEED TO MODIFY ANYTHING BELOW THIS LINE
# # finalize the model
# basic_model.bake()
# assert all(tag in set(s.name for s in basic_model.states) for tag in data.training_set.tagset), \
# "Every state in your network should use the name of the associated tag, which must be one of the training set tags."
# assert basic_model.edge_count() == 168, \
# ("Your network should have an edge from the start node to each state, one edge between every " +
# "pair of tags (states), and an edge from each state to the end node.")
# HTML('<div class="alert alert-block alert-success">Your HMM network topology looks good!</div>')
# +
basic_model = HiddenMarkovModel(name="base-hmm-tagger")
# TODO: create states with emission probability distributions P(word | tag) and add to the model
# (Hint: you may need to loop & create/add new states)
#basic_model.add_states()
# Data preparation : compute pair_counts C(t,w)
# ctw is constructed such as to get ctw[NOUN]['time']==1275
ctw = pair_counts(data.training_set.Y , data.training_set.X)
# Initialize a dictionary to store the states object by tag key
states = dict()
# Loop over all the tagset and create a state for each tags
for tag in data.training_set.tagset:
# Create a dict to store the emission distribution for the tag
emissions_distribution = dict()
# Compute the emission distribution P(w|t)=C(t,w) / C(t) and store it in a dictionary
for word in ctw[tag]:
emissions_distribution[word] = ctw[tag][word] / tag_unigrams[tag]
# Create a discrete distribution and a state for the current tag
tag_emissions = DiscreteDistribution(emissions_distribution)
tag_state = State(tag_emissions, name=tag)
# Store the created state in a dictionary
states[tag]=tag_state
# Debug info
#show_model(basic_model, figsize=(10, 10), filename="example.png", overwrite=True, show_ends=False)
# Add all the created states to the model
# Not clear in pomegranate documentation, but add_states requires to convert dict.values into list type to avoid errors
basic_model.add_states([elt for elt in states.values()])
# TODO: add edges between states for the observed transition frequencies P(tag_i | tag_i-1)
# (Hint: you may need to loop & add transitions
#basic_model.add_transition()
## Add an edge from the starting state to each tag
# Not clear in pomegranate documentation, but add_transition requires a State object in arguments, not a tag name (str)
for tag in data.training_set.tagset:
# Compute the transition probability P(t|start)=C(start,t) / C(start)
tp = tag_starts[tag] / len(tag_starts)
basic_model.add_transition(basic_model.start, states[tag], tp)
## Add an edge from each tag to the end state
for tag in data.training_set.tagset:
# Compute the transition probability P(end|t)=C(t,end) / C(t)
tp = tag_ends[tag] / tag_unigrams[tag]
basic_model.add_transition(states[tag], basic_model.end , tp)
## Add an edge between every pair of tags
for t1, t2 in tag_bigrams.keys():
# Compute the transition probability P(t2|t1)=C(t1,t2) / C(t1)
tp = tag_bigrams[(t1,t2)] / tag_unigrams[t1]
basic_model.add_transition(states[t1], states[t2] , tp)
# Debug info
#show_model(basic_model, figsize=(10, 10), filename="example.png", overwrite=True, show_ends=False)
# NOTE: YOU SHOULD NOT NEED TO MODIFY ANYTHING BELOW THIS LINE
# finalize the modelstate
basic_model.bake()
assert all(tag in set(s.name for s in basic_model.states) for tag in data.training_set.tagset), \
"Every state in your network should use the name of the associated tag, which must be one of the training set tags."
assert basic_model.edge_count() == 168, \
("Your network should have an edge from the start node to each state, one edge between every " +
"pair of tags (states), and an edge from each state to the end node.")
HTML('<div class="alert alert-block alert-success">Your HMM network topology looks good!</div>')
# +
hmm_training_acc = accuracy(data.training_set.X, data.training_set.Y, basic_model)
print("training accuracy basic hmm model: {:.2f}%".format(100 * hmm_training_acc))
hmm_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, basic_model)
print("testing accuracy basic hmm model: {:.2f}%".format(100 * hmm_testing_acc))
assert hmm_training_acc > 0.97, "Uh oh. Your HMM accuracy on the training set doesn't look right."
assert hmm_testing_acc > 0.955, "Uh oh. Your HMM accuracy on the testing set doesn't look right."
HTML('<div class="alert alert-block alert-success">Your HMM tagger accuracy looks correct! Congratulations, you\'ve finished the project.</div>')
# -
# ### Example Decoding Sequences with the HMM Tagger
for key in data.testing_set.keys[:3]:
print("Sentence Key: {}\n".format(key))
print("Predicted labels:\n-----------------")
print(simplify_decoding(data.sentences[key].words, basic_model))
print()
print("Actual labels:\n--------------")
print(data.sentences[key].tags)
print("\n")
#
# ## Finishing the project
# ---
#
# <div class="alert alert-block alert-info">
# **Note:** **SAVE YOUR NOTEBOOK**, then run the next cell to generate an HTML copy. You will zip & submit both this file and the HTML copy for review.
# </div>
!!jupyter nbconvert *.ipynb
# ## Step 4: [Optional] Improving model performance
# ---
# There are additional enhancements that can be incorporated into your tagger that improve performance on larger tagsets where the data sparsity problem is more significant. The data sparsity problem arises because the same amount of data split over more tags means there will be fewer samples in each tag, and there will be more missing data tags that have zero occurrences in the data. The techniques in this section are optional.
#
# - [Laplace Smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) (pseudocounts)
# Laplace smoothing is a technique where you add a small, non-zero value to all observed counts to offset for unobserved values.
#
# - Backoff Smoothing
# Another smoothing technique is to interpolate between n-grams for missing data. This method is more effective than Laplace smoothing at combatting the data sparsity problem. Refer to chapters 4, 9, and 10 of the [Speech & Language Processing](https://web.stanford.edu/~jurafsky/slp3/) book for more information.
#
# - Extending to Trigrams
# HMM taggers have achieved better than 96% accuracy on this dataset with the full Penn treebank tagset using an architecture described in [this](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf) paper. Altering your HMM to achieve the same performance would require implementing deleted interpolation (described in the paper), incorporating trigram probabilities in your frequency tables, and re-implementing the Viterbi algorithm to consider three consecutive states instead of two.
#
# ### Obtain the Brown Corpus with a Larger Tagset
# Run the code below to download a copy of the brown corpus with the full NLTK tagset. You will need to research the available tagset information in the NLTK docs and determine the best way to extract the subset of NLTK tags you want to explore. If you write the following the format specified in Step 1, then you can reload the data using all of the code above for comparison.
#
# Refer to [Chapter 5](http://www.nltk.org/book/ch05.html) of the NLTK book for more information on the available tagsets.
# +
import nltk
from nltk import pos_tag, word_tokenize
from nltk.corpus import brown
nltk.download('brown')
training_corpus = nltk.corpus.brown
training_corpus.tagged_sents()[0]
# -
| HMM Tagger.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .virtualenv
# language: python
# name: .virtualenv
# ---
# # Sampling of strong times
# ## I. The strong stationary time T
# +
import matplotlib.pyplot as plt
import numpy as np
from strong_stationary_times import core, sampling
def sample(sampler, list_M, n):
results = []
#
for M in list_M:
A = np.zeros( shape=(M,M), dtype=int )
# Initialization with one element per fiber
# for i in range(M):
# A[i,i] = 1
# Initialization with only one element in only one fiber
A[1,1] = 1
samples = np.array( [sampler(A, debug=False) for i in range(1,n)] )
print("M: ", M)
mean = np.mean(samples)
std = np.std(samples)
result = {
"M" : M,
"n" : n,
"mean": mean,
"std" : std
}
results.append(result)
print("Result: ", result)
plt.hist(Ts/(M*M), bins=15)
plt.show()
return results
# -
n = 100 # number of iid samples
list_M = [5, 7, 9, 11, 13, 15, 17]
#list_M = [5, 7]
results = sample( sampler=sampling.sample_T, list_M=list_M, n=n)
# Plot
means = np.array( [res['mean'] for res in results])
stds = np.array( [res['std'] for res in results] )
ic1 = means + 1.96*stds/np.sqrt(n)
ic2 = means - 1.96*stds/np.sqrt(n)
plt.plot(list_M, np.log(means)/np.log(list_M), '*')
plt.plot(list_M, np.log(ic1)/np.log(list_M), '--', label="upper confidence interval")
plt.plot(list_M, np.log(ic2)/np.log(list_M), '--', label="lower confidence interval")
plt.legend()
plt.xlabel("M")
plt.ylabel("log Mean / log M")
plt.ylim(0, 4)
plt.savefig("T_log_mean_vs_M.png")
# # II. The stopping time S
n = 100 # number of iid samples
list_M = [5, 7, 9, 11, 13, 15, 17]
#list_M = [5, 7]
results = sample( sampler=sampling.sample_S, list_M=list_M, n=n)
# Plot
means = np.array( [res['mean'] for res in results])
stds = np.array( [res['std'] for res in results] )
ic1 = means + 1.96*stds/np.sqrt(n)
ic2 = means - 1.96*stds/np.sqrt(n)
plt.plot(list_M, np.log(means)/np.log(list_M), '*')
plt.plot(list_M, np.log(ic1)/np.log(list_M), '--', label="upper confidence interval")
plt.plot(list_M, np.log(ic2)/np.log(list_M), '--', label="lower confidence interval")
plt.legend()
plt.xlabel("M")
plt.ylabel("log Mean / log M")
plt.ylim(0, 4)
plt.savefig("S_log_mean_vs_M.png")
| ipynb/StrongTimes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sent2vec
import glob
from multiprocessing import Pool
import sys
import os
import re
import codecs
import nltk.data
from nltk.tokenize import TweetTokenizer
from scipy.stats import pearsonr, spearmanr
import numpy as np
data_path = '/Users/sakoju/Documents/10715/Fall2018/Project/sent2vec/'
model_path = data_path + 'pre-trained-models/'
print(model_path)
model_books = sent2vec.Sent2vecModel()
model_books.load_model(model_path + 'books_model.bin')
t1 = "<NAME> ran a great winning campaign against a very tough opponent in Oklahoma. Kevin is a very successful businessman who will be a fantastic Governor. He is strong on Crime & Borders, the 2nd Amendment, & loves our Military & Vets. He has my complete and total Endorsement!"
t2 = "To the incredible people of the Great State of Wyoming: Go VOTE TODAY for Foster Friess - He will be a fantastic Governor! Strong on Crime, Borders & 2nd Amendment. Loves our Military & our Vets. He has my complete and total Endorsement!"
def get_similarity(t1,t2,model):
tknzr = TweetTokenizer()
t1 = ' '.join(tknzr.tokenize(t1)).lower()
t2 = ' '.join(tknzr.tokenize(t2)).lower()
# print(t1)
# print(t2)
# emb = model.embed_sentence("once upon a time .")
emb = model.embed_sentences([t1,t2])
# print(emb.shape)
pearson = pearsonr(emb[0,:],emb[1,:])[0]
spearman = spearmanr(emb[0,:],emb[1,:])[0]
return np.round(pearson,3),np.round(spearman,3),np.round((pearson + spearman)/2.0,3)
print(get_similarity(t1,t2,model_books))
model_quora = sent2vec.Sent2vecModel()
model_quora.load_model(model_path + 'quora_model.bin')
print(get_similarity(t1,t2,model_quora))
t1 = "Piorot is the main detective in <NAME>'s novels. But Mr Darcy and <NAME> are main protagonists in <NAME>'s novels."
t2 = "Lirerary critics treasure O Henry's The Gift of Magi as one of the most lauded short stories in history of literature."
print(get_similarity(t1,t2,model_books))
print(get_similarity(t1,t2,model_quora))
| s2v_sushma.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# We will build a Linear regression model for Medical cost dataset. The dataset consists of age, sex, BMI(body mass index), children, smoker and region feature, which are independent and charge as a dependent feature. We will predict individual medical costs billed by health insurance.
# # Definition & Working principle
# Let's build model using **Linear regression**.
#
# Linear regression is a **supervised learining** algorithm used when target / dependent variable **continues** real number. It establishes relationship between dependent variable $y$ and one or more independent variable $x$ using best fit line. It work on the principle of ordinary least square $(OLS)$ / Mean square errror $(MSE)$. In statistics ols is method to estimated unkown parameter of linear regression function, it's goal is to minimize sum of square difference between observed dependent variable in the given data set and those predicted by linear regression fuction.
#
# ## Hypothesis representation
#
# We will use $\mathbf{x_i}$ to denote the independent variable and $\mathbf{y_i}$ to denote dependent variable. A pair of $\mathbf{(x_i,y_i)}$ is called training example. The subscripe $\mathbf{i}$ in the notation is simply index into the training set. We have $\mathbf{m}$ training example then $\mathbf{i = 1,2,3,...m}$.
#
# The goal of supervised learning is to learn a *hypothesis function $\mathbf{h}$*, for a given training set that can used to estimate $\mathbf{y}$ based on $\mathbf{x}$. So hypothesis fuction represented as
#
# $$\mathbf{ h_\theta(x_{i}) = \theta_0 + \theta_1x_i }$$
# $\mathbf{\theta_0,\theta_1}$ are parameter of hypothesis.This is equation for **Simple / Univariate Linear regression**.
#
# For **Multiple Linear regression** more than one independent variable exit then we will use $\mathbf{x_{ij}}$ to denote indepedent variable and $\mathbf{y_{i}}$ to denote dependent variable. We have $\mathbf{n}$ independent variable then $\mathbf{j=1,2,3 ..... n}$. The hypothesis function represented as
#
# $$\mathbf{h_\theta(x_{i}) = \theta_0 + \theta_1x_{i1} + \theta_2 x_{i2} + ..... \theta_j x_{ij} ...... \theta_n x_{mn} }$$
# $\mathbf{\theta_0,\theta_1,....\theta_j....\theta_n }$ are parameter of hypothesis,
# $\mathbf{m}$ Number of training exaples,
# $\mathbf{n}$ Number of independent variable,
# $\mathbf{x_{ij}}$ is $\mathbf{i^{th}}$ training exaple of $\mathbf{j^{th}}$ feature.
#
# ## Import Library and Dataset
# Now we will import couple of python library required for our analysis and import dataset
# Import library
import pandas as pd #Data manipulation
import numpy as np #Data manipulation
import matplotlib.pyplot as plt # Visualization
import seaborn as sns #Visualization
plt.rcParams['figure.figsize'] = [8,5]
plt.rcParams['font.size'] =14
plt.rcParams['font.weight']= 'bold'
plt.style.use('seaborn-whitegrid')
# +
# Import dataset
#path ='dataset/'
df = pd.read_csv('Parker.csv')
print('\nNumber of rows and columns in the data set: ',df.shape)
print('')
#Lets look into top few rows and columns in the dataset
df.head()
# -
# Now we have import dataset. When we look at the shape of dataset it has return as (1338,7).So there are $\mathbf{m=1338}$ training exaple and $\mathbf{n=7}$ independent variable. The target variable here is charges and remaining six variables such as age, sex, bmi, children, smoker, region are independent variable. There are multiple independent variable, so we need to fit Multiple linear regression. Then the hypothesis function looks like
#
# $$\mathbf{ h_\theta(x_{i}) = \theta_0+\theta_1 age + \theta_2 sex + \theta_3 bmi + \theta_4 children + \theta_5 smoker + \theta_6 region }$$
#
# This multiple linear regression equation for given dataset.
# If $\mathbf{i=1}$ then
# $$\mathbf{h_\theta(x_{1}) = \theta_0+\theta_1 19 + \theta_2 female + \theta_3 27.900 + \theta_4 1 + \theta_5 yes + \theta_6 southwest}$$
# $$\mathbf{y_1 = 16884.92400}$$
# If $\mathbf{i=3}$ then $$\mathbf{h_\theta(x_{3}) = \theta_0+\theta_1 28 + \theta_2 male + \theta_3 33.000 + \theta_4 3 + \theta_5 no + \theta_6 northwest}$$
# $$\mathbf{y_3 = 4449.46200}$$
# *Note*: In python index starts from 0.
# $$\mathbf{x_1 = \left(\begin{matrix} x_{11} & x_{12} & x_{13} & x_{14} & x_{15} & x_{16}\end{matrix}\right) = \left(\begin{matrix} 19 & female & 27.900 & 1 & no & northwest\end{matrix}\right) }$$
# ## Matrix Formulation
#
# In general we can write above vector as $$ \mathbf{ x_{ij}} = \left( \begin{smallmatrix} \mathbf{x_{i1}} & \mathbf{x_{i2}} &.&.&.& \mathbf{x_{in}} \end{smallmatrix} \right)$$
#
# Now we combine all aviable individual vector into single input matrix of size $(m,n)$ and denoted it by $\mathbf{X}$ input matrix, which consist of all training exaples,
# $$\mathbf{X} = \left( \begin{smallmatrix} x_{11} & x_{12} &.&.&.&.& x_{1n}\\
# x_{21} & x_{22} &.&.&.&.& x_{2n}\\
# x_{31} & x_{32} &.&.&.&.& x_{3n}\\
# .&.&.&. &.&.&.& \\
# .&.&.&. &.&.&.& \\
# x_{m1} & x_{m2} &.&.&.&.&. x_{mn}\\
# \end{smallmatrix} \right)_{(m,n)}$$
#
# We represent parameter of function and dependent variable in vactor form as
# $$\theta = \left (\begin{matrix} \theta_0 \\ \theta_1 \\ .\\.\\ \theta_j\\.\\.\\ \theta_n \end {matrix}\right)_{(n+1,1)}
# \mathbf{ y } = \left (\begin{matrix} y_1\\ y_2\\. \\. \\ y_i \\. \\. \\ y_m \end{matrix} \right)_{(m,1)}$$
#
# So we represent hypothesis function in vectorize form $$\mathbf{ h_\theta{(x)} = X\theta}$$.
#
#
# +
""" for our visualization purpose will fit line using seaborn library only for bmi as independent variable
and charges as dependent variable"""
sns.lmplot(x='Income_Range',y='After_FBS',data=df,aspect=2,height=6)
plt.xlabel('Boby Mass Index$(kg/m^2)$: as Independent variable')
plt.ylabel('Insurance Charges: as Dependent variable')
plt.title('Charge Vs BMI');
# -
# In above plot we fit regression line into the variables.
# ## Cost function
#
# A cost function measures how much error in the model is in terms of ability to estimate the relationship between $x$ and $y$.
# We can measure the accuracy of our hypothesis function by using a cost function. This takes an average difference of observed dependent variable in the given the dataset and those predicted by the hypothesis function.
#
# $$\mathbf{ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}(\hat{y}_i - y_i)^2}$$
# $$\mathbf{J(\theta) = \frac{1}{m} \sum_{i=1}^{m}(h_\theta(x_i) - y_i)^2}$$
# To implement the linear regression, take training example add an extra column that is $x_0$ feature, where $\mathbf{x_0=1}$. $\mathbf{x_{o}} = \left( \begin{smallmatrix} x_{i0} & x_{i1} & x_{i2} &.&.&.& x_{mi} \end{smallmatrix} \right)$,where $\mathbf{x_{i0} =0}$ and input matrix will become as
#
# $$\mathbf{X} = \left( \begin{smallmatrix} x_{10} & x_{11} & x_{12} &.&.&.&.& x_{1n}\\
# x_{20} & x_{21} & x_{22} &.&.&.&.& x_{2n}\\
# x_{30} & x_{31} & x_{32} &.&.&.&.& x_{3n}\\
# .&.&.&.&. &.&.&.& \\
# .&.&.&.&. &.&.&.& \\
# x_{m0} & x_{m1} & x_{m2} &.&.&.&.&. x_{mn}\\
# \end{smallmatrix} \right)_{(m,n+1)}$$
# Each of the m input samples is similarly a column vector with n+1 rows $x_0$ being 1 for our convenience, that is $\mathbf{x_{10},x_{20},x_{30} .... x_{m0} =1}$. Now we rewrite the ordinary least square cost function in matrix form as
# $$\mathbf{J(\theta) = \frac{1}{m} (X\theta - y)^T(X\theta - y)}$$
#
# Let's look at the matrix multiplication concept,the multiplication of two matrix happens only if number of column of firt matrix is equal to number of row of second matrix. Here input matrix $\mathbf{X}$ of size $\mathbf{(m,n+1)}$, parameter of function is of size $(n+1,1)$ and dependent variable vector of size $\mathbf{(m,1)}$. The product of matrix $\mathbf{X_{(m,n+1)}\theta_{(n+1,1)}}$ will return a vector of size $\mathbf{(m,1)}$, then product of $\mathbf{(X\theta - y)^T_{(1,m})(X\theta - y)_{(m,1)}}$ will return size of unit vector.
# ## Normal Equation
# The normal equation is an analytical solution to the linear regression problem with a ordinary least square cost function. To minimize our cost function, take partial derivative of $\mathbf{J(\theta)}$ with respect to $\theta$ and equate to $0$. The derivative of function is nothing but if a small change in input what would be the change in output of function.
# $$\mathbf{min_{\theta_0,\theta_1..\theta_n} J({\theta_0,\theta_1..\theta_n})}$$
# $$\mathbf{\frac{\partial J(\theta_j)}{\partial\theta_j} =0}$$
# where $\mathbf{j = 0,1,2,....n}$
#
# Now we will apply partial derivative of our cost function,
# $$\mathbf{\frac{\partial J(\theta_j)}{\partial\theta_j} = \frac{\partial }{\partial \theta} \frac{1}{m}(X\theta - y)^T(X\theta - y) }$$
# I will throw $\mathbf{\frac {1}{m}}$ part away since we are going to compare a derivative to $0$. And solve $\mathbf{J(\theta)}$,
#
# $$\mathbf{J(\theta) = (X\theta -y)^T(X\theta - y)}$$
# $$\mathbf{= (X\theta)^T - y^T)(X\theta -y)}$$
# $$\mathbf{= (\theta^T X^T - y^T)(X\theta - y)}$$
# $$\mathbf{= \theta^T X^T X \theta - y^T X \theta - \theta^T X^T y + y^T y}$$
# $$\mathbf{ = \theta^T X^T X \theta - 2\theta^T X^T y + y^T y}$$
#
# Here $\mathbf{y^T_{(1,m)} X_{(m,n+1)} \theta_{(n+1,1)} = \theta^T_{(1,n+1)} X^T_{(n+1,m)} y_{(m,1)}}$ because unit vector.
#
# $$\mathbf{\frac{\partial J(\theta)}{\partial \theta} = \frac{\partial}{\partial \theta} (\theta^T X^T X \theta - 2\theta^T X^T y + y^T y )}$$
# $$\mathbf{ = X^T X \frac {\partial \theta^T \theta}{\partial\theta} - 2 X^T y \frac{\partial \theta^T}{\partial\theta} + \frac {\partial y^T y}{\partial\theta}}$$
# Partial derivative $\mathbf{\frac {\partial x^2}{\partial x} = 2x}$, $\mathbf{\frac {\partial kx^2}{\partial x} = kx}$,
# $\mathbf{\frac {\partial Constact}{\partial x} = 0}$
#
# $$\mathbf{\frac{\partial J(\theta)}{\partial\theta} = X^T X 2\theta - 2X^T y +0}$$
# $$\mathbf{ 0 = 2X^T X \theta - 2X^T y}$$
# $$\mathbf{ X^T X \theta = X^T }$$
# $$\mathbf{ \theta = (X^TX)^{-1} X^Ty }$$
# this the normal equation for linear regression
# ## Exploratory data analysis
df.describe()
# ### Check for missing value
plt.figure(figsize=(12,4))
sns.heatmap(df.isnull(),cbar=False,cmap='viridis',yticklabels=False)
plt.title('Missing value in the dataset');
# There is no missing value in the data sex
# ### Plots
# correlation plot
corr = df.corr()
sns.heatmap(corr, cmap = 'Wistia', annot= True);
# Thier no correlation among valiables.
# +
f= plt.figure(figsize=(12,4))
ax=f.add_subplot(121)
sns.distplot(df["Before_FBS"],bins=50,color='r',ax=ax)
ax.set_title('Distribution of Before_FBS')
ax=f.add_subplot(122)
sns.distplot(np.log10(df['After_FBS']),bins=40,color='b',ax=ax)
ax.set_title('Distribution of After_FBS')
ax.set_xscale('log');
# -
# If we look at the left plot the charges varies from 1120 to 63500, the plot is right skewed. In right plot we will apply natural log, then plot approximately tends to normal. for further analysis we will apply log on target variable charges.
# +
f = plt.figure(figsize=(14,6))
ax = f.add_subplot(121)
sns.violinplot(x='Income_Range', y='After_FBS',data=df,palette='Wistia',ax=ax)
ax.set_title('Violin plot Income')
ax = f.add_subplot(122)
sns.violinplot(x="Before_FBS", y='After_FBS',data=df,palette='magma',ax=ax)
ax.set_title('Violin plot of FBS');
# -
# From left plot the insurance charge for male and female is approximatley in same range,it is average around 5000 bucks. In right plot the insurance charge for smokers is much wide range compare to non smokers, the average charges for non smoker is approximately 5000 bucks. For smoker the minimum insurance charge is itself 5000 bucks.
df.groupby('Income_Range').agg(['mean','min','max'])['After_FBS']
# >From left plot the minimum age person is insured is 18 year. There is slabs in policy most of non smoker take $1^{st}$ and $2^{nd}$ slab, for smoker policy start at $2^{nd}$ and $3^{rd}$ slab.
#
# >Body mass index (BMI) is a measure of body fat based on height and weight that applies to adult men and women. The minimum bmi is 16$kg/m^2$ and maximum upto 54$kg/m^2$
# ## Data Preprocessing
# ### Encoding
# Machine learning algorithms cannot work with categorical data directly, categorical data must be converted to number.
# 1. Label Encoding
# 2. One hot encoding
# 3. Dummy variable trap
#
# **Label encoding** refers to transforming the word labels into numerical form so that the algorithms can understand how to operate on them.
#
# A **One hot encoding** is a representation of categorical variable as binary vectors.It allows the representation of categorical data to be more expresive. This first requires that the categorical values be mapped to integer values, that is label encoding. Then, each integer value is represented as a binary vector that is all zero values except the index of the integer, which is marked with a 1.
#
# The **Dummy variable trap** is a scenario in which the independent variable are multicollinear, a scenario in which two or more variables are highly correlated in simple term one variable can be predicted from the others.
#
# By using *pandas get_dummies* function we can do all above three step in line of code. We will this fuction to get dummy variable for sex, children,smoker,region features. By setting *drop_first =True* function will remove dummy variable trap by droping one variable and original variable.The pandas makes our life easy.
# ### Box -Cox transformation
# A Box Cox transformation is a way to transform non-normal dependent variables into a normal shape. Normality is an important assumption for many statistical techniques; if your data isn’t normal, applying a Box-Cox means that you are able to run a broader number of tests. All that we need to perform this transformation is to find lambda value and apply the rule shown below to your variable.
# $$\mathbf{ \begin {cases}\frac {y^\lambda - 1}{\lambda},& y_i\neg=0 \\
# log(y_i) & \lambda = 0 \end{cases}}$$
# The trick of Box-Cox transformation is to find lambda value, however in practice this is quite affordable. The following function returns the transformed variable, lambda value,confidence interval
# The original categorical variable are remove and also one of the one hot encode varible column for perticular categorical variable is droped from the column. So we completed all three encoding step by using get dummies function.
# ## Train Test split
# +
from sklearn.model_selection import train_test_split
X = df_encode.drop('charges',axis=1) # Independet variable
y = df_encode['charges'] # dependent variable
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=23)
# -
# ## Model building
# In this step build model using our linear regression equation $\mathbf{\theta = (X^T X)^{-1} X^Ty}$. In first step we need to add a feature $\mathbf{x_0 =1}$ to our original data set.
# +
# Step 1: add x0 =1 to dataset
X_train_0 = np.c_[np.ones((X_train.shape[0],1)),X_train]
X_test_0 = np.c_[np.ones((X_test.shape[0],1)),X_test]
# Step2: build model
theta = np.matmul(np.linalg.inv( np.matmul(X_train_0.T,X_train_0) ), np.matmul(X_train_0.T,y_train))
# -
# The parameters for linear regression model
parameter = ['theta_'+str(i) for i in range(X_train_0.shape[1])]
columns = ['intersect:x_0=1'] + list(X.columns.values)
parameter_df = pd.DataFrame({'Parameter':parameter,'Columns':columns,'theta':theta})
# +
# Scikit Learn module
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X_train,y_train) # Note: x_0 =1 is no need to add, sklearn will take care of it.
#Parameter
sk_theta = [lin_reg.intercept_]+list(lin_reg.coef_)
parameter_df = parameter_df.join(pd.Series(sk_theta, name='Sklearn_theta'))
parameter_df
# -
# The parameter obtained from both the model are same.So we succefull build our model using normal equation and verified using sklearn linear regression module. Let's move ahead, next step is prediction and model evaluvation.
# ## Model evaluation
# We will predict value for target variable by using our model parameter for test data set. Then compare the predicted value with actual valu in test set. We compute **Mean Square Error** using formula
# $$\mathbf{ J(\theta) = \frac{1}{m} \sum_{i=1}^{m}(\hat{y}_i - y_i)^2}$$
#
# $\mathbf{R^2}$ is statistical measure of how close data are to the fitted regression line. $\mathbf{R^2}$ is always between 0 to 100%. 0% indicated that model explains none of the variability of the response data around it's mean. 100% indicated that model explains all the variablity of the response data around the mean.
#
# $$\mathbf{R^2 = 1 - \frac{SSE}{SST}}$$
# **SSE = Sum of Square Error**
# **SST = Sum of Square Total**
# $$\mathbf{SSE = \sum_{i=1}^{m}(\hat{y}_i - y_i)^2}$$
# $$\mathbf{SST = \sum_{i=1}^{m}(y_i - \bar{y}_i)^2}$$
# Here $\mathbf{\hat{y}}$ is predicted value and $\mathbf{\bar{y}}$ is mean value of $\mathbf{y}$.
# +
# Normal equation
y_pred_norm = np.matmul(X_test_0,theta)
#Evaluvation: MSE
J_mse = np.sum((y_pred_norm - y_test)**2)/ X_test_0.shape[0]
# R_square
sse = np.sum((y_pred_norm - y_test)**2)
sst = np.sum((y_test - y_test.mean())**2)
R_square = 1 - (sse/sst)
print('The Mean Square Error(MSE) or J(theta) is: ',J_mse)
print('R square obtain for normal equation method is :',R_square)
# +
# sklearn regression module
y_pred_sk = lin_reg.predict(X_test)
#Evaluvation: MSE
from sklearn.metrics import mean_squared_error
J_mse_sk = mean_squared_error(y_pred_sk, y_test)
# R_square
R_square_sk = lin_reg.score(X_test,y_test)
print('The Mean Square Error(MSE) or J(theta) is: ',J_mse_sk)
print('R square obtain for scikit learn library is :',R_square_sk)
# -
# The model returns $R^2$ value of 77.95%, so it fit our data test very well, but still we can imporve the the performance of by diffirent technique. Please make a note that we have transformer out variable by applying natural log. When we put model into production antilog is applied to the equation.
# ## Model Validation
# In order to validated model we need to check few assumption of linear regression model. The common assumption for *Linear Regression* model are following
# 1. Linear Relationship: In linear regression the relationship between the dependent and independent variable to be *linear*. This can be checked by scatter ploting Actual value Vs Predicted value
# 2. The residual error plot should be *normally* distributed.
# 3. The *mean* of *residual error* should be 0 or close to 0 as much as possible
# 4. The linear regression require all variables to be multivariate normal. This assumption can best checked with Q-Q plot.
# 5. Linear regession assumes that there is little or no *Multicollinearity in the data. Multicollinearity occurs when the independent variables are too highly correlated with each other. The variance inflation factor *VIF* identifies correlation between independent variables and strength of that correlation. $\mathbf{VIF = \frac {1}{1-R^2}}$, If VIF >1 & VIF <5 moderate correlation, VIF < 5 critical level of multicollinearity.
# 6. Homoscedasticity: The data are homoscedastic meaning the residuals are equal across the regression line. We can look at residual Vs fitted value scatter plot. If heteroscedastic plot would exhibit a funnel shape pattern.
# +
# Check for Linearity
f = plt.figure(figsize=(14,5))
ax = f.add_subplot(121)
sns.scatterplot(y_test,y_pred_sk,ax=ax,color='r')
ax.set_title('Check for Linearity:\n Actual Vs Predicted value')
# Check for Residual normality & mean
ax = f.add_subplot(122)
sns.distplot((y_test - y_pred_sk),ax=ax,color='b')
ax.axvline((y_test - y_pred_sk).mean(),color='k',linestyle='--')
ax.set_title('Check for Residual normality & mean: \n Residual eror');
# +
# Check for Multivariate Normality
# Quantile-Quantile plot
f,ax = plt.subplots(1,2,figsize=(14,6))
import scipy as sp
_,(_,_,r)= sp.stats.probplot((y_test - y_pred_sk),fit=True,plot=ax[0])
ax[0].set_title('Check for Multivariate Normality: \nQ-Q Plot')
#Check for Homoscedasticity
sns.scatterplot(y = (y_test - y_pred_sk), x= y_pred_sk, ax = ax[1],color='r')
ax[1].set_title('Check for Homoscedasticity: \nResidual Vs Predicted');
# -
# Check for Multicollinearity
#Variance Inflation Factor
VIF = 1/(1- R_square_sk)
VIF
# The model assumption linear regression as follows
# 1. In our model the actual vs predicted plot is curve so linear assumption fails
# 2. The residual mean is zero and residual error plot right skewed
# 3. Q-Q plot shows as value log value greater than 1.5 trends to increase
# 4. The plot is exhibit heteroscedastic, error will insease after certian point.
# 5. Variance inflation factor value is less than 5, so no multicollearity.
| third_project/linear-regression-tutorial (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from matplotlib import pyplot
import pandas as pd
import warnings
from summer.utils import ref_times_to_dti
from autumn.tools.plots.utils import REF_DATE
from autumn.tools.project import get_project
from autumn.tools.utils.display import pretty_print
# +
pyplot.style.use("ggplot")
warnings.filterwarnings("ignore")
region = "bhutan"
project = get_project("sm_sir", region)
# -
baseline_params = project.param_set.baseline
custom_params = project.param_set.baseline.update(
{
"time": {
"end": 1000.},
}
)
# +
model = project.run_baseline_model(custom_params)
derived_df = model.get_derived_outputs_df()
model_start_time = ref_times_to_dti(REF_DATE, [custom_params["time"]["start"]])[0]
model_end_time = ref_times_to_dti(REF_DATE, [custom_params["time"]["end"]])[0]
# +
targets_dict = {
t.data.name: pd.Series(t.data.values, index=ref_times_to_dti(model.ref_date, t.data.index)) for
t in project.calibration.targets
}
death_string = "accum_deaths" if region == "bangladesh" else "infection_deaths"
outputs_to_plot = ["notifications", "infection_deaths", "icu_occupancy", "hospital_occupancy"]
fig = pyplot.figure(figsize=(15, 12))
for i_out, output in enumerate(outputs_to_plot):
axis = fig.add_subplot(2, 2, i_out + 1)
if output in targets_dict:
targets_dict[output].plot(ax=axis, style='.')
if output in derived_df:
derived_df[output].plot(ax=axis)
axis.set_title(output.replace("_", " "))
axis.set_xlim([model_start_time, model_end_time])
# -
fig = pyplot.figure(figsize=(15, 6))
pyplot.style.use("ggplot")
axis = fig.add_subplot(1,2,1)
axis = derived_df["cdr"].plot()
axis.set_title("cdr")
axis = fig.add_subplot(1,2,2)
axis = derived_df["prop_ever_infected"].plot()
axis.set_title("prop_ever_infected")
| notebooks/user/pjayasundara/bhutan_calibrate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ## Deploy <font color='red'>For Seller to update: Steel Surface Defects Classifier </font> Model Package from AWS Marketplace
#
#
# ## <font color='red'> For Seller to update: Add overview of the ML Model here </font>
#
# This sample notebook shows you how to deploy <font color='red'> For Seller to update:Auto Insurance Claims Fraud Prediction</font> using Amazon SageMaker.
#
# > **Note**: This is a reference notebook and it cannot run unless you make changes suggested in the notebook.
#
# #### Pre-requisites:
# 1. **Note**: This notebook contains elements which render correctly in Jupyter interface. Open this notebook from an Amazon SageMaker Notebook Instance or Amazon SageMaker Studio.
# 1. Ensure that IAM role used has **AmazonSageMakerFullAccess**
# 1. To deploy this ML model successfully, ensure that:
# 1. Either your IAM role has these three permissions and you have authority to make AWS Marketplace subscriptions in the AWS account used:
# 1. **aws-marketplace:ViewSubscriptions**
# 1. **aws-marketplace:Unsubscribe**
# 1. **aws-marketplace:Subscribe**
# 2. or your AWS account has a subscription to <font color='red'> For Seller to update:Auto Insurance Claims Fraud Prediction</font>. If so, skip step: [Subscribe to the model package](#1.-Subscribe-to-the-model-package)
#
# #### Contents:
# 1. [Subscribe to the model package](#1.-Subscribe-to-the-model-package)
# 2. [Create an endpoint and perform real-time inference](#2.-Create-an-endpoint-and-perform-real-time-inference)
# 1. [Create an endpoint](#A.-Create-an-endpoint)
# 2. [Create input payload](#B.-Create-input-payload)
# 3. [Perform real-time inference](#C.-Perform-real-time-inference)
# 4. [Visualize output](#D.-Visualize-output)
# 5. [Delete the endpoint](#E.-Delete-the-endpoint)
# 3. [Perform batch inference](#3.-Perform-batch-inference)
# 4. [Clean-up](#4.-Clean-up)
# 1. [Delete the model](#A.-Delete-the-model)
# 2. [Unsubscribe to the listing (optional)](#B.-Unsubscribe-to-the-listing-(optional))
#
#
# #### Usage instructions
# You can run this notebook one cell at a time (By using Shift+Enter for running a cell).
# ### 1. Subscribe to the model package
# To subscribe to the model package:
# 1. Open the model package listing page <font color='red'> For Seller to update:[Title_of_your_product](Provide link to your marketplace listing of your product).</font>
# 1. On the AWS Marketplace listing, click on the **Continue to subscribe** button.
# 1. On the **Subscribe to this software** page, review and click on **"Accept Offer"** if you and your organization agrees with EULA, pricing, and support terms.
# 1. Once you click on **Continue to configuration button** and then choose a **region**, you will see a **Product Arn** displayed. This is the model package ARN that you need to specify while creating a deployable model using Boto3. Copy the ARN corresponding to your region and specify the same in the following cell.
model_package_arn='arn:aws:sagemaker:us-east-2:786796469737:model-package/steel-surface-defects-classifier-v1'
import base64
import json
import uuid
from sagemaker import ModelPackage
import sagemaker as sage
from sagemaker import get_execution_role
from sagemaker import ModelPackage
from urllib.parse import urlparse
import boto3
from IPython.display import Image
from PIL import Image as ImageEdit
import urllib.request
import numpy as np
# +
role = get_execution_role()
sagemaker_session = sage.Session()
bucket=sagemaker_session.default_bucket()
bucket
# -
# ### 2. Create an endpoint and perform real-time inference
# If you want to understand how real-time inference with Amazon SageMaker works, see [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html).
# +
model_name='steel-defects'
content_type='application/zip'
real_time_inference_instance_type='ml.m5.large'
batch_transform_inference_instance_type='ml.m5.large'
# -
# #### A. Create an endpoint
# +
def predict_wrapper(endpoint, session):
return sage.predictor.Predictor(endpoint, session,content_type)
#create a deployable model from the model package.
model = ModelPackage(role=role,
model_package_arn=model_package_arn,
sagemaker_session=sagemaker_session,
predictor_cls=predict_wrapper)
#Deploy the model
predictor = model.deploy(1, real_time_inference_instance_type, endpoint_name=model_name)
# -
# Once endpoint has been created, you would be able to perform real-time inference.
# #### B. Create input payload
file_name = 'Input.zip'
# <Add code snippet that shows the payload contents>
# #### C. Perform real-time inference
# !aws sagemaker-runtime invoke-endpoint \
# --endpoint-name $model_name \
# --body fileb://$file_name \
# --content-type $content_type \
# --region $sagemaker_session.boto_region_name \
# output.csv
# #### D. Visualize output
import pandas as pd
pd.read_csv("output.csv",header=None)
# #### E. Delete the endpoint
# Now that you have successfully performed a real-time inference, you do not need the endpoint any more. You can terminate the endpoint to avoid being charged.
predictor=sage.predictor.Predictor(model_name, sagemaker_session,content_type)
predictor.delete_endpoint(delete_endpoint_config=True)
# ### 3. Perform batch inference
# In this section, you will perform batch inference using multiple input payloads together. If you are not familiar with batch transform, and want to learn more, see these links:
# 1. [How it works](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-batch-transform.html)
# 2. [How to run a batch transform job](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html)
#upload the batch-transform job input files to S3
transform_input_folder = "Input"
transform_input = sagemaker_session.upload_data(transform_input_folder, key_prefix=model_name)
print("Transform input uploaded to " + transform_input)
#Run the batch-transform job
transformer = model.transformer(1, batch_transform_inference_instance_type)
transformer.transform(transform_input, content_type=content_type)
transformer.wait()
#output is available on following path
transformer.output_path
import os
s3_conn = boto3.client("s3")
with open('output2.csv', 'wb') as f:
s3_conn.download_fileobj(bucket, os.path.basename(transformer.output_path)+'/Input.zip.out', f)
print("Output file loaded from bucket")
pd.read_csv("output2.csv",header=None)
# ### 4. Clean-up
# #### A. Delete the model
model.delete_model()
# #### B. Unsubscribe to the listing (optional)
# If you would like to unsubscribe to the model package, follow these steps. Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model.
#
# **Steps to unsubscribe to product from AWS Marketplace**:
# 1. Navigate to __Machine Learning__ tab on [__Your Software subscriptions page__](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=mlmp_gitdemo_indust)
# 2. Locate the listing that you want to cancel the subscription for, and then choose __Cancel Subscription__ to cancel the subscription.
#
#
| Steel Surface Defects Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import geocoder
import time
import geocoder
from folium.plugins import HeatMap
import folium
df = pd.read_excel("lomonosovsky_removed.xlsx")
df = df.drop(["Unnamed: 0","ссылка",
"Серия, тип постройки здания:",
"площадь земельного участка, входящего в состав общего имущества в многоквартирном доме, кв.м"],axis=1)
df
df1 = df
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
df_uik.drop(["uik_mapurl","uik_phone"],axis=1,inplace = True)
# +
df_uik = pd.read_csv("moscow_uik_buildings.csv")
df_uik
# +
coordinates = [[0,0]]
coord_num = [0]
for index, row in df.iterrows():
temp_adress = row['адрес'].replace('д.', "")
temp_adress = temp_adress.replace(', к. ', " к")
temp_adress = temp_adress.replace('пр-кт.', "проспект")
wa = (temp_adress.split(' '))
for i,word in enumerate(wa):
if word == "Ленинский,":
wa[i-1],wa[i] = wa[i].replace(',', ""),wa[i-1]
temp_adress = ' '.join(wa)
g = geocoder.osm(temp_adress)
print(temp_adress)
if g.osm == None:
print("Обновил адрес до...",temp_adress)
temp_adress = temp_adress.replace('е', "ё")
g = geocoder.osm(temp_adress)
print('{} {}'.format(g.osm['y'], g.osm['x']))
coordinates.append([g.osm['y'],g.osm['x']])
coord_num.append(g.osm['y']+g.osm['x'])
time.sleep(0.5)
del coord_num[0]
del coordinates[0]
df1['координаты'] = pd.Series(coordinates, index=df1.index)
df1['сумма'] = pd.Series(coord_num, index=df1.index)
# -
wa = ('г. Москва, проспект Ленинский, 85 к4'.split(' '))
for i,word in enumerate(wa):
if word == "Ленинский,":
wa[i-1],wa[i] = wa[i].replace(',', ""),wa[i-1]
' '.join(wa)
df1
g = geocoder.osm("г. Москва, ул. <NAME>, 4")
print(g.osm)
uik_lom =uik_lom.append(df_uik[df_uik.street_name == ])
uik_lom = pd.DataFrame()
uik_streets = ["строителей ул.",
"вавилова ул.",
"вернадского просп.",
"пилюгина академика ул.",
"кравченко ул.",
"крупской ул.",
"ленинский просп.",
"ульяновой марии ул.",
"нахимовский просп.",
"панферова ул.",
"гарибальди ул."
]
for street in uik_streets:
uik_lom = uik_lom.append(df_uik[df_uik.street_name == street])
uik_lom
df2 = uik_lom
coordinates = [[0,0]]
coord_num = [0]
prev = 0
for index, row in df2.iterrows():
if row['street_name'] == "вернадского просп.":
temp_adress = "проспект вернадского"
elif row['street_name'] == "ленинский просп.":
temp_adress = "ленинский проспект"
elif row['street_name'] == "нахимовский просп.":
temp_adress = "нахимовский проспект"
elif row['street_name'] == "панферова ул.":
temp_adress = "улица панфёрова"
else:
temp_adress = ' '.join(row['street_name'].split(' ')[::-1])
temp_adress = "Москва, " + temp_adress + " " + row['building_name']
#print(row['building_name'])
#temp_adress = row['адрес'].replace('д.', "")
temp_adress = temp_adress.replace('корп. ', "к")
g = geocoder.osm(temp_adress)
print(temp_adress)
if g.osm == None:
temp_adress = temp_adress.replace('е', "ё")
#print("Обновил адрес до...",temp_adress)
g = geocoder.osm(temp_adress)
if prev == g.osm['y']:
print("SAME!")
#print('{} {}'.format(g.osm['y'], g.osm['x']))
#break
else:
prev = g.osm['y']
#print('{} {}'.format(g.osm['y'], g.osm['x']))
print(index)
coordinates.append([g.osm['y'],g.osm['x']])
coord_num.append(g.osm['y']+g.osm['x'])
time.sleep(0.5)
del coord_num[0]
del coordinates[0]
df2['координаты'] = pd.Series(coordinates, index=df2.index)
df2['сумма'] = pd.Series(coord_num, index=df2.index)
"52 к1" in banned
df2 = df2.drop(["uik_phone","voteplace_phone",
"voteplace_mapurl",
"voteplace_address","voteplace_description"],axis=1)
df2
df3 = df1.merge(df2, left_on='сумма', right_on='сумма')
df3 = df3.drop(["street_name","building_name","координаты_y"],axis=1)
df3
df3.to_excel("lomonosovsky_w_uik.xlsx")
df3['density'] = df3['общая площадь, кв.м'] / df3['Этажность']
df3
# +
Map = folium.Map(location=[55.68046445,37.54478094999999], zoom_start = 15, tiles = "CartoDB Positron", name='1111')
arr = []
df4 = df3.fillna(0)
for index, row in df4.iterrows():
arr.append([row['координаты_x'][0],row['координаты_x'][1],row['density']])
hm = HeatMap(arr,name='Тепловая карта плотности населения',blur=11,radius=20)
hm.add_to(Map)
Map
# +
arr = []
df4 = df3.fillna(0)
for index, row in df4.iterrows():
a = row['координаты_x'].replace('[', "")
a = a.replace(']', "")
a = a.replace(' ', "")
a = a.split(',')
arr.append([float(a[0]),float(a[1])])
arr
# -
df1.to_excel("df1.xlsx")
df2.to_excel("df2.xlsx")
df3.to_excel("df3.xlsx")
df4.to_excel("df4.xlsx")
# +
df3 = pd.read_excel('df3.xlsx')
df4 = df3.fillna(0)
gmaps.configure(api_key="AIzaSyC89kb2Ud2Tke615oRVB87l90a9i-2yJAk")
import gmaps
#locations = df4[['координаты_x'][0],row['координаты_x'][1]]
weights = df4['density']
fig = gmaps.figure(layout={'width': '1000px', 'height': '1000px'})
fig.add_layer(gmaps.heatmap_layer(arr, weights=weights,point_radius=40))
fig
# -
embed_minimal_html('export.html', views=[fig])
from ipywidgets.embed import embed_minimal_html
| .ipynb_checkpoints/lomonosovsky_vizualization-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Load data using CSV
# +
import pandas as pd
df = pd.read_csv(r'\Data\daily-total-female-births-CA.csv')
df.head(5)
# sep can be like ; |
# -
# ## Load data using EXCEL
import pandas as pd
dfExcel = pd.read_excel(r'\Data\istambul_stock_exchange.xlsx', sheet_name = 'Data')
dfExcel.head(5)
# ## Load data using JSON
dfJson = pd.read_json(r'\Data\test.json')
dfJson.head(5)
# ## Load Data using URL
dfURL = pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data', names =['Sex',
'Length','Diameter', 'Height','Whole weight', 'Shucked weight','Viscera weight', 'Shell weight', 'Rings'])
dfURL.head(5)
# ## Common operations between pandas and pandasql
# ## Selecting top 5
# +
import pandas as pd
dfp = pd.read_excel(r'\Data\Absenteeism_at_work.xls')
dfp.head(5)
# +
from pandasql import sqldf
dfpsql = pd.read_excel(r'\Data\Absenteeism_at_work.xls')
Query_string = """ select * from dfpsql limit 5 """
sqldf(Query_string, globals())
# -
# ## Applying Filter
dfp[(dfp['Age'] >=30) & (dfp['Age'] <=45)]
Query_string = """ select * from dfpsql where age>=30 and age<=45 """
sqldf(Query_string, globals())
# ## Distince(Unique)
dfp['ID'].unique()
Query_string = """ select distinct ID from dfpsql;"""
sqldf(Query_string, globals())
# ## IN & NOT IN
# +
dfp[dfp.Age.isin([20,30,40])]
# -
Query_string = """ select * from dfpsql where Age in(20,30,40);"""
sqldf(Query_string, globals())
# +
dfp[~dfp.Age.isin([20,30,40])]
# -
Query_string = """ select * from dfpsql where Age not in(20,30,40);"""
sqldf(Query_string, globals())
# ## Order by
dfp.sort_values(by = ['Age','Service_time'], ascending= True)
Query_string = """ select * from dfpsql order by Age,Service_time;"""
sqldf(Query_string, globals())
#Desending
dfp.sort_values(by = ['Age','Service_time'], ascending= False)
Query_string = """ select * from dfpsql order by Age Desc,Service_time Desc;"""
sqldf(Query_string, globals())
# ## Aggregration
dfp.agg({'Transportation_expense': ['count','min', 'max', 'mean']})
# +
Query_string = """ select count(Transportation_expense) as count, min(Transportation_expense) as min, max(Transportation_expense) as max, avg(Transportation_expense) as mean from dfp;"""
sqldf(Query_string, globals())
# -
# ## Group by
dfp.groupby('ID')['Service_time'].sum()
# +
Query_string = """ select ID , sum(Service_time) as Sum_Service_time from dfp
group by ID;"""
sqldf(Query_string, globals())
# -
# ## Group by with Aggregration
dfp.groupby('Reason_for_absence').agg({'Age': ['mean','min','max']})
# +
Query_string = """ select Reason_for_absence , avg(Age) as mean, min(Age) as min, max(Age) as max from dfp
group by Reason_for_absence;"""
sqldf(Query_string, globals())
# -
# ## join (merge)
#
# +
import pandas as pd
data1 = {
'Empid': [1011, 1012, 1013, 1014, 1015],
'Name': ['John', 'Rahul', 'Rick', 'Morty', 'Tim'],
'Designation': ['Manager', 'Research Engineer', ' Research Engineer', 'VP', 'Delivery Manager'],
'Date_of_joining': ['01-Jan-2000', '23-sep-2006', '11-Jan-2012', '21-Jan-1991', '12-Jan-1990']}
Emp_df = pd.DataFrame(data1, columns = ['Empid', 'Name', 'Designation','Date_of_joining'])
Emp_df
# -
data2 = {
'Empid': [1011, 1017, 1013, 1019, 1015],
'Deptartment': ['Management', 'Research', 'Research', 'Management', 'Delivery'],
'Total_Experience': [18, 10, 10, 28, 22]}
Dept_df = pd.DataFrame(data2, columns = ['Empid', 'Deptartment', 'Total_Experience'])
Dept_df
# Inner Join
pd.merge(Emp_df, Dept_df, left_on='Empid',right_on='Empid', how='inner')
# Inner Join
Query_string = """ select * from Emp_df a INNER JOIN Dept_df b ON a.Empid = b.Empid;"""
sqldf(Query_string, globals())
# Left Join
pd.merge(Emp_df, Dept_df,left_on='Empid',right_on='Empid', how='left')
# Left Join
Query_string = """ select * from Emp_df a LEFT JOIN Dept_df b ON a.Empid = b.Empid;"""
sqldf(Query_string, globals())
# Right Join
pd.merge(Emp_df, Dept_df,left_on='Empid',right_on='Empid', how='right')
# Right Join
Query_string = """ select a.Empid,Name,Designation,Date_of_joining,Deptartment,Total_Experience from Dept_df a LEFT JOIN Emp_df b ON a.Empid = b.Empid;"""
sqldf(Query_string, globals())
# ##### Note : Exact resulting OUTER join is not currenlty supported in SQL lite, Below snippet is equivalent one
# Outer Join
pd.merge(Emp_df, Dept_df,left_on='Empid',right_on='Empid', how='outer')
# OUTER join
Query_string = """ select * from Emp_df a left OUTER JOIN Dept_df b ON a.Empid = b.Empid;"""
sqldf(Query_string, globals())
# ## Summary of the DataFrame
import pandas as pd
dfsumm = pd.read_csv(r'https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data', names =['Sex',
'Length','Diameter', 'Height','Whole weight', 'Shucked weight','Viscera weight', 'Shell weight', 'Rings'])
dfsumm.head(5)
dfsumm.describe()
# ## Resampling
import pandas as pd
df = pd.read_csv(r'\Data\daily-total-female-births-CA.csv',index_col =0,
parse_dates=['date'])
df.head(5)
df.births.resample('M').mean()
df.births.resample('Q').mean()
df.births.resample('Y').mean()
df.births.resample('W').mean()
df.births.resample('SM').mean()
# ## Windowing Function
import pandas as pd
dfExcelwin = pd.read_excel(r'\Data\istambul_stock_exchang.xlsx', sheet_name = 'Data'
,index_col =0,
parse_dates=['date'])
dfExcelwin.head(5)
#rolling Window
dfExcelwin.rolling(window=4).mean().head(10)
#Expanding Window
dfExcelwin.expanding(min_periods=4).mean().head(10)
# .EVM
dfExcelwin.ewm(com=0.5).mean().head(10)
# ## Shifting
import pandas as pd
dfshift = pd.read_excel(r'\Data\istambul_stock_exchange.xlsx', sheet_name = 'Data'
,index_col =0,
parse_dates=['date'])
dfshift.head(5)
dfshift.shift(periods=3).head(7)
dfshift.shift(periods=-1).head(7)
dfshift.shift(periods=3, axis =1).head(7)
dfshift.shift(periods=3,fill_value=0).head(7)
# # Handling missing data
import pandas as pd
dfmiss = pd.read_csv(r'\Data\daily-total-female-births-CA-with_nulls.csv',index_col =0,
parse_dates=['date'])
dfmiss
# Snippet to check for nulls
dfmiss.isnull().sum()
dfmiss.bfill()
dfmiss.ffill()
dfmiss.fillna(10)
dfmiss.interpolate(method='linear',limit_direction='forward')
import numpy as np
from sklearn.impute import KNNImputer
imputer = KNNImputer(n_neighbors=3)
knnimp_df = imputer.fit_transform(dfmiss)
knnimp_df
| hands-on-time-series-analylsis-python/Chapter 2/Data wrangling using pandas and pandasql.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import xarray as xr
# #Data downloaded from http://www.unidata.ucar.edu/software/netcdf/examples/wrfout_v2_Lambert.nc #
ds=xr.open_dataset('wrfout_v2_Lambert.nc')
ds
ds.dims
# #This data has nine dimensions#
ds.data_vars
# #All the above displayed are the variables for the data set#
ds.coords
# #The above are the coordinates of the data#
ds.attrs
# #Above is the metadata
# # ATTRIBUTES OF VARIABLES
ds.Times
# This data has been captured for 13 time intervals, numbered zero to twelve, from 01/24/2000 12 noon to midnight of 01/25/2000 respectively#
ds.LU_INDEX
# #This "Land Use Category" is a function of three axis : time,south_north,west_east
ds.U
ds.LU_INDEX.values
ds.U.values
ds.QVAPOR.values
ds.U.shape
# #The shape of U ndarray is as above
print(ds.U[3][26][59][70])
# #Accessing a particular element value from the 4d array of variable "U". "3" corresponds to Times,"26" corresponds to bottom_top etc...
ds.U>=17
np.mean(ds.U)
np.max(ds.U)
import matplotlib.pyplot as plt
import xarray.plot as xplt
| First Wrf data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit (conda)
# name: python385jvsc74a57bd0dca0ade3e726a953b501b15e8e990130d2b7799f14cfd9f4271676035ebe5511
# ---
# cd "./same_class/"
# +
# %load_ext autoreload
# %autoreload 2
# target image
import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names, truncated_noise_sample,
save_as_images, display_in_terminal)
import logging
logging.basicConfig(level=logging.INFO)
model = BigGAN.from_pretrained('biggan-deep-128')
truncation = 0.5
class_vector = one_hot_from_names(["dog"], batch_size=1)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=1, seed = 10)
noise_vector = torch.from_numpy(noise_vector)
class_vector = torch.from_numpy(class_vector)
with torch.no_grad():
output = model(noise_vector, class_vector, truncation)
save_as_images(output, "dog_target")
# +
from torchvision.models import squeezenet1_0
from tqdm import trange
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = model.eval().to(DEVICE)
semantic_model = squeezenet1_0(pretrained=True).to(DEVICE)
semantic_model.classifier = torch.nn.Sequential(
torch.nn.Flatten()
)
semantic_model = semantic_model.eval()
trunction = 0.5
noise = truncated_noise_sample(truncation=truncation, batch_size=1, seed=9)
noise = torch.nn.Parameter(torch.tensor(noise, requires_grad=True).float().to(DEVICE))
noise_optim = torch.optim.Adam([noise], lr=0.05)
class_vector = one_hot_from_names(['dog'], batch_size=1)
class_vector = torch.from_numpy(class_vector)
L = []
L_pixel = []
L_semantic = []
for iteration in trange(0, 200):
noise_optim.zero_grad()
y_hat = model(noise, class_vector, truncation)
semantic_loss = -((semantic_model(y_hat) - semantic_model(output)) ** 2).mean() ** .5 #-cos_sim(semantic_model(y_hat), semantic_model(output))
L_semantic.append(semantic_loss.item())
pixel_loss = abs(y_hat - output).mean()
L_pixel.append(pixel_loss.item())
loss = semantic_loss + 30 * pixel_loss
L.append(loss.item())
loss.backward()
noise_optim.step()
if iteration % 2 == 0:
save_as_images(y_hat, f"dog2dog_{iteration}")
# +
import matplotlib.pyplot as plt
plt.plot(L)
plt.plot([x*30 for x in L_pixel], 'r')
plt.plot(L_semantic, 'b')
plt.show()
# -
| dog2dog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
#default_exp utils
# -
#hide
# %load_ext autoreload
# %autoreload 2
# # Utils
#
# > Utilities used in the rest of the notebooks
#export
from dvats.imports import *
from fastcore.all import *
import wandb
import pickle
import pandas as pd
import numpy as np
#import tensorflow as tf
import torch.nn as nn
from fastai.basics import *
# ### Generate random time series dataframe
#export
def generate_TS_df(rows, cols):
"Generates a dataframe containing a multivariate time series, where each column \
represents a variable and each row a time point (sample). The timestamp is in the \
index of the dataframe, and it is created with a even space of 1 second between samples"
index = np.arange(pd.Timestamp.now(),
pd.Timestamp.now() + pd.Timedelta(rows-1, 'seconds'),
pd.Timedelta(1, 'seconds'))
data = np.random.randn(len(index), cols)
return pd.DataFrame(data, index=index)
df = generate_TS_df(3, 5)
test_eq(df.shape, (3, 5))
# ## pandas Dataframe utilities
# ### Normalize columns
#export
def normalize_columns(df:pd.DataFrame):
"Normalize columns from `df` to have 0 mean and 1 standard deviation"
mean = df.mean()
std = df.std() + 1e-7
return (df-mean)/std
foo = generate_TS_df(3, 3)
foo.describe()
bar = normalize_columns(foo)
bar.describe()
test_close(bar.describe().loc['mean'].values, np.repeat(0.0, len(bar.columns)))
test_close(bar.describe().loc['std'].values, np.repeat(1.0, len(bar.columns)))
# ### Remove constant columns
#export
def remove_constant_columns(df:pd.DataFrame):
return df.loc[:, (df != df.iloc[0]).any()]
foo = generate_TS_df(3, 3)
foo['constant'] = [0.0]*len(foo)
foo
bar = remove_constant_columns(foo)
bar
column_diff = set(foo.columns) - set(bar.columns)
test_eq_type(column_diff, set(['constant']))
# ## Create wandb artifact containing just the reference to an object pass as argument
#export
class ReferenceArtifact(wandb.Artifact):
default_storage_path = Path('data/wandb_artifacts/') # * this path is relative to Path.home()
"This class is meant to create an artifact with a single reference to an object \
passed as argument in the contructor. The object will be pickled, hashed and stored \
in a specified folder."
@delegates(wandb.Artifact.__init__)
def __init__(self, obj, name, type='object', folder=None, **kwargs):
super().__init__(type=type, name=name, **kwargs)
# pickle dumps the object and then hash it
hash_code = str(hash(pickle.dumps(obj)))
folder = Path(ifnone(folder, Path.home()/self.default_storage_path))
with open(f'{folder}/{hash_code}', 'wb') as f:
pickle.dump(obj, f)
self.add_reference(f'file://{folder}/{hash_code}')
if self.metadata is None:
self.metadata = dict()
self.metadata['ref'] = dict()
self.metadata['ref']['hash'] = hash_code
self.metadata['ref']['type'] = str(obj.__class__)
foo = np.arange(10)
bar = ReferenceArtifact(obj=foo, name='foo', folder='.')
bar_path = Path(f'./{bar.metadata["ref"]["hash"]}')
test_eq(bar_path.exists(), True)
test_eq(bar.metadata['ref']['type'], "<class 'numpy.ndarray'>")
# When a reference artifact is used by one wandb run, we should have a method to get the original object from it
#export
@patch
def to_obj(self:wandb.apis.public.Artifact):
"""Download the files of a saved ReferenceArtifact and get the referenced object. The artifact must \
come from a call to `run.use_artifact` with a proper wandb run."""
if self.metadata.get('ref') is None:
print(f'ERROR:{self} does not come from a saved ReferenceArtifact')
return None
original_path = ReferenceArtifact.default_storage_path/self.metadata['ref']['hash']
path = original_path if original_path.exists() else Path(self.download()).ls()[0]
with open(path, 'rb') as f:
obj = pickle.load(f)
return obj
# Test with Reference artifact from a df
foo = generate_TS_df(3, 3)
bar = ReferenceArtifact(obj=foo, name='test_reference_artifact')
bar.manifest.entries.values()
test_eq(bar.name, 'test_reference_artifact')
test_eq(bar.metadata['ref']['type'], str(type(foo)))
# TODO: Test method `to_obj`
# ReferenceArtifact with a numpy array
foo = np.random.randn(5)
bar = ReferenceArtifact(obj=foo, name='test_reference_artifact')
bar.manifest.entries.values()
test_eq(bar.metadata['ref']['type'], str(type(foo)))
#export
import torch.nn as nn
class PrintLayer(nn.Module):
def __init__(self):
super(PrintLayer, self).__init__()
def forward(self, x):
# Do your print / debug stuff here
print(x.shape)
return x
#export
@patch
def export_and_get(self:Learner, keep_exported_file=False):
"""
Export the learner into an auxiliary file, load it and return it back.
"""
aux_path = Path('aux.pkl')
self.export(fname='aux.pkl')
aux_learn = load_learner('aux.pkl')
if not keep_exported_file: aux_path.unlink()
return aux_learn
# ### get_wandb_artifacts
#export
def get_wandb_artifacts(project_path, type=None, name=None, last_version=True):
"""
Get the artifacts logged in a wandb project.
Input:
- `project_path` (str): entity/project_name
- `type` (str): whether to return only one type of artifacts
- `name` (str): Leave none to have all artifact names
- `last_version`: whether to return only the last version of each artifact or not
Output: List of artifacts
"""
public_api = wandb.Api()
if type is not None:
types = [public_api.artifact_type(type, project_path)]
else:
types = public_api.artifact_types(project_path)
res = L()
for kind in types:
for collection in kind.collections():
if name is None or name == collection.name:
versions = public_api.artifact_versions(
kind.type,
"/".join([kind.entity, kind.project, collection.name]),
per_page=1,
)
if last_version: res += next(versions)
else: res += L(versions)
return list(res)
foo = get_wandb_artifacts('wandb/artifacts-example', type='model')
test_eq(len(foo), 2)
foo = get_wandb_artifacts('wandb/artifacts-example', type='model', name='convnet')
test_eq(len(foo), 1)
foo = get_wandb_artifacts('wandb/artifacts-example', type='model', name='convnet', last_version=False)
test_eq(len(foo), 2)
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
beep(1)
| nbs/utils.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # How to fit a rise time to an exponential instability with FITX
#
# FITX is a small library to help isolate and fit exponential rise times in unstable systems with saturation.
#
# In the following we show how to use the libary with the example of a dynamical instability in a particle accelerator which stops due to machine non-linearities.
#
# Copyright CERN, <NAME>, 2019
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_context('talk', font_scale=1.4, rc={'lines.linewidth': 3})
sns.set_style('whitegrid',
{'grid.linestyle': ':', 'grid.color': 'red', 'axes.edgecolor': '0.5',
'axes.linewidth': 1.2, 'legend.frameon': True})
# -
# ## The Data
#
# We have stored the centroid motion of the unstable beam. Here we use both the $x$ and $x'$ data to obtain a purely positive signal to be fit exponentially. The quadrature signal $x'$ can also be obtained by using a `Hilbert` filter on $x$ (see e.g. `scipy.signal.Hilbert` with its imaginary part), e.g. for measurement data in a particle accelerator. In the present example, the data comes from a simulation with octupole amplitude detuning leading to a saturation effect.
mean_x = np.loadtxt('./example_mean_x.dat')
mean_xp = np.loadtxt('./example_mean_xp.dat')
plt.plot(mean_x)
plt.xlabel('Turns')
plt.ylabel('Horizontal centroid position');
# Let's construct the envelope or amplitude signal by using the quadrature signal $x'$:
beta_x = 92.759
signal_x = np.sqrt((mean_x)**2 + (beta_x * mean_xp)**2)
plt.plot(signal_x)
plt.xlabel('Turns')
plt.ylabel("Horizontal amplitude\n" +
r"$\sqrt{\langle x \rangle^2 + \beta_x^2\langle x'\rangle^2}$");
# ## The Instability Fit
#
# Now let's use FITX in order to isolate the pure exponential instability from this positive signal and fit the rise time:
from FITX import fit_risetime
# +
# numerical parameters
smoothing_window_size = 2000
plt.figure(figsize=(8, 5))
# set a minimum level below which the instability is not fit
min_level = 5 * np.max(signal_x[:1000])
# FITX me! --> returns the rise time in turns
rx = fit_risetime(
signal_x, min_level=min_level,
smoothing_window_size=smoothing_window_size,
matplotlib_axis=plt.gca()
)
# plotting
plt.title(r"Horizontal amplitude $\sqrt{\langle x \rangle^2 + \beta_x^2\langle x'\rangle^2}$" +
"\n" + r"exponential fit: {:.1f} turns".format(rx));
plt.ylabel(r'Centroid amplitude $J_{\langle x \rangle}$')
plt.xlabel('Turns')
for l in plt.gca().xaxis.get_ticklabels():
l.set_rotation(15)
l.set_horizontalalignment('center')
plt.plot(signal_x[:500000], ls=':', color='green', zorder=-10)
plt.savefig('fitting.png', bbox_inches='tight')
# -
# Note the isolated region between the red bars. The algorithm fits starting from a positive curvature point and stops when the curvature turns negative. Through this solid approach we avoid fitting the saturated part of the instability. The original signal is plotted in green. The fit itself is plotted with the orange broken line.
#
# We obtain an exponential rise time of...
print ('... {:.1f} turns!'.format(rx))
| HowTo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 2
#
# (๑• .̫ •๑)
#
# Your last pokemon adventure went well, but you aren't quite the very best like no one ever was. Faithful to your data scientist ways, you decide to further analyse your pokedex to improve your training.
#
# The data can be found under `pokedex/pokemons.csv`, and is the same as assignment 1. Run the cell below to get an overview of the dataset:
# +
import pandas as pd
import numpy as np
df = pd.read_csv('pokedex/pokemons.csv')
df.head()
# -
# ## Problem 1
#
# Analysing and grouping "smart" pokemons by `Type 1` wasn't very successful last assignment: we got a headache from trying to train a Psyduck. Since then however, we learnt a powerful unsupervised learning method for analysing **clusters** in our datasets.
#
# 💪 **Task: Use k-Means clustering to find 4 clusters in the pokemon dataset, and store the predictions in a vector called `y_kmeans`.**
# Pro-tip 1: You should only take into account the `Attack`, `Defense`, `Sp. Atk`, `Sp. Def`, `Speed`, and `HP` columns.
# Pro-tip 2: Please use the `random_state=42` argument when constructing your sklearn class, to make sure your results are reproducible. Marks won't be taken off for using the wrong random seed, but the unit tests won't pass!
# Pro-tip 3: We have seen in lectures that sklearn expects NumPy `ndarray`s as argument to its training and prediction methods. Whilst that is true, it can also accept pandas `DataFrame`s directly, since these are `ndarray` wrappers. You can use whichever you prefer.
# +
# INSERT YOUR CODE HERE
# +
def test_kmeans():
assert len(y_kmeans) == 800, f'The size of your prediction vector is wrong: {len(y_kmeans)}. There should be 800, one per pokemon.'
unique_clusters = len(np.unique(y_kmeans))
assert unique_clusters == 4, f'There should 4 unique clusters, your prediction vector has {unique_clusters}'
assert y_kmeans.mean() == 1.5025, f'Something is not quite right with your prediction vector. Have you used a random seed of 42?'
print('Success! 🎉')
return
test_kmeans()
# -
# ## Problem 2
#
# Now that we have clustered our pokemons, we'd like to explore these groups. Specifically, we'd like to know the mean stats of each cluster, so we can compare their average strengths and weaknesses.
#
#
# 💪 **Task: Group the pokemons by cluster, and calculate the mean statistics of each group. Save this in a `DataFrame` called `cluster_means`. For example, you should be able to clearly read the average `Defense` of cluster 2 in your `cluster_means` `DataFrame`.**
# Pro-tip 1: Adding a `Cluster` column to `df` will allow you to work on a single `DataFrame` and make the task much easier 🙃
# Pro-tip 2: You should only expect numerical columns in `cluster_means`, since the mean of a string is undefined.
# +
# INSERT YOUR CODE HERE
# +
import math
def test_cluster_means():
assert len(cluster_means) == 4, f'Your dataframe has {len(cluster_means)} rows, but 4 are expected: one per cluster'
assert 'Attack' in cluster_means.columns, f'Your dataframe should contain the Attack column'
assert math.isclose(cluster_means.values.sum(), 5276.0872, rel_tol=1e-5), f'Something is not quite right with your cluster means. Have you used a random seed of 42?'
print('Success! 🎉')
return cluster_means
test_cluster_means()
# -
# 🧠 **Bonus Question: Inspect the clusters and their traits. What do you think the clusters represent? Try to identify what makes each cluster stand out and qualitatively describe the "identity" of each cluster.**
#
# ℹ️ Notice how building these kinds of clustered "profiles" is beyond anything we could have done just by manipulating the `DataFrame`. Last assignment, we split the pokemons by types, but k-Means takes into account the _density_ of the dataset to create more natural groupings.
# ## Problem 3
#
# We're getting an idea of what our clusters represent, and how their distributions vary. However, we have recently acquired data visualization powers ⚡️, so we'd like to visualize these differences.
#
# 💪 **Task: Visualize some aspect of `cluster_means`. Feel free to focus on a particular column, or to aggregate some of the data. The graph should show some differences between the clusters. Be creative!**
# Pro-tip 1: Don't overthink the chart content, you will mostly be graded on healthy visualization practices.
# Pro-tip 2: Try to use the matplotlib api instead of the `Dataframe.plot` built in pandas. This should give you more control and allow you to create a more effective visualization.
# +
# INSERT YOUR CODE HERE
# -
# 🧠 **Bonus Question: Why you chose this data to plot? Why did you represent it in this particular way?**
# ## Problem 4
#
# We have shown differences in the cluster average statistics with a beautiful graph. Now, we want to visualize the cluster assignments of ALL of the data. However, we have six "stats" columns, and even the world of pokemon is only three dimensional... Prepare for trouble, and make it double, it's time for dimensionality reduction!
#
# 💪 **Task: Reduce the dimensions of the pokemon dataset using PCA. Store the principal components in a NumPy `ndarray` called `components`. The unit test will call a `.plot_PCA()` method to display the data points, and their color coded cluster assignments.**
# Pro-tip 1: You should only use the numerical columns: `Attack`, `Defense`, `Sp. Atk`, `Sp. Def`, `Speed`, and `HP`.
# Pro-tip 2: Think of how many dimensions you must reduce the dataset to, so that we are able to visualize it. It's the same as we did in class!
# Pro-tip 3: Please use the `random_state=42` argument when constructing your sklearn class, to make sure your results are reproducible. Marks won't be taken off for using the wrong random seed, but the unit tests won't pass!
# Pro-tip 4: We have seen in lectures that sklearn expects NumPy `ndarrays` as argument to its training and prediction methods. Whilst that is true, it can also accept pandas `DataFrames` directly, since these are `ndarray` wrappers. You can use whichever you prefer.
# Pro-tip 5: The `plot_PCA()` method uses the `y_kmeans` predictions to pick marker colors. Make sure you have finished problem 1 and run the cells to make it available here.
#
# +
# INSERT YOUR CODE HERE
# +
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def plot_PCA(components):
# assign a color to each prediction
colors = ['blue', 'red', 'green', 'orange']
features_colors = [colors[y] for y in y_kmeans]
# plot the PCA components
fig = plt.figure()
ax = fig.add_subplot('111')
ax.scatter(components[:, 0], components[:, 1],
c=features_colors, marker='o',
alpha=0.4)
ax.set_title('PCA visualization of pokemon k-Means clusters')
legends = [legend(i, c) for i, c in enumerate(colors)]
ax.legend(handles=legends, loc='upper left')
plt.show()
def legend(i, color):
return Line2D([0], [0], marker='o', color='w', label=f'Cluster {i}',markerfacecolor=color, markersize=8)
def test_pca():
rows, columns = components.shape
assert columns == 2, f'Your components have {columns} dimensions. In order to visualise the data, we expect 2 dimensions.'
assert rows == 800, f'Your components have {rows} data points, but 800 are expected, one per pokemon.'
assert math.isclose(components[42, 1], -18.321118, rel_tol=1e-5), f'Something is not quite right with your dimensional reduction. Have you used a random seed of 42?'
print('Success! 🎉')
plot_PCA(components)
test_pca()
# -
# 🧠 **Bonus Question: Do you think this matches the results of problem 2? Why? What do the 2 principal axes seem to represent?**
# ## Problem 5
#
# An Old man once told you how to catch Weedles. 🐛 But he also said that winning battles comes down to unique fighting styles. We want to find the pokemons that stand out the most from the rest.
#
# 💪 **Task: Use gaussian distribution anomaly detection to identify the top 1% of most unique pokemons. Use the resulting predictions vector to filter our `df` `DataFrame`, and save the outlier pokemons in a new `DataFrame` called `outliers`.**
# Pro-tip 1: You should only use the numerical columns: `Attack`, `Defense`, `Sp. Atk`, `Sp. Def`, `Speed`, and `HP`.
# Pro-tip 2: Please use the `random_state=42` argument when constructing your sklearn class, to make sure your results are reproducible. Marks won't be taken off for using the wrong random seed, but the unit tests won't pass!
# Pro-tip 3: We have seen in lectures that sklearn expects NumPy ndarrays as argument to its training and prediction methods. Whilst that is true, it can also accept pandas DataFrames directly, since these are ndarray wrappers. So use whichever you prefer.
# Pro-tip 4: Remember that the `contamination` argument changes the percentage of our dataset we expect to be outliers.
# Pro-tip 5: It could help to add the predictions in an `Outlier` column to the original `df`, to make the filtering of the anomalous pokemons easier 🙃
#
# +
# INSERT YOUR CODE HERE
# +
def test_anomaly_detection():
assert len(outliers) == 8, f'You found {len(outliers)} outliers, but we expected 800 * 1% = 8'
assert outliers['Total'].sum() == 4284, f'Something is not quite right with your anomaly detection. Have you used a random seed of 42?'
print('Success! 🎉')
return outliers
test_anomaly_detection()
# -
# 🧠 **Bonus Question: Is this what you expected? Can you explain why these pokemons are outliers? Can you spot a pattern?**
| assignments/assignment2/assignment2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework and bake-off: Relation extraction using distant supervision
__author__ = "<NAME> from <NAME> and <NAME>"
__version__ = "CS224u, Stanford, Spring 2020"
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [Set-up](#Set-up)
# 1. [Baselines](#Baselines)
# 1. [Hand-build feature functions](#Hand-build-feature-functions)
# 1. [Distributed representations](#Distributed-representations)
# 1. [Homework questions](#Homework-questions)
# 1. [Different model factory [1 points]](#Different-model-factory-[1-points])
# 1. [Directional unigram features [1.5 points]](#Directional-unigram-features-[1.5-points])
# 1. [The part-of-speech tags of the "middle" words [1.5 points]](#The-part-of-speech-tags-of-the-"middle"-words-[1.5-points])
# 1. [Bag of Synsets [2 points]](#Bag-of-Synsets-[2-points])
# 1. [Your original system [3 points]](#Your-original-system-[3-points])
# 1. [Bake-off [1 point]](#Bake-off-[1-point])
# ## Overview
#
# This homework and associated bake-off are devoted to the developing really effective relation extraction systems using distant supervision.
#
# As with the previous assignments, this notebook first establishes a baseline system. The initial homework questions ask you to create additional baselines and suggest areas for innovation, and the final homework question asks you to develop an original system for you to enter into the bake-off.
# ## Set-up
#
# See [the first notebook in this unit](rel_ext_01_task.ipynb#Set-up) for set-up instructions.
import numpy as np
import os
import rel_ext
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
import utils
# As usual, we unite our corpus and KB into a dataset, and create some splits for experimentation:
rel_ext_data_home = os.path.join('data', 'rel_ext_data')
corpus = rel_ext.Corpus(os.path.join(rel_ext_data_home, 'corpus.tsv.gz'))
kb = rel_ext.KB(os.path.join(rel_ext_data_home, 'kb.tsv.gz'))
dataset = rel_ext.Dataset(corpus, kb)
# You are not wedded to this set-up for splits. The bake-off will be conducted on a previously unseen test-set, so all of the data in `dataset` is fair game:
splits = dataset.build_splits(
split_names=['tiny', 'train', 'dev'],
split_fracs=[0.0, 0.8, 0.2],
seed=1)
splits
# ## Baselines
# ### Hand-build feature functions
def simple_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
for word in ex.middle.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.middle.split(' '):
feature_counter[word] += 1
return feature_counter
featurizers = [simple_bag_of_words_featurizer]
model_factory = lambda: LogisticRegression(fit_intercept=True, solver='liblinear')
baseline_results = rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=featurizers,
model_factory=model_factory,
verbose=True)
# Studying model weights might yield insights:
rel_ext.examine_model_weights(baseline_results)
# ### Distributed representations
#
# This simple baseline sums the GloVe vector representations for all of the words in the "middle" span and feeds those representations into the standard `LogisticRegression`-based `model_factory`. The crucial parameter that enables this is `vectorize=False`. This essentially says to `rel_ext.experiment` that your featurizer or your model will do the work of turning examples into vectors; in that case, `rel_ext.experiment` just organizes these representations by relation type.
GLOVE_HOME = os.path.join('data', 'glove.6B')
glove_lookup = utils.glove2dict(
os.path.join(GLOVE_HOME, 'glove.6B.300d.txt'))
def glove_middle_featurizer(kbt, corpus, np_func=np.sum):
reps = []
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
for word in ex.middle.split():
rep = glove_lookup.get(word)
if rep is not None:
reps.append(rep)
# A random representation of the right dimensionality if the
# example happens not to overlap with GloVe's vocabulary:
if len(reps) == 0:
dim = len(next(iter(glove_lookup.values())))
return utils.randvec(n=dim)
else:
return np_func(reps, axis=0)
glove_results = rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=[glove_middle_featurizer],
vectorize=False, # Crucial for this featurizer!
verbose=True)
# With the same basic code design, one can also use the PyTorch models included in the course repo, or write new ones that are better aligned with the task. For those models, it's likely that the featurizer will just return a list of tokens (or perhaps a list of lists of tokens), and the model will map those into vectors using an embedding.
# ## Homework questions
#
# Please embed your homework responses in this notebook, and do not delete any cells from the notebook. (You are free to add as many cells as you like as part of your responses.)
# ### Different model factory [1 points]
#
# The code in `rel_ext` makes it very easy to experiment with other classifier models: one need only redefine the `model_factory` argument. This question asks you to assess a [Support Vector Classifier](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html).
#
# __To submit:__ A wrapper function `run_svm_model_factory` that does the following:
#
# 1. Uses `rel_ext.experiment` with the model factory set to one based in an `SVC` with `kernel='linear'` and all other arguments left with default values.
# 1. Trains on the 'train' part of `splits`.
# 1. Assesses on the `dev` part of `splits`.
# 1. Uses `featurizers` as defined above.
# 1. Returns the return value of `rel_ext.experiment` for this set-up.
#
# The function `test_run_svm_model_factory` will check that your function conforms to these general specifications.
# +
def run_svm_model_factory():
##### YOUR CODE HERE
return rel_ext.experiment(
splits,
train_split='train',
model_factory=(lambda : SVC(
kernel='linear')),
test_split='dev',
featurizers=[simple_bag_of_words_featurizer],
#vectorize=False, # Crucial for this featurizer!
verbose=False
)
# -
def test_run_svm_model_factory(run_svm_model_factory):
results = run_svm_model_factory()
assert 'featurizers' in results, \
"The return value of `run_svm_model_factory` seems not to be correct"
# Check one of the models to make sure it's an SVC:
assert 'SVC' in results['models']['adjoins'].__class__.__name__, \
"It looks like the model factor wasn't set to use an SVC."
if 'IS_GRADESCOPE_ENV' not in os.environ:
pass
test_run_svm_model_factory(run_svm_model_factory)
# ### Directional unigram features [1.5 points]
#
# The current bag-of-words representation makes no distinction between "forward" and "reverse" examples. But, intuitively, there is big difference between _X and his son Y_ and _Y and his son X_. This question asks you to modify `simple_bag_of_words_featurizer` to capture these differences.
#
# __To submit:__
#
# 1. A feature function `directional_bag_of_words_featurizer` that is just like `simple_bag_of_words_featurizer` except that it distinguishes "forward" and "reverse". To do this, you just need to mark each word feature for whether it is derived from a subject–object example or from an object–subject example. The included function `test_directional_bag_of_words_featurizer` should help verify that you've done this correctly.
#
# 2. A call to `rel_ext.experiment` with `directional_bag_of_words_featurizer` as the only featurizer. (Aside from this, use all the default values for `rel_ext.experiment` as exemplified above in this notebook.)
#
# 3. `rel_ext.experiment` returns some of the core objects used in the experiment. How many feature names does the `vectorizer` have for the experiment run in the previous step? Include the code needed for getting this value. (Note: we're partly asking you to figure out how to get this value by using the sklearn documentation, so please don't ask how to do it!)
# +
def directional_bag_of_words_featurizer(kbt, corpus, feature_counter):
# Append these to the end of the keys you add/access in
# `feature_counter` to distinguish the two orders. You'll
# need to use exactly these strings in order to pass
# `test_directional_bag_of_words_featurizer`.
subject_object_suffix = "_SO"
object_subject_suffix = "_OS"
##### YOUR CODE HERE
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
for word in ex.middle.split(' '):
feature_counter[word + '_SO'] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.middle.split(' '):
feature_counter[word + '_OS'] += 1
return feature_counter
# Call to `rel_ext.experiment`:
##### YOUR CODE HERE
rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=[directional_bag_of_words_featurizer],
verbose=True)
# -
def test_directional_bag_of_words_featurizer(corpus):
from collections import defaultdict
kbt = rel_ext.KBTriple(rel='worked_at', sbj='Randall_Munroe', obj='xkcd')
feature_counter = defaultdict(int)
# Make sure `feature_counter` is being updated, not reinitialized:
feature_counter['is_OS'] += 5
feature_counter = directional_bag_of_words_featurizer(kbt, corpus, feature_counter)
expected = defaultdict(
int, {'is_OS':6,'a_OS':1,'webcomic_OS':1,'created_OS':1,'by_OS':1})
assert feature_counter == expected, \
"Expected:\n{}\nGot:\n{}".format(expected, feature_counter)
if 'IS_GRADESCOPE_ENV' not in os.environ:
test_directional_bag_of_words_featurizer(corpus)
# ### The part-of-speech tags of the "middle" words [1.5 points]
#
# Our corpus distribution contains part-of-speech (POS) tagged versions of the core text spans. Let's begin to explore whether there is information in these sequences, focusing on `middle_POS`.
#
# __To submit:__
#
# 1. A feature function `middle_bigram_pos_tag_featurizer` that is just like `simple_bag_of_words_featurizer` except that it creates a feature for bigram POS sequences. For example, given
#
# `The/DT dog/N napped/V`
#
# we obtain the list of bigram POS sequences
#
# `b = ['<s> DT', 'DT N', 'N V', 'V </s>']`.
#
# Of course, `middle_bigram_pos_tag_featurizer` should return count dictionaries defined in terms of such bigram POS lists, on the model of `simple_bag_of_words_featurizer`. Don't forget the start and end tags, to model those environments properly! The included function `test_middle_bigram_pos_tag_featurizer` should help verify that you've done this correctly.
#
# 2. A call to `rel_ext.experiment` with `middle_bigram_pos_tag_featurizer` as the only featurizer. (Aside from this, use all the default values for `rel_ext.experiment` as exemplified above in this notebook.)
'The/DT dog/N napped/V'.split('/')
# +
def middle_bigram_pos_tag_featurizer(kbt, corpus, feature_counter):
##### YOUR CODE HERE
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
pos_list = get_tag_bigrams(ex.middle_POS)
for tag in pos_list:
feature_counter[tag] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
pos_list = get_tag_bigrams(ex.middle_POS)
for tag in pos_list:
feature_counter[tag] += 1
feature_counter.pop('</s>', None)
return feature_counter
def get_tag_bigrams(s):
"""Suggested helper method for `middle_bigram_pos_tag_featurizer`.
This should be defined so that it returns a list of str, where each
element is a POS bigram."""
# The values of `start_symbol` and `end_symbol` are defined
# here so that you can use `test_middle_bigram_pos_tag_featurizer`.
start_symbol = "<s>"
end_symbol = "</s>"
##### YOUR CODE HERE
pos_tags = get_tags(s)
pos_tags.insert(0, start_symbol)
pos_tags.append(end_symbol)
return [' '.join(pos_tags[i:i+2]) for i in range(len(pos_tags))]
def get_tags(s):
"""Given a sequence of word/POS elements (lemmas), this function
returns a list containing just the POS elements, in order.
"""
return [parse_lem(lem)[1] for lem in s.strip().split(' ') if lem]
def parse_lem(lem):
"""Helper method for parsing word/POS elements. It just splits
on the rightmost / and returns (word, POS) as a tuple of str."""
return lem.strip().rsplit('/', 1)
# Call to `rel_ext.experiment`:
##### YOUR CODE HERE
rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=[middle_bigram_pos_tag_featurizer],
verbose=True)
# -
def test_middle_bigram_pos_tag_featurizer(corpus):
from collections import defaultdict
kbt = rel_ext.KBTriple(rel='worked_at', sbj='Randall_Munroe', obj='xkcd')
feature_counter = defaultdict(int)
# Make sure `feature_counter` is being updated, not reinitialized:
feature_counter['<s> VBZ'] += 5
feature_counter = middle_bigram_pos_tag_featurizer(kbt, corpus, feature_counter)
expected = defaultdict(
int, {'<s> VBZ':6,'VBZ DT':1,'DT JJ':1,'JJ VBN':1,'VBN IN':1,'IN </s>':1})
assert feature_counter == expected, \
"Expected:\n{}\nGot:\n{}".format(expected, feature_counter)
if 'IS_GRADESCOPE_ENV' not in os.environ:
test_middle_bigram_pos_tag_featurizer(corpus)
# ### Bag of Synsets [2 points]
#
# The following allows you to use NLTK's WordNet API to get the synsets compatible with _dog_ as used as a noun:
#
# ```
# from nltk.corpus import wordnet as wn
# dog = wn.synsets('dog', pos='n')
# dog
# [Synset('dog.n.01'),
# Synset('frump.n.01'),
# Synset('dog.n.03'),
# Synset('cad.n.01'),
# Synset('frank.n.02'),
# Synset('pawl.n.01'),
# Synset('andiron.n.01')]
# ```
#
# This question asks you to create synset-based features from the word/tag pairs in `middle_POS`.
#
# __To submit:__
#
# 1. A feature function `synset_featurizer` that is just like `simple_bag_of_words_featurizer` except that it returns a list of synsets derived from `middle_POS`. Stringify these objects with `str` so that they can be `dict` keys. Use `convert_tag` (included below) to convert tags to `pos` arguments usable by `wn.synsets`. The included function `test_synset_featurizer` should help verify that you've done this correctly.
#
# 2. A call to `rel_ext.experiment` with `synset_featurizer` as the only featurizer. (Aside from this, use all the default values for `rel_ext.experiment`.)
# +
from nltk.corpus import wordnet as wn
def synset_featurizer(kbt, corpus, feature_counter):
##### YOUR CODE HERE
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
synset_list = get_synsets(ex.middle_POS)
for syn in synset_list:
feature_counter[syn] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
synset_list = get_synsets(ex.middle_POS)
for syn in synset_list:
feature_counter[syn] += 1
return feature_counter
def get_synsets(s):
"""Suggested helper method for `synset_featurizer`. This should
be completed so that it returns a list of stringified Synsets
associated with elements of `s`.
"""
# Use `parse_lem` from the previous question to get a list of
# (word, POS) pairs. Remember to convert the POS strings.
wt = [parse_lem(lem) for lem in s.strip().split(' ') if lem]
##### YOUR CODE HERE
synset_list = []
for i in wt:
word = i[0]
tag = convert_tag(i[1])
synset = wn.synsets(word, pos=tag)
synset = list(map(lambda x:str(x), synset))
synset_list.extend(synset)
return synset_list
def convert_tag(t):
"""Converts tags so that they can be used by WordNet:
| Tag begins with | WordNet tag |
|-----------------|-------------|
| `N` | `n` |
| `V` | `v` |
| `J` | `a` |
| `R` | `r` |
| Otherwise | `None` |
"""
if t[0].lower() in {'n', 'v', 'r'}:
return t[0].lower()
elif t[0].lower() == 'J':
return 'a'
else:
return None
# Call to `rel_ext.experiment`:
##### YOUR CODE HERE
rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=[synset_featurizer],
verbose=True)
# -
def test_synset_featurizer(corpus):
from collections import defaultdict
kbt = rel_ext.KBTriple(rel='worked_at', sbj='Randall_Munroe', obj='xkcd')
feature_counter = defaultdict(int)
# Make sure `feature_counter` is being updated, not reinitialized:
feature_counter["Synset('be.v.01')"] += 5
feature_counter = synset_featurizer(kbt, corpus, feature_counter)
# The full return values for this tend to be long, so we just
# test a few examples to avoid cluttering up this notebook.
test_cases = {
"Synset('be.v.01')": 6,
"Synset('embody.v.02')": 1
}
for ss, expected in test_cases.items():
result = feature_counter[ss]
assert result == expected, \
"Incorrect count for {}: Expected {}; Got {}".format(ss, expected, result)
if 'IS_GRADESCOPE_ENV' not in os.environ:
test_synset_featurizer(corpus)
# ### Your original system [3 points]
#
# There are many options, and this could easily grow into a project. Here are a few ideas:
#
# - Try out different classifier models, from `sklearn` and elsewhere.
# - Add a feature that indicates the length of the middle.
# - Augment the bag-of-words representation to include bigrams or trigrams (not just unigrams).
# - Introduce features based on the entity mentions themselves. <!-- \[SPOILER: it helps a lot, maybe 4% in F-score. And combines nicely with the directional features.\] -->
# - Experiment with features based on the context outside (rather than between) the two entity mentions — that is, the words before the first mention, or after the second.
# - Try adding features which capture syntactic information, such as the dependency-path features used by Mintz et al. 2009. The [NLTK](https://www.nltk.org/) toolkit contains a variety of [parsing algorithms](http://www.nltk.org/api/nltk.parse.html) that may help.
# - The bag-of-words representation does not permit generalization across word categories such as names of people, places, or companies. Can we do better using word embeddings such as [GloVe](https://nlp.stanford.edu/projects/glove/)?
#
# In the cell below, please provide a brief technical description of your original system, so that the teaching team can gain an understanding of what it does. This will help us to understand your code and analyze all the submissions to identify patterns and strategies.
# +
from sklearn.ensemble import GradientBoostingClassifier
rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=[directional_bag_of_words_featurizer, middle_bigram_pos_tag_featurizer],
model_factory=lambda: GradientBoostingClassifier(),
verbose=True)
# -
# Enter your system description in this cell.
# Please do not remove this comment.
'''
The model selected used sklearn's Gradient Boosting as model_factory as it might be more robust to
to noisy data, hence helping distance learning.
Moreover, I decided to include both directional BoW and POS_TAG features; the first one because
directionality seems to help the most -- it may be the case that, given that relations aren't
symmetric, adding those tags help to disambiguate relations. The second as it marginally helps
performance; very likely because they also help in disambiguating.
'''
# ## Bake-off [1 point]
#
# For the bake-off, we will release a test set. The announcement will go out on the discussion forum. You will evaluate your custom model from the previous question on these new datasets using the function `rel_ext.bake_off_experiment`. Rules:
#
# 1. Only one evaluation is permitted.
# 1. No additional system tuning is permitted once the bake-off has started.
#
# The cells below this one constitute your bake-off entry.
#
# People who enter will receive the additional homework point, and people whose systems achieve the top score will receive an additional 0.5 points. We will test the top-performing systems ourselves, and only systems for which we can reproduce the reported results will win the extra 0.5 points.
#
# Late entries will be accepted, but they cannot earn the extra 0.5 points. Similarly, you cannot win the bake-off unless your homework is submitted on time.
#
# The announcement will include the details on where to submit your entry.
# +
# Enter your bake-off assessment code in this cell.
# Please do not remove this comment.
if 'IS_GRADESCOPE_ENV' not in os.environ:
from sklearn.ensemble import GradientBoostingClassifier
bakeoff_results = rel_ext.experiment(
splits,
train_split='train',
test_split='dev',
featurizers=[directional_bag_of_words_featurizer, middle_bigram_pos_tag_featurizer],
model_factory=lambda: GradientBoostingClassifier(),
verbose=False)
rel_ext_data_home_test = os.path.join(
rel_ext_data_home, 'bakeoff-rel_ext-test-data')
rel_ext.bake_off_experiment(bakeoff_results, rel_ext_data_home_test)
# +
# On an otherwise blank line in this cell, please enter
# your macro-average f-score (an F_0.5 score) as reported
# by the code above. Please enter only a number between
# 0 and 1 inclusive. Please do not remove this comment.
if 'IS_GRADESCOPE_ENV' not in os.environ:
pass
# Please enter your score in the scope of the above conditional.
##### YOUR CODE HERE
0.608
# -
| hw_rel_ext.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../Semantic-Analysis"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# -
df = pd.read_csv("../Semantic-Analysis/blogtext.csv")
print(df.shape)
df.head()
df.drop_duplicates(subset="text",inplace=True)
df.text.str.len().describe()
df.text.str.len().plot()
df = df.loc[(df.text.str.len() < 18000) & (df.text.str.len() > 7)]
df.text.str.len().plot()
df.loc[df.text.str.len() < 30].shape
df.describe()
df.age.value_counts()
list = df.topic.unique().tolist()
dictOfWords = { list[i] : i for i in range(0, len(list) ) }
dictOfWords
df["word_count"] = df.text.str.split().str.len()
df["char_length"] = df.text.str.len()
df["id_count"] = df.groupby("id")["id"].transform("count")
df.head(12)
df.date = pd.to_datetime(df.date,errors="coerce",infer_datetime_format=True)
df.tail()
df.drop_duplicates(subset="id")["id_count"].describe()
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
data = pd.read_csv(
"../Semantic-Analysis/blogtext.csv",
usecols=['topic', 'text'] # Only load the three columns specified.
)
data.head()
data['topic'] = data['topic'].replace(dictOfWords)
data.head()
data['text'] = data.text.map(lambda x: x.lower())
data['text'] = data.text.str.replace('[^\w\s]', '')
import nltk
data['text'] = data['text'].apply(nltk.word_tokenize)
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
data['text'] = data['text'].apply(lambda x: [stemmer.stem(y) for y in x])
data.head()
# +
from sklearn.feature_extraction.text import CountVectorizer
# This converts the list of words into space-separated strings
data['text'] = data['text'].apply(lambda x: ' '.join(x))
count_vect = CountVectorizer()
counts = count_vect.fit_transform(data['text'])
# +
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer().fit(counts)
counts = transformer.transform(counts)
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(counts, data['topic'], test_size=0.33, random_state=2)
# +
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB().fit(X_train, y_train)
# +
predicted = model.predict(X_test)
print(np.mean(predicted == y_test))
# +
from sklearn.metrics import confusion_matrix
import pylab as pl
cm = confusion_matrix(y_test, predicted)
pl.matshow(cm)
pl.title('Confusion matrix of the classifier')
pl.colorbar()
pl.show()
# -
dictOfWords1 = { list[i] : i for i in range(0, len(list) ) }
dictOfWords1['Non-Profit']=2
dictOfWords1['Banking']=1
dictOfWords1['Education']=0
dictOfWords1['Engineering']=0
dictOfWords1['Science']=0
dictOfWords1['Communications-Media']=0
dictOfWords1['BusinessServices']=1
dictOfWords1['Sports-Recreation']=2
dictOfWords1['Arts']=2
dictOfWords1['Internet']=2
dictOfWords1['Museums-Libraries']=2
dictOfWords1['Accounting']=1
dictOfWords1['Technology']=0
dictOfWords1['Law']=0
dictOfWords1['Consulting']=1
dictOfWords1['Automotive']=0
dictOfWords1['Religion']=2
dictOfWords1['Fashion']=2
dictOfWords1['Publishing']=0
dictOfWords1['Marketing']=1
dictOfWords1['LawEnforcement-Security']=1
dictOfWords1['HumanResources']=1
dictOfWords1['Telecommunications']=1
dictOfWords1['Military']=2
dictOfWords1['Government']=2
dictOfWords1['Transportation']=0
dictOfWords1['Architecture']=0
dictOfWords1['Advertising']=1
dictOfWords1['Biotech']=0
dictOfWords1['RealEstate']=1
dictOfWords1['Manufacturing']=1
dictOfWords1['Construction']=1
dictOfWords1['Chemicals']=1
dictOfWords1['Maritime']=2
dictOfWords1['Tourism']=2
dictOfWords1['Environment']=2
dictOfWords1
data1 = pd.read_csv(
"../Semantic-Analysis/blogtext.csv",
usecols=['topic', 'text'] # Only load the three columns specified.
)
data1.head()
data1['topic'] = data1['topic'].replace(dictOfWords1)
data1.head()
data1['text'] = data1.text.map(lambda x: x.lower())
data1.head()
data1['text'] = data1.text.str.replace('[^\w\s]', '')
data1.head()
import nltk
data1['text'] = data1['text'].apply(nltk.word_tokenize)
data1.head()
from nltk.stem import PorterStemmer
stemmer1 = PorterStemmer()
data1['text'] = data1['text'].apply(lambda x: [stemmer1.stem(y) for y in x])
data1.head()
# +
from sklearn.feature_extraction.text import CountVectorizer
# This converts the list of words into space-separated strings
data1['text'] = data1['text'].apply(lambda x: ' '.join(x))
count_vect = CountVectorizer()
counts = count_vect.fit_transform(data1['text'])
data1.head()
# +
from sklearn.feature_extraction.text import TfidfTransformer
transformer = TfidfTransformer().fit(counts)
counts = transformer.transform(counts)
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(counts, data1['topic'], test_size=0.5, random_state=0)
# +
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB().fit(X_train, y_train)
# +
predicted = model.predict(X_test)
print(np.mean(predicted == y_test))
# +
from sklearn.metrics import confusion_matrix
import pylab as pl
cm = confusion_matrix(y_test, predicted)
pl.matshow(cm)
pl.title('Confusion matrix of the classifier')
pl.colorbar()
pl.show()
# -
predicted
import pickle
saved = pickle.dumps(model)
new_model = pickle.loads(saved)
predicted = new_model.predict(X_test)
data['1']='India is democratic'
counts = count_vect.fit_transform(data['1'])
X_train, X_test, y_train, y_test = train_test_split(counts, data['1'], test_size=0, random_state=0)
predicted = model.predict(X_test)
| .ipynb_checkpoints/code-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Developing an AI application
#
# Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
#
# In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
#
# <img src='assets/Flowers.png' width=500px>
#
# The project is broken down into multiple steps:
#
# * Load and preprocess the image dataset
# * Train the image classifier on your dataset
# * Use the trained classifier to predict image content
#
# We'll lead you through each part which you'll implement in Python.
#
# When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
#
# First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
#
# Please make sure if you are running this notebook in the workspace that you have chosen GPU rather than CPU mode.
# Import required modules
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import json
from collections import OrderedDict
# ## Load the data
#
# Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
#
# The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
#
# The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
#
# define directories
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# +
# Define transforms for the training set
train_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomRotation(40),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Define transforms for the validation set
valid_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Define your transforms for the testing set
test_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Load the datasets with ImageFolder
train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)
valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transforms)
test_dataset = datasets.ImageFolder(test_dir, transform=test_transforms)
# Using the image datasets and the transforms, define the dataloaders
train_loader = torch.utils.data.DataLoader(train_dataset,batch_size=64, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset,batch_size=32, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,batch_size=32, shuffle=True)
# -
# ### Label mapping
#
# You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
# # Building and training the classifier
#
# Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
#
# We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
#
# * Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
# * Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
# * Train the classifier layers using backpropagation using the pre-trained network to get the features
# * Track the loss and accuracy on the validation set to determine the best hyperparameters
#
# We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
#
# When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
#
# One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to GPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.
# +
# Build and train your network
# -
# define model parameters (choose model network, dropout rate, nodes of hidden layer 1, 2 and 3 as well as learning rate)
# As recommended, we us the pretrained VGG networks
network = "vgg11"
dropout = 0.2
hidden_layer1 = 512
hidden_layer2 = 256
hidden_layer3 = 128
lr = 0.001
def nn_create(network, dropout = 0.2, hidden_layer1 = 512, hidden_layer2 = 256, hidden_layer3 = 128, lr = 0.001):
"""
This function creates a neural network by using the feauteres of a pretrained network,
which are fed into a defined classifier. The network consists of three hidden layers and has 102 output classes.
INPUT: network - string of pretrained model (vggXX)
dropout - dropout rate
hidden_layer1 - nodes of first hidden layer
hidden_layer2 - nodes of second hidden layer
hidden_layer3 - nodes of third hidden layer
lr - learning rate
OUTPUT: model - classifier
optimizer - optimizer of model parameters
criterion - defined loss
"""
model_creatable = True
if network == 'vgg11':
network = models.vgg11(pretrained = True)
elif network == 'vgg13':
network = models.vgg13(pretrained = True)
elif network == 'vgg16':
network = models.vgg16(pretrained = True)
elif network == 'vgg19':
network = models.vgg19(pretrained = True)
else:
print("Please enter an existing VGG network (vgg11, vgg13, vgg16, vgg19)")
model_creatable = False
if model_creatable:
# get in_features of model and define output classes
num_features = network.classifier[0].in_features
num_outputs = 102
# turn off gradients, since they are not required
for param in network.parameters():
param.requires_grad = False
# define classifier
classifier = nn.Sequential(OrderedDict([('dropout', nn.Dropout(dropout)),
('fc1', nn.Linear(num_features, hidden_layer1)),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_layer1, hidden_layer2)),
('relu2', nn.ReLU()),
('fc3', nn.Linear(hidden_layer2, hidden_layer3)),
('relu3', nn.ReLU()),
('fc4', nn.Linear(hidden_layer3, num_outputs)),
('output', nn.LogSoftmax(dim=1))
]))
# replace classifier
network.classifier = classifier
# move model to GPU
network.cuda()
# define optimizer and criterion
optimizer = optim.Adam(network.classifier.parameters(), lr = lr)
criterion = nn.NLLLoss()
return network, optimizer, criterion
try:
# create deep learning model
model, optimizer, criterion = nn_create(network, dropout, hidden_layer1, hidden_layer2, hidden_layer3, lr)
# Show model
print(model)
except TypeError:
print("Non-existing network. Try again!")
# +
## Perform training
# define training parameters
epochs = 10
steps = 0
print_every = 5
running_loss = 0
model.to('cuda')
train_losses, valid_losses = [], []
for epoch in range(epochs):
for inputs, labels in train_loader:
steps += 1
# move inputs and labels tensors to default devices
inputs,labels = inputs.to('cuda'), labels.to('cuda')
# clear gradient
optimizer.zero_grad()
# Forward pass
outputs = model.forward(inputs)
# calculate loss
loss = criterion(outputs, labels)
# backward pass
loss.backward()
# perform optimization step
optimizer.step()
# cumulate loss
running_loss += loss.item()
# validation pass
if steps % print_every == 0:
model.eval()
valid_loss = 0
accuracy=0
# turn off gradients, since no back propagation on validation pass required
with torch.no_grad():
for inputs_valid,labels_valid in valid_loader:
# move everything to GPU
inputs_valid, labels_valid = inputs_valid.to('cuda:0') , labels_valid.to('cuda:0')
model.to('cuda:0')
# Forward pass
outputs_valid = model.forward(inputs_valid)
# calculate validation loss
valid_loss = criterion(outputs_valid,labels_valid)
# calculate accuracy by checking if the predicted classes match the labels
ps = torch.exp(outputs_valid)
top_p, top_class = ps.topk(1,dim=1)
equals = top_class == labels_valid.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
train_losses.append(running_loss/len(train_loader))
valid_losses.append(valid_loss/len(valid_loader))
print(f"Epoch {epoch+1}/{epochs}.. "
f"Training Loss: {running_loss/len(train_loader):.3f}.. "
f"Validation loss: {valid_loss/len(valid_loader):.3f}.. "
f"Validation accuracy: {accuracy/len(valid_loader):.3f}")
running_loss = 0
model.train()
# +
# Plot Training and Validation losses over steps to validate the training behaviour and avoid overfitting
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
plt.plot(train_losses, label="Training loss")
plt.plot(valid_losses, label="Validation loss")
plt.legend()
# -
# ## Testing your network
#
# It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
# +
# Do validation on the test set
def calc_accuracy_test(test_loader):
"""
Function calculates the accuracy of the trained model according the test dataset loaded into the test_loader
INPUT: test_loader - test dataset loaded into the test_loader
OUTPUT: test_accuracy - calculated accuracy
"""
# initialize accuracy
accuracy = 0
# move model to GPU
model.to('cuda:0')
# turn off gradients, since only forward pass is required
with torch.no_grad():
# loop images and labels of test dataset
for images, labels in test_loader:
# move everything to GPU
images, labels = images.to('cuda:0') , labels.to('cuda:0')
# Forward pass
outputs = model(images)
# calculate validation loss
valid_loss = criterion(outputs_valid,labels_valid)
# calculate accuracy by checking if the predicted classes match the labels
ps = torch.exp(outputs)
top_p, top_class = ps.topk(1,dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print('Accuracy on Test dataset: {:.2f}%'.format(accuracy/len(test_loader)*100))
return accuracy
test_acc = calc_accuracy_test(test_loader)
# -
# ## Save the checkpoint
#
# Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
#
# ```model.class_to_idx = image_datasets['train'].class_to_idx```
#
# Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
# TODO: Save the checkpoint
def save_checkpoint(model, network, hidden_layer1, hidden_layer2, hidden_layer3, dropout, lr, filepath, train_dataset):
"""
save the checkpoint of a deep learning model
INPUT: model - deep learning model
network - structure of deep learning model
hidden_layer1 - nodes in first hidden layer
hidden_layer2 - nodes in second hidden layer
hidden_layer3 - nodes in third hidden layer
dropout - dropout rate
lr - learning rate
filepath - destination of the checkpoint
train_dataset - training dataset
"""
model.class_to_idx = train_dataset.class_to_idx
model.cpu
torch.save({'network' : network,
'hidden_layer1' : hidden_layer1,
'hidden_layer2' : hidden_layer2,
'hidden_layer3' : hidden_layer3,
'dropout' : dropout,
'learning_rate' : lr,
'state_dict' : model.state_dict(),
'class_to_idx':model.class_to_idx},
filepath)
save_dir = 'checkpoint.pth'
save_checkpoint(model, network, hidden_layer1, hidden_layer2, hidden_layer3, dropout, lr, save_dir, train_dataset)
# ## Loading the checkpoint
#
# At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
'''
Load previously saved checkpoint
INPUT: filepath - path of checkpoint file
OUTPUT: loaded_model - model created from loaded checkpoint data
'''
# load checkpoint data (use CPU)
checkpoint = torch.load(filepath, map_location=lambda storage, loc: storage)
# read out model properties
network = checkpoint['network']
hidden_layer1 = checkpoint['hidden_layer1']
hidden_layer2 = checkpoint['hidden_layer2']
hidden_layer3 = checkpoint['hidden_layer3']
dropout = checkpoint['dropout']
lr = checkpoint['learning_rate']
# create model from properties using nn_create and pass state_dict
loaded_model,_,_ = nn_create(network, dropout, hidden_layer1, hidden_layer2, hidden_layer3, lr)
loaded_model.class_to_idx = checkpoint['class_to_idx']
loaded_model.load_state_dict(checkpoint['state_dict'])
return loaded_model
# load model from checkpoint file
model = load_checkpoint('checkpoint.pth')
print(model)
# # Inference for classification
#
# Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
#
# First you'll need to handle processing the input image such that it can be used in your network.
#
# ## Image Preprocessing
#
# You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
#
# First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.
#
# Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
#
# As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
#
# And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
INPUT: image - path to image file
OUTPUT: img_np - image as Numpy array
'''
# TODO: Process a PIL image for use in a PyTorch model
# open image as PIL image
img_pil = Image.open(image)
# define transformations
transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# perform transformation
img_tensor = transform(img_pil)
# convert PyTorch Tensor to Numpy array and return array
return img_tensor.numpy()
# +
# load image and process image
image_path = data_dir + '/test' + '/11/' + 'image_03098.jpg'
img = process_image(image_path)
# show dimensions of img to check whether the color channel is within the first dimension
# (required result (3, 224, 224))
print(img.shape)
# -
# To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
def imshow(image, ax=None, title=None):
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
# Test process_image using imshow and the defined filepath
imshow(img)
# ## Class Prediction
#
# Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
#
# To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
#
# Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
#
# ```python
# probs, classes = predict(image_path, model)
# print(probs)
# print(classes)
# > [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
# > ['70', '3', '45', '62', '55']
# ```
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
INPUT: image_path - path to image file
model - deep learning model defined by checkpoint file
topk - top k classes of the prediction
OUTPUT: probs_topk_list - list of probabilities of top k classes
classes_topk_list - list of classes with the k highest probabilites
'''
# TODO: Implement the code to predict the class from an image file
# process image, convert image to torch.FloatTensor and unsqueeze tensor to comply with model input
img = process_image(image_path)
img_tensor = torch.from_numpy(img).type(torch.FloatTensor)
img_tensor = img_tensor.unsqueeze_(0)
# load deep learning model and move to CPU
model = load_checkpoint(model).cpu()
# set model to evaluation mode
model.eval()
# turn off gradients - not required for predicting
with torch.no_grad():
# forward pass for predictions
outputs = model.forward(img_tensor)
# calculate output probabilities and get the top k classes with indices - save as lists
probs = torch.exp(outputs)
probs_topk = probs.topk(topk)[0]
idx_topk = probs.topk(topk)[1]
probs_topk_list = np.array(probs_topk)[0]
idx_topk_list = np.array(idx_topk[0])
# map indices to classes
idx_to_class = {x: y for y, x in model.class_to_idx.items()}
# create class list
classes_topk_list = []
for i in idx_topk_list:
classes_topk_list += [idx_to_class[i]]
return probs_topk_list, classes_topk_list
# +
# perform prediction
model_path = 'checkpoint.pth'
image_path = data_dir + '/test' + '/11/' + 'image_03098.jpg'
probs, classes = predict(image_path, model_path)
print(probs)
print(classes)
# -
# ## Sanity Checking
#
# Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
#
# <img src='assets/inference_example.png' width=300px>
#
# You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
# TODO: Display an image along with the top 5 classes
def sanity_check(image_path, model):
# get probabilities and classes of top 5 predictions
probs, classes = predict(image_path, model)
# get class names
class_names = []
for c in classes:
class_names.append(cat_to_name[c])
# process image (resize, center, normalize)
img = process_image(image_path)
# define figure size and structure
plt.figure(figsize=(4,11))
plt.subplot(211)
# show image
ax = imshow(img, ax = plt)
ax.axis('off')
ax.title(class_names[0])
# plot predicted probabilities using barh() and invert the yaxis
plt.subplot(212)
plt.grid(linestyle = '--', linewidth = 0.5)
plt.barh(np.arange(len(class_names)), probs, color = 'blue')
plt.yticks(np.arange(len(class_names)), class_names)
plt.gca().invert_yaxis()
# plot
plt.show()
# +
# perform sanity checking
model_path = 'checkpoint.pth'
image_path = data_dir + '/test' + '/17/' + 'image_03872.jpg'
sanity_check(image_path, model_path)
| Image Classifier Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pyspark.sql import SparkSession
import datetime as dt
import pyspark.sql.functions as F
spark = SparkSession.builder.appName('etl').getOrCreate()
df = spark.read.parquet('data/catalog.parquet')
filtered = df.filter(df['resourceType'] == 'Observation')
Observation = filtered.filter(filtered.valueCodeableConcept.isNotNull())
Observation = Observation.select(['id',
'subject',
'code',
'performer',
'encounter',
'meta',
'effectiveDateTime',
'valueCodeableConcept',
'category'])
split_dates = F.split(Observation["effectiveDateTime"], 'T')
Observation = Observation.withColumnRenamed("id", "observation_id")\
.withColumn("observation_type_concept_id", Observation.category.coding.getItem(0).code.getItem(0))\
.withColumn("observation_date", split_dates.getItem(0))\
.withColumn("person_id", Observation.subject.reference)\
.withColumn("value_as_string", Observation.valueCodeableConcept.text)\
.withColumnRenamed("code", "observation_concept_id")\
.withColumnRenamed("effectiveDateTime", "measurement_datetime")\
.drop("valueCodeableConcept")\
.withColumn("visit_occurrence_id", Observation.encounter.reference)\
.withColumnRenamed("performer", "provider_id")\
.drop("encounter")\
.drop("subject")\
.drop("meta")\
.drop("category")
Observation = Observation.withColumn("observation_concept_id", Observation.observation_concept_id.coding.getItem(0).code)
#Observation.toPandas().to_csv("obs.csv", header=True)
Observation.show(5)
# -
| notebooks/Observation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from pathlib import Path
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Arial']
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyprojroot
import seaborn as sns
import searchnets
# -
def cm_to_inches(cm):
return cm / 2.54
mpl.style.use(['seaborn-darkgrid', 'seaborn-paper'])
# paths
# +
SOURCE_DATA_ROOT = pyprojroot.here('results/searchstims/source_data/10stims')
FIGURES_ROOT = pyprojroot.here('docs/paper/figures/experiment-1/searchstims-10stims')
# -
# constants
# +
LEARNING_RATE = 1e-3
NET_NAMES = [
'alexnet',
'VGG16',
'CORnet_Z',
'CORnet_S',
]
METHODS = [
'initialize',
'transfer'
]
MODES = [
'classify',
]
# -
# ## load source data
# Get just the transfer learning results, then group by network, stimulus, and set size, and compute the mean accuracy for each set size.
# +
df_all = pd.read_csv(
SOURCE_DATA_ROOT.joinpath('all.csv')
)
stim_acc_diff_df = pd.read_csv(
SOURCE_DATA_ROOT.joinpath('stim_acc_diff.csv')
)
net_acc_diff_df = pd.read_csv(
SOURCE_DATA_ROOT.joinpath('net_acc_diff.csv')
)
df_acc_diff_by_stim = pd.read_csv(
SOURCE_DATA_ROOT.joinpath('acc_diff_by_stim.csv'),
index_col='net_name'
)
# -
# columns will be stimuli, in increasing order of accuracy drop across models
FIG_COLUMNS = stim_acc_diff_df['stimulus'].values.tolist()
print(FIG_COLUMNS)
# rows will be nets, in decreasing order of accuracy drops across stimuli
FIG_ROWS = net_acc_diff_df['net_name'].values.tolist()
print(FIG_ROWS)
# ## plot figure
pal = sns.color_palette("rocket", n_colors=6)
len(pal)
cmaps = {}
for net in ('alexnet', 'CORnet_Z', 'VGG16', 'CORnet_S'):
cmaps[net] = {
'transfer': {
'unit_both': pal[3],
'mn_both': pal[2],
},
'initialize': {
'unit_both': pal[5],
'mn_both': pal[4],
}
}
# +
UNIT_COLORS = {
'present': 'violet',
'absent': 'lightgreen',
'both': 'darkgrey'
}
# default colors used for plotting mean across sampling units in each condition
MN_COLORS = {
'present': 'magenta',
'absent': 'lawngreen',
'both': 'black'
}
def metric_v_set_size_df(df, net_name, method, stimulus, metric, conditions,
unit_colors=UNIT_COLORS, mn_colors=MN_COLORS,
ax=None, title=None, save_as=None, figsize=(10, 5),
set_xlabel=False, set_ylabel=False, set_ylim=True,
ylim=(0, 1.1), yticks=None, plot_mean=True, add_legend=False, dpi=600):
"""plot accuracy as a function of visual search task set size
for models trained on a single task or dataset
Accepts a Pandas dataframe and column names that determine what to plot.
Dataframe is produces by searchstims.utils.general.results_csv function.
Parameters
----------
df : pandas.Dataframe
path to results.gz file saved after measuring accuracy of trained networks
on test set of visual search stimuli
net_name : str
name of neural net architecture. Must be a value in the 'net_name' column
of df.
method : str
method used for training. Must be a value in the 'method' column of df.
stimulus : str
type of visual search stimulus, e.g. 'RVvGV', '2_v_5'. Must be a value in
the 'stimulus' column of df.
metric : str
metric to plot. One of {'acc', 'd_prime'}.
conditions : list, str
conditions to plot. One of {'present', 'absent', 'both'}. Corresponds to
'target_condition' column in df.
Other Parameters
----------------
unit_colors : dict
mapping of conditions to colors used for plotting 'sampling units', i.e. each trained
network. Default is UNIT_COLORS defined in this module.
mn_colors : dict
mapping of conditions to colors used for plotting mean across 'sampling units'
(i.e., each trained network). Default is MN_COLORS defined in this module.
ax : matplotlib.Axis
axis on which to plot figure. Default is None, in which case a new figure with
a single axis is created for the plot.
title : str
string to use as title of figure. Default is None.
save_as : str
path to directory where figure should be saved. Default is None, in which
case figure is not saved.
figsize : tuple
(width, height) in inches. Default is (10, 5). Only used if ax is None and a new
figure is created.
set_xlabel : bool
if True, set the value of xlabel to "set size". Default is False.
set_ylabel : bool
if True, set the value of ylabel to metric. Default is False.
set_ylim : bool
if True, set the y-axis limits to the value of ylim.
ylim : tuple
with two elements, limits for y-axis. Default is (0, 1.1).
plot_mean : bool
if True, find mean accuracy and plot as a separate solid line. Default is True.
add_legend : bool
if True, add legend to axis. Default is False.
Returns
-------
None
"""
if ax is None:
fig, ax = plt.subplots(dpi=dpi, figsize=figsize)
df = df[(df['net_name'] == net_name)
& (df['method'] == method)
& (df['stimulus'] == stimulus)]
if not all(
[df['target_condition'].isin([targ_cond]).any() for targ_cond in conditions]
):
raise ValueError(f'not all target conditions specified were found in dataframe.'
f'Target conditions specified were: {conditions}')
handles = []
labels = []
set_sizes = None # because we verify set sizes is the same across conditions
net_nums = df['net_number'].unique()
# get metric across set sizes for each training replicate
# we end up with a list of vectors we can pass to ax.plot,
# so that the 'line' for each training replicate gets plotted
for targ_cond in conditions:
metric_vals = []
for net_num in net_nums:
metric_vals.append(
df[(df['net_number'] == net_num)
& (df['target_condition'] == targ_cond)][metric].values
)
curr_set_size = df[(df['net_number'] == net_num)
& (df['target_condition'] == targ_cond)]['set_size'].values
if set_sizes is None:
set_sizes = curr_set_size
else:
if not np.array_equal(set_sizes, curr_set_size):
raise ValueError(
f'set size for net number {net_num}, '
f'target condition {targ_cond}, did not match others'
)
for row_num, arr_metric in enumerate(metric_vals):
x = np.arange(1, len(set_sizes) + 1) * 2
# just label first row, so only one entry shows up in legend
if row_num == 0:
label = f'training replicate, {method}'
else:
label = None
ax.plot(x, arr_metric, color=unit_colors[targ_cond], linewidth=1,
linestyle='--', alpha=0.95, label=label)
ax.set_xticks(x)
ax.set_xticklabels(set_sizes)
ax.set_xlim([0, x.max() + 2])
if plot_mean:
mn_metric = np.asarray(metric_vals).mean(axis=0)
if targ_cond == 'both':
mn_metric_label = f'mean, {method}'
else:
mn_metric_label = f'mean {metric}, {targ_cond}, {method}'
labels.append(mn_metric_label)
mn_metric_line, = ax.plot(x, mn_metric,
color=mn_colors[targ_cond], linewidth=1.5,
alpha=0.65,
label=mn_metric_label)
ax.set_xticks(x)
ax.set_xticklabels(set_sizes)
ax.set_xlim([0, x.max() + 2])
handles.append(mn_metric_line)
if title:
ax.set_title(title)
if set_xlabel:
ax.set_xlabel('set size')
if set_ylabel:
ax.set_ylabel(metric)
if yticks is not None:
ax.set_yticks(yticks)
if set_ylim:
ax.set_ylim(ylim)
if add_legend:
ax.legend(handles=handles,
labels=labels,
loc='lower left')
if save_as:
plt.savefig(save_as)
# +
if '3stims' in str(FIGURES_ROOT):
FIGSIZE = tuple(cm_to_inches(size) for size in (7, 10))
elif '10stims' in str(FIGURES_ROOT):
FIGSIZE = tuple(cm_to_inches(size) for size in (17.4, 10))
DPI = 300
n_rows = len(FIG_ROWS)
n_cols = len(FIG_COLUMNS)
fig, ax = plt.subplots(n_rows, n_cols, sharey=True, figsize=FIGSIZE, dpi=DPI)
fig.subplots_adjust(hspace=0.5)
LABELSIZE = 6
XTICKPAD = 2
YTICKPAD = 1
for ax_ in ax.ravel():
ax_.xaxis.set_tick_params(pad=XTICKPAD, labelsize=LABELSIZE)
ax_.yaxis.set_tick_params(pad=YTICKPAD, labelsize=LABELSIZE)
STIM_FONTSIZE = 4
add_legend = False
for row, net_name in enumerate(FIG_ROWS):
df_this_net = df_all[df_all['net_name'] == net_name]
for method in ['transfer', 'initialize']:
for col, stim_name in enumerate(FIG_COLUMNS):
unit_colors = {'both': cmaps[net_name][method]['unit_both']}
mn_colors = {'both': cmaps[net_name][method]['mn_both']}
ax[row, col].set_axisbelow(True) # so grid is behind
metric_v_set_size_df(df=df_this_net,
net_name=net_name,
method=method,
stimulus=stim_name,
metric='accuracy',
conditions=['both'],
unit_colors=unit_colors,
mn_colors=mn_colors,
set_ylim=True,
ax=ax[row, col],
ylim=(0.4, 1.1),
yticks=(0.5, 0.6, 0.7, 0.8, 0.9, 1.0),
add_legend=add_legend)
if row == 0:
title = stim_name.replace('_',' ')
ax[row, col].set_title(title,
fontsize=STIM_FONTSIZE,
pad=5) # pad so we can put image over title without it showing
if col == 0:
ax[row, col].set_ylabel('accuracy')
net_name_for_fig = net_name.replace('_', ' ')
ax[row, col].text(0, 0.15, net_name_for_fig, fontweight='bold', fontsize=8)
# add a big axis, hide frame
big_ax = fig.add_subplot(111, frameon=False)
# hide tick and tick label of the big axis
big_ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
big_ax.grid(False)
handles, labels = ax[0, 0].get_legend_handles_labels()
LEGEND_FONTSIZE = 4
if '3stims' in str(FIGURES_ROOT):
BBOX_TO_ANCHOR = (0.0125, 0.535, 0.8, .075)
elif '10stims' in str(FIGURES_ROOT):
BBOX_TO_ANCHOR = (0.005, 0.55, 0.4, .075)
big_ax.legend(handles, labels,
bbox_to_anchor=BBOX_TO_ANCHOR,
ncol=2, mode="expand", frameon=True,
borderaxespad=0., fontsize=LEGEND_FONTSIZE);
big_ax.set_xlabel("set size", labelpad=0.1);
for ext in ('svg', 'png'):
fig_path = FIGURES_ROOT.joinpath(
f'acc-v-set-size/acc-v-set-size.{ext}'
)
plt.savefig(fig_path)
| src/scripts/experiment-1-searchstims/10stims-acc-v-set-size-fig.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Chapter 11 – Training Deep Neural Networks**
# _This notebook contains all the sample code and solutions to the exercises in chapter 11._
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# </td>
# <td>
# <a target="_blank" href="https://kaggle.com/kernels/welcome?src=https://github.com/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" /></a>
# </td>
# </table>
# # Setup
# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
# +
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
# %load_ext tensorboard
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
# -
# # Vanishing/Exploding Gradients Problem
def logit(z):
return 1 / (1 + np.exp(-z))
# +
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("sigmoid_saturation_plot")
plt.show()
# -
# ## Xavier and He Initialization
[name for name in dir(keras.initializers) if not name.startswith("_")]
keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal")
init = keras.initializers.VarianceScaling(scale=2., mode='fan_avg',
distribution='uniform')
keras.layers.Dense(10, activation="relu", kernel_initializer=init)
# ## Nonsaturating Activation Functions
# ### Leaky ReLU
def leaky_relu(z, alpha=0.01):
return np.maximum(alpha*z, z)
# +
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
save_fig("leaky_relu_plot")
plt.show()
# -
[m for m in dir(keras.activations) if not m.startswith("_")]
[m for m in dir(keras.layers) if "relu" in m.lower()]
# Let's train a neural network on Fashion MNIST using the Leaky ReLU:
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
# +
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(10, activation="softmax")
])
# -
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
# Now let's try PReLU:
# +
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(10, activation="softmax")
])
# -
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
# ### ELU
def elu(z, alpha=1):
return np.where(z < 0, alpha * (np.exp(z) - 1), z)
# +
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("elu_plot")
plt.show()
# -
# Implementing ELU in TensorFlow is trivial, just specify the activation function when building each layer:
keras.layers.Dense(10, activation="elu")
# ### SELU
# This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by <NAME>, <NAME> and <NAME>, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ<sub>1</sub> or ℓ<sub>2</sub> regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. If you break self-normalization, SELU will not necessarily outperform other activation functions.
# +
from scipy.special import erfc
# alpha and scale to self normalize with mean 0 and standard deviation 1
# (see equation 14 in the paper):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1/np.sqrt(2)) * np.exp(1/2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (2 * erfc(np.sqrt(2))*np.e**2 + np.pi*erfc(1/np.sqrt(2))**2*np.e - 2*(2+np.pi)*erfc(1/np.sqrt(2))*np.sqrt(np.e)+np.pi+2)**(-1/2)
# -
def selu(z, scale=scale_0_1, alpha=alpha_0_1):
return scale * elu(z, alpha)
# +
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title("SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("selu_plot")
plt.show()
# -
# By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
np.random.seed(42)
Z = np.random.normal(size=(500, 100)) # standardized inputs
for layer in range(1000):
W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization
Z = selu(np.dot(Z, W))
means = np.mean(Z, axis=0).mean()
stds = np.std(Z, axis=0).mean()
if layer % 100 == 0:
print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds))
# Using SELU is easy:
keras.layers.Dense(10, activation="selu",
kernel_initializer="lecun_normal")
# Let's create a neural net for Fashion MNIST with 100 hidden layers, using the SELU activation function:
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="selu",
kernel_initializer="lecun_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
# Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
pixel_means = X_train.mean(axis=0, keepdims=True)
pixel_stds = X_train.std(axis=0, keepdims=True)
X_train_scaled = (X_train - pixel_means) / pixel_stds
X_valid_scaled = (X_valid - pixel_means) / pixel_stds
X_test_scaled = (X_test - pixel_means) / pixel_stds
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
# Now look at what happens if we try to use the ReLU activation function instead:
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu", kernel_initializer="he_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="relu", kernel_initializer="he_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
# Not great at all, we suffered from the vanishing/exploding gradients problem.
# # Batch Normalization
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(100, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(10, activation="softmax")
])
model.summary()
bn1 = model.layers[1]
[(var.name, var.trainable) for var in bn1.variables]
# +
#bn1.updates #deprecated
# -
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
# Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer some as well, it would be a waste of parameters, so you can set `use_bias=False` when creating those layers:
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(100, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
# ## Gradient Clipping
# All Keras optimizers accept `clipnorm` or `clipvalue` arguments:
optimizer = keras.optimizers.SGD(clipvalue=1.0)
optimizer = keras.optimizers.SGD(clipnorm=1.0)
# ## Reusing Pretrained Layers
# ### Reusing a Keras model
# Let's split the fashion MNIST training set in two:
# * `X_train_A`: all images of all items except for sandals and shirts (classes 5 and 6).
# * `X_train_B`: a much smaller training set of just the first 200 images of sandals or shirts.
#
# The validation set and the test set are also split this way, but without restricting the number of images.
#
# We will train a model on set A (classification task with 8 classes), and try to reuse it to tackle set B (binary classification). We hope to transfer a little bit of knowledge from task A to task B, since classes in set A (sneakers, ankle boots, coats, t-shirts, etc.) are somewhat similar to classes in set B (sandals and shirts). However, since we are using `Dense` layers, only patterns that occur at the same location can be reused (in contrast, convolutional layers will transfer much better, since learned patterns can be detected anywhere on the image, as we will see in the CNN chapter).
# +
def split_dataset(X, y):
y_5_or_6 = (y == 5) | (y == 6) # sandals or shirts
y_A = y[~y_5_or_6]
y_A[y_A > 6] -= 2 # class indices 7, 8, 9 should be moved to 5, 6, 7
y_B = (y[y_5_or_6] == 6).astype(np.float32) # binary classification task: is it a shirt (class 6)?
return ((X[~y_5_or_6], y_A),
(X[y_5_or_6], y_B))
(X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train)
(X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid)
(X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test)
X_train_B = X_train_B[:200]
y_train_B = y_train_B[:200]
# -
X_train_A.shape
X_train_B.shape
y_train_A[:30]
y_train_B[:30]
tf.random.set_seed(42)
np.random.seed(42)
model_A = keras.models.Sequential()
model_A.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_A.add(keras.layers.Dense(n_hidden, activation="selu"))
model_A.add(keras.layers.Dense(8, activation="softmax"))
model_A.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model_A.fit(X_train_A, y_train_A, epochs=20,
validation_data=(X_valid_A, y_valid_A))
model_A.save("my_model_A.h5")
model_B = keras.models.Sequential()
model_B.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_B.add(keras.layers.Dense(n_hidden, activation="selu"))
model_B.add(keras.layers.Dense(1, activation="sigmoid"))
model_B.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model_B.fit(X_train_B, y_train_B, epochs=20,
validation_data=(X_valid_B, y_valid_B))
model_B.summary()
model_A = keras.models.load_model("my_model_A.h5")
model_B_on_A = keras.models.Sequential(model_A.layers[:-1])
model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid"))
# Note that `model_B_on_A` and `model_A` actually share layers now, so when we train one, it will update both models. If we want to avoid that, we need to build `model_B_on_A` on top of a *clone* of `model_A`:
model_A_clone = keras.models.clone_model(model_A)
model_A_clone.set_weights(model_A.get_weights())
model_B_on_A = keras.models.Sequential(model_A_clone.layers[:-1])
model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid"))
# +
for layer in model_B_on_A.layers[:-1]:
layer.trainable = False
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
# +
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4,
validation_data=(X_valid_B, y_valid_B))
for layer in model_B_on_A.layers[:-1]:
layer.trainable = True
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=16,
validation_data=(X_valid_B, y_valid_B))
# -
# So, what's the final verdict?
model_B.evaluate(X_test_B, y_test_B)
model_B_on_A.evaluate(X_test_B, y_test_B)
# Great! We got quite a bit of transfer: the error rate dropped by a factor of 4.9!
(100 - 97.05) / (100 - 99.40)
# # Faster Optimizers
# ## Momentum optimization
optimizer = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9)
# ## Nesterov Accelerated Gradient
optimizer = keras.optimizers.SGD(learning_rate=0.001, momentum=0.9, nesterov=True)
# ## AdaGrad
optimizer = keras.optimizers.Adagrad(learning_rate=0.001)
# ## RMSProp
optimizer = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9)
# ## Adam Optimization
optimizer = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
# ## Adamax Optimization
optimizer = keras.optimizers.Adamax(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
# ## Nadam Optimization
optimizer = keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
# ## Learning Rate Scheduling
# ### Power Scheduling
# ```lr = lr0 / (1 + steps / s)**c```
# * Keras uses `c=1` and `s = 1 / decay`
optimizer = keras.optimizers.SGD(learning_rate=0.01, decay=1e-4)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
# +
import math
learning_rate = 0.01
decay = 1e-4
batch_size = 32
n_steps_per_epoch = math.ceil(len(X_train) / batch_size)
epochs = np.arange(n_epochs)
lrs = learning_rate / (1 + decay * epochs * n_steps_per_epoch)
plt.plot(epochs, lrs, "o-")
plt.axis([0, n_epochs - 1, 0, 0.01])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Power Scheduling", fontsize=14)
plt.grid(True)
plt.show()
# -
# ### Exponential Scheduling
# ```lr = lr0 * 0.1**(epoch / s)```
def exponential_decay_fn(epoch):
return 0.01 * 0.1**(epoch / 20)
# +
def exponential_decay(lr0, s):
def exponential_decay_fn(epoch):
return lr0 * 0.1**(epoch / s)
return exponential_decay_fn
exponential_decay_fn = exponential_decay(lr0=0.01, s=20)
# -
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling", fontsize=14)
plt.grid(True)
plt.show()
# The schedule function can take the current learning rate as a second argument:
def exponential_decay_fn(epoch, lr):
return lr * 0.1**(1 / 20)
# If you want to update the learning rate at each iteration rather than at each epoch, you must write your own callback class:
# +
K = keras.backend
class ExponentialDecay(keras.callbacks.Callback):
def __init__(self, s=40000):
super().__init__()
self.s = s
def on_batch_begin(self, batch, logs=None):
# Note: the `batch` argument is reset at each epoch
lr = K.get_value(self.model.optimizer.learning_rate)
K.set_value(self.model.optimizer.learning_rate, lr * 0.1**(1 / self.s))
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.learning_rate)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
lr0 = 0.01
optimizer = keras.optimizers.Nadam(learning_rate=lr0)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
exp_decay = ExponentialDecay(s)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[exp_decay])
# -
n_steps = n_epochs * len(X_train) // 32
steps = np.arange(n_steps)
lrs = lr0 * 0.1**(steps / s)
plt.plot(steps, lrs, "-", linewidth=2)
plt.axis([0, n_steps - 1, 0, lr0 * 1.1])
plt.xlabel("Batch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling (per batch)", fontsize=14)
plt.grid(True)
plt.show()
# ### Piecewise Constant Scheduling
def piecewise_constant_fn(epoch):
if epoch < 5:
return 0.01
elif epoch < 15:
return 0.005
else:
return 0.001
# +
def piecewise_constant(boundaries, values):
boundaries = np.array([0] + boundaries)
values = np.array(values)
def piecewise_constant_fn(epoch):
return values[np.argmax(boundaries > epoch) - 1]
return piecewise_constant_fn
piecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])
# +
lr_scheduler = keras.callbacks.LearningRateScheduler(piecewise_constant_fn)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
# -
plt.plot(history.epoch, [piecewise_constant_fn(epoch) for epoch in history.epoch], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Piecewise Constant Scheduling", fontsize=14)
plt.grid(True)
plt.show()
# ### Performance Scheduling
tf.random.set_seed(42)
np.random.seed(42)
# +
lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(learning_rate=0.02, momentum=0.9)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
# +
plt.plot(history.epoch, history.history["lr"], "bo-")
plt.xlabel("Epoch")
plt.ylabel("Learning Rate", color='b')
plt.tick_params('y', colors='b')
plt.gca().set_xlim(0, n_epochs - 1)
plt.grid(True)
ax2 = plt.gca().twinx()
ax2.plot(history.epoch, history.history["val_loss"], "r^-")
ax2.set_ylabel('Validation Loss', color='r')
ax2.tick_params('y', colors='r')
plt.title("Reduce LR on Plateau", fontsize=14)
plt.show()
# -
# ### tf.keras schedulers
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
# For piecewise constant scheduling, try this:
learning_rate = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[5. * n_steps_per_epoch, 15. * n_steps_per_epoch],
values=[0.01, 0.005, 0.001])
# ### 1Cycle scheduling
# +
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
def __init__(self, factor):
self.factor = factor
self.rates = []
self.losses = []
def on_batch_end(self, batch, logs):
self.rates.append(K.get_value(self.model.optimizer.learning_rate))
self.losses.append(logs["loss"])
K.set_value(self.model.optimizer.learning_rate, self.model.optimizer.learning_rate * self.factor)
def find_learning_rate(model, X, y, epochs=1, batch_size=32, min_rate=10**-5, max_rate=10):
init_weights = model.get_weights()
iterations = math.ceil(len(X) / batch_size) * epochs
factor = np.exp(np.log(max_rate / min_rate) / iterations)
init_lr = K.get_value(model.optimizer.learning_rate)
K.set_value(model.optimizer.learning_rate, min_rate)
exp_lr = ExponentialLearningRate(factor)
history = model.fit(X, y, epochs=epochs, batch_size=batch_size,
callbacks=[exp_lr])
K.set_value(model.optimizer.learning_rate, init_lr)
model.set_weights(init_weights)
return exp_lr.rates, exp_lr.losses
def plot_lr_vs_loss(rates, losses):
plt.plot(rates, losses)
plt.gca().set_xscale('log')
plt.hlines(min(losses), min(rates), max(rates))
plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 2])
plt.xlabel("Learning rate")
plt.ylabel("Loss")
# -
# **Warning**: In the `on_batch_end()` method, `logs["loss"]` used to contain the batch loss, but in TensorFlow 2.2.0 it was replaced with the mean loss (since the start of the epoch). This explains why the graph below is much smoother than in the book (if you are using TF 2.2 or above). It also means that there is a lag between the moment the batch loss starts exploding and the moment the explosion becomes clear in the graph. So you should choose a slightly smaller learning rate than you would have chosen with the "noisy" graph. Alternatively, you can tweak the `ExponentialLearningRate` callback above so it computes the batch loss (based on the current mean loss and the previous mean loss):
#
# ```python
# class ExponentialLearningRate(keras.callbacks.Callback):
# def __init__(self, factor):
# self.factor = factor
# self.rates = []
# self.losses = []
# def on_epoch_begin(self, epoch, logs=None):
# self.prev_loss = 0
# def on_batch_end(self, batch, logs=None):
# batch_loss = logs["loss"] * (batch + 1) - self.prev_loss * batch
# self.prev_loss = logs["loss"]
# self.rates.append(K.get_value(self.model.optimizer.learning_rate))
# self.losses.append(batch_loss)
# K.set_value(self.model.optimizer.learning_rate, self.model.optimizer.learning_rate * self.factor)
# ```
# +
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(learning_rate=1e-3),
metrics=["accuracy"])
# -
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
class OneCycleScheduler(keras.callbacks.Callback):
def __init__(self, iterations, max_rate, start_rate=None,
last_iterations=None, last_rate=None):
self.iterations = iterations
self.max_rate = max_rate
self.start_rate = start_rate or max_rate / 10
self.last_iterations = last_iterations or iterations // 10 + 1
self.half_iteration = (iterations - self.last_iterations) // 2
self.last_rate = last_rate or self.start_rate / 1000
self.iteration = 0
def _interpolate(self, iter1, iter2, rate1, rate2):
return ((rate2 - rate1) * (self.iteration - iter1)
/ (iter2 - iter1) + rate1)
def on_batch_begin(self, batch, logs):
if self.iteration < self.half_iteration:
rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)
elif self.iteration < 2 * self.half_iteration:
rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,
self.max_rate, self.start_rate)
else:
rate = self._interpolate(2 * self.half_iteration, self.iterations,
self.start_rate, self.last_rate)
self.iteration += 1
K.set_value(self.model.optimizer.learning_rate, rate)
n_epochs = 25
onecycle = OneCycleScheduler(math.ceil(len(X_train) / batch_size) * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
# # Avoiding Overfitting Through Regularization
# ## $\ell_1$ and $\ell_2$ regularization
layer = keras.layers.Dense(100, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01))
# or l1(0.1) for ℓ1 regularization with a factor of 0.1
# or l1_l2(0.1, 0.01) for both ℓ1 and ℓ2 regularization, with factors 0.1 and 0.01 respectively
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01)),
keras.layers.Dense(100, activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01)),
keras.layers.Dense(10, activation="softmax",
kernel_regularizer=keras.regularizers.l2(0.01))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
# +
from functools import partial
RegularizedDense = partial(keras.layers.Dense,
activation="elu",
kernel_initializer="he_normal",
kernel_regularizer=keras.regularizers.l2(0.01))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
RegularizedDense(300),
RegularizedDense(100),
RegularizedDense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
# -
# ## Dropout
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(300, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
# ## Alpha Dropout
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 20
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.evaluate(X_train_scaled, y_train)
history = model.fit(X_train_scaled, y_train)
# ## MC Dropout
tf.random.set_seed(42)
np.random.seed(42)
y_probas = np.stack([model(X_test_scaled, training=True)
for sample in range(100)])
y_proba = y_probas.mean(axis=0)
y_std = y_probas.std(axis=0)
np.round(model.predict(X_test_scaled[:1]), 2)
np.round(y_probas[:, :1], 2)
np.round(y_proba[:1], 2)
y_std = y_probas.std(axis=0)
np.round(y_std[:1], 2)
y_pred = np.argmax(y_proba, axis=1)
accuracy = np.sum(y_pred == y_test) / len(y_test)
accuracy
# +
class MCDropout(keras.layers.Dropout):
def call(self, inputs):
return super().call(inputs, training=True)
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
# -
tf.random.set_seed(42)
np.random.seed(42)
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
mc_model.summary()
optimizer = keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
mc_model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
mc_model.set_weights(model.get_weights())
# Now we can use the model with MC Dropout:
np.round(np.mean([mc_model.predict(X_test_scaled[:1]) for sample in range(100)], axis=0), 2)
# ## Max norm
layer = keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
# +
MaxNormDense = partial(keras.layers.Dense,
activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
MaxNormDense(300),
MaxNormDense(100),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
# -
# # Exercises
# ## 1. to 7.
# See appendix A.
# ## 8. Deep Learning on CIFAR10
# ### a.
# *Exercise: Build a DNN with 20 hidden layers of 100 neurons each (that's too many, but it's the point of this exercise). Use He initialization and the ELU activation function.*
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
activation="elu",
kernel_initializer="he_normal"))
# -
# ### b.
# *Exercise: Using Nadam optimization and early stopping, train the network on the CIFAR10 dataset. You can load it with `keras.datasets.cifar10.load_data()`. The dataset is composed of 60,000 32 × 32–pixel color images (50,000 for training, 10,000 for testing) with 10 classes, so you'll need a softmax output layer with 10 neurons. Remember to search for the right learning rate each time you change the model's architecture or hyperparameters.*
# Let's add the output layer to the model:
model.add(keras.layers.Dense(10, activation="softmax"))
# Let's use a Nadam optimizer with a learning rate of 5e-5. I tried learning rates 1e-5, 3e-5, 1e-4, 3e-4, 1e-3, 3e-3 and 1e-2, and I compared their learning curves for 10 epochs each (using the TensorBoard callback, below). The learning rates 3e-5 and 1e-4 were pretty good, so I tried 5e-5, which turned out to be slightly better.
optimizer = keras.optimizers.Nadam(learning_rate=5e-5)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
# Let's load the CIFAR10 dataset. We also want to use early stopping, so we need a validation set. Let's use the first 5,000 images of the original training set as the validation set:
# +
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.cifar10.load_data()
X_train = X_train_full[5000:]
y_train = y_train_full[5000:]
X_valid = X_train_full[:5000]
y_valid = y_train_full[:5000]
# -
# Now we can create the callbacks we need and train the model:
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
# %tensorboard --logdir=./my_cifar10_logs --port=6006
model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_model.h5")
model.evaluate(X_valid, y_valid)
# The model with the lowest validation loss gets about 47.6% accuracy on the validation set. It took 27 epochs to reach the lowest validation loss, with roughly 8 seconds per epoch on my laptop (without a GPU). Let's see if we can improve performance using Batch Normalization.
# ### c.
# *Exercise: Now try adding Batch Normalization and compare the learning curves: Is it converging faster than before? Does it produce a better model? How does it affect training speed?*
# The code below is very similar to the code above, with a few changes:
#
# * I added a BN layer after every Dense layer (before the activation function), except for the output layer. I also added a BN layer before the first hidden layer.
# * I changed the learning rate to 5e-4. I experimented with 1e-5, 3e-5, 5e-5, 1e-4, 3e-4, 5e-4, 1e-3 and 3e-3, and I chose the one with the best validation performance after 20 epochs.
# * I renamed the run directories to run_bn_* and the model file name to my_cifar10_bn_model.h5.
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
model.add(keras.layers.BatchNormalization())
for _ in range(20):
model.add(keras.layers.Dense(100, kernel_initializer="he_normal"))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Activation("elu"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(learning_rate=5e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_bn_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_bn_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
model.fit(X_train, y_train, epochs=100,
validation_data=(X_valid, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_bn_model.h5")
model.evaluate(X_valid, y_valid)
# -
# * *Is the model converging faster than before?* Much faster! The previous model took 27 epochs to reach the lowest validation loss, while the new model achieved that same loss in just 5 epochs and continued to make progress until the 16th epoch. The BN layers stabilized training and allowed us to use a much larger learning rate, so convergence was faster.
# * *Does BN produce a better model?* Yes! The final model is also much better, with 54.0% accuracy instead of 47.6%. It's still not a very good model, but at least it's much better than before (a Convolutional Neural Network would do much better, but that's a different topic, see chapter 14).
# * *How does BN affect training speed?* Although the model converged much faster, each epoch took about 12s instead of 8s, because of the extra computations required by the BN layers. But overall the training time (wall time) was shortened significantly!
# ### d.
# *Exercise: Try replacing Batch Normalization with SELU, and make the necessary adjustements to ensure the network self-normalizes (i.e., standardize the input features, use LeCun normal initialization, make sure the DNN contains only a sequence of dense layers, etc.).*
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(learning_rate=7e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_selu_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_selu_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
X_means = X_train.mean(axis=0)
X_stds = X_train.std(axis=0)
X_train_scaled = (X_train - X_means) / X_stds
X_valid_scaled = (X_valid - X_means) / X_stds
X_test_scaled = (X_test - X_means) / X_stds
model.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_selu_model.h5")
model.evaluate(X_valid_scaled, y_valid)
# -
model = keras.models.load_model("my_cifar10_selu_model.h5")
model.evaluate(X_valid_scaled, y_valid)
# We get 47.9% accuracy, which is not much better than the original model (47.6%), and not as good as the model using batch normalization (54.0%). However, convergence was almost as fast as with the BN model, plus each epoch took only 7 seconds. So it's by far the fastest model to train so far.
# ### e.
# *Exercise: Try regularizing the model with alpha dropout. Then, without retraining your model, see if you can achieve better accuracy using MC Dropout.*
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.Nadam(learning_rate=5e-4)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
early_stopping_cb = keras.callbacks.EarlyStopping(patience=20)
model_checkpoint_cb = keras.callbacks.ModelCheckpoint("my_cifar10_alpha_dropout_model.h5", save_best_only=True)
run_index = 1 # increment every time you train the model
run_logdir = os.path.join(os.curdir, "my_cifar10_logs", "run_alpha_dropout_{:03d}".format(run_index))
tensorboard_cb = keras.callbacks.TensorBoard(run_logdir)
callbacks = [early_stopping_cb, model_checkpoint_cb, tensorboard_cb]
X_means = X_train.mean(axis=0)
X_stds = X_train.std(axis=0)
X_train_scaled = (X_train - X_means) / X_stds
X_valid_scaled = (X_valid - X_means) / X_stds
X_test_scaled = (X_test - X_means) / X_stds
model.fit(X_train_scaled, y_train, epochs=100,
validation_data=(X_valid_scaled, y_valid),
callbacks=callbacks)
model = keras.models.load_model("my_cifar10_alpha_dropout_model.h5")
model.evaluate(X_valid_scaled, y_valid)
# -
# The model reaches 48.9% accuracy on the validation set. That's very slightly better than without dropout (47.6%). With an extensive hyperparameter search, it might be possible to do better (I tried dropout rates of 5%, 10%, 20% and 40%, and learning rates 1e-4, 3e-4, 5e-4, and 1e-3), but probably not much better in this case.
# Let's use MC Dropout now. We will need the `MCAlphaDropout` class we used earlier, so let's just copy it here for convenience:
class MCAlphaDropout(keras.layers.AlphaDropout):
def call(self, inputs):
return super().call(inputs, training=True)
# Now let's create a new model, identical to the one we just trained (with the same weights), but with `MCAlphaDropout` dropout layers instead of `AlphaDropout` layers:
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
# Then let's add a couple utility functions. The first will run the model many times (10 by default) and it will return the mean predicted class probabilities. The second will use these mean probabilities to predict the most likely class for each instance:
# +
def mc_dropout_predict_probas(mc_model, X, n_samples=10):
Y_probas = [mc_model.predict(X) for sample in range(n_samples)]
return np.mean(Y_probas, axis=0)
def mc_dropout_predict_classes(mc_model, X, n_samples=10):
Y_probas = mc_dropout_predict_probas(mc_model, X, n_samples)
return np.argmax(Y_probas, axis=1)
# -
# Now let's make predictions for all the instances in the validation set, and compute the accuracy:
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
y_pred = mc_dropout_predict_classes(mc_model, X_valid_scaled)
accuracy = np.mean(y_pred == y_valid[:, 0])
accuracy
# -
# We get no accuracy improvement in this case (we're still at 48.9% accuracy).
#
# So the best model we got in this exercise is the Batch Normalization model.
# ### f.
# *Exercise: Retrain your model using 1cycle scheduling and see if it improves training speed and model accuracy.*
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
# -
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
plt.axis([min(rates), max(rates), min(losses), (losses[0] + min(losses)) / 1.4])
# +
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[32, 32, 3]))
for _ in range(20):
model.add(keras.layers.Dense(100,
kernel_initializer="lecun_normal",
activation="selu"))
model.add(keras.layers.AlphaDropout(rate=0.1))
model.add(keras.layers.Dense(10, activation="softmax"))
optimizer = keras.optimizers.SGD(learning_rate=1e-2)
model.compile(loss="sparse_categorical_crossentropy",
optimizer=optimizer,
metrics=["accuracy"])
# -
n_epochs = 15
onecycle = OneCycleScheduler(math.ceil(len(X_train_scaled) / batch_size) * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
# One cycle allowed us to train the model in just 15 epochs, each taking only 2 seconds (thanks to the larger batch size). This is several times faster than the fastest model we trained so far. Moreover, we improved the model's performance (from 47.6% to 52.0%). The batch normalized model reaches a slightly better performance (54%), but it's much slower to train.
| 11_training_deep_neural_networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import json
file = 'gdb_females.csv'
df = pd.read_csv(file, encoding='utf-8')
len(df)
df['clean_name3'] = df['clean_name2'].apply(lambda x: x.replace('[', '').replace(']', '').replace("'", "").replace(', ', ' '))
iguales = {
'santa rosa': ['santa rosa lima',
'santa rosa (auxiliar)',
'rosa america'],
'santa maria angeles': ['maria angeles'],
'micaela bastidas': ['bastidas',
'malecon bastidas'],
'santa maria': ['santa maria reyna',
'santa maria reina',
'maria auxiliadora'],
'clorinda matto turner': ['clorinda malto'],
'santa teresa': ['santa teresa (calle 8)'],
'santa ana': ['santa anita'],
'elvira garcia garcia': ['alameda elvira garcia garcia',
'elvira garcia gracia'],
'angelica gamarra': ['salida a angelica gamarra']
}
no_mujeres = [
'libertad',
'marina',
'tingo maria',
'belen',
'merced',
'esperanza',
'victoria',
'esmeralda',
'rio santa',
'<NAME>',
'santa fe',
'agua marina',
'irlanda',
'mar coral',
'marbella',
'nueva esperanza',
'bajada agua dulce',
'mercedes',
'<NAME>',
'alameda arco iris',
'caridad',
'villa mercedes',
'alheli',
'soledad',
'grecia',
'nevado sara sara',
'santa',
'iris',
'amarilis',
'america',
'marina (auxiliar)',
'florencia',
'villa maria',
'estrella',
'lourdes',
'nieves',
'azucena',
'perla',
'merino reyna',
'rio santa martha',
'rio elba',
'<NAME>',
'talavera reina',
'agua dulce',
'cristal'
]
df2 = df[~df['clean_name3'].isin(no_mujeres)].reset_index(drop=True)
iguales2 = {}
for key, lst in iguales.items():
for item in lst:
iguales2[item] = key
df2['clean_name3'] = df2['clean_name3'].replace(iguales2)
tabulacion = df2['clean_name3'].value_counts()
tabulacion = tabulacion.reset_index().rename(columns={'clean_name3': 'total', 'index': 'nombre'})
tabulacion.to_csv('tabulacion_nombres_mujeres.csv', index=False, encoding='utf-8')
def nombre_santa(text):
if text[:6] == 'santa ' or text[:4] == 'sor ' or text[:7] == 'virgen ':
return 'santa'
else:
return 'no santa'
tabulacion['santa'] = tabulacion['nombre'].apply(nombre_santa)
tabulacion2 = tabulacion['santa'].value_counts().reset_index().rename(columns={'santa': 'total', 'index': 'categoria'})
tabulacion2.to_csv('tabulacion_nombres_hombres_santas.csv', index=False, encoding='utf-8')
| estandarizacion-nombres-mujeres.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Import libraries
# * **Numpy** is the fundamental package for scientific computing with Python.
# * **Pandas** is for data manipulation and analysis.
# > pip install pandas
# * **Matplotlib** is a Python 2D plotting library which produces publication quality figures
# > pip install matplotlib
# * **matplotlib** The scikit-learn package is a machine learning library, written in Python. It contains numerous algorithms, datasets, utilities, and frameworks for performing machine learning.
# > pip3 install -U scikit-learn
# * **Warning** messages are typically issued in situations where it is useful to alert the user of some condition in a program,
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# # Example 1
# ## Import data set
dataset = pd.read_csv('WorldCupMatches.csv')
dataset.head(5)
# ### Print data set properties
print(dataset.shape)
print()
print(dataset.index)
print()
print(dataset.columns)
dataset.loc[0]
# ## Data Cleaning
# ### a. Check out missing data
dataset.isnull()
dataset.isnull().sum()
# ### 1. Ignore the tuple with missing data
# This method is advised only when there are enough samples in the data set. One has to make sure that after we have deleted the data, there is no addition of bias. Removing the data will lead to loss of information which will not give the expected results while predicting the output.
dataset2 = dataset.dropna(inplace=False)
dataset2.isnull().sum()
dataset2.shape
# ### 2. Replace missing values with **mean, median or mode**
# This is an approximation which can add variance to the data set. But the loss of the data can be negated by this method which yields better results compared to removal of rows and columns.
dataset['Year'].tail(10)
int(dataset['Year'].mean())
dataset['Year'] = dataset['Year'].replace(np.NaN,int(dataset['Year'].mean().round()))
dataset['Year'].tail(10)
dataset['City'].mode().item()
dataset['City'] = dataset['City'].replace(np.NaN, dataset['City'].mode().item())
dataset
# ## Data Transformation
# ### 1. Handling Categorical Data
dataset.dropna(inplace=True)
dataset
# +
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder() # creating instance of labelencoder
dataset['Stadium'] = labelencoder.fit_transform(dataset['Stadium'])
dataset['Home Team Name'] = labelencoder.fit_transform(dataset['Home Team Name'])
dataset['Away Team Name'] = labelencoder.fit_transform(dataset['Away Team Name'])
dataset
# -
# ## Data Reduction
# ### 1. Attribute Subset Selection
dataset = dataset[['Year','Stadium', 'Home Team Name', 'Away Team Name','Home Team Goals', 'Away Team Goals']]
dataset
# ## Splitting the data-set into Training and Test Set
from sklearn.model_selection import train_test_split
train, test = train_test_split(dataset, test_size = 0.2)
test
test
# # Example 2
# ## Import data and remove missing values
dataframe = pd.read_csv('diabetes_null.csv', na_values=['#NAME?'])
dataframe = dataframe.dropna(axis=0)
dataframe.head(10)
# ## Handling Noicy Data
# ### Function to find outliers
def find_outliers_tukey(data):
q1 = data.quantile(.25)
q3 = data.quantile(.75)
iqr = q3 - q1
floor = q1 - 1.5*iqr
ceiling = q3 + 1.5*iqr
outlier_indices = list(data.index[(data < floor) | (data > ceiling)])
outlier_values = list(data[outlier_indices])
return outlier_indices, outlier_values
# ### Finding outliers
glucose_indices, glucose_values = find_outliers_tukey(dataframe['Glucose'])
print("Outliers for Glucose")
print(np.sort(glucose_values))
bmi_indices, bmi_values = find_outliers_tukey(dataframe['BMI'])
print("Outliers for BMI")
print(np.sort(bmi_values))
# ### Deleting row with outlier
dataframe = dataframe.drop(bmi_indices)
dataframe.head(10)
# # Visualization
# ## Histogram
import matplotlib.pyplot as plt
dataframe.hist()
plt.show()
# ## Density Plot
dataframe.plot(kind='density', subplots=True, layout=(3,3), sharex=False)
plt.show()
# ## Box Plot
dataframe.plot(kind='box', subplots=True, layout=(3,3), sharex=False, sharey=False)
plt.show()
# ## Scatter Plot
from pandas.plotting import scatter_matrix
scatter_matrix(dataframe)
plt.show()
| Session 2/Data Preprocessing Code/Data Preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# Companies that had a 2016 entry, but not a 2017 entry
for company in EdgarCompanyInfo.objects.all().order_by('conformed_name'):
_2016 = EdgarSDFiling.objects.filter(company=company, date__year=2016)
if _2016.count() > 0:
_2017 = EdgarSDFiling.objects.filter(company=company, date__year=2017)
if _2017.count() == 0:
print(company.conformed_name, '({0})'.format(company.cik))
# Alcatel was bought by Nokia. Haven't researched others.
# ----
# Companies with less than four entries
for company in EdgarCompanyInfo.objects.all().order_by('conformed_name'):
if EdgarSDFiling.objects.filter(company=company).count() < 4:
print(company.conformed_name, '({0})'.format(company.cik))
| notebooks/Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spam data download
# This small script will allow csv spam datasets selected with this configuration:
# harvested-area_banana_total.csv.gz
# to be download to the data sources files for further manipulation
from tqdm import tqdm
import requests
# %reload_ext version_information
# %version_information tqdm, requests
# #### Configurate variables
baseUrl = 'http://spam05.harvestchoice.org/v2r0/csv/'
fileNameBase = '/home/jovyan/work/data/aqueduct/data_source/spamdata'
areas = ['physical-area']
crops = ['cereals_other', 'pulses_others', 'banana', 'barley', 'beans', 'cassava', 'chickpeas', 'cowpeas', 'groundnut', 'lentils', 'maize', 'millet', 'pigeonpeas', 'plantain', 'potato', 'rice', 'sorghum', 'soybean', 'sweet_potato', 'wheat', 'yams']
types = ['irrigated','rainfed']
#
for typez in xrange(0,len(types)):
for area in xrange(0,len(areas)):
for crop in xrange(0,len(crops)):
dataUrl=baseUrl+areas[area]+'/'+'spam2005v2r0'+'_'+areas[area]+'_'+crops[crop]+'_'+types[typez]+'.csv.gz'
response = requests.get(dataUrl, stream=True)
filename=fileNameBase+areas[area]+'_'+crops[crop]+'_'+types[typez]+'.csv.gz'
print filename
with open(filename, 'wb') as handle:
for chunk in tqdm(response.iter_content(chunk_size=128)):
handle.write(chunk)
| Aqueduct/lab/data_download.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
from transformers import *
# +
# config = BertConfig.from_json_file('./config.json')
# model = BertModel.from_pretrained('./model.ckpt.index', from_tf=True, config=config)
# -
torch_model = BertModel.from_pretrained('./') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
from tokenization import mecabTokenizer
tokenizer = mecabTokenizer(vocab_file="mecab_vocab_128000.txt")
text = '이 텍스트는 샘플인데 잘 될까여?'
tokens = tokenizer.tokenize(text)
idxs = torch.tensor([tokenizer.convert_tokens_to_ids(tokens)])
tokens, idxs
torch_model(idxs)[0]
| korquad/usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="TWArjayNEra8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} executionInfo={"status": "ok", "timestamp": 1592688115095, "user_tz": -120, "elapsed": 13379, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="88a1d2f1-6db1-49a5-8012-baf9b302b1e1"
""" to load assets from github """
import os
if not os.path.exists("assets"):
# !git clone https://github.com/desmond-rn/assets.git
else:
print("Data already here. Let's update it!")
# %cd assets
# # %rm -rf assets
# !git pull https://github.com/desmond-rn/assets.git
# %cd ..
# !ls assets
# + id="H04r0qIPEXnM" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688115097, "user_tz": -120, "elapsed": 13363, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
# %reset -f
# + id="BQGCar8FErGB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1592688117934, "user_tz": -120, "elapsed": 16191, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="0181631c-01d1-4ad7-dca2-fd7c8a9d644e"
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import keras
import sklearn
import random # pour randomiser les donnees train, val, et test si nécessaire
import assets.dataframes.movie.movie_dico as movie_dico # les dicos crees dans le notebook precedent
from ast import literal_eval
from keras.datasets import imdb
from sklearn import metrics
from sklearn import datasets
from sklearn import tree
from sklearn import ensemble
from sklearn import neighbors
from sklearn import linear_model
from sklearn import neural_network
from sklearn import svm
# + id="DgLxR3ZFfX5a" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688117936, "user_tz": -120, "elapsed": 16185, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
np.set_printoptions(precision=2)
# + [markdown] id="b-QAdrIMG1S9" colab_type="text"
# Ce notebook s'appui sur le travail effectué dans le notebook `nb1_retraitement_et_analyse_de_données.ipynb`. Dans ce dernier notebook, nous avons traité et analysé les données, et nous les avons sauvegardé sur github.
# + [markdown] id="Lf318LlaEv-M" colab_type="text"
# Objectif de ce notebook:
#
# - Predire les genres d'un film -> apprentissage 1
# - Predire le succes d'un film -> apprentissage 2
#
# Tout ceci a partir de l'intrigue, des motes cles, du budget, etc. tous connus avant la sortie du film.
# + [markdown] id="1aLiud8IFEWj" colab_type="text"
# #I - PREDICTION DU GENRE
# + [markdown] id="nV5KUy76E8h1" colab_type="text"
# On désire prédire le(s) genre(s) d'un film à partir de son intrigue.
# + [markdown] id="L57qDXjTBKe6" colab_type="text"
# ## Preparation
# + [markdown] id="sDT_qBfrGyfZ" colab_type="text"
# ### Chargeons les donnees
# + [markdown] id="0cxUn245DcaN" colab_type="text"
# Pour cette section, nous nous servons de la dataframe imputee (**4800** lignes), celle dans laquelle nous avions remplacé les valeurs (numériques) aberantes par les moyennes. Heureusement, nous n'avons pas besoin de ces valeurs non fiables pour cet apprentissage.
# + id="KpZTUNaRJGex" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 335} executionInfo={"status": "ok", "timestamp": 1592688118754, "user_tz": -120, "elapsed": 16989, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="c8e0aba4-4029-469e-b6ef-f6be84bb7e07"
converters={"genres": literal_eval, "keywords": literal_eval, "spoken_languages":literal_eval, "production_companies":literal_eval, "production_countries":literal_eval} # on veut convertir les str en listes
df_0 = pd.read_csv("assets/dataframes/movie/tmdb_5000_movies_imputed.csv", thousands=',', converters=converters)
# d'entree, supprimons les colones inutiles, y compris les "dummies" qui nous ont servis dans le notebook precedent
df_1 = df_0.drop(['Unnamed: 0'], axis=1)
for genre in movie_dico.genres_to_id.keys():
if genre != '<PAD>':
df_1 = df_1.drop(genre, axis=1)
df_1.head(2)
# + [markdown] id="LChWJ5m5HUVb" colab_type="text"
# ### Numerisation des donnes
# + [markdown] id="xzL9BMCNHfQj" colab_type="text"
# Pour obtenir les donnes nécessaires à l’apprentissage, transformons les intrigues, slogans, et mots clés (chaines de caractères) en liste d'entiers. Utilisons le dictionnaire imdb de Keras pour les mots simples, et nos propres dictionnaires pour les groupes de mots et les noms propres.
# + id="b4Q7579aWc0E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688119535, "user_tz": -120, "elapsed": 17757, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="4bbc6fcf-968a-4db1-e47e-b93ec652b9e1"
""" Utilisons le dict de keras pour les mots simples """
word_to_id = {k:v+2 for (k,v) in imdb.get_word_index().items()} # on obtient donc 3 places vides reservees, car 0 est naturellement reserve
word_to_id["<PAD>"] = 0 # pour completer les intrigues une fois transformes (tous a une meme taille)
word_to_id["<START>"] = 1 # pour le debut de sequence
word_to_id["<UNK>"] = 2 # pour les mots inconnues
id_to_word = {v:k for (k,v) in word_to_id.items()} # son dictionnaire inverse
""" Utilisons nos propres dictionnaires pour les mots cles, les compagnies, les pays et les langues, (groupes de mots) """
genre_to_id = movie_dico.genres_to_id # 0 fut reverve pour le padding
keyword_to_id = movie_dico.keywords_to_id
company_to_id = movie_dico.production_companies_to_id
language_to_id = movie_dico.spoken_languages_to_id
country_to_id = movie_dico.production_countries_to_id
genre_to_id["<PAD>"] = 0
keyword_to_id["<PAD>"] = 0
company_to_id["<PAD>"] = 0
language_to_id["<PAD>"] = 0
country_to_id["<PAD>"] = 0
id_to_genre = {v:k for (k,v) in genre_to_id.items()}
id_to_keyword = {v:k for (k,v) in keyword_to_id.items()}
id_to_company = {v:k for (k,v) in company_to_id.items()}
id_to_language = {v:k for (k,v) in language_to_id.items()}
id_to_country = {v:k for (k,v) in country_to_id.items()}
# + id="6Ad-D3CwDgLk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 301} executionInfo={"status": "ok", "timestamp": 1592688120383, "user_tz": -120, "elapsed": 18592, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="e2fe597a-0501-409e-a978-427916d57a2f"
""" Genere des fonctions pour transfromer l'overview ou la tagline """
def transform_1(column):
def func(line):
sentence = line[column]
if str(sentence) == 'nan':
return [2]
else:
tab = [1]
for el in sentence.split():
word = el.lower().replace(',', '').replace('.', '').replace(';', '').replace(':', '')
tab.append(word_to_id.get(word, 2))
return tab
return func
""" Genere des fonctions pour transfromer les mots cles, les compagnuies, les languesm ou les pays """
def transform_2(column):
def func(line):
tab = []
for el in line[column]:
if column == 'genres':
tab.append(genre_to_id[el])
elif column == 'keywords':
tab.append(keyword_to_id[el])
elif column == 'production_companies':
tab.append(company_to_id[el])
elif column == 'spoken_languages':
tab.append(language_to_id[el])
elif column == 'production_countries':
tab.append(country_to_id[el])
return tab
return func
""" On numerise tout ce qui est numerisable """
def numerize_dataframe(df):
df['overview'] = df.apply(transform_1('overview'), axis=1)
df['tagline'] = df.apply(transform_1('tagline'), axis=1)
df['genres'] = df.apply(transform_2('genres'), axis=1)
df['keywords'] = df.apply(transform_2('keywords'), axis=1)
df['production_companies'] = df.apply(transform_2('production_companies'), axis=1)
df['spoken_languages'] = df.apply(transform_2('spoken_languages'), axis=1)
df['production_countries'] = df.apply(transform_2('production_countries'), axis=1)
return df
df_1 = numerize_dataframe(df_1)
df_1.head(2)
# + id="ndBE2_fwldr1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} executionInfo={"status": "ok", "timestamp": 1592688120384, "user_tz": -120, "elapsed": 18574, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="81175962-2775-45c5-8b68-72272decde1a"
# testons l'overview et la tagline sur le i-eme film
col = 'overview'
i = 2
original = df_0.loc[i, col]
recomputed = ' '.join([id_to_word[j] for j in df_1.loc[i, col]])
print(df_1.loc[i, 'title'], '('+df_1.loc[i, 'release_date'][:4]+')')
print(col.upper(), "ORIGINAL: ", original)
print(col.upper(), "RETROUVEE: ", recomputed)
# + [markdown] id="UtQAuBbmAyjB" colab_type="text"
# ### Créons les données train et test
# + id="oXaYCwKIFeVp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 442} executionInfo={"status": "ok", "timestamp": 1592688120386, "user_tz": -120, "elapsed": 18558, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="fe22022a-2f00-48f5-ee7a-47b8b8822bd7"
""" Visualisons les entrees et les sorties necessaires """
print("Version originale:")
display(df_0.iloc[:5, [5, 7]]) # version originale
print("\nVersion numerisee:")
df_1.iloc[:5, [4, 6]] # version numerizee
# + [markdown] id="Mae5R9DOhxf3" colab_type="text"
# On utilise la methode **one_hot_encoding** pour encoder nos vecteurs. Chaque sequence de mot est transformee en sequence de longueur 'dimension'.
# + id="STv8kVJuzVaJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688120387, "user_tz": -120, "elapsed": 18541, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="6def81ee-33e8-41ca-864f-35c90b8d3e35"
""" une fonction pour transformer les sequences d'entiers en sequences de longueur diemnsion en prenant en compte la repetition d'un entier dans la sequence """
def vectorize_sequence(sequence, dimension):
result = np.zeros(dimension, dtype=np.int32)
for i in sequence:
result[i] += 1 # ajoute 1 a cet indice
return result
""" un test """
test = [3,1,1,3]
print("original: ", test)
print("vectorisee:", vectorize_sequence(test, 10))
# + id="oa99D_TGFLsv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688123640, "user_tz": -120, "elapsed": 21778, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="3165b0b6-ff15-4930-daca-6a9be80b7f1d"
""" on fait une permutation car dans la dataframe, les films populaires on tendance a se retrouver au debut """
size = len(df_1)
indices = np.arange(size)
# random.shuffle(indices)
shuffle = {i:k for (i, k) in enumerate(indices)} # un petit dictionnaire pour acceder a la dataframe apres
""" les x """
len_x = len(word_to_id) - 1 # moins le PAD
x = np.empty(shape=(size, len_x), dtype=int)
for (i, i_prime) in shuffle.items():
x[i] = vectorize_sequence(np.array(df_1.loc[i_prime, 'overview']), len_x)
""" les y """
len_y = len(genre_to_id) - 1
y = np.empty(shape=(size, len_y), dtype=int)
for (i, i_prime) in shuffle.items():
y[i] = vectorize_sequence(np.array(df_1.loc[i_prime, 'genres'])-1, len_y)
print('x shape =', x.shape)
print('y shape =', y.shape)
# + id="wNKn7tKhNbdw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} executionInfo={"status": "ok", "timestamp": 1592688123640, "user_tz": -120, "elapsed": 21761, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="dc4d2286-8065-4ea5-ef3c-9a9f8604fcf0"
""" visualisons a quoi ressemble les inputs et les outputs juste avant l'apprentissage """
i = 0
print('input :', x[i], '\t\t\t\t', df_0.loc[i, 'overview'])
print('output:', y[i], '\t', df_0.loc[i, 'genres'])
# + id="a2YlL-Jju9l2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688123641, "user_tz": -120, "elapsed": 21745, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="2b0d831c-d445-4afb-d3c4-d5f97995a260"
""" separons tain, val et test """
def split_data(x, y, len_train, len_val):
train = len_train
val = len_train + len_val
x_train = x[:train]
x_val = x[train:val]
x_test = x[val:]
y_train = y[:train]
y_val = y[train:val]
y_test = y[val:]
return x_train, x_val, x_test, y_train, y_val, y_test
len_train, len_val = 3000, 1000
x_train, x_val, x_test, y_train, y_val, y_test = split_data(x, y, len_train, len_val)
print("x shapes:", x_train.shape, x_val.shape, x_test.shape)
print("y shapes:", y_train.shape, y_val.shape, y_test.shape)
# + [markdown] id="Bthl0H_DFZDH" colab_type="text"
# ## Apprentissage
# + [markdown] id="7TJl5HdSxE2N" colab_type="text"
# ### Réseau de neuronnes
# + [markdown] colab_type="text" id="WVZoaU-FzZFf"
# #### Model
# + [markdown] id="UH7uokQyImSx" colab_type="text"
# On utilise un réseau de neurones (de la librairie Keras) à deux couches complètement connectées avec une fonction d'activation relu.
# + id="QoaIyUM9HO00" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} executionInfo={"status": "ok", "timestamp": 1592688123961, "user_tz": -120, "elapsed": 22049, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="01f3d053-8c4b-4e64-f5cc-1f37644f4c08"
model = keras.models.Sequential()
model.add(keras.layers.Dense(256,
# kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu',
input_shape=(len_x,))) # len_x = 88586
# model.add(keras.layers.Dropout(rate=0.1))
model.add(keras.layers.Dense(64,
# kernel_regularizer=keras.regularizers.l2(0.001),
activation='relu'))
# model.add(keras.layers.Dropout(rate=0.1))
model.add(keras.layers.Dense(len_y,
activation='sigmoid')) # len_y = 20
""" Vérifions les parametres du modele """
model.summary()
print("\nInfos supplémentaires")
for index in [0, 1, 2]:
print("---------------------------")
weights, bias = model.layers[index].get_weights()
print("layer:", index)
print("weights shape:", weights.shape)
print("bias shape:", bias.shape)
# + [markdown] id="Wcmfm-agJprN" colab_type="text"
# Pour la compilation, on utlise:
# - l'optimiseur Adam
# - la `binary_crossentropy` pour fonction loss
# - on observe l'accuracy
# + id="Rqzi8dBmKELQ" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688123962, "user_tz": -120, "elapsed": 22033, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
model.compile(optimizer=keras.optimizers.Adam(0.001),
loss=keras.losses.binary_crossentropy, # La loss decent un peu plus bas avec la binary crossentropy
# loss=keras.losses.mse,
metrics=[keras.metrics.binary_accuracy])
# + [markdown] id="Dlswwf-CKTho" colab_type="text"
# On lance ensuite l'apprentissage sur 15 époques par paquets de 512. On opte pour la méthode d’**early stopping** pour lutter contre le surapprentissage.
# + id="9RjfRcRsKSzv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 544} executionInfo={"status": "ok", "timestamp": 1592688218894, "user_tz": -120, "elapsed": 116953, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="0b46ce34-e5a0-4bd4-f46b-2c4d54ed413d"
history = model.fit(x_train,
y_train,
epochs=15,
batch_size=512,
validation_data=(x_val, y_val))
# + [markdown] id="hXurbnWtK3LI" colab_type="text"
# Observons la loss et l'accuracy.
# + id="56_vw-ghK5th" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} executionInfo={"status": "ok", "timestamp": 1592688219305, "user_tz": -120, "elapsed": 117347, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="896fe109-5bb7-4c52-b9ad-ee679d2f80ff"
history_dict = history.history
print(history_dict.keys(), '\n')
loss = history.history['loss']
val_loss = history.history['val_loss']
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
epochs = range(1, len(loss)+1)
fig,(ax0,ax1)=plt.subplots(1,2,figsize=(10, 3))
ax0.plot(epochs, loss, 'b', label='training loss')
ax0.plot(epochs, val_loss, 'g.', label='validation loss')
ax0.set_xlabel('epochs')
ax0.set_ylabel('loss')
ax0.legend();
ax1.plot(epochs, acc, 'b', label='training accuracy')
ax1.plot(epochs, val_acc, 'g.', label='validation accuracy')
ax1.set_xlabel('epochs')
ax1.set_ylabel('accuracy')
ax1.legend();
plt.suptitle("Training and validation loss and accuracy", y=1.1, fontsize='xx-large')
plt.tight_layout()
# + [markdown] id="9qMG5GqMoZ8f" colab_type="text"
# Bien sur, pas de surapprentissage. On a bien choisi notre nombre d'epochs.
# + [markdown] id="smYT9R89EOgs" colab_type="text"
# Maintenant faisons, puis observons quelques prédictions. Pour l'instant, on choisi comme seuil `0.5`. On recherchera un meulleiur seuil plus tard.
# + id="P3OWQlJwRjrM" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688220313, "user_tz": -120, "elapsed": 118343, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
hat_y_test_proba = model.predict(x_test)
# + id="esiO6vaRPX5T" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688220314, "user_tz": -120, "elapsed": 118334, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
""" une fonction pour mmieux visulaiser les predictions des genres """
def denumerize(numbers):
words = []
for i, nb in enumerate(numbers):
if nb == 1:
words.append(id_to_genre[i+1])
return words
# + id="zgrp3rWjLxqk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} executionInfo={"status": "ok", "timestamp": 1592688220316, "user_tz": -120, "elapsed": 118325, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="c1ed91eb-4275-4fe0-cc7b-9d8bfe45e02b"
zeros = np.zeros_like(hat_y_test_proba, dtype=int)
ones = np.ones_like(hat_y_test_proba, dtype=int)
hat_y_test = np.where(hat_y_test_proba >= 0.5*ones, ones, zeros) # on recherchera un meilleur seuil plus tard
def print_prediction(hat_y_test, nb):
start = len_train + len_val #indique le debut des donnes test la dataframe
for i in range(0, size-start, (size-start)//nb):
print(df_1.loc[shuffle[i+start], 'title'], '('+df_1.loc[shuffle[i+start], 'release_date'][:4]+')')
print("original: ", y_test[i], " - ", denumerize(y_test[i]))
print("prediction:", hat_y_test[i], " - ", denumerize(hat_y_test[i]), "\n")
print_prediction(hat_y_test, 10)
# + [markdown] id="yIhpZ1EylWro" colab_type="text"
# On constate que les prédictions sont assez bonnes. Mais le model a l'air d'avoir peur de se tromper, et ne donne très souvent aucune prédiction. La figure 20 confirme bien cela. On résoudra ce problème en ajustant le seuillage dans la suite.
# + id="47cs2_oQKWwE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 258} executionInfo={"status": "ok", "timestamp": 1592688221111, "user_tz": -120, "elapsed": 119101, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="da1c1537-15d2-43f3-83cd-00ce4ab80739"
""" Confiance en ses predictions """
fig,(ax0,ax1)=plt.subplots(1,2,figsize=(10,3))
ax0.hist(hat_y_test_proba[y_test==0], bins=50, edgecolor="k", label="non appartenance", color='r')
ax1.hist(hat_y_test_proba[y_test==1], bins=50, edgecolor="k", label="appartenance", color='g');
ax0.legend()
ax1.legend()
plt.suptitle("Fiabilité des predictions sur le genre", y=1.1, fontsize='xx-large')
plt.tight_layout()
# + [markdown] id="5xilrFotPl5C" colab_type="text"
# Notre modele est tres prudent(LOL). Il est tres sur quand il s'agit de predire a quelle categorie un film n'appartient pas. Par contre, il se trompe la majorite des temps quand il s'agit d'effectivemetn predire les categories auxquelles appartient le film.
# + id="30pR-tIKK6tv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688221929, "user_tz": -120, "elapsed": 119900, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="5341e8cb-8cd1-4e7a-f707-903428769cf1"
""" Calculons le score d'accuracy """
(loss, acc) = model.evaluate(x_test, y_test)
acc
# + id="zkNK4VVITh5i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1592688221930, "user_tz": -120, "elapsed": 119885, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="e6f1e43b-51f7-4a57-9b7d-b7851cbdd223"
""" Confirmons ce score """
acc = len(hat_y_test[hat_y_test == y_test]) / len(y_test.flatten())
print(round(acc, 3))
# + [markdown] id="Tr7wA8QMPHfb" colab_type="text"
# 90 %, pas mal!
# + [markdown] id="A05IbbmVxocP" colab_type="text"
# #### Analyse
# + [markdown] id="cl4RJiQCOYLH" colab_type="text"
# On veut à présent trouver un seuil meilleur que le 0.5 utilisé jusqu’à présent. Pour cela, calculons la précision et le rappel. On décrète que la classe positive c'est la classe des 1 (pour chacun des 20 genres possibles). C'est de toute évidence la classe minoritaire.
# + id="g_C6TwwsL2Tf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 111} executionInfo={"status": "ok", "timestamp": 1592688221932, "user_tz": -120, "elapsed": 119869, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="4893b429-f9fa-465d-f5d5-5d3c838141a3"
""" la matrice de confusion """
c_matrix = sklearn.metrics.confusion_matrix(y_test.flatten(), hat_y_test.flatten())
def print_confusion(c_matrix):
display(pd.DataFrame(data=c_matrix, columns=[ r"^-", r"^+"], index=[r"-", r"+"]))
print_confusion(c_matrix)
# + id="uI5a1GDdO2Ik" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", "timestamp": 1592688221933, "user_tz": -120, "elapsed": 119853, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="3dfd5c8e-57e7-47fe-9001-9d9d08c2b45e"
def print_score(c_matrix):
TN = c_matrix[0,0]
FP = c_matrix[0,1]
FN = c_matrix[1,0]
TP = c_matrix[1,1]
precision = TP / (TP + FP) # accuracy of the positive predictions
recall = TP / (TP + FN) # ratio of positive instances that are correctly detected
f1 = 2 / (1/precision + 1/recall)
print("precision = %.2f"%precision)
print("recall = %.2f"%recall)
print("\nf1 score = %.2f"%f1)
print_score(c_matrix)
# + [markdown] id="ER7NGBhBp8Fl" colab_type="text"
# Mediocre. On confirme ci-dessous qu'on ne peut avoir a la fois une precision et un rappel eleves.
# + id="IWHMsfQnsJmJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 411} executionInfo={"status": "ok", "timestamp": 1592688222456, "user_tz": -120, "elapsed": 120358, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="001bcfe9-9163-46a7-d6d3-53e47b82cd99"
""" courbe recall/precision """
precisions, recalls, thresholds = sklearn.metrics.precision_recall_curve(y_test.flatten(), hat_y_test_proba.flatten())
wanted_recall = 0.4 # on desire obtenir une recall de 0.4. Ca me semble un bon compromis
def plot_precision_vs_recall(precisions, recalls):
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.plot([wanted_recall, wanted_recall], [0, 1], 'r--')
plt.xlabel("recall", fontsize=16)
plt.ylabel("precision", fontsize=16)
plt.axis([0, 1, 0, 1])
plt.title("Courbe precision/recall", fontsize=16)
plt.figure(figsize=(8, 6))
plot_precision_vs_recall(precisions, recalls)
# + id="HD1_x62ww1AD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1592688222457, "user_tz": -120, "elapsed": 120343, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="ff4879d7-1502-443d-cc5b-78dd35c77602"
wanted_threshold = thresholds[abs(recalls[:-1]-wanted_recall) <= 5e-4][0]
print("threshold recherchee: %.2f"%wanted_threshold)
# + [markdown] id="ODzmt7611K9y" colab_type="text"
# Comme dit plus haut, on ne peut avoir les deux bon au meme moment. Or:
# - **precision fort**: on ne selectionne que les vrai positifs.
# - **racall fort:** on s'est tres peu trompé sur les FN. Donc on a classe beacoup de données comme positives.
#
# Ici nous voulons obtenir un fort rappel. Car ce n'est pas grave si notre réseau donne quelques mauvaises prédictions sur le genre, du moment qu'on a au moins une catégorie dans laquelle classer le film. Cependant, la ci-haut montre que la dépendance précision/rappel est loin d’être idéale. Un compromis acceptable entre bonne précision et bon rappel peut être pris tel que rappel=0.4. Qui correspond a un seuil de 0.39.
# + id="RvugfzC03d0U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", "timestamp": 1592688222458, "user_tz": -120, "elapsed": 120328, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="352a6e7a-3f5f-4fb7-96f1-7adf8c0bb972"
hat_y_test = np.where(hat_y_test_proba >= wanted_threshold*ones, ones, zeros)
""" les nouveaux scores attendus """
c_matrix = sklearn.metrics.confusion_matrix(y_test.flatten(), hat_y_test.flatten())
print_score(c_matrix)
# + [markdown] id="TgvDap_96dXE" colab_type="text"
# Le f1 score est deja meilleure. C'est bon signe.
# + id="gxWyy-fD2TcN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} executionInfo={"status": "ok", "timestamp": 1592688222459, "user_tz": -120, "elapsed": 120312, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="831e1627-7a23-436e-a030-fa8ac3c4d84e"
""" les nouvelles predictions """
print_prediction(hat_y_test, 10)
# + [markdown] id="VIhszuuq542u" colab_type="text"
# Cette fois, les prédictions sont bien meilleures que dernièrement. Même quand le model se trompe, la catégorie qu'il indique n'est pas très éloignée de la vraie catégorie, comme nous pouvons l’observer sur la matrice de corrélations.
# + [markdown] id="4ar-iYqQxJGB" colab_type="text"
# ### Arbres de decisions
# + [markdown] colab_type="text" id="JDAwYQYqzYWh"
# #### Model
# + [markdown] id="PnjXqnZUBZgg" colab_type="text"
# Vu qu'il s'agit d'une classification multi-label, nous devons utiliser un estimateur qui supporte l’approche **one_vs_all**. On se tourne naturellement vers les arbres de décisions.
# + id="k0oVeH_pDeb8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688222460, "user_tz": -120, "elapsed": 120295, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="6c47b4f8-07df-4e04-c490-6c780f3c7833"
""" une fois de plus separons tain et test """
len_train, len_val = 4000, 0 # pas de donnee val cette fois. On fait une cross-validation.
x_train, x_val, x_test, y_train, y_val, y_test = split_data(x, y, len_train, len_val)
print("x shapes:", x_train.shape, x_val.shape, x_test.shape)
print("y shapes:", y_train.shape, y_val.shape, y_test.shape)
# + id="1qk45687CCGd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} executionInfo={"status": "ok", "timestamp": 1592688250521, "user_tz": -120, "elapsed": 148338, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="72de172f-270d-40ae-939d-19095eb30603"
""" parametrisation du modele """
tree_model = sklearn.tree.DecisionTreeClassifier(max_depth=None, random_state=42)
# tree_model = sklearn.neural_network.MLPClassifier()
tree_model.fit(x_train, y_train)
# + id="AKz-Co5jCeYP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} executionInfo={"status": "ok", "timestamp": 1592688250522, "user_tz": -120, "elapsed": 148322, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="4d51bcd8-6522-4e37-8ceb-d41507f08f15"
""" faisons quelques predictions """
# hat_y_test_proba = tree_model.predict_proba(x_test)
hat_y_test = tree_model.predict(x_test)
# hat_y_test = np.where(hat_y_test_proba >= 0.5*ones, ones, zeros)
print_prediction(hat_y_test, 10)
# + [markdown] id="P-99HK0AT968" colab_type="text"
# Les prédictions ne sont pas vraiment meilleures que les précédentes. En l’occurrence, le model semble fixé sur la dualité 'Drama' - 'Romance'. C’est quand même vrai que les romances demandent beaucoup de drames, assez pour en décourager certains ? :)
# + [markdown] colab_type="text" id="MV-KtsZayMQ4"
# #### Analyse
# + id="ho_wdbd6KmFV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1592688250810, "user_tz": -120, "elapsed": 148592, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="5f8b3688-5f75-420d-890b-f61aa79975d9"
""" Score severe """
acc = tree_model.score(x_test, y_test)
print(round(acc, 3))
# + [markdown] id="HQb9BjcIQpIj" colab_type="text"
# Ce score est vraiment trop sevère. On est quand meme rassuré de savoir que pour 800 x 0.055 = **44 films**, les categories sont exactement celles attendues.
# + id="r9KEVo8YFY4D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 196} executionInfo={"status": "ok", "timestamp": 1592688250812, "user_tz": -120, "elapsed": 148577, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="56648123-e88b-40b0-fcec-86c27a38c88a"
""" Matrice de confusion """
c_matrix = sklearn.metrics.confusion_matrix(y_test.flatten(), hat_y_test.flatten())
print_confusion(c_matrix)
print()
print_score(c_matrix)
# + [markdown] id="wNRgdSKZSkRv" colab_type="text"
# Le score F1 confirme bien que ce modèle est moins performant que le réseau de neurones. C'est assez normal, vu que le reseau de neuronne a ete bien plus parametrise que celui ci.
# + [markdown] id="TipLPjQZyWqI" colab_type="text"
# # II - PREDICTION DU SUCCES
# + [markdown] id="Pc_lSC3uFG-0" colab_type="text"
# On désire prédire le succès/échec d'un film ("return_type") en fonction du **budget** qui est investi, de la **durée** du film, des **genres** de ce film, des **langues** qui y figurent, des **compagnies** de production qu’on embauche pour le produire, et des **mots clés** qui caractérisent le film.
#
# + [markdown] id="lO4GvX0tBYwF" colab_type="text"
# ## Preparation
# + [markdown] id="HXgQNlixBj6h" colab_type="text"
# ### Cheargeons les donnes
# + [markdown] id="cSOy3Azly40O" colab_type="text"
# Pour cette section, nous utiliserons la dataframe 2. Bien que moins riche (environ **3211** lignes) que la premeire, toutes ses valeurs numeriques sont fiables. On se servira du Bagging pour remiedier a la petitesse des donnees.
# + id="2Mld3AxYB44a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 335} executionInfo={"status": "ok", "timestamp": 1592688250813, "user_tz": -120, "elapsed": 148560, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="09d2197d-2dc6-430e-a766-c66a15d2ef07"
df_0 = pd.read_csv("assets/dataframes/movie/tmdb_5000_movies_omitted.csv", thousands=',', converters=converters) # pour l'apprentissage 2
# d'entree, supprimons les colones inutiles
df_2 = df_0.drop(['Unnamed: 0'], axis=1)
for genre in genre_to_id.keys():
if genre != '<PAD>':
df_2 = df_2.drop(genre, axis=1)
df_2.head(2)
# + [markdown] id="-Hw7YYlOBm5l" colab_type="text"
# ### Numerisons les donnnes
# + id="VZC07WrUCMFW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 301} executionInfo={"status": "ok", "timestamp": 1592688251996, "user_tz": -120, "elapsed": 149725, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="281d3904-1524-4780-ac8c-e1ea3b555527"
df_2 = numerize_dataframe(df_2)
df_2.head(2)
# + id="swIOZRCUBgni" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} executionInfo={"status": "ok", "timestamp": 1592688251997, "user_tz": -120, "elapsed": 149709, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="7b714f2e-6b0d-409c-b9b3-3755168d2c3b"
# testons les mots cles, sur le i-eme film
i = 0
original = df_0.loc[i, 'keywords']
recomputed = ', '.join([id_to_keyword[j] for j in df_2.loc[i, 'keywords']])
print(df_2.loc[i, 'title'], ':', df_2.loc[i, 'release_date'])
print("MOTS CLES ORIGINAUX: ", original)
print("MOTS CLES RETROUVES: ", recomputed)
# + [markdown] id="-gvNbnIiBpma" colab_type="text"
# ### Creons les donnees train, val et test
# + [markdown] id="QqzCDeFXm-yB" colab_type="text"
#
#
# ---
#
#
# On anticipe, on sait qu'on veut tester le modèle sur des donnees qui sont equilibrées. C'est a dire autant de 'succes' que de 'failure' que de 'massive succes'. On place donc assez de 'failures' et de 'massive succes' a la fin de la dataframe.
#
# ---
#
# En fin de compte, ce n'est pas interressant de la faire! En effet, lorsque la reapartition des donnes sur lesquels on teste est drastiquement differente de la repartition des donnes d'entrainement, les scores mesures sont tres faibles.
#
# + id="veIfLVJ0rPsK" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688251999, "user_tz": -120, "elapsed": 149697, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
""" on fait dabord une permutation totale complete de la dataframe """
size = len(df_2)
indices = np.arange(size)
# random.shuffle(indices)
######################################
# """ placons "number" elements de type "type" a la fin de la df, a partir de "from" """
# def put_back(type, number, start_at):
# counter = 0
# i = 0
# while i < size and counter < number:
# if df_2.loc[i, 'return_type'] == type:
# tmp = indices[i]
# indices[i] = indices[start_at-counter]
# indices[start_at-counter] = tmp
# counter += 1
# i += 1
# len_test = 450
# len_each = len_test//3
# put_back("massive success", len_each, size-1)
# put_back("failure", len_each, size-1 - len_each)
# put_back("success", len_test//3, size-1 - 2*len_each)
########################################
shuffle = {k:i for (i, k) in enumerate(indices)} # un petit dictionnaire pour recapituler et acceder a la dataframe apres
########################################
# """ un test, les 10 derniers doivent etres des 'massive success' """
# for i in list(shuffle.keys())[-10:]:
# print(df_2.loc[i, 'return_type'])
########################################
# + id="bft1xXLbB0aU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 408} executionInfo={"status": "ok", "timestamp": 1592688252000, "user_tz": -120, "elapsed": 149685, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="0c361ed2-9560-4b26-c14a-b3bbe9998635"
""" Visualisons les entrees et les sorties necessaires """
display(df_0.iloc[:5, [9, 8, 7, 18, 19, 6, 12]])
print()
df_2.iloc[:5, [8, 7, 6, 17, 18, 5, 11]]
# + [markdown] id="PYpMKU61dovl" colab_type="text"
# Un vecteur d'entree $x_i$ est juste la concatenation de:
# - La forme normalisée du budget $\frac{budget \ du \ film \ i}{maximum \ des \ budgets}$ (de longeur $1$)
# - La forme normalisée de la durée
# - La forme vectorisée des genres (de longeur $20$)
# - La forme vectorisée des langues (de longueur 62)
# - La forme vectorisée des compagnies de production (de longueur 5017)
# - La forme vectorisée des mots clés (de longueur 9813)
#
# Ce qui nous donne une longeur totale de $14914$.
#
# Un scalaire $y_i$ vaut :
# - 0 pour la classe "failure"
# - 1 pour la classe "success"
# - 2 pour la classe "massive success"
#
# Implementons tout cela.
# + id="3y78V9_kBvw-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688252369, "user_tz": -120, "elapsed": 150037, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="f225b4e5-023e-43b4-de46-9b7e5e7d6d71"
""" les x """
len_b, max_b = 1, max(df_2['budget']) # max_b servira pour normaliser des budgets entre 0 et 1
len_r, max_r = 1, max(df_2['runtime'])
len_g = len(genre_to_id) - 1
len_l = len(language_to_id) - 1
len_c = len(company_to_id) - 1
len_k = len(keyword_to_id) - 1
x_b = np.empty(shape=(size,), dtype=float)
x_r = np.empty(shape=(size,), dtype=float)
x_g = np.empty(shape=(size, len_g), dtype=float)
x_l = np.empty(shape=(size, len_l), dtype=float)
x_c = np.empty(shape=(size, len_c), dtype=float)
x_k = np.empty(shape=(size, len_k), dtype=float)
for (i, i_prime) in shuffle.items():
x_b[i] = np.array([df_2.loc[i_prime, 'budget']]) / max_b # juste pour ajouter des 0 et completer la taille
x_r[i] = np.array([df_2.loc[i_prime, 'runtime']]) / max_r
x_g[i] = vectorize_sequence(np.array(df_2.loc[i_prime, 'genres']), len_g)
x_l[i] = vectorize_sequence(np.array(df_2.loc[i_prime, 'spoken_languages']), len_l)
x_c[i] = vectorize_sequence(np.array(df_2.loc[i_prime, 'production_companies']), len_c)
x_k[i] = vectorize_sequence(np.array(df_2.loc[i_prime, 'keywords']), len_k)
len_final_x = len_b + len_r + len_g + len_l + len_c + len_k # la longeur finale d'un vecteur input d'entree
# len_final_x = len_b + len_g + len_l
x = np.empty(shape=(size, len_final_x), dtype=float)
for i in range(size):
x[i] = np.concatenate((x_b[i], x_r[i], x_g[i], x_l[i], x_c[i], x_k[i]), axis=None)
# x[i] = np.concatenate((x_b[i], x_g[i], x_l[i]), axis=None) # pour reduire le nombre d'inputs au cas ou
#############################
# var_per_x = 5 # budget, mot cle, etc..
# max_len_x = max(len_g, len_l, len_c, len_k) # la plus grande des longeurs
# shape_x = (var_per_x, max_len_x) # chaque tenseur d'entree a cette forme
# x = np.empty(shape=(size, shape_x[0], shape_x[1]), dtype=float) # total des features
# for (i, i_prime) in shuffle.items():
# x[i][0] = np.array([df_2.loc[i_prime, 'budget']] + [0]*(shape_x[1]-1)) / max_b # juste pour ajouter des 0 et completer la taille
# x[i][1] = vectorize_sequence(np.array(df_2.loc[i_prime, 'genres']), shape_x[1])
# x[i][2] = vectorize_sequence(np.array(df_2.loc[i_prime, 'spoken_languages']), shape_x[1])
# x[i][3] = vectorize_sequence(np.array(df_2.loc[i_prime, 'production_companies']), shape_x[1])
# x[i][4] = vectorize_sequence(np.array(df_2.loc[i_prime, 'keywords']), shape_x[1])
#############################
""" les y """
target_to_id = {'failure':0, 'success':1, 'massive success':2} # un petit dictionnaire pour les labels
id_to_target = {v:k for (k, v) in target_to_id.items()}
y = np.empty(shape=(size,), dtype=int)
for (i, i_prime) in shuffle.items():
y[i] = target_to_id[df_2.loc[i_prime, 'return_type']]
# print(len_b , len_g , len_l , len_c ,len_k)
print('x shape =', x.shape)
print('y shape =', y.shape)
# + id="0Z_jyOqWV3Dx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1592688252371, "user_tz": -120, "elapsed": 150022, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="309854f8-4de0-4aae-8ee7-34a47acd8742"
""" visualisons a quoi ressemble les inputs et les outputs juste avant l'apprentissage """
i = 0
print('input :', x[i])
print()
print('output:', y[i])
# + id="ZXbMFCEHWQgQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1592688252372, "user_tz": -120, "elapsed": 150007, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="ba0a227f-0831-45e3-d4b0-e3a6db7492a3"
""" enfin separons tain et test """
len_train, len_val = 2800, 0
x_train, x_val, x_test, y_train, y_val, y_test = split_data(x, y, len_train, len_val)
print("x shapes:", x_train.shape, x_val.shape, x_test.shape)
print("y shapes:", y_train.shape, y_val.shape, y_test.shape)
# + id="wuW2QfnimslX" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688252373, "user_tz": -120, "elapsed": 149994, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
########################################
# """ verifions bien que les labels sont equilibres """
# print(y_test)
########################################
# + [markdown] id="fWLMQL35BblB" colab_type="text"
# ## Apprentissage
# + [markdown] id="LoX6ilt8SmO5" colab_type="text"
# Il s'agit d'un apprentissage multiclasses. Nous allons essayer plusieurs "bons" models individuellment, et puis nous allons les grouper ensemble pour former un model qu'on espère sera meilleur. Pour ce faire, nous utiliserons exclusivement la librarie ScikitLearn.
# + [markdown] colab_type="text" id="O4YLSC_4ySaI"
# ### Reseau de neuronnes
# + [markdown] id="-QSnINSMzNYI" colab_type="text"
# #### Model
# + [markdown] id="L1gmmXvlcxLd" colab_type="text"
# Jusqu'a present les reseaux de neuronnes se sont montrés efficaces. Heuresement ScikitLearn propose une implementtation pour les reseaux de neuronnes. En plus c'est très facile de preciser les couches intermédiares.
# + id="vkQenC5AdBMg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} executionInfo={"status": "ok", "timestamp": 1592688273773, "user_tz": -120, "elapsed": 171383, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="e67fde33-2e42-4250-8116-d76630f6e1bf"
# model_3 = sklearn.linear_model.SGDClassifier(random_state=42, tol=1e-3, max_iter=5000)
model_3 = sklearn.neural_network.MLPClassifier(hidden_layer_sizes=(100, 10), early_stopping=True)
model_3.fit(x_train, y_train)
# + id="d3IqqgQ9lqAQ" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688273774, "user_tz": -120, "elapsed": 171370, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
hat_y_test = model_3.predict(x_test)
# + id="vGckax7dmfmg" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688273774, "user_tz": -120, "elapsed": 171361, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
""" faisons des predictions """
def prediction(hat_y_test, nb):
start = len_train + len_val #indique le debut des donnes test la dataframe
index = np.arange(start, size, (size-start)//nb)
index_prime = []
df = df_0.iloc[:, [2, 3, 12]].copy()
# df = df_0.iloc[:, [2, 3, 9, 7, 18, 19, 6, 12]].copy()
# df = df_2.iloc[:, [1, 2, 8, 6, 17, 18, 5, 11]].copy()
df['prediction'] = 0 # creons juste une nouvelle colonne
for i in index:
df.loc[shuffle[i], 'prediction'] = id_to_target[hat_y_test[i-start]]
index_prime.append(shuffle[i])
display(df.iloc[index_prime, :])
# + id="TNUSOAZuLG9v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"status": "ok", "timestamp": 1592688273775, "user_tz": -120, "elapsed": 171351, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="71180d53-bcdd-4a0b-cab5-005d88ff2595"
prediction(hat_y_test, 10)
# + [markdown] colab_type="text" id="l1CtBxgYySaJ"
# #### Analyse
# + [markdown] id="r-BVMx4XO7S4" colab_type="text"
# On dessine la matrice de confusion.
# + id="PnXthAwqitU5" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688273776, "user_tz": -120, "elapsed": 171341, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
""" Une fonction pour afficher """
def plot_conf(y_test, hat_y_test):
print(str(id_to_target)[1:-1]) # afficahge de la legende
print()
c_matrix = sklearn.metrics.confusion_matrix(y_test, hat_y_test)
# sums = c_matrix.sum(axis=1, keepdims=True)
# norm_c_matrix = c_matrix / sums
fig, ax = plt.subplots(1, 1)
img = ax.matshow(c_matrix);
plt.colorbar(img)
xlabels = ['^'+str(x) for x in range(3)]
ax.set_xticklabels(['']+xlabels)
# + id="qiNGYS_mLQyb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 305} executionInfo={"status": "ok", "timestamp": 1592688274044, "user_tz": -120, "elapsed": 171594, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="de651203-6995-4bda-c4ea-f49c0091513b"
plot_conf(y_test, hat_y_test)
# + [markdown] id="qW0efKeI_jiK" colab_type="text"
# On calcule les scores. Le f1 score par sommation des score sur chaque categorie ponderes par le nombre d'instaces positives (TP) pour chaque classes.
# + id="G0DiX5sC7BC5" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688274045, "user_tz": -120, "elapsed": 171581, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
""" calcul des scores """
def print_scores(y_test, hat_y_test, title):
print(title)
f1 = sklearn.metrics.f1_score(y_test, hat_y_test, average='weighted')
print(" - f1 score: %.4f"%f1)
acc = np.mean(hat_y_test == y_test)
# acc = sklearn.metrics.accuracy_score(y_test, hat_y_test)
print(" - accuracy: %.4f"%acc)
# + id="_9YEo1-pLTIq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1592688274046, "user_tz": -120, "elapsed": 171571, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="3b102721-7859-4bf5-f00a-c41e4170e9d1"
print_scores(y_test, hat_y_test, "RESEAU DE NEURONES")
# + [markdown] id="K9QQge16ml2Q" colab_type="text"
# On voit que le modèle prédit bien les succès, probablement parce qu'il y a beaucoup plus de films labélisées "success" dans nos données.
# + [markdown] colab_type="text" id="fEt04Z3Kyj1I"
# ### Regression Logistique
# + [markdown] colab_type="text" id="ATHvmKr-zSIJ"
# #### Model
# + id="CFjgt3qGno55" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} executionInfo={"status": "ok", "timestamp": 1592688299930, "user_tz": -120, "elapsed": 197436, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="2eb57d44-3bc0-43fd-db01-47c6bb63e8d9"
model_4 = sklearn.linear_model.LogisticRegression(random_state=42, max_iter=1000)
model_4.fit(x_train, y_train)
# + id="jgrg1ub4EkAc" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688299931, "user_tz": -120, "elapsed": 197423, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
hat_y_test = model_4.predict(x_test)
# + id="NbBhdEOaoE95" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"status": "ok", "timestamp": 1592688299932, "user_tz": -120, "elapsed": 197412, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="54dce34c-db10-45fe-ad61-9c2292231ef7"
prediction(hat_y_test, 10)
# + [markdown] colab_type="text" id="cCLGPH2Oyj1J"
# #### Analyse
# + id="FpIvv9CuC8Vd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 305} executionInfo={"status": "ok", "timestamp": 1592688300205, "user_tz": -120, "elapsed": 197668, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="72aa3d1a-15e4-4c05-fb92-2ac8751c3eaa"
plot_conf(y_test, hat_y_test)
# + id="y_0bWltYC9pl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1592688300206, "user_tz": -120, "elapsed": 197654, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="350ff6af-ec35-4947-89c7-457bf5cd638a"
print_scores(y_test, hat_y_test, "REGRESSION LOGISTIQUE")
# + [markdown] colab_type="text" id="hmN4OqfoykNA"
# ### Foret Aleatoire
# + [markdown] colab_type="text" id="UJpjyy6OzSfo"
# #### Model
# + id="iHLbB3_qpork" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} executionInfo={"status": "ok", "timestamp": 1592688452686, "user_tz": -120, "elapsed": 350117, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="77b02129-59d0-47cc-d978-2014a7cb0779"
# model_5 = sklearn.ensemble.RandomForestClassifier(random_state=42)
model_5 = sklearn.ensemble.RandomForestClassifier(n_estimators=500, max_leaf_nodes=None, n_jobs=-1, random_state=42)
model_5.fit(x_train, y_train)
# + id="WRdar93tEZIq" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688453195, "user_tz": -120, "elapsed": 350614, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
hat_y_test = model_5.predict(x_test)
# + id="9IR2_-MGpue-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"status": "ok", "timestamp": 1592688453196, "user_tz": -120, "elapsed": 350602, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="7ad5f993-0604-4ac8-cbc6-9455d91cf046"
prediction(hat_y_test, 10)
# + [markdown] colab_type="text" id="NLHcACwxykNB"
# #### Analyse
# + colab_type="code" id="LzlEzfwMDjO7" colab={"base_uri": "https://localhost:8080/", "height": 305} executionInfo={"status": "ok", "timestamp": 1592688453197, "user_tz": -120, "elapsed": 350586, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="94ee8333-9791-4111-c426-e95500b8aa5a"
plot_conf(y_test, hat_y_test)
# + colab_type="code" id="wGPsBTNkDjPA" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1592688453198, "user_tz": -120, "elapsed": 350569, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="01c0b49a-d140-4576-f99f-907b9ab5271c"
print_scores(y_test, hat_y_test, "FORET ALEATOIRE")
# + [markdown] id="X1T7YS1gkMev" colab_type="text"
# Il s’agit-là de notre meilleur score jusqu'à présent. Est-ce qu’on peut faire mieux ?
# + [markdown] colab_type="text" id="g9H4x6Zsykih"
# ### Ensemble Learning
# + [markdown] colab_type="text" id="-7m7xkbozS6S"
# #### Model
# + [markdown] id="ycYpssqMp35t" colab_type="text"
# Essayons de combiner tous nos modeles dans un Soft Voting Clasifier et observons.
# + id="7jPLpEOnpVmp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 510} executionInfo={"status": "ok", "timestamp": 1592688667018, "user_tz": -120, "elapsed": 564371, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="2cc2285e-5d6b-409e-cb55-9040b7ce1a43"
model_6 = sklearn.ensemble.VotingClassifier(estimators=[('nn', model_3), ('lr', model_4), ('rf', model_5)], voting='soft')
model_6.fit(x_train, y_train)
# + id="BEfXpM6Arhp1" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688667314, "user_tz": -120, "elapsed": 564630, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
hat_y_test = model_6.predict(x_test)
# + id="e9Yt3n4UrjRd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"status": "ok", "timestamp": 1592688667315, "user_tz": -120, "elapsed": 564609, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="06bbe77b-82bb-4ae8-aff8-d6a116dd8ac5"
prediction(hat_y_test, 10)
# + [markdown] colab_type="text" id="N2bcGBcHykih"
# #### Analyse
# + colab_type="code" id="Pc-ft1MhDtID" colab={"base_uri": "https://localhost:8080/", "height": 305} executionInfo={"status": "ok", "timestamp": 1592688667317, "user_tz": -120, "elapsed": 564590, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="702d69a8-4f1d-4664-ad1c-1f395aa808fa"
plot_conf(y_test, hat_y_test)
# + colab_type="code" id="5KeOy54gDtIF" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1592688667318, "user_tz": -120, "elapsed": 564573, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="40090c02-453b-4894-8aed-0785bd8b2989"
print_scores(y_test, hat_y_test, "ENSEMBLE LEARNING")
# + [markdown] id="r2h4lUUQgQTv" colab_type="text"
# Pas de chance, le modèle est moins performant que le meilleur de ses composants (la foret aléatoire).
# + [markdown] id="s0RFv_6qr3eJ" colab_type="text"
# ### Bagging
# + [markdown] id="kiRR_JhPgCOQ" colab_type="text"
# Nous avons à notre disposition très peu de données (3200 éléments environ), on fait du Bagging pour remédier à cela. La forêt aléatoire nous a donné le meilleur résultat jusqu’à présent ; utilisons-la ici :
# + [markdown] id="xbhUMwvGr-ZW" colab_type="text"
# #### Model
# + id="28rV5BGGubNe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 391} executionInfo={"status": "ok", "timestamp": 1592688803926, "user_tz": -120, "elapsed": 701164, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="620912d8-5dcd-4923-fc56-996d1e8335b2"
model_7 = sklearn.ensemble.BaggingClassifier(
model_5,
n_estimators=100,
max_samples=10,
bootstrap=True,
n_jobs=-1,
random_state=42)
model_7.fit(x_train, y_train)
# + id="G3XjLj7lul_w" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1592688846255, "user_tz": -120, "elapsed": 743478, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}}
hat_y_test = model_7.predict(x_test)
# + id="9q615qzyu1bG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} executionInfo={"status": "ok", "timestamp": 1592688846257, "user_tz": -120, "elapsed": 743469, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="fb15b845-c3a6-4b91-f6d2-b2f0bcdf369b"
prediction(hat_y_test, 10)
# + [markdown] id="bNJDyTqfsAgk" colab_type="text"
# #### Analyse
# + colab_type="code" id="bzM-LdVZDzu7" colab={"base_uri": "https://localhost:8080/", "height": 305} executionInfo={"status": "ok", "timestamp": 1592688846259, "user_tz": -120, "elapsed": 743450, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="dc343806-6521-4b6c-9b76-dbb0cbadba0c"
plot_conf(y_test, hat_y_test)
# + colab_type="code" id="VaR0m9y2Dzu9" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1592688846260, "user_tz": -120, "elapsed": 743434, "user": {"displayName": "desmond", "photoUrl": "https://lh6.googleusercontent.com/-5exToxTXVcA/AAAAAAAAAAI/AAAAAAAABrc/6MUOfJiC0-s/s64/photo.jpg", "userId": "10666133071162031678"}} outputId="3454955f-4aad-4f00-9192-8910aeec6d5a"
print_scores(y_test, hat_y_test, "BAGGING CLASSIFIER")
# + [markdown] id="JYAqjlQIh0HP" colab_type="text"
# C’est avec tristesse qu’on remarque qu’il reste moins performant que la forêt qui le compose.
# + [markdown] id="vi52kp00D6c0" colab_type="text"
# ## Conclusion sur l'etude du succes
# + [markdown] id="DzbBG9pQ0LFo" colab_type="text"
# En conclusion, on a du mal à prédire le succès avec plus de 50% d'exactitude (ce qui représente à mon avis le minimum de crédibilité). **D'une part** je suis fautif car je suis un peu stricte dans la catégorisation des succès/échecs. Par exemple, un succès massif prédit comme un simple succès est tout à fait acceptable. J'encouragerais avec enthousiasme la production d'un tel film.
#
# **D'autre part** mon jeu de données n’est pas parfait :
# - On a très peu de données : 3200 films, c'est assez petit pour se faire une idée des goûts cinématographiques de l'humanité.
# - Ces données ne sont pas assez diversifiées. Il y a beaucoup trop de succès pour très peu d'échecs. Peut-être que, dans quelques années, une métrique universelle de définition du succès sera créée. En plus on aura plus de films à étudier. Ça sera potentiellement plus facile de conduire une telle étude.
# - Le succès d'un film dépend énormément des stars à l'affiche. Ça aurait donc été intéressant d'étudier le jeu de données contenant l’équipe de tournage.
#
# **Une autre explication** est que ces faibles scores sont juste naturels. La nature contrôle tout et je n'y peux rien. Si la prédiction du succès d'un film était facile (voire possible) alors tout le monde rentrerait dans l'industrie du cinéma pour se faire riche ; et il n'y aurait plus de data scientist. Je ne veux pas ça :) !!
#
# + [markdown] id="edMp-SdtGMKU" colab_type="text"
# # III - PERPECTIVES
# + [markdown] id="QKJANjiCGPKr" colab_type="text"
# Nous avons prédit les genres des films, et leur succès grâce à des données toutes connues avant la sortie du film. Les résultats obtenus pour la prédiction des genres sont encourageants. Le réseau de neurones construit peut être adapté pour la création d’un système de recommandation de films.
#
# En ce qui concerne la prédiction du succès, nous avons obtenu moins de réussite. Nous aurions pu faire une analyse en composante principale pour déterminer quelles variables contribuent le plus au succès. Cela aurait probablement améliorer nos résultats. Nous aurions pu ajouter à tout cela la prédiction de la note moyenne du film, et/ou la somme d’argent que celui-ci rapportera. Cependant, nous avons déjà une bonne idée de la relation entre nos prédictions et ces dernières grâce à la matrice de corrélations.
#
| nb2_apprentissage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Now You Code 4: Shopping List
#
# Write a simple shopping list program. Use a Python `list` as a shopping list. Functions definitions have been written for you, so all you need to do is complete the code inside the function.
#
# The main program loop has a menu allowing you to 1) add to the list 2) remove an item from the list or 3) print the list
#
# Example Run
#
# ```
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: a
# Enter Item to Add: eggs
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: a
# Enter Item to Add: eggs
# eggs is already in the list!
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: p
# Your List: ['eggs']
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: r
# Enter Item to Remove from List: peas
# peas is not in the list!
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: r
# Enter Item to Remove from List: eggs
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: p
# Your List: []
# A - Add Item to Shopping List
# P - Print Shopping List
# R - Remove Item from Shopping List
# Q - Quit Shopping List Program
# Enter choice: q
# ```
#
# Most of the code has been written for you fill in the areas below.
#
# +
# TODO: Write Todo lists for
# #1 Add item to list if it does not exist
# #2 Remove item from list when it does not exist
# +
# Write code here
def print_menu():
print("A - Add Item to Shopping List")
print("P - Print Shopping List")
print("R - Remove Item from Shopping List")
print("Q - Quit Shopping List Program")
return
# prompts for a new item using input() then adds it to the shopping list
# only when it does not exist
# input: shopping list
# output: shopping list
def add_item(shopping_list):
#todo write code here
return shopping_list
# sorts then prints the shopping list
# input: shopping list
# output: shopping list
def print_list(shopping_list):
#todo write code here
return shopping_list
# prompts for an item then removes it from the shopping list
# only when it exists
# input: shopping list
# output: shopping list
def remove_item(shopping_list):
#todo write code here
return shopping_list
## Main Program Written For You
shopping_list = []
while True:
print_menu()
choice = input("Enter choice: ").upper()
if choice == 'A':
shopping_list = add_item(shopping_list)
elif choice == 'P':
shopping_list = print_list(shopping_list)
elif choice == 'R':
shopping_list = remove_item(shopping_list)
elif choice == "Q":
break
else:
print('ERROR:', choice,'is not a menu option')
# -
# ## Step 3: Questions
#
# 1. When we write functions we usually don't have `input()` or `print()` Python functions in them, but in this solution we do. What is the advantage of doing it in this example?
# 2. Explain a strategy you can employ to save the list to a file and load it back. How would that work?
# 3. What does the `.upper()` method function accomplish in the main program?
# ## Reminder of Evaluation Criteria
#
# 1. What the problem attempted (analysis, code, and answered questions) ?
# 2. What the problem analysis thought out? (does the program match the plan?)
# 3. Does the code execute without syntax error?
# 4. Does the code solve the intended problem?
# 5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
| content/lessons/09/Now-You-Code/NYC4-ShoppingList.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:gbdt-forecast]
# language: python
# name: conda-env-gbdt-forecast-py
# ---
# +
import sys
import json
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
sys.path.append('../../')
sys.path.append('../preprocess')
from ranewable.ranewable import Ra
import preprocess_gefcom2014_solar_example
# -
# ## Load json
params_path = '../params/params_gefcom2014_solar_example.json'
with open(params_path, 'r', encoding='utf-8') as file:
params_json = json.loads(file.read())
params_json
# ## Load data
df = preprocess_gefcom2014_solar_example.load_data('.'+params_json['path_raw_data'], params_json['filename_raw_data'], header=[0,1])
df.head()
# ## Convert to standard indexing
df.index.name = 'valid_datetime'
idx_ref_datetime = df.index.hour == 1
df.loc[idx_ref_datetime, 'ref_datetime'] = df.index[idx_ref_datetime]
df.loc[:, 'ref_datetime'] = df.loc[:, 'ref_datetime'].fillna(method='ffill')
df = df.set_index('ref_datetime', append=True, drop=True)[df.columns.levels[0][:-1]]
df.index = df.index.reorder_levels(['ref_datetime', 'valid_datetime'])
df = df.sort_index()
columns = [df.columns.levels[0][:-1].values, df.columns.levels[1][:-1].values]
df.columns = pd.MultiIndex.from_product(columns)
df.head(50)
# ## Preprocess data (feature engineering)
# ### Average point features
# Average point features over hour
features_point = ['VAR134', 'VAR157', 'VAR164', 'VAR165', 'VAR166', 'VAR167', 'VAR78', 'VAR79']
df_point = df.loc[:,(slice(None),features_point)]
df_point = df_point.rolling(2).mean().shift(-1).fillna(method='ffill')
#df.loc[:,(slice(None),features_point)] = df_point
df['1']['VAR134'].iloc[:40].plot(linestyle='--', marker='*', label='original')
df_point['1']['VAR134'].iloc[:40].plot(linestyle='--', marker='*', label='average')
plt.legend()
df.loc[:,(slice(None),features_point)] = df_point
# ### Differentiate accumulated features
# Differentiate accumulated features
features_accum = ['VAR169', 'VAR175', 'VAR178', 'VAR228']
df_accum = df.loc[:,(slice(None),features_accum)]
df_accum = df_accum.diff()
df_accum[df_accum.index.levels[1].hour==1] = df.loc[df_accum.index.levels[1].hour==1,(slice(None),features_accum)]
df_accum.loc[:,(slice(None),features_accum[:3])] = df_accum.loc[:,(slice(None),features_accum[:3])]/3600 # Convert from J to Wh/h
#df.loc[:,(slice(None),features_accum)] = df_accum
(df['1']['VAR169']/3600).iloc[:60].plot(linestyle='--', marker='*', label='original')
df_accum['1']['VAR169'].iloc[:60].plot(linestyle='--', marker='*', label='average')
plt.legend()
df.loc[:,(slice(None),features_accum)] = df_accum
# ### Physical features
# Features to add:
# * azimuth
# * zenith
# * diffuse radiation
# * beam radiation
# * clear sky forecast
# * physical forecast
# #### Solar position
for i, (coords, alt, cap, orien, tilt) in enumerate(zip(params_json['site_coords'],
params_json['site_altitude'],
params_json['site_capacity'],
params_json['panel_orientation'],
params_json['panel_tilt'])):
ra = Ra(longitude=coords[0],
latitude=coords[1],
altitude=alt,
capacity=cap,
orientation=orien,
tilt=tilt)
df_solpos = ra.calculate_solpos(df[str(i+1)].index)
df_clearsky = ra.calculate_clearsky(df[str(i+1)].index)
df_power_clearsky = ra.calculate_power_clearsky(df[str(i+1)].index)
df_weather = ra.weather_from_ghi(df.loc[:,(str(i+1),'VAR169')])
df_power = ra.calculate_power(df_weather.copy())
df_solpos = df_solpos.loc[:, ['zenith', 'azimuth']]
df_clearsky.columns = df_clearsky.columns+'_clearsky'
df_weather = df_weather.loc[:, ['dni', 'dhi', 'ghi']]
for column in df_solpos.columns:
df.loc[:,(str(i+1),column)] = df_solpos.loc[:, column]
for column in df_clearsky.columns:
df.loc[:,(str(i+1),column)] = df_clearsky.loc[:, column]
for column in df_weather.columns:
df.loc[:,(str(i+1),column)] = df_weather.loc[:, column]
df.loc[:,(str(i+1),'Clearsky_Forecast')] = df_power_clearsky
df.loc[:,(str(i+1),'Physical_Forecast')] = df_power
df.loc[(slice(None), slice('2013-05-10','2013-05-25')), ('3', ['POWER', 'Clearsky_Forecast', 'Physical_Forecast'])].plot(figsize=(20,5))
df['1'][['POWER', 'Physical_Forecast']].plot.scatter(x='POWER', y='Physical_Forecast')
df['2'][['POWER', 'Physical_Forecast']].plot.scatter(x='POWER', y='Physical_Forecast')
df['3'][['POWER', 'Physical_Forecast']].plot.scatter(x='POWER', y='Physical_Forecast')
# ### Power difference
for farm in df.columns.levels[0]:
df.loc[:,(farm,'DIFF')] = (df.loc[:,(farm,'POWER')]-df.loc[:,(farm,'Physical_Forecast')])
df.loc[:, (slice(None), 'DIFF')].plot()
df.loc[:, (slice(None), 'DIFF')].hist(bins=40, figsize=(10,5));
# ### Add lead_time feature
# Add lead time feature
ref_datetime = df.index.get_level_values(0)
valid_datetime = df.index.get_level_values(1)
lead_time = (valid_datetime-ref_datetime)/pd.Timedelta('1 hour')
for farm in df.columns.levels[0]:
df.loc[:,(farm,'LEAD_TIME')] = lead_time
df.loc[:,('1','LEAD_TIME')].head()
# ### Lagged features
variables_lags = {'VAR169': [-1,1,2,3]}
variables_lags
for farm in df.columns.levels[0]:
for variable, lags in variables_lags.items():
for lag in lags:
df.loc[:, (farm, variable+'_lag{0}'.format(lag))] = df.loc[:, (farm, variable)].shift(lag)
df.head()
# ## Normalise by clearsky power
# Does not seem like a good idea to normalise with the clearsky power since the real power is quite frequently higher and can be nonzero when clearsky power is zero.
for column in df.columns.levels[0]:
power_norm = df.loc[:, (column, 'POWER')] / df.loc[:, (column, 'Clearsky_Forecast')]
idx_true = power_norm <= 1
idx_false = power_norm > 1
df.loc[idx_true, (column, 'POWER_NORM')] = power_norm
df.loc[idx_false, (column, 'POWER_NORM')] = 1.0
df['1']['POWER_NORM'].iloc[400:600].plot()
df['2']['POWER_NORM'].iloc[400:600].plot()
df['3']['POWER_NORM'].iloc[400:600].plot()
| notebooks/nb-preprocess-gefcom2014-solar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project: Identify Customer Segments
#
# In this project, you will apply unsupervised learning techniques to identify segments of the population that form the core customer base for a mail-order sales company in Germany. These segments can then be used to direct marketing campaigns towards audiences that will have the highest expected rate of returns. The data that you will use has been provided by our partners at Bertelsmann Arvato Analytics, and represents a real-life data science task.
#
# This notebook will help you complete this task by providing a framework within which you will perform your analysis steps. In each step of the project, you will see some text describing the subtask that you will perform, followed by one or more code cells for you to complete your work. **Feel free to add additional code and markdown cells as you go along so that you can explore everything in precise chunks.** The code cells provided in the base template will outline only the major tasks, and will usually not be enough to cover all of the minor tasks that comprise it.
#
# It should be noted that while there will be precise guidelines on how you should handle certain tasks in the project, there will also be places where an exact specification is not provided. **There will be times in the project where you will need to make and justify your own decisions on how to treat the data.** These are places where there may not be only one way to handle the data. In real-life tasks, there may be many valid ways to approach an analysis task. One of the most important things you can do is clearly document your approach so that other scientists can understand the decisions you've made.
#
# At the end of most sections, there will be a Markdown cell labeled **Discussion**. In these cells, you will report your findings for the completed section, as well as document the decisions that you made in your approach to each subtask. **Your project will be evaluated not just on the code used to complete the tasks outlined, but also your communication about your observations and conclusions at each stage.**
# +
# import libraries here; add more as necessary
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
# magic word for producing visualizations in notebook
# %matplotlib inline
'''
Import note: The classroom currently uses sklearn version 0.19.
If you need to use an imputer, it is available in sklearn.preprocessing.Imputer,
instead of sklearn.impute as in newer versions of sklearn.
'''
# -
# ### Step 0: Load the Data
#
# There are four files associated with this project (not including this one):
#
# - `Udacity_AZDIAS_Subset.csv`: Demographics data for the general population of Germany; 891211 persons (rows) x 85 features (columns).
# - `Udacity_CUSTOMERS_Subset.csv`: Demographics data for customers of a mail-order company; 191652 persons (rows) x 85 features (columns).
# - `Data_Dictionary.md`: Detailed information file about the features in the provided datasets.
# - `AZDIAS_Feature_Summary.csv`: Summary of feature attributes for demographics data; 85 features (rows) x 4 columns
#
# Each row of the demographics files represents a single person, but also includes information outside of individuals, including information about their household, building, and neighborhood. You will use this information to cluster the general population into groups with similar demographic properties. Then, you will see how the people in the customers dataset fit into those created clusters. The hope here is that certain clusters are over-represented in the customers data, as compared to the general population; those over-represented clusters will be assumed to be part of the core userbase. This information can then be used for further applications, such as targeting for a marketing campaign.
#
# To start off with, load in the demographics data for the general population into a pandas DataFrame, and do the same for the feature attributes summary. Note for all of the `.csv` data files in this project: they're semicolon (`;`) delimited, so you'll need an additional argument in your [`read_csv()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) call to read in the data properly. Also, considering the size of the main dataset, it may take some time for it to load completely.
#
# Once the dataset is loaded, it's recommended that you take a little bit of time just browsing the general structure of the dataset and feature summary file. You'll be getting deep into the innards of the cleaning in the first major step of the project, so gaining some general familiarity can help you get your bearings.
# +
# Load in the general demographics data.
azdias = pd.read_csv('Udacity_AZDIAS_Subset.csv', error_bad_lines=False, sep=';')
# Load in the feature summary file.
feat_info = pd.read_csv('AZDIAS_Feature_Summary.csv', error_bad_lines=False, sep=';')
# -
# Check the structure of the data after it's loaded (e.g. print the number of
# rows and columns, print the first few rows).
azdias.shape
azdias.head(10)
feat_info.shape
feat_info.head(5)
# > **Tip**: Add additional cells to keep everything in reasonably-sized chunks! Keyboard shortcut `esc --> a` (press escape to enter command mode, then press the 'A' key) adds a new cell before the active cell, and `esc --> b` adds a new cell after the active cell. If you need to convert an active cell to a markdown cell, use `esc --> m` and to convert to a code cell, use `esc --> y`.
#
# ## Step 1: Preprocessing
#
# ### Step 1.1: Assess Missing Data
#
# The feature summary file contains a summary of properties for each demographics data column. You will use this file to help you make cleaning decisions during this stage of the project. First of all, you should assess the demographics data in terms of missing data. Pay attention to the following points as you perform your analysis, and take notes on what you observe. Make sure that you fill in the **Discussion** cell with your findings and decisions at the end of each step that has one!
#
# #### Step 1.1.1: Convert Missing Value Codes to NaNs
# The fourth column of the feature attributes summary (loaded in above as `feat_info`) documents the codes from the data dictionary that indicate missing or unknown data. While the file encodes this as a list (e.g. `[-1,0]`), this will get read in as a string object. You'll need to do a little bit of parsing to make use of it to identify and clean the data. Convert data that matches a 'missing' or 'unknown' value code into a numpy NaN value. You might want to see how much data takes on a 'missing' or 'unknown' code, and how much data is naturally missing, as a point of interest.
#
# **As one more reminder, you are encouraged to add additional cells to break up your analysis into manageable chunks.**
# Identify missing or unknown data values and convert them to NaNs.
# First let's lets convert strings in missing or unknown to lists.
feat_info['missing_or_unknown'] = feat_info.missing_or_unknown.apply(lambda x: x[1:-1].split(','))
#Now by using loop from this question https://knowledge.udacity.com/questions/113144 we convert all missing or unknown values
#to NaNs
for attribute, missing_or_unknown in zip(feat_info['attribute'], feat_info['missing_or_unknown']):
if missing_or_unknown[0] != '':
for val in missing_or_unknown:
if val.isnumeric() or val.lstrip('-').isnumeric():
azdias.loc[azdias[attribute] == int(val), attribute] = np.nan
else:
azdias.loc[azdias[attribute] == val, attribute] = np.nan
#Updated table
azdias.head(10)
# #### Step 1.1.2: Assess Missing Data in Each Column
#
# How much missing data is present in each column? There are a few columns that are outliers in terms of the proportion of values that are missing. You will want to use matplotlib's [`hist()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.hist.html) function to visualize the distribution of missing value counts to find these columns. Identify and document these columns. While some of these columns might have justifications for keeping or re-encoding the data, for this project you should just remove them from the dataframe. (Feel free to make remarks about these outlier columns in the discussion, however!)
#
# For the remaining features, are there any patterns in which columns have, or share, missing data?
# Perform an assessment of how much missing data there is in each column of the
# dataset.
missing_count = azdias.isnull().sum()/len(azdias)
plt.figure(figsize=(16,8))
plt.xticks(np.arange(len(missing_count))+0.5,missing_count.index,rotation='vertical')
plt.ylabel("Fraction of missing data")
plt.bar(np.arange(len(missing_count)),missing_count)
plt.show()
# Investigate patterns in the amount of missing data in each column.
# AGER_TYP, GEBURTSJAHR, TITEL_KZ, ALTER_HH, KK_KUNDENTYP, KBA05_BAUMAX columns have high amount of missing data.
# Lets remove them from data set
azdias_new = azdias.drop(columns = ['AGER_TYP', 'GEBURTSJAHR', 'TITEL_KZ', 'ALTER_HH', 'KK_KUNDENTYP', 'KBA05_BAUMAX'], axis = 1)
#New df without outlier columns
azdias_new.head()
# How much data is missing in each column of the dataset?
azdias_new.isnull().sum()
# #### Discussion 1.1.2: Assess Missing Data in Each Column
#
# After looking at the missing data bat plot, I decided that there are 6 outlier columns in the data.
# There columns are 'AGER_TYP', 'GEBURTSJAHR', 'TITEL_KZ', 'ALTER_HH', 'KK_KUNDENTYP', 'KBA05_BAUMAX'
#
# In remaining data each column contains either near zero missing values or have around %10 of missing data.
# #### Step 1.1.3: Assess Missing Data in Each Row
#
# Now, you'll perform a similar assessment for the rows of the dataset. How much data is missing in each row? As with the columns, you should see some groups of points that have a very different numbers of missing values. Divide the data into two subsets: one for data points that are above some threshold for missing values, and a second subset for points below that threshold.
#
# In order to know what to do with the outlier rows, we should see if the distribution of data values on columns that are not missing data (or are missing very little data) are similar or different between the two groups. Select at least five of these columns and compare the distribution of values.
# - You can use seaborn's [`countplot()`](https://seaborn.pydata.org/generated/seaborn.countplot.html) function to create a bar chart of code frequencies and matplotlib's [`subplot()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplot.html) function to put bar charts for the two subplots side by side.
# - To reduce repeated code, you might want to write a function that can perform this comparison, taking as one of its arguments a column to be compared.
#
# Depending on what you observe in your comparison, this will have implications on how you approach your conclusions later in the analysis. If the distributions of non-missing features look similar between the data with many missing values and the data with few or no missing values, then we could argue that simply dropping those points from the analysis won't present a major issue. On the other hand, if the data with many missing values looks very different from the data with few or no missing values, then we should make a note on those data as special. We'll revisit these data later on. **Either way, you should continue your analysis for now using just the subset of the data with few or no missing values.**
# How much data is missing in each row of the dataset?
# I printed only first 20 row because graphing or printing takes too much time
#for i in range(len(azdias_new.index)) :
# missing_count_row = azdias_new.iloc[i].isnull().sum()/len(azdias_new.iloc[i])
for i in range(0,20):
print("Nan in row ", i , " : " , azdias_new.iloc[i].isnull().sum())
# Write code to divide the data into two subsets based on the number of missing
# values in each row.
#azdias_clean consists of rows without null values
azdias_clean = azdias_new.dropna()
#azdias_null consists of rows contains any amount of null values
azdias_null = azdias_new.loc[~azdias_new.index.isin(azdias_new.dropna().index)]
# +
# Compare the distribution of values for at least five columns where there are
# no or few missing values, between the two subsets.
test_columns = ['FINANZ_MINIMALIST', 'GREEN_AVANTGARDE', 'SEMIO_SOZ', 'ALTERSKATEGORIE_GROB', 'ONLINE_AFFINITAET']
def plotColumns(dataset1, dataset2, columnlist):
sns.set(style="darkgrid")
fig, axs = plt.subplots(2, len(columnlist), figsize=(35,20))
for i in range(0, len(columnlist)):
sns.countplot(dataset1[columnlist[i]], ax=axs[0, i])
sns.countplot(dataset2[columnlist[i]], ax=axs[1, i])
plotColumns(azdias_clean, azdias_null, test_columns)
# -
# #### Discussion 1.1.3: Assess Missing Data in Each Row
# In first 3 columns in above graph there were zero null values in these columns. Data pattern is similiar and highest count values same in each dataframe. They are qualitatively different but, we can consider removed values as noise.
# ### Step 1.2: Select and Re-Encode Features
#
# Checking for missing data isn't the only way in which you can prepare a dataset for analysis. Since the unsupervised learning techniques to be used will only work on data that is encoded numerically, you need to make a few encoding changes or additional assumptions to be able to make progress. In addition, while almost all of the values in the dataset are encoded using numbers, not all of them represent numeric values. Check the third column of the feature summary (`feat_info`) for a summary of types of measurement.
# - For numeric and interval data, these features can be kept without changes.
# - Most of the variables in the dataset are ordinal in nature. While ordinal values may technically be non-linear in spacing, make the simplifying assumption that the ordinal variables can be treated as being interval in nature (that is, kept without any changes).
# - Special handling may be necessary for the remaining two variable types: categorical, and 'mixed'.
#
# In the first two parts of this sub-step, you will perform an investigation of the categorical and mixed-type features and make a decision on each of them, whether you will keep, drop, or re-encode each. Then, in the last part, you will create a new data frame with only the selected and engineered columns.
#
# Data wrangling is often the trickiest part of the data analysis process, and there's a lot of it to be done here. But stick with it: once you're done with this step, you'll be ready to get to the machine learning parts of the project!
# How many features are there of each data type?
typeArray = feat_info['type'].unique()
for tp in typeArray:
count = len(feat_info[feat_info.type == tp])
print("Type =",tp," | Count =", count)
# #### Step 1.2.1: Re-Encode Categorical Features
#
# For categorical data, you would ordinarily need to encode the levels as dummy variables. Depending on the number of categories, perform one of the following:
# - For binary (two-level) categoricals that take numeric values, you can keep them without needing to do anything.
# - There is one binary variable that takes on non-numeric values. For this one, you need to re-encode the values as numbers or create a dummy variable.
# - For multi-level categoricals (three or more values), you can choose to encode the values using multiple dummy variables (e.g. via [OneHotEncoder](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html)), or (to keep things straightforward) just drop them from the analysis. As always, document your choices in the Discussion section.
#First lets make list of previously dropped attributes
dropped_names = ['AGER_TYP', 'GEBURTSJAHR', 'TITEL_KZ', 'ALTER_HH', 'KK_KUNDENTYP', 'KBA05_BAUMAX']
#Then we take copy of original frame
cat_attr = feat_info.copy()
#Then drop attributes from copy
for name in dropped_names:
cat_attr.drop(feat_info[feat_info['attribute'] == name ].index , inplace=True)
# +
# Assess categorical variables: which are binary, which are multi-level, and
# which one needs to be re-encoded?
# In order to assess categorical values we create new frame with only categorical attributes
df_clean_copy = azdias_clean.copy()
cat_attr = cat_attr[cat_attr['type'].isin(['categorical'])]
df_clean_copy = df_clean_copy.filter(cat_attr['attribute'])
df_clean_copy.head(10)
#DF_CLEAN_COPY ONLY CONTAINS CATEGORICAL FEATURES MAKE CHANGES IN AZDIAS_CLEAN
# -
df_clean_copy.info()
#We have got three object type. First lets start with OST_WEST_KZ
print(df_clean_copy['OST_WEST_KZ'].value_counts())
#It is non numeric binary variable. We replace them with integers.
replace_map = {'OST_WEST_KZ': {'W': 1, 'O': 2}}
azdias_clean.replace(replace_map, inplace=True)
#Lets look at CAMEO_DEUG_2015 object type attribute
print(df_clean_copy['CAMEO_DEUG_2015'].value_counts())
#This only contains integer variables, we can convert this to numeric values
azdias_clean["CAMEO_DEUG_2015"] = pd.to_numeric(azdias_clean["CAMEO_DEUG_2015"])
#Lets analyze our final object attribute
print(df_clean_copy['CAMEO_DEU_2015'].value_counts())
#It contains categorical string values. We can use get_dummies
azdias_clean = pd.get_dummies(azdias_clean, columns=['CAMEO_DEU_2015'], drop_first=True)
#Now we check our frame
azdias_clean.shape
azdias_clean.head(10)
# #### Discussion 1.2.1: Re-Encode Categorical Features
#
# In our cleaned data frame we have got 18 attributes. 15 of them was float values and required no handling.
# * OST_WEST_KZ = String binary value. Replaced with integers 1,2
# * CAMEO_DEUG_2015 = Integers with object type. Cast their type to integer
# * CAMEO_DEU_2015 = String type attribute with a lot of unique value. Used get_dummies to encode
#
# I kept all of the non-null attributes.
# #### Step 1.2.2: Engineer Mixed-Type Features
#
# There are a handful of features that are marked as "mixed" in the feature summary that require special treatment in order to be included in the analysis. There are two in particular that deserve attention; the handling of the rest are up to your own choices:
# - "PRAEGENDE_JUGENDJAHRE" combines information on three dimensions: generation by decade, movement (mainstream vs. avantgarde), and nation (east vs. west). While there aren't enough levels to disentangle east from west, you should create two new variables to capture the other two dimensions: an interval-type variable for decade, and a binary variable for movement.
# - "CAMEO_INTL_2015" combines information on two axes: wealth and life stage. Break up the two-digit codes by their 'tens'-place and 'ones'-place digits into two new ordinal variables (which, for the purposes of this project, is equivalent to just treating them as their raw numeric values).
# - If you decide to keep or engineer new features around the other mixed-type features, make sure you note your steps in the Discussion section.
#
# Be sure to check `Data_Dictionary.md` for the details needed to finish these tasks.
# Investigate "PRAEGENDE_JUGENDJAHRE" and engineer two new variables.
print(azdias_clean['PRAEGENDE_JUGENDJAHRE'].value_counts())
# First we make list of movements types
mainstream_lst = [1, 3, 5, 8, 10, 12, 14]
avantgarde_lst = [2, 4, 6, 7, 9, 11, 13, 15]
# and generation lists
gen_40s_lst = [1, 2]
gen_50s_lst = [3, 4]
gen_60s_lst = [5, 6, 7]
gen_70s_lst = [8, 9]
gen_80s_lst = [10, 11, 12, 13]
gen_90s_lst = [14, 15]
# +
# TODO : ASSIGN NEW LIST AT THE END
#New column values to insert
insert_mainstream = []
insert_avantgarde = []
insert_gen40 = []
insert_gen50 = []
insert_gen60 = []
insert_gen70 = []
insert_gen80 = []
insert_gen90 = []
#Fill insert lists
for i in azdias_clean["PRAEGENDE_JUGENDJAHRE"].tolist():
insert_mainstream.append(1) if i in mainstream_lst else insert_mainstream.append(0)
insert_avantgarde.append(1) if i in avantgarde_lst else insert_avantgarde.append(0)
insert_gen40.append(1) if i in gen_40s_lst else insert_gen40.append(0)
insert_gen50.append(1) if i in gen_50s_lst else insert_gen50.append(0)
insert_gen60.append(1) if i in gen_60s_lst else insert_gen60.append(0)
insert_gen70.append(1) if i in gen_70s_lst else insert_gen70.append(0)
insert_gen80.append(1) if i in gen_80s_lst else insert_gen80.append(0)
insert_gen90.append(1) if i in gen_90s_lst else insert_gen90.append(0)
#Insert new columns
azdias_clean["MAINSTREAM"] = insert_mainstream
azdias_clean["AVANTGARDE"] = insert_avantgarde
azdias_clean["GEN40"] = insert_gen40
azdias_clean["GEN50"] = insert_gen50
azdias_clean["GEN60"] = insert_gen60
azdias_clean["GEN70"] = insert_gen70
azdias_clean["GEN80"] = insert_gen80
azdias_clean["GEN90"] = insert_gen90
#Drop original column
azdias_clean = azdias_clean.drop(['PRAEGENDE_JUGENDJAHRE'], axis=1)
azdias_clean.head(5)
# +
# Investigate "CAMEO_INTL_2015" and engineer two new variables.
insert_wealth = []
insert_life_stage = []
for i in azdias_clean["CAMEO_INTL_2015"].tolist():
insert_wealth.append(int(str(i)[0]))
insert_life_stage.append(int(str(i)[1]))
azdias_clean["WEALTH"] = insert_wealth
azdias_clean["LIFE_STAGE"] = insert_life_stage
azdias_clean = azdias_clean.drop(['CAMEO_INTL_2015'], axis=1)
azdias_clean.head(5)
# -
# #### Discussion 1.2.2: Engineer Mixed-Type Features
#
# PRAEGENDE_JUGENDJAHRE contains data in three dimensions. I ignored the nation part as instructed and focued on remaining dimensions. I created seperate lists for each movement and generation filled them with labels inside data_dictionary, created new columns for each of them, and filled them according to original column in the list. Finally I dropped the original column.
# I applied same process for CAMEO_INTL_2015 but instead of using binary values I used digits in the tens place one ones place
# #### Step 1.2.3: Complete Feature Selection
#
# In order to finish this step up, you need to make sure that your data frame now only has the columns that you want to keep. To summarize, the dataframe should consist of the following:
# - All numeric, interval, and ordinal type columns from the original dataset.
# - Binary categorical features (all numerically-encoded).
# - Engineered features from other multi-level categorical features and mixed features.
#
# Make sure that for any new columns that you have engineered, that you've excluded the original columns from the final dataset. Otherwise, their values will interfere with the analysis later on the project. For example, you should not keep "PRAEGENDE_JUGENDJAHRE", since its values won't be useful for the algorithm: only the values derived from it in the engineered features you created should be retained. As a reminder, your data should only be from **the subset with few or no missing values**.
# If there are other re-engineering tasks you need to perform, make sure you
# take care of them here. (Dealing with missing data will come in step 2.1.)
azdias_clean.info()
# All values are numeric, interval and ordinal type.
# Categorical features are numerically encoded
# Mixed features seperated into new single level features and original columns are dropped.
# ### Step 1.3: Create a Cleaning Function
#
# Even though you've finished cleaning up the general population demographics data, it's important to look ahead to the future and realize that you'll need to perform the same cleaning steps on the customer demographics data. In this substep, complete the function below to execute the main feature selection, encoding, and re-engineering steps you performed above. Then, when it comes to looking at the customer data in Step 3, you can just run this function on that DataFrame to get the trimmed dataset in a single step.
def clean_data(df):
"""
Perform feature trimming, re-encoding, and engineering for demographics
data
INPUT: Demographics DataFrame
OUTPUT: Trimmed and cleaned demographics DataFrame
"""
mainstream_lst = [1, 3, 5, 8, 10, 12, 14]
avantgarde_lst = [2, 4, 6, 7, 9, 11, 13, 15]
gen_40s_lst = [1, 2]
gen_50s_lst = [3, 4]
gen_60s_lst = [5, 6, 7]
gen_70s_lst = [8, 9]
gen_80s_lst = [10, 11, 12, 13]
gen_90s_lst = [14, 15]
insert_mainstream = []
insert_avantgarde = []
insert_gen40 = []
insert_gen50 = []
insert_gen60 = []
insert_gen70 = []
insert_gen80 = []
insert_gen90 = []
insert_wealth = []
insert_life_stage = []
# Put in code here to execute all main cleaning steps:
# convert missing value codes into NaNs, ...
for attribute, missing_or_unknown in zip(feat_info['attribute'], feat_info['missing_or_unknown']):
if missing_or_unknown[0] != '':
for val in missing_or_unknown:
if val.isnumeric() or val.lstrip('-').isnumeric():
df.loc[df[attribute] == int(val), attribute] = np.nan
else:
df.loc[df[attribute] == val, attribute] = np.nan
print("NAN Convertion Complete")
# remove selected columns and rows, ...
df = df.drop(columns = ['AGER_TYP', 'GEBURTSJAHR', 'TITEL_KZ', 'ALTER_HH', 'KK_KUNDENTYP', 'KBA05_BAUMAX'], axis = 1)
df = df.dropna()
print("Columns containing high amount of null values are dropped and row containing null values are dropped")
# select, re-encode, and engineer column values.
replace_map = {'OST_WEST_KZ': {'W': 1, 'O': 2}}
df.replace(replace_map, inplace=True)
df["CAMEO_DEUG_2015"] = pd.to_numeric(df["CAMEO_DEUG_2015"])
df = pd.get_dummies(df, columns=['CAMEO_DEU_2015'], drop_first=True)
print("Non numeric categorical features converted")
for i in df["PRAEGENDE_JUGENDJAHRE"].tolist():
insert_mainstream.append(1) if i in mainstream_lst else insert_mainstream.append(0)
insert_avantgarde.append(1) if i in avantgarde_lst else insert_avantgarde.append(0)
insert_gen40.append(1) if i in gen_40s_lst else insert_gen40.append(0)
insert_gen50.append(1) if i in gen_50s_lst else insert_gen50.append(0)
insert_gen60.append(1) if i in gen_60s_lst else insert_gen60.append(0)
insert_gen70.append(1) if i in gen_70s_lst else insert_gen70.append(0)
insert_gen80.append(1) if i in gen_80s_lst else insert_gen80.append(0)
insert_gen90.append(1) if i in gen_90s_lst else insert_gen90.append(0)
df["MAINSTREAM"] = insert_mainstream
df["AVANTGARDE"] = insert_avantgarde
df["GEN40"] = insert_gen40
df["GEN50"] = insert_gen50
df["GEN60"] = insert_gen60
df["GEN70"] = insert_gen70
df["GEN80"] = insert_gen80
df["GEN90"] = insert_gen90
df = df.drop(['PRAEGENDE_JUGENDJAHRE'], axis=1)
df.head(5)
for i in df["CAMEO_INTL_2015"].tolist():
insert_wealth.append(int(str(i)[0]))
insert_life_stage.append(int(str(i)[1]))
df["WEALTH"] = insert_wealth
df["LIFE_STAGE"] = insert_life_stage
df = df.drop(['CAMEO_INTL_2015'], axis=1)
df.head(5)
print("Multi level attributes re-engineered to low level attributes")
# Return the cleaned dataframe.
print("Data clean completed")
return df
# ## Step 2: Feature Transformation
#
# ### Step 2.1: Apply Feature Scaling
#
# Before we apply dimensionality reduction techniques to the data, we need to perform feature scaling so that the principal component vectors are not influenced by the natural differences in scale for features. Starting from this part of the project, you'll want to keep an eye on the [API reference page for sklearn](http://scikit-learn.org/stable/modules/classes.html) to help you navigate to all of the classes and functions that you'll need. In this substep, you'll need to check the following:
#
# - sklearn requires that data not have missing values in order for its estimators to work properly. So, before applying the scaler to your data, make sure that you've cleaned the DataFrame of the remaining missing values. This can be as simple as just removing all data points with missing data, or applying an [Imputer](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Imputer.html) to replace all missing values. You might also try a more complicated procedure where you temporarily remove missing values in order to compute the scaling parameters before re-introducing those missing values and applying imputation. Think about how much missing data you have and what possible effects each approach might have on your analysis, and justify your decision in the discussion section below.
# - For the actual scaling function, a [StandardScaler](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html) instance is suggested, scaling each feature to mean 0 and standard deviation 1.
# - For these classes, you can make use of the `.fit_transform()` method to both fit a procedure to the data as well as apply the transformation to the data at the same time. Don't forget to keep the fit sklearn objects handy, since you'll be applying them to the customer demographics data towards the end of the project.
# Apply feature scaling to the general population demographics data.
sc = StandardScaler()
azdias_scaled = sc.fit_transform(azdias_clean)
# ### Discussion 2.1: Apply Feature Scaling
#
# Since I used data without missing features I left the data as it is. For feature scaling I imported StandardScaler, and used fit_transform.
# ### Step 2.2: Perform Dimensionality Reduction
#
# On your scaled data, you are now ready to apply dimensionality reduction techniques.
#
# - Use sklearn's [PCA](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html) class to apply principal component analysis on the data, thus finding the vectors of maximal variance in the data. To start, you should not set any parameters (so all components are computed) or set a number of components that is at least half the number of features (so there's enough features to see the general trend in variability).
# - Check out the ratio of variance explained by each principal component as well as the cumulative variance explained. Try plotting the cumulative or sequential values using matplotlib's [`plot()`](https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html) function. Based on what you find, select a value for the number of transformed features you'll retain for the clustering part of the project.
# - Once you've made a choice for the number of components to keep, make sure you re-fit a PCA instance to perform the decided-on transformation.
# Apply PCA to the data.
pca = PCA()
pca_fit = pca.fit(azdias_scaled)
plt.plot(np.cumsum(pca_fit.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance');
plt.show()
# Re-apply PCA to the data while selecting for number of components to retain.
pca = PCA(n_components=80)
principalComponents = pca.fit_transform(azdias_scaled)
# ### Discussion 2.2: Perform Dimensionality Reduction
#
# First of all I used pca without any parameter. After that I plotted cumulative variance with number of components.
# At 80 component cumulative variance stays around %92 and we reduce component number %40.
# ### Step 2.3: Interpret Principal Components
#
# Now that we have our transformed principal components, it's a nice idea to check out the weight of each variable on the first few components to see if they can be interpreted in some fashion.
#
# As a reminder, each principal component is a unit vector that points in the direction of highest variance (after accounting for the variance captured by earlier principal components). The further a weight is from zero, the more the principal component is in the direction of the corresponding feature. If two features have large weights of the same sign (both positive or both negative), then increases in one tend expect to be associated with increases in the other. To contrast, features with different signs can be expected to show a negative correlation: increases in one variable should result in a decrease in the other.
#
# - To investigate the features, you should map each weight to their corresponding feature name, then sort the features according to weight. The most interesting features for each principal component, then, will be those at the beginning and end of the sorted list. Use the data dictionary document to help you understand these most prominent features, their relationships, and what a positive or negative value on the principal component might indicate.
# - You should investigate and interpret feature associations from the first three principal components in this substep. To help facilitate this, you should write a function that you can call at any time to print the sorted list of feature weights, for the *i*-th principal component. This might come in handy in the next step of the project, when you interpret the tendencies of the discovered clusters.
colNames = []
for i in range(1,81):
colNames.append("c" + str(i))
def sorted_weights(componentNum, components):
principalDf = pd.DataFrame(data = components, columns = colNames)
col_list = principalDf["c" + str(componentNum)].tolist()
col_list.sort(reverse=True)
return col_list
# Map weights for the first principal component to corresponding feature names
# and then print the linked values, sorted by weight.
# HINT: Try defining a function here or in a new cell that you can reuse in the
# other cells.
c1_list = sorted_weights(1, principalComponents)
print(c1_list[:10])
print(c1_list[-10:])
# Map weights for the second principal component to corresponding feature names
# and then print the linked values, sorted by weight.
c2_list = sorted_weights(2, principalComponents)
print(c2_list[:10])
print(c2_list[-10:])
# Map weights for the third principal component to corresponding feature names
# and then print the linked values, sorted by weight.
c3_list = sorted_weights(3, principalComponents)
print(c3_list[:10])
print(c3_list[-10:])
# ### Discussion 2.3: Interpret Principal Components
#
# First 2 components' weights are close to each other so we can say that these components are associated with each other. 3rd component also have a positive sign however since its more closer to 0 it is not closely correlated as first two attributes.
# ## Step 3: Clustering
#
# ### Step 3.1: Apply Clustering to General Population
#
# You've assessed and cleaned the demographics data, then scaled and transformed them. Now, it's time to see how the data clusters in the principal components space. In this substep, you will apply k-means clustering to the dataset and use the average within-cluster distances from each point to their assigned cluster's centroid to decide on a number of clusters to keep.
#
# - Use sklearn's [KMeans](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans) class to perform k-means clustering on the PCA-transformed data.
# - Then, compute the average difference from each point to its assigned cluster's center. **Hint**: The KMeans object's `.score()` method might be useful here, but note that in sklearn, scores tend to be defined so that larger is better. Try applying it to a small, toy dataset, or use an internet search to help your understanding.
# - Perform the above two steps for a number of different cluster counts. You can then see how the average distance decreases with an increasing number of clusters. However, each additional cluster provides a smaller net benefit. Use this fact to select a final number of clusters in which to group the data. **Warning**: because of the large size of the dataset, it can take a long time for the algorithm to resolve. The more clusters to fit, the longer the algorithm will take. You should test for cluster counts through at least 10 clusters to get the full picture, but you shouldn't need to test for a number of clusters above about 30.
# - Once you've selected a final number of clusters to use, re-fit a KMeans instance to perform the clustering operation. Make sure that you also obtain the cluster assignments for the general demographics data, since you'll be using them in the final Step 3.3.
# Over a number of different cluster counts run k-means clustering on the data and
# compute the average within-cluster distances.
wcss = []
for i in range(2, 22):
print('Calculating Model with ' + str(i) + ' Centers')
kmeans = KMeans(n_clusters=i, init='k-means++', max_iter=300, n_init=10, random_state=0, precompute_distances=True)
cluster_labels_KM = kmeans.fit_predict(principalComponents)
print('Calculating WCSS Score')
wcss.append(kmeans.inertia_)
print('Model with ' + str(i) + ' Centers Done')
# +
# Investigate the change in within-cluster distance across number of clusters.
# HINT: Use matplotlib's plot function to visualize this relationship.
plt.plot(range(2, 22), wcss)
plt.title('WCSS For Elbow Analysis')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS(kmeans.inertia_)')
plt.show()
# -
# Re-fit the k-means model with the selected number of clusters and obtain
# cluster predictions for the general population demographics data.
kmeans_final = KMeans(n_clusters=13, init='k-means++', max_iter=300, n_init=10, random_state=0)
azdias_labels_KM = kmeans_final.fit_predict(principalComponents)
# ### Discussion 3.1: Apply Clustering to General Population
#
# If we look at the wcss graph we can see that curve breaks in at 13 clusters, than slope of curve reduces. Because of that I picked 13 clusters for running KMeans
# ### Step 3.2: Apply All Steps to the Customer Data
#
# Now that you have clusters and cluster centers for the general population, it's time to see how the customer data maps on to those clusters. Take care to not confuse this for re-fitting all of the models to the customer data. Instead, you're going to use the fits from the general population to clean, transform, and cluster the customer data. In the last step of the project, you will interpret how the general population fits apply to the customer data.
#
# - Don't forget when loading in the customers data, that it is semicolon (`;`) delimited.
# - Apply the same feature wrangling, selection, and engineering steps to the customer demographics using the `clean_data()` function you created earlier. (You can assume that the customer demographics data has similar meaning behind missing data patterns as the general demographics data.)
# - Use the sklearn objects from the general demographics data, and apply their transformations to the customers data. That is, you should not be using a `.fit()` or `.fit_transform()` method to re-fit the old objects, nor should you be creating new sklearn objects! Carry the data through the feature scaling, PCA, and clustering steps, obtaining cluster assignments for all of the data in the customer demographics data.
# Load in the customer demographics data.
customers = pd.read_csv('Udacity_CUSTOMERS_Subset.csv', error_bad_lines=False, sep=';')
# Apply preprocessing, feature transformation, and clustering from the general
# demographics onto the customer data, obtaining cluster predictions for the
# customer demographics data.
customers = clean_data(customers)
customers_scaled = sc.fit_transform(customers)
print("Data Scaling completed")
principalComponents_customers = pca.fit_transform(customers_scaled)
print("Dimentionality reduction complete")
customers_labels_KM = kmeans_final.fit_predict(principalComponents_customers)
print("KMeans calculation complete")
# ### Step 3.3: Compare Customer Data to Demographics Data
#
# At this point, you have clustered data based on demographics of the general population of Germany, and seen how the customer data for a mail-order sales company maps onto those demographic clusters. In this final substep, you will compare the two cluster distributions to see where the strongest customer base for the company is.
#
# Consider the proportion of persons in each cluster for the general population, and the proportions for the customers. If we think the company's customer base to be universal, then the cluster assignment proportions should be fairly similar between the two. If there are only particular segments of the population that are interested in the company's products, then we should see a mismatch from one to the other. If there is a higher proportion of persons in a cluster for the customer data compared to the general population (e.g. 5% of persons are assigned to a cluster for the general population, but 15% of the customer data is closest to that cluster's centroid) then that suggests the people in that cluster to be a target audience for the company. On the other hand, the proportion of the data in a cluster being larger in the general population than the customer data (e.g. only 2% of customers closest to a population centroid that captures 6% of the data) suggests that group of persons to be outside of the target demographics.
#
# Take a look at the following points in this step:
#
# - Compute the proportion of data points in each cluster for the general population and the customer data. Visualizations will be useful here: both for the individual dataset proportions, but also to visualize the ratios in cluster representation between groups. Seaborn's [`countplot()`](https://seaborn.pydata.org/generated/seaborn.countplot.html) or [`barplot()`](https://seaborn.pydata.org/generated/seaborn.barplot.html) function could be handy.
# - Recall the analysis you performed in step 1.1.3 of the project, where you separated out certain data points from the dataset if they had more than a specified threshold of missing values. If you found that this group was qualitatively different from the main bulk of the data, you should treat this as an additional data cluster in this analysis. Make sure that you account for the number of data points in this subset, for both the general population and customer datasets, when making your computations!
# - Which cluster or clusters are overrepresented in the customer dataset compared to the general population? Select at least one such cluster and infer what kind of people might be represented by that cluster. Use the principal component interpretations from step 2.3 or look at additional components to help you make this inference. Alternatively, you can use the `.inverse_transform()` method of the PCA and StandardScaler objects to transform centroids back to the original data space and interpret the retrieved values directly.
# - Perform a similar investigation for the underrepresented clusters. Which cluster or clusters are underrepresented in the customer dataset compared to the general population, and what kinds of people are typified by these clusters?
#Convert general demographic clusters info to DF
general_data_labels = np.array(azdias_labels_KM)
unique, counts = np.unique(general_data_labels, return_counts=True)
general_data_labels_unq = dict(zip(unique, counts))
gen_df = pd.DataFrame(list(general_data_labels_unq.items()),columns = ['clusters','count'])
gen_df['percentage'] = (gen_df['count']/gen_df['count'].sum()) * 100
#Convert customer clusters info to DF
customer_data_labels = np.array(customers_labels_KM)
unique, counts = np.unique(customer_data_labels, return_counts=True)
customer_data_labels_unq = dict(zip(unique, counts))
cust_df = pd.DataFrame(list(customer_data_labels_unq.items()),columns = ['clusters','count'])
cust_df['percentage'] = (cust_df['count']/cust_df['count'].sum()) * 100
# Compare the proportion of data in each cluster for the customer data to the
# proportion of data in each cluster for the general population.
sns.set(style="darkgrid")
fig, ax = plt.subplots(1,2, figsize=(35,20))
sns.barplot(x=gen_df['clusters'], y=gen_df['percentage'], ax=ax[0])
sns.barplot(x=cust_df['clusters'], y=cust_df['percentage'], ax=ax[1])
gen_df
cust_df
azdias_clean['clusters'] = general_data_labels
customers['clusters'] = customer_data_labels
# +
# What kinds of people are part of a cluster that is overrepresented in the
# customer data compared to the general population?
#If we look at above graphics we can see that cluster 0 overrepresented compared to general population
cust_cl0 = customers.loc[customers['clusters'] == 0]
# -
cust_cl0.head()
print(cust_cl0['MAINSTREAM'].value_counts())
print(cust_cl0['GEN40'].value_counts())
print(cust_cl0['GEN50'].value_counts())
print(cust_cl0['GEN60'].value_counts())
print(cust_cl0['GEN70'].value_counts())
print(cust_cl0['GEN80'].value_counts())
print(cust_cl0['GEN90'].value_counts())
print(cust_cl0['WEALTH'].value_counts())
print(cust_cl0['LIFE_STAGE'].value_counts())
# What kinds of people are part of a cluster that is underrepresented in the
# customer data compared to the general population?
#If we look at above graphics we can see that cluster 3 underrepresented compared to general population
cust_cl3 = customers.loc[customers['clusters'] == 3]
print(cust_cl3['MAINSTREAM'].value_counts())
print(cust_cl3['GEN40'].value_counts())
print(cust_cl3['GEN50'].value_counts())
print(cust_cl3['GEN60'].value_counts())
print(cust_cl3['GEN70'].value_counts())
print(cust_cl3['GEN80'].value_counts())
print(cust_cl3['GEN90'].value_counts())
print(cust_cl3['WEALTH'].value_counts())
print(cust_cl3['LIFE_STAGE'].value_counts())
# ### Discussion 3.3: Compare Customer Data to Demographics Data
#
# Cluster 0 is overrepresented in customer dataset compared to general demographic set. So this cluster is one of our target customer cluster.
# When we analyze customers in this data list
# * They are more likely to be AVANTGARDE
# * More likely to belong GEN50/60
# * Most of them belongs to wealthy and prosperous households
# * Their life stage most probably older families & mature couples
#
# Cluster 3 is underrepresented in customer dataset compared to general demographic set. So this cluster is outside of our target demographics
# When we analyze customers in this data list
# * They are more likely to be MAINSTREAM
# * More likely to belong GEN40/50
# * Most of them belongs to poorer households
# * Their life stage most probably elders in retirement
# > Congratulations on making it this far in the project! Before you finish, make sure to check through the entire notebook from top to bottom to make sure that your analysis follows a logical flow and all of your findings are documented in **Discussion** cells. Once you've checked over all of your work, you should export the notebook as an HTML document to submit for evaluation. You can do this from the menu, navigating to **File -> Download as -> HTML (.html)**. You will submit both that document and this notebook for your project submission.
| Project_3/Identify_Customer_Segments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Basic scVI Tutorial
# cd ../..
# ### Loading Config
# +
import json
with open('docs/notebooks/basic_tutorial.config.json') as f:
config = json.load(f)
print(config)
n_epochs_all = config['n_epochs'] if 'n_epochs' in config else None
save_path = config['save_path'] if 'save_path' in config else 'data/'
n_samples_tsne = config['n_samples_tsne'] if 'n_samples_tsne' in config else None
n_samples_posterior_density = config['n_samples_posterior_density'] if 'n_samples_posterior_density' in config else None
train_size = config['train_size'] if 'train_size' in config else None
M_sampling_all = config['M_sampling'] if 'M_sampling' in config else None
M_permutation_all = config['M_permutation'] if 'M_permutation' in config else None
rate = config['rate'] if 'rate' in config else None
# +
# %matplotlib inline
import os
import numpy as np
import seaborn as sns
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from scvi.dataset import CortexDataset, RetinaDataset
from scvi.models import *
from scvi.inference import UnsupervisedTrainer
# -
# ## Loading data
#
# Here we load the CORTEX dataset described in
#
# * Zeisel, Amit, et al. "Cell types in the mouse cortex and hippocampus revealed by single-cell RNA-seq." Science 347.6226 (2015): 1138-1142.
#
# Please see our data loading Jupyter notebook for more examples of data loading -- scVI has many "built-in" datasets, as well as support for loading arbitrary .csv, .loom, and .h5ad (AnnData) files.
gene_dataset = CortexDataset(save_path=save_path)
# ## Training
# * __n_epochs__: Maximum number of epochs to train the model. If the likelihood change is small than a set threshold training will stop automatically.
# * __lr__: learning rate. Set to 0.001 here.
# * __use_batches__: If the value of true than batch information is used in the training. Here it is set to false because the cortex data only contains one batch.
# * __use_cuda__: Set to true to use CUDA.
#
n_epochs=400 if n_epochs_all is None else n_epochs_all
lr=1e-3
use_batches=False
use_cuda=True
# **Train the model and output model likelihood every 5 epochs**
vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * use_batches)
trainer = UnsupervisedTrainer(vae,
gene_dataset,
train_size=0.75,
use_cuda=use_cuda,
frequency=5)
trainer.train(n_epochs=n_epochs, lr=lr)
# ** Plotting the likelihood change across the 500 epochs of training: blue for training error and orange for testing error.**
ll_train_set = trainer.history["ll_train_set"]
ll_test_set = trainer.history["ll_test_set"]
x = np.linspace(0,500,(len(ll_train_set)))
plt.plot(x, ll_train_set)
plt.plot(x, ll_test_set)
plt.ylim(1150,1600)
plt.show()
# ## Visualizing the latent space
trainer.train_set.show_t_sne(n_samples=n_samples_tsne, color_by='labels')
# ## Imputation
#
#
# The ability to impute missing values is useful in practical applications in addition to providing an assay for generalization performance. In the following analysis, we benchmark scVI against BISCUIT, ZINB-WaVE and ZIFA, as well as MAGIC, which provides imputation without explicit statistical modeling. To evaluate these methods on a given dataset, we generated a **corrupted training set**, and then fitted the perturbed dataset with each of the benchmark methods and evaluate them by comparing the imputed values to the original ones (Methods 4.7). Overall, we observe that the imputation accuracy of scVI is higher or comparable (less than one transcript for median error) across all datasets
#
# #### Corrupting the datasets for imputation benchmarking.
#
# Two different approaches to measure the robustness of algorithms to noise in the data:
#
# - **Uniform zero introduction**: select randomly a rate r% of the non-zero entries and multiply the entry n with a Ber(0.9) random variable.
# - **Binomial data corruption**: select a rate r% of the matrix and replace an entry n by a Bin(n, 0.2) random variable.
#
# By default, the rate r is set a 0.1
#
# #### Accuracy of imputing missing data
#
# As imputation tantamount to replace missing data by its mean conditioned on being observed, we use the median L1 distance between the original dataset and the imputed values for corrupted entries only.
# Parameters:
# * The rate of simulated dropout is defined by __rate__, here set ot 0.1
# +
n_epochs = 400 if n_epochs_all is None else n_epochs_all
vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * use_batches)
trainer = UnsupervisedTrainer(vae,
gene_dataset,
train_size=0.75 if train_size is None else train_size,
use_cuda=use_cuda)
trainer.corrupt_posteriors(rate=0.1, corruption="uniform")
trainer.train(n_epochs)
trainer.uncorrupt_posteriors()
original_list, imputed_list = trainer.train_set.imputation_benchmark(verbose=True, n_samples=1)
# -
# Median of medians for all distances
imputation_errors = np.abs(np.concatenate(original_list) - np.concatenate(imputed_list))
median_imputation_score = np.median(imputation_errors)
print(median_imputation_score)
# We then plot the distribution of absolute errors between the imputed value and the true value at the dropout positions.
#
# Note: The imputed value __px_rate__ is the rate parameter (expected value) of the Zero-Inflated Negative Binomial (ZINB) distribution.
plt.hist(np.log10(imputation_errors))
# ### Imputing without artificial corruption
#
# Through the imputation benchmark above, we have assessed the strength of scVI for this task.
#
# To actually perform the imputation on a model trained on regular data, we might use the `.imputation` method. As stochasticity of the training requires iteration on permuted samples of the data, the output result is in random order.
#
# To get an ordered output result, we might use `.sequential` posterior's method which return another instance of posterior (with shallow copy of all its object references), but where the iteration is in the same ordered as its indices attribute.
#
# +
imputed_values_unordered = trainer.train_set.imputation()
indices = trainer.train_set.indices # The indices in the order that they will be processed with .sequential()
imputed_values_indices = trainer.train_set.sequential().imputation() # imputation result ordered along indices
# -
# ## Differential Expression
# From the trained VAE model we can sample the gene expression rate for each gene in each cell. For the two populations of interest, we can then randomly sample pairs of cells, one from each population to compare their expression rate for a gene. The degree of differential expression is measured by __logit(p/(1-p))__ where __p__ is the probability of a cell from population A having a higher expression than a cell from population B. We can form the null distribution of the DE values by sampling pairs randomly from the combined population.
#
# The following example is implemented for the cortext dataset, vary __cell_types__ and __genes_of_interest__ for other datasets.
# **1. Set population A and population B for comparison**
# +
cell_types = gene_dataset.cell_types
print(gene_dataset.cell_types)
# oligodendrocytes (#4) VS pyramidal CA1 (#5)
couple_celltypes = (4, 5) # the couple types on which to study DE
print("\nDifferential Expression A/B for cell types\nA: %s\nB: %s\n" %
tuple((cell_types[couple_celltypes[i]] for i in [0, 1])))
# -
# **2. Define parameters**
# * __M_sampling__: the number of times to sample __px_scales__ from the vae model for each gene in each cell.
# * __M_permutation__: Number of pairs sampled from the px_scales values for comparison.
# +
M_sampling = 100 if M_sampling_all is None else M_sampling_all
M_permutation = 100000 if M_permutation_all is None else M_permutation_all
permutation = False
# -
# **3. Sample from the gene expression level from all cells**
# Note: The expectation of the ZINB distribution __px_rate ~ library_size * px_scale__, so __px_scale__ could be understood as the mean gene expression level of each cell after adjusting for the library size factor.
px_scale, all_labels = trainer.train_set.differential_expression_stats(M_sampling=M_sampling)
# **4. Extract the sampled gene expression level for the two populations of interest, and create indexes for the samples**
# +
sample_rate_a = px_scale[(all_labels == couple_celltypes[0]).ravel()].reshape(-1, px_scale.shape[1])
sample_rate_b = px_scale[(all_labels == couple_celltypes[1]).ravel()].reshape(-1, px_scale.shape[1])
list_1 = list(np.arange(sample_rate_a.shape[0]))
list_2 = list(sample_rate_a.shape[0] + np.arange(sample_rate_b.shape[0]))
samples = np.vstack((sample_rate_a, sample_rate_b))
# -
# **5. Compute whether a gene is differentially expressed by computing pairs of cells from population A and population B**
u, v = np.random.choice(list_1, size=M_permutation), np.random.choice(list_2, size=M_permutation)
first_set = samples[u]
second_set = samples[v]
res1 = np.mean(first_set >= second_set, 0)
res1 = np.log(res1 + 1e-8) - np.log(1 - res1 + 1e-8)
# **6. Obtaining the null value by comparing pairs sampled from the combined population**
u, v = (np.random.choice(list_1 + list_2, size=M_permutation),
np.random.choice(list_1 + list_2, size=M_permutation))
first_set = samples[u]
second_set = samples[v]
res2 = np.mean(first_set >= second_set, 0)
res2 = np.log(res2 + 1e-8) - np.log(1 - res2 + 1e-8)
# **7. Print out the differential expression value from both the true comparison and the permuted comparison**
genes_of_interest = ["Thy1", "Mbp"]
gene_names = gene_dataset.gene_names
result = [(gene_name, res1[np.where(gene_names == gene_name.upper())[0]][0],res2[np.where(gene_names == gene_name.upper())[0]][0]) for gene_name in genes_of_interest]
print('\n'.join([gene_name + " : " + str(r1) + " , "+ str(r2) for (gene_name, r1,r2) in result]))
# **1-7. Step one through seven obtained with a single line **
#
# Give as argument the two cell types and genes of interest and it will return the list of the bayes factor score for each gene in the same order specified.
bayes_factors_list = trainer.train_set.differential_expression_score(
'oligodendrocytes', 'pyramidal CA1',
M_sampling=M_sampling,
M_permutation=M_permutation,
genes= ["THY1", "MBP"]
)
# **8. Plot the null distribution of the DE values**
plt.hist(res2)
# **9. Visualize top 10 most expressed genes per cell types**
# +
genes, expression = trainer.train_set.differential_expression_table(M_sampling=M_sampling)
plt.figure(figsize=(20, 20))
im = plt.imshow(expression, cmap='RdYlGn', interpolation='none', aspect='equal')
ax = plt.gca()
ax.set_xticks(np.arange(0, 7, 1))
ax.set_xticklabels(gene_dataset.cell_types, rotation='vertical')
ax.set_yticklabels(genes)
ax.set_yticks(np.arange(0, 70, 1))
ax.tick_params(labelsize=14)
plt.colorbar(shrink=0.2)
# -
# ## Correction for batch effects
#
# First we load the RETINA dataset that is described in
#
# * <NAME>, et al. "Comprehensive classification of retinal bipolar neurons by single-cell transcriptomics." Cell 166.5 (2016): 1308-1323.
gene_dataset = RetinaDataset(save_path=save_path)
# +
n_epochs=50 if n_epochs_all is None else n_epochs_all
lr=1e-3
use_batches=True
use_cuda=True
### Train the model and output model likelihood every 5 epochs
vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * use_batches)
trainer = UnsupervisedTrainer(vae,
gene_dataset,
train_size=0.9,
use_cuda=use_cuda,
frequency=5)
trainer.train(n_epochs=n_epochs, lr=lr)
# +
# Plotting the likelihood change across the 50 epochs of training: blue for training error and orange for testing error.
ll_train = trainer.history["ll_train_set"]
ll_test = trainer.history["ll_test_set"]
x = np.linspace(0,50,(len(ll_train)))
plt.plot(x, ll_train)
plt.plot(x, ll_test)
plt.ylim(min(ll_train)-50, 3500)
plt.show()
# -
# **Computing batch mixing**
print("Entropy batch mixing :", trainer.train_set.entropy_batch_mixing())
# **Coloring by batch and cell type**
# obtaining latent space in the same order as the input data
trainer.train_set.show_t_sne(n_samples=n_samples_tsne, color_by='batches and labels')
| docs/notebooks/basic_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
from pathlib import Path
from functools import partial
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.DEBUG)
from spleeter.audio.adapter import get_default_audio_adapter
from spleeter.separator import Separator
from spleeter.audio.convertor import to_stereo
from spleeter.model import model_fn
from spleeter.model.provider import ModelProvider
from spleeter.dataset import get_training_dataset, get_validation_dataset
config_path = "config/voice_config.json"
with open(config_path) as f:
params = json.load(f)
# #### Создаем `estimator`
def _create_estimator(params):
""" Creates estimator.
:param params: TF params to build estimator from.
:returns: Built estimator.
"""
session_config = tf.compat.v1.ConfigProto()
session_config.gpu_options.per_process_gpu_memory_fraction = 0.85
estimator = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=params['model_dir'],
params=params,
config=tf.estimator.RunConfig(
save_checkpoints_steps=params['save_checkpoints_steps'],
tf_random_seed=params['random_seed'],
save_summary_steps=params['save_summary_steps'],
session_config=session_config,
log_step_count_steps=100,
keep_checkpoint_max=5))
return estimator
estimator = _create_estimator(params)
estimator
# #### Загрузчик тренировочных данных
def _create_train_spec(params, audio_adapter, audio_path):
""" Creates train spec.
:param params: TF params to build spec from.
:returns: Built train spec.
"""
input_fn = partial(get_training_dataset, params, audio_adapter, audio_path)
train_spec = tf.estimator.TrainSpec(
input_fn=input_fn,
max_steps=params['train_max_steps'])
return train_spec
audio_adapter = get_default_audio_adapter()
audio_path = str(Path('..') / '..' / "data" / "transformed" / "nr")
assert len(list(Path(audio_path).iterdir())) >= 2
train_spec = _create_train_spec(params, audio_adapter, audio_path)
# #### Загрузчик валидационных данных
def _create_evaluation_spec(params, audio_adapter, audio_path):
""" Setup eval spec evaluating ever n seconds
:param params: TF params to build spec from.
:returns: Built evaluation spec.
"""
input_fn = partial(
get_validation_dataset,
params,
audio_adapter,
audio_path)
evaluation_spec = tf.estimator.EvalSpec(
input_fn=input_fn,
steps=None,
throttle_secs=params['throttle_secs'])
return evaluation_spec
evaluation_spec = _create_evaluation_spec(params, audio_adapter, audio_path)
# #### Обучение
# (вся магия — внутри)
tf.estimator.train_and_evaluate(estimator, train_spec, evaluation_spec)
# #### Сохраним полученные веса
ModelProvider.writeProbe(params['model_dir'])
# Вышло на плато, поэтому остановил обучение (суммарно обучалось 8 часов 50 минут, за это время 61.9к шагов)
#
# ![abs_loss](https://i.imgur.com/ycLcsGS.png)
#
# ![spectr_loss](https://i.imgur.com/Zt2hwho.png)
| src/training/2.0_model_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check Environment
# This notebook checks that you have correctly created the environment and that all packages needed are installed.
# ## Environment
#
# The next command should return a line like (Mac/Linux):
#
# /<YOUR-HOME-FOLDER>/anaconda/envs/ztdl/bin/python
#
# or like (Windows 10):
#
# C:\\<YOUR-HOME-FOLDER>\\Anaconda3\\envs\\ztdl\\python.exe
#
# In particular you should make sure that you are using the python executable from within the course environment.
#
# If that's not the case do this:
#
# 1. close this notebook
# 2. go to the terminal and stop jupyer notebook
# 3. make sure that you have activated the environment, you should see a prompt like:
#
# (ztdl) $
# 4. (optional) if you don't see that prompt activate the environment:
# - mac/linux:
#
# source activate ztdl
#
# - windows:
#
# activate ztdl
# 5. restart jupyter notebook
import os
import sys
sys.executable
# ## Python 3.5
#
# The next line should say that you're using Python 3.5.x from Continuum Analytics. At the time of publication it looks like this (Mac/Linux):
#
# 3.5.3 |Continuum Analytics, Inc.| (default, Mar 6 2017, 12:15:08) \n[GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)
#
# or like this (Windows 10):
#
# 3.5.3 |Continuum Analytics, Inc.| (default, May 11 2017, 13:52:01) [MSC v.1900 64 bit (AMD64)]
#
# but date and exact version of GCC may change in the future.
#
# If you see a different version of python, go back to the previous step and make sure you created and activated the environment correctly.
import sys
sys.version
# ## Jupyter
#
# Check that Jupyter is running from within the environment. The next line should look like (Mac/Linux):
#
# /<YOUR-HOME-FOLDER>/anaconda/envs/ztdl/lib/python3.5/site-packages/jupyter.py'
#
# or like this (Windows 10):
#
# C:\\Users\\paperspace\\Anaconda3\\envs\\ztdl\\lib\\site-packages\\jupyter.py
import jupyter
jupyter.__file__
# ## Other packages
#
# Here we will check that all the packages are installed and have the correct versions. If everything is ok you should see:
#
# Using TensorFlow backend.
#
# Houston we are go!
#
# If there's any issue here please make sure you have checked the previous steps and if it's all good please send us a question in the Q&A forum.
# +
import pip
import numpy
import jupyter
import matplotlib
import sklearn
import scipy
import pandas
import PIL
import seaborn
import h5py
import tensorflow
import keras
assert(pip.__version__ == '9.0.1')
assert(numpy.__version__ == '1.12.0')
assert(matplotlib.__version__ == '2.0.0')
assert(sklearn.__version__ == '0.18.1')
assert(scipy.__version__ == '0.19.0')
assert(pandas.__version__ == '0.19.2')
assert(PIL.__version__ == '4.0.0')
assert(seaborn.__version__ == '0.7.1')
assert(h5py.__version__ == '2.7.0')
assert(tensorflow.__version__ == '1.1.0')
assert(keras.__version__ == '2.0.4')
print("Houston we are go!")
# -
| course/0_Check_Environment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from finviz.screener import Screener
import nest_asyncio
import datetime as dt
import pandas as pd
import yfinance as yf
nest_asyncio.apply()
filter = ['sh_avgvol_o2000', 'sh_opt_option', 'sh_price_o10', 'sh_short_o30' ]
stock_list = Screener(filters=filter, table='Ownership', order='-shortinterestshare')
#print(stock_list)
stock_df = pd.DataFrame(columns = ['stock', 'price', 'volume', 'float_short'])
print("%s %s %s %s" % ('Ticker', 'Price', 'Volume', 'Float Short'))
for stock in stock_list:
symbol = stock['Ticker']
price = stock['Price']
volume = stock['Volume']
float_short = stock['Float Short']
num_str = float_short.split("%")[0]
percent = float(num_str)
if (percent > 50):
#print("%s %s %s %s" % (stock['Ticker'], price, volume, float_short))
stock_df = stock_df.append({'stock': symbol, 'price' : price, 'volume' : volume, 'float_short': float_short}, ignore_index=True)
stock_df.head(20)
# -
| Squeeze Box Parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import sklearn
#Read in training data
TrainData = pd.read_csv('training/train.csv')
TrainData.head()
TrainData.columns
TrainData.shape
TrainData.describe(include='all')
#Too wide
TrainData.dtypes
#View only object columns
TrainData.describe(include=['object'])
#Only numeric columns
TrainData.describe(include=['float64', 'int64'])
from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
#Change date variables "DOB" and "Lead creation" to true dates
def date_clean(df, v):
#Use to_datetime function
df[v + '_clean'] = pd.to_datetime(df[v], dayfirst=True)
#If date > 2017, subtract 100 years
for n in range(len(df[v + '_clean'])):
x = df[v + '_clean'][n]
if x.year > 2017: df[v + '_clean'][n] = x - relativedelta(years=100)
date_clean(TrainData, 'DOB')
date_clean(TrainData, 'Lead_Creation_Date')
TrainData[['DOB_clean', 'Lead_Creation_Date_clean']].head()
#Check minimum and maximum dates of birth
max(TrainData['DOB_clean']), min(TrainData['DOB_clean'])
#Check minimum and maximum lead creation dates
max(TrainData['Lead_Creation_Date_clean']), min(TrainData['Lead_Creation_Date_clean'])
# Let's just try a logistic regression model first to see how far we get. Let's create dummies for the variables with fewer than about 10 categories, to keep the data size reasonable.
dum = pd.get_dummies(TrainData, sparse=True, drop_first=True, dummy_na=True,
columns=['Gender', 'City_Category', 'Employer_Category1', 'Primary_Bank_Type', 'Contacted',
'Source_Category', 'Employer_Category2', 'Var1'])
dum.head()
dum.columns
#Get rid of columns we won't use in logistic regression
TrainData_ForLogistic = dum.drop(['City_Code', 'Employer_Code', 'Customer_Existing_Primary_Bank_Code', 'Source',
'DOB', 'Lead_Creation_Date'], axis=1)
TrainData_ForLogistic.head()
TrainData_ForLogistic.shape
TrainData_ForLogistic.columns
TrainData_ForLogistic.to_csv('training_logistic.csv')
# Now we must clean the test data in the same way in order for the logistic regression to work on it.
TestData = pd.read_csv('test/test.csv')
TestData.head()
TestData.shape
date_clean(TestData, 'DOB')
date_clean(TestData, 'Lead_Creation_Date')
TestData[['DOB_clean', 'Lead_Creation_Date_clean']]
#Check min/max dates
min(TestData.DOB_clean), max(TestData.DOB_clean)
min(TestData.Lead_Creation_Date_clean), max(TestData.Lead_Creation_Date_clean)
#Check missing values for dates
TestData[['DOB_clean', 'Lead_Creation_Date_clean']].describe()
TrainData[['DOB_clean', 'Lead_Creation_Date_clean']].describe()
dum = pd.get_dummies(TestData, sparse=True, drop_first=True, dummy_na=True,
columns=['Gender', 'City_Category', 'Employer_Category1', 'Primary_Bank_Type', 'Contacted',
'Source_Category', 'Employer_Category2', 'Var1'])
dum.head()
#Get rid of columns we won't use in logistic regression
TestData_ForLogistic = dum.drop(['City_Code', 'Employer_Code', 'Customer_Existing_Primary_Bank_Code', 'Source',
'DOB', 'Lead_Creation_Date'], axis=1)
TestData_ForLogistic.shape
#One fewer column than training data
TestData_ForLogistic.columns
#Which is missing?
for i in TrainData_ForLogistic.columns:
if i not in TestData_ForLogistic.columns:
print i
#Makes sense
TestData_ForLogistic.to_csv('test_logistic.csv')
# ### New logistic regression method: Fill NA with zeroes, then make an indicator variable
from sklearn.linear_model import LogisticRegression
Train = TrainData_ForLogistic.copy()
Test = TestData_ForLogistic.copy()
#Need to change dates to ordinals to include them in regression
for d in ['DOB_clean', 'Lead_Creation_Date_clean']:
Train[d] = map(date.toordinal, Train[d])
Test[d] = map(date.toordinal, Test[d])
Train.head()
Train.describe()
Train.DOB_clean.value_counts()[1]
Test.DOB_clean.value_counts()[1]
# +
#NOTE: toordinal replaced missing date values with "1"
# -
#Clean up "Train" and "Test" to make ID the index
Train.index = Train['ID']
Train.drop('ID', axis=1, inplace=True)
Test.index = Test.ID
Test.drop('ID', axis=1, inplace=True)
Train.head()
Test.head()
#Create missing value indicators for numeric variables
varlist = ['Monthly_Income', 'Existing_EMI', 'Loan_Amount', 'Loan_Period', 'Interest_Rate', 'EMI']
for v in varlist:
for df in Train, Test:
df[v + '_NAind'] = pd.isnull(df[v]) * 1 #make 0/1 integer instead of T/F
Train.head(10)
#Now impute 0 to missing values
Train.fillna(0, inplace=True)
Test.fillna(0, inplace=True)
Test.head()
#Missing value indicator for DOB
for df in Train, Test:
df['DOB_NAind'] = (df.DOB_clean == 1) * 1
df.describe()
Train.describe()
Train.shape
Test.shape
#Now we are ready go start logistic model
LogisticModel_ZeroFill = LogisticRegression()
X = Train.drop('Approved', axis=1)
y = Train['Approved']
LogisticModel_ZeroFill.fit(X, y)
prob_ZeroFill = LogisticModel_ZeroFill.predict_proba(Test)
prob_ZeroFill
prob1_ZeroFill = pd.Series(x[1] for x in prob_ZeroFill)
prob1_ZeroFill.head()
Results_ZeroFill = pd.DataFrame({'ID': Test.index,
'Approved': prob1_ZeroFill})
Results_ZeroFill.head()
Results_ZeroFill = Results_ZeroFill[['ID', 'Approved']]
Results_ZeroFill.head()
Results_ZeroFill.to_csv('soln_logistic_zerofill.csv', index=False)
# ### Old data files to csv
TrainData.drop(['DOB', 'Lead_Creation_Date'], axis=1).to_csv('TrainData_clean.csv', index=False)
TestData.drop(['DOB', 'Lead_Creation_Date'], axis=1).to_csv('TestData_clean.csv', index=False)
# ## kNN classifier with same data / method of filling
from sklearn.neighbors import KNeighborsClassifier
kNN5 = KNeighborsClassifier()
kNN5.fit(X, y)
prob_kNN5 = kNN5.predict_proba(Test)
prob_kNN5
#Obviously way too few neighbors, let's try 100
kNN100 = KNeighborsClassifier(n_neighbors=100)
kNN100.fit(X, y)
prob_kNN100 = kNN100.predict_proba(Test)
prob_kNN100
prob1_kNN100 = pd.Series(x[1] for x in prob_kNN100)
Results_kNN100 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN100})
Results_kNN100.head()
Results_kNN100 = Results_kNN100[['ID', 'Approved']]
Results_kNN100.head()
Results_kNN100.to_csv('soln_kNN100.csv', index=False)
#Try 1000
kNN1000 = KNeighborsClassifier(n_neighbors=1000)
kNN1000.fit(X, y)
prob_kNN1000 = kNN1000.predict_proba(Test)
prob1_kNN1000 = pd.Series(x[1] for x in prob_kNN1000)
Results_kNN1000 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN1000})
Results_kNN1000.head()
Results_kNN1000 = pd.DataFrame({'Approved': prob1_kNN1000, 'ID': Test.index})
Results_kNN1000.head()
Results_kNN1000 = Results_kNN1000[['ID', 'Approved']]
Results_kNN1000.head()
Results_kNN1000.to_csv('soln_kNN1000.csv', index=False)
#Weight by distance
kNN1000d = KNeighborsClassifier(n_neighbors=1000, weights='distance')
kNN1000d.fit(X, y)
prob_kNN1000d = kNN1000d.predict_proba(Test)
prob1_kNN1000d = pd.Series(x[1] for x in prob_kNN1000d)
Results_kNN1000d = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN1000d})
Results_kNN1000d.head()
Results_kNN1000d = Results_kNN1000d[['ID', 'Approved']]
Results_kNN1000d.head()
Results_kNN1000d.to_csv('soln_kNN1000_dist.csv', index=False)
#Try 2500
kNN2500 = KNeighborsClassifier(n_neighbors=2500)
kNN2500.fit(X, y)
prob_kNN2500 = kNN2500.predict_proba(Test)
prob1_kNN2500 = pd.Series(x[1] for x in prob_kNN2500)
Results_kNN2500 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN2500})
Results_kNN2500.head()
Results_kNN2500 = Results_kNN2500[['ID', 'Approved']]
Results_kNN2500.head()
Results_kNN2500.to_csv('soln_kNN2500.csv', index=False)
#2500 scores slightly worse than 1000
#Try 500 for completeness
kNN500 = KNeighborsClassifier(n_neighbors=500)
kNN500.fit(X, y)
prob_kNN500 = kNN500.predict_proba(Test)
prob1_kNN500 = pd.Series(x[1] for x in prob_kNN500)
Results_kNN500 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN500})
Results_kNN500 = Results_kNN500[['ID', 'Approved']]
Results_kNN500.head()
Results_kNN500.to_csv('soln_kNN500.csv', index=False)
#500 scored slightly better than 1000
#Last attempt: 500 with distance weighting
kNN500d = KNeighborsClassifier(n_neighbors=500, weights='distance')
kNN500d.fit(X, y)
prob_kNN500d = kNN500d.predict_proba(Test)
prob1_kNN500d = pd.Series(x[1] for x in prob_kNN500d)
Results_kNN500d = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN500d})
Results_kNN500d = Results_kNN500d[['ID', 'Approved']]
Results_kNN500d.head()
Results_kNN500d.to_csv('soln_kNN500_dist.csv', index=False)
#This time distance weighting scored worse
#Let's try now adding some more dummies
#Recall which columns we have
Test.dtypes
# Of the variables we didn't fully encode, the ones with the fewest unique values are "Source" (29) and "Customer Primary Bank Code" (57). Let's try getting dummies for those and see if they improve the model. I don't want to get dummies for variables with thousands of values, as that would probably kill my laptop.
train_dum = pd.get_dummies(TrainData, columns=['Source', 'Customer_Existing_Primary_Bank_Code'],
dummy_na=True, sparse=True, drop_first=True)
test_dum = pd.get_dummies(TestData, columns=['Source', 'Customer_Existing_Primary_Bank_Code'],
dummy_na=True, sparse=True, drop_first=True)
train_dum.shape
test_dum.shape
for i in train_dum.columns:
if i not in test_dum.columns:
print i
for j in test_dum.columns:
if j not in train_dum.columns:
print j
#Get rid of variables that won't match up
train_dum.drop(['Source_S130', 'Source_S135', 'Source_S140', 'Source_S154', 'Source_S160',
'Customer_Existing_Primary_Bank_Code_B056'], axis=1, inplace=True)
test_dum.drop(['Source_S126', 'Source_S131', 'Source_S132', 'Source_S142'], axis=1, inplace=True)
#We already have all the other variables so drop the ones that don't start with "Source" or "Customer_Existing"
#Also keep ID
train_dum_clean = train_dum[['ID'] + [col for col in train_dum.columns if 'Source_' in col or
'Customer_Existing_' in col]]
train_dum_clean.shape
train_dum_clean.head()
test_dum_clean = test_dum[['ID'] + [col for col in train_dum.columns if 'Source_' in col or
'Customer_Existing_' in col]]
test_dum_clean.head()
#Training data with extra dummies
Train_extra = Train.merge(train_dum_clean, right_on='ID', left_index=True)
Train_extra.head()
Train_extra['ID'].head()
Test_extra = Test.merge(test_dum_clean, right_on='ID', left_index=True)
Test_extra.head()
for i in Train_extra.columns:
if i not in Test_extra.columns:
print i
X_extra = Train_extra.drop('Approved', axis=1)
y_extra = Train_extra['Approved']
kNN500.fit(X_extra, y_extra)
X_extra.dtypes
X_extra.describe(include=['object'])
#Oops
del Train_extra['Source_Category'], Test_extra['Source_Category']
Train_extra.index = Train_extra.ID
Test_extra.index = Test_extra.ID
del Train_extra['ID'], Test_extra['ID']
Train_extra.head()
Test_extra.head()
#Now this should work
X_extra = Train_extra.drop('Approved', axis=1)
y_extra = Train_extra['Approved']
kNN500.fit(X_extra, y_extra)
prob_kNN500x = kNN500.predict_proba(Test_extra)
prob1_kNN500x = [x[1] for x in prob_kNN500x]
Results_kNN500x = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN500x})
Results_kNN500x = Results_kNN500x[['ID', 'Approved']]
Results_kNN500x.head()
Results_kNN500x.to_csv('soln_kNN500_extra.csv', index=False)
#Got the EXACT same score as kNN500...I probably messed up somewhere
#Let's make a new model variable to be sure
kNN500x = KNeighborsClassifier(n_neighbors=500)
kNN500x.fit(X_extra, y_extra)
prob_kNN500x = kNN500x.predict_proba(Test_extra)
prob1_kNN500x = [x[1] for x in prob_kNN500x]
Results_kNN500x = pd.DataFrame({'ID': Test.index, 'Approved': prob1_kNN500x})
Results_kNN500x = Results_kNN500x[['ID', 'Approved']]
Results_kNN500x.head()
Results_kNN500x.to_csv('soln_kNN500_extra.csv', index=False)
# Score is still the EXACT same...disappointing. I will move on to a different type of classifier.
# ## Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X, y)
prob_rf = rf.predict_proba(Test)
prob_rf
prob1_rf = pd.Series(x[1] for x in prob_rf)
Results_rf10 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_rf})
Results_rf10 = Results_rf10[['ID', 'Approved']]
Results_rf10.head()
Results_rf10.to_csv('soln_rf10.csv', index=False)
# Score is pretty low, obviously not enough trees to assess probabilities accurately. Let's try 200.
rf200 = RandomForestClassifier(n_estimators=200)
rf200.fit(X, y)
prob_rf200 = rf200.predict_proba(Test)
prob1_rf200 = pd.Series(x[1] for x in prob_rf200)
Results_rf200 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_rf200})
Results_rf200 = Results_rf200[['ID', 'Approved']]
Results_rf200.head()
Results_rf200.to_csv('soln_rf200.csv', index=False)
#Pretty good score on that one - let's try 1000
rf1000 = RandomForestClassifier(n_estimators=1000)
rf1000.fit(X, y)
prob_rf1000 = rf1000.predict_proba(Test)
prob1_rf1000 = pd.Series(x[1] for x in prob_rf1000)
Results_rf1000 = pd.DataFrame({'ID': Test.index, 'Approved': prob1_rf1000})
Results_rf1000 = Results_rf1000[['ID', 'Approved']]
Results_rf1000.head()
Results_rf1000.to_csv('soln_rf1000.csv', index=False)
# This is my best score so far and will probably be my final submission. Since the data was created way back in the logistic regression code and I just kept using the same data frames, let's recap what I did:
# * Cleaned the date fields and made sure they don't have dates after 2017
# * Converted date fields to ordinals so classifiers can use them
# * Created dummies for categorical variables with fewer than 10 values
# * Created dummies for missing values of numeric variables
# * Replaced missing values of numeric variables with 0, so that the coefficient on the null indicator dummy will represent the value that best fits in those spots
# * Fitted classification algorithms based on the numeric variables and the dummies
#
# For k-nearest neighbors, 500 was the best value I tried; once I got up to 1000 or 2500, it was including neighbors too far away so the model wasn't quite as good a fit. For random forests, 1000 trees was enough to get the ROC score above 0.8; presumably more trees would score at least a little bit higher, but my 5-year-old laptop has limited power.
#
# Including more dummies for Source and Customer Existing Primary Bank didn't seem to affect the score at all. If I had more computing power, I would try including dummies for individual cities, as well as larger random forests. If I had more time, I would experiment with the parameters on the kNN and random forest classifiers, and perhaps also try adjusting the regularization method of the logistic regressor (although kNN and random forest appear to be better fits than logistic regression for this data set).
| McKinsey/McKinsey bank data exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sssbaconegg/age-friendly-busan/blob/main/test1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="nWAJX3gSj-um"
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + id="VVw656nmkSxA"
train = pd.read_csv('/content/sample_data/california_housing_test.csv')
test = pd.read_csv('/content/sample_data/california_housing_train.csv')
train.head()
# + id="pmS62xYfkogs"
test.head()
# + id="awpCajLXkqO-"
train.describe()
# + id="rlFb-F-4ksV2"
train.hist(figsize=(15.13), grid=False, bins=50)
plt.show()
# + id="qV9mBx6ik3Dk"
correlation=train.corr()
# + id="S04ttWM6k7cP"
plt.figure(figsize=(10,10))
sns.heatmap(correlation, annot=True)
plt.show()
| test1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/VRAraya/Actividad-2-ML/blob/main/Copia_de_SebastianG_Actividad_21_10_2020.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="gOoxTLExA005"
# #1 Ejercicio.
# Programe una red MLP con una capa oculta compuesta de 15 neuronas y salida sigmoidal, y además un clasificador bayesiano. Utilice la base datos iris (use sólo 2 clases).
# + id="jh1YbIUWKbNi"
# + id="5pH9FYmjAX4X"
import numpy as np
import matplotlib.pylab as plt
import tensorflow as tf
import tensorflow.keras as kr
from keras.layers import Dense
from keras.models import Sequential
from scipy.stats import multivariate_normal
from sklearn import datasets
from sklearn.model_selection import train_test_split
X, y = datasets.load_iris (return_X_y=True)
x = X[:100]
y = y[:100]
# + [markdown] id="2IV1rD1R8-LJ"
# Se obtiene la cantidad de datos (100) y dimensión (4) del X:
# + id="MCweV6oAx-rt" outputId="68c56650-0127-4aa6-d6cf-fce5337fc897" colab={"base_uri": "https://localhost:8080/", "height": 33}
x.shape
# + [markdown] id="ZiQIFQsr9QkT"
# Se construye la red MLP
# + id="19bPiqaEZN8E"
red = Sequential()
red.add(Dense(15, input_dim=4, activation='sigmoid'))
red.add(Dense(1, activation='sigmoid'))
# + id="EwX-wLz9y1jP" outputId="312bea67-fc39-4369-c323-a893eabfa6b7" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Compila la red
red.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
# Train test split desde sklearn
x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size=0.2)
# Ajusta la red
red.fit(x, y, epochs=150, batch_size=100)
# Evalua el porcentaje de clasificación en la red
porcentaje_clasificacion = red.evaluate(x, y)
# + id="0xAEOyFf891V" outputId="024f3d4a-0364-4ba2-ddf3-beb54fbfea8a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
red.predict(x)
# + id="ffx0th542V5I" outputId="187d5e3d-c3e5-4ea9-d8c2-2d27e8933845" colab={"base_uri": "https://localhost:8080/", "height": 98}
y
# + id="kduzNxT752tx"
mu_clase1 = np.mean(x[y==0],axis=0)
mu_clase2 = np.mean(x[y==1],axis=0)
sigma_clase1 = np.cov(x[y==0].T)
sigma_clase2 = np.cov(x[y==1].T)
# + id="8yvSVVf_7iID" outputId="6e1089ab-75a1-4b6c-ba1d-024b6eeffc5c" colab={"base_uri": "https://localhost:8080/", "height": 33}
sigma_c2.shape
# + id="ohjTrGIGId5F"
w1 = mu_clase1/(sigma_clase1[0,0]) #sigma_c1 es una matriz de 2x2. Solo consideramos el primer elemento, es decir, el elemento
w10 = -(1/(2*sigma_clase1[0,0]))*np.dot(mu_clase1.T,mu_clase1) + np.log(0.5)
w2 = mu_clase2/(sigma_clase2[0,0])
w20 = -(1/(2*sigma_clase2[0,0]))*np.dot(mu_clase2.T,mu_clase2) + np.log(0.5)
diferencia_w = w1-w2
diferencia_w0 = w10-w20
# + id="I1UoZ7urJuEX" outputId="9c73c502-defb-4194-c173-3ca89eb90968" colab={"base_uri": "https://localhost:8080/", "height": 613}
def x_final(x):
return (-diferencia_w[0]*x1 - diferencia_w0)/diferencia_w[0]
plt.figure()
plt.scatter(x[y==0], alpha=0.6)
plt.scatter(x[y==1], y, alpha=0.8,marker='x')
plt.plot(x,eq_final(x),color='r',lw=2)
plt.xlabel('x1',fontsize=30)
plt.ylabel('x2',fontsize=30)
| Victor_Araya_Act_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get SEV from Corelogic File
# # University of Michigan
# ## School of Information
# **Masters of Applied Data Science**<br>
# **Milestone 1**<br>
# <br>
# Affordable Housing Project
# <br><br>
# **Submitted by**<br>
# ><NAME><br><NAME><br><NAME>
import pandas as pd
import geopandas
from config import basedir
base = basedir()
# ## Import Municipalities Recoreds and Corelogic Files
# +
data_dir = base + r"/data/municipalities"
fp_input_file = data_dir + r"/All Municipalities No Geometry.csv"
All_Municipalities=pd.read_csv(fp_input_file)
# All_Municipalities=pd.read_csv('documents/UofM/SIADS 591/All Municipalities No Geometry.csv')
fp_input_file = data_dir + r"Taxdata.csv"
Tax_File=pd.read_csv('documents/UofM/SIADS 591/Taxdata.csv')
# Tax_File=pd.read_csv('documents/UofM/SIADS 591/Taxdata.csv')
# -
# ## Merge Files and Export to CSV
All_Municipalities=All_Municipalities.merge(Tax_File,how='left',left_on='PNUM',right_on='ORIGINAL APN')
All_Municipalities=All_Municipalities.loc[:,['ID','TOTAL VALUE CALCULATED']]
All_Municipalities=All_Municipalities.rename(columns={'TOTAL VALUE CALCULATED':'SEV'})
# +
data_dir = base + r"/data"
fp_output_file = data_dir + r"/SEV.csv'"
All_Municipalities.to_csv(fp_output_file,index=False)
# All_Municipalities.to_csv('documents/UofM/SIADS 591/SEV.csv',index=False)
# -
| src/final/.ipynb_checkpoints/sev-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={}
# # Turning In Your GitHub Repos
# + [markdown] nbgrader={}
# The purpose of this assignment is for you to "turn in" your Github repos. In addition to being used to turn in your project, this assignment will be assigned a grade that reflects your usage of Git/GitHub.
# + [markdown] nbgrader={}
# ## Homework GitHub repo
# + [markdown] nbgrader={}
# Throughout the quarter you should have been pushing your weekly homework to a public Github repo.
#
# 1. Make sure all of your homework is pushed to this repo.
# 2. In the following markkdown cell, paste the URL to that repo. This should be something like https://github.com/ellisonbg/phys202-2015-work
# + [markdown] deletable=false nbgrader={"checksum": "00acb1f05c2e0b17272076cd16e1d65b", "grade": true, "grade_id": "githubreposa", "points": 5, "solution": true}
# https://github.com/rsterbentz/phys202-2015-work
# + [markdown] nbgrader={}
# ## Project GitHub repo
# + [markdown] nbgrader={}
# To turn in the notebooks for your project, go through the following steps:
#
# 1. Create a new public GitHub repo, named `phys202-project`. If you need a refresher on creating a repo, have a look at this [tutorial](https://help.github.com/articles/create-a-repo/).
# 2. Clone the repo onto dirac1. When you do this, you should do it in a directory that doesn't already have a directory with the name `phys202-project` and which itself is not another GitHub repo.
# 3. Copy your project materials into the new GitHub repo on dirac1.
# 4. Commit and push your changes to GitHub
# 5. In the following markdown cell, paste the URL to that repo. This should be something like https://github.com/ellisonbg/phys202-project
#
# Before turning this in you should check the following:
#
# * Make sure it will be obvious to me what each notebook contains and which order I should go through them in.
# * If there are cells or notebook that take longer than 30-60 seconds to run, you should put a uppercase bold warning in a markdown cell immediately above the cell: **THIS CELL TAKES XXX MINUTES TO RUN**. I won't run these cells.
# * You should make sure that your notebooks will run without error wth a cleared kernel, from the top to the bottom, *leaving out the long running cells*. This will require you to save the output of long running cells to disk and load them back for analysis and visualization purposes. Yes, I will run your code!
# + [markdown] deletable=false nbgrader={"checksum": "e2204fa9a8e531e9578600dd34f48cee", "grade": true, "grade_id": "githubreposb", "points": 5, "solution": true}
# https://github.com/rsterbentz/phys202-project.git
| assignments/assignment13/GitHubRepos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JImMY5232/COMP-593/blob/main/WEEK_3_WORKING_WITH_DATABASES.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ATrLBWinTKCJ"
# # LAB 3: WORKING WITH DATABASES
#
# + [markdown] id="bH4FqkVLTWRU"
# ## Introduction
#
# During this lab, we will experiment with using a few different libararies and modules to connect to and interact with an SQLite database. After creating and configuring our testing database, we will populate it with Fake data and run some queries to see what kind of information we can retreive. For context, we will be creating RSSN, the "Really Simple Social Network".
# + [markdown] id="YtugCh8QUPoi"
# ## Creating a SQLite Database
#
# We will be utilizing the `sqlite3` library to create our database file, as it offers the convienience of creating our database file automatically in the event that it cannot find an existing file at the path specified.
#
# **Before you run the below code:**
# On the left of the notebook, select the file folder icon. Then, after you run the code block, you should be able to observe the creation of the database file. If it doesn't appear, try clicking the "Refresh" button above the list of folders.
# + id="pzyTkI5PPjib" colab={"base_uri": "https://localhost:8080/"} outputId="210972b0-3abe-49b0-bc48-b84e3bea2bd9"
import sqlite3
#When we retreive a Connection object, a new database will be created for us if it doesn't already exist.
myConnection = sqlite3.connect('social_network.db')
print(sqlite3.version)
# + [markdown] id="gRxXXt8iVmAp"
# ## Creating a Table
#
# + [markdown] id="plmEQRdNaDMe"
# We'll use the below code to create a table called `people` within our database.
# + colab={"base_uri": "https://localhost:8080/"} id="uyjSS3cQVwFa" outputId="dbac04ca-4814-4c66-8431-0d5fafbbef64"
import sqlite3
#Retreive the Connection object
myConnection = sqlite3.connect('social_network.db')
#Once we have a Connection object, we can generate a Cursor object, and use that to run our SQL Queries
myCursor = myConnection.cursor()
#Let's define the SQL Query we will use to create our first table:
createPeopleTable = """ CREATE TABLE IF NOT EXISTS people (
id integer PRIMARY KEY,
name text NOT NULL,
email text NOT NULL,
address text NOT NULL,
city text NOT NULL,
province text NOT NULL,
country text NOT NULL,
phone text,
bio text,
dob date NOT NULL,
heatmap integer,
created_at datetime NOT NULL,
updated_at datetime NOT NULL,
ipv4 text
);"""
#Now that we have the string to create our table,
#Cursor objects have an execute() method which will accept an SQL string and perform the operations described.
myCursor.execute(createPeopleTable)
#We can confirm if our table was created successfully by running the following SQL Query
#pragma_table_info is an internal SQLite function that will retun information about a table
myCursor.execute("SELECT group_concat(name, ', ') FROM pragma_table_info('people')")
print(myCursor.fetchone())
#We use to the commit() method on the database Connection object to persist our changes
myConnection.commit()
#It is always a good idea to close a connection when it will no longer be used
myConnection.close()
# + [markdown] id="5swGTtjga2qo"
#
# If you received a tuple containing the names of the columns, awesome! We have successfully created our database table.
# ```
# ('id, name, email, address, city, province, country, phone, bio, created_at, updated_at',)
# ```
#
# Run the below code block to add our first entry.
# + id="ZSPqve9na7b8" colab={"base_uri": "https://localhost:8080/"} outputId="f77e8b92-8290-4ce3-8fdb-609adc05c0b0"
import sqlite3
from pprint import pprint #Outputs data in a slightly easier to read format
from datetime import datetime #For generating dates and times
#Retreive the Connection object
myConnection = sqlite3.connect('social_network.db')
#Once we have a Connection object, we can generate a Cursor object, and use that to run our SQL Queries
myCursor = myConnection.cursor()
#Let's define the SQL Query we will use to create our first entry:
addPersonQuery = """INSERT INTO people (name,
email,
address,
city,
province,
country,
phone,
bio,
dob,
heatmap,
created_at,
updated_at,
ipv4)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"""
"""
The ?'s are placeholders that we can fill in when we use the execute() method.
This is really handy for code reuse, as we can pass those values are variables in a tuple
Instead of hard coding them into the statement.
"""
myPerson = ("<NAME>",
"<EMAIL>",
"123 Fake St.",
"Fakesville",
"Fakesdom",
"Fakopolis",
None,
None,
"1967-09-12",
0,
datetime.now(),
datetime.now(),
None)
myCursor.execute(addPersonQuery, myPerson)
#We can confirm if our table was created successfully by running the following SQL Query
#pragma_table_info is an internal SQLite function that will retun information about a table
myCursor.execute("SELECT * FROM people")
pprint(myCursor.fetchall())
# If you run this code block a few times, you will see that you only have 1 entry,
# If you uncomment the below lines and run the block a few more times, you will begin to see multiple entries.
myConnection.commit()
myConnection.close()
# + id="blL9pzRCgrMP"
# + [markdown] id="BTZ00Zscgrus"
# # Lab Submission
# + [markdown] id="rN93XC6xgvwP"
# We're going to build our experience with working with Libraries and examining documentation by populating our 'People' table with data provided to us by the `Faker` library. `Faker` is used to generate fake data and is very helpful for the rapid generation of databases for the purposes of testing. Run the two blocks below to install faker and get an idea of what it can do.
# + id="CejUXkoRhPik" colab={"base_uri": "https://localhost:8080/"} outputId="eb987399-3fe9-4bff-c6c3-847ce772e382"
# !pip install faker
# + id="vO2Uhcwrhw8-" colab={"base_uri": "https://localhost:8080/", "height": 398} outputId="e90b8834-493a-4c5a-f556-18725f1927ee"
from faker import Faker
fake = Faker()
for _ in range(10):
print('{} || {} '.format( fake.name(), fake.job() ) )
# + [markdown] id="IsYIsSvRjmiG"
# Very cool! Faker has tons of `providers` that can all be used to populate fake data. The [list of providers](https://faker.readthedocs.io/en/stable/providers.html) in the Faker documentation will help you fill out the columns for our People table.
#
# The goal of this script is to populate the people table with 1000 entries, with the following constraints:
#
# 1. The `heatmap` column must contain a random number between `999` and `2500`
# 2. The `created_at` and `updated_at` columns must use the `datetime` object (see examples above)
# 2. Use `Faker` to generate all other fields.
#
# *Hint: Each of the methods contained in the provider can be called directly from the base Faker object, for example, one can call the `file_name()` method from `faker.providers.file` by calling `Faker().file_name()`*
# + id="Yi-fE0uUlor4" colab={"base_uri": "https://localhost:8080/"} outputId="31d8350f-fde6-4422-e9bf-80dcb7f5937c"
import sqlite3
from pprint import pprint
from faker import Faker
from datetime import datetime #For generating dates and times
#Don't forget to import the module to generate random numbers!
#Retreive the Connection object
myConnection = sqlite3.connect('social_network.db')
#Once we have a Connection object, we can generate a Cursor object, and use that to run our SQL Queries
myCursor = myConnection.cursor()
#This is the same syntax as the above example:
addPersonQuery = """INSERT INTO people (name,
email,
address,
city,
province,
country,
phone,
bio,
dob,
heatmap,
created_at,
updated_at,
ipv4)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);"""
fake = Faker()
for i in range(1000):
myPerson = (fake.name(),
fake.ascii_safe_email(),
fake.street_address(),
fake.city(),
fake.administrative_unit(),
fake.country(),
fake.phone_number(),
fake.sentence(nb_words=5),
fake.date(),
fake.random_int(min=999, max=2500),
datetime.now(),
datetime.now(),
fake.ipv4())
myCursor.execute(addPersonQuery, myPerson)
myCursor.execute("SELECT * FROM people")
for record in myCursor:
pprint(record)
#Ideally, you will want to create a loop that will iterate 1000 times,
#Observe the example in the earlier code block showing the parameter tuple `myPerson`
#You can duplicate that inside your loop and replace those fields with calls to the appropriate faker provider method
#Execute your statement inside the loop,
#But, don't forget to commit and close your connection when you have finished!
myConnection.commit()
myConnection.close()
# + [markdown] id="mVEgfYArnITl"
# Finally, the last step,
# Using the code block below, combined with what you have learned above, and the Lecture notes,
# Craft a SQL Query that will return the `name` of no more than `20` `people` with a `heatmap` greater than `1500`
# + id="oN6wSS9roOP6" outputId="1b614a96-f0e6-4bac-be57-a9809f714d00" colab={"base_uri": "https://localhost:8080/"}
import sqlite3
from pprint import pprint
#Retreive the Connection object
myConnection = sqlite3.connect('social_network.db')
#Once we have a Connection object, we can generate a Cursor object, and use that to run our SQL Queries
myCursor = myConnection.cursor()
selectStatement =""" SELECT name FROM people
WHERE heatmap > 1500 LIMIT 20 """
myCursor.execute(selectStatement)
results = myCursor.fetchall()
pprint(results)
# + [markdown] id="qwNrP7FfpJg2"
# Your submission will contain, as usual, a link to your completed colab notebook, but in addition to that, you will download a copy of your social_network.db file and upload it to D2L. To download the file, right click it from the Files menu on the left of the Notebook.
| WEEK_3_WORKING_WITH_DATABASES.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Simple Linear Regression with NumPy
# In school, students are taught to draw lines like the following.
#
# $y=2x+1$
# <br>
#
# They're taught to pick two values for x and calculate the corresponding values for y using the equation. Then they draw a set of axes, plot the points, and then draw a line extending through the two dots on their axes.
# +
# numpy efficiently deals with numerical multi-dimensional arrays.
import numpy as np
# matplotlib is a plotting library, and pyplot is its easy-to-use module.
import matplotlib.pyplot as plt
# This just sets the default plot size to be bigger.
plt.rcParams['figure.figsize'] = (8, 6)
# +
# Draw some axes.
plt.plot([-1, 10], [0, 0], 'k-')
plt.plot([0, 0], [-1, 10], 'k-')
# Plot the red, blue and green lines.
plt.plot([1, 1], [-1, 3], 'b:')
plt.plot([-1, 1], [3, 3], 'r:')
# Plot the two points (1,3) and (2,5).
plt.plot([1, 2], [3, 5], 'ko')
# Join them with an (extending) green lines.
plt.plot([-1, 10], [-1, 21], 'g-')
# Set some reasonable plot limits.
plt.xlim([-1, 10])
plt.ylim([-1, 10])
# Show the plot.
plt.show()
# -
# Simple linear regression is about the opposite problem - what if you have some points and are looking for the equation? It's easy when the points are perfectly on a line already, but usually real-world data has some noise. The data might still look roughly linear, but aren't exactly so.
#
# ***
# ## Example (contrived and simulated)
# ### Scenario
# Suppose you are trying to weigh your suitcase to avoid an airline's extra charges. You don't have a weighing scales, but you do have a spring and some gym-style weights of masses 7KG, 14KG and 21KG. You attach the spring to the wall hook, and mark where the bottom of it hangs. You then hang the 7KG weight on the end and mark where the bottom of the spring is. You repeat this with the 14KG weight and the 21KG weight. Finally, you place your case hanging on the spring, and the spring hangs down halfway between the 7KG mark and the 14KG mark. Is your case over the 10KG limit set by the airline?
#
# ### Hypothesis
# When you look at the marks on the wall, it seems that the 0KG, 7KG, 14KG and 21KG marks are evenly spaced. You wonder if that means your case weighs 10.5KG. That is, you wonder if there is a linear relationship between the distance the spring's hook is from its resting position, and the mass on the end of it.
#
# ### Experiment
# You decide to experiment. You buy some new weights - a 1KG, a 2KG, a 3Kg, all the way up to 20KG. You place them each in turn on the spring and measure the distance the spring moves from the resting position. You tabulate the data and plot them.
#
# ### Analysis
# Here we'll import the Python libraries we need for or investigations below.
# # numpy efficiently deals with numerical multi-dimensional arrays.
# import numpy as np
#
# # matplotlib is a plotting library, and pyplot is its easy-to-use module.
# import matplotlib.pyplot as plt
#
# # This just sets the default plot size to be bigger.
# plt.rcParams['figure.figsize'] = (8, 6)
# Ignore the next couple of lines where I fake up some data. I'll use the fact that I faked the data to explain some results later. Just pretend that w is an array containing the weight values and d are the corresponding distance measurements.
w = np.arange(0.0, 21.0, 1.0)
d = 5.0 * w + 10.0 + np.random.normal(0.0, 5.0, w.size)
# Let's have a look at w.
w
# Let's have a look at d.
d
# +
# Create the plot.
plt.plot(w, d, 'k.')
# Set some properties for the plot.
plt.xlabel('Weight (KG)')
plt.ylabel('Distance (CM)')
# Show the plot.
plt.show()
# -
# #### Model
# It looks like the data might indeed be linear. The points don't exactly fit on a straight line, but they are not far off it. We might put that down to some other factors, such as the air density, or errors, such as in our tape measure. Then we can go ahead and see what would be the best line to fit the data.
# #### Straight lines
# All straight lines can be expressed in the form y=mx+c. The number m is the slope of the line. The slope is how much y increases by when x is increased by 1.0. The number c is the y-intercept of the line. It's the value of y when x is 0.
# #### Fitting the model
# To fit a straight line to the data, we just must pick values for m and c. These are called the parameters of the model, and we want to pick the best values possible for the parameters. That is, the best parameter values given the data observed. Below we show various lines plotted over the data, with different values for m and c.
# +
# Plot w versus d with black dots.
plt.plot(w, d, 'k.', label="Data")
# Overlay some lines on the plot.
x = np.arange(0.0, 21.0, 1.0)
plt.plot(x, 5.0 * x + 10.0, 'r-', label=r"$5x + 10$")
plt.plot(x, 6.0 * x + 5.0, 'g-', label=r"$6x + 5$")
plt.plot(x, 5.0 * x + 15.0, 'b-', label=r"$5x + 15$")
# Add a legend.
plt.legend()
# Add axis labels.
plt.xlabel('Weight (KG)')
plt.ylabel('Distance (CM)')
# Show the plot.
plt.show()
# -
# #### Calculating the cost
# You can see that each of these lines roughly fits the data. Which one is best, and is there another line that is better than all three? Is there a "best" line?
#
# It depends how you define the word best. Luckily, everyone seems to have settled on what the best means. The best line is the one that minimises the following calculated value.
#
# $$\sum_i (y_i - mx_i - c)^2 $$
#
# Here $(x_i, y_i)$ is the $i^{th}$ point in the data set and $\sum_i$ means to sum over all points. The values of $m$ and $c$ are to be determined.
# We usually denote the above as $Cost(m, c)$.
#
# Where does the above calculation come from? It's easy to explain the part in the brackets $(y_i - mx_i - c)$. The corresponding value to xi in the dataset is yi. These are the measured values. The value $m x_i + c$ is what the model says $y_i$ should have been. The difference between the value that was observed ($y_i$) and the value that the model gives ($m x_i + c$), is $y_i - mx_i - c$..
#
# Why square that value? Well note that the value could be positive or negative, and you sum over all of these values. If we allow the values to be positive or negative, then the positive could cancel the negatives. So, the natural thing to do is to take the absolute value $\mid y_i - m x_i - c \mid$.. Well it turns out that absolute values are a pain to deal with, and instead it was decided to just square the quantity instead, as the square of a number is always positive. There are pros and cons to using the square instead of the absolute value, but the square is used. This is usually called least squares fitting.
# +
# Calculate the cost of the lines above for the data above.
cost = lambda m,c: np.sum([(d[i] - m * w[i] - c)**2 for i in range(w.size)])
print("Cost with m = %5.2f and c = %5.2f: %8.2f" % (5.0, 10.0, cost(5.0, 10.0)))
print("Cost with m = %5.2f and c = %5.2f: %8.2f" % (6.0, 5.0, cost(6.0, 5.0)))
print("Cost with m = %5.2f and c = %5.2f: %8.2f" % (5.0, 15.0, cost(5.0, 15.0)))
# -
# #### Minimising the cost
# We want to calculate values of $m$ and $c$ that give the lowest value for the cost value above.
# For our given data set we can plot the cost value/function.
# Recall that the cost is:
#
# $$ Cost(m, c) = \sum_i (y_i − mx_i − c)^2 $$
#
# This is a function of two variables, $m$ and $c$, so a plot of it is three dimensional. See the **Advanced** section below for the plot.
#
# In the case of fitting a two-dimensional line to a few data points, we can easily calculate exactly the best values of $m$ and $c$.
# Some of the details are discussed in the **Advanced section**, as they involve calculus, but the resulting code is straight-forward.
# We first calculate the mean (average) values of our $x$ values and that of our $y$ values. Then we subtract the mean of $x$ from each of the $x$ values, and the mean of $y$ from each of the $y$ values.
# Then we take the *dot product* of the new x values and the new $y$ values and divide it by the dot product of the new $x$ values with themselves. That gives us m, and we use m to calculate c.
#
# Remember that in our dataset x is called $w$ (for weight) and $y$ is called $d$ (for distance). We calculate $m$ and $c$ below.
# ***Calculus is used to find the value of m and c the two values that give the minimum cost value of the cost based on the data***
# +
# Calculate the best values for m and c.
# First calculate the means (a.k.a. averages) of w and d.
w_avg = np.mean(w)
d_avg = np.mean(d)
# Subtract means from w and d.
w_zero = w - w_avg
d_zero = d - d_avg
# The best m is found by the following calculation.
m = np.sum(w_zero * d_zero) / np.sum(w_zero * w_zero)
# Use m from above to calculate the best c.
c = d_avg - m * w_avg
print("m is %8.6f and c is %6.6f." % (m, c))
# -
# Note that numpy has a function that will perform this calculation for us, called polyfit. It can be used to fit lines in many dimensions.
np.polyfit(w, d, 1)
# #### Best fit line
# So, the best values for m and c given our data and using least squares fitting are about 4.95 for m and about 11.13 for c. We plot this line on top of the data below.
# +
# Plot the best fit line.
plt.plot(w, d, 'k.', label='Original data')
plt.plot(w, m * w + c, 'b-', label='Best fit line')
# Add axis labels and a legend.
plt.xlabel('Weight (KG)')
plt.ylabel('Distance (CM)')
plt.legend()
# Show the plot.
plt.show()
# -
# Note that the $Cost$ of the best $m$ and best c is not zero in this case.
print("Cost with m = %5.2f and c = %5.2f: %8.2f" % (m, c, cost(m, c)))
# #### Summary
# In this notebook we did:
#
# 1. Investigated the data.
# 2. Picked a model.
# 3. Picked a cost function.
# 4. Estimated the model parameter values that minimised our cost function.
#
# ***
| myExercises2/simple-linear-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df=pd.read_csv("C://Users//dell//Desktop//New folder//house-prices-advanced-regression-techniques//train.csv")
pd.set_option('display.max_columns', None)
df.head(10)
df['MSZoning'].dtypes
feature=df.columns
new=np.where([df.columns].dtypes=='O')
new
# +
## Here we will check the percentage of nan values present in each feature
## 1 -step make the list of features which has missing values
features_with_na=[features for features in df.columns if df[features].isnull().sum()>1]
# -
features_with_na
for feature in features_with_na:
print(feature, np.round(df[feature].isnull().mean(), 4), ' % missing values')
np.round(.0123456,3)
df['Fence'].isnull().mean()
(1179/1460)*100
x=np.arange(20)
x.mean()
NEW=[features for features in df.columns if df[features].dtypes=='O']
len(NEW)
for feature in features_with_na:
df1 = df.copy()
# let's make a variable that indicates 1 if the observation was missing or zero otherwise
df1[feature] = np.where(df1[feature].isnull(), 1, 0)
# let's calculate the mean SalePrice where the information is missing or present
df1.groupby(feature)['SalePrice'].median().plot.bar()
plt.title(feature)
plt.show()
df1['LotFrontage'] = np.where(df1[feature].isnull(), 1, 0)
df1['MiscFeature'].head(50)
| House Prices_Advanced_Regression_Techniques_EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/parekhakhil/pyImageSearch/blob/main/1002_hyperparameter_tuning_cv_Search.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8ntZ1AkXZIxY"
#
#
# This notebook is associated with the [Grid search hyperparameter tuning with scikit-learn ( GridSearchCV )](https://www.pyimagesearch.com/2021/05/24/grid-search-hyperparameter-tuning-with-scikit-learn-gridsearchcv/) blog post published on 2021-05-24.
#
# Only the code for the blog post is here. Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed.
#
# We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources:
#
# * [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html#notebook-user-interface)
# * [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)
#
#
# Happy hacking!
#
#
# <hr>
#
#
# + [markdown] id="NFhAzQB3aNMa"
# ### Download the code zip file
# + id="7y0LG1EuaRlB"
# !wget https://pyimagesearch-code-downloads.s3-us-west-2.amazonaws.com/hyperparameter-tuning-cv/hyperparameter-tuning-cv.zip
# !unzip -qq hyperparameter-tuning-cv.zip
# %cd hyperparameter-tuning-cv
# + [markdown] id="_SgTVT3HagGZ"
# ## Blog Post Code
# + [markdown] id="wcrOk6pURp50"
# ### Import Packages
# + id="VJaCNlDDRz6d"
# import the necessary packages
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from skimage import feature
from imutils import paths
import numpy as np
import time
import cv2
import os
# + [markdown] id="VZ3bW6aU2a6v"
# ### Our Local Binary Pattern (LBP) descriptor
# + id="bS9nQNo02dIs"
class LocalBinaryPatterns:
def __init__(self, numPoints, radius):
# store the number of points and radius
self.numPoints = numPoints
self.radius = radius
def describe(self, image, eps=1e-7):
# compute the Local Binary Pattern representation
# of the image, and then use the LBP representation
# to build the histogram of patterns
lbp = feature.local_binary_pattern(image, self.numPoints,
self.radius, method="uniform")
(hist, _) = np.histogram(lbp.ravel(),
bins=np.arange(0, self.numPoints + 3),
range=(0, self.numPoints + 2))
# normalize the histogram
hist = hist.astype("float")
hist /= (hist.sum() + eps)
# return the histogram of Local Binary Patterns
return hist
# + [markdown] id="Jppw5-Bd56H-"
# ### Implementing our grid search for hyperparameter tuning
# + id="okM7Bpyeq8Kc"
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-d", "--dataset", required=True,
# help="path to input dataset")
# args = vars(ap.parse_args())
# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
"dataset": "texture_dataset"
}
# + id="J43Rixrm3Duo"
# grab the image paths in the input dataset directory
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the local binary patterns descriptor along with
# the data and label lists
print("[INFO] extracting features...")
desc = LocalBinaryPatterns(24, 8)
data = []
labels = []
# + id="zD54-goR3IJV"
# loop over the dataset of images
for imagePath in imagePaths:
# load the image, convert it to grayscale, and quantify it
# using LBPs
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
hist = desc.describe(gray)
# extract the label from the image path, then update the
# label and data lists
labels.append(imagePath.split(os.path.sep)[-2])
data.append(hist)
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
print("[INFO] constructing training/testing split...")
(trainX, testX, trainY, testY) = train_test_split(data, labels,
random_state=22, test_size=0.25)
# + id="_A3Hycjs3NkY"
# construct the set of hyperparameters to tune
parameters = [
{"kernel":
["linear"],
"C": [0.0001, 0.001, 0.1, 1, 10, 100, 1000]},
{"kernel":
["poly"],
"degree": [2, 3, 4],
"C": [0.0001, 0.001, 0.1, 1, 10, 100, 1000]},
{"kernel":
["rbf"],
"gamma": ["auto", "scale"],
"C": [0.0001, 0.001, 0.1, 1, 10, 100, 1000]}
]
# + id="E1iCNr2K3WEI"
# tune the hyperparameters via a cross-validated grid search
print("[INFO] tuning hyperparameters via grid search")
grid = GridSearchCV(estimator=SVC(), param_grid=parameters, n_jobs=-1)
start = time.time()
grid.fit(trainX, trainY)
end = time.time()
# show the grid search information
print("[INFO] grid search took {:.2f} seconds".format(
end - start))
print("[INFO] grid search best score: {:.2f}%".format(
grid.best_score_ * 100))
print("[INFO] grid search best parameters: {}".format(
grid.best_params_))
# + id="NbkALIBP3Z-p"
# grab the best model and evaluate it
print("[INFO] evaluating...")
model = grid.best_estimator_
predictions = model.predict(testX)
print(classification_report(testY, predictions))
# + [markdown] id="4ogkNauArL6u"
# For a detailed walkthrough of the concepts and code, be sure to refer to the full tutorial, [*Grid search hyperparameter tuning with scikit-learn ( GridSearchCV )*](https://www.pyimagesearch.com/2021/05/24/grid-search-hyperparameter-tuning-with-scikit-learn-gridsearchcv/) published on 2021-05-24.
| 1002_hyperparameter_tuning_cv_Search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Verifications Post-analysis
# ## Lib
# +
import os
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Set config and paths
# +
inputs = {
"folder": lambda hdi_index: f"PRI_NA_COORTEX_HDI_{hdi_index}_21JAN2021_31AUG2021",
"only_events": lambda seed: f"EVENTOS_PAREADOS_{seed}.parquet",
"only_pairs": lambda seed: f"PAREADOS_CPF_{seed}.parquet",
"pairs_events_int": lambda event, seed: f"PAREADOS_COM_INTERVALOS_{event}_{seed}.parquet",
"survival_int": lambda event, seed: f"SURVIVAL_CORONAVAC_D1D2_{event}_{seed}.parquet"
}
config = {
"seed": 1,
"hdi_index": 2,
"event": "OBITO",
}
schema_path = os.path.join( "..", "output", "data", "SCHEMA_21JAN2021_31AUG2021.parquet")
base_path = os.path.join( "..", "output", "PAREAMENTO", "CORONAVAC", inputs["folder"](config["hdi_index"]) )
pairs_path = os.path.join( base_path, inputs["only_pairs"](config['seed']) )
events_path = os.path.join( base_path, inputs["only_events"](config['seed']) )
pairs_events_path = os.path.join( base_path, inputs["pairs_events_int"](config['event'], config['seed']) )
survival_path_death = os.path.join( base_path, "SURVIVAL", inputs['survival_int']('OBITO', config['seed']) )
survival_path_hosp = os.path.join( base_path, "SURVIVAL", inputs['survival_int']('HOSPITAL', config['seed']) )
survival_path_icu = os.path.join( base_path, "SURVIVAL", inputs['survival_int']('UTI', config['seed']) )
# -
# ## Load data
fschema_df = pd.read_parquet(schema_path)
pairs_df = pd.read_parquet(pairs_path)
events_df = pd.read_parquet(events_path)
pairs_events_df = pd.read_parquet(pairs_events_path)
survival_df_death = pd.read_parquet(survival_path_death)
survival_df_hosp = pd.read_parquet(survival_path_hosp)
survival_df_icu = pd.read_parquet(survival_path_icu)
# +
events_df = events_df[events_df["TIPO"]!="NAO PAREADO"]
def new_hospitalization_date(x, cohort):
'''
'''
if not np.any(pd.notna(x)):
return np.nan
x = np.sort([xx for xx in x if pd.notna(xx)])
condition = (x>=cohort[0]) & (x<=cohort[1])
if x[condition].shape[0]>0:
return x[condition][0]
else:
return np.nan
events_df["DATA HOSPITALIZACAO"] = events_df["DATA HOSPITALIZACAO"].apply(lambda x: new_hospitalization_date(x, (dt.datetime(2021,1,21), dt.datetime(2021,8,31))))
events_df["DATA UTI"] = events_df["DATA UTI"].apply(lambda x: new_hospitalization_date(x, (dt.datetime(2021,1,21), dt.datetime(2021,8,31))))
# -
# ## Count of events
# +
survival_death = pd.read_parquet(os.path.join( base_path, "SURVIVAL", inputs['survival_int']("OBITO", config['seed']) ))
survival_hosp = pd.read_parquet(os.path.join( base_path, "SURVIVAL", inputs['survival_int']("HOSPITAL", config['seed']) ))
survival_icu = pd.read_parquet(os.path.join( base_path, "SURVIVAL", inputs['survival_int']("UTI", config['seed']) ))
data = {
"OBITO (day 100)": [0,0,0,0], "HOSPITAL (day 100)": [0,0,0,0], "UTI (day 100)": [0,0,0,0],
"OBITO (day end)": [0,0,0,0], "HOSPITAL (day end)": [0,0,0,0], "UTI (day end)": [0,0,0,0],
}
survival, survival_names = [survival_death, survival_hosp, survival_icu], ["OBITO", "HOSPITAL", "UTI"]
for index, df in enumerate(survival):
surv_caso_100 = df[(df["TIPO"]=="CASO") & (df[f"t - D1 {survival_names[index]}"]<=30) & (df[f"t - D1 {survival_names[index]}"]>=0)]
surv_controle_100 = df[(df["TIPO"]=="CONTROLE") & (df[f"t - D1 {survival_names[index]}"]<=30) & (df[f"t - D1 {survival_names[index]}"]>=0)]
surv_caso = df[(df["TIPO"]=="CASO")]
surv_controle = df[(df["TIPO"]=="CONTROLE")]
caso_total_d1 = surv_caso[f"t - D1 {survival_names[index]}"].notnull().sum()
controle_total_d1 = surv_controle[f"t - D1 {survival_names[index]}"].notnull().sum()
caso_total_d2 = surv_caso[f"t - D2 {survival_names[index]}"].notnull().sum()
controle_total_d2 = surv_controle[f"t - D2 {survival_names[index]}"].notnull().sum()
caso_event_d1_100 = surv_caso_100[f"E - D1 {survival_names[index]}"].value_counts().loc[True]
caso_event_d2_100 = surv_caso_100[f"E - D2 {survival_names[index]}"].value_counts().loc[True]
controle_event_d1_100 = surv_controle_100[f"E - D1 {survival_names[index]}"].value_counts().loc[True]
controle_event_d2_100 = surv_controle_100[f"E - D2 {survival_names[index]}"].value_counts().loc[True]
caso_event_d1 = surv_caso[f"E - D1 {survival_names[index]}"].value_counts().loc[True]
caso_event_d2 = surv_caso[f"E - D2 {survival_names[index]}"].value_counts().loc[True]
controle_event_d1 = surv_controle[f"E - D1 {survival_names[index]}"].value_counts().loc[True]
controle_event_d2 = surv_controle[f"E - D2 {survival_names[index]}"].value_counts().loc[True]
data[f"{survival_names[index]} (day 100)"][0] = f"{caso_event_d1_100}/{caso_total_d1}"
data[f"{survival_names[index]} (day 100)"][1] = f"{controle_event_d1_100}/{controle_total_d1}"
data[f"{survival_names[index]} (day 100)"][2] = f"{caso_event_d2_100}/{caso_total_d2}"
data[f"{survival_names[index]} (day 100)"][3] = f"{controle_event_d2_100}/{controle_total_d2}"
data[f"{survival_names[index]} (day end)"][0] = f"{caso_event_d1}/{caso_total_d1}"
data[f"{survival_names[index]} (day end)"][1] = f"{controle_event_d1}/{controle_total_d1}"
data[f"{survival_names[index]} (day end)"][2] = f"{caso_event_d2}/{caso_total_d2}"
data[f"{survival_names[index]} (day end)"][3] = f"{controle_event_d2}/{controle_total_d2}"
data = pd.DataFrame(data)
data.index = ["CASO D1", "CONTROLE D1", "CASO D2", "CONTROLE D2"]
# -
data
# ## Events by condition
# +
events = events_df[(events_df["DATA OBITO COVID"]>=dt.datetime(2021,1,21)) | pd.isna(events_df["DATA OBITO COVID"])]
events_caso = events[events["TIPO"]=="CASO"].copy()
events_controle = events[events["TIPO"]=="CONTROLE"].copy()
# --> Geral
caso_obito_geral = events_caso["DATA OBITO COVID"].notnull().sum()
caso_hosp_geral = events_caso["DATA HOSPITALIZACAO"].notnull().sum()
caso_uti_geral = events_caso["DATA UTI"].notnull().sum()
controle_obito_geral = events_controle["DATA OBITO COVID"].notnull().sum()
controle_hosp_geral = events_controle["DATA HOSPITALIZACAO"].notnull().sum()
controle_uti_geral = events_controle["DATA UTI"].notnull().sum()
# --> Antes D1
func_1 = lambda x: True if pd.notna(x["DATA D1"]) and pd.notna(x["DATA OBITO COVID"]) and x["DATA OBITO COVID"]<x["DATA D1"] else ( True if pd.isna(x["DATA D1"]) and pd.notna(x["DATA OBITO COVID"]) else False)
events_caso["OBITO ANTES D1"] = events_caso[["DATA D1", "DATA OBITO COVID"]].apply(func_1, axis=1)
events_controle["OBITO ANTES D1"] = events_controle[["DATA D1", "DATA OBITO COVID"]].apply(func_1, axis=1)
# --
func_1 = lambda x: True if pd.notna(x["DATA D1"]) and pd.notna(x["DATA HOSPITALIZACAO"]) and x["DATA HOSPITALIZACAO"]<x["DATA D1"] else ( True if pd.isna(x["DATA D1"]) and pd.notna(x["DATA HOSPITALIZACAO"]) else False)
events_caso["HOSP ANTES D1"] = events_caso[["DATA D1", "DATA HOSPITALIZACAO"]].apply(func_1, axis=1)
events_controle["HOSP ANTES D1"] = events_controle[["DATA D1", "DATA HOSPITALIZACAO"]].apply(func_1, axis=1)
# --
func_1 = lambda x: True if pd.notna(x["DATA D1"]) and pd.notna(x["DATA UTI"]) and x["DATA UTI"]<x["DATA D1"] else ( True if pd.isna(x["DATA D1"]) and pd.notna(x["DATA UTI"]) else False)
events_caso["UTI ANTES D1"] = events_caso[["DATA D1", "DATA UTI"]].apply(func_1, axis=1)
events_controle["UTI ANTES D1"] = events_controle[["DATA D1", "DATA UTI"]].apply(func_1, axis=1)
# ----> counting
caso_obito_antesd1 = events_caso[events_caso["OBITO ANTES D1"]==True].shape[0]
controle_obito_antesd1 = events_controle[events_controle["OBITO ANTES D1"]==True].shape[0]
caso_hosp_antesd1 = events_caso[events_caso["HOSP ANTES D1"]==True].shape[0]
controle_hosp_antesd1 = events_controle[events_controle["HOSP ANTES D1"]==True].shape[0]
caso_uti_antesd1 = events_caso[events_caso["UTI ANTES D1"]==True].shape[0]
controle_uti_antesd1 = events_controle[events_controle["UTI ANTES D1"]==True].shape[0]
# --> Apos D1
func_2 = lambda x: True if pd.notna(x["DATA D1"]) and pd.notna(x["DATA OBITO COVID"]) and x["DATA OBITO COVID"]>=x["DATA D1"] else False
events_caso["OBITO APOS D1"] = events_caso[["DATA D1", "DATA OBITO COVID"]].apply(func_2, axis=1)
events_controle["OBITO APOS D1"] = events_controle[["DATA D1", "DATA OBITO COVID"]].apply(func_2, axis=1)
# --
func_2 = lambda x: True if pd.notna(x["DATA D1"]) and pd.notna(x["DATA HOSPITALIZACAO"]) and x["DATA HOSPITALIZACAO"]>=x["DATA D1"] else False
events_caso["HOSP APOS D1"] = events_caso[["DATA D1", "DATA HOSPITALIZACAO"]].apply(func_2, axis=1)
events_controle["HOSP APOS D1"] = events_controle[["DATA D1", "DATA HOSPITALIZACAO"]].apply(func_2, axis=1)
# --
func_2 = lambda x: True if pd.notna(x["DATA D1"]) and pd.notna(x["DATA UTI"]) and x["DATA UTI"]>=x["DATA D1"] else False
events_caso["UTI APOS D1"] = events_caso[["DATA D1", "DATA UTI"]].apply(func_2, axis=1)
events_controle["UTI APOS D1"] = events_controle[["DATA D1", "DATA UTI"]].apply(func_2, axis=1)
# ----> counting
caso_obito_aposd1 = events_caso[events_caso["OBITO APOS D1"]==True].shape[0]
controle_obito_aposd1 = events_controle[events_controle["OBITO APOS D1"]==True].shape[0]
caso_hosp_aposd1 = events_caso[events_caso["HOSP APOS D1"]==True].shape[0]
controle_hosp_aposd1 = events_controle[events_controle["HOSP APOS D1"]==True].shape[0]
caso_uti_aposd1 = events_caso[events_caso["UTI APOS D1"]==True].shape[0]
controle_uti_aposd1 = events_controle[events_controle["UTI APOS D1"]==True].shape[0]
# --> Antes D2
func_3 = lambda x: True if pd.notna(x["DATA D2"]) and pd.notna(x["DATA OBITO COVID"]) and x["DATA OBITO COVID"]<x["DATA D2"] else ( True if pd.isna(x["DATA D2"]) and pd.notna(x["DATA OBITO COVID"]) else False)
events_caso["OBITO ANTES D2"] = events_caso[["DATA D2", "DATA OBITO COVID"]].apply(func_3, axis=1)
events_controle["OBITO ANTES D2"] = events_controle[["DATA D2", "DATA OBITO COVID"]].apply(func_3, axis=1)
# --
func_3 = lambda x: True if pd.notna(x["DATA D2"]) and pd.notna(x["DATA HOSPITALIZACAO"]) and x["DATA HOSPITALIZACAO"]<x["DATA D2"] else ( True if pd.isna(x["DATA D2"]) and pd.notna(x["DATA HOSPITALIZACAO"]) else False)
events_caso["HOSP ANTES D2"] = events_caso[["DATA D2", "DATA HOSPITALIZACAO"]].apply(func_3, axis=1)
events_controle["HOSP ANTES D2"] = events_controle[["DATA D2", "DATA HOSPITALIZACAO"]].apply(func_3, axis=1)
# --
func_3 = lambda x: True if pd.notna(x["DATA D2"]) and pd.notna(x["DATA UTI"]) and x["DATA UTI"]<x["DATA D2"] else ( True if pd.isna(x["DATA D2"]) and pd.notna(x["DATA UTI"]) else False)
events_caso["UTI ANTES D2"] = events_caso[["DATA D2", "DATA UTI"]].apply(func_3, axis=1)
events_controle["UTI ANTES D2"] = events_controle[["DATA D2", "DATA UTI"]].apply(func_3, axis=1)
# ----> counting
caso_obito_antesd2 = events_caso[events_caso["OBITO ANTES D2"]==True].shape[0]
controle_obito_antesd2 = events_controle[events_controle["OBITO ANTES D2"]==True].shape[0]
caso_hosp_antesd2 = events_caso[events_caso["HOSP ANTES D2"]==True].shape[0]
controle_hosp_antesd2 = events_controle[events_controle["HOSP ANTES D2"]==True].shape[0]
caso_uti_antesd2 = events_caso[events_caso["UTI ANTES D2"]==True].shape[0]
controle_uti_antesd2 = events_controle[events_controle["UTI ANTES D2"]==True].shape[0]
# --> Apos D2
func_4 = lambda x: True if pd.notna(x["DATA D2"]) and pd.notna(x["DATA OBITO COVID"]) and x["DATA OBITO COVID"]>=x["DATA D2"] else False
events_caso["OBITO APOS D2"] = events_caso[["DATA D2", "DATA OBITO COVID"]].apply(func_4, axis=1)
events_controle["OBITO APOS D2"] = events_controle[["DATA D2", "DATA OBITO COVID"]].apply(func_4, axis=1)
# --
func_4 = lambda x: True if pd.notna(x["DATA D2"]) and pd.notna(x["DATA HOSPITALIZACAO"]) and x["DATA HOSPITALIZACAO"]>=x["DATA D2"] else False
events_caso["HOSP APOS D2"] = events_caso[["DATA D2", "DATA HOSPITALIZACAO"]].apply(func_4, axis=1)
events_controle["HOSP APOS D2"] = events_controle[["DATA D2", "DATA HOSPITALIZACAO"]].apply(func_4, axis=1)
# --
func_4 = lambda x: True if pd.notna(x["DATA D2"]) and pd.notna(x["DATA UTI"]) and x["DATA UTI"]>=x["DATA D2"] else False
events_caso["UTI APOS D2"] = events_caso[["DATA D2", "DATA UTI"]].apply(func_4, axis=1)
events_controle["UTI APOS D2"] = events_controle[["DATA D2", "DATA UTI"]].apply(func_4, axis=1)
# ----> counting
caso_obito_aposd2 = events_caso[events_caso["OBITO APOS D2"]==True].shape[0]
controle_obito_aposd2 = events_controle[events_controle["OBITO APOS D2"]==True].shape[0]
caso_hosp_aposd2 = events_caso[events_caso["HOSP APOS D2"]==True].shape[0]
controle_hosp_aposd2 = events_controle[events_controle["HOSP APOS D2"]==True].shape[0]
caso_uti_aposd2 = events_caso[events_caso["UTI APOS D2"]==True].shape[0]
controle_uti_aposd2 = events_controle[events_controle["UTI APOS D2"]==True].shape[0]
# --> Censured
caso_survival_death = survival_df_death[survival_df_death['TIPO']=="CASO"]
controle_survival_death = survival_df_death[survival_df_death['TIPO']=="CONTROLE"]
caso_survival_hosp = survival_df_hosp[survival_df_hosp['TIPO']=="CASO"]
controle_survival_hosp = survival_df_hosp[survival_df_hosp['TIPO']=="CONTROLE"]
caso_survival_icu = survival_df_icu[survival_df_icu['TIPO']=="CASO"]
controle_survival_icu = survival_df_icu[survival_df_icu['TIPO']=="CONTROLE"]
censured_caso_death_d1 = caso_survival_death[caso_survival_death["E - D1 OBITO"]==True].shape[0]
censured_caso_death_d2 = caso_survival_death[caso_survival_death["E - D2 OBITO"]==True].shape[0]
censured_controle_death_d1 = controle_survival_death[controle_survival_death["E - D1 OBITO"]==True].shape[0]
censured_controle_death_d2 = controle_survival_death[controle_survival_death["E - D2 OBITO"]==True].shape[0]
# --
censured_caso_hosp_d1 = caso_survival_hosp[caso_survival_hosp["E - D1 HOSPITAL"]==True].shape[0]
censured_caso_hosp_d2 = caso_survival_hosp[caso_survival_hosp["E - D2 HOSPITAL"]==True].shape[0]
censured_controle_hosp_d1 = controle_survival_hosp[controle_survival_hosp["E - D1 HOSPITAL"]==True].shape[0]
censured_controle_hosp_d2 = controle_survival_hosp[controle_survival_hosp["E - D2 HOSPITAL"]==True].shape[0]
# --
censured_caso_icu_d1 = caso_survival_icu[caso_survival_icu["E - D1 UTI"]==True].shape[0]
censured_caso_icu_d2 = caso_survival_icu[caso_survival_icu["E - D2 UTI"]==True].shape[0]
censured_controle_icu_d1 = controle_survival_icu[controle_survival_icu["E - D1 UTI"]==True].shape[0]
censured_controle_icu_d2 = controle_survival_icu[controle_survival_icu["E - D2 UTI"]==True].shape[0]
data = pd.DataFrame({"CASO GERAL": [caso_obito_geral, caso_hosp_geral, caso_uti_geral],
"CONTROLE GERAL": [controle_obito_geral, controle_hosp_geral, controle_uti_geral],
"CASO ANTES D1": [caso_obito_antesd1, caso_hosp_antesd1, caso_uti_antesd1],
"CASO APOS D1": [caso_obito_aposd1, caso_hosp_aposd1, caso_uti_aposd1],
"CASO ANTES D2": [caso_obito_antesd2, caso_hosp_antesd2, caso_uti_antesd2],
"CASO APOS D2": [caso_obito_aposd2, caso_hosp_aposd2, caso_uti_aposd2],
"CONTROLE ANTES D1": [controle_obito_antesd1, controle_hosp_antesd1, controle_uti_antesd1],
"CONTROLE APOS D1": [controle_obito_aposd1, controle_hosp_aposd1, controle_uti_aposd1],
"CONTROLE ANTES D2": [controle_obito_antesd2, controle_hosp_antesd2, controle_uti_antesd2],
"CONTROLE APOS D2": [controle_obito_aposd2, controle_hosp_aposd2, controle_uti_aposd2],
"CASO NAO CENSURADO D1": [censured_caso_death_d1,censured_caso_hosp_d1,censured_caso_icu_d1],
"CONTROLE NAO CENSURADO D1": [censured_controle_death_d1,censured_controle_hosp_d1,censured_controle_icu_d1],
"CASO NAO CENSURADO D2": [censured_caso_death_d2,censured_caso_hosp_d2,censured_caso_icu_d2],
"CONTROLE NAO CENSURADO D2": [censured_controle_death_d2,censured_controle_hosp_d2,censured_controle_icu_d2]})
data.index = ["OBITO", "HOSPITAL", "UTI"]
# -
data
survival_df_death
surv = survival_death
surv_caso = surv[surv["TIPO"]=="CASO"]
surv_controle = surv[surv["TIPO"]=="CONTROLE"]
surv_caso["E - D1 HOSPITAL"].value_counts()
surv_controle["E - D1 HOSPITAL"].value_counts()
from lifelines import KaplanMeierFitter
df_c = surv_caso[pd.notna(surv_caso["t - D1 OBITO"])]
kmf_caso = KaplanMeierFitter(label="caso")
kmf_caso.fit(df_c["t - D1 OBITO"], df_c["E - D1 OBITO"])
df_c = surv_controle[pd.notna(surv_controle["t - D1 OBITO"])]
kmf_controle = KaplanMeierFitter(label="controle")
kmf_controle.fit(df_c["t - D1 OBITO"], df_c["E - D1 OBITO"])
surv_controle[pd.isna(surv_controle["t - D1 OBITO"])]
surv_caso[pd.isna(surv_caso["t - D1 OBITO"])]
events_df[events_df["CPF"]=="30147484391"]
kmf_caso.event_table["observed"].sum()
kmf_controle.event_table["observed"].sum()
kmf_caso.event_table
kmf_caso.survival_table_from_events
41+49+20+130
| notebooks/post_analysis_checking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.9 64-bit (''.air'': venv)'
# language: python
# name: python3
# ---
import asyncio
from typing import Any, Awaitable
async def get_result(awaitable: Awaitable) -> str:
try:
result = await awaitable
except Exception as e:
print("oops!", e)
return "no result :("
else:
return result
f = asyncio.Future()
loop = asyncio.get_event_loop()
loop.call_later(10, f.set_result, "this is my result")
loop.run_until_complete(get_result(f))
| experiments/asyncio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial for AQCEL
# ・Quantum Gate Pattern recognition (level = 1, 2, 3)
# <br>
# ・Circuit Optimization (level = 1, 2)
# <br>
# +
import sys
sys.path.append('..')
import warnings
warnings.simplefilter('ignore')
from qiskit import(QuantumCircuit, QuantumRegister, ClassicalRegister)
# -
# ## Example Circuit
# +
q = QuantumRegister(6, 'q')
c = ClassicalRegister( 6, 'c')
circ = QuantumCircuit(q,c)
circ.toffoli(0,4,1)
circ.toffoli(2,3,0)
circ.x(2)
circ.x(3)
circ.x(2)
circ.x(3)
circ.toffoli(2,3,0)
circ.toffoli(0,4,1)
circ.x(4)
circ.x(1)
circ.toffoli(0,4,1)
circ.toffoli(2,3,0)
circ.h(2)
circ.h(3)
circ.toffoli(2,3,0)
circ.toffoli(0,4,1)
circ.x(5)
circ.cry(1,2,0)
circ.toffoli(1,5,2)
circ.toffoli(0,4,1)
circ.toffoli(2,3,0)
circ.h(2)
circ.h(3)
circ.toffoli(2,3,0)
circ.toffoli(0,4,1)
circ.toffoli(3,4,1)
circ.toffoli(1,5,2)
circ.cry(1,2,0)
circ.toffoli(1,5,2)
circ.toffoli(3,4,1)
circ.x(2)
circ.ry(1,0)
circ.x(2)
circ.toffoli(3,4,1)
circ.toffoli(1,5,2)
circ.cry(2,2,0)
circ.toffoli(1,5,2)
circ.toffoli(3,4,1)
circ.h(1)
circ.cry(2,2,0)
circ.toffoli(1,5,2)
circ.cx(3,4)
circ.cx(1,5)
circ.cry(1,2,0)
circ.cx(1,5)
circ.cx(3,4)
circ.x(2)
circ.cx(1,0)
circ.toffoli(2,3,0)
circ.x(2)
circ.cx(3,4)
circ.cx(1,5)
circ.cry(2,2,0)
circ.cx(1,5)
circ.cx(3,4)
circ.toffoli(0,4,1)
circ.toffoli(2,3,0)
circ.x(2)
circ.x(3)
circ.x(2)
circ.x(3)
circ.toffoli(2,3,0)
circ.toffoli(0,4,1)
for i in range(6):
circ.measure(i,i)
circ.draw(output='mpl',fold=100)
# -
# ## Pattern recognition
from aqcel.optimization import pattern
example2 = pattern.recognition( circuit=circ, level=2, n_patterns=4, min_num_nodes=4, max_num_nodes=8, min_n_repetition=2)
circ_max, designated_gates2 = example2.quantum_pattern()
circ_max.draw(output='mpl')
# ## Circuit Optimization level1
from aqcel.optimization import slim
print(circ.depth(), ',', circ.__len__())
print('Gate counts:', circ.count_ops())
example1 = slim.circuit_optimization( circuit=circ, threshold=None)
circ_op = example1.slim()
print(circ_op.depth(), ',', circ_op.__len__())
print('Gate counts:', circ_op.count_ops())
circ_op.draw(output='mpl',fold=100)
# ## Circuit Optimization level 2
from aqcel.optimization import optimization
example3 = optimization.optimizer( circuit=circ, slim_level=2, pattern_level =2, n_patterns=4, min_num_nodes=4, max_num_nodes=8, min_n_repetition=2)
circ_op2 = example3.slimer()
circ_op2.draw(output='mpl',fold=30)
| demo/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="-LZqwObchh5i" colab_type="code" colab={}
# #!pip install datadotworld
# #!pip install datadotworld[pandas]
# + id="v8joBzxOiQ0z" colab_type="code" colab={}
# #!dw configure
# + id="00u6-ELCgyqI" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="uQtPhZ2DieFx" colab_type="code" colab={}
#drive.mount('/content/drive')
# + id="b1KN-CJ3irxX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="50418198-ebea-42c6-f9a1-9b964d89f58b" executionInfo={"status": "ok", "timestamp": 1581530186386, "user_tz": -60, "elapsed": 2693, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
# ls
# + id="Op9UNsHPi3WF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c28f78b2-41a1-476d-b8f2-b5a00da7b364" executionInfo={"status": "ok", "timestamp": 1581530219092, "user_tz": -60, "elapsed": 583, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
# cd "drive/My Drive/Colab Notebooks/dw_matrix"
# + id="VT-qRWg5i_9-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="371d2b64-5d34-4a40-85fd-2232b76c2b94" executionInfo={"status": "ok", "timestamp": 1581530230407, "user_tz": -60, "elapsed": 2268, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
# ls
# + id="8xzVdOuVjCMP" colab_type="code" colab={}
# !mkdir data
# + id="j371oYjNjHn-" colab_type="code" colab={}
# !echo "data" > .gitignore
# + id="htR9wqZ6jXFM" colab_type="code" colab={}
# !git add .gitignore
# + id="lgrX0Kx9jbTJ" colab_type="code" colab={}
data = dw.load_dataset('datafiniti/mens-shoe-prices')
# + id="7HrdELw2jrRc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 124} outputId="e00ec709-198f-42b2-e154-f22cbdb94d13" executionInfo={"status": "ok", "timestamp": 1581530436680, "user_tz": -60, "elapsed": 1799, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df = data.dataframes['7004_1']
df.shape
# + id="AOk1I__nj0yK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 695} outputId="8f00334a-614d-43cc-c1f0-07f46e2938d1" executionInfo={"status": "ok", "timestamp": 1581530468607, "user_tz": -60, "elapsed": 538, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df.sample(5)
# + id="XL2mdoFrj832" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 225} outputId="3321d758-784e-446c-a2ef-4e9523ce27c9" executionInfo={"status": "ok", "timestamp": 1581530486216, "user_tz": -60, "elapsed": 639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df.columns
# + id="SxNKUN7LkBHx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="2ca827b4-12ef-4231-a68e-9ac35315ebf4" executionInfo={"status": "ok", "timestamp": 1581530531955, "user_tz": -60, "elapsed": 656, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df.prices_currency.unique()
# + id="wMu79u7GkMUN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 260} outputId="aa21665e-e782-4326-f5fe-783a6c5f779b" executionInfo={"status": "ok", "timestamp": 1581530558885, "user_tz": -60, "elapsed": 574, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df.prices_currency.value_counts()
# + id="6Ocv5T0ikS68" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3440c068-55ea-40d0-a6b7-5f4d8bf48708" executionInfo={"status": "ok", "timestamp": 1581530705140, "user_tz": -60, "elapsed": 561, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df_usd = df[df.prices_currency == 'USD'].copy()
df_usd.shape
# + id="ZLqZKOkyk2ne" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 121} outputId="5d679282-33cc-4221-d310-70c5693ba889" executionInfo={"status": "ok", "timestamp": 1581530766914, "user_tz": -60, "elapsed": 599, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df.prices_amountmin.head()
# + id="RTnLINnSlFso" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="3839ef94-527c-40e5-b8c7-1afa96c60320" executionInfo={"status": "ok", "timestamp": 1581530967488, "user_tz": -60, "elapsed": 653, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(np.float)
df_usd['prices_amountmin'].hist()
# + id="GbEVtoU2lpcW" colab_type="code" colab={}
filter_max = np.percentile(df_usd['prices_amountmin'], 99)
# + id="_KODRge3mIIe" colab_type="code" colab={}
df_usd_filter = df_usd[df_usd['prices_amountmin'] < filter_max]
# + id="T9_UDclzmmOL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="c472e29c-63ce-4527-c8e5-9ef8d7986933" executionInfo={"status": "ok", "timestamp": 1581531227391, "user_tz": -60, "elapsed": 1095, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="SbvxgCXVmoie" colab_type="code" colab={}
# !git add matrix_one/Day3.ipynb
# + id="8x-pYDzYnv0P" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "<NAME>"
# + id="AW-YXLhXnbB6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="e859085f-4562-407c-baa9-4653c28c3f66" executionInfo={"status": "ok", "timestamp": 1581531524477, "user_tz": -60, "elapsed": 6529, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
# !git commit -m "Read Men's Shoe Prices dataset from data.world"
# + id="SY5brjWwnp-1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="3ad30928-e3f8-43ff-fd0e-d287b412f9f0" executionInfo={"status": "ok", "timestamp": 1581531666187, "user_tz": -60, "elapsed": 6246, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAShjBkPlFSui5i_WlKWOYkuI-ddG7Dk8mLUEoNNg=s64", "userId": "13976193166602868638"}}
# !git push -u origin master
# + id="TiBg3OEmof23" colab_type="code" colab={}
| matrix_one/Day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="V8-yl-s-WKMG"
# # EfficientDet Tutorial: inference, eval, and training
#
#
#
# <table align="left"><td>
# <a target="_blank" href="https://github.com/google/automl/blob/master/efficientdet/tf2/tutorial.ipynb">
# <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on github
# </a>
# </td><td>
# <a target="_blank" href="https://colab.sandbox.google.com/github/google/automl/blob/master/efficientdet/tf2/tutorial.ipynb">
# <img width=32px src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td></table>
# + [markdown] id="muwOCNHaq85j"
# # 0. Install and view graph.
# + [markdown] id="dggLVarNxxvC"
# ## 0.1 Install package and download source code/image.
#
#
# + id="hGL97-GXjSUw"
# %%capture
#@title
import os
import sys
import tensorflow as tf
# Download source code.
if "efficientdet" not in os.getcwd():
# !git clone --depth 1 https://github.com/google/automl
os.chdir('automl/efficientdet')
sys.path.append('.')
# !pip install -r requirements.txt
# !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
else:
# !git pull
# + id="Tow-ic7H3d7i"
MODEL = 'efficientdet-lite0' #@param
def download(m):
if m not in os.listdir():
if m.find('lite'):
# !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tgz
# !tar zxf {m}.tgz
else:
# !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tar.gz
# !tar zxf {m}.tar.gz
ckpt_path = os.path.join(os.getcwd(), m)
return ckpt_path
# Download checkpoint.
ckpt_path = download(MODEL)
print('Use model in {}'.format(ckpt_path))
# Prepare image and visualization settings.
image_url = 'https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png'#@param
image_name = 'img.png' #@param
# !wget {image_url} -O img.png
import os
img_path = os.path.join(os.getcwd(), 'img.png')
min_score_thresh = 0.35 #@param
max_boxes_to_draw = 200 #@param
line_thickness = 2#@param
import PIL
# Get the largest of height/width and round to 128.
image_size = max(PIL.Image.open(img_path).size)
# + [markdown] id="GvdjcYpUVuQ5"
# ## 0.2 View graph in TensorBoard
# + id="U2oz3r1LUDzr"
# !python -m model_inspect --model_name={MODEL} --logdir=logs &> /dev/null
# %load_ext tensorboard
# %tensorboard --logdir logs
# + [markdown] id="vZk2dwOxrGhY"
# # 1. inference
# + [markdown] id="_VaF_j7jdVCK"
# ## 1.1 Benchmark network latency
# There are two types of latency:
# network latency and end-to-end latency.
#
# * network latency: from the first conv op to the network class and box prediction.
# * end-to-end latency: from image preprocessing, network, to the final postprocessing to generate a annotated new image.
#
# + id="R_3gL01UbDLH"
# benchmark network latency
# !python -m tf2.inspector --mode=benchmark --model_name={MODEL} --hparams="mixed_precision=true" --only_network
# With colab + Tesla T4 GPU, here are the batch size 1 latency summary:
# D0 (AP=33.5): 14.9ms, FPS = 67.2 (batch size 8 FPS=)
# D1 (AP=39.6): 22.7ms, FPS = 44.1 (batch size 8 FPS=)
# D2 (AP=43.0): 27.9ms, FPS = 35.8 (batch size 8 FPS=)
# D3 (AP=45.8): 48.1ms, FPS = 20.8 (batch size 8 FPS=)
# D4 (AP=49.4): 81.9ms, FPS = 12.2 (batch size 8 FPS=)
# + [markdown] id="VW95IodKovEu"
# ## 1.2 Benchmark end-to-end latency
# + id="NSf6SrZcdavN"
# Benchmark end-to-end latency (: preprocess + network + posprocess).
#
# With colab + Tesla T4 GPU, here are the batch size 1 latency summary:
# D0 (AP=33.5): 22.7ms, FPS = 43.1 (batch size 4, FPS=)
# D1 (AP=39.6): 34.3ms, FPS = 29.2 (batch size 4, FPS=)
# D2 (AP=43.0): 42.5ms, FPS = 23.5 (batch size 4, FPS=)
# D3 (AP=45.8): 64.8ms, FPS = 15.4 (batch size 4, FPS=)
# D4 (AP=49.4): 93.7ms, FPS = 10.7 (batch size 4, FPS=)
batch_size = 1 # @param
saved_model_dir = 'savedmodel'
# !rm -rf {saved_model_dir}
# !python -m tf2.inspector --mode=export --model_name={MODEL} \
# --model_dir={ckpt_path} --saved_model_dir={saved_model_dir} \
# --batch_size={batch_size} --hparams="mixed_precision=true"
# !python -m tf2.inspector --mode=benchmark --model_name={MODEL} \
# --saved_model_dir={saved_model_dir} \
# --batch_size=1 --hparams="mixed_precision=true" --input_image=testdata/img1.jpg
# + [markdown] id="jGKs3w2_ZXnu"
# ## 1.3 Inference images.
#
# ---
#
#
# + id="tlh_S6M9ahe5"
# first export a saved model.
saved_model_dir = 'savedmodel'
# !rm -rf {saved_model_dir}
# !python -m tf2.inspector --mode=export --model_name={MODEL} \
# --model_dir={ckpt_path} --saved_model_dir={saved_model_dir}
# Then run saved_model_infer to do inference.
# Notably: batch_size, image_size must be the same as when it is exported.
serve_image_out = 'serve_image_out'
# !mkdir {serve_image_out}
# !python -m tf2.inspector --mode=infer \
# --saved_model_dir={saved_model_dir} \
# --model_name={MODEL} --input_image=testdata/img1.jpg \
# --output_image_dir={serve_image_out}
# + id="1q2x8s8GpUJz"
from IPython import display
display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))
# + id="fHU46tfckaZo"
# In case you need to specify different image size or batch size or #boxes, then
# you need to export a new saved model and run the inferernce.
serve_image_out = 'serve_image_out'
# !mkdir {serve_image_out}
saved_model_dir = 'savedmodel'
# !rm -rf {saved_model_dir}
# Step 1: export model
# !python -m tf2.inspector --mode=export \
# --model_name={MODEL} --model_dir={MODEL} \
# --hparams="image_size=1920x1280" --saved_model_dir={saved_model_dir}
# Step 2: do inference with saved model.
# !python -m tf2.inspector --mode=infer \
# --model_name={MODEL} --saved_model_dir={saved_model_dir} \
# --input_image=img.png --output_image_dir={serve_image_out} \
#
from IPython import display
display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))
# + [markdown] id="Vxm-kvfuAZne"
# ## 1.4 Inference video
# + id="3Pdnd1kQAgKY"
# step 0: download video
video_url = 'https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/data/video480p.mov' # @param
# !wget {video_url} -O input.mov
# Step 1: export model
saved_model_dir = 'savedmodel'
# !rm -rf {saved_model_dir}
# !python -m tf2.inspector --mode=export \
# --model_name={MODEL} --model_dir={MODEL} \
# --saved_model_dir={saved_model_dir} --hparams="mixed_precision=true"
# Step 2: do inference with saved model using saved_model_video
# !python -m tf2.inspector --mode=video \
# --model_name={MODEL} \
# --saved_model_dir={saved_model_dir} --hparams="mixed_precision=true" \
# --input_video=input.mov --output_video=output.mov
# Then you can view the output.mov
# + [markdown] id="M_r8Ja9aEqBP"
# # 2. TFlite
# + [markdown] id="-eDhDTLQL6gx"
# ## 2.1 COCO evaluation on validation set.
# + id="qZK6rk8OMBRZ"
if 'val2017' not in os.listdir():
# !wget http://images.cocodataset.org/zips/val2017.zip
# !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
# !unzip -q val2017.zip
# !unzip annotations_trainval2017.zip
# !mkdir tfrecord
# !PYTHONPATH=".:$PYTHONPATH" python dataset/create_coco_tfrecord.py \
# --image_dir=val2017 \
# --caption_annotations_file=annotations/captions_val2017.json \
# --output_file_prefix=tfrecord/val \
# --num_shards=32
# + [markdown] id="Cy6doZAcMZnX"
# ## 2.2 TFlite export INT8 model
# + id="VgpOjjokE6_o"
# In case you need to specify different image size or batch size or #boxes, then
# you need to export a new saved model and run the inferernce.
serve_image_out = 'serve_image_out'
# !mkdir {serve_image_out}
saved_model_dir = 'savedmodel'
# !rm -rf {saved_model_dir}
# # Step 1: export model
# !python -m tf2.inspector --mode=export --file_pattern=tfrecord/*.tfrecord \
# --model_name={MODEL} --model_dir={MODEL} --num_calibration_steps=100 \
# --saved_model_dir={saved_model_dir} --use_xla --tflite=INT8
# Step 2: do inference with saved model.
# !python -m tf2.inspector --mode=infer --use_xla \
# --model_name={MODEL} --saved_model_dir={saved_model_dir}/int8.tflite \
# --input_image=testdata/img1.jpg --output_image_dir={serve_image_out}
from IPython import display
display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))
# + id="j7Xz1FQ2PB0Z"
# Evalute on validation set (takes about 10 mins for efficientdet-d0)
# !python -m tf2.eval_tflite \
# --model_name={MODEL} --tflite_path={saved_model_dir}/int8.tflite \
# --val_file_pattern=tfrecord/val* \
# --val_json_file=annotations/instances_val2017.json --eval_samples=100
# + [markdown] id="xY14TFOWNNFl"
# ## 2.3 Compile EdgeTPU model (Optional)
# + id="jGm0q7cSNSbs"
# install edgetpu compiler
# !curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
# !echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
# !sudo apt-get update
# !sudo apt-get install edgetpu-compiler
# + [markdown] id="z8FGuSJuOfer"
# The EdgeTPU has 8MB of SRAM for caching model paramaters ([more info](https://coral.ai/docs/edgetpu/compiler/#parameter-data-caching)). This means that for models that are larger than 8MB, inference time will be increased in order to transfer over model paramaters. One way to avoid this is [Model Pipelining](https://coral.ai/docs/edgetpu/pipeline/) - splitting the model into segments that can have a dedicated EdgeTPU. This can significantly improve latency.
#
# The below table can be used as a reference for the number of Edge TPUs to use - the larger models will not compile for a single TPU as the intermediate tensors can't fit in on-chip memory.
#
# | Model architecture | Minimum TPUs | Recommended TPUs
# |--------------------|-------|-------|
# | EfficientDet-Lite0 | 1 | 1 |
# | EfficientDet-Lite1 | 1 | 1 |
# | EfficientDet-Lite2 | 1 | 2 |
# | EfficientDet-Lite3 | 2 | 2 |
# | EfficientDet-Lite4 | 2 | 3 |
# + id="WpSryQzZODse"
NUMBER_OF_TPUS = 1
# !edgetpu_compiler {saved_model_dir}/int8.tflite --num_segments=$NUMBER_OF_TPUS
# + [markdown] id="RW26DwfirQQN"
# # 3. COCO evaluation
# + [markdown] id="cfn_tRFOWKMO"
# ## 3.1 COCO evaluation on validation set.
# + id="24l4uI15MJN6"
if 'val2017' not in os.listdir():
# !wget http://images.cocodataset.org/zips/val2017.zip
# !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
# !unzip -q val2017.zip
# !unzip annotations_trainval2017.zip
# !mkdir tfrecord
# !python -m dataset.create_coco_tfrecord \
# --image_dir=val2017 \
# --caption_annotations_file=annotations/captions_val2017.json \
# --output_file_prefix=tfrecord/val \
# --num_shards=32
# + id="eLHZUY3jQpZr"
# Evalute on validation set (takes about 10 mins for efficientdet-d0)
# !python -m tf2.eval \
# --model_name={MODEL} --model_dir={ckpt_path} \
# --val_file_pattern=tfrecord/val* \
# --val_json_file=annotations/instances_val2017.json
# + [markdown] id="RW90fiMiyg4n"
# # 4. Training EfficientDets on PASCAL.
# + [markdown] id="C98Ye0MEyuKD"
# ## 4.1 Prepare data
# + id="6PC6QrMlylOF"
# Get pascal voc 2012 trainval data
import os
if 'VOCdevkit' not in os.listdir():
# !wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar
# !tar xf VOCtrainval_06-Nov-2007.tar
# !mkdir tfrecord
# !python -m dataset.create_pascal_tfrecord \
# --data_dir=VOCdevkit --year=VOC2007 --output_path=tfrecord/pascal
# Pascal has 5717 train images with 100 shards epoch, here we use a single shard
# for demo, but users should use all shards pascal-*-of-00100.tfrecord.
file_pattern = 'pascal-00000-of-00100.tfrecord' # @param
images_per_epoch = 57 * len(tf.io.gfile.glob('tfrecord/' + file_pattern))
images_per_epoch = images_per_epoch // 8 * 8 # round to 64.
print('images_per_epoch = {}'.format(images_per_epoch))
# + [markdown] id="ZcxDDCCW0ndv"
# ## 4.2 Train Pascal VOC 2007 from ImageNet checkpoint for Backbone.
# + id="SHPgm9Q13X-l"
# Train efficientdet from scratch with backbone checkpoint.
backbone_name = {
'efficientdet-d0': 'efficientnet-b0',
'efficientdet-d1': 'efficientnet-b1',
'efficientdet-d2': 'efficientnet-b2',
'efficientdet-d3': 'efficientnet-b3',
'efficientdet-d4': 'efficientnet-b4',
'efficientdet-d5': 'efficientnet-b5',
'efficientdet-d6': 'efficientnet-b6',
'efficientdet-d7': 'efficientnet-b6',
'efficientdet-lite0': 'efficientnet-lite0',
'efficientdet-lite1': 'efficientnet-lite1',
'efficientdet-lite2': 'efficientnet-lite2',
'efficientdet-lite3': 'efficientnet-lite3',
'efficientdet-lite3x': 'efficientnet-lite3',
'efficientdet-lite4': 'efficientnet-lite4',
}[MODEL]
# generating train tfrecord is large, so we skip the execution here.
import os
if backbone_name not in os.listdir():
if backbone_name.find('lite'):
# !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/{backbone_name}.tar.gz
else:
# !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/{backbone_name}.tar.gz
# !tar xf {backbone_name}.tar.gz
# !mkdir /tmp/model_dir
# key option: use --backbone_ckpt rather than --ckpt.
# Don't use ema since we only train a few steps.
# !python -m tf2.train --mode=traineval \
# --train_file_pattern=tfrecord/{file_pattern} \
# --val_file_pattern=tfrecord/{file_pattern} \
# --model_name={MODEL} \
# --model_dir=/tmp/model_dir/{MODEL}-scratch \
# --pretrained_ckpt={backbone_name} \
# --batch_size=16 \
# --eval_samples={images_per_epoch} \
# --num_examples_per_epoch={images_per_epoch} --num_epochs=1 \
# --hparams="num_classes=20,moving_average_decay=0,mixed_precision=true"
# + [markdown] id="SKHu-3lBwTiM"
# ## 4.3 Train Pascal VOC 2007 from COCO checkpoint for the whole net.
# + id="SD59rsZJc1WW"
# generating train tfrecord is large, so we skip the execution here.
import os
if MODEL not in os.listdir():
download(MODEL)
# !mkdir /tmp/model_dir/
# key option: use --ckpt rather than --backbone_ckpt.
# !python -m tf2.train --mode=traineval \
# --train_file_pattern=tfrecord/{file_pattern} \
# --val_file_pattern=tfrecord/{file_pattern} \
# --model_name={MODEL} \
# --model_dir=/tmp/model_dir/{MODEL}-finetune \
# --pretrained_ckpt={MODEL} \
# --batch_size=16 \
# --eval_samples={images_per_epoch} \
# --num_examples_per_epoch={images_per_epoch} --num_epochs=1 \
# --hparams="num_classes=20,moving_average_decay=0,mixed_precision=true"
# + [markdown] id="QcBGPMCXRC8q"
# ## 4.4 View tensorboard for loss and accuracy.
#
# + id="Vrkty06SRD0k"
# %load_ext tensorboard
# %tensorboard --logdir /tmp/model_dir/
# Notably, this is just a demo with almost zero accuracy due to very limited
# training steps, but we can see finetuning has smaller loss than training
# from scratch at the begining.
# + [markdown] id="7y4kKRr5ChwT"
# ## 5. Export to onnx
#
# + id="eh6pRLjOCsQo"
# !pip install tf2onnx
# + id="01GwBpcJDXVC"
# !python -m tf2.inspector --mode=export --model_name={MODEL} --model_dir={MODEL} --saved_model_dir={saved_model_dir} --hparams="nms_configs.method='hard', nms_configs.iou_thresh=0.5, nms_configs.sigma=0.0"
# + id="jhIxTWHLCvgO"
# !python -m tf2onnx.convert --saved-model={saved_model_dir} --output={saved_model_dir}/model.onnx --opset=11
| efficientdet/tf2/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Importing packages
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
import os
import json
import re
import cv2
import h5py
import sys
import copy
import random
from collections import deque
tf.__version__
## defining different variables
data_dir = '../data/NIST19/'
img_size = 128
flatten_image_size = img_size**2
num_channels = 3
img_shape = (img_size,img_size,num_channels)
num_classes = 62 # 26 lower case alphabets, 26 upper case alphabets, 10 digits
# +
# making the map for label_cls<>class_name conversion
label_cls_name_map = {}
label_name_cls_map = {}
dirs = os.listdir(data_dir)
#print dirs
label_cls = 0
for dir_ in dirs:
try:
if '.DS_Store' in dir_:
continue
class_name = chr(int(dir_,16))
label_cls_name_map[label_cls] = class_name
label_name_cls_map[class_name] = label_cls
label_cls+=1
except Exception, err:
print Exception, err
label_cls_name_json = json.dumps(label_cls_name_map, indent=4, separators=(',',':'))
with open('label_cls_name.json', 'w') as f:
f.write(label_cls_name_json)
print "Class and label map generated!!!"
# +
# reading the maps
label_cls_name_map = {}
label_name_cls_map = {}
with open('label_cls_name.json', 'r') as f:
label_cls_name_map = json.loads(f.read())
for k,v in label_cls_name_map.iteritems():
label_name_cls_map[v] = k
# +
# queue of files for each class. This will be picked up by the
class_queue_map = {}
training_perc = 0.9
for dir_ in dirs:
try:
if '.DS_Store' in dir_:
continue
class_name = chr(int(dir_,16))
label_cls = label_name_cls_map[class_name]
dir_path = os.path.join(os.path.join(data_dir, dir_), 'train_%s' % dir_)
class_queue_map[label_cls] = deque(os.listdir(dir_path))
except Exception, err:
print Exception, err
train_class_queue_map = {}
test_class_queue_map = {}
for key_, queue_ in class_queue_map.iteritems():
train_class_queue_map[key_] = deque(list(queue_)[:int(len(queue_)*training_perc)])
test_class_queue_map[key_] = deque(list(queue_)[int(len(queue_)*training_perc):])
# -
def batch_generator(train_test_class_queue_map, each_class_len=10):
print 'Generating the batch!!!'
class_queue_map = copy.deepcopy(train_test_class_queue_map)
total_images = 0
for key_, queue_ in class_queue_map.iteritems():
total_images+=len(queue_)
print 'Total Images: ', total_images
batch_size = len(class_queue_map)*each_class_len
for iter_ in range(total_images/batch_size):
batch_images = []
batch_labels = []
for key_, queue_ in class_queue_map.iteritems():
try:
for i in range(each_class_len):
class_name = label_cls_name_map[key_]
hex_name = class_name.encode('hex')
full_file_path = os.path.join(os.path.join(os.path.join(data_dir, hex_name), 'train_%s' % hex_name),
queue_.popleft())
img = cv2.imread(full_file_path)
res_img = img
batch_images.append(img)
categorical_label = [0]*num_classes
categorical_label[key_] = 1
batch_labels.append(categorical_label)
except:
continue
data = zip(batch_images, batch_labels)
np.random.shuffle(data)
batch_images, batch_labels = zip(*data)
yield np.array(batch_images), np.array(batch_labels)
sample_gen = batch_generator(test_class_queue_map, 10)
sample_images, sample_labels = next(sample_gen)
sample_images.shape
def plot_images(images, cls_true, cls_pred=None, smooth=True):
assert len(images) == len(cls_true) == 9
# create the figure with subplots
fig, axes = plt.subplots(3,3, figsize=(10, 10))
wspace=2
if cls_pred is None:
hspace=0.1
else:
hspace=0.3
fig.subplots_adjust(hspace=hspace, wspace=wspace)
for i,ax in enumerate(axes.flat):
if smooth:
interpolation = 'spline16'
else:
interpolation = 'nearest'
ax.imshow(images[i], interpolation=interpolation)
cls_true_name = label_cls_name_map[np.argmax(cls_true[i])]
if cls_pred is None:
xlabel = "True: {0}".format(cls_true_name)
else:
# Name of the predicted class.
cls_pred_name = label_cls_name_map[np.argmax(cls_pred[i])]
xlabel = "True: {0}\nPred: {1}".format(cls_true_name, cls_pred_name)
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
plt.show()
plot_images(sample_images[9:18,:], cls_true=sample_labels[9:18], smooth=True)
# ## Tensorflow based model
# +
# CNN MODEL
tf.reset_default_graph()
x = tf.placeholder(tf.float32, shape=[None, img_size, img_size, num_channels], name='x')
conv1 = tf.layers.conv2d(inputs=x, name='layer_conv1', padding='same', filters=32, kernel_size=5,
activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=2, strides=2)
conv2 = tf.layers.conv2d(inputs=pool1, name='layer_conv2', padding='same', filters=64, kernel_size=5,
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=2, strides=2)
layer3 = tf.contrib.layers.flatten(pool2)
fc1 = tf.layers.dense(inputs=layer3, name='layer_fc1', units=512, activation=tf.nn.relu)
fc2 = tf.layers.dense(inputs=fc1, name='layer_fc2', units=256, activation=tf.nn.relu)
net = tf.layers.dense(inputs=fc2, name='layer_fc_out', units=num_classes, activation=None)
logits = net
y_pred = tf.nn.softmax(logits=logits)
y_pred_cls = tf.argmax(y_pred, dimension=1)
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], name='y_true')
y_true_cls = tf.argmax(y_true, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_true, logits=logits)
loss = tf.reduce_mean(cross_entropy)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(loss)
# -
session = tf.Session()
session.run(tf.global_variables_initializer())
def optimize(train_class_queue_map, epochs=1, each_class_len=10):
# Ensure we update the global variable rather than a local copy.
for epoch_ in range(epochs):
print 'Epoch: ', epoch_+1
avg_acc = []
train_gen = batch_generator(train_class_queue_map, each_class_len)
i=0
y_pred_list = []
y_true_list = []
for x_batch, y_true_batch in train_gen:
# x_batch now holds a batch of images and
# y_true_batch are the true labels for those images.
# Put the batch into a dict with the proper names
feed_dict_train = {x: x_batch, y_true: y_true_batch}
session.run(optimizer, feed_dict=feed_dict_train)
acc = session.run(accuracy, feed_dict=feed_dict_train)
avg_acc.append(acc)
y_pred_list.extend(session.run(y_pred_cls, feed_dict=feed_dict_train))
y_true_list.extend(session.run(y_true_cls, feed_dict=feed_dict_train))
if i % 500 == 0:
# Message for printing every 10th batch.
msg = "Optimization batch: {0:>6}, Training Accuracy: {1:>6.1%}"
print(msg.format(i + 1, acc))
i+=1
# avg accuracy for 1 epoch
correct_prediction = (np.array(y_pred_list) == np.array(y_true_list))
correct_sum = correct_prediction.sum()
acc_ = float(correct_sum) / len(y_true_list)
print 'Average accuracy for the Epoch: ', acc_
# +
#optimize(train_class_queue_map,epochs=5, each_class_len=5)
# +
# saver = tf.train.Saver()
# save_dir = 'ocr_checkpoints/'
# if not os.path.exists(save_dir):
# os.makedirs(save_dir)
# save_path = os.path.join(save_dir, 'best_validation')
# saver.save(sess=session, save_path=save_path)
# -
saver= tf.train.Saver()
saver.restore(sess=session, save_path='../dl_models/ocr_checkpoints/best_validation')
def predict(test_class_queue_map, inception=False, transfer_layer=False, limit=None):
'''
takes the list of np array of images as input and predicts the class.
'''
y_pred_cls_list = []
y_true_cls_list = []
gen_ = batch_generator(test_class_queue_map, 1)
for j, (x_batch, y_true_batch) in enumerate(gen_):
if limit:
if j == limit:
break
for i, image_ in enumerate(x_batch):
shape_ = image_.shape
if inception==True and transfer_layer==True:
pass
#feed_dict = {x: transfer_values_test[i].reshape(1,-1)}
#y_pred_ = session.run(y_pred, feed_dict=feed_dict)
#y_pred_ = np.squeeze(y_pred_)
#y_pred_cls_ = np.argmax(y_pred_)
#y_pred_list.append(y_pred_cls_)
#y_pred_cls_list.append(y_pred_cls_)
elif inception==True and transfer_layer==False:
pass
#feed_dict = {tensor_name_input_image: image_}
#y_pred_ = session.run(y_pred, feed_dict=feed_dict)
#y_pred_ = np.squeeze(y_pred_)
#tmp_cls = np.argmax(y_pred_)
#tmp_name = uid_name[cls_uid[tmp_cls]].split(',')[0]
#tmp_name = re.sub(' ', '_', tmp_name)
#print tmp_name
#try:
# y_pred_cls_ = label_name_cls_map[tmp_name]
#except:
# y_pred_cls_ = 0
#y_pred_list.append(y_pred_cls_)
#y_pred_cls_list.append(y_pred_cls_)
else:
feed_dict = {x: np.reshape(image_, (1, shape_[0], shape_[1], shape_[2]))}
y_pred_ = session.run(y_pred, feed_dict=feed_dict)
y_pred_ = np.squeeze(y_pred_)
y_pred_cls_ = np.argmax(y_pred_)
y_pred_cls_list.append(y_pred_cls_)
y_true_cls_list.append(np.argmax(y_true_batch[i]))
return np.array(y_true_cls_list), np.array(y_pred_cls_list)
# +
test_true_cls, test_pred_cls = predict(test_class_queue_map, inception=False, transfer_layer=False)
correct_prediction = (test_true_cls == test_pred_cls)
correct_sum = correct_prediction.sum()
acc = float(correct_sum) / len(test_true_cls)
msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
print(msg.format(acc, correct_sum, len(test_true_cls)))
# -
train_true_cls, train_pred_cls = predict(train_class_queue_map, inception=False, transfer_layer=False)
correct_prediction = (traiiiin_true_cls == train_pred_cls)
correct_sum = correct_prediction.sum()
acc = float(correct_sum) / len(train_true_cls)
msg = "Accuracy on Test-Set: {0:.1%} ({1} / {2})"
print(msg.format(acc, correct_sum, len(train_true_cls)))
| ocr_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Detect Bash command
# +
# forces CPU usage
# import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# in case of problem with OpenMP runtime
import os
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'
# +
import time
t_start = time.time()
# !python3 -Wignore glomerulus.py detect \
# --dataset='/home/Fred/Data/glomerulus/run' \
# --subset='Elisabeth' \
# --weights='../logs/glomerulus20200616T1225/mask_rcnn_glomerulus_0040.h5' \
# --logs='../logs'
t_total = time.time() - t_start
print("Total time: {:.1f} seconds".format(t_total))
# +
## LOOP OVER ALL MODEL TO SEE EVOLUTION OVER AN IMAGE
import time
import os
t_start = time.time()
for i in rev:
model_path = '../logs/glomerulus20200616T1225/mask_rcnn_glomerulus_00'+str(i+1)+'.h5'
os.environ['MODEL']=model_path
# !python3 glomerulus.py detect \
# --dataset='/home/Fred/Data/glomerulus' \
# --subset='evol' \
# --weights=$MODEL \
# --logs='../logs'
t_total = time.time() - t_start
print("Total time: {:.1f} seconds".format(t_total))
| Code/5. detect_glomerulus_bash.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Spell Checker Notebook
# Builds a frequencty dictionary tailored to corpus.
# Builds a string to string lookup dictionary to speed up spellchecking.
#
# Disclaimer:
# Spell corrections are done on a best effort basis.
# Since words are taken out of context, corrections may be wrong.
# Warning, some of the corrections found are explicit words.
# These corrections are not intended to offend the reader.
# Please pardon mistaken corrections as consequence of algorithmic generalization.
import os
import sys
import regex as re
import pandas as pd
import numpy as np
import nltk
from collections import Counter
from collections import defaultdict
import pickle
from spellhelper import Spellhelper
import time
# #### Text Cleaning Routines
def CleanNumeric(token):
'''
input numeric token
clean the token
return simplified token list
'''
n_map = {ord(c): " " for c in "0123456789"}
alphas = token.translate(n_map)
toks = ['number'] + nltk.word_tokenize(alphas)
return toks
def CleanSymbol(token):
'''
input symbolic token
clean the token
return simplified string
'''
#simplify paths
if (token.count('/')>2)|(token[:2]=='//')|(token[:4]=='www.'):
clean_tok = ['path']
else:
# remove special characters
sc_map = {ord(c): " " for c in \
"!@#$£¥¢©®™§†±≠%^&*()[]{};:,‚./<>?\|¨´'`~-=_+¬∞µ•√∏∫°áàâæëéîñöōüû"}
clear_sym = token.translate(sc_map)
ctoks = nltk.word_tokenize(clear_sym)
clean_tok = []
for tok in ctoks:
if not bool(re.search(r'\d', tok)):
clean_tok.append(tok)
# process numericals
else:
clean_tok += CleanNumeric(tok)
return clean_tok
def PreClean(token):
'''
sort token
preclean by type
return cleaned token list
'''
if bool(re.match("^[a-zA-Z0-9]*$", token)):
if bool(re.search(r'\d', token)):
clean = CleanNumeric(token)
else:
clean = [token]
else:
clean = CleanSymbol(token)
return clean
# #### Data Loading Routines
def LoadCorpus(file):
'''
Load paragraphs into dataframe
Convert paragraphs to tokens
Return counter and tokens
'''
# Load Corpus of Paragraphs
df_mess = pd.read_csv(file)
# put all messages into a single string
TEXT=""
for i,nrows in df_mess.iterrows():
TEXT += (nrows['message'])
# tokenize the text string
raw_tokens = nltk.word_tokenize(TEXT.lower())
# pre-clean the tokens
pc_tokens = []
for token in raw_tokens:
pc_tokens += PreClean(token)
#build counter of corpus tokens
C = nltk.FreqDist(pc_tokens)
return C, pc_tokens
# #### Processing Routines
def BuildSCdict(speller, C_corpus):
'''
import speller function and Corpus counter
build a spell correction dictionary
return spell correction dictionary and rejects list
'''
scdict = defaultdict(str)
sp_rejects = []
for token in sorted(C_corpus.keys()):
if token in speller.counter:
scdict[token] = token
else:
sc = speller.spellcheck(token)
if sc != token:
scdict[token] = sc
print(token, ' -> ', sc)
else:
sp_rejects.append(token)
return scdict, sp_rejects
def ApplyCorr(string_dict, tokens):
'''
import string dictionary
import list of tokens to correct
compiled corrected version
render from token list to string and back
return list of corrected tokens
'''
# input string corrections into list
corr_strings = []
for token in tokens:
if token in string_dict:
corr_strings.append(string_dict[token])
else:
corr_strings.append(token)
# convert list to string then into tokens
big_string = ' '.join(corr_strings)
corr_tokens = nltk.word_tokenize(big_string)
return corr_tokens
def FindTypos(speller, C_corpus):
'''
import speller function and Corpus counter
build an string lookup dictionary of correctly spelled tokens
return string lookup dictionary and typos list
'''
scdict = defaultdict(str)
sp_typos = []
for token in sorted(C_corpus.keys()):
# spelling okay
if token in speller.counter:
scdict[token] = token
# typos
else:
sp_typos.append(token)
return sp_typos, scdict
def SpellSeg(speller, token):
'''
import spelling function
import token
attempt spelling correction and score
attempt segmentation correction and score
compare scores and select winning score as correction
return corrected string and score
'''
# spell check
spell_cor = speller.spellcheck(token)
if spell_cor != token:
sc_score = speller.counter[spell_cor]
else:
sc_score = 0
# seg check
segs = speller.segcheck(token)
valid = len(''.join(segs))/len(segs)
if valid > 1.4:
best = [t for t in segs if len(t)>1]
seg_cor = ' '.join(best)
scores = [speller.counter[t] for t in best]
sg_score = np.mean(scores)
else:
sg_score = 0
# flunked out of being corrected
if (sc_score==0) & (sg_score==0):
bstring = token
score = 0
# correction found
else:
if sc_score > sg_score:
bstring = spell_cor
score = sc_score
else:
bstring = seg_cor
score = sg_score
return bstring, score
def FirstPass(speller, string_dict, typos_list):
'''
import speller function
import string lookup dictionary
import typo token list
attempt spelling correction and score
attempt segmentation correction and score
compare scores and select winning score as correction
add best effort correction to string dictionary
return dictionary and flunked token list
'''
flunk_list = []
for token in typos_list:
# attempt correction
bstring, score = SpellSeg(speller, token)
# flunkies
if score == 0:
flunk_list.append(token)
else:
# update correction
string_dict[token] = bstring
for seg in bstring.split():
if seg not in string_dict:
string_dict[seg] = seg
print(token, ' -> ', bstring)
return string_dict, flunk_list
def SecondPass(speller, string_dict, flunk_list):
'''
import speller function
import string dictionary
import flunked token list
iterate through flunked texts
split off probable noise
permutate splitting within text
limit splits out to 30 characters
attempt corrections on split sides
recombine correction results
adopt optimal split result for text
return dictionary and noise
'''
# combine split with segmenting
noise = []
for text in flunk_list:
# assume shorts are noise
if len(text) < 8:
noise.append(text)
# attempt to find best correction pair
else:
# noise assumption as base case
maxscore = 0
maxres = text
# slice text and attempt fix
split_range = min(30, len(text))
split_min = int(split_range*0.4)
split_max = int(split_range*0.6)+1
for i in range(split_min,split_max):
# split
text_left = text[:i]
text_right = text[i:]
# attempt corrections
tl_string, tl_score = SpellSeg(speller, text_left)
tr_string, tr_score = SpellSeg(speller, text_right)
split_result = ''
# recombine
if tl_score > 0:
split_result += tl_string + ' '
if tr_score > 0:
split_result += tr_string
score = tl_score + tr_score
# looking for the optimal scoring split pair
if score > maxscore:
maxscore = score
maxres = split_result.strip()
# process optimal split pair result
if maxres == text:
noise.append(text)
else:
print(text, ' -> ', maxres)
for t in maxres.split():
if t not in string_dict:
string_dict[t] = t
string_dict[text] = maxres
return string_dict, noise
# #### Main Routine
# +
# Load Data
# Instantiate Spelling tool
speller = Spellhelper()
# big text corpus
C_corpus, corpus_tokens = LoadCorpus('messages.csv')
# estimate number of unique spelling errors
sp_errors = len([t for t in C_corpus if t not in speller.counter])
# +
# Process new words
# new words in corpus but maybe not in english dictionary
new_words = ['ayuda', 'bbc', 'cbs', 'center', 'centered',
'cyber', 'debris', 'donde', 'euro', 'fecal',
'feces', 'fema', 'foxnews', 'franken', 'fyi',
'giardia', 'gmo', 'google', 'gps', 'gui',
'haiti', 'http', 'https', 'hungry', 'instagram',
'mbc', 'meds', 'msnbc', 'nbc', 'nyc', 'omg', 'ppe',
'redcross', 'reiki', 'rescue', 'sandy', 'scary',
'skyfm', 'skynews', 'sulfate', 'sulfide', 'sulfur',
'tele', 'terre', 'tumblr', 'tweeting', 'tweets',
'twitter', 'ucla', 'unicef', 'vegan', 'volcano',
'wikipedia','wtf']
# Update speller's frequency dictionary
speller.addwords(C_corpus, new_words)
# +
# Process spell checking
# find typos
sp_typos, scdict = FindTypos(speller, C_corpus)
# attempt first pass corrections
sc_dict, flunk_list = FirstPass(speller, scdict, sp_typos)
# apply corrections
sc_tokens = ApplyCorr(scdict, corpus_tokens)
#build counter of corpus tokens
C_spell = nltk.FreqDist(sc_tokens)
# update speller frequency dict with corpus words
speller.updatefreq(C_spell)
# +
# Process Second Pass Segmentations and Spelling
sc_dict, noise = SecondPass(speller, sc_dict, flunk_list)
# apply corrections to corpus
splitseg_tokens = ApplyCorr(sc_dict, sc_tokens)
#build counter of corpus tokens
C_splits = nltk.FreqDist(splitseg_tokens)
# update frequencies of corpus words
speller.updatefreq(C_splits)
# update noise entries
for n in noise:
scdict[n] = 'noise'
# -
print(noise)
# #### Results
# print results
print('Spelling Error Set Size: ', sp_errors)
print('1st Pass Corrections: ', sp_errors - len(flunk_list))
print('2nd Pass Split Corrections: ', len(flunk_list)-len(noise))
print('Best Guess Possible Noise: ', len(noise))
print('Initial Vocab Count', len(C_corpus))
print('Final Vocab Count:', len(C_splits))
# #### Export Files
#save speller frequency dictionary
speller.savefreqdict("disaster_dict_r2.txt")
# pickle string to string type
with open('spell_lookup_r2.pkl', 'wb') as handle:
pickle.dump(scdict, handle, protocol=pickle.HIGHEST_PROTOCOL)
# #### Test
with open('spell_lookup_r2.pkl', 'rb') as handle:
cust_dict = pickle.load(handle)
cust_dict.get('valueadded', 'name')
cust_dict.get('visualizationgonewrong', 'name')
cust_dict.get('tuberculosious', 'name')
cust_dict.get('tombouctou', 'name')
cust_dict.get('tzgmhzg', 'name')
cust_dict.get('neverbeforeseen', 'name')
| data/spell_check.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
#
# # FreeSurfer integration with MNE-Python
#
#
# FreeSurfer is an open source analysis toolbox for MRI data. It contains several
# command line tools and graphical user interfaces. FreeSurfer can be obtained
# from https://surfer.nmr.mgh.harvard.edu/
#
# In MNE, FreeSurfer is used to provide structural information of various
# kinds, for `source estimation <tut-inverse-methods>`. Thereby a
# subject specific structural MRI will be used to obtain various structural
# representations like spherical or inflated brain surfaces. Furthermore features
# like curvature as well as various labels for areas of interest (such as V1) are
# computed.
#
# Thus FreeSurfer provides an easy way to shift anatomically related
# data between different representations and spaces. See e.g.
# `ch_morph` for information about how to
# use FreeSurfer surface representations to allow functional data to morph
# between different subjects.
#
# First steps
# ===========
#
# After downloading and installing, the environment needs to be set up correctly.
# This can be done by setting the FreeSurfer's root directory correctly and
# sourcing the setup file::
#
# $ export FREESURFER_HOME=/path/to/FreeSurfer
# $ source $FREESURFER_HOME/SetUpFreeSurfer.sh
#
# <div class="alert alert-info"><h4>Note</h4><p>The FreeSurfer home directory might vary depending on your operating
# system. See the `FreeSurfer installation guide
# <https://surfer.nmr.mgh.harvard.edu/fswiki/DownloadAndInstall>`_ for more.</p></div>
#
# Another important step is to define the subject directory correctly.
# ``SUBJECTS_DIR`` must be defined such, that it contains the individual
# subject's reconstructions in separate sub-folders. Those sub-folders will be
# created upon the reconstruction of the anatomical data. Nevertheless the parent
# directory has to be set beforehand::
#
# $ export SUBJECTS_DIR=~/subjects
#
# Again see the `FreeSurfer installation guide
# <https://surfer.nmr.mgh.harvard.edu/fswiki/DownloadAndInstall>`_ for more.
#
# Once setup correctly, FreeSurfer will create a new subject folder in
# ``$SUBJECTS_DIR``.
#
# Anatomical reconstruction
# =========================
#
# MNE-Python works together with FreeSurfer in order to compute the forward model
# and setting up the corresponding :class:`source space <mne.SourceSpaces>`. See
# `setting_up_source_space` for more information. Usually a full FreeSurfer
# reconstruction is obtained by prompting the following command to a bash
# console (e.g. Linux or MacOS Terminal)::
#
# $ my_subject=sample
# $ my_NIfTI=/path/to/NIfTI.nii.gz
# $ recon-all -i $my_NIfTI -s $my_subject -all
#
# where :code:`i` stands for "input" and :code:`s` for "subject". Executing
# this, will create the folder "~/subjects/sample", where all
# results are stored.
#
# <div class="alert alert-info"><h4>Note</h4><p>This compution takes often several hours. Please be patient.</p></div>
#
# Within a single subject all the files MNE-Python uses (and some more) are
# grouped into meaningful sub-folders (such that "surf" contains surface
# representations, "mri" volumetric files, etc.).
#
# FreeSurfer performs a hemispheric separation and most results are present
# in a left and right hemisphere version. This is often indicated by the
# prefix ``lh`` or ``rh`` to refer to the aforementioned. For that reason
# data representations such as :class:`mne.SourceEstimate` carry two sets of
# spatial locations (vertices) for both hemispheres separately. See also
# `tut-source-estimate-class`.
#
# 'fsaverage'
# ===========
#
# During installation, FreeSurfer copies a "default" subject, called
# ``'fsaverage'`` to ``$FREESURFER_HOME/subjects/fsaverage``. It contains all
# data types that a subject reconstruction would yield and is required by
# MNE-Python.
#
# See https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage for more
# information. Furthermore a copy of 'fsaverage' can be found in
# `sample-data`.
#
# When using ``'fsaverage'`` as value for the definition
# of a subject when calling a function, the corresponding data will be read (e.g.
# ``subject='fsaverage'``) from '~/subjects/fsaverage'. This becomes especially
# handy, when attempting statistical analyses on group level, based on
# individual's brain space data. In that case ``'fsaverage'`` will by default act
# as reference space for `source estimate transformations <ch_morph>`.
#
# Use with MNE-Python
# ===================
#
# For source localization analyses to work properly, it is important, that the
# FreeSurfer reconstruction has completed beforehand. Furthermore, when using
# related functions, such as :func:`mne.setup_source_space`, ``SUBJECTS_DIR`` has
# to be defined either globally by setting :func:`mne.set_config` or for
# each function separately, by passing the respective keyword argument
# ``subjects_dir='~/subjects'``. See also `setting_up_source_space` to get
# an idea of how this works.
#
| dev/_downloads/bd04f5896aa164e42641929000e3e800/plot_background_freesurfer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/usm.jpg" width="480" height="240" align="left"/>
# # MAT281 - Reducir Overfitting
# ## Objetivos de la clase
#
# * Aprender técnicas para prevenir el overfitting y underfitting en los modelos de machine learning.
#
# ## Contenidos
#
# * [Métodos para reducir el overfitting](#c1)
# <a id='c1'></a>
# ## 1.- Métodos para reducir el overfitting
#
#
# Algunas de las técnicas que podemos utilizar para reducir el overfitting, son:
#
# * Recolectar más datos.
# * Introducir una penalización a la complejidad con alguna técnica de regularización.
# * Utilizar modelos ensamblados.
# * Utilizar validación cruzada.
# * Optimizar los parámetros del modelo con *grid search*.
# * Reducir la dimensión de los datos.
# * Aplicar técnicas de selección de atributos.
#
#
# Veremos ejemplos de algunos métodos para reducir el sobreajuste.
# ### a) Validación cruzada
#
# La **validación cruzada** se inicia mediante el fraccionamiento de un conjunto de datos en un número $k$ de particiones (generalmente entre 5 y 10) llamadas *pliegues*.
#
# La validación cruzada luego itera entre los datos de *evaluación* y *entrenamiento* $k$ veces, de un modo particular. En cada iteración de la validación cruzada, un *pliegue* diferente se elige como los datos de *evaluación*. En esta iteración, los otros *pliegues* $k-1$ se combinan para formar los datos de *entrenamiento*. Por lo tanto, en cada iteración tenemos $(k-1) / k$ de los datos utilizados para el *entrenamiento* y $1 / k$ utilizado para la *evaluación*.
#
# Cada iteración produce un modelo, y por lo tanto una estimación del rendimiento de la *generalización*, por ejemplo, una estimación de la precisión. Una vez finalizada la validación cruzada, todos los ejemplos se han utilizado sólo una vez para *evaluar* pero $k -1$ veces para *entrenar*. En este punto tenemos estimaciones de rendimiento de todos los *pliegues* y podemos calcular la media y la desviación estándar de la precisión del modelo.
#
# <img alt="Validacion cruzada" title="Validacion cruzada" src="http://relopezbriega.github.io/images/validacion_cruzada.png">
# Veamos un ejemplo en python, ocupando el conjunto de datos **make_classification**.
# +
# librerias
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier,DecisionTreeRegressor
import random
random.seed(1982) # semilla
# graficos incrustados
# %matplotlib inline
# parametros esteticos de seaborn
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (12, 4)})
# +
# Ejemplo en python - árboles de decisión
# dummy data con 100 atributos y 2 clases
X, y = make_classification(10000, 100, n_informative=3, n_classes=2,
random_state=1982)
# separ los datos en train y eval
x_train, x_eval, y_train, y_eval = train_test_split(X, y, test_size=0.35,
train_size=0.65,
random_state=1982)
# Grafico de ajuste del árbol de decisión
train_prec = []
eval_prec = []
max_deep_list = list(range(2, 20))
# +
# Ejemplo cross-validation
from sklearn.model_selection import cross_validate,StratifiedKFold
# creando pliegues
skf = StratifiedKFold(n_splits=20,
random_state=2016)
precision = []
model = DecisionTreeClassifier(criterion='entropy', max_depth=5)
skf.get_n_splits(x_train, y_train)
for k, (train_index, test_index) in enumerate(skf.split(X, y)):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train,y_train)
score = model.score(X_test,y_test)
precision.append(score)
print('Pliegue: {0:}, Dist Clase: {1:}, Prec: {2:.3f}'.format(k+1,
np.bincount(y_train), score))
# -
# En este ejemplo, utilizamos el iterador `StratifiedKFold` que nos proporciona Scikit-learn. Este iterador es una versión mejorada de la validación cruzada, ya que cada *pliegue* va a estar estratificado para mantener las proporciones entre las *clases* del conjunto de datos original, lo que suele dar mejores estimaciones del sesgo y la varianza del modelo.
#
# También podríamos utilizar `cross_val_score` que ya nos proporciona los resultados de la precisión que tuvo el modelo en cada *pliegue*.
# +
# Ejemplo con cross_val_score
from sklearn.model_selection import cross_val_score
# separ los datos en train y eval
x_train, x_eval, y_train, y_eval = train_test_split(X, y, test_size=0.35,
train_size=0.65,
random_state=1982)
model = DecisionTreeClassifier(criterion='entropy',
max_depth=5)
precision = cross_val_score(estimator=model,
X=x_train,
y=y_train,
cv=20)
# -
precision = [round(x,2) for x in precision]
print('Precisiones: {} '.format(precision))
print('Precision promedio: {0: .3f} +/- {1: .3f}'.format(np.mean(precision),
np.std(precision)))
# ### Más datos y curvas de aprendizaje
#
# * Muchas veces, reducir el Sobreajuste es tan fácil como conseguir más datos, dame más datos y te predeciré el futuro!.
# * En la vida real nunca es una tarea tan sencilla conseguir más datos.
# * Una técnica para reducir el sobreajuste son las *curvas de aprendizaje*, las cuales grafican la precisión en función del tamaño de los datos de entrenamiento.
#
# <img alt="Curva de aprendizaje" title="Curva de aprendizaje" src="http://relopezbriega.github.io/images/curva_aprendizaje.png" width="600px" height="600px" >
# Para graficar las curvas de aprendizaje es necesario ocupar el comando de sklearn llamado `learning_curve`.
# +
# Ejemplo Curvas de aprendizaje
from sklearn.model_selection import learning_curve
train_sizes, train_scores, test_scores = learning_curve(
estimator=model,
X=x_train,
y=y_train,
train_sizes=np.linspace(0.1, 1.0, 20),
cv=10,
n_jobs=-1
)
# calculo de metricas
train_mean = np.mean(train_scores, axis=1)
train_std = np.std(train_scores, axis=1)
test_mean = np.mean(test_scores, axis=1)
test_std = np.std(test_scores, axis=1)
# -
# Veamos que el comando `learning_curve` va creando conjunto de datos, pero de distintos tamaños.
# tamano conjunto de entrenamiento
for k in range(len(train_sizes)):
print('Tamaño Conjunto {}: {}'.format(k+1,train_sizes[k]))
# Finalmente, graficamos las precisiones tanto para el conjunto de entranamiento como de evaluación para los distintos conjuntos de datos generados.
# graficando las curvas
plt.plot(train_sizes, train_mean, color='r', marker='o', markersize=5,
label='entrenamiento')
plt.fill_between(train_sizes, train_mean + train_std,
train_mean - train_std, alpha=0.15, color='r')
plt.plot(train_sizes, test_mean, color='b', linestyle='--',
marker='s', markersize=5, label='evaluacion')
plt.fill_between(train_sizes, test_mean + test_std,
test_mean - test_std, alpha=0.15, color='b')
plt.grid()
plt.title('Curva de aprendizaje')
plt.legend(loc='upper right')
plt.xlabel('Cant de ejemplos de entrenamiento')
plt.ylabel('Precision')
plt.show()
# En este gráfico podemos concluir que:
#
# * Con pocos datos la precisión entre los datos de entrenamiento y los de evaluación son muy distintas y luego a medida que la cantidad de datos va aumentando, el modelo puede generalizar mucho mejor y las precisiones se comienzan a emparejar.
#
#
# * Este gráfico también puede ser importante a la hora de decidir invertir en la obtención de más datos, ya que por ejemplo nos indica que a partir las 2500 muestras, el modelo ya no gana mucha más precisión a pesar de obtener más datos.
# ### b) Optimización de parámetros con Grid Search
#
# La mayoría de los modelos de Machine Learning cuentan con varios parámetros para ajustar su comportamiento, por lo tanto, otra alternativa que tenemos para reducir el Sobreajuste es optimizar estos parámetros por medio de un proceso conocido como **grid search** e intentar encontrar la combinación ideal que nos proporcione mayor precisión.
#
# El enfoque que utiliza *grid search* es bastante simple, se trata de una búsqueda exhaustiva por el paradigma de fuerza bruta en el que se especifica una lista de valores para diferentes parámetros, y la computadora evalúa el rendimiento del modelo para cada combinación de éstos parámetros para obtener el conjunto óptimo que nos brinda el mayor rendimiento.
#
#
# <img alt="Curva de aprendizaje" title="Curva de aprendizaje" src="https://miro.medium.com/max/3200/1*yHNmVkf43eTJF5QDebUltQ.png" width="700" height="600px" >
# +
# Ejemplo de grid search con SVM.
from sklearn.model_selection import GridSearchCV
# creación del modelo
model = DecisionTreeClassifier()
# rango de parametros
rango_criterion = ['gini','entropy']
rango_max_depth =np.array( [4,5,6,7,8,9,10,11,12,15,20,30,40,50,70,90,120,150])
param_grid = dict(criterion=rango_criterion, max_depth=rango_max_depth)
param_grid
# +
# aplicar greed search
gs = GridSearchCV(estimator=model,
param_grid=param_grid,
scoring='accuracy',
cv=5,
n_jobs=-1)
gs = gs.fit(x_train, y_train)
# -
# imprimir resultados
print(gs.best_score_)
print(gs.best_params_)
# utilizando el mejor modelo
mejor_modelo = gs.best_estimator_
mejor_modelo.fit(x_train, y_train)
print('Precisión: {0:.3f}'.format(mejor_modelo.score(x_eval, y_eval)))
# En este ejemplo, primero utilizamos el objeto `GridSearchCV` que nos permite realizar *grid search* junto con validación cruzada, luego comenzamos a ajustar el modelo con las diferentes combinaciones de los valores de los parámetros `criterion` y `max_depth`. Finalmente imprimimos el mejor resultado de precisión y los valores de los parámetros que utilizamos para obtenerlos; por último utilizamos este mejor modelo para realizar las predicciones con los datos de *evaluación*.
#
# Podemos ver que la precisión que obtuvimos con los datos de evaluación es casi idéntica a la que nos indicó *grid search*, lo que indica que el modelo *generaliza* muy bien.
#
# ### c) Reducción de dimensionalidad
#
# La **reducción de dimensiones** es frecuentemente usada como una etapa de preproceso en el entrenamiento de
# sistemas, y consiste en escoger un subconjunto de
# variables, de tal manera, que el espacio de características
# quede óptimamente reducido de acuerdo a un criterio de
# evaluación, cuyo fin es distinguir el subconjunto que
# representa mejor el espacio inicial de entrenamiento.
#
# Como cada característica que se incluye en el análisis,
# puede incrementar el costo y el tiempo de proceso de los
# sistemas, hay una fuerte motivación para diseñar e
# implementar sistemas con pequeños conjuntos de
# características. Sin dejar de lado, que al mismo tiempo,
# hay una opuesta necesidad de incluir un conjunto
# suficiente de características para lograr un alto
# rendimiento.
#
#
# La reducción de dimensionalidad se puede separar en dos tipos: **Selección de aributos** y **Extracción de atributos**
#
# ### c.1) Selección de atributos
#
# Proceso por el cual seleccionamos un subconjunto de atributos (representados por cada una de las columnas en un datasetde forma tabular) que son más relevantes para la construcción del modelo predictivo sobre el que estamos trabajando.
#
#
# El objetivo de la selección de atributos es :
# * mejorar la capacidad predictiva de nuestro modelo,
# * proporcionando modelos predictivos más rápidos y eficientes,
# * proporcionar una mejor comprensión del proceso subyacente que generó los datos.
#
#
# Los métodos de selección de atributos se pueden utilizar para identificar y eliminar los atributos innecesarios, irrelevantes y redundantes que no contribuyen a la exactitud del modelo predictivo o incluso puedan disminuir su precisión.
#
#
#
# ### Algoritmos para selección de atributos
#
# Podemos encontrar dos clases generales de algoritmos de [selección de atributos](https://en.wikipedia.org/wiki/Feature_selection): los métodos de filtrado, y los métodos empaquetados.
#
# * **Métodos de filtrado**: Estos métodos aplican una medida estadística para asignar una puntuación a cada atributo. Los atributos luego son clasificados de acuerdo a su puntuación y son, o bien seleccionados para su conservación o eliminados del conjunto de datos. Los métodos de filtrado son a menudo [univariantes](https://en.wikipedia.org/wiki/Univariate_analysis) y consideran a cada atributo en forma independiente, o con respecto a la variable dependiente.
# * Ejemplos : [prueba de Chi cuadrado](https://es.wikipedia.org/wiki/Prueba_%CF%87%C2%B2), [prueba F de Fisher](https://es.wikipedia.org/wiki/Prueba_F_de_Fisher), [ratio de ganancia de información](https://en.wikipedia.org/wiki/Information_gain_ratio) y los [coeficientes de correlación](https://es.wikipedia.org/wiki/Correlaci%C3%B3n).
#
#
# * **Métodos empaquetados**: Estos métodos consideran la selección de un conjunto de atributos como un problema de búsqueda, en donde las diferentes combinaciones son evaluadas y comparadas. Para hacer estas evaluaciones se utiliza un modelo predictivo y luego se asigna una puntuación a cada combinación basada en la precisión del modelo.
# * Un ejemplo de este método es el algoritmo de eliminación recursiva de atributos.
# Un método popular en sklearn es el método **SelectKBest**, el cual selecciona las características de acuerdo con las $k$ puntuaciones más altas (de acuerdo al criterio escogido).
#
# Para entender este conceptos, transformemos el conjunto de datos anterior a formato pandas DataFrame.
df = pd.DataFrame(X)
df.columns = [f'V{k}' for k in range(1,X.shape[1]+1)]
df['y']=y
df.head()
# Comencemos con un simple algoritmo [univariante](https://en.wikipedia.org/wiki/Univariate_analysis) que aplica el método de filtrado. Para esto vamos a utilizar los objetos `SelectKBest` y `f_classif` del paquete `sklearn.feature_selection`.
#
# Este algoritmo selecciona a los mejores atributos basándose en una prueba estadística [univariante](https://en.wikipedia.org/wiki/Univariate_analysis). Al objeto `SelectKBest` le pasamos la prueba estadística que vamos a a aplicar, en este caso una [prueba F](https://es.wikipedia.org/wiki/Prueba_F_de_Fisher) definida por el objeto `f_classif`, junto con el número de atributos a seleccionar. El algoritmo va a aplicar la prueba a todos los atributos y va a seleccionar los que mejor resultado obtuvieron.
#
# +
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
# Separamos las columnas objetivo
x_training = df.drop(['y',], axis=1)
y_training = df['y']
# Aplicando el algoritmo univariante de prueba F.
k = 15 # número de atributos a seleccionar
columnas = list(x_training.columns.values)
seleccionadas = SelectKBest(f_classif, k=k).fit(x_training, y_training)
# -
catrib = seleccionadas.get_support()
atributos = [columnas[i] for i in list(catrib.nonzero()[0])]
atributos
# Como podemos ver, el algoritmo nos seleccionó la cantidad de atributos que le indicamos; en este ejemplo decidimos seleccionar solo 15; obviamente, cuando armemos nuestro modelo final vamos a tomar un número mayor de atributos.
# ### ii) Extracción de atributos
#
# La **extracción de atributos** comienza a partir de un conjunto inicial de datos medidos y crea valores derivados (características) destinados a ser informativos y no redundantes, lo que facilita los pasos de aprendizaje y generalización posteriores, y en algunos casos conduce a a mejores interpretaciones humanas.
#
# Cuando los datos de entrada a un algoritmo son demasiado grandes para ser procesados y se sospecha que son redundantes (por ejemplo, la misma medición en pies y metros, o la repetitividad de las imágenes presentadas como píxeles), entonces se puede transformar en un conjunto reducido de características (también denominado un vector de características).
#
# ### Análisis de componentes principales
#
#
# El **análisis de componentes principales** (PCA) es un procedimiento estadístico que utiliza una transformación ortogonal para convertir un conjunto de observaciones de variables posiblemente correlacionadas (entidades que adquieren varios valores numéricos) en un conjunto de valores de variables linealmente no correlacionadas llamadas componentes principales.
#
# Esta transformación se define de tal manera que el primer componente principal tiene la mayor varianza posible (es decir, representa la mayor variabilidad posible en los datos), y cada componente subsiguiente a su vez tiene la mayor varianza posible bajo la restricción que es ortogonal a los componentes anteriores.
#
# Los vectores resultantes (cada uno de los cuales es una combinación lineal de las variables y contiene n observaciones) son un conjunto de bases ortogonales no correlacionadas. PCA es sensible a la escala relativa de las variables originales.
#
# <img alt="Series de tiempo con Python" title="Series de tiempo con Python" src="https://devopedia.org/images/article/139/4543.1548137789.jpg" high=400px width=600px>
# Para ser un poco más ilustrativo con este ejemplo, ocuparemos el conjunto de datos **Iris**
# +
# cargar datos
from sklearn import datasets
iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['TARGET'] = iris.target
df.columns = ['sepal length', 'sepal width' , 'petal lenght', 'petal width', 'target']
df.head() # estructura de nuestro dataset.
# -
#
# Como el algoritmo de PCA es sensible a la escala relativa de las variables originales, es necesario **escalar** los datos, es decir, centrar y escalar los datos. De esta manera, el valor promedio de cada registro sería 0 y la varianza para cada registro sería 1.
#
# Para escalar nuestros datos, usaríamos `StandardScalar`, que está disponible en sklearn.
# +
from sklearn.preprocessing import StandardScaler
features = ['sepal length', 'sepal width' , 'petal lenght', 'petal width']
x = df.loc[:, features].values
y = df.loc[:, ['target']].values
x = StandardScaler().fit_transform(x)
# -
# Luego aplicamos el algoritmo de `PCA` sobre todos los componenetes, para ver cuanta varianza explica cada variable.
# +
# ajustar modelo
from sklearn.decomposition import PCA
pca = PCA(n_components=4)
principalComponents = pca.fit_transform(x)
# graficar varianza por componente
percent_variance = np.round(pca.explained_variance_ratio_* 100, decimals =2)
columns = ['PC1', 'PC2', 'PC3', 'PC4']
plt.figure(figsize=(12,4))
plt.bar(x= range(1,5), height=percent_variance, tick_label=columns)
plt.ylabel('Percentate of Variance Explained')
plt.xlabel('Principal Component')
plt.title('PCA Scree Plot')
plt.show()
# -
# Ahora realizamos el gráfico pero respecto a la suma acumulada.
# +
# graficar varianza por la suma acumulada de los componente
percent_variance_cum = np.cumsum(percent_variance)
columns = ['PC1', 'PC1+PC2', 'PC1+PC2+PC3', 'PC1+PC2+PC3+PC4']
plt.figure(figsize=(12,4))
plt.bar(x= range(1,5), height=percent_variance_cum, tick_label=columns)
plt.ylabel('Percentate of Variance Explained')
plt.xlabel('Principal Component Cumsum')
plt.title('PCA Scree Plot')
plt.show()
# -
# Es decir, que la varianza explicada de las variables se puede explicar en 95.81% considerando solo las dos componentes principales (PC1 y PC2).
#
# Realicemos el ajuste para las dos componentes principales y realicemos la nueva gráfica proyectada a estas componentes
# +
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDataframe = pd.DataFrame(data = principalComponents, columns = ['PC1', 'PC2'])
targetDataframe = df[['target']]
newDataframe = pd.concat([principalDataframe, targetDataframe],axis = 1)
newDataframe.head()
# +
sns.set(rc={'figure.figsize':(12,4)})
sns.scatterplot(x='PC1',
y='PC2',
hue='target',
palette="Set1",
data=newDataframe)
# -
# Finalmente, para poder trabajar los modelos pero en con sus componenetes proyectadas, se debe aplicar la función `fit_transform` sobre el cojunto de atributos.
# +
# componenetes proyectadas
Y= df[['target']]
X_new = pca.fit_transform(df[['sepal length', 'sepal width', 'petal lenght', 'petal width']])
# -
# crear conjunto de entrenamiento y testeo pero en con las componentes proyectadas
X_train, X_test, Y_train, Y_test = train_test_split(X_new, Y, test_size=0.2, random_state = 2)
# Para finalizar, se deja como ejercicio aplicar el algoritmo de PCA para el conjunto de datos **make_classification** para el número de componentes principales: $$[5,10,20,,50,70,80,90]$$
#
# y sacar sus propias conclusiones.
# ## Referencia
#
# 1. [K-Fold Cross Validation](https://medium.com/datadriveninvestor/k-fold-cross-validation-6b8518070833)
# 2. [Cross Validation and Grid Search for Model Selection in Python](https://stackabuse.com/cross-validation-and-grid-search-for-model-selection-in-python/)
# 3. [Feature selection for supervised models using SelectKBest](https://www.kaggle.com/jepsds/feature-selection-using-selectkbest?utm_campaign=News&utm_medium=Community&utm_source=DataCamp.com)
# 4. [In Depth: Principal Component Analysis](https://www.aprendemachinelearning.com/comprende-principal-component-analysis/)
| labs/C2_machine_learning/05_overfitting/04_reducir_overfitting.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// [this doc on github](https://github.com/dotnet/interactive/tree/master/samples/notebooks/csharp/Samples)
//
// +
#i "nuget:https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet5/nuget/v3/index.json"
#i "nuget:https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet-tools/nuget/v3/index.json"
#r "nuget:Microsoft.Data.Analysis,0.2.0"
#r "nuget: XPlot.Plotly.Interactive, 4.0.1"
using Microsoft.Data.Analysis;
using static Microsoft.DotNet.Interactive.Formatting.PocketViewTags;
using Microsoft.DotNet.Interactive.Formatting;
// -
PrimitiveDataFrameColumn<DateTime> dateTimes = new PrimitiveDataFrameColumn<DateTime>("DateTimes"); // Default length is 0.
PrimitiveDataFrameColumn<int> ints = new PrimitiveDataFrameColumn<int>("Ints", 3); // Makes a column of length 3. Filled with nulls initially
StringDataFrameColumn strings = new StringDataFrameColumn("Strings", 3); // Makes a column of length 3. Filled with nulls initially
// Append 3 values to dateTimes
dateTimes.Append(DateTime.Parse("2019/01/01"));
dateTimes.Append(DateTime.Parse("2019/01/01"));
dateTimes.Append(DateTime.Parse("2019/01/02"));
DataFrame df = new DataFrame(dateTimes, ints, strings ); // This will throw if the columns are of different lengths
df
using Microsoft.AspNetCore.Html;
Formatter.Register<DataFrame>((df, writer) =>
{
var headers = new List<IHtmlContent>();
headers.Add(th(i("index")));
headers.AddRange(df.Columns.Select(c => (IHtmlContent) th(c.Name)));
var rows = new List<List<IHtmlContent>>();
var take = 20;
for (var i = 0; i < Math.Min(take, df.Rows.Count); i++)
{
var cells = new List<IHtmlContent>();
cells.Add(td(i));
foreach (var obj in df.Rows[i])
{
cells.Add(td(obj));
}
rows.Add(cells);
}
var t = table(
thead(
headers),
tbody(
rows.Select(
r => tr(r))));
writer.Write(t);
}, "text/html");
df
// To change a value directly through df
df[0, 1] = 10; // 0 is the rowIndex, and 1 is the columnIndex. This sets the 0th value in the Ints columns to 10
df
// Modify ints and strings columns by indexing
ints[1] = 100;
strings[1] = "Foo!";
df
// Indexing can throw when types don't match.
// ints[1] = "this will throw because I am a string";
// Info can be used to figure out the type of data in a column.
df.Info()
// Add 5 to ints through the DataFrame
df["Ints"].Add(5, inPlace: true);
df
// We can also use binary operators. Binary operators produce a copy, so assign it back to our Ints column
df["Ints"] = (ints / 5) * 100;
df
// Fill nulls in our columns, if any. Ints[2], Strings[0] and Strings[1] are null
df["Ints"].FillNulls(-1, inPlace: true);
df["Strings"].FillNulls("Bar", inPlace: true);
df
// To inspect the first row
DataFrameRow row0 = df.Rows[0];
row0
using Microsoft.AspNetCore.Html;
Formatter.Register<DataFrameRow>((dataFrameRow, writer) =>
{
var cells = new List<IHtmlContent>();
cells.Add(td(i));
foreach (var obj in dataFrameRow)
{
cells.Add(td(obj));
}
var t = table(
tbody(
cells));
writer.Write(t);
}, "text/html");
row0
// Filter rows based on equality
PrimitiveDataFrameColumn<bool> boolFilter = df["Strings"].ElementwiseEquals("Bar");
boolFilter
DataFrame filtered = df.Filter(boolFilter);
filtered
// Sort our dataframe using the Ints column
DataFrame sorted = df.Sort("Ints");
sorted
// GroupBy
GroupBy groupBy = df.GroupBy("DateTimes");
// Count of values in each group
DataFrame groupCounts = groupBy.Count();
groupCounts
// Alternatively find the sum of the values in each group in Ints
DataFrame intsGroupSum = groupBy.Sum("Ints");
intsGroupSum
using XPlot.Plotly;
using System.Linq;
#r "nuget:MathNet.Numerics,4.9.0"
// +
using MathNet.Numerics.Distributions;
double mean = 0;
double stdDev = 0.1;
MathNet.Numerics.Distributions.Normal normalDist = new Normal(mean, stdDev);
// -
PrimitiveDataFrameColumn<double> doubles = new PrimitiveDataFrameColumn<double>("Normal Distribution", normalDist.Samples().Take(1000));
display(Chart.Plot(
new Histogram()
{
x = doubles,
nbinsx = 30
}
));
| samples/notebooks/csharp/Samples/DataFrame-Getting Started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %store -r x_train
# %store -r x_test
# %store -r y_train
# %store -r y_test
# %store -r yy
# %store -r le
# +
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from sklearn import metrics
num_labels = yy.shape[1]
filter_size = 2
model = Sequential()
model.add(Dense(256, input_shape=(40,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_labels))
model.add(Activation('softmax'))
# -
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
# +
model.summary()
score = model.evaluate(x_test, y_test, verbose=0)
accuracy = 100*score[1]
print("Pre-training accuracy: %.4f%%" % accuracy)
# +
from keras.callbacks import ModelCheckpoint
from datetime import datetime
num_epochs = 100
num_batch_size = 32
start = datetime.now()
model.fit(x_train, y_train, batch_size=num_batch_size, epochs=num_epochs, validation_data=(x_test, y_test), verbose=1)
duration = datetime.now() -start
print("Training completed in timeL ", duration)
# +
score = model.evaluate(x_train, y_train, verbose=1)
print("Training Accuracy: ", score[1])
score = model.evaluate(x_test, y_test, verbose=1)
print("Training Accuracy: ", score[1])
# -
| Model_Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Pandas
#
# ### Instructions
#
# This assignment will be done completely inside this Jupyter notebook with answers placed in the cell provided.
#
# All python imports that are needed shown.
#
# Follow all the instructions in this notebook to complete these tasks.
#
# Make sure the CSV data files is in the same folder as this notebook - alumni.csv, groceries.csv
# Imports needed to complete this assignment
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#import seaborn as sns
# ### Question 1 : Import CSV file (1 Mark)
#
#
# Write code to load the alumni csv dataset into a Pandas DataFrame called 'alumni'.
#
#q1 (1)
alumni = pd.read_csv("alumni.csv")
alumni
# ### Question 2 : Understand the data set (5 Marks)
#
# Use the following pandas commands to understand the data set: a) head, b) tail, c) dtypes, d) info, e) describe
#a) (1)
alumni.head()
#b) (1)
alumni.tail()
#c) (1)
alumni.dtypes
#d) (1)
alumni.info()
#e) (1)
alumni.describe()
# ### Question 3 : Cleaning the data set - part A (3 Marks)
#
# a) Use clean_currency method below to strip out commas and dollar signs from Savings ($) column and put into a new column called 'Savings'.
# +
def clean_currency(curr):
return float(curr.replace(",", "").replace("$", ""))
clean_currency(" $66,000")
# +
#a) (2)
alumni["Savings"]= alumni["Savings ($)"].apply(clean_currency)
alumni.head()
# -
# b) Uncomment 'alumni.dtypes.Savings' to check that the type change has occurred
#b) (1)
alumni.dtypes.Savings
# +
# Question 4 : Cleaning the data set - part B (5 Marks)
#a) Run the 'alumni["Gender"].value_counts()' to see the incorrect 'M' fields that need to be converted to 'Male'
# -
# a) (1)
alumni["Gender"].value_counts()
# +
#b) Now use a '.str.replace' on the 'Gender' column to covert the incorrect 'M' fields. Hint: We must use ^...$ to restrict the pattern to match the whole string.
# b) (1)
alumni["Gender"]=alumni["Gender"].str.replace("^[M]$","Male")
# +
#c) That didn't the set alumni["Gender"] column however. You will need to update the column when using the replace command 'alumni["Gender"]=<replace command>', show how this is done below
# -
# c) (1)
alumni.reset_index(drop=True)
# +
# d)You can set it directly by using the df.loc command, show how this can be done by using the'df.loc[row_indexer,col_indexer]= value' command to convert the 'M' to 'Male'#
# -
#d) (1)
# e) Now run the 'value_counts' for Gender again to see the correct columns - 'Male' and 'Female'
# e) (1)
alumni["Gender"].value_counts()
# ### Question 5 : Working with the data set (4)
#
# a) get the median, b) mean and c) standard deviation for the 'Salary' column
# a)(1)
alumni["Salary"].median()
# b)(1)
alumni["Salary"].mean()
# c)(1)
alumni["Salary"].std()
# d) identify which alumni paid more than $15000 in fees, using the 'Fee' column
# d) (1)
x=alumni[alumni["Fee"]>15000].index.values.astype(int)[0]
x
# ### Question 6 : Visualise the data set (4 Marks)
#
# a) Using the 'Diploma Type' column, plot a bar chart and show its value counts.
#a) (1)
plt.rcParams["figure.figsize"]=(5,5)
alumni['Diploma Type'].value_counts().plot(kind='bar')
plt.ylabel('Number of people')
plt.xlabel('Diploma type')
plt.title("Bar Plot")
# b) Now create a box plot comparison between 'Savings' and 'Salary' columns
# +
#b) (1)
# -
# c) Generate a histogram with the 'Salary' column and use 12 bins.
#c) (1)
d=alumni["Salary"]
n, bins, patches = plt.hist(x=d, bins=12, color='#0504aa',
alpha=0.7, rwidth=0.85)
plt.grid(axis='y',alpha=0.75)
plt.xlabel('Salary')
plt.ylabel('No. of people')
plt.title('My Histogram')
plt.show()
# d) Generate a scatter plot comparing 'Salary' and 'Savings' columns.
#d) (1)
sns.set()
sns.set_style("Whitegrid")
sns.relplot(x="Salary", y="Savings",hue="Gender",data="alumni")
# ### Question 7 : Contingency Table (2 Marks)
#
# Using both the 'Martial Status' and 'Defaulted' create a contingency table. Hint: crosstab
# Q7 (2)
| .ipynb_checkpoints/Pandas Assignment-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py3-TF2.0]
# language: python
# name: conda-env-py3-TF2.0-py
# ---
# # Practical example. Audiobooks
# ## Problem
#
# You are given data from an Audiobook app. Logically, it relates only to the audio versions of books. Each customer in the database has made a purchase at least once, that's why he/she is in the database. We want to create a machine learning algorithm based on our available data that can predict if a customer will buy again from the Audiobook company.
#
# The main idea is that if a customer has a low probability of coming back, there is no reason to spend any money on advertizing to him/her. If we can focus our efforts ONLY on customers that are likely to convert again, we can make great savings. Moreover, this model can identify the most important metrics for a customer to come back again. Identifying new customers creates value and growth opportunities.
#
# You have a .csv summarizing the data. There are several variables: Customer ID, Book length in mins_avg (average of all purchases), Book length in minutes_sum (sum of all purchases), Price Paid_avg (average of all purchases), Price paid_sum (sum of all purchases), Review (a Boolean variable), Review (out of 10), Total minutes listened, Completion (from 0 to 1), Support requests (number), and Last visited minus purchase date (in days).
#
# So these are the inputs (excluding customer ID, as it is completely arbitrary. It's more like a name, than a number).
#
# The targets are a Boolean variable (so 0, or 1). We are taking a period of 2 years in our inputs, and the next 6 months as targets. So, in fact, we are predicting if: based on the last 2 years of activity and engagement, a customer will convert in the next 6 months. 6 months sounds like a reasonable time. If they don't convert after 6 months, chances are they've gone to a competitor or didn't like the Audiobook way of digesting information.
#
# The task is simple: create a machine learning algorithm, which is able to predict if a customer will buy again.
#
# This is a classification problem with two classes: won't buy and will buy, represented by 0s and 1s.
#
# Good luck!
# ## Create the machine learning algorithm
#
#
# ### Import the relevant libraries
# we must import the libraries once again since we haven't imported them in this file
import numpy as np
import tensorflow as tf
# ### Data
# +
# let's create a temporary variable npz, where we will store each of the three Audiobooks datasets
npz = np.load('Audiobooks_data_train.npz')
# we extract the inputs using the keyword under which we saved them
# to ensure that they are all floats, let's also take care of that
train_inputs = npz['inputs'].astype(np.float)
# targets must be int because of sparse_categorical_crossentropy (we want to be able to smoothly one-hot encode them)
train_targets = npz['targets'].astype(np.int)
# we load the validation data in the temporary variable
npz = np.load('Audiobooks_data_validation.npz')
# we can load the inputs and the targets in the same line
validation_inputs, validation_targets = npz['inputs'].astype(np.float), npz['targets'].astype(np.int)
# we load the test data in the temporary variable
npz = np.load('Audiobooks_data_test.npz')
# we create 2 variables that will contain the test inputs and the test targets
test_inputs, test_targets = npz['inputs'].astype(np.float), npz['targets'].astype(np.int)
# -
| Resources/Data-Science/Deep-Learning/AudioBooks NN/TensorFlow_Audiobooks_Machine_Learning_Part1_with_comments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LAMP FPGA
# This notebook executes the LAMP model inference on Ultra96-V2 board
# +
from pynq_dpu import DpuOverlay
import numpy as np
import random
import cv2
from MPTimeSeriesGenerator import MPTimeseriesGenerator
import scipy.io as sio
import numpy as np
from pynq import Clocks
import threading
import time
# -
# ## Clock Frequency
# We set the PL clock frequency to 100MHz and PS clock frequency to 1.2GHz
# +
Clocks.cpu_mhz = 1200.0
Clocks.fclk0_mhz = 100.0
Clocks.fclk1_mhz = 100.0
Clocks.fclk2_mhz = 100.0
Clocks.fclk3_mhz = 100.0
print(f'CPU: {Clocks.cpu_mhz:.6f}MHz')
print(f'FCLK0: {Clocks.fclk0_mhz:.6f}MHz')
print(f'FCLK1: {Clocks.fclk1_mhz:.6f}MHz')
print(f'FCLK2: {Clocks.fclk2_mhz:.6f}MHz')
print(f'FCLK3: {Clocks.fclk3_mhz:.6f}MHz')
# -
# Prepare the DPU overlay and the compiled LAMP model, create dpu kernel
# +
overlay = DpuOverlay("dpu.bit")
overlay.load_model("dpu_lamp_0.elf")
n2cube.dpuOpen()
kernel = n2cube.dpuLoadKernel("lamp_0")
# -
# ## Prepare input data
# Prepare the time series input data using MPTimerSeriesGenerator class, this class takes in a sequence of data-points gathered at equal intervals with other parameters such as window size, stride, sample rate, etc., and generates batches of temporal data used as model input
# +
matrix_profile_window = 256
sample_rate = 20
lookbehind_seconds = 0
lookahead_seconds = 0
subsequence_stride = 256
lookbehind = sample_rate * lookbehind_seconds
num_outputs = 256
lookahead = sample_rate * lookahead_seconds
forward_sequences = lookahead + num_outputs
subsequences_per_input = lookbehind + num_outputs + lookahead
channel_stride = 8
n_input_series = 1
subsequences_per_input = subsequences_per_input // channel_stride
high_weight = 1
low_thresh = -1
high_thresh = 1
batch_size = 128
all_data = sio.loadmat('insect_no_classification.mat')
mp_val = np.array(all_data['mp_val'])
ts_val = np.array(all_data['ts_val'])
valid_gen = MPTimeseriesGenerator(ts_val, mp_val, num_input_timeseries=1, internal_stride=8, num_outputs=256,lookahead=forward_sequences, lookbehind=lookbehind, important_upper_threshold=high_thresh, important_lower_threshold=low_thresh, important_weight=high_weight, length=256, mp_window=256, stride=num_outputs, batch_size)
# -
# Having a batch normalization layer before the activation layer reduces the compiled model accuracy, since the tool can not merge these layers; hence, this layer has been removed from the compiled model and the normalized data is computed in the PS and then fed into the model
# +
# batch normalization
epsilon=1e-3
N, C, H, W = data.shape
# mini-batch mean
mean = np.mean(data, axis=(0, 2, 3))
# mini-batch variance
variance = np.mean((data - mean.reshape((1, C, 1, 1))) ** 2, axis=(0, 2, 3))
# normalize
X_hat = (data - mean.reshape((1, C, 1, 1))) * 1.0 / np.sqrt(variance.reshape((1, C, 1, 1)) + epsilon)
# -
# ## Run the application
# In order to increase the DPU kernel utilization and achieve a more efficient scheduling, we use a multithreading model. Each thread runs the model for one batch and moves to the next batch. The model is broken into four kernels, first we run the first kernel on FPGA, store the results, and feed them into the next kernel
#
# +
def run_dpu_task(index):
task = n2cube.dpuCreateTask(0)
result_index = index
scale_in = n2cube.dpuGetInputTensorScale(task, "conv2d_4_Conv2D", 0)
scale_out = n2cube.dpuGetOutputTensorScale(task, "conv2d_12_Conv2D", 0)
while index < len(valid_gen):
vg = valid_gen[index]
x_test, y_test = vg
x_test = np.float32(x_test)
for i in range(batch_size):
data = x_test[i][np.newaxis,...]
feed_data = data / scale_in
input_len = n2cube.dpuGetInputTensorSize(task, "conv2d_4_Conv2D")
n2cube.dpuSetInputTensorInHWCFP32(task, "conv2d_4_Conv2D", feed_data, input_len)
n2cube.dpuSetInputTensorInHWCFP32(task, "conv2d_1_Conv2D", feed_data, input_len)
n2cube.dpuRunTask(task)
conv_size = n2cube.dpuGetOutputTensorSize(task, "conv2d_12_Conv2D")
conv_out = n2cube.dpuGetOutputTensorInHWCFP32(task, "conv2d_12_Conv2D", conv_size)
conv_out = np.reshape(conv_out1, (1, 256, 1, 192))
results[result_index].append(conv_out)
index += thread_num
n2cube.dpuDestroyTask(task)
thread_num = 8
thread_all = []
results = [None] * thread_num
for i in range(thread_num):
t1 = threading.Thread(target=run_dpu_task, args=(i))
thread_all.append(t1)
for t in thread_all:
t.start()
for t in thread_all:
t.join()
n2cube.dpuDestroyKernel(kernel)
# -
# The second kernel which is global average pool is implemented in the host CPU
# +
results_avg = [None] * thread_num
for i in range(thread_num):
for r in results[i]:
out_scaled = r / scale_out
global_avg = np.apply_over_axes(np.mean, out_scaled, [1, 2])
results_avg[i].append(globa_avg)
# -
# The third kernel which is the dense layer is implemented on the FPGA, similar to the first layer we use a multithreading model to implement this kernel and gather the results
#
# +
overlay.load_model("dpu_dense_2.elf")
n2cube.dpuOpen()
kernel = n2cube.dpuLoadKernel("dense_2")
# +
def run_dpu_task_dense(index):
task = n2cube.dpuCreateTask(kernel, 0)
scale_in = n2cube.dpuGetInputTensorScale(task, "dense_1_MatMul", 0)
scale_out = n2cube.dpuGetOutputTensorScale(task, "dense_1_MatMul", 0)
for res in results_avg[index]:
feed_input = res / scale_in
input_len = n2cube.dpuGetInputTensorSize(task, "dense_1_MatMul")
n2cube.dpuSetInputTensorInHWCFP32(task, "dense_1_MatMul", feed_data, input_len)
n2cube.dpuRunTask(task)
dense_size = n2cube.dpuGetOutputTensorSize(task, "dense_1_MatMul")
dense_out = n2cube.dpuGetOutputTensorInHWCFP32(task, "dense_1_MatMul", dense_size)
dense_out = np.reshape(conv_out1, (1, 1, 1, 256))
result_dense[index].append(dense_out)
n2cube.dpuDestroyTask(task)
thread_all = []
result_dense = [None] * thread_num
for i in range(thread_num):
t1 = threading.Thread(target=run_dpu_task_dense, args=(i))
threadAll.append(t1)
for t in thread_all:
t.start()
for t in thread_all:
t.join()
n2cube.dpuDestroyKernel(kernel)
# -
# Finally, the last layer (Sigmoid function) is implemented in host and the results are written in a text file
# +
f = open('predict.txt','a+')
for i in range(thread_num):
for r in result_dense[i]:
out_scaled = r / scale_out
sigmoid_out = 1/(1 + np.exp(-out_scaled))
np.savetxt(f, sigmoid_out)
f.close()
# +
#print(np.mean(np.abs((sigmoid_out - y) / sigmoid_out)) * 100)
| src/lamp_dpu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: srs
# language: python
# name: srs
# ---
# # Table of contents
#
# 1. [Load the dataset](#load_the_dataset)
# 2. [Split the dataset](#split_the_dataset)
# 3. [Fitting the recommender](#fitting)
# 4. [Sequential evaluation](#seq_evaluation)
# 4.1 [Evaluation with sequentially revaeled user profiles](#eval_seq_rev)
# 4.2 [Evaluation with "static" user profiles](#eval_static)
# 5. [Analysis of next-item recommendation](#next-item)
# 5.1 [Evaluation with different recommendation list lengths](#next-item_list_length)
# 5.2 [Evaluation with different user profile lengths](#next-item_profile_length)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from util.data_utils import create_seq_db_filter_top_k, sequences_to_spfm_format
from util.split import last_session_out_split
from util.metrics import precision, recall, mrr
from util import evaluation
from recommenders.FPMCRecommender import FPMCRecommender
import datetime
def get_test_sequences_and_users(test_data, given_k, train_users):
# we can run evaluation only over sequences longer than abs(LAST_K)
mask = test_data['sequence'].map(len) > abs(given_k)
mask &= test_data['user_id'].isin(train_users)
test_sequences = test_data.loc[mask, 'sequence'].values
test_users = test_data.loc[mask, 'user_id'].values
return test_sequences, test_users
# <a id='load_the_dataset'></a>
# # 1. Load the dataset
#
# For this hands-on session we will use a dataset of user-listening sessions crawled from [last.fm](https://www.last.fm/). In detail, we will use a subset of the following dataset:
#
# * 30Music listening and playlists dataset, Turrin et al., ACM RecSys 2015 ([paper](https://home.deib.polimi.it/pagano/portfolio/papers/30Musiclisteningandplaylistsdataset.pdf))
# +
# unzip the dataset, if you haven't already done it
# # ! unzip datasets/sessions.zip -d datasets
# +
dataset_path = 'datasets/sessions.csv'
# load this sample if you experience a severe slowdown with the previous dataset
#dataset_path = 'datasets/sessions_sample_10.csv'
# for the sake of speed, let's keep only the top-1k most popular items in the last month
dataset = create_seq_db_filter_top_k(path=dataset_path, topk=1000, last_months=1)
# -
# Let's see at how the dataset looks like
dataset.head()
# Let's show some statistics about the dataset
from collections import Counter
cnt = Counter()
dataset.sequence.map(cnt.update);
# +
sequence_length = dataset.sequence.map(len).values
n_sessions_per_user = dataset.groupby('user_id').size()
print('Number of items: {}'.format(len(cnt)))
print('Number of users: {}'.format(dataset.user_id.nunique()))
print('Number of sessions: {}'.format(len(dataset)) )
print('\nSession length:\n\tAverage: {:.2f}\n\tMedian: {}\n\tMin: {}\n\tMax: {}'.format(
sequence_length.mean(),
np.quantile(sequence_length, 0.5),
sequence_length.min(),
sequence_length.max()))
print('Sessions per user:\n\tAverage: {:.2f}\n\tMedian: {}\n\tMin: {}\n\tMax: {}'.format(
n_sessions_per_user.mean(),
np.quantile(n_sessions_per_user, 0.5),
n_sessions_per_user.min(),
n_sessions_per_user.max()))
# -
print('Most popular items: {}'.format(cnt.most_common(5)))
# <a id='split_the_dataset'></a>
# # 2. Split the dataset
# For simplicity, let's split the dataset by assigning the **last session** of every user to the **test set**, and **all the previous** ones to the **training set**.
train_data, test_data = last_session_out_split(dataset)
print("Train sessions: {} - Test sessions: {}".format(len(train_data), len(test_data)))
# <a id='fitting'></a>
# # 3. Fitting the recommender
#
# Here we fit the recommedation algorithm over the sessions in the training set.
# This recommender is based on the following paper:
#
# _<NAME>., <NAME>., & <NAME>. (2010). Factorizing personalized Markov chains for next-basket recommendation. Proceedings of the 19th International Conference on World Wide Web - WWW ’10, 811_
#
# In short, FPMC factorizes a personalized order-1 transition tensor using Tensor Factorization with pairwise loss function akin to BPR (Bayesian Pairwise Ranking).
#
# <img src="images/fpmc.png" width="200px" />
#
# TF allows to impute values for the missing transitions between items for each user. For this reason, FPMC can be used for generating _personalized_ recommendations in session-aware recommenders as well.
#
# In this notebook, you will be able to change the number of latent factors and a few other learning hyper-parameters and see the impact on the recommendation quality.
#
# The class `FPMCRecommender` has the following initialization hyper-parameters:
# * `n_factor`: (optional) the number of latent factors
# * `learn_rate`: (optional) the learning rate
# * `regular`: (optional) the L2 regularization coefficient
# * `n_epoch`: (optional) the number of training epochs
# * `n_neg`: (optional) the number of negative samples used in BPR learning
#
recommender = FPMCRecommender(n_factor=16,
n_epoch=5)
recommender.fit(train_data)
# <a id='seq_evaluation'></a>
#
# # 4. Sequential evaluation
#
# In the evaluation of sequence-aware recommenders, each sequence in the test set is split into:
# - the _user profile_, used to compute recommendations, is composed by the first *k* events in the sequence;
# - the _ground truth_, used for performance evaluation, is composed by the remainder of the sequence.
#
# In the cells below, you can control the dimension of the _user profile_ by assigning a **positive** value to `GIVEN_K`, which correspond to the number of events from the beginning of the sequence that will be assigned to the initial user profile. This ensures that each user profile in the test set will have exactly the same initial size, but the size of the ground truth will change for every sequence.
#
# Alternatively, by assigning a **negative** value to `GIVEN_K`, you will set the initial size of the _ground truth_. In this way the _ground truth_ will have the same size for all sequences, but the dimension of the user profile will differ.
METRICS = {'precision':precision,
'recall':recall,
'mrr': mrr}
TOPN = 10 # length of the recommendation list
# <a id='eval_seq_rev'></a>
# ## 4.1 Evaluation with sequentially revealed user-profiles
#
# Here we evaluate the quality of the recommendations in a setting in which user profiles are revealed _sequentially_.
#
# The _user profile_ starts from the first `GIVEN_K` events (or, alternatively, from the last `-GIVEN_K` events if `GIVEN_K<0`).
# The recommendations are evaluated against the next `LOOK_AHEAD` events (the _ground truth_).
# The _user profile_ is next expanded to the next `STEP` events, the ground truth is scrolled forward accordingly, and the evaluation continues until the sequence ends.
#
# In typical **next-item recommendation**, we start with `GIVEN_K=1`, generate a set of **alternatives** that will evaluated against the next event in the sequence (`LOOK_AHEAD=1`), move forward of one step (`STEP=1`) and repeat until the sequence ends.
#
# You can set the `LOOK_AHEAD='all'` to see what happens if you had to recommend a **whole sequence** instead of a set of a set of alternatives to a user.
#
# NOTE: Metrics are averaged over each sequence first, then averaged over all test sequences.
#
# ** (TODO) Try out with different evaluation settings to see how the recommandation quality changes. **
#
#
# ![](gifs/sequential_eval.gif)
# GIVEN_K=1, LOOK_AHEAD=1, STEP=1 corresponds to the classical next-item evaluation
GIVEN_K = 1
LOOK_AHEAD = 1
STEP=1
# +
test_sequences, test_users = get_test_sequences_and_users(test_data, GIVEN_K, train_data['user_id'].values) # we need user ids now!
print('{} sequences available for evaluation ({} users)'.format(len(test_sequences), len(np.unique(test_users))))
results = evaluation.sequential_evaluation(recommender,
test_sequences=test_sequences,
users=test_users,
given_k=GIVEN_K,
look_ahead=LOOK_AHEAD,
evaluation_functions=METRICS.values(),
top_n=TOPN,
scroll=True, # scrolling averages metrics over all profile lengths
step=STEP)
# -
print('Sequential evaluation (GIVEN_K={}, LOOK_AHEAD={}, STEP={})'.format(GIVEN_K, LOOK_AHEAD, STEP))
for mname, mvalue in zip(METRICS.keys(), results):
print('\t{}@{}: {:.4f}'.format(mname, TOPN, mvalue))
# <a id='eval_static'></a>
# ## 4.2 Evaluation with "static" user-profiles
#
# Here we evaluate the quality of the recommendations in a setting in which user profiles are instead _static_.
#
# The _user profile_ starts from the first `GIVEN_K` events (or, alternatively, from the last `-GIVEN_K` events if `GIVEN_K<0`).
# The recommendations are evaluated against the next `LOOK_AHEAD` events (the _ground truth_).
#
# The user profile is *not extended* and the ground truth *doesn't move forward*.
# This allows to obtain "snapshots" of the recommendation performance for different user profile and ground truth lenghts.
#
# Also here you can set the `LOOK_AHEAD='all'` to see what happens if you had to recommend a **whole sequence** instead of a set of a set of alternatives to a user.
#
# **(TODO) Try out with different evaluation settings to see how the recommandation quality changes.**
GIVEN_K = 1
LOOK_AHEAD = 'all'
STEP=1
# +
test_sequences, test_users = get_test_sequences_and_users(test_data, GIVEN_K, train_data['user_id'].values) # we need user ids now!
print('{} sequences available for evaluation ({} users)'.format(len(test_sequences), len(np.unique(test_users))))
results = evaluation.sequential_evaluation(recommender,
test_sequences=test_sequences,
users=test_users,
given_k=GIVEN_K,
look_ahead=LOOK_AHEAD,
evaluation_functions=METRICS.values(),
top_n=TOPN,
scroll=False # notice that scrolling is disabled!
)
# -
print('Sequential evaluation (GIVEN_K={}, LOOK_AHEAD={}, STEP={})'.format(GIVEN_K, LOOK_AHEAD, STEP))
for mname, mvalue in zip(METRICS.keys(), results):
print('\t{}@{}: {:.4f}'.format(mname, TOPN, mvalue))
# <a id='next-item'></a>
# ## 5. Analysis of next-item recommendation
#
# Here we propose to analyse the performance of the recommender system in the scenario of *next-item recommendation* over the following dimensions:
#
# * the *length* of the **recommendation list**, and
# * the *length* of the **user profile**.
#
# NOTE: This evaluation is by no means exhaustive, as different the hyper-parameters of the recommendation algorithm should be *carefully tuned* before drawing any conclusions. Unfortunately, given the time constraints for this tutorial, we had to leave hyper-parameter tuning out. A very useful reference about careful evaluation of (session-based) recommenders can be found at:
#
# * Evaluation of Session-based Recommendation Algorithms, Ludewig and Jannach, 2018 ([paper](https://arxiv.org/abs/1803.09587))
# <a id='next-item_list_length'></a>
# ### 5.1 Evaluation for different recommendation list lengths
GIVEN_K = 1
LOOK_AHEAD = 1
STEP = 1
topn_list = [1, 5, 10, 20, 50, 100]
# ensure that all sequences have the same minimum length
test_sequences, test_users = get_test_sequences_and_users(test_data, GIVEN_K, train_data['user_id'].values) # we need user ids now!
print('{} sequences available for evaluation ({} users)'.format(len(test_sequences), len(np.unique(test_users))))
# +
res_list = []
for topn in topn_list:
print('Evaluating recommendation lists with length: {}'.format(topn))
res_tmp = evaluation.sequential_evaluation(recommender,
test_sequences=test_sequences,
users=test_users,
given_k=GIVEN_K,
look_ahead=LOOK_AHEAD,
evaluation_functions=METRICS.values(),
top_n=topn,
scroll=True, # here we average over all profile lengths
step=STEP)
mvalues = list(zip(METRICS.keys(), res_tmp))
res_list.append((topn, mvalues))
# -
# show separate plots per metric
fig, axes = plt.subplots(nrows=1, ncols=len(METRICS), figsize=(15,5))
res_list_t = list(zip(*res_list))
for midx, metric in enumerate(METRICS):
mvalues = [res_list_t[1][j][midx][1] for j in range(len(res_list_t[1]))]
ax = axes[midx]
ax.plot(topn_list, mvalues)
ax.set_title(metric)
ax.set_xticks(topn_list)
ax.set_xlabel('List length')
# <a id='next-item_profile_length'></a>
# ### 5.2 Evaluation for different user profile lengths
given_k_list = [1, 2, 3, 4]
LOOK_AHEAD = 1
STEP = 1
TOPN = 20
# +
res_list = []
for gk in given_k_list:
print('Evaluating profiles having length: {}'.format(gk))
res_tmp = evaluation.sequential_evaluation(recommender,
test_sequences=test_sequences,
users=test_users,
given_k=gk,
look_ahead=LOOK_AHEAD,
evaluation_functions=METRICS.values(),
top_n=TOPN,
scroll=False, # here we stop at each profile length
step=STEP)
mvalues = list(zip(METRICS.keys(), res_tmp))
res_list.append((gk, mvalues))
# -
# show separate plots per metric
fig, axes = plt.subplots(nrows=1, ncols=len(METRICS), figsize=(15,5))
res_list_t = list(zip(*res_list))
for midx, metric in enumerate(METRICS):
mvalues = [res_list_t[1][j][midx][1] for j in range(len(res_list_t[1]))]
ax = axes[midx]
ax.plot(given_k_list, mvalues)
ax.set_title(metric)
ax.set_xticks(given_k_list)
ax.set_xlabel('Profile length')
| 03_FPMC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (reco_base)
# language: python
# name: reco_base
# ---
# <i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
#
# <i>Licensed under the MIT License.</i>
# # LightFM - hybrid matrix factorisation on MovieLens (Python, CPU)
# This notebook explains the concept of a hybrid matrix factorisation based model for recommendation, it also outlines the steps to construct a pure matrix factorisation and a hybrid models using the [LightFM](https://github.com/lyst/lightfm) package. It also demonstrates how to extract both user and item affinity from a fitted hybrid model.
#
# ## 1. Hybrid matrix factorisation model
#
# ### 1.1 Background
#
# In general, most recommendation models can be divided into two categories:
# - Content based model,
# - Collaborative filtering model.
#
# The content-based model recommends based on similarity of the items and/or users using their description/metadata/profile. On the other hand, collaborative filtering model (discussion is limited to matrix factorisation approach in this notebook) computes the latent factors of the users and items. It works based on the assumption that if a group of people expressed similar opinions on an item, these peole would tend to have similar opinions on other items. For further background and detailed explanation between these two approaches, the reader can refer to machine learning literatures [3, 4].
#
# The choice between the two models is largely based on the data availability. For example, the collaborative filtering model is usually adopted and effective when sufficient ratings/feedbacks have been recorded for a group of users and items.
#
# However, if there is a lack of ratings, content based model can be used provided that the metadata of the users and items are available. This is also the common approach to address the cold-start issues, where there are insufficient historical collaborative interactions available to model new users and/or items.
#
# <!-- In addition, most collaborative filtering models only consume explicit ratings e.g. movie
#
# **NOTE** add stuff about implicit and explicit ratings -->
#
# ### 1.2 Hybrid matrix factorisation algorithm
#
# In view of the above problems, there have been a number of proposals to address the cold-start issues by combining both content-based and collaborative filtering approaches. The hybrid matrix factorisation model is among one of the solutions proposed [1].
#
# In general, most hybrid approaches proposed different ways of assessing and/or combining the feature data in conjunction with the collaborative information.
#
# ### 1.3 LightFM package
#
# LightFM is a Python implementation of a hybrid recommendation algorithms for both implicit and explicit feedbacks [1].
#
# It is a hybrid content-collaborative model which represents users and items as linear combinations of their content features’ latent factors. The model learns **embeddings or latent representations of the users and items in such a way that it encodes user preferences over items**. These representations produce scores for every item for a given user; items scored highly are more likely to be interesting to the user.
#
# The user and item embeddings are estimated for every feature, and these features are then added together to be the final representations for users and items.
#
# For example, for user i, the model retrieves the i-th row of the feature matrix to find the features with non-zero weights. The embeddings for these features will then be added together to become the user representation e.g. if user 10 has weight 1 in the 5th column of the user feature matrix, and weight 3 in the 20th column, the user 10’s representation is the sum of embedding for the 5th and the 20th features multiplying their corresponding weights. The representation for each items is computed in the same approach.
#
# #### 1.3.1 Modelling approach
#
# Let $U$ be the set of users and $I$ be the set of items, and each user can be described by a set of user features $f_{u} \subset F^{U}$ whilst each items can be described by item features $f_{i} \subset F^{I}$. Both $F^{U}$ and $F^{I}$ are all the features which fully describe all users and items.
#
# The LightFM model operates based binary feedbacks, the ratings will be normalised into two groups. The user-item interaction pairs $(u,i) \in U\times I$ are the union of positive (favourable reviews) $S^+$ and negative interactions (negative reviews) $S^-$ for explicit ratings. For implicit feedbacks, these can be the observed and not observed interactions respectively.
#
# For each user and item feature, their embeddings are $e_{f}^{U}$ and $e_{f}^{I}$ respectively. Furthermore, each feature is also has a scalar bias term ($b_U^f$ for user and $b_I^f$ for item features). The embedding (latent representation) of user $u$ and item $i$ are the sum of its respective features’ latent vectors:
#
# $$
# q_{u} = \sum_{j \in f_{u}} e_{j}^{U}
# $$
#
# $$
# p_{i} = \sum_{j \in f_{i}} e_{j}^{I}
# $$
#
# Similarly the biases for user $u$ and item $i$ are the sum of its respective bias vectors. These variables capture the variation in behaviour across users and items:
#
# $$
# b_{u} = \sum_{j \in f_{u}} b_{j}^{U}
# $$
#
# $$
# b_{i} = \sum_{j \in f_{i}} b_{j}^{I}
# $$
#
# In LightFM, the representation for each user/item is a linear weighted sum of its feature vectors.
#
# The prediction for user $u$ and item $i$ can be modelled as sigmoid of the dot product of user and item vectors, adjusted by its feature biases as follows:
#
# $$
# \hat{r}_{ui} = \sigma (q_{u} \cdot p_{i} + b_{u} + b_{i})
# $$
#
# As the LightFM is constructed to predict binary outcomes e.g. $S^+$ and $S^-$, the function $\sigma()$ is based on the [sigmoid function](https://mathworld.wolfram.com/SigmoidFunction.html).
#
# The LightFM algorithm estimates interaction latent vectors and bias for features. For model fitting, the cost function of the model consists of maximising the likelihood of data conditional on the parameters described above using stochastic gradient descent. The likelihood can be expressed as follows:
#
# $$
# L = \prod_{(u,i) \in S+}\hat{r}_{ui} \times \prod_{(u,i) \in S-}1 - \hat{r}_{ui}
# $$
#
# Note that if the feature latent vectors are not available, the algorithm will behaves like a [logistic matrix factorisation model](http://stanford.edu/~rezab/nips2014workshop/submits/logmat.pdf).
# ## 2. Movie recommender with LightFM using only explicit feedbacks
# ### 2.1 Import libraries
# +
import sys
import os
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import lightfm
from lightfm import LightFM
from lightfm.data import Dataset
from lightfm import cross_validation
# Import LightFM's evaluation metrics
from lightfm.evaluation import precision_at_k as lightfm_prec_at_k
from lightfm.evaluation import recall_at_k as lightfm_recall_at_k
# Import repo's evaluation metrics
from reco_utils.evaluation.python_evaluation import (
precision_at_k, recall_at_k)
from reco_utils.common.timer import Timer
from reco_utils.dataset import movielens
from reco_utils.recommender.lightfm.lightfm_utils import (
track_model_metrics, prepare_test_df, prepare_all_predictions,
compare_metric, similar_users, similar_items)
print("System version: {}".format(sys.version))
print("LightFM version: {}".format(lightfm.__version__))
# -
# ### 2.2 Defining variables
# + tags=["parameters"]
# Select MovieLens data size
MOVIELENS_DATA_SIZE = '100k'
# default number of recommendations
K = 10
# percentage of data used for testing
TEST_PERCENTAGE = 0.25
# model learning rate
LEARNING_RATE = 0.25
# no of latent factors
NO_COMPONENTS = 20
# no of epochs to fit model
NO_EPOCHS = 20
# no of threads to fit model
NO_THREADS = 32
# regularisation for both user and item features
ITEM_ALPHA=1e-6
USER_ALPHA=1e-6
# seed for pseudonumber generations
SEEDNO = 42
# -
# ### 2.2 Retrieve data
data = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
genres_col='genre',
header=["userID", "itemID", "rating"]
)
# quick look at the data
data.sample(5)
# ### 2.3 Prepare data
# Before fitting the LightFM model, we need to create an instance of `Dataset` which holds the interaction matrix.
dataset = Dataset()
# The `fit` method creates the user/item id mappings.
# +
dataset.fit(users=data['userID'],
items=data['itemID'])
# quick check to determine the number of unique users and items in the data
num_users, num_topics = dataset.interactions_shape()
print(f'Num users: {num_users}, num_topics: {num_topics}.')
# -
# Next is to build the interaction matrix. The `build_interactions` method returns 2 COO sparse matrices, namely the `interactions` and `weights` matrices.
(interactions, weights) = dataset.build_interactions(data.iloc[:, 0:3].values)
# LightLM works slightly differently compared to other packages as it expects the train and test sets to have same dimension. Therefore the conventional train test split will not work.
#
# The package has included the `cross_validation.random_train_test_split` method to split the interaction data and splits it into two disjoint training and test sets.
#
# However, note that **it does not validate the interactions in the test set to guarantee all items and users have historical interactions in the training set**. Therefore this may result into a partial cold-start problem in the test set.
train_interactions, test_interactions = cross_validation.random_train_test_split(
interactions, test_percentage=TEST_PERCENTAGE,
random_state=np.random.RandomState(SEEDNO))
# Double check the size of both the train and test sets.
print(f"Shape of train interactions: {train_interactions.shape}")
print(f"Shape of test interactions: {test_interactions.shape}")
# ### 2.4 Fit the LightFM model
# In this notebook, the LightFM model will be using the weighted Approximate-Rank Pairwise (WARP) as the loss. Further explanation on the topic can be found [here](https://making.lyst.com/lightfm/docs/examples/warp_loss.html#learning-to-rank-using-the-warp-loss).
#
#
# In general, it maximises the rank of positive examples by repeatedly sampling negative examples until a rank violation has been located. This approach is recommended when only positive interactions are present.
model1 = LightFM(loss='warp', no_components=NO_COMPONENTS,
learning_rate=LEARNING_RATE,
random_state=np.random.RandomState(SEEDNO))
# The LightFM model can be fitted with the following code:
model1.fit(interactions=train_interactions,
epochs=NO_EPOCHS);
# ### 2.5 Prepare model evaluation data
# Before we can evaluate the fitted model and to get the data into a format which is compatible with the existing evaluation methods within this repo, the data needs to be massaged slightly.
#
# First the train/test indices need to be extracted from the `lightfm.cross_validation` method as follows:
# +
uids, iids, interaction_data = cross_validation._shuffle(
interactions.row, interactions.col, interactions.data,
random_state=np.random.RandomState(SEEDNO))
cutoff = int((1.0 - TEST_PERCENTAGE) * len(uids))
test_idx = slice(cutoff, None)
# -
# Then the the mapping between internal and external representation of the user and item are extracted as follows:
uid_map, ufeature_map, iid_map, ifeature_map = dataset.mapping()
# Once the train/test indices and mapping are ready, the test dataframe can be constructed as follows:
with Timer() as test_time:
test_df = prepare_test_df(test_idx, uids, iids, uid_map, iid_map, weights)
print(f"Took {test_time.interval:.1f} seconds for prepare and predict test data.")
time_reco1 = test_time.interval
# And samples of the test dataframe:
test_df.sample(5)
# In addition, the predictions of all unseen user-item pairs (e.g. removing those seen in the training data) can be prepared as follows:
with Timer() as test_time:
all_predictions = prepare_all_predictions(data, uid_map, iid_map,
interactions=train_interactions,
model=model1,
num_threads=NO_THREADS)
print(f"Took {test_time.interval:.1f} seconds for prepare and predict all data.")
time_reco2 = test_time.interval
# Samples of the `all_predictions` dataframe:
all_predictions.sample(5)
# Note that the **raw prediction values from the LightFM model are for ranking purposes only**, they should not be used directly. The magnitude and sign of these values do not have any specific interpretation.
# ### 2.6 Model evaluation
# Once the evaluation data are ready, they can be passed into to the repo's evaluation methods as follows. The performance of the model will be tracked using both Precision@K and Recall@K.
#
# In addition, the results have also being compared with those computed from LightFM's own evaluation methods to ensure accuracy.
# +
with Timer() as test_time:
eval_precision = precision_at_k(rating_true=test_df,
rating_pred=all_predictions, k=K)
eval_recall = recall_at_k(test_df, all_predictions, k=K)
time_reco3 = test_time.interval
with Timer() as test_time:
eval_precision_lfm = lightfm_prec_at_k(model1, test_interactions,
train_interactions, k=K).mean()
eval_recall_lfm = lightfm_recall_at_k(model1, test_interactions,
train_interactions, k=K).mean()
time_lfm = test_time.interval
print(
"------ Using Repo's evaluation methods ------",
f"Precision@K:\t{eval_precision:.6f}",
f"Recall@K:\t{eval_recall:.6f}",
"\n------ Using LightFM evaluation methods ------",
f"Precision@K:\t{eval_precision_lfm:.6f}",
f"Recall@K:\t{eval_recall_lfm:.6f}",
sep='\n')
# -
# ## 3. Movie recommender with LightFM using explicit feedbacks and additional item and user features
# As the LightFM was designed to incorporates both user and item metadata, the model can be extended to include additional features such as movie genres and user occupations.
# ### 3.1 Extract and prepare movie genres
# In this notebook, the movie's genres will be used as the item metadata. As the genres have already been loaded during the initial data import, it can be processed directly as follows:
# split the genre based on the separator
movie_genre = [x.split('|') for x in data['genre']]
# retrieve the all the unique genres in the data
all_movie_genre = sorted(list(set(itertools.chain.from_iterable(movie_genre))))
# quick look at the all the genres within the data
all_movie_genre
# ### 3.2 Retrieve and prepare movie genres
# Further user features can be included as part of the model fitting process. In this notebook, **only the occupation of each user will be included** but the feature list can be extended easily.
#
# #### 3.2.1 Retrieve and merge data
# The user features can be retrieved directly from the grouplens website and merged with the existing data as follows:
# +
user_feature_URL = 'http://files.grouplens.org/datasets/movielens/ml-100k/u.user'
user_data = pd.read_table(user_feature_URL,
sep='|', header=None)
user_data.columns = ['userID','age','gender','occupation','zipcode']
# merging user feature with existing data
new_data = data.merge(user_data[['userID','occupation']], left_on='userID', right_on='userID')
# quick look at the merged data
new_data.sample(5)
# -
# #### 3.2.2 Extract and prepare user occupations
# retrieve all the unique occupations in the data
all_occupations = sorted(list(set(new_data['occupation'])))
# ### 3.3 Prepare data and features
# Similar to the previous model, the data is required to be converted into a `Dataset` instance and then create a user/item id mapping with the `fit` method.
dataset2 = Dataset()
dataset2.fit(data['userID'],
data['itemID'],
item_features=all_movie_genre,
user_features=all_occupations)
# The movie genres are then converted into a item feature matrix using the `build_item_features` method as follows:
item_features = dataset2.build_item_features(
(x, y) for x,y in zip(data.itemID, movie_genre))
# The user occupations are then converted into an user feature matrix using the `build_user_features` method as follows:
user_features = dataset2.build_user_features(
(x, [y]) for x,y in zip(new_data.userID, new_data['occupation']))
# Once the item and user features matrices have been completed, the next steps are similar as before, which is to build the interaction matrix and split the interactions into train and test sets as follows:
# +
(interactions2, weights2) = dataset2.build_interactions(data.iloc[:, 0:3].values)
train_interactions2, test_interactions2 = cross_validation.random_train_test_split(
interactions2, test_percentage=TEST_PERCENTAGE,
random_state=np.random.RandomState(SEEDNO))
# -
# ### 3.3 Fit the LightFM model with additional user and item features
# The parameters of the second model will be similar to the first model to facilitates comparison.
#
# The model performance at each epoch is also tracked by the same metrics as before.
model2 = LightFM(loss='warp', no_components=NO_COMPONENTS,
learning_rate=LEARNING_RATE,
item_alpha=ITEM_ALPHA,
user_alpha=USER_ALPHA,
random_state=np.random.RandomState(SEEDNO))
# The LightFM model can then be fitted:
model2.fit(interactions=train_interactions2,
user_features=user_features,
item_features=item_features,
epochs=NO_EPOCHS);
# ### 3.4 Prepare model evaluation data
# Similar to the previous model, the evaluation data needs to be prepared in order to get them into a format consumable with this repo's evaluation methods.
#
# Firstly the train/test indices and id mappings are extracted using the new interations matrix as follows:
# +
uids, iids, interaction_data = cross_validation._shuffle(
interactions2.row, interactions2.col, interactions2.data,
random_state=np.random.RandomState(SEEDNO))
uid_map, ufeature_map, iid_map, ifeature_map = dataset2.mapping()
# -
# The test dataframe is then constructed as follows:
with Timer() as test_time:
test_df2 = prepare_test_df(test_idx, uids, iids, uid_map, iid_map, weights2)
print(f"Took {test_time.interval:.1f} seconds for prepare and predict test data.")
# The predictions of all unseen user-item pairs can be prepared as follows:
# +
with Timer() as test_time:
all_predictions2 = prepare_all_predictions(data, uid_map, iid_map,
interactions=train_interactions2,
user_features=user_features,
item_features=item_features,
model=model2,
num_threads=NO_THREADS)
print(f"Took {test_time.interval:.1f} seconds for prepare and predict all data.")
# -
# ### 3.5 Model evaluation and comparison
# The predictive performance of the new model can be computed and compared with the previous model (which used only the explicit rating) as follows:
# +
eval_precision2 = precision_at_k(rating_true=test_df2,
rating_pred=all_predictions2, k=K)
eval_recall2 = recall_at_k(test_df2, all_predictions2, k=K)
print(
"------ Using only explicit ratings ------",
f"Precision@K:\t{eval_precision:.6f}",
f"Recall@K:\t{eval_recall:.6f}",
"\n------ Using both implicit and explicit ratings ------",
f"Precision@K:\t{eval_precision2:.6f}",
f"Recall@K:\t{eval_recall2:.6f}",
sep='\n')
# -
# The new model which used both implicit and explicit data performed consistently better than the previous model which used only the explicit data, thus highlighting the benefits of including such additional features to the model.
# ### 3.6 Evaluation metrics comparison
# Note that the evaluation approaches here are solely for demonstration purposes only.
#
# If the reader were using the LightFM package and/or its models, the LightFM's built-in evaluation methods are much more efficient and are the recommended approach for production usage as they are designed and optimised to work with the package.
#
# As a comparison, the times recorded to compute Precision@K and Recall@K for model1 are shown as follows:
print(
"------ Using Repo's evaluation methods ------",
f"Time [sec]:\t{(time_reco1+time_reco2+time_reco3):.1f}",
"\n------ Using LightFM evaluation methods ------",
f"Time [sec]:\t{time_lfm:.1f}",
sep='\n')
# ## 4. Evaluate model fitting process
# In addition to the inclusion of both implicit and explicit data, the model fitting process can also be monitored in order to determine whether the model is being trained properly.
#
# This notebook also includes a `track_model_metrics` method which plots the model's metrics e.g. Precision@K and Recall@K as model fitting progresses.
#
# For the first model (using only explicit data), the model fitting progress is shown as follows:
output1, _ = track_model_metrics(model=model1, train_interactions=train_interactions,
test_interactions=test_interactions, k=K,
no_epochs=NO_EPOCHS, no_threads=NO_THREADS)
# The second model (with both implicit and explicit data) fitting progress:
output2, _ = track_model_metrics(model=model2, train_interactions=train_interactions2,
test_interactions=test_interactions2, k=K,
no_epochs=NO_EPOCHS, no_threads=NO_THREADS,
item_features=item_features,
user_features=user_features)
# These show slightly different behaviour with the two approaches, the reader can then tune the hyperparameters to improve the model fitting process.
#
# ### 4.1 Performance comparison
# In addition, the model's performance metrics (based on the test dataset) can be plotted together to facilitate easier comparison as follows:
for i in ['Precision', 'Recall']:
sns.set_palette("Set2")
plt.figure()
sns.scatterplot(x="epoch", y="value", hue='data',
data=compare_metric(df_list = [output1, output2], metric=i)
).set_title(f'{i} comparison using test set');
# Referring to the figures above, it is rather obvious that the number of epochs is too low as the model's performances have not stabilised. Reader can decide on the number of epochs and other hyperparameters to adjust suit the application.
#
# As stated previously, it is interesting to see model2 (using both implicit and explicit data) performed consistently better than model1 (using only explicit ratings).
# ## 5. Similar users and items
# As the LightFM package operates based on latent embeddings, these can be retrieved once the model has been fitted to assess user-user and/or item-item affinity.
# ### 5.1 User affinity
# The user-user affinity can be retrieved with the `get_user_representations` method from the fitted model as follows:
_, user_embeddings = model2.get_user_representations(features=user_features)
user_embeddings
# In order to retrieve the top N similar users, we can use the `similar_users` from `reco_utils`. For example, if we want to choose top 10 users most similar to the user 1:
similar_users(user_id=1, user_features=user_features,
model=model2)
# ### 5.2 Item affinity
# Similar to the user affinity, the item-item affinity can be retrieved with the `get_item_representations` method using the fitted model.
_, item_embeddings = model2.get_item_representations(features=item_features)
item_embeddings
# The function to retrieve the top N similar items is similar to similar_users() above. For example, if we want to choose top 10 items most similar to the item 10:
similar_items(item_id=10, item_features=item_features,
model=model2)
# ## 6. Conclusion
# In this notebook, the background of hybrid matrix factorisation model has been explained together with a detailed example of LightFM's implementation.
#
# The process of incorporating additional user and item metadata has also been demonstrated with performance comparison. Furthermore, the calculation of both user and item affinity scores have also been demonstrated and extracted from the fitted model.
#
# This notebook remains a fairly simple treatment on the subject and hopefully could serve as a good foundation for the reader.
# ## References
# - [[1](https://arxiv.org/abs/1507.08439)]. <NAME> - Metadata Embeddings for User and Item Cold-start Recommendations, 2015. arXiv:1507.08439
# - [[2](https://making.lyst.com/lightfm/docs/home.html)]. LightFM documentation,
# - [3]. <NAME> - Recommender Systems: The Textbook, Springer, April 2016. ISBN 978-3-319-29659-3
# - [4]. <NAME>, <NAME> - Statistical Methods for Recommender Systems, 2016. ISBN: 9781107036079
#
| examples/02_model_hybrid/lightfm_deep_dive.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Debugging XGBoost training jobs in real time with Amazon SageMaker Debugger
#
# This notebook uses the MNIST dataset to demonstrate real-time analysis of XGBoost training jobs while the training jobs are running.
#
# This notebook was created and tested on an ml.m5.4xlarge notebook instance using 100GB instance volume.
#
# ## Overview
#
# Amazon SageMaker Debugger allows debugging machine learning training.
# SageMaker Debugger helps you to monitor your training in near real time using rules and provides alerts if it detects issues in training.
#
# Using SageMaker Debugger is a two step process: Saving model parameters and analysis.
# Let's look at each one of them closely.
#
# ### Saving model parameters
#
# In machine learning process, model parameters are updated every forward and backward pass and can describe the state of the training job at any particular instant in an ML lifecycle.
# Amazon SageMaker Debugger allows you to capture the model parameters and save them for analysis.
# Although XGBoost is not a deep learning algorithm, Amazon SageMaker Debugger is highly customizable and can help you interpret results by saving insightful metrics. For example, performance metrics or the importance of features at different frequencies.
# Refer to [SageMaker Debugger documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-configuration.html) for details on how to save the metrics you want.
#
#
# ### Analysis
#
# There are two ways to get to model parameters and run analysis on them.
#
# One way is to use concept called ***Rules***. On a very broad level, a rule is Python code used to detect certain conditions during training.
# Some of the conditions that a data scientist training an algorithm may care about are monitoring for gradients getting too large or too small, detecting overfitting, and so on.
# Amazon SageMaker Debugger comes pre-packaged with certain built-in rules that can be invoked on Amazon SageMaker. You can also write your own rules using the Amazon SageMaker Debugger APIs.
# For more details about automatic analysis using rules, see [Configure Debugger Built-in Rules](https://docs.aws.amazon.com/sagemaker/latest/dg/use-debugger-built-in-rules.html) and [List of Debugger Built-in Rules](https://docs.aws.amazon.com/sagemaker/latest/dg/debugger-built-in-rules.html).
#
# This notebook also walk you through how to use the SMDebug client library for analysis in real time while training jobs are running. The SMDebug client library enables you to retrieve model parameters and scalars saved during training job via few lines of code.
#
# Through the model parameter analysis, you can drill down into training issues you're running into. You save raw model parameter data in order to understand your model better, and figure out the root cause of training problems.
#
# ![Animated confusion matrix](cm.gif)
# ## Import SageMaker Python SDK and the SMDebug client library
# <font color='red'>**Important**</font>: To use the new Debugger features, you need to upgrade the SageMaker Python SDK and the SMDebug libary. In the following cell, change the third line to `install_needed=True` and run to upgrade the libraries.
import sys
import IPython
install_needed = False # Set to True to upgrade
if install_needed:
print("installing deps and restarting kernel")
# !{sys.executable} -m pip install -U sagemaker
# !{sys.executable} -m pip install -U smdebug
IPython.Application.instance().kernel.do_shutdown(True)
import boto3
import sagemaker
# Amazon SageMaker Debugger is available in Amazon SageMaker XGBoost container version `0.90-2` or later. The following cell retrieves the SageMaker XGBoost 0.90-2 container.
# +
from sagemaker import image_uris
# Below changes the region to be one where this notebook is running
region = boto3.Session().region_name
container = sagemaker.image_uris.retrieve("xgboost", region, "0.90-2")
# -
# ## Training XGBoost models in Amazon SageMaker with Amazon SageMaker Debugger
#
# In this section you learn to train an XGBoost model with Amazon SageMaker Debugger enabled and monitor the training jobs.
# This is done using the SageMaker [Estimator API](https://sagemaker.readthedocs.io/en/stable/estimators.html#sagemaker.estimator.Estimator).
# While training job is running, use the SageMaker Debugger API to access saved model parameters in real time and visualize them.
# You can also download a fresh set of model parameters every time you query for using the SMDebug library.
#
# This notebook is adapted from [XGBoost for Classification](https://github.com/aws/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/xgboost_mnist/xgboost_mnist.ipynb).
#
# ### Data preparation
#
# Use the [MNIST data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html) stored in [LIBSVM](https://www.csie.ntu.edu.tw/~cjlin/libsvm/) format.
# +
from data_utils import load_mnist, upload_to_s3
bucket = sagemaker.Session().default_bucket()
prefix = "DEMO-smdebug-xgboost-mnist"
# +
# %%time
train_file, validation_file = load_mnist()
upload_to_s3(train_file, bucket, f"{prefix}/train/mnist.train.libsvm")
upload_to_s3(validation_file, bucket, f"{prefix}/validation/mnist.validation.libsvm")
# -
# ### Enabling Amazon SageMaker Debugger in the estimator object
#
# Enabling Amazon SageMaker Debugger in a training job can be accomplished by adding its configuration into an Estimator object constructor:
#
# ```
# from sagemaker.debugger import DebuggerHookConfig
#
# estimator = Estimator(
# ...,
# debugger_hook_config = DebuggerHookConfig(
# s3_output_path="s3://{bucket_name}/{location_in_bucket}", # Optional
# collection_configs=[
# CollectionConfig(
# name="metrics",
# parameters={
# "save_interval": "10"
# }
# )
# ]
# )
# )
# ```
# Here, the `DebuggerHookConfig` object configures which data `Estimator` should save for the real-time visualization. Provide two parameters:
#
# - `s3_output_path`: Points to an S3 bucket where you intend to store model parameters. Amount of data saved depends on multiple factors, major ones are training job, data set, model, frequency of saving model parameters. This S3 bucket should be in your AWS account so that you have full access to control over the stored data. **Note**: The S3 bucket should be originally created in the same Region where your training job is running, otherwise you might run into problems with cross-Region access.
#
# - `collection_configs`: It enumerates named collections of model parameters to save. Collections are a convenient way to organize relevant model parameters under same umbrella to make it easy to navigate them during analysis. In this particular example, you are interested in a single collection named `metrics`. You also configured Amazon SageMaker Debugger to save metrics every 10 iterations. For all parameters that are supported by Collections and DebuggerConfig, see [Collection documentation](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md).
# ### Using Amazon SageMaker Debugger with XGBoost Classification
#
# Import the libraries for the demo of Amazon SageMaker Debugger.
# +
from sagemaker import get_execution_role
role = get_execution_role()
base_job_name = "demo-smdebug-xgboost-classification"
bucket_path = "s3://{}".format(bucket)
num_round = 25
save_interval = 3
hyperparameters = {
"max_depth": "5",
"eta": "0.1",
"gamma": "4",
"min_child_weight": "6",
"silent": "0",
"objective": "multi:softmax",
"num_class": "10", # num_class is required for 'multi:*' objectives
"num_round": num_round,
}
# +
from sagemaker.estimator import Estimator
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
xgboost_algorithm_mode_estimator = Estimator(
role=role,
base_job_name=base_job_name,
instance_count=1,
instance_type="ml.m5.xlarge",
image_uri=container,
hyperparameters=hyperparameters,
max_run=1800,
debugger_hook_config=DebuggerHookConfig(
s3_output_path=bucket_path, # Required
collection_configs=[
CollectionConfig(name="metrics", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="predictions", parameters={"save_interval": str(save_interval)}),
CollectionConfig(name="labels", parameters={"save_interval": str(save_interval)}),
],
),
)
# + [markdown] pycharm={"name": "#%% md\n"}
# With the next step you are going to actually start a training job using the Estimator object you created above. This job is started in asynchronous, non-blocking way. This means that control is passed back to the notebook and further commands can be run while training job is progressing.
# +
from sagemaker.session import TrainingInput
train_s3_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "train"), content_type="libsvm"
)
validation_s3_input = TrainingInput(
"s3://{}/{}/{}".format(bucket, prefix, "validation"), content_type="libsvm"
)
# This is a fire and forget event. By setting wait=False, you just submit the job to run in the background.
# Amazon SageMaker will start one training job and release control to next cells in the notebook.
# Follow this notebook to see status of the training job.
xgboost_algorithm_mode_estimator.fit(
{"train": train_s3_input, "validation": validation_s3_input}, wait=False
)
# -
# ### Result
#
# As a result of the above command, Amazon SageMaker starts one training job for you and it produces model parameters to be analyzed.
# This job will run in a background without you having to wait for it to complete in order to continue with the rest of the notebook.
# Because of this asynchronous nature of a training job, you need to monitor its status so that you don't start to request debugging too early.
#
#
# ## Analysis and Visualization
#
# ### Checking on the training job status
#
# Check the status of the training job by running the following code.
# It checks on the status of an Amazon SageMaker training job every 15 seconds.
# Once a training job has started its training cycle, it proceeds to the next cells in the notebook.
# That means training job started to tune the model and, in parallel, emit model parameters.
# +
import time
from time import gmtime, strftime
# Below command will give the status of training job
job_name = xgboost_algorithm_mode_estimator.latest_training_job.name
client = xgboost_algorithm_mode_estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=job_name)
print("Training job name: " + job_name)
if description["TrainingJobStatus"] != "Completed":
while description["SecondaryStatus"] not in ["Training", "Completed"]:
description = client.describe_training_job(TrainingJobName=job_name)
primary_status = description["TrainingJobStatus"]
secondary_status = description["SecondaryStatus"]
print("{}: {}, {}".format(strftime("%X", gmtime()), primary_status, secondary_status))
time.sleep(15)
# -
# ### Retrieving and analyzing model parameters
#
# Before getting to analysis, here are some notes on concepts being used in Amazon SageMaker Debugger that help with analysis.
# - ***Trial*** - Object that is a centerpiece of the SageMaker Debugger API when it comes to getting access to model parameters. It is a top level abstract that represents a single run of a training job. All model parameters emitted by a training job are associated with its *trial*.
# - ***Tensor*** - Object that represents model parameters, such as weights, gradients, accuracy, and loss, that are saved during training job.
#
# For more details on aforementioned concepts as well as on SageMaker Debugger API and examples, see [SageMaker Debugger Analysis API](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md) documentation.
#
# In the following code cell, use a ***Trial*** to access model parameters. You can do that by inspecting currently running training job and extract necessary parameters from its debug configuration to instruct SageMaker Debugger where the data you are looking for is located. Keep in mind the following:
# - Model parameters are being stored in your own S3 bucket to which you can navigate and manually inspect its content if desired.
# - You might notice a slight delay before trial object is created. This is normal as SageMaker Debugger monitors the corresponding bucket and waits until model parameters data to appear. The delay is introduced by less than instantaneous upload of model parameters from a training container to your S3 bucket.
# +
from smdebug.trials import create_trial
description = client.describe_training_job(TrainingJobName=job_name)
s3_output_path = xgboost_algorithm_mode_estimator.latest_job_debugger_artifacts_path()
# This is where we create a Trial object that allows access to saved model parameters.
trial = create_trial(s3_output_path)
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from IPython.display import display, clear_output
def plot_confusion_for_one_step(trial, step, ax=None):
if ax is None:
fig, ax = plt.subplots()
cm = confusion_matrix(
trial.tensor("labels").value(step), trial.tensor("predictions").value(step)
)
normalized_cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
sns.heatmap(normalized_cm, cmap="bone", ax=ax, cbar=False, annot=cm, fmt="")
print(f"iteration: {step}")
def plot_and_update_confusion_for_all_steps(trial):
fig, ax = plt.subplots()
rendered_steps = []
# trial.loaded_all_steps is a way to keep monitoring for a state of a training job
# as seen by Amazon SageMaker Debugger.
# When training job is completed Trial becomes aware of it.
while not rendered_steps or not trial.loaded_all_steps:
steps = trial.steps()
# quick way to get diff between two lists
steps_to_render = list(set(steps).symmetric_difference(set(rendered_steps)))
# plot only from newer chunk
for step in steps_to_render:
clear_output(wait=True)
plot_confusion_for_one_step(trial, step, ax=ax)
display(fig)
plt.pause(5)
ax.clear()
rendered_steps.extend(steps_to_render)
fig.clear()
plt.close()
# -
# ### Visualizing confusion matrix of a running training job
#
# Finally, wait until Amazon SageMaker Debugger has downloaded initial collection of model parameters to look at. Once that collection is ready you keep getting new model parameters every five seconds and plot them correspondingly one under another.
plot_and_update_confusion_for_all_steps(trial)
| sagemaker-debugger/xgboost_realtime_analysis/xgboost-realtime-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="UrjQGgr5nUHC"
# <h1> Imports
# + id="eGl9mcc0nOMP"
import matplotlib.pyplot as plt
import numpy as np
# Required imports for neural network
import torch.nn as nn
import torch
from torch.autograd import Variable
import random
# + [markdown] id="T3KVOwFXFOY0"
# <h1> Data Loading and Generation
# + [markdown] id="nMUUm70ufKHH"
# This Sine function generator is based on the repostory: https://github.com/AdrienLE/ANIML/blob/master/ANIML.ipynb
# + id="a3X51uGHDvSV"
class SineWaveTask:
def __init__(self):
self.a = np.random.uniform(0.1, 5.0)
self.b = np.random.uniform(0, 2*np.pi)
self.train_x = None
def f(self, x):
return self.a * np.sin(x + self.b)
def training_set(self, size=10, force_new=False):
if self.train_x is None and not force_new:
self.train_x = np.random.uniform(-5, 5, size)
x = self.train_x
elif not force_new:
x = self.train_x
else:
x = np.random.uniform(-5, 5, size)
y = self.f(x)
return torch.Tensor(x), torch.Tensor(y)
def test_set(self, size=50):
x = np.linspace(-5, 5, size)
y = self.f(x)
return torch.Tensor(x), torch.Tensor(y)
def plot(self, *args, **kwargs):
x, y = self.test_set(size=100)
return plt.plot(x.numpy(), y.numpy(), *args, **kwargs)
def plot_model(self, new_model, *args, **kwargs):
x, y_true = self.test_set(size=100)
x = Variable(x[:, None])
y_true = Variable(y_true[:, None])
y_pred = new_model(x)
plt.plot(x.data.numpy().flatten(),
y_pred.data.numpy().flatten(),
*args, **kwargs)
TRAIN_SIZE = 20000
TEST_SIZE = 1000
SINE_TRAIN = [SineWaveTask() for _ in range(TRAIN_SIZE)]
SINE_TEST = [SineWaveTask() for _ in range(TEST_SIZE)]
# + [markdown] id="cu4urLF7Q88A"
# <h1> Neural Network Model
# + id="R1B0YTz6ytyN"
# Define network
class Neural_Network(nn.Module):
def __init__(self, input_size=1, hidden_size=40, output_size=1):
super(Neural_Network, self).__init__()
# network layers
self.hidden1 = nn.Linear(input_size,hidden_size)
self.hidden2 = nn.Linear(hidden_size,hidden_size)
self.output_layer = nn.Linear(hidden_size,output_size)
#Activation functions
self.relu = nn.ReLU()
def forward(self, x):
x = self.hidden1(x)
x = self.relu(x)
x = self.hidden2(x)
x = self.relu(x)
x = self.output_layer(x)
y = x
return y
# + [markdown] id="G-ExWACxQ3mt"
# <h1> Helper functions
# + id="1zyNHFXdOnug"
# The Minimum Square Error is used to evaluate the difference between prediction and ground truth
criterion = nn.MSELoss()
def copy_existing_model(model):
# Function to copy an existing model
# We initialize a new model
new_model = Neural_Network()
# Copy the previous model's parameters into the new model
new_model.load_state_dict(model.state_dict())
return new_model
def get_samples_in_good_format(wave):
#This function is used to sample data from a wave
x, y_true = wave.training_set()
# We add [:,None] to get the right dimensions to pass to the model: we want K x 1 (we have scalars inputs hence the x 1)
# Note that we convert everything torch tensors
x = torch.tensor(x[:,None])
y_true = torch.tensor(y_true[:,None])
return x,y_true
def initialization_to_store_meta_losses():
# This function creates lists to store the meta losses
global store_train_loss_meta; store_train_loss_meta = []
global store_test_loss_meta; store_test_loss_meta = []
def test_set_validation(model,new_model,wave,lr_inner,k,store_test_loss_meta):
# This functions does not actually affect the main algorithm, it is just used to evaluate the new model
new_model = training(model, wave, lr_inner, k)
# Obtain the loss
loss = evaluation(new_model, wave)
# Store loss
store_test_loss_meta.append(loss)
def train_set_evaluation(new_model,wave,store_train_loss_meta):
loss = evaluation(new_model, wave)
store_train_loss_meta.append(loss)
def print_losses(epoch,store_train_loss_meta,store_test_loss_meta,printing_step=1000):
if epoch % printing_step == 0:
print(f'Epochh : {epoch}, Average Train Meta Loss : {np.mean(store_train_loss_meta)}, Average Test Meta Loss : {np.mean(store_test_loss_meta)}')
#This is based on the paper update rule, we calculate the difference between parameters and then this is used by the optimizer, rather than doing the update by hand
def reptile_parameter_update(model,new_model):
# Zip models for the loop
zip_models = zip(model.parameters(), new_model.parameters())
for parameter, new_parameter in zip_models:
if parameter.grad is None:
parameter.grad = torch.tensor(torch.zeros_like(parameter))
# Here we are adding the gradient that will later be used by the optimizer
parameter.grad.data.add_(parameter.data - new_parameter.data)
# Define commands in order needed for the metaupdate
# Note that if we change the order it doesn't behave the same
def metaoptimizer_update(metaoptimizer):
# Take step
metaoptimizer.step()
# Reset gradients
metaoptimizer.zero_grad()
def metaupdate(model,new_model,metaoptimizer):
# Combine the two previous functions into a single metaupdate function
# First we calculate the gradients
reptile_parameter_update(model,new_model)
# Use those gradients in the optimizer
metaoptimizer_update(metaoptimizer)
def evaluation(new_model, wave, item = True):
# Get data
x, label = get_samples_in_good_format(wave)
# Make model prediction
prediction = new_model(x)
# Get loss
if item == True: #Depending on whether we need to return the loss value for storing or for backprop
loss = criterion(prediction,label).item()
else:
loss = criterion(prediction,label)
return loss
def training(model, wave, lr_k, k):
# Create new model which we will train on
new_model = copy_existing_model(model)
# Define new optimizer
koptimizer = torch.optim.SGD(new_model.parameters(), lr=lr_k)
# Update the model multiple times, note that k>1 (do not confuse k with K)
for i in range(k):
# Reset optimizer
koptimizer.zero_grad()
# Evaluate the model
loss = evaluation(new_model, wave, item = False)
# Backpropagate
loss.backward()
koptimizer.step()
return new_model
# + [markdown] id="-4Ps8P2IRCmF"
# <h1> Reptile
# + id="8ogpg_DHizlC"
#Define important variables
epochs = int(1e5) # number of epochs
lr_meta=0.001 # Learning rate for meta model (outer loop)
printing_step=1000 # how many epochs should we wait to print the loss
lr_k=0.01 # Internal learning rate
k=5 # Number of internal updates for each task
# Initializations
initialization_to_store_meta_losses()
model = Neural_Network()
metaoptimizer = torch.optim.Adam(model.parameters(), lr=lr_meta)
# + colab={"base_uri": "https://localhost:8080/", "height": 887} id="-4-zQWWKFt3s" outputId="a90fc4c6-006c-43c8-d882-4c3a983a5923"
# Training loop
for epoch in range(epochs):
# Sample a sine wave (Task from training data)
wave = random.sample(SINE_TRAIN, 1)
# Update model predefined number of times based on k
new_model = training(model, wave[0], lr_k, k)
# Evalaute the loss for the training data
train_set_evaluation(new_model,wave[0],store_train_loss_meta)
#Meta-update --> Get gradient for meta loop and update
metaupdate(model,new_model,metaoptimizer)
# Evalaute the loss for the test data
# Note that we need to sample the wave from the test data
wave = random.sample(SINE_TEST, 1)
test_set_validation(model,new_model,wave[0],lr_k,k,store_test_loss_meta)
# Print losses every 'printing_step' epochs
print_losses(epoch,store_train_loss_meta,store_test_loss_meta,printing_step)
# + [markdown] id="bQjoz6FYctJM"
# <h1> Few Shot learning with new meta-model
# + [markdown] id="m-SPUG5Bfpe9"
# The model performs good few shot learning
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="GY84TNs8JXVH" outputId="b9f0dc18-b1c4-47e0-bb2b-905303279df4"
wave = SineWaveTask();
k_shot_updates = 4
initialization_to_store_meta_losses()
for shots in range(k_shot_updates):
new_model = training(model, wave, lr_k, shots)
train_set_evaluation(new_model,wave,store_train_loss_meta)
wave.plot_model(new_model, label=f'{shots+1} gradient steps')
wave.plot(label='Original Wave')
plt.legend()
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="F4_Y4VF_dpNb" outputId="4e200e1a-efb2-40a8-e404-dbdd3386a355"
plt.plot(store_train_loss_meta,label = 'Loss')
plt.legend()
plt.xlabel('k shots')
# + id="-4nGnU0foEYo"
| Reptile/Reptile_sinewave.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import pickle
from scipy import signal
from scipy import stats
import numpy as np
from sklearn.model_selection import ShuffleSplit
from sklearn.metrics import cohen_kappa_score
import math
from collections import OrderedDict
import matplotlib.pyplot as plt
sys.path.append('D:\Diamond\code')
from csp_james_2 import *
sys.path.append('D:\Diamond\code')
from thesis_funcs_19_03 import *
import torch
import torch.nn as nn
import torch.nn.functional as nnF
import torch.optim as optim
from torch.autograd import Variable
from torch.optim import lr_scheduler
import csv
from random import randint
import random
import datetime
import scipy
# +
save_root = 'E:\\Diamond\\own_expo\\pilot_test\\'
config_root= 'E:\\Diamond\\bci_iv\\MODELS\\fbcsp_mibif_cnn\\2a\\configs\\'
k_fold = 5
# initialize csp
m = 2# m is Nw in the paper "learning temporal information for brain-copmuter interface, Sakhavi et.al"
n_components = 2 * m # pick some components
down_sample_step = 10 #Hilbert evelope
# select Ns pairs of csp filters
Ns = 4
CLASSES =[0,1]
C_OVR = [0,1]
balance_classes = 1
########################################################################################################################
# DEFINE FILTER BANK
########################################################################################################################
#Filter Bank
FB = [[4., 8.], [8., 12.], [12., 16.], [16., 20.], [20., 24.], [24., 28.], [28., 32.], [32., 36.], [36., 40.]]
FB = np.array(FB)
#argumaents for Chebyl II filtering
# Nyquist frequency
# min. attenuation in stop band
gstop = 45
# max. attenuation in passband
gpass= 5
#EEG_PERIOD = [[1,2.5]]#actual MI eeg period, from 0.5s after cue to end of MI at 4 seconds after cue onset
FS = [512]
to_center_matrix = 0
calc_kl_d = 0
calc_mi_discrim =0
# -
# +
# read _lambda values
with open(config_root +'_lambda_config.csv', mode = 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter = ',')
for row in csv_reader:
_lambda_list_str = row[1:]
csv_file.close()
#create _lambda value in list
_lambda_list = []
for i in _lambda_list_str:
_lambda_list.append(float(i))
# -
# # Make training and evaluation sets
def seperate_train_eval_ind(EEG_MI_RAW_load, LABELS_load):
"""
take the EEG matrix in form [trial, channel, time] of extracted MI (cue-break),
seperate first half of the trials into the training set, the second half becomes the evaluaiton set
INPUTS:
EEG_MI_RAW_load: EEG matrix of shape [total_trials, channels, timepoints], extracted MI signals from the total number of recorded trials
LABELS_load: MI class LABELS for each trial, in shape [total_trials]
OUTPUTS:
EEG_MI_RAW_T: training EEG matirx, in shape [total_trial//2, chanl, timepoints], the first half of the total recorded trials
LABELS_load_T: training labels, in shape [total_trial//2]
EEG_MI_RAW_E: evaluation EEG matrix, in shape [total_trial//2, chanl, timepoints], the second half of the total recorded trials
LABELS_load_E: evaluation labels, in shape [total_trial//2]
"""
#fist half of recording is training, second half is evaluation
train_set_ind = np.arange(0, int(len(LABELS_load)/2))
eval_set_ind = np.arange(int(len(LABELS_load)/2), int(len(LABELS_load)))
EEG_MI_RAW_T = EEG_MI_RAW_load[train_set_ind]
EEG_MI_RAW_E = EEG_MI_RAW_load[eval_set_ind]
LABELS_load_T = LABELS_load[train_set_ind]
LABELS_load_E = LABELS_load[eval_set_ind]
return EEG_MI_RAW_T, LABELS_load_T, EEG_MI_RAW_E, LABELS_load_E
np.shape(EEG_filt_FB_go)
plt.plot(EEG_filt_FB_go[2,0,0])
FB
int(16<17)
2304/FS[0]
def calc_k_covs_all_fold_own_expo(EEG_extract_raw, LABELS, portion, k_file_root, balance_classes, FB, gpass, gstop, fs, eeg_period,
k_fold, C_OVR, load_idx, cv_train_size, calc_covs_all_fd = 1, to_center_matrix = 0, clip_before_filter = 1):
"""note:clip_before_filter: boolean, wheether to extract MI eeg periods before filtering the signal (filtering effect)"""
"""
#filename_T = 'B0'+str(subject)+'T'
#ilename_save = filename
#rint (filename_save)
#k_file_root = 'E:\\Diamond\\bci_iv\\MODELS\\fbcsp_mibif_cnn\\2a\\AUTORUN_50sel\\' + filename_T + '\\4s\\cv_30\\'
########################################################################################################################
#LOAD DATA
########################################################################################################################
#load raw eeg and labels
EEG_extract_raw = pickle.load(open(raw_data_root
+ filename_T + '.pickle', 'rb'),encoding='iso-8859-1')
#Class labels
LABELS_raw = pickle.load(open(raw_data_root
+ filename_T + "_LABELS.pickle", 'rb'), encoding='iso-8859-1')
LABELS = LABELS_raw - 1 #already in 1,2,3,4, the labels are. but we nned them to be likw 0,1,2,3
#LABELS = LABELS_raw
"""
C_labels = []
for c_ovr in C_OVR:
c_label = np.where(LABELS == c_ovr)[0]
#random.shuffle(c_label)
c_label_p = c_label[0:int(portion*len(c_label))]
C_labels = C_labels + c_label_p.tolist()
C_labels= np.array(C_labels)
EEG_extract_raw = EEG_extract_raw[C_labels, :]
LABELS = LABELS[C_labels]
#RUN THIS CODE ONLY ONCE, LABELS0 is the original labels, and LABELS will be changed in one-versus-rest strategy, later
LABELS0 = LABELS.copy()
########################################################################################################################
#APPLY FILTER BANK
########################################################################################################################
if clip_before_filter:
EEG_extract_raw = EEG_extract_raw[:,:, int(eeg_period[0]*fs):int(eeg_period[1]*fs)]
#Store Filter bank filtered raw EEG data, in the shape of num_filter_bank X num_trials X num_chanl X num_samples
#initiate empty matrix
EEG_filt_FB_L = np.empty( [len(FB),
np.shape(EEG_extract_raw)[0], np.shape(EEG_extract_raw)[1],np.shape(EEG_extract_raw)[2]] )
Nf = fs / 2.
for fb in range (0, len(FB)):
passband = FB[fb]
stopband = FB[fb] + np.array([-2., +2.])
EEG_filt_FB_L[fb] = filter_signal(EEG_extract_raw, passband, stopband, Nf, gpass, gstop)
#elimintae the start and end 0.5 secs, due to filter disturbance
#EEG_filt_FB = EEG_filt_FB_L[:, :, :, int(0.5*fs):int(-0.5*fs)]
EEG_filt_FB = EEG_filt_FB_L
if to_center_matrix == 1:
EEG_filt_FB = center_matrix(EEG_filt_FB)
if clip_before_filter == 0:
#trake only the MI 3.5 seconds
EEG_filt_FB_go = EEG_filt_FB[:,:,:,int(eeg_period[0]*fs):int(eeg_period[1]*fs)]
else:
EEG_filt_FB_go = EEG_filt_FB
LABELS0_go = LABELS0.copy()
if load_idx == 0:
TRAIN_IDX, TEST_IDX = cv_train_test_strat(k_fold, cv_train = cv_train_size, LABELS0 = LABELS0_go, C_OVR = C_OVR)
elif load_idx == 1:
TRAIN_IDX = pickle.load(open(k_file_root + '\\TRAIN_IDX.pickle', 'rb'))
TEST_IDX = pickle.load(open(k_file_root + '\\TEST_IDX.pickle', 'rb'))
Train_idx = ovr_train_ind(k_fold, C_OVR, LABELS0_go, TRAIN_IDX, balance_classes)
########################################################################################################################
if calc_covs_all_fd == 1:
COVS_AL_FD = []
for fold in range (0, k_fold):
Covs = np.zeros((len(FB), len(C_OVR), 2, np.shape(EEG_filt_FB_go)[2], np.shape(EEG_filt_FB_go)[2]))
for c_ovr in C_OVR:
train_idx = Train_idx[fold][c_ovr]
#training labels
LABELS_train = LABELS0_go[train_idx]
#binaritize LABELS_train so that "one" is class 0, and "rest" is class 1
LABELS_train = make_ovr_01_labels(LABELS_train, c_ovr)
#training eeg signals
EEG_train_FB = EEG_filt_FB_go[:,train_idx,:]
for fb in range (0, len(FB)):
covs = covs_classes(_classes = [0,1], n_ch = np.shape(EEG_train_FB)[2], X = EEG_train_FB[fb], y = LABELS_train)
Covs[fb][c_ovr] = covs
COVS_AL_FD.append(Covs)
elif calc_covs_all_fd == 0:
COVS_AL_FD = None
return COVS_AL_FD, EEG_filt_FB_go, LABELS0_go,TRAIN_IDX, TEST_IDX,Train_idx
EEG_PERIOD
ep = '05_15'
float(ep.split('_')[1])/10
def signal_len_to_eeg_period(signal_len):
"""
Translate "05_15" in file name into eeg_period [0.5, 1.5], for example
INPUT:
signal_len: string, in form 'nn_mm' where nn is the starting decisenonds, mm is the ending deciseconds
OUTPUT:
eeg_period: array of float, [nn/10, mm/10]
"""
EP = signal_len.split('_')
assert len(EP) == 2, 'check feature file name (signal_len), there sare more than one _ in the name. Should be in form nn_mm, where nn is the starting decisecond, mm is the end decisedon'
eeg_period = [float(EP[0])/10 , float(EP[1])/10 ]
return eeg_period
# +
to_save = 1
#Signal_lens = ['05_15', '10_20', '15_25', '20_30', '25_35', '30_40']
Signal_lens = ['05_25', '10_30', '15_35', '20_40']
portion = 1
portion_train = str(int(portion*100)) #mistake carried over - cv_train is actually the portion of training data used, and the directory name
cv_train_size = 0.5 #cv_train_size if the actually cv split
for subject in range(3,4):
sub_id = 's' + str(subject)
print (subject)
for signal_len in Signal_lens:
file_root = save_root + sub_id + '\\models\\' + signal_len + '\\'
k_file_root = file_root + 'pt_'+ portion_train + '\\'
#eeg_period = EEG_PERIOD[0]
eeg_period = signal_len_to_eeg_period(signal_len)
fs = FS[0]
"""
#set load_idx to 0 if we want to create new and overwirite old T_IND files
COVS_AL_FD, EEG_filt_FB_go, LABELS0_go, TRAIN_IDX, TEST_IDX, Train_idx = calc_k_covs_all_fold_current(filename_T, raw_data_root,
portion, k_file_root,
balance_classes,
FB, gpass, gstop, fs, eeg_period,
k_fold, C_OVR, load_idx = 0,
cv_train_size = cv_train_size,
to_center_matrix = to_center_matrix)
"""
## LOAD in raw EEG(MI) portion and LABELS
EEG_MI_RAW_load = pickle.load(open( 'E:\\Diamond\\own_expo\\pilot_test\\' + sub_id + '\\signals\\' + sub_id + ".pickle", "rb" ) )
LABELS_load = pickle.load(open( 'E:\\Diamond\\own_expo\\pilot_test\\' + sub_id + '\\signals\\' + sub_id + "_LABELS.pickle", "rb" ) )
#seperate the raw EEG into training and evaluaiton sets
EEG_MI_RAW_T, LABELS_load_T, EEG_MI_RAW_E, LABELS_load_E= seperate_train_eval_ind(EEG_MI_RAW_load, LABELS_load)
LABELS = LABELS_load_T - 1
COVS_AL_FD, EEG_filt_FB_go, LABELS0_go, TRAIN_IDX, TEST_IDX, Train_idx = calc_k_covs_all_fold_own_expo(EEG_MI_RAW_T, LABELS, portion, k_file_root, balance_classes, FB, gpass, gstop, fs, eeg_period,
k_fold, C_OVR, load_idx = 0,
cv_train_size = cv_train_size,
to_center_matrix = to_center_matrix)
if to_save == 1:
pickle.dump(TRAIN_IDX , open(file_root +'pt_'+ portion_train + '\\TRAIN_IDX' + ".pickle", "wb" ) )
pickle.dump(TEST_IDX , open(file_root +'pt_'+ portion_train + '\\TEST_IDX' + ".pickle", "wb" ) )
pickle.dump(LABELS0_go , open(file_root +'pt_'+ portion_train + '\\LABELS0_go' + ".pickle", "wb" ) )
for fold in range (0, k_fold):
Covs_k = COVS_AL_FD[fold] #9 4 2 22 22
for _lambda in _lambda_list[0:1]:
for c_ovr in C_OVR:
W_B = []
for i in range (0, len(FB)):
W_B.append([])
train_idx = Train_idx[fold][c_ovr]
EEG_train = EEG_filt_FB_go[:, train_idx, :]
LABELS_train = LABELS0_go[train_idx]
LABELS_train = make_ovr_01_labels(LABELS_train, c_ovr)
V= []
for tr in range (0, len(train_idx)):
V.append([])
for fb in range (0, len(FB)):
C_ck = (1-_lambda)*Covs_k[fb, c_ovr] #+ _lambda*sec_term
eigen_vectors_sorted, eigen_values = calc_sort_eigenvectors(C_ck)
W_b_ = np.concatenate ( [eigen_vectors_sorted[:, 0:int(m)], eigen_vectors_sorted[:, -int(m):]], axis = 1)
W_B[fb].append(W_b_)
for tr in range (0, len(train_idx)):
eeg = EEG_train[fb, tr, :]
#calculate features
v_bi = csp_features(W_b_, m, eeg)
#contruct feature matrix
V[tr].append(v_bi)
V = np.array(V)
V = V.reshape([np.shape(V)[0], np.shape(V)[1]*np.shape(V)[2]])
W_B = np.array(W_B).squeeze()
#follow MIBIF Algorithm in bci_iv_fbcsp paper
F = np.transpose(V)
####CALCULATE MUTUAL INFORMATION FOR THE FEATURES
I_fj = calc_MI(FB, m, CLASSES, LABELS_train, F)
####SORT FEATURES ACCORDING TO MUTAUL INFORMATION
# I_fj_sorted is the indices of features ranked by decending mutal information, in shape (num_features, );
#where num_features = 2* m * num_FB
I_fj_sorted = np.argsort(I_fj)[::-1]
#the filter bank indices from which the features come from, in shape (num_features, )
FB_IND = np.floor(I_fj_sorted/(2*m)).astype(int)
#the filter channel (csp.eigen_vectors_sorted column) indiceis from which the features come from, in shape (num_features, )
FILTER_IND = (I_fj_sorted) - FB_IND * (2*m)
#combine the filterbank indicies and the CSP filter indicies,
#first column tells which filter bank the feature came from,
#second column tells which csp filter channel the feature came from
FB_FILTER_IND = np.array(list(zip(FB_IND, FILTER_IND)))
if to_save == 1:
pickle.dump( W_B, open(file_root + 'pt_' + portion_train
+ '\\W_B_fold_' + str(fold) +
'_c_ovr_' + str(c_ovr) + '_lambda_' + str(_lambda) +
".pickle", "wb" ) )
pickle.dump( FB_FILTER_IND, open(file_root + 'pt_' + portion_train + '\\FB_FILTER_IND_fold_' + str(fold) +
'_c_ovr_' + str(c_ovr) + '_lambda_' + str(_lambda) +
".pickle", "wb" ) )
########################################################################################################################
for fold in range (0, k_fold):
#print ('fold', fold)
for _lambda in _lambda_list[0:1]:
for c_ovr in C_OVR:
#print (c_ovr)
#load in csp filters and mutual informtaion ranked indicies
W_B = pickle.load(open( file_root + 'pt_' + portion_train
+ '\\W_B_fold_' + str(fold) +
'_c_ovr_' + str(c_ovr) + '_lambda_' + str(_lambda) +
".pickle", 'rb'))
FB_FILTER_IND = pickle.load(open( file_root + 'pt_' + portion_train
+ '\\FB_FILTER_IND_fold_' + str(fold) +
'_c_ovr_' + str(c_ovr) + '_lambda_' + str(_lambda) +
".pickle", 'rb'))
#find the selected csp filters indicies
FB_FILTER_IND_slt = find_selected_csp_filters(Ns, m, FB_FILTER_IND)
#construct selected csp filters, W_B_slt has shape (2*Ns, num_chls), (8,22) for example
W_B_slt = W_B[FB_FILTER_IND_slt[:,0], :, FB_FILTER_IND_slt[:,1]]
#load in training eeg signals and testing EEG
train_idx = TRAIN_IDX[fold]
test_idx = TEST_IDX[fold]
EEG_train_FB = EEG_filt_FB_go[:,train_idx,:]
EEG_test_FB = EEG_filt_FB_go[:,test_idx,:]
#pick only the eeg signals filtered by selected filter banks
EEG_train_FB_slt = EEG_train_FB[FB_FILTER_IND_slt[:,0], :]
EEG_test_FB_slt = EEG_test_FB[FB_FILTER_IND_slt[:,0], :]
#transform into z space, then take the hilbert envelope of the transformed signal
Z_env_train = calc_z_features(W_B_slt, EEG_train_FB_slt, Ns, down_sample_step)
Z_env_test = calc_z_features(W_B_slt, EEG_test_FB_slt, Ns, down_sample_step)
#concatenate all classes
if c_ovr == C_OVR[0]:
Z_all_classes_train = Z_env_train
Z_all_classes_test = Z_env_test
else:
Z_all_classes_train = np.concatenate((Z_all_classes_train, Z_env_train), axis = 0)
Z_all_classes_test = np.concatenate((Z_all_classes_test, Z_env_test), axis = 0)
#trasnpose into shape (num_trials, num_features, sample points)
Z_all_classes_train = np.transpose(Z_all_classes_train, [1,0,2])
Z_all_classes_test = np.transpose(Z_all_classes_test, [1,0,2])
#save the Z space hilbert envelop signals
if to_save == 1:
pickle.dump( Z_all_classes_train, open(file_root +'pt_' + portion_train
+ '\\Z_all_classes_train_fold_' + str(fold) + '_lambda_' + str(_lambda) + ".pickle", "wb" ) )
pickle.dump( Z_all_classes_test, open(file_root +'pt_' + portion_train
+ '\\Z_all_classes_test_fold_' + str(fold) + '_lambda_' + str(_lambda) + ".pickle", "wb" ) )
# -
eeg_period
np.shape(EEG_filt_FB_go)
plt.plot(EEG_filt_FB_go[2][0][0])
| investigate_feature_eng_gold_stand_gtec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Visualizing the Titanic Disaster
# ### Introduction:
#
# This exercise is based on the titanic Disaster dataset avaiable at [Kaggle](https://www.kaggle.com/c/titanic).
# To know more about the variables check [here](https://www.kaggle.com/c/titanic/data)
#
#
# ### Step 1. Import the necessary libraries
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# %matplotlib inline
# -
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/07_Visualization/Titanic_Desaster/train.csv)
# ### Step 3. Assign it to a variable titanic
# ### Step 4. Set PassengerId as the index
# ### Step 5. Create a pie chart presenting the male/female proportion
# ### Step 6. Create a scatterplot with the Fare payed and the Age, differ the plot color by gender
# ### Step 7. How many people survived?
# ### Step 8. Create a histogram with the Fare payed
# ### BONUS: Create your own question and answer it.
| 07_Visualization/Titanic_Desaster/Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:plantcv] *
# language: python
# name: conda-env-plantcv-py
# ---
# # Interactive Seed Counting Workflow
# ## Part 1: Workflow setup
# Import plantcv
from plantcv import plantcv as pcv
# Set the debugging parameter to plot
pcv.params.debug = "plot"
# Set the notebook display method
# inline = embedded plots, widget = interactive plots
# %matplotlib inline
# Updated text params so that labels are readable
pcv.params.text_size = 50
pcv.params.text_thickness = 35
# Change plot size to be larger to better see each colorspace
import matplotlib
matplotlib.rcParams["figure.figsize"] = [8,8]
# +
# Read image
# Inputs:
# filename - Image file to be read in
# mode - How to read in the image; either 'native' (default), 'rgb', 'gray', or 'csv'
img, path, filename = pcv.readimage(filename="imgs/camelina_seeds.jpg")
# -
# ## Part 1: Create seed mask
# #### Visualize colorspaces
#
# The visualization tool converts the color image into HSV and LAB colorspaces and displays the grayscale channels in a matrix so that they can be visualized simultaneously. The idea is to select a channel that maximizes the difference between the plant and the background pixels.
# Visualize all colorspaces to help determine a thresholding channel
# Inputs:
# rbg_img = original image
# original_img = whether to include the original RGB images in the display: True (default) or False
colorspaces = pcv.visualize.colorspaces(img)
# #### Convert the color image to grayscale
#
# Converts the input color image into the LAB colorspace and returns the l (lightness) channel as a grayscale image.
# Inputs:
# rbg_img - original image
# channel - desired colorspace ('l', 'a', or 'b')
l = pcv.rgb2gray_lab(rgb_img=img, channel="l")
# Inputs:
# img = gray image in selected colorspace
# mask = None (default), or mask
# bins = 100 (default) or number of desired number of evenly spaced bins
# lower-bound = None (default) or minimum value on x-axis
# upper-bound = None (default) or maximum value on x-axis
# title = None (default) or custom plot title
# hist_data = False (default) or True (if frequency distribution data is desired)
hist = pcv.visualize.histogram(img=l)
# #### Threshold the grayscale image
#
# Use a threshold function (binary in this case) to segment the grayscale image into plant (white) and background (black) pixels. Using the histogram above, a threshold point between 120-125 will segment the plant and background peaks. Because the plant has darker pixels in this image, use object_type="dark" to do a traditional threshold.
l_thresh = pcv.threshold.binary(gray_img=l, threshold=120, max_value=255, object_type="dark")
# #### Identify objects within image
#
# From the binary mask, we can identify the object (plant) within the image, which can then be used later for analyses.
# Inputs:
# img = rgb image
# mask = binary mask
plant_obj, plant_hier = pcv.find_objects(img=img, mask=l_thresh)
# #### Identify objects within a region of interest (ROI)
# To eventually combine all of the objects into a singular object that identifies the plant, we need to identify a region of interest (ROI) which will either fully encapsulate or overlap with plant material. This way, if objects are identified due to "salt" noise or other background elements, they will be filtered out. In this case, a rectangular ROI that partially overlaps with the plant object can be used to filter out some of the excess noise around the plant.
# Inputs:
# img = image
# x = leftmost x coordinate of ROI
# y = topmost y coordinate of ROI
# h = height of ROI
# w = width of ROI
roi_contour, roi_hierarchy = pcv.roi.rectangle(img=img, x=0, y=900, h=2168, w=3500)
# Inputs:
# img = image
# roi_type = 'partial' to include objects that partially overlap with the ROI, 'cutto' to exclude any objects that
# are not within the ROI, or 'largest' to keep the largest contour
# roi_contour = pre-defined region of interest
# roi_hierarchy = pre-defined roi hierarchy
# object_contour = plant object
# obj_hierarchy = object hierarchy
objects, hier, seed_mask, obj_area = pcv.roi_objects(img=img, roi_contour=roi_contour, roi_hierarchy=roi_hierarchy,
object_contour=plant_obj, obj_hierarchy=plant_hier, roi_type='partial')
# # Part 2: Detect disks
#
# In this case, detecting disks how to identify camilina seeds and label them in a way that can be annotated downstream in the workflow.
# Inputs:
# bin_img = binary mask image
# ecc_thresh = eccentricity threshold below which a region is detected
discs_mask, discs_coor = pcv.detect_discs(bin_img=seed_mask, ecc_thresh=0.9)
# We can visualize the contents of the disc coordinates output from this function.
print(discs_coor)
# # Part 3: Click count
#
# Using the `ClickCount` class from PlantCV we can interactively annotate the seeds identified in the disc detection step.
# Initialization
seed_counter = pcv.visualize.ClickCount(img=img)
# Import coordinates (if available, you can also populate all annotations by hand optionally)
seed_counter.import_coords(coords=discs_coor, label="total")
# +
# View "total" class and update annotations if desired
seed_counter.view(label="total", color="c", view_all=True)
# -
# Notice one rogue object detected on the right hand side and two at the upper left that need to be deselected. No seeds are unaccounted for which is good!
# Save out coordinates of seeds
seed_counter.save_coords(coord_file="example_seed_coords.json")
| interactive_ClickCount_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Lift and coverage for tops
# %run ../dstools/ml/metrics.py
# +
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
from sklearn.model_selection import train_test_split
import seaborn as sns
sns.set_palette("deep", desat=.6)
ds = pd.read_csv('../datasets/titanic.csv')
features = pd.get_dummies(ds.drop(['survived', 'alive'], axis=1)).fillna(0)
target = ds.survived
split = train_test_split(features, target, test_size=0.5)
x_train, x_test, y_train, y_test = split
y_score = RandomForestClassifier().fit(x_train, y_train).predict_proba(x_test)
lift(y_test, y_score[:, 1], n_buckets=10)
# -
| demo/lift-coverage-table.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
# Visualization of the KO+ChIP Gold Standard from:
# Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells"
# TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load
# NOTE: Default limits networks to TF-TF edges in top 1 TF / gene model (.93 quantile), to see the full
# network hit "restore" (in the drop-down menu in cell below) and set threshold to 0 and hit "threshold"
# You can search for gene names in the search box below the network (hit "Match"), and find regulators ("targeted by")
# Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels
# Change "SVG" to "canvas" to speed up layout operations
# More info about jp_gene_viz and user interface instructions are available on Github:
# https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb
# directory containing gene expression data and network folder
directory = "."
# folder containing networks
netPath = 'Networks'
# network file name
networkFile = 'ChIP_A17_bias50_TFmRNA_sp.tsv'
# title for network figure
netTitle = 'ChIP/sA(Th17), bias = 50_TFmRNA, TFA = TF mRNA'
# name of gene expression file
expressionFile = 'Th0_Th17_48hTh.txt'
# column of gene expression file to color network nodes
rnaSampleOfInt = 'Th17(48h)'
# edge cutoff -- for Inferelator TRNs, corresponds to signed quantile (rank of edges in 15 TFs / gene models),
# increase from 0 --> 1 to get more significant edges (e.g., .33 would correspond to edges only in 10 TFs / gene
# models)
edgeCutoff = .93
# -
import sys
if ".." not in sys.path:
sys.path.append("..")
from jp_gene_viz import dNetwork
dNetwork.load_javascript_support()
# from jp_gene_viz import multiple_network
from jp_gene_viz import LExpression
LExpression.load_javascript_support()
# Load network linked to gene expression data
L = LExpression.LinkedExpressionNetwork()
L.show()
# +
# Load Network and Heatmap
L.load_network(directory + '/' + netPath + '/' + networkFile)
L.load_heatmap(directory + '/' + expressionFile)
N = L.network
N.set_title(netTitle)
N.threshhold_slider.value = edgeCutoff
N.apply_click(None)
N.draw()
# Add labels to nodes
N.labels_button.value=True
# Limit to TFs only, remove unconnected TFs, choose and set network layout
N.restore_click()
N.tf_only_click()
N.connected_only_click()
N.layout_dropdown.value = 'fruchterman_reingold'
N.layout_click()
# Interact with Heatmap
# Limit genes in heatmap to network genes
L.gene_click(None)
# Z-score heatmap values
L.expression.transform_dropdown.value = 'Z score'
L.expression.apply_transform()
# Choose a column in the heatmap (e.g., 48h Th17) to color nodes
L.expression.col = rnaSampleOfInt
L.condition_click(None)
# Switch SVG layout to get line colors, then switch back to faster canvas mode
N.force_svg(None)
# -
| TRN_Notebooks/ChIP_Atac17_bias50_TFmRNA_TFmRNA.ipynb |