code | repo_path |
---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.9 64-bit
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style="darkgrid")
dataframe = pd.read_csv('PODs.csv', delimiter=';', header=0, index_col=0)
dataframe=dataframe.astype(float)
# +
figure, axes = plt.subplots(2, 1,figsize=(6,6))
dataframe.plot( kind='line', y='CU CPU (Millicores)', label='vCU', ax=axes[0])
dataframe.plot(kind='line', y='DU CPU (Millicores)', label='vDU', ax=axes[0])
dataframe.plot(kind='line', y='RU CPU (Millicores)', label='vRU', ax=axes[0])
dataframe.plot(kind='line', y='K8S CPU (Millicores)', label='K8S', ax=axes[0])
dataframe.plot(kind='line', y='OPERATOR CPU (Millicores)', label='OPlaceRAN', ax=axes[0])
axes[0].annotate(r'$\it{t0}$', xy=(0, 0), xytext=(15, 3), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=0, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t1}$', xy=(70, 2002), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=70, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t2}$', xy=(125, 2002), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=125, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t3}$', xy=(180, 1639), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=180, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t4}$', xy=(245, 299), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=245, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t5}$', xy=(300, 1026), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=300, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t6}$', xy=(360, 1068), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=360, linestyle='dotted', color='black', alpha=0.3)
axes[0].annotate(r'$\it{t7}$', xy=(415, 1042), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[0].axvline(x=415, linestyle='dotted', color='black', alpha=0.3)
dataframe.plot(kind='line', y='CU MEMORY (MiBytes)', label='vCU', ax=axes[1])
dataframe.plot(kind='line', y='DU MEMORY (MiBytes)', label='vDU', ax=axes[1])
dataframe.plot(kind='line', y='RU MEMORY (MiBytes)', label='vRU', ax=axes[1])
dataframe.plot(kind='line', y='K8S MEMORY (MiBytes)', label='K8S', ax=axes[1])
dataframe.plot(kind='line', y='OPERATOR MEMORY (MiBytes)', label='OPlaceRAN', ax=axes[1])
axes[1].annotate(r'$\it{t0}$', xy=(0, 94), xytext=(15, 5), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=0, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t1}$', xy=(70, 94), xytext=(15, 5), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=70, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t2}$', xy=(130, 147), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=130, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t3}$', xy=(185, 1396), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=185, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t4}$', xy=(245, 1580), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=245, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t5}$', xy=(300, 1685), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=300, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t6}$', xy=(360, 1688), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=360, linestyle='dotted', color='black', alpha=0.3)
axes[1].annotate(r'$\it{t7}$', xy=(415, 1688), xytext=(15, 15), textcoords='offset points', arrowprops=dict(arrowstyle='->', color='black'), fontsize=10)
axes[1].axvline(x=415, linestyle='dotted', color='black', alpha=0.3)
axes[0].set_ylabel('CPU (Millicores)')
axes[1].set_ylabel('Memory (MiBytes)')
axes[0].set_title(None)
axes[1].set_title(None)
axes[0].set_xticklabels([])
axes[0].set_xlabel(None)
axes[0].legend(loc='center left', prop={'size': 8})
axes[1].legend(prop={'size': 8})
plt.savefig('out/CR_PODS.pdf', bbox_inches='tight')
plt.savefig('out/CR_PODS.png', dpi=300, bbox_inches='tight')
| performance-analysis/article/mini-topo/pods-resources-OAI-vs-K8S-vs-Operator/CR_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda3]
# language: python
# name: conda-env-anaconda3-py
# ---
import pickle
# +
def my_tokenizer(x):
return x.split()
pickle.load(open("../data/m_result/tfkdl_params_correct.pickle", "rb"))
# -
import jieba
import pandas as pd
import sklearn
import numpy as np
data = pd.read_csv("../data/sim_train_seg_process.csv", sep="\t", header=None)
data.columns = ["label", "sent1", "sent2"]
data.head(2)
cosine_val = pickle.load(open("../data/tfkdl_pred_withoutdr", "rb"))
cosine_val
# +
data["sim"] = cosine_val
data.head(2)
# -
data.loc[lambda x: (x.sim >= 0.50) & (x.label == 0), :].sort_values(by="sim", ascending=False)
data.loc[lambda x: (x.sim < 0.5) & (x.label == 1), :].sort_values(by="sim", ascending=False)
| data_explore/.ipynb_checkpoints/tf-kdl-result_explore-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise A — apply numba to the brute-force prime search function
# +
# The pure python version
def nth_prime(n):
n_found = 0
candidate = 2
while True:
good = True
for div in range(2, candidate):
if candidate % div == 0:
good = False
break
if good:
n_found += 1
if n_found == n:
return candidate
# try with the next number
candidate += 1
# -
import numba
@numba.jit
def nth_prime_nb(n):
n_found = 0
candidate = 2
while True:
good = True
for div in range(2, candidate):
if candidate % div == 0:
good = False
break
if good:
n_found += 1
if n_found == n:
return candidate
# try with the next number
candidate += 1
[nth_prime_nb(i) for i in range(1, 10)]
# %timeit nth_prime(1000)
# %timeit nth_prime_nb(1000)
# # Exercise B — apply numba to the sieve prime search function
# +
import numpy as np
def nth_prime_sieve(n):
n_found = 0
candidate = 2
sieve = np.empty(n-1, dtype=int)
while True:
for div in sieve[:n_found]:
if candidate % div == 0:
break
else:
n_found += 1
if n_found == n:
return candidate
sieve[n_found-1] = candidate
# try with the next number
candidate += 1
assert [nth_prime_sieve(i) for i in range(1, 10)] == [2, 3, 5, 7, 11, 13, 17, 19, 23]
# -
# %timeit nth_prime_sieve(1000)
# +
import numpy as np
@numba.jit
def nth_prime_sieve_nb(n):
n_found = 0
candidate = 2
sieve = np.empty(n-1, dtype=int)
while True:
for div in sieve[:n_found]:
if candidate % div == 0:
break
else:
n_found += 1
if n_found == n:
return candidate
sieve[n_found-1] = candidate
# try with the next number
candidate += 1
assert [nth_prime_sieve_nb(i) for i in range(1, 10)] == [2, 3, 5, 7, 11, 13, 17, 19, 23]
# -
# %timeit nth_prime_sieve_nb(1000)
| numba-primes/solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <div align="right"><i>COM418 - Computers and Music</i></div>
# <div align="right"><a href="https://people.epfl.ch/paolo.prandoni"><NAME></a>, <a href="https://www.epfl.ch/labs/lcav/">LCAV, EPFL</a></div>
#
# <p style="font-size: 30pt; font-weight: bold; color: #B51F1F;">Hearing the phase of a sound </p>
# + [markdown] slideshow={"slide_type": "skip"}
# In this notebook we will investigate the effect of phase on the perceptual quality of a sound. It is often said that the human ear is insensitive to phase and that's why most of the equalization in commercial-grade audio equipment takes place in the magnitude domain only.
#
# But is it really so? Let's find out.
# + slideshow={"slide_type": "skip"}
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import IPython
from scipy.io import wavfile
# + slideshow={"slide_type": "skip"}
plt.rcParams["figure.figsize"] = (14,4)
# + [markdown] slideshow={"slide_type": "skip"}
# # Helper functions
#
# We will be synthesizing audio clips so let's set the sampling rate for the rest of the notebook:
# + slideshow={"slide_type": "skip"}
Fs = 16000 # sampling frequency
TWOPI = 2 * np.pi
# + slideshow={"slide_type": "skip"}
import ipywidgets as widgets
def multiplay(clips, rate=Fs, title=None):
outs = [widgets.Output() for c in clips]
for ix, item in enumerate(clips):
with outs[ix]:
print(title[ix] if title is not None else "")
display(IPython.display.Audio(prepare(item), rate=rate))
return widgets.HBox(outs)
# + [markdown] slideshow={"slide_type": "skip"}
# Let's also define a helper function that plays our synthesized clips a bit more gracefully: basically, we want a gentle fade-in and fade-out to avoid the abrupt "clicks" that occur when the data file begins and ends.
#
# Also, there is a "bug" in some versions of IPython whereby audio data is forcibly normalized prior to playing (see [here](https://github.com/ipython/ipython/issues/8608) for details; this may have been solved in the meantime). We want to avoid normalization so that we keep control over the volume of the sound. A way to do so is to make sure that all audio clips have at least one sample at a pre-defined maximum value, and this value is the same for all clips; to achieve this we add a slow "tail" to the data which will not result in an audible sound but will set a common maximum value to all clips.
# + slideshow={"slide_type": "skip"}
def prepare(x, max_value = 3):
N = len(x)
# fade-in and fade-out times max 0.2 seconds
tf = min(int(0.2 * Fs), int(0.1 * N))
for n in range(0, int(tf)):
s = float(n) / float(tf)
x[n] = x[n] * s
x[N-n-1] *= s
# let's append an anti-normalization tail; drawback is one second of silence in the end
x = np.concatenate((x, np.linspace(0, max_value, int(Fs/2)), np.linspace(max_value, 0, int(Fs/2))))
return x
# + [markdown] slideshow={"slide_type": "slide"}
# # Sustained sounds
#
# The first experiment will use sustained sounds, i.e. sounds where the "shape" of the waveform does not change over time:
#
# * a periodic sustained waveform is the sum of harmonically-related sinusoidal components
# * frequency of first component determines pitch
# * relative amplitude of harmonic overtones determines timbre
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## A simple clarinet model
#
# <img src="img/clarinet.png" style="float: right; width: 400px; margin: 20px 30px;"/>
#
#
# * simple additive synthesis
# * only odd multiples of the fundamental (see [here](http://www.phy.mtu.edu/~suits/clarinet.html))
# * we will use just five components
# + slideshow={"slide_type": "slide"}
def clarinet(f, phase = []):
# length in seconds of audio clips
T = 3
# we will keep 5 harmonics and the fundamental
# amplitude of components:
ha = [0.75, 0.5, 0.14, 0.5, 0.12, 0.17]
# phase
phase = np.concatenate((phase, np.zeros(len(ha)-len(phase))))
x = np.zeros((T * Fs))
# clarinet has only odd harmonics
n = np.arange(len(x))
for k, h in enumerate(ha):
x += h * np.sin(phase[k] + TWOPI * (2*k + 1) * (float(f)/Fs) * n)
return x
# + slideshow={"slide_type": "slide"}
# fundamental frequency: D4
D4 = 293.665
x = clarinet(D4)
# let's look at the waveform, nice odd-harmonics shape:
plt.plot(x[0:300])
plt.show()
# and of course we can play it (using our preparing function):
IPython.display.Audio(prepare(x), rate=Fs)
# + [markdown] slideshow={"slide_type": "skip"}
# Ok, so it's not the best clarinet sound in the universe but it's not bad for just a few lines of code!
# + [markdown] slideshow={"slide_type": "slide"}
# ## Changing the phase
#
# * random phase offsets for each component
# * waveform completely different in time domain
# * can you hear the difference?
# +
xrp = clarinet(D4, [3.84, 0.90, 3.98, 4.50, 4.80, 2.96])
plt.plot(xrp[0:300])
plt.show()
# + slideshow={"slide_type": "slide"}
multiplay([xrp, x], title=['random phase', 'original'])
# + [markdown] slideshow={"slide_type": "skip"}
# OK, so it seems that phase is not important after all. To check once again, run the following notebook cell as many times as you want and see if you can tell the difference between the original zero-phase and a random-phase sustained note (the phases will be different every time you run the cell):
# + slideshow={"slide_type": "slide"}
xrp = clarinet(D4, np.random.rand(6) * TWOPI)
plt.plot(xrp[0:300])
plt.show()
multiplay([xrp, x], title=['random phase', 'original'])
# + [markdown] slideshow={"slide_type": "slide"}
# # Dynamic sounds
#
# <img src="img/piano.jpg" style="float: right; width: 400px; margin: 20px 30px;"/>
#
# In the second experiment we will use real-world dynamic sounds, i.e. sounds that display time-varying characteristics. Typically, a physical musical instrument will produce sounds whose envelope displays four subsequent portions:
#
# * the **attack** time is the time taken for the sound to go from silence to max amplitude
# * the **decay** time is the time taken for the sound to decrease to sustain level
# * the **sustain** time is the time during which the sound is kept at the same amplitude
# * the **release** time is the time taken for the sound to go to zero after the stimulation is stopped (a small sketch of such an envelope follows below).
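# + [markdown] slideshow={"slide_type": "skip"}
# A minimal illustrative sketch (not part of the original notebook): a piecewise-linear ADSR envelope applied to a plain sine tone. The helper name `adsr` and the segment durations and sustain level below are arbitrary choices.
# + slideshow={"slide_type": "skip"}
def adsr(n_samples, attack=0.05, decay=0.1, sustain_level=0.6, release=0.3, rate=Fs):
    # piecewise-linear ADSR envelope; attack, decay and release are durations in seconds
    a, d, r = int(attack * rate), int(decay * rate), int(release * rate)
    s = max(n_samples - a - d - r, 0)  # remaining samples form the sustain portion
    return np.concatenate((
        np.linspace(0, 1, a),               # attack: silence -> maximum amplitude
        np.linspace(1, sustain_level, d),   # decay: maximum -> sustain level
        np.full(s, sustain_level),          # sustain: constant amplitude
        np.linspace(sustain_level, 0, r)))  # release: sustain level -> silence

tone = np.sin(TWOPI * 440.0 / Fs * np.arange(Fs))  # one second of A440
plt.plot(adsr(len(tone)) * tone)
plt.show()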
# + [markdown] slideshow={"slide_type": "skip"}
# Consider for instance a piano note: the attack time is very quick (the hammer hits the string); the decay is quite rapid as the string settles into harmonic equilibrium but there is no sustain since once the hammer hits, the stimulation ends. So a piano note has a distinct volume envelope that rises very fast and then releases slowly:
# + slideshow={"slide_type": "slide"}
from scipy.io import wavfile
Fs, x = wavfile.read("snd/piano.wav")
plt.plot(x)
plt.show()
IPython.display.Audio(x, rate=Fs)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Changing the phase
#
# The "shape" of a waveform in time is determined by the phase as we saw with the clarinet.
#
# To alter the phase of the real piano sound:
# * compute the DFT of the sound
# * set the phase to arbitrary values
# * compute the inverse DFT
# + slideshow={"slide_type": "skip"}
# first some prep work; let's make sure that
# the length of the signal is even
# (it will be useful later)
if len(x) % 2 != 0:
x = x[:-1]
# let's also store the maximum value for our
# "prepare" function
mv = int(max(abs(x)) * 1.2)
# + slideshow={"slide_type": "slide"}
# Let's take the Fourier transform
X = np.fft.fft(x)
# we can plot the DFT and verify we have a nice
# harmonic spectrum
plt.plot(np.abs(X[0:int(len(X)/2)]))
plt.show()
# + slideshow={"slide_type": "slide"}
# now we set the phase to zero; we just need to
# take the magnitude of the DFT
xzp = np.fft.ifft(np.abs(X))
# in theory, xzp should be real; however, because
# of numerical imprecision, we're left with some imaginary crumbs:
print (max(np.imag(xzp)) / max(np.abs(xzp)))
# + slideshow={"slide_type": "slide"}
# the imaginary part is negligible, as expected,
# so let's just get rid of it
xzp = np.real(xzp)
# and now we can plot:
plt.plot(xzp)
plt.show()
# -
IPython.display.Audio(prepare(xzp, mv), rate=Fs)
# + [markdown] slideshow={"slide_type": "slide"}
# Gee, what happened?!? Well, by removing the phase, we have destroyed the timing information that, for instance, made the sharp attack possible (mathematically, note that by creating a zero-phase spectrum we did obtain a symmetric signal in the time domain!).
#
# If we play the waveform, we can hear that the pitch and some of the timbral quality have been preserved (after all, the magnitude spectrum is the same), but the typical piano-like envelope has been lost.
# + [markdown] slideshow={"slide_type": "slide"}
# We can amuse ourselves with even more brutal phase mangling: let's for instance set a random phase for each DFT component. The only tricky thing here is that we need to preserve the Hermitian symmetry of the DFT in order to have a real-valued time-domain signal:
# +
# we know the signal is even-length so we need to build
# a phase vector of the form [0 p1 p2 ... p(M-1) pM -p(M-1) ... -p2 -p1]
# where M = len(x)/2 (the entry at index M is the Nyquist bin)
ph = np.random.rand(int(len(x) / 2) ) * TWOPI * 1j
# tricky but cute Python slicing syntax...
ph = np.concatenate(([0], ph, -ph[-2::-1]))
# now let's add the phase offset and take the IDFT
xrp = np.fft.ifft(X * np.exp(ph))
# always verify that the imaginary part is only roundoff error
print (max(np.imag(xrp))/max(np.abs(xrp)))
# + slideshow={"slide_type": "slide"}
xrp = np.real(xrp)
plt.plot(xrp)
plt.show()
IPython.display.Audio(prepare(xrp, mv), rate=Fs)
# + [markdown] slideshow={"slide_type": "skip"}
# Pretty bad, eh? So, in conclusion, phase is very important to the temporal aspects of the sound, but not so important for sustained sounds. In fact, the brain processes the temporal and spectral cues of sound very differently: when we concentrate on attacks and sound envelope, the brain uses time-domain processing, whereas for pitch and timbre, it uses primarily the magnitude of the spectrum!
| AudioPhase/AudioPhase.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Occupation
# ### Introduction:
#
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
import numpy as np
import pandas as pd
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/u.user).
# ### Step 3. Assign it to a variable called users.
users=pd.read_csv('../../exercise_data/occupy.csv',sep='|')
users.head()
# ### Step 4. Discover what is the mean age per occupation
users.groupby('occupation').age.mean()
# ### Step 5. Discover the Male ratio per occupation and sort it from the most to the least
# +
'''
Get the total headcount per occupation with count(),
then compute the number of males (M).
'''
def num_M(x):
if x=='M':
return 1
else :
return 0
users['num_M']=users.gender.apply(num_M)
'''
Group by occupation and sum the number of males,
then divide by
the total count per occupation.
'''
a=users.groupby('occupation').num_M.sum()
b=users.occupation.value_counts()
print(a/b*100)
# -
# ### Step 6. For each occupation, calculate the minimum and maximum ages
users.groupby('occupation').age.agg([np.min,np.max])
# ### Step 7. For each combination of occupation and gender, calculate the mean age
users.groupby(['occupation','gender']).age.mean()
# grouping by multiple columns
# ### Step 8. For each occupation present the percentage of women and men
# +
Men_nums=users.groupby('occupation').num_M.sum()
Women_nums=users.occupation.value_counts()-Men_nums
round(Men_nums/Women_nums,4).sort_values()
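# -
# +
# Illustrative cross-check (a sketch, not part of the original solution): the same split
# expressed directly as per-occupation percentages with a normalized crosstab, using the
# `users` frame loaded above.
gender_pct = pd.crosstab(users.occupation, users.gender, normalize='index') * 100
gender_pct.round(2)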
| 03_Grouping/Occupation/Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 06: Setting spatially varying fields
#
# > Interactive online tutorial:
# > [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/ubermag/discretisedfield/master?filepath=docs%2Fipynb%2Findex.ipynb)
#
# There are several different ways in which a spatially varying field can be defined. Let us first define the mesh we are going to use to define the fields.
# +
import discretisedfield as df
p1 = (-50, -50, -50)
p2 = (50, 50, 50)
n = (2, 2, 2)
mesh = df.Mesh(p1=p1, p2=p2, n=n)
# -
# ## Using a Python function
#
# One way to define a spatially varying field is by using a Python function, which can be passed as the `value` argument to `discretisedfield.Field`. It should satisfy three main criteria:
# 1. It takes one argument. `discretisedfield.Field` is going to pass the coordinates of discretisation cells as tuples of length 3 to this argument.
# 2. The function should be able to return a value for any coordinate in the mesh.
# 3. The value returned must be of the same dimension as the dimension of the field.
#
# Let us assume we want to have a scalar field which has a value 0 for all points with negative $x$ coordinate and value 1 otherwise.
#
# $$
# f(x, y, z)=
# \begin{cases}
# 0, & \text{if}\ x<0 \\
# 1, & \text{otherwise}
# \end{cases}
# $$
#
# The Python function is then:
def my_value_function(pos):
x, y, z = pos
if x < 0:
return 0
else:
return 1
# After defining the value function, we can define the field.
field = df.Field(mesh, dim=1, value=my_value_function)
# If we sample the field at a point with a negative value of $x$, we get 0.
field((-10, 5, 5))
# If the $x$ coordinate is positive, we get 1.
field((25, -3, 14))
# The array now has different values
field.array
# ### Value property
#
# It is not very informative to look at `discretisedfield.Field.array` to understand what the actual value of the field is. Therefore, if a unique representation value exists, `discretisedfield.Field.value` is going to return it. For instance:
field.value
# The source code of this function can be seen as
import inspect
print(inspect.getsource(field.value))
# Now, if we change the value of the field as
field.value = 5
# the value of the field is changed
field.value
# as well as the underlying array
field.array
# If we directly change the value of a single discretisation cell via the array
field.array[0, 0, 0, 0] = 1
field.array
# no unique representation exists and `field.value` returns an array.
field.value
# Similar to scalar fields, a Python function can be used to set the value of a vector field. This time, the function should return three-dimensional values.
def vector_value_function(pos):
x, y, z = pos
vx = x
vy = x*y
vz = x*y*z
return (vx, vy, vz)
# This function can now be used at the definition of the field:
field = df.Field(mesh, dim=3, value=vector_value_function)
# Its value is now:
field.value
field.array
# ## Using mesh regions
#
# If regions were defined as a part of the mesh, and we want to set the value of the field differently in those regions, we can employ some of the functionality of regions. Let us assume that in the mesh we defined we want to have two regions. Region 1 is going to include all cells with negative $y$ coordinate and region 2 cells with positive $y$ coordinate. Our mesh would be:
regions = {'region1': df.Region(p1=(-50, -50, -50), p2=(50, 0, 50)),
'region2': df.Region(p1=(-50, 0, -50), p2=(50, 50, 50))}
mesh = df.Mesh(p1=p1, p2=p2, n=n, regions=regions)
# Python function employing these regions can now be
def regions_function(pos):
if pos in mesh.regions['region1']:
return (1, 0, 0)
elif pos in mesh.regions['region2']:
return (0, 1, 0)
else:
return (0, 0, 0)
# We can now pass this function to the `discretisedfield.Field` class
field = df.Field(mesh, dim=3, value=regions_function)
# For a negative value of $y$, we get:
field((10, -10, 10))
# And for positive:
field((10, 30, 10))
# Another way of setting the field is passing the dictionary as a value to the field. However, there are several warnings that must be taken care of:
# 1. Region names must be the same as defined regions in `discretisedfield.Mesh`.
# 2. Only those points in the mesh which belong to one of the regions will be set. If there is a point which is not in any of the regions, its value will remain unchanged.
region_values = {'region1': (1, 1, 1), 'region2': (2, 2, 2)}
field.value = region_values
# Now, we can sample points in two regions.
field((-10, -10, -10))
field((10, 10, 10))
# ## Using another Field object
#
# Sometimes it is necessary to "resample" the field using a different mesh. Another field can be passed as a value to the new field. If our new mesh is:
p1 = (-10, -10, -10)
p2 = (10, 10, 10)
cell = (5, 5, 5)
new_mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
# The field we initialised previously has the value
field.array
# We can now resample that field as
new_field = df.Field(new_mesh, dim=3, value=field)
# The values are now
new_field.array.shape
new_field((-5, -5, -5))
new_field((5, 5, 5))
# ## Other
#
# Full description of all existing functionality can be found in the [API Reference](https://discretisedfield.readthedocs.io/en/latest/api_documentation.html).
| docs/ipynb/06-tutorial-spatially-varying-field.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["header"]
# <table width="100%">
# <tr style="border-bottom:solid 2pt #009EE3">
# <td style="text-align:left" width="10%">
# <a href="classification_game_orange.dwipynb" download><img src="../../images/icons/download.png"></a>
# </td>
# <td style="text-align:left" width="10%">
# <a href="https://mybinder.org/v2/gh/biosignalsnotebooks/biosignalsnotebooks/master?filepath=header_footer%2Fbiosignalsnotebooks_environment%2Fcategories%2FTrain_and_Classify%2Fclassification_game_orange.dwipynb" target="_blank"><img src="../../images/icons/program.png" title="Be creative and test your solutions !"></a>
# </td>
# <td></td>
# <td style="text-align:left" width="5%">
# <a href="../MainFiles/biosignalsnotebooks.ipynb"><img src="../../images/icons/home.png"></a>
# </td>
# <td style="text-align:left" width="5%">
# <a href="../MainFiles/contacts.ipynb"><img src="../../images/icons/contacts.png"></a>
# </td>
# <td style="text-align:left" width="5%">
# <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank"><img src="../../images/icons/github.png"></a>
# </td>
# <td style="border-left:solid 2pt #009EE3" width="15%">
# <img src="../../images/ost_logo.png">
# </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_title"]
# <link rel="stylesheet" href="../../styles/theme_style.css">
# <!--link rel="stylesheet" href="../../styles/header_style.css"-->
# <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css">
#
# <table width="100%">
# <tr>
# <td id="image_td" width="15%" class="header_image_color_7"><div id="image_img"
# class="header_image_7"></div></td>
# <td class="header_text"> Stone, Paper or Scissor Game - Train and Classify [Orange] </td>
# </tr>
# </table>
# + [markdown] tags=["intro_info_tags"]
# <div id="flex-container">
# <div id="diff_level" class="flex-item">
# <strong>Difficulty Level:</strong> <span class="fa fa-star checked"></span>
# <span class="fa fa-star checked"></span>
# <span class="fa fa-star checked"></span>
# <span class="fa fa-star"></span>
# <span class="fa fa-star"></span>
# </div>
# <div id="tag" class="flex-item-tag">
# <span id="tag_list">
# <table id="tag_list_table">
# <tr>
# <td class="shield_left">Tags</td>
# <td class="shield_right" id="tags">train_and_classify☁machine-learning☁features☁train☁nearest-neighbour☁orange</td>
# </tr>
# </table>
# </span>
# <!-- [OR] Visit https://img.shields.io in order to create a tag badge-->
# </div>
# </div>
# + [markdown] tags=["test"]
# Through the set of 4 <span class="color4"><strong>Jupyter Notebooks</strong></span> (listed below) that are part of our <strong>"Stone, Paper or Scissor Classification Game"</strong>, a reasonable understanding of the different <strong>Machine Learning</strong> stages that need to be followed in order to train an effective classification system can be reached.
#
# <span class="color4"><strong>Previous Notebooks that are part of "Stone, Paper or Scissor Game - Train and Classify" module</strong></span>
# <ul>
# <li><a href="classification_game_volume_1.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 1] | Experimental Setup <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
# <li><a href="classification_game_volume_2.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 2] | Feature Extraction <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
# <li><a href="classification_game_volume_3.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 3] | Train and Classify <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
# <li><a href="classification_game_volume_4.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 4] | Performance Evaluation <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a></li>
# </ul>
#
# All the previous <span class="color4"><strong>Jupyter Notebooks</strong></span> are focused on the application of <a href="https://scikit-learn.org/stable/index.html">scikit-learn <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a> (<span class="color1"><strong>Python</strong></span>) functionalities.
#
# However, the <span class="color4"><strong>Anaconda</strong></span> toolbox (presented at <a href="../Install/prepare_anaconda.ipynb"><strong>Download, Install and Execute Anaconda <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>) includes a very intuitive and graphical resource called <a href="https://anaconda.org/anaconda/orange"><span class="color13"><strong>Orange <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></span></a>, which can be an interesting tool to complement the 4 volumes of our <span class="color7"><strong>"Stone, Paper or Scissor Game - Train and Classify" module</strong></span>.
#
# In the current <span class="color4"><strong>Jupyter Notebook</strong></span>, a very quick presentation of <span class="color13"><strong>Orange</strong></span> will be given.
# -
# <hr>
# <p class="steps">PR - Installation of <span class="color13"><strong>Orange</strong></span> through <span class="color4"><strong>Anaconda</strong></span>
# <br>PR1 - Launch <span class="color4">Anaconda Navigator</span></p>
# <span class="color13" style="font-size:30px">⚠</span> You will find <span class="color4"><strong>Anaconda Navigator</strong></span> by using the search engine of your operating system and typing <strong>"Anaconda Navigator"</strong>. On the following animation, as an illustrative example, we show how to find it on <span class="color1"><strong>Microsoft Windows 10</strong></span>.
# <video id="video_1" muted loop src="../../images/other/orange_install_launch.mp4" class="video"></video>
# + tags=["hide_both"] language="javascript"
# document.getElementById("video_1").play()
# -
# <p class="steps">PR2 - Click on "Install" button below the <span class="color13"><strong>Orange</strong></span> icon</p>
# <img src="../../images/other/orange_install_button.gif">
# <p class="steps">PR3 - Please, wait a few moments for the end of installation procedure</p>
# When the installation is finished the <strong>"Launch"</strong> button becomes available below <span class="color13"><strong>Orange</strong></span> icon !
# <img src="../../images/other/orange_install_finish.gif">
# <hr>
# <p class="steps">1 - Start <span class="color13"><strong>Orange</strong></span> by clicking on "Launch" button</p>
# <p class="steps">2 - Click on "New" icon in order to create a new project</p>
# <img src="../../images/other/orange_new_project.png">
# <strong>For training our classifier we need to be in possession of training data, i.e., multiple training examples of each class objectively described by a set of features (as demonstrated at</strong> <a href="classification_game_volume_2.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 2] | Feature Extraction <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>)
#
# In the following steps we will try to replicate, using <span class="color13"><strong>Orange</strong></span>, the training process of a <strong>k-Nearest Neighbour</strong> classifier (<a href="classification_game_volume_3.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 3] | Train and Classify <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>) and the performance evaluation of the trained system (<a href="../Evaluate/classification_game_volume_4.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 4] | Performance Evaluation <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>).
#
# As a starting point, we will use the extracted list of features on <a href="classification_game_volume_2.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 2] | Feature Extraction <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>.
#
# <span class="color13" style="font-size:30px">⚠</span> The features list file should be in a compatible format to be read by <span class="color13"><strong>Orange</strong></span>. For our example we choose <strong>.tab</strong> file format, where each row <span class="color4"><strong>j</strong></span> represents a training example and each column <span class="color1"><strong>i</strong></span> (consecutive columns are separated by a <span class="color7"><strong>tab</strong></span>) contains the value of the feature number <span class="color4"><strong>j</strong></span> for the respective training examples <span class="color1"><strong>i</strong></span>.
#
# Below, our file is shown, with 20 training examples (number of rows) and the 13 selected features (number of columns).
# + tags=["hide_in"]
# Embedding of .pdf file
from IPython.display import IFrame
IFrame(src="../../signal_samples/classification_game/features/classification_game_features_final_orange.txt", width="100%", height="350")
# -
# <p class="steps">3 - Import a file, with training example data, into <span class="color13">Orange</span> </p>
# Click on "File" icon located at "Data" tab
# <img src="../../images/other/orange_import_file.gif">
# <p class="steps">4 - Double-click the new "File" icon added to our workspace</p>
# A new screen will appear, which enables the specification of features file location !
# <img src="../../images/other/orange_import_file_click.gif">
# <p class="steps">5 - Specify which column contain the target variable (list with class labels of each training example)</p>
# Scroll down the list until finding the target variable. Then change the <strong>"Role"</strong> to <span class="color1"><strong>target</strong></span>.
# <img src="../../images/other/orange_target_variable.gif">
# <p class="steps">6 - Define a name for each column/feature</p>
# It is really simple, at each row you only need to double-click on the predefined name and the edit functionality will be automatically enabled.
# <img src="../../images/other/orange_rename_features.gif">
# <p class="steps">7 - After renaming all the features, close the window in order to confirm your choices and proceed the configuration procedure</p>
# <img src="../../images/other/orange_import_file_confirm.gif">
# <p class="steps">8 - For visualising how different pairs of variables are "separating" the four classes, let's explore a visualisation functionality of <span class="color13">Orange</span>
# <br>8.1 - Access <span class="color7">Visualize</span> tab</p>
# Located at the left side of <span class="color13"><strong>Orange</strong></span> window.
# <img src="../../images/other/orange_visualise_tab.gif">
# <p class="steps">8.2 - Click on <span class="color1">"Scatter Plot"</span> icon</p>
# <img src="../../images/other/orange_scatter_select.gif">
# <p class="steps">8.3 - Link the two elements <span class="color7">"File"</span> and <span class="color1">"Scatter Plot"</span></p>
# <img src="../../images/other/orange_link_elements.gif">
# <p class="steps">8.4 - Double-click on <span class="color1">"Scatter Plot"</span> icon for visualising the distribution of classes for each pair of features</p>
# <img src="../../images/other/orange_visualise_pairs.gif">
# <p class="steps">9 - After preparing our classification environment we can select our model and advance to the respective training</p>
# The selected model was the <strong>k-Nearest Neighbour</strong> classifier (<a href="classification_game_volume_3.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 3] | Train and Classify <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>)
#
# <p class="steps">9.1 - Access <span class="color8">Model</span> tab</p>
# Located at the left side of <span class="color13"><strong>Orange</strong></span> window.
# <img src="../../images/other/orange_model_tab_click.gif">
# <p class="steps">9.2 - Drag <span class="color13">kNN</span> icon to the working environment</p>
# <img src="../../images/other/orange_model_knn.gif">
# <p class="steps">9.3 - Double-click on <span class="color13">kNN</span> icon for prompting the configuration window of our model</p>
# We will keep the predefined values, choosing k (number of neighbours) as 5 and the "Euclidean Norm" to estimate the distance between testing point and training examples.
# <img src="../../images/other/orange_model_knn_settings.gif">
# <p class="steps">9.4 - Link <span class="color7">"File"</span> icon with <span class="color13">"kNN"</span> icon</p>
# With this operation we are specifying the inputs (training data) that will be used to train our model.
# <img src="../../images/other/orange_link_file_model.gif">
# <p class="steps">10 - There is only one step remaining... "Evaluation" our classifier</p>
# Repeating the methodology of <a href="../Evaluate/classification_game_volume_4.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 4] | Performance Evaluation <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a>, we will use a <strong>Leave One Out Strategy</strong> to test the accuracy of our classifier.
#
# <p class="steps">10.1 - Open <span class="color1">"Evaluate"</span> tab</p>
# Located at the left side of <span class="color13"><strong>Orange</strong></span> window.
# <img src="../../images/other/orange_evaluate_click.gif">
# <p class="steps">10.2 - Drag <span class="color4">"Test & Score"</span> to the working environment</p>
# <img src="../../images/other/orange_evaluate_drag.gif">
# <p class="steps">10.3 - Link <span class="color7">"File"</span> and <span class="color13">"kNN"</span> icons with <span class="color4">"Test & Score"</span> icon</p>
# Essentially, what we are doing is specifying the training data and the model that will be fitted to it.
# <img src="../../images/other/orange_link_data_model_to_test.gif">
# <p class="steps">10.4 - Double-click on <span class="color4">"Test & Score"</span> icon to reach the testing results screen</p>
#
# <p class="steps">10.5 - Specify the desired test procedure</p>
# As previously mentioned, we will choose the <strong>"Leave One Out"</strong> strategy
# <img src="../../images/other/orange_evaluate_results.gif">
# A <span class="color1"><strong>"Classification Accuracy"</strong></span> of 90 % was obtained, which is identical to the obtained value at <a href="../Evaluate/classification_game_volume_4.ipynb"><strong>Stone, Paper or Scissor Game - Train and Classify [Volume 4] | Performance Evaluation <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></strong></a> when a set with less features is used.
# <img src="../../images/other/orange_evaluate_accuracy.png">
# We have reached the end of our introductory journey through the <span class="color13"><strong>Orange</strong></span> environment and, as you can see, it is amazing and extremely intuitive.
#
# However we only explored a small fraction of the full capabilities of this environment.
#
# With the previous steps the contents described on <strong>"Classification Game" Volumes 2 to 4</strong> are replicated through a more graphical perspective.
#
# <strong><span class="color7">We hope that you have enjoyed this guide. </span><span class="color2">biosignalsnotebooks</span><span class="color4"> is an environment in continuous expansion, so don't stop your journey and learn more with the remaining <a href="../MainFiles/biosignalsnotebooks.ipynb">Notebooks <img src="../../images/icons/link.png" width="10px" height="10px" style="display:inline"></a></span></strong> !
# + [markdown] tags=["footer"]
# <hr>
# <table width="100%">
# <tr>
# <td style="border-right:solid 3px #009EE3" width="20%">
# <img src="../../images/ost_logo.png">
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/aux_files/biosignalsnotebooks_presentation.pdf" target="_blank">☌ Project Presentation</a>
# <br>
# <a href="https://github.com/biosignalsnotebooks/biosignalsnotebooks" target="_blank">☌ GitHub Repository</a>
# <br>
# <a href="https://pypi.org/project/biosignalsnotebooks/" target="_blank">☌ How to install biosignalsnotebooks Python package ?</a>
# <br>
# <a href="../MainFiles/signal_samples.ipynb">☌ Signal Library</a>
# </td>
# <td width="40%" style="text-align:left">
# <a href="../MainFiles/biosignalsnotebooks.ipynb">☌ Notebook Categories</a>
# <br>
# <a href="../MainFiles/by_diff.ipynb">☌ Notebooks by Difficulty</a>
# <br>
# <a href="../MainFiles/by_signal_type.ipynb">☌ Notebooks by Signal Type</a>
# <br>
# <a href="../MainFiles/by_tag.ipynb">☌ Notebooks by Tag</a>
# </td>
# </tr>
# </table>
# + tags=["hide_both"]
from biosignalsnotebooks.__notebook_support__ import css_style_apply
css_style_apply()
# + tags=["hide_both"] language="html"
# <script>
# // AUTORUN ALL CELLS ON NOTEBOOK-LOAD!
# require(
# ['base/js/namespace', 'jquery'],
# function(jupyter, $) {
# $(jupyter.events).on("kernel_ready.Kernel", function () {
# console.log("Auto-running all cells-below...");
# jupyter.actions.call('jupyter-notebook:run-all-cells-below');
# jupyter.actions.call('jupyter-notebook:save-notebook');
# });
# }
# );
# </script>
| notebookToHtml/biosignalsnotebooks_html_publish/Categories_/Train_and_Classify/classification_game_orange.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="cx82H4OmEZVR" colab_type="text"
# # Data analysis and relationships between variables.
# + [markdown] id="gqdtISDZFmhw" colab_type="text"
# ## Importing libraries and data
#
# Using our ESIOS_contoller.py library, we import our latest dataset and parse it for use. It works both in Drive and in Jupyter.
# + id="o2xOODokFkmg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="6892c4b2-d957-4f42-c699-4b45c787b922" executionInfo={"status": "ok", "timestamp": 1565634503027, "user_tz": -120, "elapsed": 2855, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-jjbI5e-2QHY/AAAAAAAAAAI/AAAAAAAADZI/IB6lKN_AuUI/s64/photo.jpg", "userId": "10058377044009387405"}}
import json, urllib, datetime, pickle, time
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import *
from keras.models import *
from keras.layers import *
from sklearn.preprocessing import *
from keras.optimizers import *
from scipy.stats import *
from importlib.machinery import SourceFileLoader
try:
from google.colab import drive
drive.mount('/content/drive')
path = '/content/drive/My Drive/TFM/Utils/ESIOS_contoller.py'
in_colab = True
except:
path = '../utils/ESIOS_contoller.py'
in_colab = False
esios_assembler = SourceFileLoader('esios', path).load_module()
esios_controller = esios_assembler.ESIOS(in_colab)
data_consumo = esios_controller.get_data()
# + [markdown] id="9AOyGsv9Qx20" colab_type="text"
# ## Study of the overall price variable
# Let's look at the relationships and the metadata associated with the price variable.
# + id="z9TmIt7MLhzN" colab_type="code" outputId="1f04fc3a-d052-443a-9719-2840fb23326d" executionInfo={"status": "ok", "timestamp": 1565634505871, "user_tz": -120, "elapsed": 507, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-jjbI5e-2QHY/AAAAAAAAAAI/AAAAAAAADZI/IB6lKN_AuUI/s64/photo.jpg", "userId": "10058377044009387405"}} colab={"base_uri": "https://localhost:8080/", "height": 170}
x = data_consumo['date_timestamp']
data_pvpc = data_consumo['PVPC_DEF']
data_pvpc.describe()
# + id="xHsGDxhIPQiX" colab_type="code" outputId="ccb4de13-42d4-40ce-eb4e-9da76c3fed73" executionInfo={"status": "ok", "timestamp": 1565634507730, "user_tz": -120, "elapsed": 921, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-jjbI5e-2QHY/AAAAAAAAAAI/AAAAAAAADZI/IB6lKN_AuUI/s64/photo.jpg", "userId": "10058377044009387405"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
sns.distplot(data_pvpc)
# + id="fD8OG28-RlXS" colab_type="code" outputId="8c948d4c-116d-447d-edb0-abcc95dd8ec7" executionInfo={"status": "ok", "timestamp": 1565634512207, "user_tz": -120, "elapsed": 5056, "user": {"displayName": "<NAME>", "photoUrl": "https://lh4.googleusercontent.com/-jjbI5e-2QHY/AAAAAAAAAAI/AAAAAAAADZI/IB6lKN_AuUI/s64/photo.jpg", "userId": "10058377044009387405"}} colab={"base_uri": "https://localhost:8080/", "height": 301}
sns.lineplot(x="date_timestamp", y="PVPC_DEF",data=data_consumo)
# + id="4uzqokYtUEWk" colab_type="code" outputId="138093df-213f-4bb7-d36b-65305e11f9c0" executionInfo={"status": "ok", "timestamp": 1565552858068, "user_tz": -120, "elapsed": 15519, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02399746530348164073"}} colab={"base_uri": "https://localhost:8080/", "height": 464}
sns.jointplot(x="date_timestamp", y="PVPC_DEF", kind='reg', data=data_consumo, joint_kws={'line_kws':{'color':'red'}})
# + id="v3nqsOB0RPI6" colab_type="code" outputId="d353eac3-b5ce-4aff-b0dc-7824f28a4983" executionInfo={"status": "error", "timestamp": 1565552858075, "user_tz": -120, "elapsed": 15502, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02399746530348164073"}} colab={"base_uri": "https://localhost:8080/", "height": 163}
sns.kdeplot(x, y, shade=True)
# + [markdown] id="0wdvKEhwWrD0" colab_type="text"
# ## Study of the price variable at the weekly and monthly level
# + id="K7SXsRsIW9DW" colab_type="code" outputId="02db91ef-f390-4249-aa2c-9ea2d53ce118" executionInfo={"status": "ok", "timestamp": 1565552886120, "user_tz": -120, "elapsed": 2230, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02399746530348164073"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
data_tiempo_dia = data_consumo.loc[1004:1022,'fecha']
data_pvpc_dia = data_consumo.loc[1004:1022,'PVPC_DEF']
data_tiempo_semana = data_consumo.loc[1004:1172,'fecha']
data_pvpc_semana = data_consumo.loc[1004:1172,'PVPC_DEF']
df = sns.lineplot(data_tiempo_dia, data_pvpc_dia)
labels = df.get_xticklabels()
plt.setp(labels, rotation=90)
# + id="JtcVXeIAZV6h" colab_type="code" outputId="2683f410-210d-4016-fe49-ce530e571fd0" executionInfo={"status": "ok", "timestamp": 1565552889617, "user_tz": -120, "elapsed": 5701, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02399746530348164073"}} colab={"base_uri": "https://localhost:8080/", "height": 297}
sns.lineplot(data_tiempo_semana, data_pvpc_semana)
# + [markdown] id="9yBprV1FaPuH" colab_type="text"
# ## Outlier detection
# We can see that the price variable has many outliers, which will considerably reduce the accuracy of our model. The study will need to be run both with and without regularizing them.
# + id="N3DSOdA7aSLN" colab_type="code" outputId="b4b3bd47-02b8-425b-aa95-8dd5738d5906" executionInfo={"status": "ok", "timestamp": 1565552889621, "user_tz": -120, "elapsed": 5685, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "02399746530348164073"}} colab={"base_uri": "https://localhost:8080/", "height": 297}
sns.boxplot(data_consumo['PVPC_DEF'])
# + id="Qi-7rZfoe0B4" colab_type="code" colab={}
| analisis_datos/estudio_del_precio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Clustering
#
# Last class we studied document vectors and how to find key words and similar documents. What else can we do with vectors? We can cluster them to find natural groups or categories, or visualize them directly by projecting them to 2D or 3D space.
import pandas as pd
import numpy as np
from textblob import TextBlob
from sklearn.feature_extraction.text import TfidfVectorizer
# We'll start by getting tf-idf vectors for the Menendez press releases, like we did last class.
pr = pd.read_csv('menendez-press-releases.csv')
len(pr)
# +
# need a tokenizer
# +
# create tf-idf vectors
# -
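# +
# A sketch of what the two cells above might contain (the 'text' column name and these
# variable names are assumptions, not from the original class): a TextBlob-based tokenizer
# and a tf-idf matrix over the press releases.
def textblob_tokenizer(s):
    return [w.lemmatize().lower() for w in TextBlob(s).words]

vectorizer = TfidfVectorizer(tokenizer=textblob_tokenizer, stop_words='english')
tfidf = vectorizer.fit_transform(pr['text'])  # assumes the release text lives in a 'text' column
tfidf.shape
# -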
# We're going to use a clustering algorithm called k-means. Here's an interactive demo of how it works.
# See this [interactive demo](http://web.stanford.edu/class/ee103/visualizations/kmeans/kmeans.html) or [this one](https://www.naftaliharris.com/blog/visualizing-k-means-clustering/).
from sklearn.cluster import KMeans
# +
# cluster
# -
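# +
# A sketch of the clustering step, assuming the `tfidf` matrix from the sketch above;
# the number of clusters is an arbitrary choice for illustration.
km = KMeans(n_clusters=5, random_state=0)
km.fit(tfidf)
km.labels_[:10]  # cluster id assigned to each of the first ten press releases
# -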
# Ok, let's see what's in each cluster!
def print_sorted_vector(v):
# this "lambda" thing is an anonymous function, google me to unluck bonus coding knowledge
sorted_list = sorted(v.items(), key=lambda x: (x[1],x[0]), reverse=True)
sorted_list = sorted_list[:10]
print('\n'.join([str(x) for x in sorted_list]))
# Now we're going to print out the top words of the center vector of each cluster, to see how the k-means algorithm did.
# +
# print cluster centroids
# -
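# +
# A sketch of printing each centroid's top terms with the helper above, assuming the `km`
# and `vectorizer` objects from the earlier sketches (on older scikit-learn versions
# `get_feature_names_out` is called `get_feature_names`).
terms = vectorizer.get_feature_names_out()
for i, center in enumerate(km.cluster_centers_):
    print('--- cluster', i)
    print_sorted_vector(dict(zip(terms, center)))  # term -> weight in the centroid vector
# -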
# In fact, Overview uses k-means in its "topic tree" visualization
# ### Visualizing clusters to understand politics
# This is a fairly literal translation of a [previous post](http://www.compjournalism.com/?p=13) of mine (it was done in R at the time). We're going to load up the voting record of the U.K. House of Lords, turn each MP's voting record into a vector, and see how all these politicians relate in this abstract ideological space.
#
# The data is circa 2012, because they had an interesting coalition government at the time.
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
votes = pd.read_csv('uk-lords-votes.csv')
votes.shape
# +
# print out this votes matrix
# -
# This is data I processed earlier, and you can think of it as a template for the format you will need to get your data into to do your homework. Each row is one member of parliament. Each of the numbered columns is one vote, where 1 means aye, 0 means abstain, and -1 means nay. The `party` column indicates which political party that MP belonged to at the time.
#
# If you're interested in the original data, including the names of these politicians and what they were voting on, you can find it all [here](http://www.compjournalism.com/?p=13).
#
# We'll want to turn the list of parties into a list of colors.
# compute the color that each MP should be, based on their party
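# a sketch -- the party codes and color choices here are assumptions, not from the original data:
party_colors = {'Lab': 'red', 'Con': 'blue', 'LD': 'orange', 'XB': 'gray'}
colors = [party_colors.get(p, 'green') for p in votes['party']]  # unknown parties fall back to green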
# Now that we've set everything up, we're ready to start projecting. We can view at most three dimensions at once with our puny human visual system. The simplest projection is just to pick three dimensions of our vectors and plot them.
# +
# 3d scatterplot of three votes
# -
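# +
# A sketch of the literal projection: scatter three raw vote columns, colored by party,
# using the `colors` list from the sketch above and the first three non-party columns.
vote_cols = [c for c in votes.columns if c != 'party']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(votes[vote_cols[0]], votes[vote_cols[1]], votes[vote_cols[2]], c=colors)
plt.show()
# -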
# Not very interesting. All of vote coordinates are in [-1,0,1] so no matter which votes (dimensions) we pick we can only get the corners, edges, and center of a cube. Plus, all 613 MPs overlap each other -- many MPs voted the same way on this set of three votes -- so we only see a few dots.
# Instead, we're going to let the computer pick the right projection from this wacky high dimensional space to two dimensions. We are using PCA, "principal components analysis," which tries to find a direction to project that gives maximum separation of all the points. This dimension doesn't have to be aligned to any of our dimension axes -- PCA will "rotate" the points in high dimensional space until they are as spread out as possible.
# +
# PCA to 2D
# +
# 2D scatterplot
# -
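# +
# A sketch of the two cells above, assuming the same `votes` frame and `colors` list;
# only the numeric vote columns (everything except 'party') are passed to PCA.
vote_matrix = votes.drop(columns='party').values
xy = PCA(n_components=2).fit_transform(vote_matrix)
plt.scatter(xy[:, 0], xy[:, 1], c=colors)
plt.show()
# -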
# We can actually project down to any number of dimensions. (More than 3 but less than the original 100 can be useful for some data processing operations.) Here, we'll project down to 3 and take a look at our voting clusters in glorious 3D.
# +
# PCA to 3D
# +
# 3D scatterplot
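# a sketch mirroring the 2D cell above (same assumptions about `votes` and `colors`):
xyz = PCA(n_components=3).fit_transform(votes.drop(columns='party').values)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], c=colors)
plt.show()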
| week-2/week-2-2-class-empty.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7OGyhw9v31o1"
# # Gamma Distributions
# + id="iPOUzf2j0lNB"
import jax
import jax.numpy as jnp
from jax.scipy.stats import gamma
import matplotlib.pyplot as plt
import seaborn as sns
try:
from probml_utils import savefig, latexify
except ModuleNotFoundError:
# %pip install git+https://github.com/probml/probml-utils.git
from probml_utils import savefig, latexify
# + id="58mmIS_10m1K"
latexify(width_scale_factor=2, fig_height=2)
# + id="iH5XbYaWeW-7"
def make_graph(data):
x = data["x"]
a_list = data["a_list"]
b_list = data["b_list"]
props = data["props"]
fig_name = data["fig_name"]
for a, b, prop in zip(a_list, b_list, props):
y = gamma.pdf(x, a, scale=1 / b, loc=0)
plt.plot(x, y, prop, label="a=%.1f, b=%.1f" % (a, b))
plt.xlabel("$x$")
plt.ylabel("$p(x)$")
plt.legend(loc="upper right")
plt.title(fig_name)
sns.despine()
# + id="kk0h8yftdDcp"
x = jnp.linspace(0, 7, 100)
a_list = [1.0, 1.5, 2.0, 1.0, 1.5, 2.0]
b_list = [1.0, 1.0, 1.0, 2.0, 2.0, 2.0]
props = ["b-", "r-", "k-", "b:", "r:", "k:"]
data = {"x": x, "a_list": a_list, "b_list": b_list, "props": props, "fig_name": "Gamma distributions"}
make_graph(data)
savefig("gammadist_latexified")
# + id="3d6PKkO8g9Fj"
x = jnp.linspace(0, 7, 100)
a_list = [1, 1.5, 2]
b_list = [1, 1, 1]
props = ["b-", "r-", "g-"]
data = {"x": x, "a_list": a_list, "b_list": b_list, "props": props, "fig_name": "Gamma(a,b) distributions"}
make_graph(data)
# + id="PG7W-4PAhPae"
x = jnp.linspace(0, 7, 100)
a_list = [1]
b_list = [1]
props = ["b-"]
data = {"x": x, "a_list": a_list, "b_list": b_list, "props": props, "fig_name": "Gamma(1,1) distribution"}
make_graph(data)
plt.axvline(1, color="r")
plt.show()
# + id="XUpV9fGMfU7c"
from ipywidgets import interact
@interact(a=(0.1, 5), b=(0.1, 5))
def generate_random(a, b):
data = {
"x": jnp.linspace(0, 7, 100),
"a_list": [a],
"b_list": [b],
"props": ["b"],
"fig_name": "Gamma distributions",
}
make_graph(data)
plt.yticks(jnp.arange(0, 6, step=1))
# + id="TkppVzlWn990"
| notebooks/book1/02/gamma_dist_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
from matplotlib import pyplot as plt
import math
import sys
from skimage.filters import threshold_otsu
from skimage.morphology import disk
from skimage.morphology import dilation
from PIL import Image
import pytesseract
import os
# +
class resturant_menu_expert:
def __init__(self ,path ,max_distance):
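        # Pipeline: select a region of interest, invert and binarise it (Otsu), estimate the
        # text rotation on a dilated copy and de-rotate the binary image, dilate it into text
        # lines, segment the dish-name regions, run OCR, correct the OCR output against a
        # dish-name database (using `max_distance`), and display the annotated result.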
orig_img = self.get_roi(path)
img = orig_img.copy()
img1 = self.invert(img)
img2 = self.get_otsu(img1)
img3 = self.disk_dilate(img2 ,5)
final_angle = self.fast_featureless_rotation(img3)
img4 = self.rotate_image(img2,final_angle)
img5 = self.line_dilate(img4)
segs = self.dish_name_segmentation(img5 ,img4)
text = self.ocr(segs)
db = self.get_database()
dish_names = self.OCR_Correction(text ,db , max_distance)
print(dish_names)
output = self.get_finla_output(orig_img , dish_names ,final_angle)
plt.figure(figsize=(15,15))
plt.imshow(output)
def get_roi(self ,img_path):
img_raw = cv2.imread(img_path)
roi = cv2.selectROI(img_raw)
roi_cropped = img_raw[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])]
cv2.waitKey(0)
cv2.destroyAllWindows()
img = cv2.cvtColor(roi_cropped, cv2.COLOR_RGB2GRAY)
return img
    def get_otsu(self ,img):
otsu = threshold_otsu(img)
binary = img > otsu
res = np.zeros(binary.shape)
for i in range(binary.shape[0]):
for j in range(binary.shape[1]):
if binary[i][j] == False:
res[i][j] = 0
else:
res[i][j] = 255
return res.astype(np.uint8)
def invert(self ,img):
return 255-img
def disk_dilate(self ,img ,redius):
selem = disk(redius)
dilated = dilation(img, selem)
return dilated
def plot_gray(self ,img):
plt.figure(figsize=(15,15))
plt.imshow(img ,cmap='gray')
def plot_rgb(self ,img):
plt.figure(figsize=(15,15))
plt.imshow(img)
def get_bounding_boxes(self ,num_labels ,labels_im):
componants = []
for label in range(1,num_labels):
comp = labels_im.copy()
for i in range(labels_im.shape[0]):
for j in range(labels_im.shape[1]):
if comp[i][j] == label:
comp[i][j] = 255
else:
comp[i][j] = 0
comp = np.stack((comp,)*3, axis=-1)
active_px = np.argwhere(comp!=0)
active_px = active_px[:,[1,0]]
x,y,w,h = cv2.boundingRect(active_px)
componants.append( (label,w,h ,x,y))
return componants
def merge_box(self , a , b ):
new_y = min( a[4] ,b[4])
new_x = min( a[3] ,b[3])
y1 = a[4]+a[2]
y2 = b[4]+b[2]
x1 = a[3]+a[1]
x2 = b[3]+b[1]
new_h = max( y1 - new_y , y2 - new_y)
new_w = max( x1 - new_x , x2 - new_x)
return (a[0] ,new_w ,new_h ,new_x ,new_y)
def merge_bounding_boxes(self ,boxes ,dx ,dy):
new_box = []
merged = [0] * len(boxes)
for i in range( len(boxes)-1):
for j in range(i+1 ,len(boxes)):
y_diff = abs( boxes[i][4] - boxes[j][4])
if y_diff<=dy:
new_box.append(self.merge_box( boxes[i],boxes[j]))
merged[i] = 1
merged[j] = 1
for i in range(len(boxes)):
if merged[i] == 0 :
new_box.append( boxes[i])
return new_box
def rotate_image(self ,mat, angle):
height, width = mat.shape[:2]
image_center = (width / 2, height / 2)
rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1)
radians = math.radians(angle)
sin = math.sin(radians)
cos = math.cos(radians)
bound_w = int((height * abs(sin)) + (width * abs(cos)))
bound_h = int((height * abs(cos)) + (width * abs(sin)))
rotation_mat[0, 2] += ((bound_w / 2) - image_center[0])
rotation_mat[1, 2] += ((bound_h / 2) - image_center[1])
rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h))
return rotated_mat
def find_final_rotation(self ,img ,coarse_angle):
aspect_ratios = []
theta = []
for angle in range(coarse_angle-10,coarse_angle+10):
theta.append(angle)
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
res = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
num_labels, labels_im = cv2.connectedComponents(res)
boxes = self.get_bounding_boxes(num_labels ,labels_im)
aspect = 0
for i in boxes:
label = i[0]
width = i[1]
height = i[2]
aspect += (height/width)
aspect_ratios.append( aspect/len(boxes) )
idx = aspect_ratios.index(min(aspect_ratios))
return theta[idx]
def fast_featureless_rotation(self ,img):
aspect_ratios = []
theta = []
for angle in range(-90,90 ,10):
theta.append(angle)
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
res = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
num_labels, labels_im = cv2.connectedComponents(res)
boxes = self.get_bounding_boxes(num_labels ,labels_im)
aspect = 0
for i in boxes:
label = i[0]
width = i[1]
height = i[2]
aspect += (height/width)
aspect_ratios.append( aspect/len(boxes) )
idx = aspect_ratios.index(min(aspect_ratios))
coarse_angle = theta[idx]
final_angle = self.find_final_rotation(img ,coarse_angle)
return final_angle
def line_dilate(self ,img):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10,2))
dilate = cv2.dilate(img, kernel, iterations=2)
return dilate
def dish_name_segmentation(self ,dilated_img ,img):
num_labels, labels_im = cv2.connectedComponents(dilated_img)
boxes = self.get_bounding_boxes(num_labels ,labels_im)
boxes = self.merge_bounding_boxes(boxes ,30 ,50)
segments = []
for box in boxes:
label = box[0]
w = box[1]
h = box[2]
x = box[3]
y = box[4]
cropped = img[y:y+h ,x:x+w]
segments.append( [ 255-cropped , x,y,w,h ] )
return segments
def ocr(self ,segs):
final_text = []
for i in segs:
PIL_image = Image.fromarray(i[0])
text = pytesseract.image_to_string(PIL_image)
temp = text.split('\x0c')[0]
line = temp.split('\n')[0]
for j in [line]:
final_text.append([j ,i[1] ,i[2] ,i[3] ,i[4] ])
return final_text
def get_bounding_boxes_img(self ,num_labels ,labels_im):
ops = []
for label in range(1,num_labels):
comp = labels_im.copy()
for i in range(labels_im.shape[0]):
for j in range(labels_im.shape[1]):
if comp[i][j] == label:
comp[i][j] = 255
else:
comp[i][j] = 0
comp = np.stack((comp,)*3, axis=-1)
active_px = np.argwhere(comp!=0)
active_px = active_px[:,[1,0]]
x,y,w,h = cv2.boundingRect(active_px)
op = cv2.rectangle(comp,(x,y),(x+w,y+h),(255,0,0),1)
ops.append(op)
res = ops[0]
for i in range(1,len(ops)):
res += ops[i]
plt.figure(figsize = (10,10))
plt.imshow(res)
def get_database(self ):
rootdir = '../img/menu_items'
db = []
for subdir, dirs, files in os.walk(rootdir):
for file in files:
temp = file.split('.')[0]
db.append(temp)
return db
def edit_distance(self ,s1 ,s2 ,max_dist):
l1 = len(s1)
l2 = len(s2)
dp = np.zeros((2 ,l1+1))
for i in range(l1+1):
dp[0][i] = i
for i in range(1,l2+1):
for j in range(0,l1+1):
if j==0:
dp[i%2][j] = i
elif s1[j-1] == s2[i-1]:
dp[i%2][j] = dp[(i-1)%2][j-1]
else:
dp[i%2][j] = 1 + min(dp[(i-1)%2][j], min(dp[i%2][j-1], dp[(i-1)%2][j-1]))
dist = dp[l2%2][l1]
if dist > max_dist:
return max_dist+1
return dist
def db_lookup(self ,test_str , db ,max_dist):
min_dist = sys.maxsize
match = None
for i in db:
dist = self.edit_distance(test_str ,i ,max_dist)
if dist < min_dist:
min_dist = dist
match = i
if min_dist == 0 :
break
if min_dist < max_dist:
return match
def OCR_Correction(self , final_text ,db ,max_dist):
corrected_img = []
for i in final_text:
dish = i[0].lower()
op = self.db_lookup(dish ,db ,max_dist)
i.append(op)
corrected_img.append(i)
return corrected_img
def get_finla_output( self ,menu , dish_names ,final_angle):
img = 255-menu
res = self.rotate_image(img,final_angle)
res = 255- res
siz = res.shape
width = 800
hi ,wi = int(siz[0]*width/siz[1]) , width
cropped_img = cv2.resize(res ,(wi, hi))
new_cropped_img = np.stack((cropped_img,)*3, axis=-1)
for i in dish_names:
test = i
if test[5] != None:
path = test[5]+'.jpeg'
w = test[3]
h = test[4]
dish_img = cv2.imread('../img/menu_items/' + path )
dish_img = cv2.cvtColor(dish_img, cv2.COLOR_BGR2RGB)
ratio = width/siz[1]
new_dish_img = cv2.resize(dish_img , (int((siz[1]*h*ratio)/siz[0]) ,int(h*ratio) ) )
x,y,w,h = test[1] ,test[2] ,test[3] ,test[4]
x = int(x*ratio)
y = int(y*ratio)
w = int(w*ratio)
h = int(h*ratio)
sz = new_dish_img.shape
if (x+w+sz[1] > new_cropped_img.shape[1]):
diff = x+w+sz[1]-new_cropped_img.shape[1]
new_dish_img = cv2.resize(new_dish_img ,(new_dish_img.shape[1]-diff , new_dish_img.shape[0] ))
sz = new_dish_img.shape
new_cropped_img[ y:y+sz[0] ,x+w:x+w+sz[1],:] = new_dish_img[:,:,:]
return new_cropped_img
# -
resturant_menu_expert('../img/menu1.jpg' ,4)
| src/Menu.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xcpp14
// ---
// +
#ifndef VEHICLE_H
#define VEHICLE_H
#include <map>
#include <string>
#include <vector>
using std::map;
using std::string;
using std::vector;
class Vehicle {
public:
// Constructors
Vehicle();
Vehicle(int lane, float s, float v, float a, string state="CS");
// Destructor
virtual ~Vehicle();
// Vehicle functions
vector<Vehicle> choose_next_state(map<int, vector<Vehicle>> &predictions);
vector<string> successor_states();
vector<Vehicle> generate_trajectory(string state,
map<int, vector<Vehicle>> &predictions);
vector<float> get_kinematics(map<int, vector<Vehicle>> &predictions, int lane);
vector<Vehicle> constant_speed_trajectory();
vector<Vehicle> keep_lane_trajectory(map<int, vector<Vehicle>> &predictions);
vector<Vehicle> lane_change_trajectory(string state,
map<int, vector<Vehicle>> &predictions);
vector<Vehicle> prep_lane_change_trajectory(string state,
map<int, vector<Vehicle>> &predictions);
void increment(int dt);
float position_at(int t);
bool get_vehicle_behind(map<int, vector<Vehicle>> &predictions, int lane,
Vehicle &rVehicle);
bool get_vehicle_ahead(map<int, vector<Vehicle>> &predictions, int lane,
Vehicle &rVehicle);
vector<Vehicle> generate_predictions(int horizon=2);
void realize_next_state(vector<Vehicle> &trajectory);
void configure(vector<int> &road_data);
// public Vehicle variables
struct collider{
bool collision; // is there a collision?
int time; // time collision happens
};
map<string, int> lane_direction = {{"PLCL", 1}, {"LCL", 1},
{"LCR", -1}, {"PLCR", -1}};
int L = 1;
int preferred_buffer = 6; // impacts "keep lane" behavior.
int lane, s, goal_lane, goal_s, lanes_available;
float v, target_speed, a, max_acceleration;
string state;
};
#endif // VEHICLE_H
| behavior Planning/behavior_planner/vehicle.h.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) <NAME>, <NAME> 2017. Thanks to NSF for support via CAREER award #1149784.
# -
# [@LorenaABarba](https://twitter.com/LorenaABarba)
# 12 steps to Navier–Stokes
# ======
# ***
# This Jupyter notebook continues the presentation of the **12 steps to Navier–Stokes**, the practical module taught in the interactive CFD class of [Prof. <NAME>](http://lorenabarba.com). You should have completed [Step 1](./01_Step_1.ipynb) before continuing, having written your own Python script or notebook and having experimented with varying the parameters of the discretization and observing what happens.
#
# Step 2: Nonlinear Convection
# -----
# ***
# Now we're going to implement nonlinear convection using the same methods as in step 1. The 1D convection equation is:
#
# $$\frac{\partial u}{\partial t} + u \frac{\partial u}{\partial x} = 0$$
#
# Instead of a constant factor $c$ multiplying the second term, now we have the solution $u$ multiplying it. Thus, the second term of the equation is now *nonlinear*. We're going to use the same discretization as in Step 1 — forward difference in time and backward difference in space. Here is the discretized equation.
#
# $$\frac{u_i^{n+1}-u_i^n}{\Delta t} + u_i^n \frac{u_i^n-u_{i-1}^n}{\Delta x} = 0$$
#
# Solving for the only unknown term, $u_i^{n+1}$, yields:
#
# $$u_i^{n+1} = u_i^n - u_i^n \frac{\Delta t}{\Delta x} (u_i^n - u_{i-1}^n)$$
# As before, the Python code starts by loading the necessary libraries. Then, we declare some variables that determine the discretization in space and time (you should experiment by changing these parameters to see what happens). Then, we create the initial condition $u_0$ by initializing the array for the solution using $u = 2\ @\ 0.5 \leq x \leq 1$ and $u = 1$ everywhere else in $(0,2)$ (i.e., a hat function).
# +
import numpy # we're importing numpy
from matplotlib import pyplot # and our 2D plotting library
# %matplotlib inline
nx = 41
dx = 2 / (nx - 1)
nt = 2 #nt is the number of timesteps we want to calculate
dt = .025 #dt is the amount of time each timestep covers (delta t)
u = numpy.ones(nx) #as before, we initialize u with every value equal to 1.
u[int(.5 / dx) : int(1 / dx + 1)] = 2 #then set u = 2 between 0.5 and 1 as per our I.C.s
un = numpy.ones(nx) #initialize our placeholder array un, to hold the time-stepped solution
# -
# The code snippet below started from the time-stepping loop of [Step 1](./01_Step_1.ipynb). The update line has already been edited to execute nonlinear convection: the constant wave speed $c$ is replaced by the local solution value `un[i]`.
# +
for n in range(nt): #iterate through time
un = u.copy() ##copy the existing values of u into un
for i in range(1, nx): ##now we'll iterate through the u array
        ###This is the line from Step 1 with c replaced by un[i],
        ###implementing the nonlinear update. Run the cell to evaluate Step 2.
u[i] = un[i] - un[i] * dt / dx * (un[i] - un[i-1])
pyplot.plot(numpy.linspace(0, 2, nx), u) ##Plot the results
# -
# What do you observe about the evolution of the hat function under the nonlinear convection equation? What happens when you change the numerical parameters and run again?
# ## Learn More
# For a careful walk-through of the discretization of the convection equation with finite differences (and all steps from 1 to 4), watch **Video Lesson 4** by <NAME> on YouTube.
from IPython.display import YouTubeVideo
YouTubeVideo('y2WaK7_iMRI')
from IPython.core.display import HTML
def css_styling():
styles = open("../styles/custom.css", "r").read()
return HTML(styles)
css_styling()
# > (The cell above executes the style for this notebook.)
| lessons/02_Step_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import everything and define a test runner function
from importlib import reload
from io import BytesIO
import op, script, tx
from helper import (
hash256,
run,
)
from script import Script
from tx import Tx, TxIn
# +
# Example opcode processing
def op_dup(stack):
if len(stack) < 1:
return False
stack.append(stack[-1])
return True
def op_hash256(stack):
if len(stack) < 1:
return False
element = stack.pop()
stack.append(hash256(element))
return True
# -
# ### Exercise 1
#
# #### 1.1. Make [this test](/edit/session4/op.py) pass
# ```
# op.py:OpTest:test_op_hash160
# ```
# +
# Exercise 1.1
reload(op)
run(op.OpTest('test_op_hash160'))
# -
# Example of evaluation
z = 0x7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d
sec = bytes.fromhex('04887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34')
sig = bytes.fromhex('3045022000eff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c022100c7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab601')
script_pubkey = Script([sec, 0xac])
script_sig = Script([sig])
combined_script = script_sig + script_pubkey
print(combined_script.evaluate(z))
# ### Exercise 2
#
# #### 2.1. Make [this test](/edit/session4/op.py) pass
# ```
# op.py:OpTest:test_op_checksig
# ```
# +
# Exercise 2.1
reload(op)
run(op.OpTest('test_op_checksig'))
# +
# Example Script
reload(script)
hex_script_pubkey = '0455935987'
script_pubkey = Script.parse(BytesIO(bytes.fromhex(hex_script_pubkey)))
hex_script_sig = '0154'
script_sig = Script.parse(BytesIO(bytes.fromhex(hex_script_sig)))
combined_script = script_sig + script_pubkey
print(combined_script.evaluate(0))
# -
# ### Exercise 3
#
# #### 3.1. Determine a ScriptSig that will satisfy this scriptPubKey:
# ```
# 06767695935687
# ```
# #### Hint: use the Script.parse method
# +
# Exercise 3.1
hex_script_pubkey = '06767695935687'
# bytes.fromhex the script
bin_script_pubkey = bytes.fromhex(hex_script_pubkey)
# get the stream using BytesIO
stream = BytesIO(bin_script_pubkey)
# parse the ScriptPubKey
script_pubkey = Script.parse(stream)
# Find the right scriptSig
script_sig = Script([0x52])
# combine the scripts
combined_script = script_sig + script_pubkey
# evaluate combined script
print(combined_script.evaluate(0))
# -
# ### Exercise 4
#
# #### 4.1. Determine what this ScriptPubKey is doing:
# ```
# 086e879169a77ca787
# ```
#
# * 69 = OP_VERIFY (exits if top element not true)
# * 6e = OP_2DUP (duplicates top 2 elements)
# * 7c = OP_SWAP (swaps top 2 elements)
# * 87 = OP_EQUAL
# * 91 = OP_NOT (inverts top element)
# * a7 = OP_SHA1 (sha1 of top element)
# +
# Exercise 4.1
hex_script_pubkey = '086e879169a77ca787'
# create a script_pubkey object
script_pubkey = Script.parse(BytesIO(bytes.fromhex(hex_script_pubkey)))
# print the script
print(script_pubkey)
# SOLUTION
hex_script_sig = 'fd86024d4001255044462d312e330a25e2e3cfd30a0a0a312030206f626a0a3c3c2f57696474682032203020522f4865696768742033203020522f547970652034203020522f537562747970652035203020522f46696c7465722036203020522f436f6c6f7253706163652037203020522f4c656e6774682038203020522f42697473506572436f6d706f6e656e7420383e3e0a73747265616d0affd8fffe00245348412d3120697320646561642121212121852fec092339759c39b1a1c63c4c97e1fffe017f46dc93a6b67e013b029aaa1db2560b45ca67d688c7f84b8c4c791fe02b3df614f86db1690901c56b45c1530afedfb76038e972722fe7ad728f0e4904e046c230570fe9d41398abe12ef5bc942be33542a4802d98b5d70f2a332ec37fac3514e74ddc0f2cc1a874cd0c78305a21566461309789606bd0bf3f98cda8044629a14d4001255044462d312e330a25e2e3cfd30a0a0a312030206f626a0a3c3c2f57696474682032203020522f4865696768742033203020522f547970652034203020522f537562747970652035203020522f46696c7465722036203020522f436f6c6f7253706163652037203020522f4c656e6774682038203020522f42697473506572436f6d706f6e656e7420383e3e0a73747265616d0affd8fffe00245348412d3120697320646561642121212121852fec092339759c39b1a1c63c4c97e1fffe017346dc9166b67e118f029ab621b2560ff9ca67cca8c7f85ba84c79030c2b3de218f86db3a90901d5df45c14f26fedfb3dc38e96ac22fe7bd728f0e45bce046d23c570feb141398bb552ef5a0a82be331fea48037b8b5d71f0e332edf93ac3500eb4ddc0decc1a864790c782c76215660dd309791d06bd0af3f98cda4bc4629b1'
script_sig = Script.parse(BytesIO(bytes.fromhex(hex_script_sig)))
combined_script = script_sig + script_pubkey
print(combined_script.evaluate(0))
# -
# ### Exercise 5
#
# #### 5.1. Make [this test](/edit/session4/tx.py) pass
# ```
# tx.py:TxTest:test_serialize
# ```
# +
# Exercise 5.1
reload(tx)
run(tx.TxTest('test_serialize'))
# +
# Example of how to look up a transaction using fetch_tx() method
from tx import TxIn
prev_tx = bytes.fromhex('d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81')
tx_in = TxIn(prev_tx, 0)
print(tx_in.fetch_tx())
# -
# ### Exercise 6
#
#
# #### 6.1. What is the value and scriptPubKey of the 0th output of this transaction?
# ```
# d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81
# ```
#
# #### 6.2. Make [these tests](/edit/session4/tx.py) pass
# ```
# tx.py:TxTest:test_input_value
# tx.py:TxTest:test_input_pubkey
# ```
# +
# Exercise 6.1
prev_tx = bytes.fromhex('d1c789a9c60383bf715f3f6ad9d14b91fe55f3deb369fe5d9280cb1a01793f81')
prev_index = 0
# create the transaction input
tx_in = TxIn(prev_tx, 0)
# fetch the transaction
t = tx_in.fetch_tx()
# grab the output at the index
prev_output = t.tx_outs[prev_index]
# show the amount
print(prev_output.amount)
# show the script_pubkey
print(prev_output.script_pubkey)
# +
# Exercise 6.2
reload(tx)
run(tx.TxTest('test_input_value'))
run(tx.TxTest('test_input_pubkey'))
# -
# ### Exercise 7
#
# #### 7.1. How much is the transaction fee of this transaction?
# ```
# 010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600
# ```
#
# Fee is simply the sum of the inputs (use the value() method) minus the outputs (use the amount property)
#
# #### 7.2. Make [this test](/edit/session4/tx.py) pass
# ```
# tx.py:TxTest:test_fee
# ```
# +
# Exercise 7.1
hex_tx = '010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600'
# bytes.fromhex the tx, make stream
stream = BytesIO(bytes.fromhex(hex_tx))
# parse the tx
t = Tx.parse(stream)
# initialize input sum
input_sum = 0
# iterate over all inputs (t.tx_ins)
for tx_in in t.tx_ins:
# get the values from the TxIn.value method you wrote in 4.2
value = tx_in.value()
# add to input sum
input_sum += value
# initialize output sum
output_sum = 0
# iterate over all outputs (t.tx_outs)
for tx_out in t.tx_outs:
# get the amounts from the TxOut.amount property
value = tx_out.amount
# add to output sum
output_sum += value
# fee is input sum - output sum
fee = input_sum - output_sum
print(fee)
# +
# Exercise 7.2
reload(tx)
run(tx.TxTest('test_fee'))
# +
# hash256 example to get z
modified_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000001976a914a802fc56c704ce87c42d7c92eb75e7896bdc41ae88acfeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac1943060001000000')
h = hash256(modified_tx)
z = int.from_bytes(h, 'big')
print(hex(z))
# -
# ### Exercise 8
#
# #### 8.1. Make [this test](/edit/session4/tx.py) pass
# ```
# tx.py:TxTest:test_sig_hash
# ```
# +
# Exercise 8.1
reload(tx)
run(tx.TxTest('test_sig_hash'))
# +
# Validation example
modified_tx = bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000001976a914a802fc56c704ce87c42d7c92eb75e7896bdc41ae88acfeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac1943060001000000')
h256 = hash256(modified_tx)
z = int.from_bytes(h256, 'big')
stream = BytesIO(bytes.fromhex('0100000001813f79011acb80925dfe69b3def355fe914bd1d96a3f5f71bf8303c6a989c7d1000000006b483045022100ed81ff192e75a3fd2304004dcadb746fa5e24c5031ccfcf21320b0277457c98f02207a986d955c6e0cb35d446a89d3f56100f4d7f67801c31967743a9c8e10615bed01210349fc4e631e3624a545de3f89f5d8684c7b8138bd94bdd531d2e213bf016b278afeffffff02a135ef01000000001976a914bc3b654dca7e56b04dca18f2566cdaf02e8d9ada88ac99c39800000000001976a9141c4bc762dd5423e332166702cb75f40df79fea1288ac19430600'))
transaction = Tx.parse(stream)
tx_in = transaction.tx_ins[0]
combined_script = tx_in.script_sig + tx_in.script_pubkey()
print(combined_script.evaluate(z))
# -
# ### Exercise 9
#
# #### 9.1. Validate the signature for the first input in this transaction.
# ```
# 01000000012f5ab4d2666744a44864a63162060c2ae36ab0a2375b1c2b6b43077ed5dcbed6000000006a473044022034177d53fcb8e8cba62432c5f6cc3d11c16df1db0bce20b874cfc61128b529e1022040c2681a2845f5eb0c46adb89585604f7bf8397b82db3517afb63f8e3d609c990121035e8b10b675477614809f3dde7fd0e33fb898af6d86f51a65a54c838fddd417a5feffffff02c5872e00000000001976a91441b835c78fb1406305727d8925ff315d90f9bbc588acae2e1700000000001976a914c300e84d277c6c7bcf17190ebc4e7744609f8b0c88ac31470600
# ```
# +
# Exercise 9.1
from tx import Tx
hex_tx = '01000000012f5ab4d2666744a44864a63162060c2ae36ab0a2375b1c2b6b43077ed5dcbed6000000006a473044022034177d53fcb8e8cba62432c5f6cc3d11c16df1db0bce20b874cfc61128b529e1022040c2681a2845f5eb0c46adb89585604f7bf8397b82db3517afb63f8e3d609c990121035e8b10b675477614809f3dde7fd0e33fb898af6d86f51a65a54c838fddd417a5feffffff02c5872e00000000001976a91441b835c78fb1406305727d8925ff315d90f9bbc588acae2e1700000000001976a914c300e84d277c6c7bcf17190ebc4e7744609f8b0c88ac31470600'
stream = BytesIO(bytes.fromhex(hex_tx))
index = 0
# parse the transaction using Tx.parse
t = Tx.parse(stream)
# grab the input at index
tx_in = t.tx_ins[index]
# use the sig_hash method on index and hash_type to get z
z = t.sig_hash(index)
# combine the script_sig and script_pubkey
combined_script = tx_in.script_sig + tx_in.script_pubkey()
# evaluate the combined script
print(combined_script.evaluate(z))
| session4/complete/session4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: rl_research
# language: python
# name: rl_research
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# This code creates a virtual display to draw game images on.
# If you are running locally, just ignore it
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY"))==0:
# !bash ../xvfb start
# %env DISPLAY=:1
# -
# ### OpenAI Gym
#
# We're gonna spend the next several weeks learning algorithms that solve decision processes, so we'll need some interesting decision problems to test our algorithms on.
#
# That's where OpenAI Gym comes into play. It's a Python library that wraps many classical decision problems, including robot control, video games and board games.
#
# So here's how it works:
# +
import gym
env = gym.make("MountainCar-v0")
plt.imshow(env.render('rgb_array'))
print("Observation space:", env.observation_space)
print("Action space:", env.action_space)
# -
# Note: if you're running this on your local machine, you'll see a window pop up with the image above. Don't close it, just alt-tab away.
# ### Gym interface
#
# The three main methods of an environment are
# * __reset()__ - reset environment to initial state, _return first observation_
# * __render()__ - show current environment state (a more colorful version :) )
# * __step(a)__ - commit action __a__ and return (new observation, reward, is done, info)
# * _new observation_ - an observation right after committing the action __a__
# * _reward_ - a number representing your reward for committing action __a__
# * _is done_ - True if the MDP has just finished, False if still in progress
# * _info_ - some auxiliary stuff about what just happened. Ignore it ~~for now~~.
# +
obs0 = env.reset()
print("initial observation code:", obs0)
# Note: in MountainCar, observation is just two numbers: car position and velocity
# +
print("taking action 2 (right)")
new_obs, reward, is_done, _ = env.step(2)
print("new observation code:", new_obs)
print("reward:", reward)
print("is game over?:", is_done)
# Note: as you can see, the car has moved to the right slightly (around 0.0005)
# -
# ### Play with it
#
# Below is the code that drives the car to the right.
#
# However, it doesn't reach the flag at the far right due to gravity.
#
# __Your task__ is to fix it. Find a strategy that reaches the flag.
#
# You're not required to build any sophisticated algorithms for now, feel free to hard-code :)
#
# _Hint: your action at each step should depend either on __t__ or on __s__._
# +
# create env manually to set time limit. Please don't change this.
TIME_LIMIT = 250
env = gym.wrappers.TimeLimit(gym.envs.classic_control.MountainCarEnv(),
max_episode_steps=TIME_LIMIT + 1)
s = env.reset()
actions = {'left': 0, 'stop': 1, 'right': 2}
# prepare "display"
# %matplotlib notebook
fig = plt.figure()
ax = fig.add_subplot(111)
fig.show()
for t in range(TIME_LIMIT):
# change the line below to reach the flag
if t < 40:
s, r, done, _ = env.step(actions['left'])
else:
s, r, done, _ = env.step(actions['right'])
#draw game image on display
ax.clear()
ax.imshow(env.render('rgb_array'))
fig.canvas.draw()
if done:
print("Well done!")
break
else:
print("Time limit exceeded. Try again.")
# -
assert s[0] > 0.47
print("You solved it!")
| week1_intro/seminar_gym_interface.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Dictionary Comprehensions
#
# Just like list comprehensions, dictionaries also support their own comprehension syntax for quick creation. It is not as common as list comprehensions, but the syntax is:
{x:x**2 for x in range(10)}
# One of the reasons it is not as common is the difficulty of structuring key names that are not based on the values.
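# For instance, one common workaround (a small illustrative sketch) is to draw the keys from a separate iterable, pairing two sequences with `zip`:
{key: value for key, value in zip(['a', 'b', 'c'], range(3))}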
#
# ## Iterating over keys, values and items
# Dictionaries can be iterated over using the iteration methods available on a dictionary. For example:
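# A small sketch of the three iteration views on a dictionary:
d = {x: x**2 for x in range(3)}
for key in d.keys():        # iterate over keys
    print(key)
for value in d.values():    # iterate over values
    print(value)
for key, value in d.items():  # iterate over (key, value) pairs
    print(key, value)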
| log-prog-python/Compreensao em dicio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow import keras
import keras_tuner as kt
(img_train, label_train), (img_test, label_test) = keras.datasets.fashion_mnist.load_data()
# Normalize pixel values between 0 and 1
img_train = img_train.astype('float32') / 255.0
img_test = img_test.astype('float32') / 255.0
def model_builder(hp):
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
# Tune the number of units in the first Dense layer
# Choose an optimal value between 32-512
hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
model.add(keras.layers.Dense(units=hp_units, activation='relu'))
model.add(keras.layers.Dense(10))
# Tune the learning rate for the optimizer
# Choose an optimal value from 0.01, 0.001, or 0.0001
hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
tuner = kt.Hyperband(model_builder,
objective='val_accuracy',
max_epochs=10,
factor=3,
directory='my_dir',
project_name='intro_to_kt')
#Create a callback to stop training early once the validation loss has not improved for 5 consecutive epochs (patience=5).
stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
# +
tuner.search(img_train, label_train, epochs=50, validation_split=0.2, callbacks=[stop_early])
# Get the optimal hyperparameters
best_hps=tuner.get_best_hyperparameters(num_trials=1)[0]
print(f"""
The hyperparameter search is complete. The optimal number of units in the first densely-connected
layer is {best_hps.get('units')} and the optimal learning rate for the optimizer
is {best_hps.get('learning_rate')}.
""")
# +
# Build the model with the optimal hyperparameters and train it on the data for 50 epochs
model = tuner.hypermodel.build(best_hps)
history = model.fit(img_train, label_train, epochs=50, validation_split=0.2)
val_acc_per_epoch = history.history['val_accuracy']
best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1
print('Best epoch: %d' % (best_epoch,))
# +
hypermodel = tuner.hypermodel.build(best_hps)
# Retrain the model
hypermodel.fit(img_train, label_train, epochs=best_epoch, validation_split=0.2)
# -
eval_result = hypermodel.evaluate(img_test, label_test)
print("[test loss, test accuracy]:", eval_result)
| 01_ML_basics_with_Keras/07. Tuning hyperparams with the Keras tuner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109B Data Science 2: Advanced Topics in Data Science
# ## Homework 1 - Smoothers and Generalized Additive Models
#
#
#
# **Harvard University**<br/>
# **Spring 2020**<br/>
# **Instructors**: <NAME>, <NAME>, & <NAME>
#
#
# <hr style="height:2pt">
# ### Homework 1 is due February 6th
#PLEASE RUN THIS CELL
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
# ### INSTRUCTIONS
#
# - To submit your assignment, please follow the instructions on Canvas.
# - Please restart the kernel and run the entire notebook again before you submit.
#
# <hr style="height:2pt">
# ### Please use the libraries below:
# +
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
# %matplotlib inline
import statsmodels.formula.api as sm
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import r2_score
from sklearn.model_selection import KFold
from pygam import LinearGAM, s, f
from sklearn.preprocessing import LabelEncoder
# Seaborn visualization library
import seaborn as sns
# +
# import sys
# # !{sys.executable} -m pip install pygam
# -
#
# <hr style="height:2pt">
# ### Problem 1 - Modeling Seasonality of Airbnb Prices
# In this problem, the task is to build a regression model to predict the price of an Airbnb rental for a given date. The data are provided in `calendar_train.csv` and `calendar_test.csv`, which contain availability and price data for a sample of Airbnb units in the Boston area from 2017 to 2018, about 4 observations per day in each set.
# Start by loading the data using pandas.
# *Hint*: You likely want to have pandas parse the `date` column as a datetime object via the `parse_dates` argument of `pd.read_csv`
# +
#your code here
cal_train = pd.read_csv("data/calendar_train.csv", parse_dates = ['date'])
cal_test = pd.read_csv("data/calendar_test.csv", parse_dates = ['date'])
print(cal_train.dtypes)
cal_train['Month'] = cal_train['date'].dt.month
cal_train['Day'] = cal_train['date'].dt.dayofweek
cal_test['Month'] = cal_test['date'].dt.month
cal_test['Day'] = cal_test['date'].dt.dayofweek
# -
cal_train.dropna(inplace=True)
cal_test.dropna(inplace=True)
# ### Exploratory Analysis
#
# Visualize the average price by month and day of the week (i.e., Monday, Tuesday, etc.) for the training set. Point out any trends you notice and explain whether or not they make sense.
avg_price_month = cal_train.groupby(['Month'], as_index=False).agg({'price': 'mean'})
avg_price_day = cal_train.groupby(['Day'], as_index=False).agg({'price':'mean'})
avg_price_month.describe()
# +
#Plotting the average price of AirBnbs by month
plt.figure(figsize=(10,8))
sns.set_style("darkgrid")
sns.set_context("paper")
chart2 = sns.barplot(x = 'Month', y='price', data = avg_price_month)
plt.title('Average Price of Air Bnb By Month', fontsize = 20)
plt.ylabel('Price', fontsize = 20)
plt.xlabel('Month', fontsize = 20)
chart2.set_xticklabels(
chart2.get_xticklabels(),
rotation=0,
horizontalalignment = 'right',
fontweight = 'light',
fontsize = 'x-large'
)
plt.yticks(np.arange(0, 350, 20))
plt.tight_layout(h_pad = 0.4)
plt.show()
# plot2 = avg_price_month.plot.bar(x = "Month", y= 'price', color='Teal', title='Average Price per Month')
# _ = plot2.set_xlabel("Month")
# _ = plot2.set_ylabel("Price")
# plt.ylim(100,350)
# plt.show()
# Plotting distribution of prices in training dataset. We see the majority of prices hover in the range of $190 to $300.
#There's an outlier at $6000
plot3 = sns.violinplot(avg_price_month.price, palette = 'Set3', bw=0.1, cut = 1)
plt.title("Average Price Per Month")
plt.xlabel('Average Price')
plt.show()
# plot4 = avg_price_day.plot.bar(x = "Day", y= 'price', color='Red', title='Average Price per Day')
# _ = plot4.set_xlabel("Day")
# _ = plot4.set_ylabel("Price")
# plt.ylim(200,350)
# plt.show()
plt.figure(figsize=(6,8))
sns.set_style("darkgrid")
sns.set_context("paper")
chart2 = sns.barplot(x = 'Day', y='price', data = avg_price_day)
plt.title('Average Price of Air Bnb By Day', fontsize = 20)
plt.ylabel('Price', fontsize = 20)
plt.xlabel('Day', fontsize = 20)
chart2.set_xticklabels(
chart2.get_xticklabels(),
rotation=0,
horizontalalignment = 'right',
fontweight = 'light',
fontsize = 'x-large'
)
plt.yticks(np.arange(0, 350, 20))
plt.tight_layout(h_pad = 0.4)
plt.show()
plot5 = sns.violinplot(avg_price_day.price, palette = 'Set3', bw=0.1, cut = 1)
plt.title("Average Price Per Day")
plt.xlabel('Average Price')
plt.show()
plot6 = sns.boxplot(avg_price_day.price, palette = 'Set3')
plt.title("Average Price Per Day")
plt.xlabel('Average Price')
plt.show()
# -
# First I will discuss the plot showing average price by day, then I will discuss the average price by month.
#
# Unsurprisingly, there seems to be no relationship between AirBnb's prices and the day of the week. In the above plot, we see that the average price per day sits in the range of $\$230$ to $\$250$, with the median of prices at approximately $\$237.5$.
#
# By contrast, we see more fluctuation in Airbnb prices across months. We see from the above plots that average prices range from $\$185.94$ to $\$288.17$. Furthermore, the violin plot shows that average prices cluster into three groups: around $\$190$, $\$220$, and $\$270$. Further analysis is needed to understand this interesting behavior.
# ### Explore different regression models
#
# Fit a regression model that uses the date as a predictor and predicts the price of an Airbnb rental on that date. In this section, you should ignore all other predictors besides the date. Fit the following models on the training set, and compare the $R^2$ of the fitted models on the test set. Include plots of the fitted models for each method.
#
# *Hint*: You may want to convert the `date` column into a numerical variable by taking the difference in days between each date and the earliest date in the column.
#
# 1. Fit simple polynomial models of degree 2, 3, 5, and 8 to the training data. Provide train and test R^2 scores and provide plot(s) of a) the daily averages and b) each model's predictions.
#
# 2. You should see that the degree 8 polynomial's predictions are awful- either nonsensically near zero [or far too large] at many input values. This isn't just simple overfitting. What is going wrong in this model, and how might it be addressed?
#
# 3. Fit a Smoothing spline model in `pygam` with the smoothness parameter chosen by cross-validation on the training set. Provide the train and test scores of the best-performing model, and plot its predictions. Also plot the predictions of models with more and less smoothing. When plotting the smooths, show a scatterplot of the daily averages on the same axes, for context.
#
# 4. Of the models, which performs the best? What is the effect of the tuning parameters (degree and smoothness penalty)?
# +
#your code here
#Converting Date Column
from datetime import datetime
date_min = min(cal_train.date)
date_mintest = min(cal_test.date)
cal_train['date_new'] = cal_train.apply(lambda row:((row['date'] - date_min).days), axis=1)
avg_price_daily= cal_train.groupby(['date_new'], as_index=False).agg({'price':'mean'})
cal_test['date_new'] = cal_test.apply(lambda row:((row['date'] - date_mintest).days), axis=1)
avg_price_daily_test= cal_test.groupby(['date_new'], as_index=False).agg({'price':'mean'})
# -
xpred = pd.DataFrame({"date_new":np.arange(1,366,1)})
xpred
# ### 1. Polynomials
# +
#your code here
#Polynomial Degrees 2
model1 = sm.ols(formula = 'price ~ np.vander(date_new,3, increasing=True)-1', data = cal_train).fit()
poly_predictions = model1.get_prediction(xpred).summary_frame()
poly_predictions_train = model1.get_prediction(cal_train).summary_frame()
##On test data
model1test = sm.ols(formula = 'price ~ np.vander(date_new,3, increasing=True)-1', data = cal_test).fit()
poly_predictionstest = model1test.get_prediction(xpred).summary_frame()
poly_predictions_test = model1test.get_prediction(cal_test).summary_frame()
# Polynomial Degree 3
model2 = sm.ols(formula = 'price ~ np.vander(date_new,4, increasing=True)-1', data = cal_train).fit()
poly_predictions2 = model2.get_prediction(xpred).summary_frame()
poly_predictions2_train = model2.get_prediction(cal_train).summary_frame()
##On test data
model2test = sm.ols(formula = 'price ~ np.vander(date_new,4, increasing=True)-1', data = cal_test).fit()
poly_predictions2test = model2test.get_prediction(xpred).summary_frame()
poly_predictions2_test = model2test.get_prediction(cal_test).summary_frame()
#polynomial Degree 5
model3 = sm.ols(formula = 'price ~ np.vander(date_new,6, increasing=True)-1', data = cal_train).fit()
poly_predictions3 = model3.get_prediction(xpred).summary_frame()
poly_predictions3_train = model3.get_prediction(cal_train).summary_frame()
##On test data
model3test = sm.ols(formula = 'price ~ np.vander(date_new,6, increasing=True)-1', data = cal_test).fit()
poly_predictions3test = model3test.get_prediction(xpred).summary_frame()
poly_predictions3_test = model3test.get_prediction(cal_test).summary_frame()
#Polynomial Degree 8
model4 = sm.ols(formula = 'price ~ np.vander(date_new,9, increasing=True)-1', data = cal_train).fit()
poly_predictions4 = model4.get_prediction(xpred).summary_frame()
poly_predictions4_train = model4.get_prediction(cal_train).summary_frame()
##On test data
model4test = sm.ols(formula = 'price ~ np.vander(date_new,9, increasing=True)-1', data = cal_test).fit()
poly_predictions4test = model4test.get_prediction(xpred).summary_frame()
poly_predictions4_test = model4test.get_prediction(cal_test).summary_frame()
# poly_predictions.head()
#Plots for Degree 2
plot_poly1 = avg_price_daily.plot.scatter(x='date_new',y='price',c='Teal',title="Price data with least-squares quadratic fit")
plot_poly1.set_xlabel("Days from Initial Date")
plot_poly1.set_ylabel("Price")
plot_poly1.plot(xpred, poly_predictions['mean'],color="green")
plot_poly1.plot(xpred, poly_predictions['mean_ci_lower'], color="blue",linestyle="dashed")
plot_poly1.plot(xpred, poly_predictions['mean_ci_upper'], color="blue",linestyle="dashed");
#Plots for Degree 3
plot_poly2 = avg_price_daily.plot.scatter(x='date_new',y='price',c='Teal',title="Price data with least-squares cubic fit")
plot_poly2.set_xlabel("Days from Initial Date")
plot_poly2.set_ylabel("Price")
plot_poly2.plot(xpred, poly_predictions2['mean'],color="green")
plot_poly2.plot(xpred, poly_predictions2['mean_ci_lower'], color="blue",linestyle="dashed")
plot_poly2.plot(xpred, poly_predictions2['mean_ci_upper'], color="blue",linestyle="dashed");
#Plots for Degree 5
plot_poly3 = avg_price_daily.plot.scatter(x='date_new',y='price',c='Teal',title="Price data with least-squares quintic fit")
plot_poly3.set_xlabel("Days from Initial Date")
plot_poly3.set_ylabel("Price")
plot_poly3.plot(xpred, poly_predictions3['mean'],color="green")
plot_poly3.plot(xpred, poly_predictions3['mean_ci_lower'], color="blue",linestyle="dashed")
plot_poly3.plot(xpred, poly_predictions3['mean_ci_upper'], color="blue",linestyle="dashed");
#Plots for Degree 8
plot_poly4 = avg_price_daily.plot.scatter(x='date_new',y='price',c='Teal',title="Price data with least-squares octic fit")
plot_poly4.set_xlabel("Days from Initial Date")
plot_poly4.set_ylabel("Price")
plot_poly4.plot(xpred, poly_predictions4['mean'],color="green")
plot_poly4.plot(xpred, poly_predictions4['mean_ci_lower'], color="blue",linestyle="dashed")
plot_poly4.plot(xpred, poly_predictions4['mean_ci_upper'], color="blue",linestyle="dashed");
# -
#R2 Scores for Each Model on Training and Test Data
train_model = sm.ols('price~date_new',data=cal_train)
fit1 = train_model.fit()
pred1 = fit1.predict(cal_train.date_new)
print(cal_train.price.shape)
print(poly_predictions_train.shape)
print(cal_test.shape)
print(poly_predictionstest.shape)
poly_predictions_test
cal_test.price
train_modeltest = sm.ols('price~date_new',data=cal_test)
fit1test = train_modeltest.fit()
pred1test = fit1test.predict(cal_test.date_new)
# +
print("R^2 score for training data on Quadratic Model:", r2_score(cal_train.price, poly_predictions_train['mean']))
print("R^2 score for test data on Quadratic Model:",r2_score(cal_test.price, poly_predictions_test['mean']))
print("R^2 score for training data on Cubic Model:",r2_score(cal_train.price, poly_predictions2_train['mean']))
print("R^2 score for test data on Cubic Model:",r2_score(cal_test.price, poly_predictions2_test['mean']))
print("R^2 score for training data on Quintic Model:",r2_score(cal_train.price, poly_predictions3_train['mean']))
print("R^2 score for test data on Quintic Model:",r2_score(cal_test.price, poly_predictions3_test['mean']))
print("R^2 score for training data on Octic Model:",r2_score(cal_train.price, poly_predictions4_train['mean']))
print("R^2 score for test data on Octic Model:",r2_score(cal_test.price, poly_predictions4_test['mean']))
# +
#0.04 R2 is correct answer
# train_model = sm.ols('price~date_new',data=avg_price_daily)
# fit1 = train_model.fit()
# pred1 = fit1.predict(avg_price_daily.date_new)
# train_modeltest = sm.ols('price~date_new',data=avg_price_daily_test)
# fit1test = train_modeltest.fit()
# pred1test = fit1test.predict(avg_price_daily_test.date_new)
# print("R^2 score for full training data :", r2_score(avg_price_daily.price,pred1))
# print("R^2 score for full test data :", r2_score(avg_price_daily_test.price,pred1test))
# print("R^2 score for training data on Quadratic Model:", r2_score(avg_price_daily.price, poly_predictions['mean']))
# print("R^2 score for test data on Quadratic Model:",r2_score(avg_price_daily.price, poly_predictionstest['mean']))
# print("R^2 score for training data on Cubic Model:",r2_score(avg_price_daily.price, poly_predictions2['mean']))
# print("R^2 score for test data on Cubic Model:",r2_score(avg_price_daily.price, poly_predictions2test['mean']))
# print("R^2 score for training data on Quintic Model:",r2_score(avg_price_daily.price, poly_predictions3['mean']))
# print("R^2 score for test data on Quintic Model:",r2_score(avg_price_daily.price, poly_predictions3test['mean']))
# print("R^2 score for training data on Octic Model:",r2_score(avg_price_daily.price, poly_predictions4['mean']))
# print("R^2 score for test data on Octic Model:",r2_score(avg_price_daily.price, poly_predictions4test['mean']))
# print("R^2 score for full training data :", r2_score(cal_train.price,pred1))
# print("R^2 score for full test data :", r2_score(cal_test.price,pred1test))
# -
# _your answer here_
#
# ### 2. Degree 8 oddity
#
#
# This is a case of numerical ill-conditioning rather than ordinary overfitting. Let $X$ be the Vandermonde matrix of observed data: with the raw date variable running from 0 to roughly 365, the degree-8 columns contain values as large as $365^8$ and are nearly collinear, so $X'X$ is extremely ill-conditioned and the least-squares coefficients are numerically unstable, which is why the fitted curve swings to nonsensical values. It can be addressed by centering and rescaling the date (or using an orthogonal polynomial basis) before fitting, or by adding regularization.
#
# Source: http://home.iitk.ac.in/~shalab/regression/Chapter12-Regression-PolynomialRegression.pdf
# ### 3. Smoothing spline model with the smoothness parameter chosen by cross-validation on the training set.
#
# Fit a Smoothing spline model in pygam with the smoothness parameter chosen by cross-validation on the training set. Provide the train and test scores of the best-performing model, and plot its predictions. Also plot the predictions of models with more and less smoothing. When plotting the smooths, show a scatterplot of the daily averages on the same axes, for context.
# +
# from sklearn.model_selection import KFold
# from sklearn.metrics import r2_score
# lam= np.logspace(-10,10,21)
# kf = KFold(n_splits=21, random_state=47, shuffle=True)
# scores = np.zeros((21,len(lam)))
# for i, (train_index, test_index) in enumerate(kf.split(avg_price_daily)):
# train_df = avg_price_daily.iloc[train_index,:]
# #test_df = sorted_noisy_diab.iloc[test_index,:]
# for j,cur_smoothing in enumerate(lam):
# gam = LinearGAM(lam = cur_smoothing, n_splines=25).fit(train_df.date_new,train_df.price)
# scores[i,j] = r2_score(train_df['price'], gam.predict(train_df['date_new']))
# np.mean(scores, axis=0)
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score
lam= np.logspace(-3,2,6)
kf = KFold(n_splits=6, random_state=47, shuffle=True)
scores = np.zeros((6,len(lam)))
for i, (train_index, test_index) in enumerate(kf.split(cal_train)):
train_df = cal_train.iloc[train_index,:]
#test_df = sorted_noisy_diab.iloc[test_index,:]
for j,cur_smoothing in enumerate(lam):
gam = LinearGAM(lam = cur_smoothing, n_splines=25).fit(train_df.date_new,train_df.price)
scores[i,j] = r2_score(train_df['price'], gam.predict(train_df['date_new']))
np.mean(scores, axis=0)
# -
best_s = lam[np.argmax(np.mean(scores, axis=0))]
best_s
# +
for i, (train_index, test_index) in enumerate(kf.split(cal_test)):
# train_df = avg_price_daily.iloc[train_index,:]
test_df = cal_test.iloc[test_index,:]
for j,cur_smoothing in enumerate(lam):
gam = LinearGAM(lam = cur_smoothing, n_splines=25).fit(train_df.date_new,train_df.price)
scores[i,j] = r2_score(train_df['price'], gam.predict(train_df['date_new']))
np.mean(scores, axis=0)
# -
best_s_test = lam[np.argmax(np.mean(scores, axis=0))]
best_s_test
# Best Model for Training Data
X = avg_price_daily.date_new
Y = avg_price_daily.price
gam = LinearGAM( lam = best_s, n_splines=25).fit(X,Y)
XX=gam.generate_X_grid(term=0)
plt.scatter(X,Y,alpha=0.3);
plt.title('Best Model Chosen by Cross Validation for Training Data')
plt.ylabel("Price")
plt.xlabel('Days from Initial Day')
plt.plot(XX,gam.predict(XX), color='red')
messy_lams= [1e-5,0,10,1e2,1e5]
for i in messy_lams:
X = avg_price_daily.date_new
Y = avg_price_daily.price
gam = LinearGAM( lam = i, n_splines=25).fit(X,Y)
XX=gam.generate_X_grid(term=0)
print(i)
plt.figure()
plt.scatter(X,Y,alpha=0.3);
plt.title('Not Best Model')
plt.ylabel("Price")
plt.xlabel('Days from Initial Day')
plt.plot(XX,gam.predict(XX), color='red')
plt.show()
# +
#Best model for Test Data
X_test = avg_price_daily_test.date_new
Y_test = avg_price_daily_test.price
gam_test = LinearGAM( lam = best_s_test, n_splines=25).fit(X_test,Y_test)
XX_test=gam_test.generate_X_grid(term=0)
plt.scatter(X_test,Y_test,alpha=0.3);
plt.plot(XX_test,gam_test.predict(XX_test), color='red')
plt.title('Best Model Chosen by Cross Validation for Test Data')
plt.ylabel("Price")
plt.xlabel('Days from Initial Day')
gam_testr2 = LinearGAM( lam = best_s_test, n_splines=25).fit(cal_test.date_new,cal_test.price)
print(r2_score(cal_test.price,gam_testr2.predict(cal_test.date_new)))
# -
messy_lams= [1e-5,0,10,1e2,1e5]
for i in messy_lams:
X = avg_price_daily_test.date_new
Y = avg_price_daily_test.price
gam = LinearGAM( lam = i, n_splines=25).fit(X,Y)
XX=gam.generate_X_grid(term=0)
print(i)
plt.figure()
plt.scatter(X,Y,alpha=0.3);
plt.title('Not Best Model')
plt.ylabel("Price")
plt.xlabel('Days from Initial Day')
plt.plot(XX,gam.predict(XX), color='red')
plt.show()
# +
# from sklearn.model_selection import GridSearchCV
# #parameters= {'lam' :[0,1e3], 'n_splines':[0,50]}
# # lam= np.logspace(-3,11,11)
# # lams = np.array([lam])
# X = avg_price_daily.date_new
# Y = avg_price_daily.price
# gam = LinearGAM(n_splines=25).fit(X,Y)
# XX=gam.generate_X_grid(term=0)
# lam = gam.lam
# gam.gridsearch(X,Y,lam=lam)
# # gam.summary()
# # plt.scatter(X,Y,alpha=0.3);
# # plt.plot(XX,gam.predict(XX))
# -
# Cross-check: let pygam's built-in gridsearch choose the smoothing parameter
gam = LinearGAM()
gam.gridsearch(X, Y)
# ### 4. Effects of parameters
# Of the models considered, the smoothing-spline GAM (LinearGAM) selected by cross-validation gives the highest R^2. The tuning parameters trade flexibility against stability: raising the polynomial degree adds flexibility, but the high-degree fits (especially degree 8) become numerically unstable and generalize poorly. For the spline, the smoothness penalty controls wiggliness: a very small penalty chases the day-to-day noise, a very large penalty flattens the fit toward a straight line, and the cross-validated value sits in between.
# ### Part 1b: Adapting to weekends
#
# It is possible/likely that prices on weekends (Friday nights and Saturday nights) are systematically higher than prices on weekdays, and that including this aspect of the data-generating process in our model will produce a better fit.
#
# 5. Create a binary indicator of weekend versus non-weekend, and include it in your best-performing model from above. How does it impact the model's performance?
# *Hint*: include a categorical variable in a GAM by importing and using `pygam.f` in the same way you've used `pygam.s` for continuous variables.
#
cal_train['weekend'] = ((cal_train['Day'] == 4) | (cal_train['Day'] == 5)).astype(int)
cal_train.head(10)
X = cal_train.date_new
Y = cal_train.price
gam = LinearGAM(lam = best_s, n_splines=25).fit(X,Y)
XX=gam.generate_X_grid(term=0)
plt.scatter(X,Y,alpha=0.3);
plt.title('Best Model Chosen by Cross Validation for Training Data')
plt.ylabel("Price")
plt.xlabel('Days from Initial Day')
plt.plot(XX,gam.predict(XX), color='red')
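# Below is a minimal sketch (not a tuned, final answer) of how the weekend indicator can enter the model as a factor term via `pygam.f`, alongside the smoothing spline on the date, per the hint above. It reuses the columns built above and the cross-validated `best_s` for the spline's smoothness.
# +
cal_test['weekend'] = ((cal_test['Day'] == 4) | (cal_test['Day'] == 5)).astype(int)
X_wk_train = cal_train[['date_new', 'weekend']].values
y_wk_train = cal_train['price'].values
X_wk_test = cal_test[['date_new', 'weekend']].values
y_wk_test = cal_test['price'].values
gam_wk = LinearGAM(s(0, lam=best_s, n_splines=25) + f(1)).fit(X_wk_train, y_wk_train)
print("train R^2 with weekend term:", r2_score(y_wk_train, gam_wk.predict(X_wk_train)))
print("test R^2 with weekend term:", r2_score(y_wk_test, gam_wk.predict(X_wk_test)))
# -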
# ### Part 1c: Going the Distance
#
# You may have noticed from your scatterplots of average price versus day on the training set that there are a few days with abnormally high average prices.
#
# Sort the training data in decreasing order of average price, extracting the 10 most expensive dates. Why are the prices on these days so high? Is it something about the days themselves, noise, an artifact of data collection, or something else?
#your code here
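# A minimal sketch, using the cal_train frame loaded above: average the prices
# by calendar date and pull out the 10 most expensive dates.
top10_dates = (cal_train.groupby('date')['price']
                        .mean()
                        .sort_values(ascending=False)
                        .head(10))
print(top10_dates)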
# _your answer here_
# ### Problem 2: Predicting Airbnb Rental Price Through Listing Features
#
# In this problem, we'll continue our exploration of Airbnb data by predicting price based on listing features. The data can be found in `listings_train.csv` and `listings_test.csv`.
#
# First, visualize the relationship between each of the predictors and the response variable. Does it appear that some of the predictors have a nonlinear relationship with the response variable?
# +
#your code here
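# A generic sketch (the listings column names are not shown in this file, so the
# 'price' response and the data/ paths below are assumptions): load the listings
# data and scatter each numeric predictor against price.
listings_train = pd.read_csv("data/listings_train.csv")
listings_test = pd.read_csv("data/listings_test.csv")
num_cols = [c for c in listings_train.select_dtypes(include=np.number).columns if c != 'price']
fig, axs = plt.subplots(len(num_cols), 1, figsize=(6, 3 * len(num_cols)))
for ax, col in zip(np.atleast_1d(axs), num_cols):
    ax.scatter(listings_train[col], listings_train['price'], alpha=0.2)
    ax.set_xlabel(col)
    ax.set_ylabel('price')
plt.tight_layout()
plt.show()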
# -
# _your answer here_
# ### Part 2a: Polynomial Regression
#
# Fit the following models on the training set and compare the $R^2$ score of the fitted models on the test set:
#
# * Linear regression
# * Regression with polynomial basis functions of degree 3 (i.e., basis functions $x$, $x^2$, $x^3$ for each predictor $x$) for quantitative predictors.
# +
#your code here
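# Sketch (assumes the listings frames and the 'price' column from the cell above).
from sklearn.linear_model import LinearRegression
X_cols = [c for c in listings_train.select_dtypes(include=np.number).columns if c != 'price']
X_tr, y_tr = listings_train[X_cols], listings_train['price']
X_te, y_te = listings_test[X_cols], listings_test['price']
# Plain linear regression
lin = LinearRegression().fit(X_tr, y_tr)
print("linear test R^2:", r2_score(y_te, lin.predict(X_te)))
# Degree-3 polynomial basis on the quantitative predictors. Note that
# PolynomialFeatures also adds interaction terms; pure per-column powers could be
# built with np.vander instead if only x, x^2, x^3 are wanted.
poly = PolynomialFeatures(degree=3, include_bias=False)
lin3 = LinearRegression().fit(poly.fit_transform(X_tr), y_tr)
print("cubic-basis test R^2:", r2_score(y_te, lin3.predict(poly.transform(X_te))))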
# -
# ### Part 2b: Generalized Additive Model (GAM)
#
# 1. Do you see any advantage in fitting an additive regression model to these data, compared to the above models?
#
# 1. Fit a GAM to the training set, and compare the test $R^2$ of the fitted model to the above models. You may use a smoothing spline basis function on each predictor, with the same smoothing parameter for each basis function, tuned using cross-validation on the training set.
#
# 2. Plot and examine the smooth of each predictor for the fitted GAM. What are some useful insights conveyed by these plots?
#
# _your answer here_
#
from pygam import LinearGAM, s, f
from sklearn.preprocessing import LabelEncoder
#your code here
# +
#your code here
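# Sketch: one smoothing-spline term per numeric predictor, the same lam for every
# term, with lam tuned by k-fold cross-validation on the training set (reuses
# X_tr/y_tr, X_te/y_te, X_cols and the imports from the cells above).
lams = np.logspace(-3, 3, 7)
kf = KFold(n_splits=5, shuffle=True, random_state=0)
cv_scores = []
for cur in lams:
    fold_scores = []
    for tr_idx, val_idx in kf.split(X_tr):
        terms = s(0, lam=cur)
        for i in range(1, X_tr.shape[1]):
            terms += s(i, lam=cur)
        g = LinearGAM(terms).fit(X_tr.values[tr_idx], y_tr.values[tr_idx])
        fold_scores.append(r2_score(y_tr.values[val_idx], g.predict(X_tr.values[val_idx])))
    cv_scores.append(np.mean(fold_scores))
best_lam = lams[int(np.argmax(cv_scores))]
terms = s(0, lam=best_lam)
for i in range(1, X_tr.shape[1]):
    terms += s(i, lam=best_lam)
gam_listings = LinearGAM(terms).fit(X_tr.values, y_tr.values)
print("best lam:", best_lam)
print("GAM test R^2:", r2_score(y_te, gam_listings.predict(X_te.values)))
# Plot the fitted smooth of each predictor (partial dependence)
for i, col in enumerate(X_cols):
    XX = gam_listings.generate_X_grid(term=i)
    plt.figure()
    plt.plot(XX[:, i], gam_listings.partial_dependence(term=i, X=XX))
    plt.title(col)
    plt.show()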
# -
# ### Part 2c: Putting it All Together
# Using the GAM outputs and insights from part 1, discuss the most important variables driving prices, and give advice to a frugal traveller hoping to visit Boston.
# _your answer here_
| hw1/cs109b_hw1_jason.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="1"></a> <br>
# ## Step 1 : Reading and Understanding Data
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
# +
# Source:
# Dr <NAME>, Director: Public Analytics group. chend '@' lsbu.ac.uk, School of Engineering, London South Bank University, London SE1 0AA, UK.
# Data Set Information:
# This is a transnational data set which contains all the transactions occurring between 01/12/2010 and 09/12/2011 for a UK-based and registered non-store online retailer. The company mainly sells unique all-occasion gifts. Many customers of the company are wholesalers.
# Attribute Information:
# InvoiceNo: Invoice number. Nominal, a 6-digit integral number uniquely assigned to each transaction. If this code starts with letter 'c', it indicates a cancellation.
# StockCode: Product (item) code. Nominal, a 5-digit integral number uniquely assigned to each distinct product.
# Description: Product (item) name. Nominal.
# Quantity: The quantities of each product (item) per transaction. Numeric.
# InvoiceDate: Invoice date and time. Numeric, the day and time when each transaction was generated.
# UnitPrice: Unit price. Numeric, Product price per unit in sterling.
# CustomerID: Customer number. Nominal, a 5-digit integral number uniquely assigned to each customer.
# Country: Country name. Nominal, the name of the country where each customer resides.
# +
# Reading the data on which analysis needs to be done
retail = pd.read_csv("./datasets/OnlineRetail.csv", encoding= 'unicode_escape')
retail.tail(200)
# +
# shape of df
retail.shape
# +
# df info
retail.info()
# +
# df description
retail.describe()
# -
# <a id="2"></a> <br>
# ## Step 2 : Data Cleansing
# +
# Calculating the Missing Values % contribution in DF
df_null = round(100*(retail.isnull().sum())/len(retail), 2)
df_null
# +
# Dropping rows with missing values
retail = retail.dropna()
retail.shape
# +
# Changing the datatype of Customer Id as per Business understanding
retail['CustomerID'] = retail['CustomerID'].astype(str)
# -
# <a id="3"></a> <br>
# ## Step 3 : Data Preparation
# #### We are going to analyze the customers based on the 3 factors below:
# - R (Recency): Number of days since last purchase
# - F (Frequency): Number of transactions
# - M (Monetary): Total amount of transactions (revenue contributed)
# +
# New Attribute : Monetary
retail['Amount'] = retail['Quantity']*retail['UnitPrice']
rfm_m = retail.groupby('CustomerID')['Amount'].sum()
rfm_m = rfm_m.reset_index()
rfm_m.head()
# +
# New Attribute : Frequency
rfm_f = retail.groupby('CustomerID')['InvoiceNo'].count()
rfm_f = rfm_f.reset_index()
rfm_f.columns = ['CustomerID', 'Frequency']
rfm_f.head()
# +
# Merging the two dfs
rfm = pd.merge(rfm_m, rfm_f, on='CustomerID', how='inner')
rfm.head()
# +
# New Attribute : Recency
# Convert to datetime to proper datatype
retail['InvoiceDate'] = pd.to_datetime(retail['InvoiceDate'],format='%m/%d/%Y %H:%M')
# +
# Compute the maximum date to know the last transaction date
max_date = max(retail['InvoiceDate'])
max_date
# +
# Compute the difference between max date and transaction date
retail['Diff'] = max_date - retail['InvoiceDate']
retail.head()
# +
# Compute last transaction date to get the recency of customers
rfm_p = retail.groupby('CustomerID')['Diff'].min()
rfm_p = rfm_p.reset_index()
rfm_p.head()
# +
# Extract number of days only
rfm_p['Diff'] = rfm_p['Diff'].dt.days
rfm_p.head()
# -
rfm.head()
rfm_p.head()
rfm_p['CustomerID'] = rfm_p.CustomerID.astype(str)
# +
# Merge the dataframes to get the final RFM dataframe
rfm = pd.merge(rfm, rfm_p, on='CustomerID', how='inner')
rfm.columns = ['CustomerID', 'Amount', 'Frequency', 'Recency']
rfm.head()
# -
# #### There are 2 types of outliers, and we will treat them because they can skew our dataset:
# - Statistical
# - Domain specific
# +
# Outlier Analysis of Amount Frequency and Recency
attributes = ['Amount','Frequency','Recency']
plt.rcParams['figure.figsize'] = [10,8]
sns.boxplot(data = rfm[attributes], orient="v", palette="Set2" ,whis=1.5,saturation=1, width=0.7)
plt.title("Outliers Variable Distribution", fontsize = 14, fontweight = 'bold')
plt.ylabel("Range", fontweight = 'bold')
plt.xlabel("Attributes", fontweight = 'bold')
# +
# Removing (statistical) outliers for Amount
Q1 = rfm.Amount.quantile(0.05)
Q3 = rfm.Amount.quantile(0.95)
IQR = Q3 - Q1
rfm = rfm[(rfm.Amount >= Q1 - 1.5*IQR) & (rfm.Amount <= Q3 + 1.5*IQR)]
# Removing (statistical) outliers for Recency
Q1 = rfm.Recency.quantile(0.05)
Q3 = rfm.Recency.quantile(0.95)
IQR = Q3 - Q1
rfm = rfm[(rfm.Recency >= Q1 - 1.5*IQR) & (rfm.Recency <= Q3 + 1.5*IQR)]
# Removing (statistical) outliers for Frequency
Q1 = rfm.Frequency.quantile(0.05)
Q3 = rfm.Frequency.quantile(0.95)
IQR = Q3 - Q1
rfm = rfm[(rfm.Frequency >= Q1 - 1.5*IQR) & (rfm.Frequency <= Q3 + 1.5*IQR)]
# -
# ### Rescaling the Attributes
#
# It is extremely important to rescale the variables so that they have a comparable scale.
# There are two common ways of rescaling:
#
# 1. Min-Max scaling
# 2. Standardisation (mean-0, sigma-1)
#
# Here, we will use Standardisation Scaling.
# +
# Rescaling the attributes
rfm_df = rfm[['Amount', 'Frequency', 'Recency']]
# Instantiate
scaler = StandardScaler()
# fit_transform
rfm_df_scaled = scaler.fit_transform(rfm_df)
rfm_df_scaled.shape
# -
rfm_df_scaled = pd.DataFrame(rfm_df_scaled)
rfm_df_scaled.columns = ['Amount', 'Frequency', 'Recency']
rfm_df_scaled.head()
# <a id="4"></a> <br>
# ## Step 4 : Building the Model
# ### K-Means Clustering
# +
# k-means with some arbitrary k
kmeans = KMeans(n_clusters=4, max_iter=50)
kmeans.fit(rfm_df_scaled)
# -
kmeans.labels_
# ### Finding the Optimal Number of Clusters
# #### Elbow Curve to get the right number of Clusters
# A fundamental step for any unsupervised algorithm is to determine the optimal number of clusters into which the data may be clustered. The Elbow Method is one of the most popular methods to determine this optimal value of k.
# +
# Elbow-curve/SSD
# inertia
# Sum of squared distances of samples to their closest cluster center.
ssd = []
range_n_clusters = [2, 3, 4, 5, 6, 7, 8]
for num_clusters in range_n_clusters:
kmeans = KMeans(n_clusters=num_clusters, max_iter=50)
kmeans.fit(rfm_df_scaled)
ssd.append(kmeans.inertia_)
# plot the SSDs for each n_clusters
plt.plot(range_n_clusters, ssd)
# -
# Final model with k=3
kmeans = KMeans(n_clusters=3, max_iter=50)
kmeans.fit(rfm_df_scaled)
kmeans.labels_
# assign the label
rfm['Cluster_Id'] = kmeans.labels_
rfm.head()
# +
# Box plot to visualize Cluster Id vs Amount
sns.boxplot(x='Cluster_Id', y='Amount', data=rfm)
# +
# Box plot to visualize Cluster Id vs Frequency
sns.boxplot(x='Cluster_Id', y='Frequency', data=rfm)
# +
# Box plot to visualize Cluster Id vs Recency
sns.boxplot(x='Cluster_Id', y='Recency', data=rfm)
# -
# ### Inference:
# K-Means Clustering with 3 Cluster Ids
# - Customers with Cluster Id 1 have a high transaction amount compared to other customers.
# - Customers with Cluster Id 1 are frequent buyers.
# - Customers with Cluster Id 2 are not recent buyers and hence of least importance from a business point of view.
| 6_K_Means_Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Recommendations with MovieTweetings: Getting to Know The Data
#
# Throughout this lesson, you will be working with the [MovieTweetings Data](https://github.com/sidooms/MovieTweetings/tree/master/recsyschallenge2014). To get started, you can read more about this project and the dataset from the [publication here](http://crowdrec2013.noahlab.com.hk/papers/crowdrec2013_Dooms.pdf).
#
# **Note:** There are solutions to each of the notebooks available by hitting the orange jupyter logo in the top left of this notebook. Additionally, you can watch me work through the solutions on the screencasts that follow each workbook.
#
# To get started, read in the libraries and the two datasets you will be using throughout the lesson using the code below.
#
#
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tests as t
# %matplotlib inline
# Read in the datasets
movies = pd.read_csv('https://raw.githubusercontent.com/sidooms/MovieTweetings/master/latest/movies.dat', delimiter='::', header=None, names=['movie_id', 'movie', 'genre'], dtype={'movie_id': object}, engine='python')
reviews = pd.read_csv('https://raw.githubusercontent.com/sidooms/MovieTweetings/master/latest/ratings.dat', delimiter='::', header=None, names=['user_id', 'movie_id', 'rating', 'timestamp'], dtype={'movie_id': object, 'user_id': object, 'timestamp': object}, engine='python')
# -
# #### 1. Take a Look At The Data
#
# Take a look at the data and use your findings to fill in the dictionary below with the correct responses to show your understanding of the data.
# +
# number of movies
print("The number of movies is {}.".format(movies.shape[0]))
# number of ratings
print("The number of ratings is {}.".format(reviews.shape[0]))
# unique users
print("The number of unique users is {}.".format(reviews.user_id.nunique()))
# missing ratings
print("The number of missing reviews is {}.".format(int(reviews.rating.isnull().mean()*reviews.shape[0])))
# the average, min, and max ratings given
print("The average, minimum, and max ratings given are {}, {}, and {}, respectively.".format(np.round(reviews.rating.mean(), 0), reviews.rating.min(), reviews.rating.max()))
# +
# number of different genres
genres = []
for val in movies.genre:
try:
genres.extend(val.split('|'))
except AttributeError:
pass
# we end up needing this later
genres = set(genres)
print("The number of genres is {}.".format(len(genres)))
# +
# Use your findings to match each variable to the correct statement in the dictionary
a = 53968
b = 10
c = 7
d = 31245
e = 15
f = 0
g = 4
h = 712337
i = 28
dict_sol1 = {
'The number of movies in the dataset': d,
'The number of ratings in the dataset': h,
'The number of different genres': i,
'The number of unique users in the dataset': a,
'The number missing ratings in the reviews dataset': f,
'The average rating given across all ratings': c,
'The minimum rating given across all ratings': f,
'The maximum rating given across all ratings': b
}
# Check your solution
t.q1_check(dict_sol1)
# -
# #### 2. Data Cleaning
#
# Next, we need to pull some additional relevant information out of the existing columns.
#
# For each of the datasets, there are a couple of cleaning steps we need to take care of:
#
# #### Movies
# * Pull the date from the title and create new column
# * Dummy the date column with 1's and 0's for each century of a movie (1800's, 1900's, and 2000's)
# * Dummy column the genre with 1's and 0's for each genre
#
# #### Reviews
# * Create a date out of time stamp
#
# You can check your results against the header of my solution by running the cell below with the **show_clean_dataframes** function.
# +
# pull date if it exists
create_date = lambda val: val[-5:-1] if val[-1] == ')' else np.nan
# apply the function to pull the date
movies['date'] = movies['movie'].apply(create_date)
# Return century of movie as a dummy column
def add_movie_year(val):
if val[:2] == yr:
return 1
else:
return 0
# Apply function
for yr in ['18', '19', '20']:
movies[str(yr) + "00's"] = movies['date'].apply(add_movie_year)
# +
# Function to split and return values for columns
def split_genres(val):
try:
if val.find(gene) >-1:
return 1
else:
return 0
except AttributeError:
return 0
# Apply function for each genre
for gene in genres:
movies[gene] = movies['genre'].apply(split_genres)
# -
movies.head() #Check what it looks like
# +
import datetime
change_timestamp = lambda val: datetime.datetime.fromtimestamp(int(val)).strftime('%Y-%m-%d %H:%M:%S')
reviews['date'] = reviews['timestamp'].apply(change_timestamp)
# -
# now reviews and movies are the final dataframes with the necessary columns
reviews.to_csv('./reviews_clean.csv')
movies.to_csv('./movies_clean.csv')
| lessons/Recommendations/1_Intro_to_Recommendations/.ipynb_checkpoints/1_Introduction to the Recommendation Data - Solution-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="0qesHlWDmzVS" colab_type="text"
# ## Imports
# + id="R2rm41sQmyig" colab_type="code" colab={}
import csv
import numpy as np
from random import randint
# + id="HmydZ6wJmYDT" colab_type="code" outputId="94a3151a-0844-4eb7-99a9-d210a634070d" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="eOKBwJ0do-fs" colab_type="text"
# ## Utils
# + id="AnrOkra1qVUP" colab_type="code" colab={}
MIN_SCORE = 3
MAX_SCORE = 8
USERS_NO = 25
# + id="WNNayKd8o_lH" colab_type="code" colab={}
def getScore(name, queryWords):
numberOfOcurrences = 0
for queryWord in queryWords:
if (name.lower().find(queryWord) != -1):
numberOfOcurrences += randint(MIN_SCORE, MAX_SCORE)
return numberOfOcurrences
# + id="VSF44zFSvTvr" colab_type="code" colab={}
DEFAULT_VALUE = -1
def getSortedScores(namesScores):
counter = 1
namesScoresCopy = namesScores.copy()
sortedScores = namesScores.copy()
maximum = max(namesScoresCopy)
while (maximum != DEFAULT_VALUE):
index = namesScoresCopy.index(maximum)
namesScoresCopy[index] = DEFAULT_VALUE
sortedScores[index] = counter
counter += 1
maximum = max(namesScoresCopy)
return sortedScores
# + id="5AP3t3Y7wh-V" colab_type="code" outputId="944a5013-d993-46b0-bf7e-a3ecff04b280" colab={"base_uri": "https://localhost:8080/", "height": 51}
x = [0, 7, 7, 2]
print(getSortedScores(x))
print(x)
# + [markdown] id="Sioz5fTBm3F8" colab_type="text"
# ## Read dataset
# + id="rNJIrBfem672" colab_type="code" colab={}
ids = []
names = []
with open('/content/drive/My Drive/Colab Notebooks/MLRanking/dataset_1.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
ids.append(row[0].strip())
names.append(row[1].strip())
ids = ids[1:]
names = names[1:]
# + id="xN9MoNmCrtoj" colab_type="code" outputId="e1e01ee8-95f2-4070-a428-99fe06f765e7" colab={"base_uri": "https://localhost:8080/", "height": 54}
print(names)
# + [markdown] id="pB-6QUjtogeO" colab_type="text"
# ## Queries Keywords
# + id="oZX2fTH5opNA" colab_type="code" colab={}
queriesKeywords = [
["white", "blood", "cells", "count", "hemoglobin", "plasma", "leucocyte"],
["glucose", "blood", "sugar", "hemoglobin"],
["bilirubin", "plasma"]
]
# + id="Nf3Iy71Wo8FL" colab_type="code" outputId="ad88eab0-a1be-40a9-da06-57bf9c2b5604" colab={"base_uri": "https://localhost:8080/", "height": 34}
getScore("white", queriesKeywords[0])
# + [markdown] id="PefIHL6pqnAW" colab_type="text"
# ## Get N scores per query
# + id="SADV0Bo-qt0u" colab_type="code" colab={}
# The format in the dictionary would be "queryX-userY": score
user_query_scores = {}
query_index = 1
for query_keywords in queriesKeywords:
# print("Query " + str(query_index))
# print(query_keywords)
for user_index in range(1, USERS_NO + 1):
names_scores = []
for name in names:
name_score = getScore(name, query_keywords)
names_scores.append(name_score)
sorted_scores = getSortedScores(names_scores)
user_query_scores["query" + str(query_index) + "-user" + str(user_index)] = sorted_scores
# print("User " + str(user_index) + ": [" + ",".join(str(x) for x in names_scores) + "]")
# print("User " + str(user_index) + ": [" + ",".join(str(x) for x in sorted_scores) + "]")
query_index += 1
# + id="IUhUOKl2zjJL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="46f26a24-1b51-4c0a-a881-bf6e4c476d15"
user_query_scores["query1-user1"]
# + [markdown] id="2SShmAERz-kx" colab_type="text"
# ## Write data
# + id="IMInvz9z0OkT" colab_type="code" colab={}
output_file_name = "dataset_modified"
output_file_ext = ".csv"
output_file_path = "/content/drive/My Drive/Colab Notebooks/MLRanking/output/"
# + id="CPAhO-qp0PSM" colab_type="code" outputId="fb442486-7b92-48ec-ac75-4fbb6594b79c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
for i in range(1, 4):
full_output_file_path = output_file_path + output_file_name + str(i) + output_file_ext
with open(full_output_file_path, "w") as csv_file:
csv_writer = csv.writer(csv_file)
# Per each row
for index in range(0, len(ids)):
row_dic = {'id': ids[index] }
# Add user score index
for key, value in user_query_scores.items():
if (key.startswith("query" + str(i))):
column_name = key[7:]
row_dic[column_name] = value[index]
print(row_dic)
csv_writer.writerow(row_dic.values())
# + [markdown] id="2YgZeFdZ5XBa" colab_type="text"
# ## Feature Extraction
# + [markdown] id="jTOPqZAe5j-q" colab_type="text"
# ### Cosine
# + id="E-E63FyS5ltR" colab_type="code" colab={}
import math
import re
from collections import Counter
WORD = re.compile(r"\w+")
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in list(vec1.keys())])
sum2 = sum([vec2[x] ** 2 for x in list(vec2.keys())])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
# + id="gPMdI76e5ony" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8bc4f1d9-6ad1-4631-8e8b-71a53ff27712"
vector1 = text_to_vector(names[0])
for name in names[1:]:
vector2 = text_to_vector(name)
cosine = get_cosine(vector1, vector2)
print(cosine)
# + [markdown] id="WOm5n1Sd_pcp" colab_type="text"
# ## Pairwise
# + id="ifAd2NvFAJwM" colab_type="code" colab={}
import itertools
import numpy as np
from sklearn import svm, linear_model
from sklearn.model_selection import train_test_split
# + id="MEeR-ewE__7l" colab_type="code" colab={}
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
    In this method, all pairs are chosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Parameters
----------
X : array, shape (n_samples, n_features)
The data
y : array, shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns
-------
    X_trans : array, shape (k, n_features)
Data as pairs
y_trans : array, shape (k,)
Output class labels, where classes have values {-1, +1}
"""
X_new = []
y_new = []
y = np.asarray(y)
if y.ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = - y_new[-1]
X_new[-1] = - X_new[-1]
return np.asarray(X_new), np.asarray(y_new).ravel()
# + id="x8ze1XIm_7_3" colab_type="code" colab={}
class RankSVM(svm.LinearSVC):
"""Performs pairwise ranking with an underlying LinearSVC model
Input should be a n-class ranking problem, this object will convert it
into a two-class classification problem, a setting known as
`pairwise ranking`.
See object :ref:`svm.LinearSVC` for a full description of parameters.
"""
def fit(self, X, y):
"""
Fit a pairwise ranking model.
Parameters
----------
X : array, shape (n_samples, n_features)
y : array, shape (n_samples,) or (n_samples, 2)
Returns
-------
self
"""
X_trans, y_trans = transform_pairwise(X, y)
super(RankSVM, self).fit(X_trans, y_trans)
return self
def decision_function(self, X):
return np.dot(X, self.coef_.ravel())
def predict(self, X):
"""
Predict an ordering on X. For a list of n samples, this method
returns a list from 0 to n-1 with the relative order of the rows of X.
        The ordering is such that items ranked on top are predicted a higher
        ordering (i.e. 0 means the last item and n_samples would be the item
        ranked on top).
Parameters
----------
X : array, shape (n_samples, n_features)
Returns
-------
ord : array, shape (n_samples,)
Returns a list of integers representing the relative order of
the rows in X.
"""
if hasattr(self, 'coef_'):
return np.argsort(np.dot(X, self.coef_.ravel()))
else:
raise ValueError("Must call fit() prior to predict()")
def score(self, X, y):
"""
Because we transformed into a pairwise problem, chance level is at 0.5
"""
X_trans, y_trans = transform_pairwise(X, y)
return np.mean(super(RankSVM, self).predict(X_trans) == y_trans)
# + id="Gt6BOCe6PPpG" colab_type="code" colab={}
def get_features(query):
features = []
for name in names:
feature_vector = [get_cosine(text_to_vector(name), query)]
features.append(feature_vector)
return features
# + id="OBXVThbPAEEU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="f9c7b245-38bd-4b3b-ca02-eb09cbf3a7a1"
rank_svm = RankSVM()
# Train SVM
features = []
for name in names:
feature_vector = [get_cosine(text_to_vector(name), text_to_vector("Glucose in blood"))]
features.append(feature_vector)
X = np.array(features)
Y = np.array(user_query_scores['query1-user1'])
rank_svm.fit(X, Y)
# + id="yUNcP9fuMsEe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="407dca99-6f14-4e9e-a244-1f6b768ffe75"
## Test with query 1
query1 = text_to_vector("Glucose in blood")
result = rank_svm.predict(get_features(query1))
for index in result[:10]:
print(names[index])
# + id="q2BGVjIyM1rV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="55b89522-fc98-44c2-a131-e1c240115ebf"
## Test with query 2
query2 = text_to_vector("Bilirubin in plasma")
result = rank_svm.predict(get_features(query2))
for index in result[:10]:
print(names[index])
# + id="1UhXlMGWM2XR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="d3d02446-48c6-4a70-cd83-0237ae03a4f8"
## Test with query 3
query3 = text_to_vector("White blood cells count")
result = rank_svm.predict(get_features(query3))
for index in result[:10]:
print(names[index])
| src/MLRanking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
x=input()
print(x)
x=input("enter your name")
x
x=input("enter your name")
y=input("enter date of birth")
z=input("enter phone number")
x=input("first number")
y=input("second number")
z=int(x)+int(y)
print(z)
type(x)
x="india is my country"
x[0]
x[1]
x[2]
x[3]
x[5]
x[12]
x[-1]
x[1:5]
x[0]
x[4]
x[5]
x[0:5]
p=input("p")
r=input("r")
p1=int(p)+int(p)*int(n)*float(r)
n=input("n")
p1=int(p)+int(p)*int(n)*float(r)
print(p1)
| input fn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://maltem.com/wp-content/uploads/2020/04/LOGO_MALTEM.png" style="float: left; margin: 20px; height: 55px">
#
# <br>
# <br>
# <br>
# <br>
#
# # Introduction to Logistic Regression
#
# _Authors: <NAME>, <NAME>, <NAME>_
#
# ---
#
# ### Learning Objectives
# - Distinguish between regression and classification problems.
# - Understand how logistic regression is similar to and different from linear regression.
# - Fit, generate predictions from, and evaluate a logistic regression model in `sklearn`.
# - Understand how to interpret the coefficients of logistic regression.
# - Know the benefits of logistic regression as a classifier.
# <a id='introduction'></a>
#
# ## Introduction
#
# ---
#
# Logistic regression is a natural bridge to connect regression and classification.
# - Logistic regression is the most common binary classification algorithm.
# - Because it is a regression model, logistic regression will predict continuous values.
# - Logistic regression will predict continuous probabilities between 0 and 1.
# - Example: What is the probability that someone shows up to vote?
# - However, logistic regression almost always operates as a classification model.
# - Logistic regression will use these continuous predictions to classify something as 0 or 1.
# - Example: Based on the predicted probability, do we predict that someone votes?
#
# In this lecture, we'll only be reviewing the binary outcome case with two classes, but logistic regression can be generalized to predicting outcomes with 3 or more classes.
#
# **Some examples of when logistic regression could be used:**
# - Will a user purchase a product, given characteristics like income, age, and number of family members?
# - Does this patient have a specific disease based on their symptoms?
# - Will a person default on their loan?
# - Is the iris flower in front of me an "*Iris versicolor*?"
# - Given one's GPA and the prestige of a college, will a student be admitted to a specific graduate program?
#
# And many more.
# +
# imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Import train_test_split.
from sklearn.model_selection import train_test_split
# Import logistic regression
from sklearn.linear_model import LogisticRegression
# -
# ## Graduate School Admissions
#
# ---
#
# Today, we'll be applying logistic regression to solve the following problem: "Given one's GPA, will a student be admitted to a specific graduate program?"
# Read in the data.
admissions = pd.read_csv('data/grad_admissions.csv')
# Check first five rows.
admissions.head()
admissions.shape
# The columns are:
# - `admit`: A binary 0/1 variable indicating whether or not a student was admitted, where 1 means admitted and 0 means not admitted.
# - `gre`: The student's [GRE (Graduate Record Exam)](https://en.wikipedia.org/wiki/Graduate_Record_Examinations) score.
# - `gpa`: The student's GPA.
# How many missing values do we have in each column?
admissions.info()
admissions.isnull().sum()
# Drop every row that has an NA.
admissions.dropna(inplace=True)
admissions.shape
# <details><summary>What assumption are we making when we drop rows that have at least one NA in it?</summary>
#
# - We assume that what we drop looks like what we have observed. That is, there's nothing special about the rows we happened to drop.
# - We might say that what we dropped is a random sample of our whole data.
# - It's not important to know this now, but the formal term is that our data is missing completely at random.
# </details>
# ## Recap of Notation
#
# You're quite familiar with **linear** regression:
#
# $$
# \begin{eqnarray*}
# \hat{\mathbf{y}} &=& \hat{\beta}_0 + \hat{\beta}_1x_1 + \hat{\beta}_2x_2 + \cdots + \hat{\beta}_px_p \\
# &=& \hat{\beta}_0 + \sum_{j=1}^p\hat{\beta}_jX_j
# \end{eqnarray*}
# $$
#
# Where:
# - $\hat{\mathbf{y}}$ is the predicted values of $\mathbf{y}$ based on all of the inputs $x_j$.
# - $x_1$, $x_2$, $\ldots$, $x_p$ are the predictors.
# - $\hat{\beta}_0$ is the estimated intercept.
# - $\hat{\beta}_j$ is the estimated coefficient for the predictor $x_j$, the $j$th column in variable matrix $X$.
#
# <a id='plot-reg'></a>
# ### What if we predicted `admit` with `gpa` using Linear Regression?
#
# Looking at the plot below, what are problems with using a regression?
# plot admissions vs. gpa and line of best fit
plt.figure(figsize = (12, 5))
sns.regplot(x='gpa', y='admit', data=admissions,
            ci = False, scatter_kws = {'s': 2},
            line_kws = {'color': 'orange'})
plt.ylim(-0.1, 1.1);
# <a id='pred-binary'></a>
#
# ## Predicting a Binary Class
#
# ---
#
# In our case we have two classes: `1=admitted` and `0=rejected`.
#
# The logistic regression is still solving for $\hat{y}$. However, in our binary classification case, $\hat{y}$ will be the probability of $y$ being one of the classes.
#
# $$
# \hat{y} = P(y = 1)
# $$
#
# We'll still try to fit a "line" of best fit to this... except it won't be perfectly linear. We need to *guarantee* that the right-hand side of the regression equation will evaluate to a probability. (That is, some number between 0 and 1!)
# ## The Logit Link Function (advanced)
#
# ---
#
# We will use something called a **link function** to effectively "bend" our line of best fit so that it is a curve of best fit that matches the range or set of values in which we're interested.
#
# For logistic regression, that specific link function that transforms ("bends") our line is known as the **logit** link.
#
# $$
# \text{logit}\left(P(y = 1)\right) = \beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p
# $$
#
# $$
# \log\left(\frac{P(y = 1)}{1 - P(y = 1)}\right) = \beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p
# $$
#
# Equivalently, we assume that each independent variable $x_i$ is linearly related to the **log of the odds of success**.
#
# Remember, the purpose of the link function is to bend our line of best fit.
# - This is convenient because we can have any values of $X$ inputs that we want, and we'll only ever predict between 0 and 1!
# - However, interpreting a one-unit change gets a little harder. (More on this later.)
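#
# A quick numeric sketch of why this works (the probabilities below are made-up values): the logit maps probabilities in (0, 1) onto the whole real line, and its inverse (the logistic, or sigmoid) maps any real number back into (0, 1), which is what guarantees valid predicted probabilities.
# +
import numpy as np

def logit(p):
    return np.log(p / (1 - p))

def inv_logit(z):
    return 1 / (1 + np.exp(-z))

probs = np.array([0.1, 0.5, 0.9])
print(logit(probs))             # roughly [-2.197, 0.0, 2.197]
print(inv_logit(logit(probs)))  # recovers [0.1, 0.5, 0.9]
# -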
# <img src="./images/logregmeme.png" style="height: 400px">
#
# [*image source*](https://twitter.com/ChelseaParlett/status/1279111984433127425?s=20)
# ## Fitting and making predictions with the logistic regression model.
#
# We can follow the same steps to build a logistic regression model that we follow to build a linear regression model.
#
# 1. Define X & y
# 2. Instantiate the model.
# 3. Fit the model.
# 4. Generate predictions.
# 5. Evaluate model.
admissions.head()
# +
# Step 1: Split into training & testing sets
X = admissions[['gpa']]
y = admissions['admit']
X_train,X_test,y_train,y_test = train_test_split(X,y,random_state=50)
# +
# Step 2: Instantiate our model.
logreg = LogisticRegression()
# Step 3: Fit our model.
logreg.fit(X_train,y_train)
# -
logreg.intercept_
logreg.coef_
# There are two methods in `sklearn` to be aware of when using logistic regression:
# - `.predict()`
# - `.predict_proba()`
# Step 4 (part 1): Generate predicted values.
logreg.predict(X_test)[:10]
# Step 4 (part 2): Generate predicted probabilities.
np.round(logreg.predict_proba(X_test),3)
# <details><summary>How would you interpret the predict_proba() output?</summary>
#
# - This shows the probability of being rejected ($P(Y=0)$) and the probability of being admitted ($P(Y=1)$) for each observation in the testing dataset.
# - The first array corresponds to the first testing observation.
# - The `.predict()` value for this observation is 0. This is because $P(Y=0) > P(Y=1)$.
# - The second array corresponds to the second testing observation.
# - The `.predict()` value for this observation is 0. This is because $P(Y=0) > P(Y=1)$.
# </details>
# +
# Visualizing logistic regression probabilities.
plt.figure(figsize = (10, 5))
plt.scatter(X_test, y_test, s = 10);
plt.plot(X_test.sort_values('gpa'),
logreg.predict_proba(X_test.sort_values('gpa'))[:,1],
color = 'grey', alpha = 0.8, lw = 3)
plt.xlabel('GPA')
plt.ylabel('Admit')
plt.title('Predicting Admission from GPA');
# +
# Step 5: Evaluate model.
logreg.score(X_train,y_train)
# -
logreg.score(X_test,y_test)
# By default, the `.score()` method for classification models gives us the accuracy score.
#
# $$
# \begin{eqnarray*}
# \text{Accuracy} = \frac{\text{number of correct predictions}}{\text{number of total predictions}}
# \end{eqnarray*}
# $$
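#
# The same number can be computed by hand (a small sketch reusing `logreg`, `X_test`, and `y_test` from above):
# +
preds = logreg.predict(X_test)
(preds == y_test).mean()   # fraction of correct predictions; matches logreg.score(X_test, y_test)
# -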
# <details><summary>Remind me: what does .score() tell me for a regression model?</summary>
#
# - The $R^2$ score.
# - Remember that $R^2$ is the proportion of variance in our $Y$ values that are explained by our model.
# </details>
# ### Using the log-odds —the natural logarithm of the odds.
#
# The combination of converting the "probability of success" to "odds of success," then taking the logarithm of that is called the **logit link function**.
#
# $$
# \text{logit}\big(P(y=1)\big) = \log\bigg(\frac{P(y=1)}{1-P(y=1)}\bigg) = \beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p
# $$
#
# We've bent our line how we want... but how do we interpret our coefficients?
# ### Odds
#
# Probabilities and odds represent the same thing in different ways. The odds for probability **p** is defined as:
#
# $$
# \text{odds}(p) = \frac{p}{1-p}
# $$
#
# The odds of a probability is a measure of how many times as likely an event is to happen as it is to not happen.
#
# **Example**: Suppose I'm looking at the probability and odds of a specific horse, "Secretariat," winning a race.
#
# - When **`p = 0.5`**: **`odds = 1`**
# - The horse Secretariat is as likely to win as it is to lose.
# - When **`p = 0.75`**: **`odds = 3`**
# - The horse Secretariat is three times as likely to win as it is to lose.
# - When **`p = 0.40`**: **`odds = 0.666..`**
# - The horse Secretariat is two-thirds as likely to win as it is to lose.
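#
# The horse-race numbers above as a one-line check (purely illustrative):
# +
odds = lambda p: p / (1 - p)
[odds(p) for p in (0.5, 0.75, 0.40)]   # [1.0, 3.0, 0.666...]
# -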
# ## Interpreting a one-unit change in $x_i$.
#
# $$\log\bigg(\frac{P(y=1)}{1-P(y=1)}\bigg) = \beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p$$
#
# Given this model, a one-unit change in $x_i$ implies a $\beta_i$ unit change in the log odds of success.
#
# **This is annoying**.
#
# We often convert log-odds back to "regular odds" when interpreting our coefficient... our mind understands odds better than the log of odds.
#
# **(BONUS)** So, let's get rid of the log on the left-hand side. Mathematically, we do this by "exponentiating" each side.
# $$
# \begin{eqnarray*}
# \log\bigg(\frac{P(y=1)}{1-P(y=1)}\bigg) &=& \beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p \\
# \Rightarrow e^{\Bigg(\log\bigg(\frac{P(y=1)}{1-P(y=1)}\bigg)\Bigg)} &=& e^{\Bigg(\beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p\Bigg)} \\
# \Rightarrow \frac{P(y=1)}{1-P(y=1)} &=& e^{\Bigg(\beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p\Bigg)} \\
# \end{eqnarray*}
# $$
#
# **Interpretation**: A one-unit change in $x_i$ means that success is $e^{\beta_i}$ times as likely.
logreg.coef_
# <details><summary> I want to interpret the coefficient $\hat{\beta}_1$ for my logistic regression model. How would I interpret this coefficient?</summary>
#
# - Our model is that $\log\bigg(\frac{P(admit=1)}{1-P(admit=1)}\bigg) = \beta_0 + \beta_1\text{GPA}$.
# - As GPA increases by 1, the log-odds of someone being admitted increases by 4.92.
# - As GPA increases by 1, someone is $e^{4.92}$ times as likely to be admitted.
# - As GPA increases by 1, someone is about 137.06 times as likely to be admitted to grad school.
# </details>
#
# > Hint: Use the [np.exp](https://docs.scipy.org/doc/numpy/reference/generated/numpy.exp.html) function.
# +
#exponentiate the coefficient
np.exp(logreg.coef_)
# -
# ## Conclusion
#
# The goal of logistic regression is to find the best-fitting model to describe the relationship between a binary outcome and a set of independent variables.
#
# Logistic regression generates the coefficients of a formula to predict a logit transformation of the probability that the characteristic of interest is present.
# ## Interview Questions
# <details><summary>What is the difference between a classification and a regression problem?</summary>
#
# - A classification problem has a categorical $Y$ variable. A regression problem has a numeric $Y$ variable.
# </details>
# <details><summary>What are some of the benefits of logistic regression as a classifier?</summary>
#
# (Answers may vary; this is not an exhaustive list!)
# - Logistic regression is a classification algorithm that shares similar properties to linear regression.
# - The coefficients in a logistic regression model are interpretable. (They represent the change in log-odds caused by the input variables.)
# - Logistic regression is a very fast model to fit and generate predictions from.
# - It is by far the most common classification algorithm.
#
# **Note**: The original interview question was "If you're comparing decision trees and logistic regression, what are the pros and cons of each?"
# </details>
# ## (BONUS) Solving for the Beta Coefficients
#
# Logistic regression minimizes the "deviance," which is similar to the residual sum of squares in linear regression, but is a more general form.
#
# **There's no closed-form solution to the beta coefficients like in linear regression, and the betas are found through optimization procedures.**
# - We can't just do $\hat{\beta} = (X^TX)^{-1}X^Ty$ like we can in linear regression!
#
# The `solver` hyperparameter in sklearn's LogisticRegression class specifies which method should be used to solve for the optimal beta coefficients (the coefficients that minimize our cost function). A former DC DSI instructor <NAME> has a great blog post about which solver to choose [here](https://towardsdatascience.com/dont-sweat-the-solver-stuff-aea7cddc3451).
#
# If you're particularly interested in the math, here are two helpful resources:
# - [A good blog post](http://www.win-vector.com/blog/2011/09/the-simpler-derivation-of-logistic-regression/) on the logistic regression beta coefficient derivation.
# - [This paper](https://www.stat.cmu.edu/~cshalizi/402/lectures/14-logistic-regression/lecture-14.pdf) is also a good reference.
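#
# For example, the solver can be set explicitly when instantiating the model (a small sketch reusing `X_train` and `y_train` from above; the choice of `liblinear` here is just for illustration):
# +
logreg_liblinear = LogisticRegression(solver='liblinear')
logreg_liblinear.fit(X_train, y_train)
logreg_liblinear.coef_
# -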
# ## (BONUS) The Logistic Function
#
# The inverse function of the logit is called the **logistic function**.
#
# By inverting the logit, we can have the right side of our regression equation solve explicitly for $P(y = 1)$:
#
# $$
# P(y=1) = logit^{-1}\left(\beta_0 + \sum_{j}^p\beta_jx_j\right)
# $$
#
# Where:
#
# $$
# logit^{-1}(a) = logistic(a) = \frac{e^{a}}{e^{a} + 1}
# $$
#
# Giving us:
#
# $$
# P(y=1) = \frac{e^{\left(\beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p\right)}}{e^{\left(\beta_0 + \beta_1x_1 + \beta_2x_2 + \cdots + \beta_px_p\right)}+1}
# $$
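#
# A minimal check of this identity, assuming the fitted `logreg` and `X_test` from above: applying the logistic function to the linear predictor should reproduce the second column of `predict_proba`.
# +
linear_part = logreg.intercept_ + X_test.values @ logreg.coef_.T
p_manual = np.exp(linear_part) / (np.exp(linear_part) + 1)
np.allclose(p_manual.ravel(), logreg.predict_proba(X_test)[:, 1])   # should be True
# -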
| Notebook/Lesson-logistic-regression/starter-code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ray RLlib Multi-Armed Bandits - Exploration-Exploitation Strategies
#
# © 2019-2020, Anyscale. All Rights Reserved
#
# ![Anyscale Academy](../../images/AnyscaleAcademyLogo.png)
# What strategy should we follow for selecting actions that balance the exploration-exploitation tradeoff, yielding the maximum average reward over time? This is the core challenge of RL/bandit algorithms.
#
# This lesson has two goals, to give you an intuitive sense of what makes a good algorithm and to introduce several popular examples.
#
# > **Tip:** For the first time through this material, you may wish to focus on the first goal, developing an intuitive sense of the requirements for a good algorithm. Come back later to explore the details of the algorithms discussed.
#
# So, at least read through the first sections, stopping at _UCB in More Detail_ under _Upper Confidence Bound_.
# ## What Makes a Good Exploration-Exploitation Algorithm?
#
# Let's first assume we are considering only stationary bandits. The ideal algorithm achieves these properties:
#
# 1. It explores all the actions reasonably aggressively.
# 2. When exploring, it picks the action most likely to produce an optimal reward, rather than making random choices.
# 3. It converges quickly to the action that optimizes the mean reward.
# 4. It stops exploration once the optimal action is known and just exploits!
#
# For non-stationary and contextual bandits, the optimal action will likely change over time, so some exploration may always be needed.
# ## Popular Algorithms
#
# With these properties in mind, let's briefly discuss four algorithms. We'll use two of them in examples over several subsequent lessons.
# ### $\epsilon$-Greedy
#
# One possible strategy is quite simple, called $\epsilon$-Greedy, where $\epsilon$ is a small number that determines how frequently exploration is done. The best-known action is exploited most of the time ("greedily"), governed by probability $1 - \epsilon$ (i.e., in percentage terms $100*(1 - \epsilon)$%). With probability $\epsilon$, an action is picked at random in the hopes of finding a new action that provides even better rewards.
#
# Typical values of $\epsilon$ are between 0.01 and 0.1. A larger value, like 0.1, explores more aggressively and finds the optimal policy more quickly, but afterwards the aggressive exploration strategy becomes a liability, as it only selects the optimal action ~90% of the time, continuing excessive exploration that is now counterproductive. In contrast, smaller values, like 0.01, are slower to find the optimal policy, but once found continue to select it ~99% of the time, so over time the mean reward is _higher_ for _smaller_ $\epsilon$ values, as the optimal action is selected more often.
#
# How does $\epsilon$-Greedy stack up against our desired properties?
#
# 1. The higher the $\epsilon$ value, the more quickly the action space is explored.
# 2. It randomly picks the next action, so there is no "intelligence" involved in optimizing the choice.
# 3. The higher the $\epsilon$ value, the more quickly the optimal action is found.
# 4. Just as this algorithm makes no attempt to optimize the choice of action during exploration, it makes no attempt to throttle back exploration when the optimal value is found.
#
# To address point 4, you could adopt an enhancement that decays the $\epsilon$ value over time, rather than keeping it fixed.
#
# See [Wikipedia - MAB Approximate Solutions](https://en.wikipedia.org/wiki/Multi-armed_bandit) and [Sutton 2018](https://mitpress.mit.edu/books/reinforcement-learning-second-edition) for more information.
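#
# A minimal NumPy sketch of $\epsilon$-greedy on a toy 3-armed bandit (the reward means below are made up for illustration; RLlib provides its own exploration implementations):
# +
import numpy as np

def epsilon_greedy(q_estimates, epsilon, rng):
    """Pick a random action with probability epsilon, otherwise the best-known action."""
    if rng.random() < epsilon:
        return int(rng.integers(len(q_estimates)))   # explore
    return int(np.argmax(q_estimates))               # exploit

rng = np.random.default_rng(0)
true_means = [0.1, 0.5, 0.3]           # hidden reward means (assumed)
q, counts = np.zeros(3), np.zeros(3)
for t in range(1000):
    a = epsilon_greedy(q, epsilon=0.1, rng=rng)
    r = rng.normal(true_means[a], 1.0) # noisy reward
    counts[a] += 1
    q[a] += (r - q[a]) / counts[a]     # incremental mean update
print(q, counts)                       # action 1 should accumulate most of the pulls
# -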
# ### Upper Confidence Bound
#
# A limitation about $\epsilon$-greedy is that exploration is done indiscriminately. Is it possible to make a more informed choice about which alternative actions are more likely to yield a good result, so we preferentially pick one of them? That's what the Upper Confidence Bound (UCB) algorithm attempts to do. It weights some choices over others.
#
# It's worth looking at the formula that governs the choice for the next action at time $t$:
#
# $$A_t \doteq \underset{a}{\operatorname{argmax}}\bigg[ Q_t(a) + c\sqrt{ \dfrac{\ln(t)}{N_t(a)} }\bigg]$$
#
# It's not essential to fully understand all the details, but here is the gist of it; the best action to take at time $t$, $A_t$, is decided by picking the best known action for returning the highest value (the $Q_t(a)$ term in the brackets [...] computes this), but with a correction that encourages exploration, especially for smaller $t$, but penalizing particular actions $a$ if we've already picked them a lot previously (the second term starting with a constant $c$ that governs the "strength" of this correction).
#
# UCB is one of the best performing algorithms [Sutton 2018](https://mitpress.mit.edu/books/reinforcement-learning-second-edition). How does it stack up against our desired properties?
#
# 1. Exploration is reasonably quick, governed by the $c$ hyperparameter for the "correction term".
# 2. It attempts to pick a good action when exploring, rather than randomly.
# 3. Finding the optimal action occurs efficiently, governed by the constant $c$.
# 4. The $ln(t)$ factor in the correction term grows more slowly over time relative to the counts $N_t(a)$, so exploration occurs less frequently at longer time scales.
#
# Because UCB is based on prior measured results, it is an example of a _Frequentist_ approach that is _model free_, meaning we just measure outcomes, we don't build a model to explain the environment.
# #### UCB in More Detail
#
# Let's explain the equation in more detail. If you are just interested in developing an intuition about strategies, this is a good place to stop and go to the next lesson, [Simple Multi-Armed-Bandit](03-Simple-Multi-Armed-Bandit.ipynb).
#
# * $A_t$ is the action we want to select at time $t$, the action that is most likely to produce the best reward or most likely to be worth exploring.
# * For all the actions we can choose from, we pick the action $a$ that maximizes the formula in the brackets [...].
# * $Q_t(a)$ is any equation we're using to measure the "value" received at time $t$ for action $a$. This is the greedy choice, i.e., the equation that tells us which action $a$ we currently know will give us the highest value. If we never wanted to explore, the second term in the brackets wouldn't exist. $Q_t(a)$ alone would always tell us to pick the best action we already know about. (The use of $Q$ comes from an early RL algorithm called _Q learning_ that models the _value_ returned from actions over time.)
# * The second term in the brackets is the correction that UCB gives us. As time $t$ increases, the natural log of $t$ also increases, but slower and slower for larger $t$. This is good because we hope we will find the optimal action at some earlier time $t$, so exploration at large $t$ is less useful (as long as the bandit is stationary or slowly changing). However, the denominator, $N_t(a)$ is the number of times we've selected $a$ already. The more times we've already tried $a$, the less "interesting" it is to try again, so this term penalizes choosing $a$. Finally, $c$ is a constant, a "knob" or _hyperparameter_ that determines how much we weight exploration vs. exploitation.
#
#
# When we use UCB in subsequent lessons, we'll use a simple _linear_ equation for $Q_t(a)$, i.e., something of the form $z = ax + by + c$.
#
# See [Wikipedia - MAB Approximate solutions for contextual bandit](https://en.wikipedia.org/wiki/Multi-armed_bandit), [these references](../06-RL-References.ipynb#Upper-Confidence-Bound), and the [RLlib documentation](https://docs.ray.io/en/latest/rllib-algorithms.html?highlight=greedy#linear-upper-confidence-bound-contrib-linucb) for more information.
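#
# A small sketch of the UCB action rule above (again a toy, tabular version rather than RLlib's LinUCB; `c` and the reward model are assumptions):
# +
import numpy as np

def ucb_action(q, counts, t, c=2.0):
    """Return the action maximizing Q + c * sqrt(ln(t) / N); untried actions go first."""
    untried = np.where(counts == 0)[0]
    if len(untried) > 0:
        return int(untried[0])
    return int(np.argmax(q + c * np.sqrt(np.log(t) / counts)))

rng = np.random.default_rng(1)
true_means = [0.1, 0.5, 0.3]
q, counts = np.zeros(3), np.zeros(3)
for t in range(1, 1001):
    a = ucb_action(q, counts, t)
    r = rng.normal(true_means[a], 1.0)
    counts[a] += 1
    q[a] += (r - q[a]) / counts[a]
print(q, counts)   # the correction term shrinks as counts grow, so pulls concentrate on action 1
# -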
# ### Thompson Sampling
#
# Thompson sampling, developed in the 30s, is similar to UCB in that it picks the action that is believed to have the highest potential of maximum reward. It is a _Bayesian, model-based_ approach, where the model is the posterior distribution and may incorporate prior belief about the environment.
#
# The agent samples weights for each action, using their posterior distributions, and chooses the action that produces the highest reward. Calculating the exact posterior is intractable in most cases, so it is usually approximated. Hence, the algorithm models beliefs about the problem. Then, during each iteration, the agent initializes with a random belief and acts optimally based on it.
#
# One trade-off is that Thompson Sampling requires an accurate model of the past policy and may suffer from large variance when the past policy differs significantly from a policy being evaluated. You may observe this if you rerun experiments in subsequent lessons that use Thompson Sampling. The graphs of rewards and especially the ranges from high to low, may change significantly from run to run.
#
# Relatively speaking, the Thompson Sampling exploration strategies are newer than UCB and tend to perform better (as we'll see in subsequent lessons), although the math for their theoretical performance is less rigorous than for UCB.
#
# For more information, see [Wikipedia](https://en.wikipedia.org/wiki/Thompson_sampling), [A Tutorial on Thompson Sampling](https://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf), [RLlib documentation](https://docs.ray.io/en/latest/rllib-algorithms.html?highlight=greedy#linear-thompson-sampling-contrib-lints), and other references in [RL References](../References-Reinforcement-Learning.ipynb).
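#
# A Beta-Bernoulli Thompson Sampling sketch (the textbook version with Bernoulli rewards, not RLlib's LinTS; the success probabilities are made up):
# +
import numpy as np

rng = np.random.default_rng(2)
true_probs = [0.1, 0.5, 0.3]            # Bernoulli success probabilities (assumed)
alpha, beta = np.ones(3), np.ones(3)    # Beta(1, 1) priors over each arm's success rate
for t in range(1000):
    samples = rng.beta(alpha, beta)     # sample one belief per arm from its posterior
    a = int(np.argmax(samples))         # act optimally with respect to the sampled beliefs
    r = rng.random() < true_probs[a]    # Bernoulli reward
    alpha[a] += r                       # posterior update: successes...
    beta[a] += 1 - r                    # ...and failures
print(alpha / (alpha + beta))           # posterior means; arm 1 should stand out
# -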
# ### Gradient Bandit Algorithms
#
# Focusing explicitly on rewards isn't the only approach. What if we use a more general measure, a _preference_, for selecting an action $a$ at time $t$? We'll use $H_t(a)$ to represent this preference at time $t$ for action $a$. We need to model this so we have a probability of selecting an action $a$. Using the _soft-max distribution_ works, also known as the Gibbs or Boltzmann distribution:
#
# $Pr\{A_t = a\} \doteq \frac{e^{H_t(a)}}{\sum^{k}_{b=1}e^{H_t(b)}} \doteq \pi_t(a)$
#
# $\pi_t(a)$ is defined to encapsulate this formula for the probability of taking action $a$ at time $t$.
#
# The term _gradient_ is used for this algorithm because the training update formula for $H_t(a)$ is very similar to the _stochastic gradient descent_ formula used in other ML problems.
#
# After an action $A_t$ is selected at a time $t$ and reward $R_t$ is received, the action preferences are updated as follows:
#
# $ H_{t+1}(A_t) \doteq H_t(A_t) + \alpha(R_t - \overset{\_}{R_t})(1 - \pi_t(A_t))$, and
#
# $ H_{t+1}(a) \doteq H_t(a) - \alpha(R_t - \overset{\_}{R_t})(\pi_t(a))$, for all $a \ne A_t$
#
# where $H_0(a)$ values are initialized to zero, $\alpha > 0$ is a step size parameter and $\overset{\_}{R_t}$ is the average of all the rewards up through and including time $t$. Note that if $R_t - \overset{\_}{R_t}$ is positive, meaning the current reward is larger than the average, the preference $H(A_t)$ increases. Otherwise, it decreases.
#
# Note the plus vs. minus signs in the two equations before the $\alpha$ term. If our preference for $A_t$ increases, our preferences for the other actions should decrease.
#
# How do gradient bandit algorithms satisfy our desired properties?
#
# 1. As shown, this algorithm doesn't have tuning parameters to control the rate of exploration or convergence to the optimal solution. However, the convergence is reasonably quick if the variance in reward values is relatively high, so that the difference $R_t - \overset{\_}{R_t}$ is also relatively large for low $t$ values.
# 2. It attempts to pick a good action when exploring, rather than randomly.
# 3. See 1.
# 4. As $\overset{\_}{R_t}$ converges to a maximum, the difference $R_t - \overset{\_}{R_t}$ and hence all the preference values $H_t(a)$ will become relatively stationary, with the optimal action having the highest $H$. Since the $H_t(a)$ values govern the probability of being selected, based on the _soft-max distribution_, if the optimal action has a significantly higher $H_t(a)$ than the other actions, it will be chosen most frequently. If the differences between $H_t(a)$ values are not large, then several will be chosen frequently, but that also means their rewards are relatively close. Hence, in either case, the average reward over time will still be close to optimal.
#
# There are many more details about gradient bandit algorithms, but we won't discuss them further here. See [Sutton 2018](https://mitpress.mit.edu/books/reinforcement-learning-second-edition) for the details.
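#
# Finally, a sketch of the preference updates above (soft-max over preferences $H$ with the running-average baseline; the step size and reward means are assumptions):
# +
import numpy as np

rng = np.random.default_rng(3)
true_means = [0.1, 0.5, 0.3]
H = np.zeros(3)                                 # preferences, H_0(a) = 0
avg_reward, step = 0.0, 0.1                     # running mean of rewards and step size alpha
for t in range(1, 1001):
    pi = np.exp(H) / np.exp(H).sum()            # soft-max action probabilities
    a = int(rng.choice(3, p=pi))
    r = rng.normal(true_means[a], 1.0)
    avg_reward += (r - avg_reward) / t          # update the baseline R-bar
    H -= step * (r - avg_reward) * pi           # -alpha (R - Rbar) pi(a) applied to every action
    H[a] += step * (r - avg_reward)             # the chosen action gets the extra term, giving the (1 - pi) factor
print(H, pi)                                    # the best action ends with the largest preference
# -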
| ray-rllib/multi-armed-bandits/02-Exploration-vs-Exploitation-Strategies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise 3.2 Learning From Data
# + pycharm={"is_executing": true}
from matplotlib import pyplot as plt
import numpy as np
import random
from random import seed
# -
thk = 5
rad = 10
s = np.linspace(0.2, 5, 25)
iter_list = []  # number of weight updates needed for each separation value
# +
for sep in s:
xs_red = []
ys_red = []
for x_coord in np.arange(-(rad + thk), rad + thk, 0.6):
for y_coord in np.arange(0, rad + thk, 0.6):
if rad ** 2 <= (x_coord - 0) ** 2 + (y_coord - 0) ** 2 <= (rad + thk) ** 2:
xs_red.append(x_coord)
ys_red.append(y_coord)
xs_blue = []
ys_blue = []
for x_coord in np.arange(-(thk / 2), (thk / 2 + (2 * rad) + thk), 0.6):
for y_coord in np.arange(-sep, -(rad + +sep + thk), -0.6):
if rad ** 2 <= (x_coord - ((thk / 2) + rad)) ** 2 + (y_coord - (-sep)) ** 2 <= (rad + thk) ** 2:
xs_blue.append(x_coord)
ys_blue.append(y_coord)
"""
A function for prediction of Y
"""
def Y_predict(x_vector, w):
x_new = [1]
for i in x_vector:
x_new.append(i)
x_new = np.array((x_new))
res = (np.dot(x_new, w))
if res > 0:
Y = 1
return Y
elif res < 0:
Y = -1
return Y
elif res == 0:
Y = 0
return Y
count = 0
"""
The main training function for the data, with the
Attributes
----------
X - The data set
iterations - the number of times the weights are iterated
eta - the learning rate
"""
misrepresented_list = []
def train(X, iterations, eta):
global count
global w
global all_combined_targets
for y_idx in range(len(X)):
ran_num = random.randint(0, len(X) - 1)
x_train = X[ran_num]
y_t = Y_predict(x_train, w)
misrepresented_list = []
for i, j in enumerate(all_combined_targets):
if j != y_t:
misrepresented_list.append(i)
if len(misrepresented_list) == 0:
print('Full accuracy achieved')
break
random_selection = random.randint(0, len(misrepresented_list) - 1)
random_index = misrepresented_list[random_selection]
x_selected = X[random_index]
y_selected = all_combined_targets[random_index]
x_with1 = [1]
for i in x_selected:
x_with1.append(i)
x_with1 = np.array((x_with1))
s_t = np.matmul(w, x_with1)
if (y_selected * s_t) <= 1:
w = w + (eta * (y_selected - s_t) * x_with1)
count += 1
if (count == iterations):
break
xs_red = np.array(xs_red)
ys_red = np.array(ys_red)
xs_blue = np.array(xs_blue)
ys_blue = np.array(ys_blue)
points_1 = []
res1 = []
for i in range(len(xs_red)):
points_1.append([xs_red[i], ys_red[i]])
res1.append(-1)
points_1 = np.array(points_1)
points_2 = []
res2 = []
for i in range(len(xs_blue)):
points_2.append([xs_blue[i], ys_blue[i]])
res2.append(1)
points_2 = np.array(points_2)
all_input = np.concatenate((points_1, points_2)) # creating a combined dataset
all_d = np.concatenate((res2, res1))
# Visualizing the linearly separable dataset
length_dataset = len(xs_red)
d1 = -1 * (np.ones(int(length_dataset / 2)))
d2 = np.ones(int(length_dataset / 2))
all_combined_targets = np.concatenate((d2, d1))
# initializing all parameters
count = 0
w0, w1, w2 = 0, 0, 0
w = np.array((w0, w1, w2))
weight = 0
iterations = 100
eta = 0.01
# calling the function
train(all_input, iterations, eta)
iter_list.append(count)
plt.plot(s[:len(iter_list)], iter_list)
plt.xlabel('separation')
plt.ylabel('updates until stopping')
plt.show()
| .ipynb_checkpoints/Exercise_3_2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="JVca8a6AQ_SI" colab_type="code" colab={}
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import LSTM, Dense, Bidirectional, BatchNormalization, Dropout
from keras import optimizers
from sklearn.model_selection import train_test_split
from keras.models import load_model
# + id="imlEXSOcT4vK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 411} outputId="027e7842-3449-478d-c12c-2f883bc8ba25"
# load model
model = load_model('weights-improvement-29-0.98.h5',compile=False)
# compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# fit model
# summarize model.
model.summary()
# + id="uzNMlLP0WbG_" colab_type="code" colab={}
import pickle
with open("mapping.pickle", "rb") as f:
mapping = pickle.load(f)
# + id="OQoX4YshUxQg" colab_type="code" colab={}
testing_filename="equations1.csv"
# + id="xr6mtAdkVO2-" colab_type="code" colab={}
def get_test_data(filename):
testing = pd.read_csv(filename)
test_eqs=testing.math[:]
test_sequences = list()
for eq in test_eqs:
# integer encode line
encoded_seq = [mapping[char] for char in eq]
# store
test_sequences.append(encoded_seq)
test_padded = pad_sequences(test_sequences, padding='post',value=mapping["?"],maxlen=50)
test_sequences = [to_categorical(x, num_classes=128) for x in test_padded]
X_predict = np.array(test_sequences)
return X_predict,test_eqs
# + id="xH4eeJ6EVlLC" colab_type="code" colab={}
X_predict,test_eqs=get_test_data(testing_filename)
# + id="uSaQEqwDVw_J" colab_type="code" colab={}
y_pred=model.predict_classes(X_predict)
# + id="_kU_THHbWxtv" colab_type="code" colab={}
class_names=["Unknown","Simple","Linear Equation for degree one","Fraction"]
# + id="CsXCggSqWrd3" colab_type="code" colab={}
y_pred_class=[class_names[3-y] for y in y_pred]
# + id="l1JCxTXUWtVo" colab_type="code" colab={}
df = pd.DataFrame(list(zip(list(test_eqs), y_pred_class)),
columns =['Math', 'Class'])
# + id="Yfj9_B7FW8lW" colab_type="code" colab={}
export_csv = df.to_csv(r'prediction.csv', index=None, header=True)
# + id="SvCYtMdJXF9N" colab_type="code" colab={}
| prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# Encode MNIST images as compressed vectors that can later be decoded back into
# images.
using Flux, Flux.Data.MNIST
using Flux: @epochs, onehotbatch, mse, throttle
using Base.Iterators: partition
using Parameters: @with_kw
using CUDAapi
if has_cuda()
@info "CUDA is on"
import CuArrays
CuArrays.allowscalar(false)
end
@with_kw mutable struct Args
lr::Float64 = 1e-3 # Learning rate
epochs::Int = 10 # Number of epochs
N::Int = 32 # Size of the encoding
batchsize::Int = 1000 # Batch size for training
sample_len::Int = 20 # Number of random digits in the sample image
throttle::Int = 5 # Throttle timeout
end
function get_processed_data(args)
# Loading Images
imgs = MNIST.images()
#Converting image of type RGB to float
imgs = channelview.(imgs)
# Partition into batches of size 1000
train_data = [float(hcat(vec.(imgs)...)) for imgs in partition(imgs, args.batchsize)]
train_data = gpu.(train_data)
return train_data
end
function train(; kws...)
args = Args(; kws...)
train_data = get_processed_data(args)
@info("Constructing model......")
# You can try to make the encoder/decoder network larger
# Also, the output of encoder is a coding of the given input.
# In this case, the input dimension is 28^2 and the output dimension of
# encoder is 32. This implies that the coding is a compressed representation.
# We can make lossy compression via this `encoder`.
encoder = Dense(28^2, args.N, leakyrelu) |> gpu
decoder = Dense(args.N, 28^2, leakyrelu) |> gpu
# Defining main model as a Chain of encoder and decoder models
m = Chain(encoder, decoder)
@info("Training model.....")
loss(x) = mse(m(x), x)
## Training
evalcb = throttle(() -> @show(loss(train_data[1])), args.throttle)
opt = ADAM(args.lr)
@epochs args.epochs Flux.train!(loss, params(m), zip(train_data), opt, cb = evalcb)
return m, args
end
# +
using Images
img(x::Vector) = Gray.(reshape(clamp.(x, 0, 1), 28, 28))
function sample(m, args)
imgs = MNIST.images()
#Converting image of type RGB to float
imgs = channelview.(imgs)
# `args.sample_len` random digits
before = [imgs[i] for i in rand(1:length(imgs), args.sample_len)]
# Before and after images
after = img.(map(x -> cpu(m)(float(vec(x))), before))
# Stack them all together
hcat(vcat.(before, after)...)
end
# -
cd(@__DIR__)
m, args= train()
# Sample output
@info("Saving image sample as sample_ae.png")
save("test_flux_autoencoder.png", sample(m, args))
# +
img(x::Vector) = Gray.(reshape(clamp.(x, 0, 1), 28, 28))
function sample_encoder(m, args)
imgs = MNIST.images()
#Converting image of type RGB to float
imgs = channelview.(imgs)
# `args.sample_len` random digits
before = [imgs[i] for i in rand(1:length(imgs), args.sample_len)]
# Before and after images
after = img.(map(x -> cpu(m(1))(float(vec(x))), before))
# Stack them all together
hcat(vcat.(before, after)...)
end
| flux/test_flux_autoencoder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: local-venv
# language: python
# name: local-venv
# ---
# # Plots for documentation
import numpy as np
import matplotlib.pyplot as plt
from manual_tuning_helper import load_silvia_response, load_silvia_responses
# +
dataset_filenames = [
"silvia_response_data_kp40ki0.02kd500_v61.json",
"silvia_response_data_kp40ki0.02kd500_v62.json",
]
datasets = load_silvia_responses(dataset_filenames, time=[0, 5000])
fig, [ax_u, ax_T] = plt.subplots(nrows=2, ncols=1, figsize=(14, 10))
for dataset in datasets:
label = "Kp:{0}, Ki:{1}, Kd:{2}".format(dataset["K"][0], dataset["K"][1], dataset["K"][2])
ax_u.plot(dataset["t"], dataset["u"], label=label)
ax_T.plot(dataset["t"], dataset["T"], label=label)
ax_T.plot(ax_T.get_xlim(), [95, 95], '--', c='k')
# ax_u.set_xlabel('time [s]')
ax_u.set_ylabel('duty [%]')
ax_u.set_title('Controller output')
ax_T.set_xlabel('time [s]')
ax_T.set_ylabel('temperature [C]')
ax_T.set_title('Temperature response')
ax_u.legend()
ax_u.set_xlim(0, 1500)
ax_T.set_xlim(0, 1500)
plt.savefig("setup_tuning_final.png")
# -
# ## Proportional gain
# +
setpoint = 85 # degC
dataset_filenames = [
"silvia_response_data_kp10ki0kd0_v01.json",
"silvia_response_data_kp25ki0kd0_v01.json",
# "silvia_response_data_kp40ki0kd0_v01.json",
"silvia_response_data_kp50ki0kd0_v01.json",
# "silvia_response_data_kp100ki0kd0_v01.json",
"silvia_response_data_kp150ki0kd0_v01.json",
# "silvia_response_data_kp250ki0kd0_v01.json",
# "silvia_response_data_kp500ki0kd0_v01.json"
]
datasets = load_silvia_responses(dataset_filenames, time=[0, 1000])
fig, [ax_u, ax_T] = plt.subplots(nrows=2, ncols=1, figsize=(14, 10))
for dataset in datasets:
label = "Kp:{0}, Ki:{1}, Kd:{2}".format(dataset["K"][0], dataset["K"][1], dataset["K"][2])
ax_u.plot(dataset["t"], dataset["u"], label=label)
ax_T.plot(dataset["t"], dataset["T"], label=label)
ax_T.plot(ax_T.get_xlim(), [setpoint, setpoint], '--', c='k')
# ax_u.set_xlabel('time [s]')
ax_u.set_ylabel('duty [%]')
ax_u.set_title('Controller output')
ax_T.set_xlabel('time [s]')
ax_T.set_ylabel('temperature [C]')
ax_T.set_title('Temperature response')
ax_u.legend()
ax_u.set_xlim(0, 1000)
ax_T.set_xlim(0, 1000)
plt.savefig("setup_tuning_proportional.png")
# -
# ## Derivative
# +
setpoint = 85 # degC
dataset_filenames = [
"silvia_response_data_kp25ki0kd0_v01.json",
"silvia_response_data_kp25ki0kd100_v01.json",
"silvia_response_data_kp25ki0kd1000_v01.json",
]
datasets = load_silvia_responses(dataset_filenames, time=[0, 1000])
fig, [ax_u, ax_T] = plt.subplots(nrows=2, ncols=1, figsize=(14, 10))
for dataset in datasets:
label = "Kp:{0}, Ki:{1}, Kd:{2}".format(dataset["K"][0], dataset["K"][1], dataset["K"][2])
ax_u.plot(dataset["t"], dataset["u"], label=label)
ax_T.plot(dataset["t"], dataset["T"], label=label)
ax_T.plot(ax_T.get_xlim(), [setpoint, setpoint], '--', c='k')
# ax_u.set_xlabel('time [s]')
ax_u.set_ylabel('duty [%]')
ax_u.set_title('Controller output')
ax_T.set_xlabel('time [s]')
ax_T.set_ylabel('temperature [C]')
ax_T.set_title('Temperature response')
ax_u.legend()
ax_u.set_xlim(0, 1000)
ax_T.set_xlim(0, 1000)
plt.savefig("setup_tuning_derivative.png")
# -
# ## Integral
# +
setpoint = 85 # degC
dataset_filenames = [
"silvia_response_data_kp15ki0kd50_v41.json",
# "silvia_response_data_kp15ki0.01kd50_v41.json",
"silvia_response_data_kp15ki0.02kd50_v41.json",
"silvia_response_data_kp15ki0.05kd50_v41.json",
"silvia_response_data_kp15ki0.1kd50_v41.json",
"silvia_response_data_kp15ki4.5kd50_v41.json",
]
datasets = load_silvia_responses(dataset_filenames, time=[0, 1000])
fig, [ax_u, ax_T] = plt.subplots(nrows=2, ncols=1, figsize=(14, 10))
for dataset in datasets:
label = "Kp:{0}, Ki:{1}, Kd:{2}".format(dataset["K"][0], dataset["K"][1], dataset["K"][2])
ax_u.plot(dataset["t"], dataset["u"], label=label)
ax_T.plot(dataset["t"], dataset["T"], label=label)
ax_T.plot(ax_T.get_xlim(), [setpoint, setpoint], '--', c='k')
# ax_u.set_xlabel('time [s]')
ax_u.set_ylabel('duty [%]')
ax_u.set_title('Controller output')
ax_T.set_xlabel('time [s]')
ax_T.set_ylabel('temperature [C]')
ax_T.set_title('Temperature response')
ax_u.legend()
ax_u.set_xlim(0, 1000)
ax_T.set_xlim(0, 1000)
plt.savefig("setup_tuning_integral.png")
# -
# ## Brewing
# +
setpoint = 95 # degC
dataset_filenames = [
"silvia_response_data_kp15ki0.01kd50_v51.json",
]
datasets = load_silvia_responses(dataset_filenames, time=[0, 5000])
fig, [ax_u, ax_T] = plt.subplots(nrows=2, ncols=1, figsize=(14, 10))
for dataset in datasets:
label = "Kp:{0}, Ki:{1}, Kd:{2}".format(dataset["K"][0], dataset["K"][1], dataset["K"][2])
ax_u.plot(dataset["t"], dataset["u"], label=label)
ax_T.plot(dataset["t"], dataset["T"], label=label)
ax_T.plot(ax_T.get_xlim(), [setpoint, setpoint], '--', c='k')
# ax_u.set_xlabel('time [s]')
ax_u.set_ylabel('duty [%]')
ax_u.set_title('Controller output')
ax_T.set_xlabel('time [s]')
ax_T.set_ylabel('temperature [C]')
ax_T.set_title('Temperature response')
ax_u.legend()
ax_u.set_xlim(0, 2500)
ax_T.set_xlim(0, 2500)
plt.savefig("setup_tuning_brew.png")
# -
# ## Fine tuning
# +
setpoint = 95 # degC
dataset_filenames = [
# "silvia_response_data_kp15ki0.01kd50_v51.json",
# "silvia_response_data_kp25ki0.008kd100_v51.json",
# "silvia_response_data_kp25ki0.02kd100_v51.json",
# "silvia_response_data_kp30ki0.02kd150_v51.json",
# "silvia_response_data_kp35ki0.1kd100_v51.json",
# "silvia_response_data_kp20ki0.01kd100_v61.json",
# "silvia_response_data_kp30ki0.01kd200_v61.json",
# "silvia_response_data_kp30ki0.015kd300_v61.json",
# "silvia_response_data_kp30ki0.015kd500_v61.json",
"silvia_response_data_kp25ki0.02kd500_v61.json",
"silvia_response_data_kp30ki0.01kd500_v61.json",
"silvia_response_data_kp50ki0.01kd500_v61.json",
"silvia_response_data_kp30ki0.01kd400_v61.json",
"silvia_response_data_kp30ki0.02kd500_v61.json",
"silvia_response_data_kp40ki0.02kd500_v61.json",
]
datasets = load_silvia_responses(dataset_filenames, time=[0, 5000])
fig, ax_T = plt.subplots(figsize=(14, 10))
for dataset in datasets:
label = "Kp:{0}, Ki:{1}, Kd:{2}".format(dataset["K"][0], dataset["K"][1], dataset["K"][2])
ax_T.plot(dataset["t"], dataset["T"], label=label)
ax_T.plot(ax_T.get_xlim(), [setpoint, setpoint], '--', c='k')
ax_T.set_xlabel('time [s]')
ax_T.set_ylabel('temperature [C]')
ax_T.set_title('Temperature response')
ax_T.legend()
ax_T.set_xlim(0, 1500)
ax_T.set_ylim(50, 110)
plt.savefig("setup_tuning_fine.png")
| docs/docs/control/assets/doc_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression with Python
#
# Your neighbor is a real estate agent and wants help predicting housing prices for regions in the US. It would be great if you could somehow create a model for her that lets her plug in a few features of a house and returns an estimate of the price the house would sell for.
#
# She has asked whether you could help her with your new data science skills. You say yes, and decide that linear regression might be a good path to solve this problem!
#
# Your neighbor then gives you some information about a group of houses in regions of the United States; it is all in the dataset: USA_Housing.csv.
#
# The data contains the following columns:
#
# * 'Avg. Area Income': Average income of residents of the city where the house is located.
# * 'Avg. Area House Age': Average age of the houses in the same city.
# * 'Avg. Area Number of Rooms': Average number of rooms for houses in the same city.
# * 'Avg. Area Number of Bedrooms': Average number of bedrooms for houses in the same city.
# * 'Area Population': Population of the city where the house is located.
# * 'Price': Price the house sold for.
# * 'Address': Address of the house.
# **Let's get started!**
# ## Check out the data
# We've managed to get some housing price data as a CSV file; let's set up our environment with the libraries we'll need and then import the data!
# ### Importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# ### Taking a look at the data
USAhousing = pd.read_csv('USA_Housing.csv')
USAhousing.head(10)
USAhousing.tail()
USAhousing.info()
USAhousing.describe()
USAhousing.columns
# # Exploratory Data Analysis
#
# Let's create some simple plots to check out the data!
sns.pairplot(USAhousing)
sns.distplot(USAhousing['Price'])
sns.heatmap(USAhousing.corr())
# ## Training a Linear Regression Model
#
# Let's now begin training the regression model! First we need to split our data into an X array containing the features to train on, and a y array with the target variable, in this case the Price column. We discard the 'Address' column because it only contains text information that the linear regression model can't use.
#
# ### X and y arrays
X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
'Avg. Area Number of Bedrooms', 'Area Population']]
y = USAhousing['Price']
# ## Train/test split
#
# Now let's split the data into a training set and a test set. We will fit the model on the training set and then use the test set to evaluate it.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
# ## Creating and Training the Model
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train,y_train)
# ## Model Evaluation
#
# Let's evaluate the model by checking its coefficients and seeing how we can interpret them.
# print the intercept
print(lm.intercept_)
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coeficiente'])
coeff_df
# Interpreting the coefficients:
#
# - Holding all other features fixed, a 1 unit increase in **Avg. Area Income** is associated with an **increase of \$21.52**.
# - Holding all other features fixed, a 1 unit increase in **Avg. Area House Age** is associated with an **increase of \$164883.28**.
# - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Rooms** is associated with an **increase of \$122368.67**.
# - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Bedrooms** is associated with an **increase of \$2233.80**.
# - Holding all other features fixed, a 1 unit increase in **Area Population** is associated with an **increase of \$15.15**.
#
# Does this make sense? Probably not, because this data was made up. If you want real data to repeat this kind of analysis, check out the [boston](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) dataset:
#
#
# from sklearn.datasets import load_boston
# boston = load_boston()
# print(boston.DESCR)
# boston_df = boston.data
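#
# Note: a hedged aside — in recent scikit-learn releases (1.2 and later) `load_boston` has been removed, so the snippet above will fail there. The California housing dataset is a readily available substitute:
#
# from sklearn.datasets import fetch_california_housing
# california = fetch_california_housing(as_frame=True)
# print(california.DESCR)
# california_df = california.frame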
# ## Predictions from our Model
#
# Let's grab predictions from our test set and see how well the model did!
X_test.head()
predictions = lm.predict(X_test)
predictions
plt.scatter(y_test,predictions)
# **Residual histogram**
sns.distplot((y_test-predictions),bins=50);
# ## Regression Evaluation Metrics
#
#
# Here are three common evaluation metrics for regression problems:
#
# **Mean Absolute Error** (MAE) is the mean of the absolute value of the errors:
#
# $$\frac 1n\sum_{i=1}^n|y_i-\hat{y}_i|$$
#
# **Mean Squared Error** (MSE) is the mean of the squared errors:
#
# $$\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2$$
#
# **Root Mean Squared Error** (RMSE) is the square root of the mean of the squared errors:
#
# $$\sqrt{\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2}$$
#
# Comparing these metrics:
#
# - **MAE** is the easiest to understand, because it is the average error.
# - **MSE** is more popular than MAE, because MSE "punishes" larger errors, which tends to be useful in the real world.
# - **RMSE** is even more popular than MSE, because RMSE is interpretable in the units of "y".
#
# All of these are **loss functions**, and we want to minimize them.
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
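# Note (hedged): newer scikit-learn versions can also return the RMSE directly via the
# `squared=False` argument, e.g. `metrics.mean_squared_error(y_test, predictions, squared=False)`.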
# Explore the Boston dataset mentioned above.
#
# Next, go ahead and solve the proposed exercise!
#
# ## Well done!
| 02RegresionLineal/01RegresionLinealConPython.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
using Plots, LaTeXStrings
using STMO
# ![](Figures/logo.png)
#
# # Motivation
#
# Up to now, we confidently assumed that we would always be able to compute the derivative or gradient of any function. Despite differentiation being a relatively easy operation, it is frequently not feasible (or desirable) to compute it by hand. *Numerical differentiation* can provide approximations of the derivative or gradient at a particular point. *Automatic differentiation* directly manipulates the computational graph to generate a function that computes the (exact) derivative. Such methods have advanced greatly in recent years, and it is no exaggeration to say that their ease of use in popular software libraries such as TensorFlow and PyTorch is a cornerstone of deep learning and other fields of machine learning and scientific computing.
#
using Plots, BenchmarkTools
using STMO
# # Definition of a derivative
#
# $$
# \frac{\text{d}f(x)}{\text{d}x} = f'(x) = \lim _{h\to 0}{\frac {f(x+h)-f(x)}{h}}.
# $$
#
# Differentiation is in essence a mechanical process, following the rules below.
#
# ![](Figures/derivatives.jpeg)
#
# When we work with functions of several variables, we use *partial derivatives* (e.g. $\frac{\partial f(x, y)}{\partial x}$), indicating that we keep all variables but $x$ fixed.
#
# Our running example:
#
# $$
# f(x) = \log x + \frac{\sin x}{x}
# $$
f(x) = log(x) + sin(x) / x;
# # Symbolic differentiation
#
# Computing derivatives, as you have seen in basic calculus courses.
#
# By hand or automatically:
# - Maple
# - Sympy (python)
# - Mathematica
# - Maxima
#
# Differentiation is *easy* compared to *integration* or *sampling*.
#
# Advantages:
# - exact derivatives!
# - gives the formula for different evaluations.
# - insight in the system
# - in some cases, a closed-form solution for the extrema by solving $\frac{\text{d}f(x)}{\text{d}x}=0$
# - no hyperparameters or tweaking: just works!
#
# Disadvantages:
# - some software not flexible enough (gradients, arrays, for-loops,...)
# - sometimes explosion of terms: *expression swell*
# - not always numerically optimal!
using SymEngine
@vars x # define variable
df = diff(f(x), x)
df(2.0)
plot(f, 1, 5, label="\$f(x)\$", xlabel="\$x\$", lw=2, color=mygreen)
plot!(df, 1, 5, label="\$f'(x)\$", lw=2, color=myorange)
# # Numerical differentiation
#
# Finite difference approximation of the derivative/gradient based on a number of function evaluations.
#
# Often based on the limit definition of a derivative. Theoretical analysis using Taylor approximation:
#
# $$
# f(x + h) = f(x) + \frac{h}{1!}f'(x) + \frac{h^2}{2!}f''(x) + \frac{h^3}{3!}f^{(3)}(x)+\ldots
# $$
#
# **Forward difference**
#
# $$
# f'(x)\approx \frac{f(x+h) - f(x)}{h}
# $$
#
# **Central difference**
#
# $$
# f'(x)\approx \frac{f(x+h) - f(x-h)}{2h}
# $$
#
# **Complex step method**
#
# $$
# f'(x)\approx \frac{\text{Im}(f(x +ih))}{h}
# $$
diff_fordiff(f, x; h=1e-10) = (f(x + h) - f(x)) / h;
diff_centrdiff(f, x; h=1e-10) = (f(x + h) - f(x - h)) / 2h;
diff_complstep(f, x; h=1e-10) = imag(f(x + im * h)) / h;
diff_fordiff(f, 2.0)
diff_centrdiff(f, 2.0)
diff_complstep(f, 2.0)
# ## Intermezzo: floats
#
# Real numbers are always represented as floating point numbers in a computer.
#
# ![Encoding of a real number using a `Float32`.](Figures/floats.png)
#
# By default, Julia uses double-precision floats (`Float64`). Let us take a look at the bit representation of a float; we use `Float32` for brevity's sake.
num = Float32(10.789)
bitstring(num)
# The first bit encodes the *sign*, here positive.
sign(num)
# The next eight bits specify the *exponent*, the magnitude of the number.
exponent(num)
# The final 23 bits specify the *mantissa*, a number in $[1,2)$ representing the precision.
significand(num)
# These can be used to reconstruct the number.
significand(num) * 2^exponent(num)
# The *machine precision* of a number can be obtained using `eps`. This is the relative error.
eps(num) # not very high because it is only Float32
# This means that larger numbers have a larger absolute error compared to small numbers.
eps(1.2)
eps(1.2e10)
eps(1.2e-10)
# This brings us to the numerical issues we might encounter when using numerical differentiation.
#
# **First sin of numerical analysis**:
#
# > *thou shalt not add small numbers to big numbers*
#
# **Second sin of numerical analysis**:
#
# > *thou shalt not subtract numbers which are approximately equal*
#
# ## Back to numerical differentiation
fexamp(x) = 64x*(1-x)*(1-2x)^2*(1-8x+8x^2)^2
dfexamp = diff(fexamp(x), x)
error(diff, h; x=1.0) = max(abs(Float64(dfexamp(x)) - diff(fexamp, x, h=h)), 1e-50);
stepsizes = map(t->10.0^t, -20:0.1:-1);
plot(stepsizes, error.(diff_fordiff, stepsizes), label="forward difference",
xscale=:log10, yscale=:log10, lw=2, legend=:bottomright, color=myblue)
plot!(stepsizes, error.(diff_centrdiff, stepsizes), label="central difference", lw=2,
color=myred)
plot!(stepsizes, error.(diff_complstep, stepsizes), label="complex step", lw=2,
color=myyellow)
#xlims!(1e-15, 1e-1)
xlabel!("\$h\$")
ylabel!("absolute error")
# Advantages of numerical differentiation:
# - easy to implement
# - general, no assumptions needed
#
# Disadvantages:
# - not numerically stable (round-off errors)
# - not efficient for gradients ($\mathcal{O}(n)$ evaluations for $n$-dimensional vectors)
#
#
# ## Approximations of multiplications with gradients
#
# **Gradient-vector approximation**
#
# $$
# \nabla f(\mathbf{x})^\intercal \mathbf{d} \approx \frac{f(\mathbf{x}+h\cdot\mathbf{d}) - f(\mathbf{x}-h\cdot\mathbf{d})}{2h}
# $$
#
# **Hessian-vector approximation**
#
# $$
# \nabla^2 f(\mathbf{x}) \mathbf{d} \approx \frac{\nabla f(\mathbf{x}+h\cdot\mathbf{d}) - \nabla f(\mathbf{x}-h\cdot\mathbf{d})}{2h}
# $$
grad_vect(f, x, d; h=1e-10) = (f(x + h * d) - f(x - h * d)) / (2h)
# +
dvect = randn(10) / 10
xvect = 2rand(10)
A = randn(10, 10)
A = A * A' / 100
#g(x) = exp(- x' * A * x) # adjoint does not play with Zygote
g(x) = exp(- sum(x .* (A * x)))
# correct gradient and Hessian (by hand)
Dg(x) = -2g(x) * A * x
D²g(x) = -2g(x) * A - 2A * x * Dg(x)'
# -
g(xvect)
Dg(xvect)
Dg(xvect)' * dvect
grad_vect(g, xvect, dvect)
D²g(xvect) * dvect
h = 1e-10
(Dg(xvect + h * dvect) - Dg(xvect - h * dvect)) / 2h
# # Forward differentiation
#
# Accumulation of the gradients along the *computational graph*.
#
# <img src="Figures/compgraph.png" alt="drawing" width="400"/>
#
# Forward differentiation computes the gradient from the inputs to the outputs.
#
# ## Differentiation rules
#
# **Sum rule**:
#
# $$
# \frac{\partial (f(x)+g(x))}{\partial x} = \frac{\partial f(x)}{\partial x} + \frac{\partial g(x)}{\partial x}
# $$
#
# **Product rule**:
#
# $$
# \frac{\partial (f(x)g(x))}{\partial x} = f(x)\frac{\partial g(x)}{\partial x} + g(x)\frac{\partial f(x)}{\partial x}
# $$
#
# **Chain rule**:
#
# $$
# \frac{\partial g(f(x))}{\partial x} = \frac{\partial g(u)}{\partial u}\mid_{u=f(x)} \frac{\partial f(x)}{\partial x}
# $$
#
# ## Example of the forward differentiation
#
# <img src="Figures/forwarddiff.png" alt="drawing" width="600"/>
#
# ## Dual numbers
#
# Forward differentiation can be viewed as evaluating a function using *dual numbers*, which can be seen as truncated Taylor series:
#
# $$
# v + \dot{v}\epsilon\,,
# $$
#
# where $v,\dot{v}\in\mathbb{R}$ and $\epsilon$ is a nilpotent number, i.e. $\epsilon^2=0$. For example, we have
#
# $$
# (v + \dot{v}\epsilon) + (u + \dot{u}\epsilon) = (v+u) + (\dot{v} +\dot{u})\epsilon
# $$
#
#
# $$
# (v + \dot{v}\epsilon)(u + \dot{u}\epsilon) = (vu) + (v\dot{u} +\dot{v}u)\epsilon\,.
# $$
#
#
# These dual numbers can be used as
#
# $$
# f(v+\dot{v}\epsilon) = f(v) + f'(v)\dot{v}\epsilon\,.
# $$
struct Dual{T}
v::T
vdot::T
end
# Let's implement some basic rules showing linearity.
Base.:+(a::Dual, b::Dual) = Dual(a.v + b.v, a.vdot + b.vdot)
Base.:*(a::Dual, b::Dual) = Dual(a.v * b.v, a.v * b.vdot + b.v * a.vdot)
Base.:+(c::Real, b::Dual) = Dual(c + b.v, b.vdot)
Base.:*(v::Real, b::Dual) = Dual(v, 0.0) * b
# And some more advanced ones, based on differentiation.
Base.:sin(a::Dual) = Dual(sin(a.v), cos(a.v) * a.vdot)
Base.:exp(a::Dual) = Dual(exp(a.v), exp(a.v) * a.vdot)
Base.:log(a::Dual) = Dual(log(a.v), 1.0 / a.v * a.vdot)
Base.:/(a::Dual, b::Dual) = Dual(a.v / b.v, (a.vdot * b.v - a.v * b.vdot) / b.v^2)
f(Dual(2.0, 1.0))
# +
myforwarddiff(f, x) = f(Dual(x, 1.0)).vdot
myforwarddiff(f, 2.0)
# -
# This directly works for vectors!
q(x) = 10.0 * x[1] * x[2] + x[1] * x[1] + sin(x[1]) / x[2]
q([1, 2])
q(Dual.([1, 2], [1, 0])) # partial wrt x1
q(Dual.([1, 2], [0, 1])) # partial wrt x2
# In practice, we prefer to use a package to do this.
using ForwardDiff
ForwardDiff.derivative(f, 2.0)
ForwardDiff.gradient(g, xvect)
ForwardDiff.gradient(q, [1, 2])
# Forward differentiation:
#
# - exact gradients!
# - computational complexity scales with **number of inputs**
# - used when you have more outputs than inputs
#
# # Reverse differentiation
#
# Compute the gradient from the output toward the inputs using the chain rule.
#
# <img src="Figures/reversediff.png" alt="drawing" width="600"/>
#
# Reverse differentiation:
#
# - also exact!
# - main workhorse for training artificial neural networks.
# - efficient when more inputs than outputs (machine learning: thousands of parameters vs. one loss)
using Zygote
f'(2.0) # that's it
# Works as well:
Zygote.gradient(f, 2.0)
# Functions of more than one variable:
g'(xvect)
# Finding the Hessian:
Zygote.hessian(g, xvect)
# ## Artificial neural networks
#
# Multi-layer perceptron.
#
# <img src="Figures/ANN_example.png" alt="drawing" width="200"/>
#
# Forward differentiation.
#
# <img src="Figures/Forwardprop.png" alt="drawing" width="500"/>
#
#
# Reverse differentation or backpropagation.
#
# <img src="Figures/Backprop.png" alt="drawing" width="500"/>
#
# Backpropagation returns the effect of changing each layer's output on the loss, which can be related directly to the parameters!
#
# ## Exercise: logistic regression
#
# Recall logistic regression on a training set $S=\{(\mathbf{x}_i, y_i)\mid i=1,\ldots,n\}$ with $y\in\{0,1\}$.
#
# Prediction:
#
# $$
# f(\mathbf{x}) = \sigma(\mathbf{w}^\intercal\mathbf{x})\,,
# $$
#
# with $\sigma(t) = 1/(1+\exp(t))$.
#
# To find the parameter vector $\mathbf{w}$, we minimize the cross-entropy:
#
# $$
# L(\mathbf{w};S)= \sum_{i=1}^n \left[ - y_i \log(f(\mathbf{x}_i)) - (1-y_i)\log(1-f(\mathbf{x}_i)) \right]\,.
# $$
# +
# artificial data
X = [randn(50, 2); randn(50, 2) .+ [-1.0 2.4]];
y = [i <= 50 ? 0 : 1 for i in 1:100];
n = length(y);
scatter(X[:,1], X[:,2], color=y)
# -
σ(t) = 1.0 / (1.0 + exp(t))
f(x, w) = σ(sum(x .* w))
L(w; X=X, y=y) = sum(- y .* log.(σ.(X * w)) - (1.0 .- y) .* log.(1. .- σ.( X * w)))
w = [0.1, 0.1]
L(w)
# **Assignments**
#
# 1. Compute the gradient of $L$ w.r.t. $\mathbf{w}$ using
# - numerical method
# - forward differentiation
# - backward differentiation
# 2. (optional) Implement a simple gradient descent to find $\mathbf{w}^\star$.
# 3. Add a bias to the prediction function. Use `Zygote` to compute the gradients w.r.t. both parameters.
# # Differentiating ODE
#
# Automatic differentiation can be used beyond machine learning and optimization:
#
# - [physical engines](https://arxiv.org/abs/1611.01652) to learn robot control
# - differentiating [protein](https://github.com/lupoglaz/TorchProteinLibrary) [structures](https://www.cell.com/cell-systems/fulltext/S2405-4712(19)30076-6)
# - Sinkhorn algorithm
# - [dynamic programming](https://arxiv.org/abs/1802.03676)
# - [differential equations](https://julialang.org/blog/2019/01/fluxdiffeq)
#
# Everything is computed by some straightforward and differentiable functions!
#
# # Exercise
#
# Consider the *Wheeler's Ridge* function:
#
# $$
# f(\mathbf{x}) = -\exp(-(x_1 x_2 - a)^2 -(x_2 -a)^2)\,,
# $$
#
# at the point $\mathbf{x}_0=[1.5, 1.5]^T$. We set $a=1.5$.
#
# Implement this function.
# Compute the gradient by hand.
#
# Find the gradient and Hessian at $\mathbf{x}_0$ by numerical differentiation.
# Compute the gradient and Hessian at $\mathbf{x}_0$ using automatic differentiation.
# # References
#
# - <NAME> et al. (2015), *Automatic differentiation in machine learning: a survey*
# - <NAME> and <NAME>, *Algorithms for Optimization*, MIT Press (2019)
| chapters/03.AutoDiff/autodiff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # Google Sheets - Gsheets Send data
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Google%20Sheets/Gsheets_Send_data.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHR<KEY>>
# + [markdown] papermill={} tags=[]
# **Tags:** #gsheet #data #naas_drivers
# + [markdown] papermill={} tags=[]
# Pre-requisite: share your Google Sheet with our service account
# For the driver to fetch the contents of your Google Sheet, you need to share it with the service account linked with Naas.
#
# 🔗 <EMAIL>
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Import library
# + papermill={} tags=[]
from naas_drivers import gsheet
# + [markdown] papermill={} tags=[]
# ### Variables
# + papermill={} tags=[]
data = [{ "name": "Jean", "email": "<EMAIL>" }, { "name": "Bunny", "email": "<EMAIL>" }]
spreadsheet_id = "id"
# + [markdown] papermill={} tags=[]
# ## Model
# + [markdown] papermill={} tags=[]
# ### Connect to gsheet
# + papermill={} tags=[]
gsheet.connect(spreadsheet_id)
# + [markdown] papermill={} tags=[]
# ## Output
# + [markdown] papermill={} tags=[]
# ### Send the data
# + papermill={} tags=[]
gsheet.send(
sheet_name="TSLA",
data=data
)
| Google Sheets/Gsheets_Send_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="copyright"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="title"
# # Vertex SDK: AutoML training image object detection model for export to edge
#
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_object_detection_online_export_edge.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_object_detection_online_export_edge.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# <td>
# <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_object_detection_online_export_edge.ipynb">
# Open in Google Cloud Notebooks
# </a>
# </td>
# </table>
# <br/><br/><br/>
# + [markdown] id="overview:automl,export_edge"
# ## Overview
#
#
# This tutorial demonstrates how to use the Vertex SDK to train a Google Cloud AutoML image object detection model and export it as an Edge model.
# + [markdown] id="dataset:salads,iod"
# ### Dataset
#
# The dataset used for this tutorial is the Salads category of the [OpenImages dataset](https://www.tensorflow.org/datasets/catalog/open_images_v4) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the bounding box locations and corresponding type of salad items in an image from a class of five items: salad, seafood, tomato, baked goods, or cheese.
# + [markdown] id="objective:automl,training,export_edge"
# ### Objective
#
# In this tutorial, you create an AutoML image object detection model from a Python script using the Vertex SDK, and then export the model as an Edge model in TFLite format. You can alternatively create models with AutoML using the `gcloud` command-line tool or online using the Cloud Console.
#
# The steps performed include:
#
# - Create a Vertex `Dataset` resource.
# - Train the model.
# - Export the `Edge` model from the `Model` resource to Cloud Storage.
# - Download the model locally.
# - Make a local prediction.
# + [markdown] id="costs"
# ### Costs
#
# This tutorial uses billable components of Google Cloud:
#
# * Vertex AI
# * Cloud Storage
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="setup_local"
# ### Set up your local development environment
#
# If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step.
#
# Otherwise, make sure your environment meets this notebook's requirements. You need the following:
#
# - The Cloud Storage SDK
# - Git
# - Python 3
# - virtualenv
# - Jupyter notebook running in a virtual environment with Python 3
#
# The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
#
# 1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).
#
# 2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).
#
# 3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3. Activate the virtual environment.
#
# 4. To install Jupyter, run `pip3 install jupyter` on the command-line in a terminal shell.
#
# 5. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
#
# 6. Open this notebook in the Jupyter Notebook Dashboard.
#
# + [markdown] id="install_aip:mbsdk"
# ## Installation
#
# Install the latest version of Vertex SDK for Python.
# + id="install_aip:mbsdk"
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
# ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
# + [markdown] id="install_storage"
# Install the latest GA version of *google-cloud-storage* library as well.
# + id="install_storage"
# ! pip3 install -U google-cloud-storage $USER_FLAG
# + id="install_tensorflow"
if os.getenv("IS_TESTING"):
# ! pip3 install --upgrade tensorflow $USER_FLAG
# + [markdown] id="restart"
# ### Restart the kernel
#
# Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="restart"
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
# + [markdown] id="before_you_begin:nogpu"
# ## Before you begin
#
# ### GPU runtime
#
# This tutorial does not require a GPU runtime.
#
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)
#
# 4. If you are running this notebook locally, you will need to install the [Cloud SDK]((https://cloud.google.com/sdk)).
#
# 5. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.
# + id="set_project_id"
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
# + id="autoset_project_id"
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# + id="set_gcloud_project_id"
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="region"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
#
# - Americas: `us-central1`
# - Europe: `europe-west4`
# - Asia Pacific: `asia-east1`
#
# You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
#
# Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
# + id="region"
REGION = "us-central1" # @param {type: "string"}
# + [markdown] id="timestamp"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
# + id="timestamp"
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="gcp_authenticate"
# ### Authenticate your Google Cloud account
#
# **If you are using Google Cloud Notebooks**, your environment is already authenticated. Skip this step.
#
# **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
#
# **Click Create service account**.
#
# In the **Service account name** field, enter a name, and click **Create**.
#
# In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# Click Create. A JSON file that contains your key downloads to your local environment.
#
# Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
# + id="gcp_authenticate"
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
# %env GOOGLE_APPLICATION_CREDENTIALS ''
# + [markdown] id="bucket:mbsdk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
#
# Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
# + id="bucket"
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
# + id="autoset_bucket"
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# + [markdown] id="create_bucket"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="create_bucket"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="validate_bucket"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="validate_bucket"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="setup_vars"
# ### Set up variables
#
# Next, set up some variables used throughout the tutorial.
# ### Import libraries and define constants
# + id="import_aip:mbsdk"
import google.cloud.aiplatform as aip
# + [markdown] id="init_aip:mbsdk"
# ## Initialize Vertex SDK for Python
#
# Initialize the Vertex SDK for Python for your project and corresponding bucket.
# + id="init_aip:mbsdk"
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
# + [markdown] id="tutorial_start:automl"
# # Tutorial
#
# Now you are ready to start creating your own AutoML image object detection model.
# + [markdown] id="import_file:u_dataset,csv"
# #### Location of Cloud Storage training data.
#
# Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
# + id="import_file:salads,csv,iod"
IMPORT_FILE = "gs://cloud-samples-data/vision/salads.csv"
# + [markdown] id="quick_peek:csv"
# #### Quick peek at your data
#
# This tutorial uses a version of the Salads dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
#
# Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.
# + id="quick_peek:csv"
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
# ! gsutil cat $FILE | head
# + [markdown] id="create_dataset:image,iod"
# ### Create the Dataset
#
# Next, create the `Dataset` resource using the `create` method for the `ImageDataset` class, which takes the following parameters:
#
# - `display_name`: The human readable name for the `Dataset` resource.
# - `gcs_source`: A list of one or more dataset index files to import the data items into the `Dataset` resource.
# - `import_schema_uri`: The data labeling schema for the data items.
#
# This operation may take several minutes.
# + id="create_dataset:image,iod"
dataset = aip.ImageDataset.create(
display_name="Salads" + "_" + TIMESTAMP,
gcs_source=[IMPORT_FILE],
import_schema_uri=aip.schema.dataset.ioformat.image.bounding_box,
)
print(dataset.resource_name)
# + [markdown] id="create_automl_pipeline:image,edge,iod"
# ### Create and run training pipeline
#
# To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
#
# #### Create training pipeline
#
# An AutoML training pipeline is created with the `AutoMLImageTrainingJob` class, with the following parameters:
#
# - `display_name`: The human readable name for the `TrainingJob` resource.
# - `prediction_type`: The type task to train the model for.
# - `classification`: An image classification model.
# - `object_detection`: An image object detection model.
# - `multi_label`: If a classification task, whether single (`False`) or multi-labeled (`True`).
# - `model_type`: The type of model for deployment.
# - `CLOUD`: Deployment on Google Cloud
# - `CLOUD_HIGH_ACCURACY_1`: Optimized for accuracy over latency for deployment on Google Cloud.
# - `CLOUD_LOW_LATENCY_1`: Optimized for latency over accuracy for deployment on Google Cloud.
# - `MOBILE_TF_VERSATILE_1`: Deployment on an edge device.
# - `MOBILE_TF_HIGH_ACCURACY_1`: Optimized for accuracy over latency for deployment on an edge device.
# - `MOBILE_TF_LOW_LATENCY_1`: Optimized for latency over accuracy for deployment on an edge device.
# - `base_model`: (optional) Transfer learning from existing `Model` resource -- supported for image classification only.
#
# The instantiated object is the DAG (directed acyclic graph) for the training job.
# + id="create_automl_pipeline:image,edge,iod"
dag = aip.AutoMLImageTrainingJob(
display_name="salads_" + TIMESTAMP,
prediction_type="object_detection",
multi_label=False,
model_type="MOBILE_TF_LOW_LATENCY_1",
base_model=None,
)
print(dag)
# + [markdown] id="run_automl_pipeline:image"
# #### Run the training pipeline
#
# Next, you run the DAG to start the training job by invoking the method `run`, with the following parameters:
#
# - `dataset`: The `Dataset` resource to train the model.
# - `model_display_name`: The human readable name for the trained model.
# - `training_fraction_split`: The percentage of the dataset to use for training.
# - `test_fraction_split`: The percentage of the dataset to use for test (holdout data).
# - `validation_fraction_split`: The percentage of the dataset to use for validation.
# - `budget_milli_node_hours`: (optional) Maximum training time specified in unit of millihours (1000 = hour).
# - `disable_early_stopping`: If `True`, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.
#
# The `run` method when completed returns the `Model` resource.
#
# The execution of the training pipeline will take up to 20 minutes.
# + id="run_automl_pipeline:image"
model = dag.run(
dataset=dataset,
model_display_name="salads_" + TIMESTAMP,
training_fraction_split=0.8,
validation_fraction_split=0.1,
test_fraction_split=0.1,
budget_milli_node_hours=20000,
disable_early_stopping=False,
)
# + [markdown] id="evaluate_the_model:mbsdk"
# ## Review model evaluation scores
# After your model has finished training, you can review the evaluation scores for it.
#
# First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project.
# + id="evaluate_the_model:mbsdk"
# Get model resource ID
models = aip.Model.list(filter="display_name=salads_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
# + [markdown] id="export_model:mbsdk,image"
# ## Export as Edge model
#
# You can export an AutoML image object detection model as an `Edge` model, which you can then custom deploy to an edge device or download locally. Use the method `export_model()` to export the model to Cloud Storage, which takes the following parameters:
#
# - `artifact_destination`: The Cloud Storage location to store the SavedFormat model artifacts to.
# - `export_format_id`: The format to save the model as. For an AutoML image object detection Edge model, the options include:
# - `tf-saved-model`: TensorFlow SavedModel for deployment to a container.
# - `tflite`: TensorFlow Lite for deployment to an edge or mobile device.
# - `edgetpu-tflite`: TensorFlow Lite for TPU
# - `tf-js`: TensorFlow for web client
# - `coral-ml`: for Coral devices
#
# - `sync`: Whether to perform the operation synchronously or asynchronously.
# + id="export_model:mbsdk,image"
response = model.export_model(
artifact_destination=BUCKET_NAME, export_format_id="tflite", sync=True
)
model_package = response["artifactOutputUri"]
# + [markdown] id="download_model_artifacts:tflite"
# #### Download the TFLite model artifacts
#
# Now that you have an exported TFLite version of your model, you can test it locally by first downloading it from Cloud Storage.
# + id="download_model_artifacts:tflite"
# ! gsutil ls $model_package
# Download the model artifacts
# ! gsutil cp -r $model_package tflite
tflite_path = "tflite/model.tflite"
# + [markdown] id="instantiate_tflite_interpreter"
# #### Instantiate a TFLite interpreter
#
# The TFLite version of the model is not in TensorFlow SavedModel format, so you cannot directly use methods like predict(). Instead, you use the TFLite interpreter. You must first set up the interpreter for the TFLite model as follows:
#
# - Instantiate a TFLite interpreter for the TFLite model.
# - Instruct the interpreter to allocate input and output tensors for the model.
# - Get detailed information about the model's input and output tensors, which will be needed for prediction.
# + id="instantiate_tflite_interpreter"
import tensorflow as tf
interpreter = tf.lite.Interpreter(model_path=tflite_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_shape = input_details[0]["shape"]
print("input tensor shape", input_shape)
# + [markdown] id="get_test_item"
# ### Get test item
#
# You will use an arbitrary example out of the dataset as a test item. Don't be concerned that the example was likely used in training the model -- we just want to demonstrate how to make a prediction.
# + id="get_test_item:image,224x224"
test_items = ! gsutil cat $IMPORT_FILE | head -n1
test_item = test_items[0].split(",")[0]
with tf.io.gfile.GFile(test_item, "rb") as f:
content = f.read()
test_image = tf.io.decode_jpeg(content)
print("test image shape", test_image.shape)
test_image = tf.image.resize(test_image, (224, 224))
print("test image shape", test_image.shape, test_image.dtype)
test_image = tf.cast(test_image, dtype=tf.uint8).numpy()
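# Note: the resize target (224, 224) is assumed to match the input tensor shape reported
# above by `input_details`; if `input_shape` differs, resize to that shape instead.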
# + [markdown] id="invoke_tflite_interpreter"
# #### Make a prediction with TFLite model
#
# Finally, you do a prediction using your TFLite model, as follows:
#
# - Convert the test image into a batch of a single image (`np.expand_dims`)
# - Set the input tensor for the interpreter to your batch of a single image (`data`).
# - Invoke the interpreter.
# - Retrieve the softmax probabilities for the prediction (`get_tensor`).
# - Determine which label had the highest probability (`np.argmax`).
# + id="invoke_tflite_interpreter"
import numpy as np
data = np.expand_dims(test_image, axis=0)
interpreter.set_tensor(input_details[0]["index"], data)
interpreter.invoke()
softmax = interpreter.get_tensor(output_details[0]["index"])
label = np.argmax(softmax)
print(label)
# + [markdown] id="cleanup:mbsdk"
# # Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
#
# - Dataset
# - Pipeline
# - Model
# - Endpoint
# - AutoML Training Job
# - Batch Job
# - Custom Job
# - Hyperparameter Tuning Job
# - Cloud Storage Bucket
# + id="cleanup:mbsdk"
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
    # Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
    # Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
# ! gsutil rm -r $BUCKET_NAME
| notebooks/community/sdk/sdk_automl_image_object_detection_online_export_edge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
# ## DataTypes & Attributes
# NumPy's main datatype is ndarray
a1 = np.array([1, 2, 3])
a1
type(a1)
# +
a2 = np.array([[1, 2.0, 3.3],
[4, 5, 6.5]])
a3 = np.array([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[10, 11, 12],
[13, 14, 15],
[16, 17, 18]]])
# -
a2
a3
a1.shape, a2.shape, a3.shape
a1.ndim, a2.ndim, a3.ndim
a2.shape
a1.dtype, a2.dtype, a3.dtype
a1.size, a2.size, a3.size
type(a1), type(a2), type(a3)
# +
# Create a DataFrame from a Numpy array
import pandas as pd
df = pd.DataFrame(a2)
df
# -
# ## 2. Creating Arrays
sample_array = np.array([1, 2, 3])
sample_array
sample_array.dtype
ones = np.ones((2, 3))
ones
zeros = np.zeros((2, 3))
zeros
range_array = np.arange(0, 10, 2)
range_array
random_array = np.random.randint(0, 10, size=(3,5))
random_array
random_array_2 = np.random.random((5, 3))
random_array_2
random_array_2.shape
random_array_3 = np.random.rand(5,3)
random_array_3
# +
# Pseudo-random numbers
np.random.seed(seed=0)
random_array_4 = np.random.randint(10, size=(5, 3))
random_array_4
# -
np.random.seed(7)
random_array_5 = np.random.random((5, 3))
random_array_5
random_array_5 = np.random.random((5, 3))
random_array_5
# ## 3. Viewing arrays and matrices
np.unique(random_array_4)
a1
a2
a3
a1[0]
a2[0]
a3[0]
a2[1]
a3[:2, :2, :2]
a4 = np.random.randint(10, size=(2, 3, 4, 5))
a4
a4.shape, a4.ndim
# Get the first 4 numbers of the inner most arrays
a4[:, :, :, :4]
# ## 4. Manipulating & Comparing Arrays
# ### Arithmetic
a1
ones = np.ones((3))
ones
a1 + ones
a1 - ones
a1 * ones
a2
a1 * a2
a3
a1 / ones
# Floor division removes the decimals (rounds down)
a2 // a1
a2
a2 ** 2
np.square(a2)
np.add(a1, ones)
a1
a1 % 2
a2 % 2
np.exp(a1)
np.log(a1)
# ### Aggregation
#
# Aggregation = performing the same operations on a number of things
listy_list = [1, 2, 3]
type(listy_list)
sum(listy_list)
a1
type(a1)
sum(a1)
np.sum(a1)
# Use Python's methods (`sum()`) on Python datatypes and use NumPy's methods on NumPy arrays (`np.sum()`)
# Creating a massive NumPy array
massive_array = np.random.random(100000)
massive_array.size
massive_array[:10]
# %timeit sum(massive_array) # Python's sum()
# %timeit np.sum(massive_array) # NumPy's np.sum()
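# np.sum is typically orders of magnitude faster here: it loops over the contiguous
# float64 buffer in compiled C code, while Python's built-in sum() iterates over the
# array one Python object at a time.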
a2
np.mean(a2)
np.max(a2)
np.min(a2)
# Standard deviation = a measure of how spread out a group of numbers is from the mean
np.std(a2)
# Variance = measure of the average degree to which each number differs from the mean
# Higher variance = wider range of numbers
# Lower variance = narrower range of numbers
np.var(a2)
# Standard deviation = square root of variance
np.sqrt(np.var(a2))
# Demo of std and var
high_var_array = np.array([1, 100, 200, 300, 4000, 5000])
low_var_array = np.array([2, 4, 6, 8, 10])
np.var(high_var_array), np.var(low_var_array)
np.std(high_var_array), np.std(low_var_array)
np.mean(high_var_array), np.mean(low_var_array)
# %matplotlib inline
import matplotlib.pyplot as plt
plt.hist(high_var_array)
plt.show()
plt.hist(low_var_array)
plt.show()
# ## Reshaping & Transposing
a2
a2.shape
a3
a3.shape
a2.shape
# This raises a ValueError: a2 (shape (2, 3)) and a3 (shape (2, 3, 3)) can't be broadcast together
a2 * a3
a2.reshape(2, 3, 1)
a2_reshape = a2.reshape(2, 3, 1)
a2_reshape * a3
a2
# Transpose = switches the axes
a2.T
a2.shape, a2.T.shape
a3
a3.T
a3.shape, a3.T.shape
# ## Dot Product
# +
np.random.seed(0)
mat1 = np.random.randint(10, size=(5, 3))
mat2 = np.random.randint(10, size=(5, 3))
# -
mat1, mat2
mat1.shape, mat2.shape
# Element-wise multiplication (Hadamard product)
mat1 * mat2
# Dot product (inner dimensions must be the same, reshaping mat2 from a 5x3 to a 3x5)
np.dot(mat1, mat2.reshape(3,5))
# Transpose mat1
mat1.T
mat1.T.shape, mat2.shape
np.dot(mat1.T, mat2)
# Transpose mat2
mat3 = np.dot(mat1, mat2.T)
mat3, mat3.shape
# # Dot product example (nut butter sales)
np.random.seed(0)
# Number of jars sold
sales_amounts = np.random.randint(30, size=(5,3))
sales_amounts
# Create weekly_sales DataFrame
weekly_sales = pd.DataFrame(sales_amounts,
index=["Mon", "Tues", "Wed", "Thurs", "Fri"],
columns=["Almond butter", "Peanut butter", "Cashew butter"])
weekly_sales
# Create prices array
prices = np.array([10, 8, 12])
prices
prices.shape
# Create butter_prices DataFrame
butter_prices = pd.DataFrame(prices.reshape(1, 3),
index=["Price"],
columns=["Almond butter", "Peanut butter", "Cashew butter"])
butter_prices
# Shapes need to be aligned so transpose
total_sales = prices.dot(sales_amounts.T)
total_sales
# Create daily_sales
butter_prices
daily_sales = butter_prices.dot(weekly_sales.T)
daily_sales
weekly_sales
weekly_sales["Total ($)"] = daily_sales.T
weekly_sales
| introduction-to-numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Nickamaes/Linear-Algebra_58109/blob/main/Python_Exercise_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="YjGF1CT_nYTw"
# ##Representing Vectors
# + colab={"base_uri": "https://localhost:8080/"} id="m9lKhqGhj7N_" outputId="4e5e2347-6beb-436c-f8d9-d1e694da20ce"
import numpy as np
A = np.array([4,3])
B = np.array([2,-5])
print('Vector A is',A)
print('Vector B is',B)
# + [markdown] id="xQuu4lLio8EJ"
# ##Describing Vectors in NumPy
# + colab={"base_uri": "https://localhost:8080/"} id="tlmEg4YZpG8J" outputId="b71427d5-cf80-4cef-bd3a-7b27c878c14d"
import numpy as np
ball1 = np.array([1,2,3])
ball2 = np.array([0,1,-1])
pool = np.array([ball1,ball2])
pool.shape
pool.ndim
# + colab={"base_uri": "https://localhost:8080/"} id="F6CJ2bKwqfCz" outputId="a9b0dce2-3416-48e8-c72f-ea52f1adc9b1"
U = np.array([[1,2,3],[4,5,6]])
U
U.size
# + [markdown] id="G4v6fKwGr_kw"
# ##Space
# + [markdown] id="-oIV1HKesTj2"
# ###Vector spaces are mathematical objects that abstractly capture the geometry and algebra of linear equations.
# + [markdown] id="j22gJAx1tnzJ"
# ###Addition of Vectors
# + colab={"base_uri": "https://localhost:8080/"} id="VB0-qdeksUxn" outputId="6be31232-8f4a-498f-8b64-3ab628f1ac36"
addend1 = np.array([0,0,0])
addend2 = np.array([1,1,0])
sum = addend1 + addend2
sum
# + colab={"base_uri": "https://localhost:8080/"} id="M0XgYVgNuiqS" outputId="65ec52a9-40f3-4ca3-f84b-0481d631bfc2"
addend1 = np.array([0,0,0])
addend2 = np.array([1,1,0])
resultant = np.add(addend1,addend2)
resultant
# + [markdown] id="Y8TqBAHhvKlm"
# ###Subtraction of Vectors
# + colab={"base_uri": "https://localhost:8080/"} id="_pk_-E0AvPw0" outputId="3205d878-d199-439e-c6ed-c3242e904ffc"
difference = addend1 - addend2
difference
# + colab={"base_uri": "https://localhost:8080/"} id="xl-cb1OYwNzu" outputId="af52138c-58ba-4861-fb38-6b130d58198b"
difference2 = np.subtract(addend1,addend2)
difference2
# + [markdown] id="zNiEiZwDwaLz"
# ###Scaling
# + colab={"base_uri": "https://localhost:8080/"} id="3pMnjGOiwezU" outputId="eff11a21-4582-4c8d-880a-45b15358fd09"
A = np.array([1,5,8,9])
S = 5*A
S
# + [markdown] id="zXUtT7BYx_3Y"
# ###Cross Product
#
# + colab={"base_uri": "https://localhost:8080/"} id="rTIZO5jtxqFO" outputId="a596a315-ff44-4803-ef07-2feca7479b7b"
import numpy as np
A = np.array([2,3])
B = np.array([1,7])
dot = np.dot(A,B)
print(dot)
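# The heading of this section mentions the cross product, while np.dot above gives the dot
# product. A minimal sketch of an actual cross product with np.cross follows (the 3-d vectors
# C and D are made up for illustration, since the cross product is defined for 3-d vectors):
C = np.array([1,0,0])
D = np.array([0,1,0])
cross = np.cross(C,D)
print(cross)  # [0 0 1]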
| Python_Exercise_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spectral Embedding Methods
# One of the primary embedding tools we'll use in this book is a set of methods called *spectral embedding* {cite:t}`spectraltutorial`. You'll see spectral embedding and variations on it repeatedly, both throughout this section and when we get into applications, so it's worth taking the time to understand spectral embedding deeply. If you're familiar with Principal Component Analysis (PCA), this method has a lot of similarities. We'll need to get into a bit of linear algebra to understand how it works.
#
# Remember that the basic idea behind any network embedding method is to take the network and put it into Euclidean space - meaning, a nice data table with rows as observations and columns as features (or dimensions), which you can then plot on an x-y axis. In this section, you'll see the linear algebra-centric approach that spectral embedding uses to do this.
#
# Spectral methods are based on a bit of linear algebra, but hopefully a small enough amount to still be understandable. The overall idea has to do with eigenvectors, and more generally, something called "singular vectors" - a generalization of eigenvectors. It turns out that the biggest singular vectors of a network's adjacency matrix contain the most information about that network - and as the singular vectors get smaller, they contain less information about the network (we're glossing over what 'information' means a bit here, so just think about this as a general intuition). So if you represent a network in terms of its singular vectors, you can drop the smaller ones and still retain most of the information. This is the essence of what spectral embedding is about (here "biggest" means "the singular vector corresponding to the largest singular value").
#
# ```{admonition} Singular Values and Singular Vectors
# If you don't know what singular values and singular vectors are, don't worry about it. You can think of them as a generalization of eigenvalues/vectors (it's also ok if you don't know what those are): all matrices have singular values and singular vectors, but not all matrices have eigenvalues and eigenvectors. In the case of square, symmetric matrices with positive eigenvalues, the eigenvalues/vectors and singular values/vectors are the same thing.
#
# If you want some more background information on eigenstuff and singularstuff, there are some explanations in the Math Refresher section in the introduction. They're an important set of vectors associated with matrices with a bunch of interesting properties. A lot of linear algebra is built around exploring those properties.
# ```
#
# You can see visually how Spectral Embedding works below. We start with a 20-node Stochastic Block Model with two communities, and then find its singular values and vectors. It turns out that because there are only two communities, only the first two singular vectors contain information -- the rest are just noise! (you can see this if you look carefully at the first two columns of the eigenvector matrix). So, we take these two columns and scale them by the first two singular values from the singular value matrix $S$. The final embedding is that scaled matrix, and the plot you see takes the rows of that matrix and puts them into Euclidean space (an x-y axis) as points. This matrix is called the *latent position matrix*, and the embeddings for the nodes are called the *latent positions*. Underneath the figure is a list that explains how the algorithm works, step-by-step.
# + tags=["hide-input"]
from graspologic.simulations import sbm
from graphbook_code import heatmap, cmaps, plot_latents
from graspologic.utils import to_laplacian
from scipy.linalg import svd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def rm_ticks(ax, x=False, y=False, **kwargs):
if x is not None:
ax.axes.xaxis.set_visible(x)
if y is not None:
ax.axes.yaxis.set_visible(y)
sns.despine(ax=ax, **kwargs)
# Make network
B = np.array([[0.8, 0.1],
[0.1, 0.8]])
n = [10, 10]
A, labels = sbm(n=n, p=B, return_labels=True)
L = to_laplacian(A)
U, E, Ut = svd(L)
n_components = 2
Uc = U[:, :n_components]
Ec = E[:n_components]
latents = Uc @ np.diag(Ec)
fig = plt.figure();
ax = fig.add_axes([.06, -.06, .8, .8])
ax = heatmap(L, ax=ax, cbar=False)
ax.set_title("Network Representation", loc="left", fontsize=16)
# add arrow
arrow_ax = fig.add_axes([.8, .3, .3, .1])
rm_ticks(arrow_ax, left=True, bottom=True)
plt.arrow(x=0, y=0, dx=1, dy=0, width=.1, color="black")
# add joint matrix
ax = fig.add_axes([1, -.02*3, .8, .8])
ax = heatmap(U, ax=ax, cbar=False)
ax.set_title("Left Singular vector matrix $U$", loc="left")
ax = fig.add_axes([1.55, -.06, .8, .8])
ax = heatmap(np.diag(E), ax=ax, cbar=False)
ax.set_title("Singular value matrix $S$", loc="left")
ax = fig.add_axes([2.1, -.06, .8, .8])
ax = heatmap(Ut, ax=ax, cbar=False)
ax.set_title("Right singular vector matrix $V^T$", loc="left")
# add second arrow
arrow_ax = fig.add_axes([1.5, -1.2, 1.2, 1])
rm_ticks(arrow_ax, left=True, bottom=True)
style = "Simple, tail_width=10, head_width=40, head_length=20"
kw = dict(arrowstyle=style, color="k", alpha=1)
text_arrow = patches.FancyArrowPatch((0.33, .9), (.1, .5), connectionstyle="arc3, rad=-.55", **kw)
arrow_ax.add_patch(text_arrow)
# Embedding
ax = fig.add_axes([.185, -1.2, .4, .8])
cmap = cmaps["sequential"]
ax = sns.heatmap(latents, cmap=cmap,
ax=ax, cbar=False, xticklabels=False, yticklabels=False)
ax.set_title("Latent Positions \n(matrix representation)", loc="left")
ax.set_xlabel("First two scaled columns of $U$")
ax = fig.add_axes([.185+.45, -1.2, .8, .8])
plot_latents(latents, ax=ax, labels=labels)
ax.set_title("Latent Positions (Euclidean representation)", loc="left")
ax.set_xlabel("Plotting the rows of U as points in space")
fig.suptitle("The Spectral Embedding Algorithm", fontsize=32, x=1.5);
# -
# ```{admonition} The Spectral Embedding Algorithm
# 1. Take a network's adjacency matrix. Optionally take its Laplacian as a network representation.
# 2. Decompose it into a a singular vector matrix, a singular value matrix, and the singular vector matrix's transpose.
# 3. Remove every column of the singular vector matrix except for the first $k$ vectors, corresponding to the $k$ largest singular values.
# 4. Scale the $k$ remaining columns by their corresponding singular values to create the embedding.
# 5. The rows of this embedding matrix are the locations in Euclidean space for the nodes of the network (called the latent positions). The embedding matrix is an estimate of the latent position matrix (which we talked about in the 'why embed networks' section). A condensed code sketch of these steps appears right after this list.
# ```
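# Below is a condensed sketch of those five steps in plain NumPy/SciPy (a minimal illustration
# for intuition only, not graspologic's implementation; the function name is our own):
# +
def spectral_embed_sketch(A, n_components=2, use_laplacian=True):
    """Steps 1-5 of the spectral embedding algorithm, in miniature."""
    X = to_laplacian(A) if use_laplacian else A   # step 1: pick a network representation
    U, S, Vt = svd(X)                             # step 2: singular value decomposition
    U_k = U[:, :n_components]                     # step 3: keep the top k singular vectors
    return U_k @ np.diag(S[:n_components])        # steps 4 & 5: scale them; rows = latent positions
spectral_embed_sketch(A).shape  # the 20 nodes above embedded into 2 dimensions
# -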
# We need to dive into a few specifics to understand spectral embedding better. We need to figure out how to find our network's singular vectors, for instance, and we also need to understand why those singular vectors can be used to form a representation of our network. To do this, we'll explore a few concepts from linear algebra like matrix rank, and we'll see how understanding these concepts connects to understanding spectral embedding.
#
# Let's scale down and make a simple network, with only six nodes. We'll take its Laplacian just to show what that optional step looks like, and then we'll find its singular vectors with a technique we'll explore called Singular Value Decomposition. Then, we'll explore why we can use the first $k$ singular values and vectors to find an embedding. Let's start with creating the simple network.
# ## A Simple Network
# Say we have the simple network below. There are six nodes total, numbered 0 through 5, and there are two distinct connected groups (called "connected components" in network theory land). Nodes 0 through 2 are all connected to each other, and nodes 3 through 5 are also all connected to each other.
# +
from itertools import combinations
import numpy as np
def add_edge(A, edge: tuple):
"""
Add an edge to an undirected graph.
"""
i, j = edge
A[i, j] = 1
A[j, i] = 1
return A
A = np.zeros((6, 6))
for edge in combinations([0, 1, 2], 2):
add_edge(A, edge)
for edge in combinations([3, 4, 5], 2):
add_edge(A, edge)
# -
# You can see the adjacency matrix and network below. Notice that there are two distinct blocks in the adjacency matrix: in its upper-left, you can see the edges between the first three nodes, and in the bottom right, you can see the edges between the second three nodes.
# + tags=["hide-input"]
from graphbook_code import draw_multiplot
import networkx as nx
draw_multiplot(A, pos=nx.kamada_kawai_layout, title="Our Simple Network");
# -
# ## The Laplacian Matrix
# With spectral embedding, we'll either find the singular vectors of the Laplacian or the singular vectors of the Adjacency Matrix itself (For undirected Laplacians, the singular vectors are the same thing as the eigenvectors). Since we already have the adjacency matrix, let's take the Laplacian just to see what that looks like.
#
# Remember from chapter four that there are a few different types of Laplacian matrices. By default, for undirected networks, Graspologic uses the normalized Laplacian $L = D^{-1/2} A D^{-1/2}$, where $D$ is the degree matrix. Remember that the degree matrix has the degree, or number of edges, of each node along the diagonals. Variations on the normalized Laplacian are generally what we use in practice, but for simplicity and illustration, we'll just use the basic, cookie-cutter version of the Laplacian $L = D - A$.
# Here's the degree matrix $D$.
# Build the degree matrix D
degrees = np.count_nonzero(A, axis=0)
D = np.diag(degrees)
D
# And here's the Laplacian matrix, written out in full.
# Build the Laplacian matrix L
L = D-A
L
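# For comparison, here is a quick sketch of the normalized Laplacian mentioned above,
# $L = D^{-1/2} A D^{-1/2}$ (graspologic's `to_laplacian` computes a version of this for you;
# the manual computation below is just for illustration).
# +
D_inv_sqrt = np.diag(1 / np.sqrt(degrees))
L_norm = D_inv_sqrt @ A @ D_inv_sqrt
L_norm.round(2)
# -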
# Below, you can see these matrices visually.
# + tags=["hide-input"]
from graphbook_code import heatmap
import seaborn as sns
from matplotlib.colors import Normalize
from graphbook_code import GraphColormap
import matplotlib.cm as cm
import matplotlib.pyplot as plt
fig, axs = plt.subplots(1, 5, figsize=(25, 5))
# First axis (Degree)
heatmap(D, ax=axs[0], cbar=False, title="Degree Matrix $D$")
# Second axis (-)
axs[1].text(x=.5, y=.5, s="-", fontsize=200,
va='center', ha='center')
axs[1].get_xaxis().set_visible(False)
axs[1].get_yaxis().set_visible(False)
sns.despine(ax=axs[1], left=True, bottom=True)
# Third axis (Adjacency matrix)
heatmap(A, ax=axs[2], cbar=False, title="Adjacency Matrix $A$")
# Third axis (=)
axs[3].text(x=.5, y=.5, s="=", fontsize=200,
va='center', ha='center')
axs[3].get_xaxis().set_visible(False)
axs[3].get_yaxis().set_visible(False)
sns.despine(ax=axs[3], left=True, bottom=True)
# Fourth axis
heatmap(L, ax=axs[4], cbar=False, title="Laplacian Matrix $L$")
# Colorbar
vmin, vmax = np.array(L).min(), np.array(L).max()
norm = Normalize(vmin=vmin, vmax=vmax)
im = cm.ScalarMappable(cmap=GraphColormap("sequential").color, norm=norm)
fig.colorbar(im, ax=axs, shrink=0.8, aspect=10);
fig.suptitle("The Laplacian is just a function of the adjacency matrix", fontsize=24);
# -
# ## Finding Singular Vectors With Singular Value Decomposition
# + [markdown] tags=["hide-input"]
# Now that we have a Laplacian matrix, we'll want to find its singular vectors. To do this, we'll need to use a technique called *Singular Value Decomposition*, or SVD.
#
# SVD is a way to break a single matrix apart (also known as factorizing) into three distinct new matrices -- In our case, the matrix will be the Laplacian we just built. These three new matrices correspond to the singular vectors and singular values of the original matrix: the algorithm will collect all of the singular vectors as columns of one matrix, and the singular values as the diagonals of another matrix.
#
# In the case of the Laplacian (as with all symmetric matrices that have real, positive eigenvalues), remember that the singular vectors/values and the eigenvectors/values are the same thing. For more technical and generalized details on how SVD works, or for explicit proofs, we would recommend a Linear Algebra textbook [Trefethen, LADR]. Here, we'll look at the SVD in a bit more detail for the specific case where we start with a matrix which is square, symmetric, and has real eigenvalues.
#
# **Singular Value Decomposition** Suppose you have a square, symmetric matrix $X$ with real eigenvalues. In our case, $X$ corresponds to the Laplacian $L$ (or the adjacency matrix $A$).
#
# \begin{align*}
# \begin{bmatrix}
# x_{11} & & & \\
# & x_{22} & & \\
# & & \ddots & \\
# & & & x_{nn}
# \end{bmatrix}
# \end{align*}
#
# Then, you can find three matrices - one which rotates vectors in space, one which scales them along each coordinate axis, and another which rotates them back - which, when you multiply them all together, recreate the original matrix $X$. This is the essence of singular value decomposition: you can break down any linear transformation into a rotation, a scaling, and another rotation. Let's call the matrix which rotates $U$ (this type of matrix is called "orthogonal"), the matrix that scales $S$, and the matrix that rotates back $V^T$.
#
# \begin{align*}
# X &= U S V^T
# \end{align*}
#
# Since $U$ is a matrix that just rotates any vector, all of its column-vectors are orthogonal (at right angles) to each other and they all have unit length 1. These columns are more generally called the **singular vectors** of $X$. In some specific cases, these are also called the eigenvectors. Since $S$ just scales, it's a diagonal matrix: there are values on the diagonal, but nothing (0) on the off-diagonals. The amounts by which each coordinate axis is scaled are the diagonal entries of $S$, $\sigma_{i}$. These are the **singular values** of the matrix $X$, and, when some conditions are met, they are also the eigenvalues. Assuming our network is undirected, this will be the case with the Laplacian matrix, but not necessarily the adjacency matrix.
#
# \begin{align*}
# X &= \begin{bmatrix}
# \uparrow & \uparrow & & \uparrow \\
# \vec u_1 & \vec u_2 & ... & \vec u_n \\
# \downarrow & \downarrow & & \downarrow
# \end{bmatrix}\begin{bmatrix}
# \sigma_1 & & & \\
# & \sigma_2 & & \\
# & & \ddots & \\
# & & & \sigma_n
# \end{bmatrix}\begin{bmatrix}
# \leftarrow & \vec u_1^T & \rightarrow \\
# \leftarrow & \vec u_2^T & \rightarrow \\
# & \vdots & \\
# \leftarrow & \vec u_n^T & \rightarrow \\
# \end{bmatrix}
# \end{align*}
# -
# ## Breaking Down Our Network's Laplacian matrix
# Now we know how to break down any random matrix into singular vectors and values with SVD, so let's apply it to our toy network. We'll break down our Laplacian matrix into $U$, $S$, and $V^\top$. The Laplacian is a special case where the singular values and singular vectors are the same as the eigenvalues and eigenvectors, so we'll just refer to them as eigenvalues and eigenvectors from here on, since those terms are more common. For similar (actually the same) reasons, in this case $V^\top = U^\top$.
#
# Here, the leftmost column of $U$ (and the leftmost eigenvalue in $S$) correspond to the eigenvector with the highest eigenvalue, and they're organized in descending order (this is standard for Singular Value Decomposition).
from scipy.linalg import svd
U, S, Vt = svd(L)
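# A quick sanity check on the decomposition: multiplying the three factors back together
# recovers $L$, and the columns of $U$ are orthonormal (both lines below should print True).
print(np.allclose(U @ np.diag(S) @ Vt, L))
print(np.allclose(U.T @ U, np.eye(U.shape[0])))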
# + tags=["hide-input"]
fig, axs = plt.subplots(1, 5, figsize=(25, 5))
# First axis (Laplacian)
heatmap(L, ax=axs[0], cbar=False, title="$L$")
# Second axis (=)
axs[1].text(x=.5, y=.5, s="=", fontsize=200,
va='center', ha='center')
axs[1].get_xaxis().set_visible(False)
axs[1].get_yaxis().set_visible(False)
sns.despine(ax=axs[1], left=True, bottom=True)
# Third axis (U)
U_ax = heatmap(U, ax=axs[2], cbar=False, title="$U$")
U_ax.set_xlabel("Columns of eigenvectors")
# Third axis (S)
E_ax = heatmap(np.diag(S), ax=axs[3], cbar=False, title="$S$")
E_ax.set_xlabel("Eigenvalues on diagonal")
# Fourth axis (V^T)
Ut_ax = heatmap(Vt, ax=axs[4], cbar=False, title="$V^T$")
Ut_ax.set_xlabel("Rows of eigenvectors")
# Colorbar
vmin, vmax = np.array(L).min(), np.array(L).max()
norm = Normalize(vmin=vmin, vmax=vmax)
im = cm.ScalarMappable(cmap=GraphColormap("sequential").color, norm=norm)
fig.colorbar(im, ax=axs, shrink=0.8, aspect=10);
fig.suptitle("Decomposing our simple Laplacian into eigenvectors and eigenvalues with SVD", fontsize=24);
# -
# So now we have a collection of eigenvectors organized into a matrix with $U$, and a collection of their corresponding eigenvalues organized into a matrix with $S$. Remember that with Spectral Embedding, we keep only the largest eigenvalues/vectors and "clip" columns off of $U$.
#
# Why exactly do these matrices reconstruct our Laplacian when multiplied together? Why does the clipped version of $U$ give us a lower-dimensional representation of our network? To answer that question, we'll need to start talking about a concept in linear algebra called the *rank* of a matrix.
#
# The essential idea is that you can turn each eigenvector/eigenvalue pair into a low-information matrix instead of a vector and number. Summing all of these matrices lets you reconstruct $L$. Summing only a few of these matrices lets you get *close* to $L$. In fact, if you were to unwrap the two matrices into single vectors, the vector you get from summing is as close in Euclidean space as you possibly can get to $L$ given the information you deleted when you removed the smaller eigenvectors.
#
# Let's dive into it!
# ## Why We Care About Taking Eigenvectors: Matrix Rank
# When we embed anything to create a new representation, we're essentially trying to find a simpler version of that thing which preserves as much information as possible. This leads us to the concept of **matrix rank**.
#
# **Matrix Rank**: The rank of a matrix $X$, defined $rank(X)$, is the number of linearly independent rows and columns of $X$.
#
# At a very high level, we can think of the matrix rank as telling us just how "simple" $X$ is. A matrix which is rank $1$ is very simple: all of its rows or columns can be expressed as a weighted sum of just a single vector. On the other hand, a matrix which has "full rank", or a rank equal to the number of rows (or columns, whichever is smaller), is a bit more complex: no row nor column can be expressed as a weighted sum of other rows or columns.
#
# There are a couple ways that the rank of a matrix and the singular value decomposition interact which are critical to understand: First, you can make a matrix from your singular vectors and values (eigenvectors and values, in our Laplacian's case), and summing all of them recreates your original, full-rank matrix. Each matrix that you add to the sum increases the rank of the result by one. Second, summing only a few of them gets you to the best estimation of the original matrix that you can get to, given the low-rank result. Let's explore this with a bit more depth.
#
# We'll be using the Laplacian as our examples, which has the distinctive quality of having its eigenvectors be the same as its singular vectors. For the adjacency matrix, this theory all still works, but you'd just have to replace $\vec u_i \vec u_i^\top$ with $\vec u_i \vec v_i^\top$ throughout (the adjacency matrices' SVD is $A = U S V^\top$, since the right singular vectors might be different than the left singular vectors).
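# NumPy can compute the rank directly, so we can check this intuition on a couple of small
# examples before returning to the Laplacian (the two small matrices below are made up just
# for illustration):
# +
rank_one = np.outer([1, 2, 3], [4, 5, 6])   # every row is a multiple of [4, 5, 6]
full_rank = np.eye(3)                       # no row is a combination of the others
print(np.linalg.matrix_rank(rank_one))      # 1
print(np.linalg.matrix_rank(full_rank))     # 3
print(np.linalg.matrix_rank(L))             # our 6-node Laplacian: rank 4, since each connected component contributes one zero eigenvalue
# -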
# ### Summing Rank 1 Matrices Recreates The Original Matrix
# You can actually create an $n \times n$ matrix using any one of the original Laplacian's eigenvectors $\vec u_i$ by taking its outer product $\vec{u_i} \vec{u_i}^T$. This creates a rank one matrix which only contains the information stored in the first eigenvector. Scale it by its eigenvalue $\sigma_i$ and you have something that feels suspiciously similar to how we take the first few singular vectors of $U$ and scale them in the spectral embedding algorithm.
#
# It turns out that we can express any matrix $X$ as the sum of all of these rank one matrices.
# Take the $i^{th}$ column of $U$. Remember that we've been calling this $\vec u_i$: the $i^{th}$ eigenvector of our Laplacian. Its corresponding eigenvalue is the $i^{th}$ element of the diagonal eigenvalue matrix $S$. You can make a rank one matrix from this eigenvalue/eigenvector pair by taking the outer product and scaling the result by the eigenvalue: $\sigma_i \vec u_i \vec u_i^T$.
#
# It turns out that when we take the sum of all of these rank $1$ matrices--each one corresponding to a particular eigenvalue/eigenvector pair--we'll recreate the original matrix.
#
# \begin{align*}
# X &= \sum_{i = 1}^n \sigma_i \vec u_i \vec u_i^T = \sigma_1 \begin{bmatrix}\uparrow \\ \vec u_1 \\ \downarrow\end{bmatrix}\begin{bmatrix}\leftarrow & \vec u_1^T & \rightarrow \end{bmatrix} +
# \sigma_2 \begin{bmatrix}\uparrow \\ \vec u_2 \\ \downarrow\end{bmatrix}\begin{bmatrix}\leftarrow & \vec u_2^T & \rightarrow \end{bmatrix} +
# ... +
# \sigma_n \begin{bmatrix}\uparrow \\ \vec u_n \\ \downarrow\end{bmatrix}\begin{bmatrix}\leftarrow & \vec u_n^T & \rightarrow \end{bmatrix}
# \end{align*}
#
# Here are all of the $\sigma_i \vec u_i \vec u_i^T$ for our Laplacian L. Since there were six nodes in the original network, there are six eigenvalue/vector pairs, and six rank 1 matrices.
# +
n_nodes = U.shape[0]
# For each eigenvector/value,
# find its outer product,
# and append it to a list.
low_rank_matrices = []
for node in range(n_nodes):
ui = np.atleast_2d(U[:, node]).T
vi = np.atleast_2d(Vt.T[:, node]).T
low_rank_matrix = S[node] * ui @ vi.T
low_rank_matrices.append(low_rank_matrix)
# Take the elementwise sum of every matrix in the list.
laplacian_sum = np.array(low_rank_matrices).sum(axis=0)
# -
# You can see the result of the sum below. On the left are all of the low-rank matrices - one corresponding to each eigenvector - and on the right is the sum of all of them. You can see that the sum is just our Laplacian!
# + tags=["hide-input"]
from matplotlib.gridspec import GridSpec
import warnings
fig = plt.figure(figsize=(10, 6))
gs = GridSpec(3, 5)
ax_laplacian = fig.add_subplot(gs[:, 2:])
# Plot low-rank matrices
i = 0
for row in range(3):
for col in range(2):
ax = fig.add_subplot(gs[row, col])
title = f"$\sigma_{i+1} u_{i+1} v_{i+1}^T$"
heatmap(low_rank_matrices[i], ax=ax, cbar=False, title=title)
i += 1
# Plot Laplacian
heatmap(laplacian_sum, ax=ax_laplacian, cbar=False, title="$L = \sum_{i = 1}^n \sigma_i u_i v_i^T$")
# # Colorbar
cax = fig.add_axes([1, 0, .04, .8])
vmin, vmax = np.array(laplacian_sum).min(), np.array(laplacian_sum).max()
norm = Normalize(vmin=vmin, vmax=vmax)
im = cm.ScalarMappable(cmap=GraphColormap("sequential").color, norm=norm)
fig.colorbar(im, cax=cax, use_gridspec=False);
fig.suptitle("We can recreate our simple Laplacian by summing all the low-rank matrices", fontsize=24)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.tight_layout();
# -
# Next up, we'll estimate the Laplacian by only taking a few of these matrices. You can already kind of see in the figure above that this'll work - the last two matrices don't even have anything in them (they're just 0)!
# ### We can approximate our simple Laplacian by only summing a few of the low-rank matrices
# When you sum the first few of these low-rank $\sigma_i u_i u_i^T$, you can *approximate* your original matrix.
#
# This tells us something interesting about Spectral Embedding: the information in the first few eigenvectors of a high rank matrix lets us find a more simple approximation to it. You can take a matrix that's extremely complicated (high-rank) and project it down to something which is much less complicated (low-rank).
#
# Look below. In each plot, we're summing more and more of these low-rank matrices. By the time we get to the fourth sum, we've totally recreated the original Laplacian.
# + tags=["hide-input"]
fig, axs = plt.subplots(2, 3, figsize=(9,6))
current = np.zeros(L.shape)
for i, ax in enumerate(axs.flat):
new = low_rank_matrices[i]
current += new
heatmap(current, ax=ax, cbar=False,
title=f"$\sum_{{i = 1}}^{i+1} \sigma_i u_i u_i^T$")
fig.suptitle("Each of these is the sum of an \nincreasing number of low-rank matrices", fontsize=16)
plt.tight_layout()
# -
# ### Approximating becomes extremely useful when we have a bigger (now normalized) Laplacian
# This becomes even more useful when we have huge networks with thousands of nodes, but only a few communities. It turns out, especially in this situation, we can usually sum a very small number of low-rank matrices and get to an excellent approximation for our network that uses much less information.
#
# Take the network below, for example. It's generated from a Stochastic Block Model with 50 nodes total (25 in one community, 25 in another). We took its normalized Laplacian (remember that this means $L = D^{-1/2} A D^{-1/2}$), decomposed it, and summed the first two low-rank matrices that we generated from the eigenvector columns.
#
# The result is not exact, but it looks pretty close. And we only needed the information from the first two singular vectors instead of all of the information in our full $n \times n$ matrix!
# +
from graspologic.simulations import sbm
from graspologic.utils import to_laplacian
# Make network
B = np.array([[0.8, 0.1],
[0.1, 0.8]])
n = [25, 25]
A2, labels2 = sbm(n=n, p=B, return_labels=True)
# Form new laplacian
L2 = to_laplacian(A2)
# decompose
k = 2
U2, E2, Ut2 = svd(L2)
k_matrices = U2[:, :k]  # the first k singular vectors
low_rank_approximation = U2[:,0:k] @ (np.diag(E2[0:k]) @ Ut2[0:k, :])
# Plotting
fig, axs = plt.subplots(1, 2, figsize=(12, 6))
l2_hm = heatmap(L2, ax=axs[0], cbar=False, title="$L$")
l2approx_hm = heatmap(low_rank_approximation, ax=axs[1], cbar=False, title="$\sum_{{i = 1}}^{2} \sigma_i u_i u_i^T$")
l2_hm.set_xlabel("Full-rank Laplacian for a 50-node matrix", fontdict={'size': 15})
l2approx_hm.set_xlabel("Sum of only two low-rank matrices", fontdict={'size': 15});
fig.suptitle("Summing only two low-rank matrices approximates the normalized Laplacian pretty well!", fontsize=24)
plt.tight_layout()
# -
# This is where a lot of the power of an SVD comes from: you can approximate extremely complicated (high-rank) matrices with extremely simple (low-rank) matrices.
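# One way to quantify that closeness: the spectral-norm error of this rank-2 truncation is
# exactly the largest singular value we threw away, so the two numbers printed below should
# match (up to floating-point noise).
error = np.linalg.norm(L2 - low_rank_approximation, ord=2)
print(error, E2[2])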
# ## How This Matrix Rank Stuff Helps Us Understand Spectral Embedding
# Remember the actual spectral embedding algorithm: we take a network, decompose it with Singular Value Decomposition into its singular vectors and values, and then cut out everything but the top $k$ singular vector/value pairs. Once we scale the columns of singular vectors by their corresponding values, we have our embedding. That embedding is called the latent position matrix, and the locations in space for each of our nodes are called the latent positions.
#
# Let's go back to our original, small (six-node) network and make an estimate of the latent position matrix from it. We'll embed down to three dimensions.
# +
k = 3
U_cut = U[:, :k]
E_cut = S[:k]
latents_small = U_cut @ np.diag(E_cut)
# + tags=["hide-input"]
fig, ax = plt.subplots(figsize=(4, 8))
cmap = cmaps["sequential"]
ax = sns.heatmap(latents_small, cmap=cmap, ax=ax, cbar=False,
xticklabels=1, yticklabels=1)
ax.set_xlabel("Eigenvector")
ax.set_ylabel("Node")
ax.set_title("Latent Position Matrix", fontsize=22, y=1.01)
plt.tight_layout();
# -
# How does what we just talked about help us understand spectral embedding?
#
# Well, each column of the latent position matrix is the $i^{th}$ eigenvector scaled by the $i^{th}$ eigenvalue: $\sigma_i \vec{u_i}$. If we right-multiplied one of those columns by its unscaled transpose $\vec{u_i}^\top$, we'd have one of our rank one matrices. This means that you can think of our rank-one matrices as essentially just fancy versions of the columns of a latent position matrix (our embedding). They contain all the same information - they're just matrices instead of vectors!
# + tags=["hide-input"]
fig, axs = plt.subplots(1, 4, figsize=(20, 5))
# First axis (Degree)
first_col = latents_small[:, 0, None]          # first column of the latent position matrix: sigma_1 * u_1
first_mat = first_col @ U_cut[:, 0, None].T    # right-multiply by the unscaled transpose u_1^T
ax = sns.heatmap(first_col, cmap=cmap, ax=axs[0], cbar=False,
xticklabels=1, yticklabels=1)
ax.set_aspect(1.5)
ax.set_xlabel("First Eigenvector")
ax.set_ylabel("Node")
ax.set_title("First column of \nlatent position matrix $u_0$", fontsize=12, y=1.01)
# Third axis (Adjacency matrix)
ax = sns.heatmap(first_col.T, cmap=cmap, ax=axs[1], cbar=False,
xticklabels=1, yticklabels=1, square=False)
ax.set_aspect(1)
ax.set_xlabel("Node")
ax.set_title("First column of latent position matrix $u_0^T$", fontsize=12, y=1.01)
# Third axis (=)
axs[2].text(x=.5, y=.5, s="=", fontsize=200,
va='center', ha='center')
axs[2].get_xaxis().set_visible(False)
axs[2].get_yaxis().set_visible(False)
sns.despine(ax=axs[2], left=True, bottom=True)
# Fourth axis
heatmap(first_mat, ax=axs[3], cbar=False, title="First low-rank \nmatrix $\sigma_0 u_0 u_0^T$")
# Colorbar
vmin, vmax = np.array(L).min(), np.array(L).max()
norm = Normalize(vmin=vmin, vmax=vmax)
im = cm.ScalarMappable(cmap=GraphColormap("sequential").color, norm=norm)
fig.colorbar(im, ax=axs, shrink=0.8, aspect=10);
fig.suptitle("Our low-rank matrices contain the same information\n as the columns of the latent position matrix", fontsize=22, y=1.1);
# -
# In fact, you can express the sum we did earlier - our lower-rank estimation of L - with just our latent position matrix! Remember that $U_k$ is the first $k$ eigenvectors of our Laplacian, and $S_k$ is the diagonal matrix with the first $k$ eigenvalues (and that we named them $\sigma_1$ through $\sigma_k$).
#
# + tags=["hide-input"]
fig, axs = plt.subplots(1, 5, figsize=(20, 5))
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist.floating_axes as floating_axes
# First axis (sum matrix)
current = np.zeros(L.shape)
for i in range(2):
new = low_rank_matrices[i]
current += new
heatmap(current, ax=axs[0], cbar=False, title="$\sum_{i=1}^2 \sigma_i u_i u_i^T$")
# Second axis (=)
axs[1].text(x=.5, y=.5, s="=", fontsize=200,
va='center', ha='center')
axs[1].get_xaxis().set_visible(False)
axs[1].get_yaxis().set_visible(False)
sns.despine(ax=axs[1], left=True, bottom=True)
# Third axis (Uk)
k = 2
Uk = U[:, :k]
Ek = np.diag(S)[:k, :k]
ax = sns.heatmap(Uk, cmap=cmap, ax=axs[2], cbar=False,
xticklabels=1, yticklabels=1)
ax.set_box_aspect(2)
ax.set_xlabel("Eigenvector")
ax.set_title("$U_k$", fontsize=12, y=1.01)
# Ek
ax = sns.heatmap(Ek, cmap=cmap, ax=axs[3], cbar=False,
xticklabels=1, yticklabels=1, square=True)
ax.set_title("$S_k$", fontsize=12, y=1.01)
sns.despine(bottom=False, top=False, right=False, left=False, ax=ax)
# Uk^T
# TODO: make this the same size as Uk, just rotated (currently too small)
# Will probably involve revamping all this code to make subplots differently,
# because the reason it's that size is that the dimensions are constrained by the `plt.subplots` call.
transform = Affine2D().rotate_deg(90)
axs[4].set_transform(transform)
ax = sns.heatmap(Uk.T, cmap=cmap, ax=axs[4], cbar=False,
xticklabels=1, yticklabels=1)
ax.set_box_aspect(.5)
ax.set_title("$U_k^T$", fontsize=12, y=1.01)
sns.despine(bottom=False, top=False, right=False, left=False, ax=ax)
# -
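# As a quick numerical check of the figure above (reusing Uk, Ek, and low_rank_matrices from
# the earlier cells), the clipped product really does equal the sum of the first two rank-one
# matrices:
print(np.allclose(Uk @ Ek @ Uk.T, low_rank_matrices[0] + low_rank_matrices[1]))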
# This helps give an intuition for why our latent position matrix gives a representation of our network. You can take columns of it, turn those columns into matrices, sum those matrices, and then estimate the Laplacian for the network. That means the columns of our embedding contain all of the information necessary to estimate the network!
# ## Figuring Out How Many Dimensions To Embed Your Network Into
# One thing we haven't addressed is how to figure out how many dimensions to embed down to. We've generally been embedding into two dimensions throughout this chapter (mainly because it's easier to visualize), but you can embed into as many dimensions as you want.
#
# If you don't have any prior information about the "true" dimensionality of your latent positions, by default you'd just be stuck guessing. Fortunately, there are some rules-of-thumb to make your guess better, and some methods people have developed to make fairly decent guesses automatically.
#
# The most common way to pick the number of embedding dimensions is with something called a scree plot. Essentially, the intuition is this: the top singular vectors of an adjacency matrix contain the most useful information about your network, and as the singular vectors have smaller and smaller singular values, they contain less information and so are less important (this is why we're allowed to cut out the smallest $n-k$ singular vectors in the spectral embedding algorithm).
#
# The scree plot just plots the singular values by their indices: the first (biggest) singular value is in the beginning, and the last (smallest) singular value is at the end.
#
# You can see the scree plot for the Laplacian we made earlier below. We're only plotting the first ten singular values for demonstration purposes.
# + tags=["hide-input"]
# from graspologic.plot import screeplot
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea
from scipy.linalg import svdvals
fig, ax = plt.subplots(figsize=(8, 5))
# eigval plot
D = svdvals(L2)
ax.plot(D[:10])
ax.set_xlabel("Singular value index")
ax.set_ylabel("Singular value")
# plot circle
x, y = .15, .15
radius = .15
ada = AnchoredDrawingArea(150, 150, 0, 0, loc='lower left', pad=0., frameon=False)
circle = Circle((105, 35), 20, clip_on=False, zorder=10, linewidth=1,
edgecolor='black', facecolor=(0, 0, 0, .0125),
path_effects=[withStroke(linewidth=5, foreground='w')])
ada.da.add_artist(circle)
ax.add_artist(ada)
# add text
def text(x, y, text):
ax.text(x, y, text, backgroundcolor="white",
ha='center', va='top', color='blue')
text(2, .19, "Elbow")
# -
# You'll notice that there's a marked area called the "elbow". This is an area where singular values stop changing in magnitude as much when they get smaller: before the elbow, singular values change rapidly, and after the elbow, singular values barely change at all. (It's called an elbow because the plot kind of looks like an arm, viewed from the side!)
#
# The location of this elbow gives you a rough indication for how many "true" dimensions your latent positions have. The singular values after the elbow are quite close to each other and have singular vectors which are largely noise, and don't tell you very much about your data. It looks from the scree plot that we should be embedding down to two dimensions, and that adding more dimensions would probably just mean adding noise to our embedding.
#
# One drawback to this method is that a lot of the time, the elbow location is pretty subjective - real data will rarely have a nice, pretty elbow like the one you see above. The advantage is that it still generally works pretty well; embedding into a few more dimensions than you need isn't too bad, since you'll only have a few noise dimensions and there still may be *some* signal there.
#
# In any case, Graspologic automates the process of finding an elbow using a popular method developed in 2006 by <NAME> and <NAME> at the University of Waterloo. We won't get into the specifics of how it works here, but you can usually find fairly good elbows automatically.
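# Graspologic's automated elbow finder is the tool to reach for in practice. Just to make the
# idea concrete, here is a much cruder stand-in (not the method graspologic uses, only a
# "largest gap" heuristic): keep every dimension up to the biggest drop between consecutive
# singular values.
# +
sing_vals = svdvals(L2)
drops = -np.diff(sing_vals[:10])          # size of the drop after each singular value
elbow_guess = int(np.argmax(drops)) + 1   # number of dimensions to keep
print("Largest-gap heuristic suggests keeping", elbow_guess, "dimension(s)")
# -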
# ## Using Graspologic to embed networks
# It's pretty straightforward to use graspologic's API to embed a network. The setup works like a scikit-learn class: you instantiate an AdjacencySpectralEmbed class, and then you use it to transform data. You set the number of dimensions to embed to (the number of eigenvector columns to keep!) with `n_components`.
# ### Adjacency Spectral Embedding
# +
from graspologic.embed import AdjacencySpectralEmbed as ASE
# Generate a network from an SBM
B = np.array([[0.8, 0.1],
[0.1, 0.8]])
n = [25, 25]
A, labels = sbm(n=n, p=B, return_labels=True)
# Instantiate an ASE model and find the embedding
ase = ASE(n_components=2)
embedding = ase.fit_transform(A)
# -
plot_latents(embedding, labels=labels, title="Adjacency Spectral Embedding");
# ### Laplacian Spectral Embedding
# +
from graspologic.embed import LaplacianSpectralEmbed as LSE
embedding = LSE(n_components=2).fit_transform(A)
# -
plot_latents(embedding, labels=labels, title="Laplacian Spectral Embedding")
# + [markdown] tags=[]
# ## When should you use ASE and when should you use LSE?
# -
# Throughout this article, we've primarily used LSE, since Laplacians have some nice properties (such as having singular values being the same as eigenvalues) that make stuff like SVD easier to explain. However, you can embed the same network with either ASE or LSE, and you'll get two different (but equally true) embeddings.
#
# Since both embeddings will give you a reasonable clustering, how are they different? When should you use one compared to the other?
#
# Well, it turns out that LSE and ASE capture different notions of "clustering". <NAME> and collaborators at Johns Hopkins University investigated this recently - in 2018 - and discovered that LSE lets you capture "affinity" structure, whereas ASE lets you capture "core-periphery" structure (their paper is called "On a two-truths phenomenon in spectral graph clustering" - it's an interesting read for the curious). The difference between the two types of structure is shown in the image below.
#
#
# ```{figure} ../../Images/two-truths.jpeg
# ---
# height: 400px
# name: two-truths
# ---
# Affinity vs. Core-periphery Structure
# ```
# The "affinity" structure - the one that LSE is good at finding - means that you have two groups of nodes which are well-connected within the groups, and aren't very connected with each other. Think of a friend network in two schools, where people within the same school are much more likely to be friends than people in different schools. This is a type of structure we've seen a lot in this book in our Stochastic Block Model examples. If you think the communities in your data look like this, you should apply LSE to your network.
#
# The name "core-periphery" is a good description for this type of structure (which ASE is good at finding). In this notion of clustering, you have a core group of well-connected nodes surrounded by a bunch of "outlier" nodes which just don't have too many edges with anything in general. Think of a core of popular, well-liked, and charismatic kids at a high school, with a periphery of loners or people who prefer not to socialize as much.
| network_machine_learning_in_python/representations/ch6/spectral-embedding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="WBk0ZDWY-ff8"
# <table align="center">
# <td align="center"><a target="_blank" href="http://introtodeeplearning.com">
# <img src="https://i.ibb.co/Jr88sn2/mit.png" style="padding-bottom:5px;" />
# Visit MIT Deep Learning</a></td>
# <td align="center"><a target="_blank" href="https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab1/Part1_TensorFlow.ipynb">
# <img src="https://i.ibb.co/2P3SLwK/colab.png" style="padding-bottom:5px;" />Run in Google Colab</a></td>
# <td align="center"><a target="_blank" href="https://github.com/aamini/introtodeeplearning/blob/master/lab1/Part1_TensorFlow.ipynb">
# <img src="https://i.ibb.co/xfJbPmL/github.png" height="70px" style="padding-bottom:5px;" />View Source on GitHub</a></td>
# </table>
#
#
# # Copyright Information
#
# + id="3eI6DUic-6jo"
# Copyright 2022 MIT 6.S191 Introduction to Deep Learning. All Rights Reserved.
#
# Licensed under the MIT License. You may not use this file except in compliance
# with the License. Use and/or modification of this code outside of 6.S191 must
# reference:
#
# © MIT 6.S191: Introduction to Deep Learning
# http://introtodeeplearning.com
#
# + [markdown] id="57knM8jrYZ2t"
# # Lab 1: Intro to TensorFlow and Music Generation with RNNs
#
# In this lab, you'll get exposure to using TensorFlow and learn how it can be used for solving deep learning tasks. Go through the code and run each cell. Along the way, you'll encounter several ***TODO*** blocks -- follow the instructions to fill them out before running those cells and continuing.
#
#
# # Part 1: Intro to TensorFlow
#
# ## 0.1 Install TensorFlow
#
# TensorFlow is a software library extensively used in machine learning. Here we'll learn how computations are represented and how to define a simple neural network in TensorFlow. For all the labs in 6.S191 2022, we'll be using the latest version of TensorFlow, TensorFlow 2, which affords great flexibility and the ability to imperatively execute operations, just like in Python. You'll notice that TensorFlow 2 is quite similar to Python in its syntax and imperative execution. Let's install TensorFlow and a couple of dependencies.
#
# + id="LkaimNJfYZ2w" outputId="ad23af07-8721-47bb-f66c-d256b2c682c5"
import tensorflow as tf
# Download and import the MIT 6.S191 package
# !pip install mitdeeplearning
import mitdeeplearning as mdl
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="2QNMcdP4m3Vs"
# ## 1.1 Why is TensorFlow called TensorFlow?
#
# TensorFlow is called 'TensorFlow' because it handles the flow (node/mathematical operation) of Tensors, which are data structures that you can think of as multi-dimensional arrays. Tensors are represented as n-dimensional arrays of base datatypes such as a string or integer -- they provide a way to generalize vectors and matrices to higher dimensions.
#
# The ```shape``` of a Tensor defines its number of dimensions and the size of each dimension. The ```rank``` of a Tensor provides the number of dimensions (n-dimensions) -- you can also think of this as the Tensor's order or degree.
#
# Let's first look at 0-d Tensors, of which a scalar is an example:
# + id="tFxztZQInlAB" outputId="56cbaf8d-1391-4c8a-aec0-055fd1d0c609"
sport = tf.constant("Tennis", tf.string)
number = tf.constant(1.41421356237, tf.float64)
print("`sport` is a {}-d Tensor".format(tf.rank(sport).numpy()))
print("`number` is a {}-d Tensor".format(tf.rank(number).numpy()))
# + [markdown] id="-dljcPUcoJZ6"
# Vectors and lists can be used to create 1-d Tensors:
# + id="oaHXABe8oPcO" outputId="ce718788-29bd-49ea-e99b-4f1e3207deb1"
sports = tf.constant(["Tennis", "Basketball"], tf.string)
numbers = tf.constant([3.141592, 1.414213, 2.71821], tf.float64)
print("`sports` is a {}-d Tensor with shape: {}".format(tf.rank(sports).numpy(), tf.shape(sports)))
print("`numbers` is a {}-d Tensor with shape: {}".format(tf.rank(numbers).numpy(), tf.shape(numbers)))
# + [markdown] id="gvffwkvtodLP"
# Next we consider creating 2-d (i.e., matrices) and higher-rank Tensors. For example, in future labs involving image processing and computer vision, we will use 4-d Tensors. Here the dimensions correspond to the number of example images in our batch, image height, image width, and the number of color channels.
# + id="tFeBBe1IouS3"
### Defining higher-order Tensors ###
'''TODO: Define a 2-d Tensor'''
matrix = tf.constant([[1,2,3], [7,8,9]])
assert isinstance(matrix, tf.Tensor), "matrix must be a tf Tensor object"
assert tf.rank(matrix).numpy() == 2
# + id="Zv1fTn_Ya_cz"
'''TODO: Define a 4-d Tensor.'''
# Use tf.zeros to initialize a 4-d Tensor of zeros with size 10 x 256 x 256 x 3.
# You can think of this as 10 images where each image is RGB 256 x 256.
images = tf.zeros([10,256,256,3])
assert isinstance(images, tf.Tensor), "matrix must be a tf Tensor object"
assert tf.rank(images).numpy() == 4, "matrix must be of rank 4"
assert tf.shape(images).numpy().tolist() == [10, 256, 256, 3], "matrix is incorrect shape"
# + [markdown] id="wkaCDOGapMyl"
# As you have seen, the ```shape``` of a Tensor provides the number of elements in each Tensor dimension. The ```shape``` is quite useful, and we'll use it often. You can also use slicing to access subtensors within a higher-rank Tensor:
# + id="FhaufyObuLEG" outputId="47aee9ef-1f50-4183-e876-dcf8050841e0"
row_vector = matrix[1]
column_vector = matrix[:,2]
scalar = matrix[1, 2]
print("`row_vector`: {}".format(row_vector.numpy()))
print("`column_vector`: {}".format(column_vector.numpy()))
print("`scalar`: {}".format(scalar.numpy()))
# + [markdown] id="iD3VO-LZYZ2z"
# ## 1.2 Computations on Tensors
#
# A convenient way to think about and visualize computations in TensorFlow is in terms of graphs. We can define this graph in terms of Tensors, which hold data, and the mathematical operations that act on these Tensors in some order. Let's look at a simple example, and define this computation using TensorFlow:
#
# ![alt text](https://raw.githubusercontent.com/aamini/introtodeeplearning/master/lab1/img/add-graph.png)
# + id="X_YJrZsxYZ2z" outputId="d029ec1b-ff6a-411d-cfef-c404c5ad873b"
# Create the nodes in the graph, and initialize values
a = tf.constant(15)
b = tf.constant(61)
# Add them!
c1 = tf.add(a,b)
c2 = a + b # TensorFlow overrides the "+" operation so that it is able to act on Tensors
print(c1)
print(c2)
# + [markdown] id="Mbfv_QOiYZ23"
# Notice how we've created a computation graph consisting of TensorFlow operations, and how the output is a Tensor with value 76 -- TensorFlow has executed the operations and given us back the result.
#
# Now let's consider a slightly more complicated example:
#
# ![alt text](https://raw.githubusercontent.com/aamini/introtodeeplearning/master/lab1/img/computation-graph.png)
#
# Here, we take two inputs, `a, b`, and compute an output `e`. Each node in the graph represents an operation that takes some input, does some computation, and passes its output to another node.
#
# Let's define a simple function in TensorFlow to construct this computation function:
# + id="PJnfzpWyYZ23"
### Defining Tensor computations ###
# Construct a simple computation function
def func(a,b):
'''TODO: Define the operation for c, d, e (use tf.add, tf.subtract, tf.multiply).'''
c = tf.add(a,b)
d = tf.subtract(b,1)
e = tf.multiply(c,d)
return e
# + [markdown] id="AwrRfDMS2-oy"
# Now, we can call this function to execute the computation graph given some inputs `a,b`:
# + id="pnwsf8w2uF7p" outputId="029cfd71-915b-4f55-cd99-7d1e452c84f3"
# Consider example values for a,b
a, b = 1.5, 2.5
# Execute the computation
e_out = func(a,b)
print(e_out)
# + [markdown] id="6HqgUIUhYZ29"
# Notice how our output is a Tensor with value defined by the output of the computation, and that the output has no shape as it is a single scalar value.
# + [markdown] id="1h4o9Bb0YZ29"
# ## 1.3 Neural networks in TensorFlow
# We can also define neural networks in TensorFlow. TensorFlow uses a high-level API called [Keras](https://www.tensorflow.org/guide/keras) that provides a powerful, intuitive framework for building and training deep learning models.
#
# Let's first consider the example of a simple perceptron defined by just one dense layer: $ y = \sigma(Wx + b)$, where $W$ represents a matrix of weights, $b$ is a bias, $x$ is the input, $\sigma$ is the sigmoid activation function, and $y$ is the output. We can also visualize this operation using a graph:
#
# ![alt text](https://raw.githubusercontent.com/aamini/introtodeeplearning/master/lab1/img/computation-graph-2.png)
#
# Tensors can flow through abstract types called [```Layers```](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) -- the building blocks of neural networks. ```Layers``` implement common neural network operations, and are used to update weights, compute losses, and define inter-layer connectivity. We will first define a ```Layer``` to implement the simple perceptron defined above.
# + id="HutbJk-1kHPh" outputId="cd5c70ed-c296-4da6-ab73-64d8ebc7234b"
### Defining a network Layer ###
# n_output_nodes: number of output nodes
# input_shape: shape of the input
# x: input to the layer
class OurDenseLayer(tf.keras.layers.Layer):
def __init__(self, n_output_nodes):
super(OurDenseLayer, self).__init__()
self.n_output_nodes = n_output_nodes
def build(self, input_shape):
d = int(input_shape[-1])
# Define and initialize parameters: a weight matrix W and bias b
# Note that parameter initialization is random!
self.W = self.add_weight("weight", shape=[d, self.n_output_nodes]) # note the dimensionality
self.b = self.add_weight("bias", shape=[1, self.n_output_nodes]) # note the dimensionality
def call(self, x):
'''TODO: define the operation for z (hint: use tf.matmul)'''
z = tf.matmul(x, self.W) + self.b
'''TODO: define the operation for out (hint: use tf.sigmoid)'''
y = tf.sigmoid(z)
return y
# Since layer parameters are initialized randomly, we will set a random seed for reproducibility
tf.random.set_seed(1)
layer = OurDenseLayer(3)
layer.build((1,2))
x_input = tf.constant([[1,2.]], shape=(1,2))
y = layer.call(x_input)
# test the output!
print(y.numpy())
mdl.lab1.test_custom_dense_layer_output(y)
# + [markdown] id="Jt1FgM7qYZ3D"
# Conveniently, TensorFlow has defined a number of ```Layers``` that are commonly used in neural networks, for example a [```Dense```](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense?version=stable). Now, instead of using a single ```Layer``` to define our simple neural network, we'll use the [`Sequential`](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Sequential) model from Keras and a single [`Dense` ](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/layers/Dense) layer to define our network. With the `Sequential` API, you can readily create neural networks by stacking together layers like building blocks.
# + id="7WXTpmoL6TDz"
### Defining a neural network using the Sequential API ###
# Import relevant packages
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
# Define the number of outputs
n_output_nodes = 3
# First define the model
model = Sequential()
'''TODO: Define a dense (fully connected) layer to compute z'''
# Remember: dense layers are defined by the parameters W and b!
# You can read more about the initialization of W and b in the TF documentation :)
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense?version=stable
dense_layer = Dense(n_output_nodes, activation='sigmoid')  # a fully connected layer, as in the perceptron above
# Add the dense layer to the model
model.add(dense_layer)
# + [markdown] id="HDGcwYfUyR-U"
# That's it! We've defined our model using the Sequential API. Now, we can test it out using an example input:
# + id="sg23OczByRDb"
# Test model with example input
x_input = tf.constant([[1,2.]], shape=(1,2))
'''TODO: feed input into the model and predict the output!'''
model_output = model(x_input)  # feed the example input through the model
print(model_output)
# + [markdown] id="596NvsOOtr9F"
# In addition to defining models using the `Sequential` API, we can also define neural networks by directly subclassing the [`Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model?version=stable) class, which groups layers together to enable model training and inference. The `Model` class captures what we refer to as a "model" or as a "network". Using Subclassing, we can create a class for our model, and then define the forward pass through the network using the `call` function. Subclassing affords the flexibility to define custom layers, custom training loops, custom activation functions, and custom models. Let's define the same neural network as above now using Subclassing rather than the `Sequential` model.
# + id="K4aCflPVyViD"
### Defining a model using subclassing ###
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
class SubclassModel(tf.keras.Model):
# In __init__, we define the Model's layers
def __init__(self, n_output_nodes):
super(SubclassModel, self).__init__()
'''TODO: Our model consists of a single Dense layer. Define this layer.'''
        self.dense_layer = Dense(n_output_nodes, activation='sigmoid')
# In the call function, we define the Model's forward pass.
def call(self, inputs):
return self.dense_layer(inputs)
# + [markdown] id="U0-lwHDk4irB"
# Just like the model we built using the `Sequential` API, let's test out our `SubclassModel` using an example input.
#
#
# + id="LhB34RA-4gXb"
n_output_nodes = 3
model = SubclassModel(n_output_nodes)
x_input = tf.constant([[1,2.]], shape=(1,2))
print(model.call(x_input))
# + [markdown] id="HTIFMJLAzsyE"
# Importantly, Subclassing affords us a lot of flexibility to define custom models. For example, we can use boolean arguments in the `call` function to specify different network behaviors, for example different behaviors during training and inference. Let's suppose under some instances we want our network to simply output the input, without any perturbation. We define a boolean argument `isidentity` to control this behavior:
# + id="P7jzGX5D1xT5"
### Defining a model using subclassing and specifying custom behavior ###
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense
class IdentityModel(tf.keras.Model):
# As before, in __init__ we define the Model's layers
# Since our desired behavior involves the forward pass, this part is unchanged
def __init__(self, n_output_nodes):
super(IdentityModel, self).__init__()
self.dense_layer = tf.keras.layers.Dense(n_output_nodes, activation='sigmoid')
'''TODO: Implement the behavior where the network outputs the input, unchanged,
under control of the isidentity argument.'''
def call(self, inputs, isidentity=False):
x = self.dense_layer(inputs)
'''TODO: Implement identity behavior'''
if isidentity:
return inputs
return x
# + [markdown] id="Ku4rcCGx5T3y"
# Let's test this behavior:
# + id="NzC0mgbk5dp2" outputId="c1f430d7-6a60-4e04-9d18-ca4e0fad63de"
n_output_nodes = 3
model = IdentityModel(n_output_nodes)
x_input = tf.constant([[1,2.]], shape=(1,2))
'''TODO: pass the input into the model and call with and without the input identity option.'''
out_activate = model.call(x_input)
out_identity = model.call(x_input, isidentity = True)
print("Network output with activation: {}; network identity output: {}".format(out_activate.numpy(), out_identity.numpy()))
# + [markdown] id="7V1dEqdk6VI5"
# Now that we have learned how to define `Layers` as well as neural networks in TensorFlow using both the `Sequential` and Subclassing APIs, we're ready to turn our attention to how to actually implement network training with backpropagation.
# + [markdown] id="dQwDhKn8kbO2"
# ## 1.4 Automatic differentiation in TensorFlow
#
# [Automatic differentiation](https://en.wikipedia.org/wiki/Automatic_differentiation)
# is one of the most important parts of TensorFlow and is the backbone of training with
# [backpropagation](https://en.wikipedia.org/wiki/Backpropagation). We will use the TensorFlow GradientTape [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape?version=stable) to trace operations for computing gradients later.
#
# When a forward pass is made through the network, all forward-pass operations get recorded to a "tape"; then, to compute the gradient, the tape is played backwards. By default, the tape is discarded after it is played backwards; this means that a particular `tf.GradientTape` can only
# compute one gradient, and subsequent calls throw a runtime error. However, we can compute multiple gradients over the same computation by creating a ```persistent``` gradient tape.
#
# First, we will look at how we can compute gradients using GradientTape and access them for computation. We define the simple function $ y = x^2$ and compute the gradient:
# + id="tdkqk8pw5yJM"
### Gradient computation with GradientTape ###
# y = x^2
# Example: x = 3.0
x = tf.Variable(3.0)
# Initiate the gradient tape
with tf.GradientTape() as tape:
# Define the function
y = x * x
# Access the gradient -- derivative of y with respect to x
dy_dx = tape.gradient(y, x)
assert dy_dx.numpy() == 6.0
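# + [markdown]
# As noted above, a tape created with `persistent=True` can be queried more than once. A minimal sketch, reusing the same $y = x^2$ setup:
# +
### Persistent GradientTape (minimal sketch) ###

x = tf.Variable(3.0)
with tf.GradientTape(persistent=True) as tape:
  y = x * x    # y = x^2
  z = y * y    # z = x^4
dy_dx = tape.gradient(y, x)  # 2 * x   = 6.0
dz_dx = tape.gradient(z, x)  # 4 * x^3 = 108.0
del tape  # release the persistent tape's resources when done
assert dy_dx.numpy() == 6.0
assert dz_dx.numpy() == 108.0
# -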
# + [markdown] id="JhU5metS5xF3"
# In training neural networks, we use differentiation and stochastic gradient descent (SGD) to optimize a loss function. Now that we have a sense of how `GradientTape` can be used to compute and access derivatives, we will look at an example where we use automatic differentiation and SGD to find the minimum of $L=(x-x_f)^2$. Here $x_f$ is a variable for a desired value we are trying to optimize for; $L$ represents a loss that we are trying to minimize. While we can clearly solve this problem analytically ($x_{min}=x_f$), considering how we can compute this using `GradientTape` sets us up nicely for future labs where we use gradient descent to optimize entire neural network losses.
# + attributes={"classes": ["py"], "id": ""} id="7g1yWiSXqEf-" outputId="b277e685-51c4-4b37-<PASSWORD>-c<PASSWORD>"
### Function minimization with automatic differentiation and SGD ###
# Initialize a random value for our initial x
x = tf.Variable([tf.random.normal([1])])
print("Initializing x={}".format(x.numpy()))
learning_rate = 1e-2 # learning rate for SGD
history = []
# Define the target value
x_f = 4
# We will run SGD for a number of iterations. At each iteration, we compute the loss,
# compute the derivative of the loss with respect to x, and perform the SGD update.
for i in range(500):
with tf.GradientTape() as tape:
'''TODO: define the loss as described above'''
loss = (x - x_f)**2
# loss minimization using gradient tape
grad = tape.gradient(loss, x) # compute the derivative of the loss with respect to x
new_x = x - learning_rate*grad # sgd update
x.assign(new_x) # update the value of x
history.append(x.numpy()[0])
# Plot the evolution of x as we optimize towards x_f!
plt.plot(history)
plt.plot([0, 500],[x_f,x_f])
plt.legend(('Predicted', 'True'))
plt.xlabel('Iteration')
plt.ylabel('x value')
# + [markdown] id="pC7czCwk3ceH"
# `GradientTape` provides an extremely flexible framework for automatic differentiation. In order to back propagate errors through a neural network, we track forward passes on the Tape, use this information to determine the gradients, and then use these gradients for optimization using SGD.
# + id="Gfj2AobCmYQj"
| Part1_TensorFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
from sklearn import preprocessing
import matplotlib.cm as cm
# %matplotlib inline
df = pd.read_csv('ALS_train.csv')
df
als = df['ALSFRS_slope'].values
df2 = df.drop(['ID','SubjectID','ALSFRS_slope','ALSFRS_Total_max','ALSFRS_Total_median','ALSFRS_Total_min','ALSFRS_Total_range'],axis='columns')
als
cov = np.zeros(df2.shape[1])  # one covariance entry per remaining feature column
for i,col in enumerate(df2.columns):
cov[i] = np.cov(als,df2[col].values)[0][1]
ind = np.argpartition(abs(cov), -10)[-10:]
df2.columns[ind]
features = df2[df2.columns[ind]].values
features[:,0].shape
scaler = preprocessing.StandardScaler().fit(features)
feat_scaled = scaler.transform(features)  # apply the standardization fitted above
e = [KMeans(n_clusters=k, random_state=42,init='random').fit(feat_scaled).inertia_ for k in range(1,10)]
kmeans_per_ke = [KMeans(n_clusters=k, random_state=42, init='random').fit(feat_scaled) for k in range(1,10)]
kmeans_per_ke[1].cluster_centers_
s_scors = [silhouette_score(feat_scaled, model.labels_) for model in kmeans_per_ke[1:]]
plt.plot(range(2,10),s_scors)
k = np.argmax(s_scors)+2
plt.scatter(k,s_scors[k-2],c='r',s=400)
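# The inertia values collected in `e` above can also be inspected as an elbow curve;
# this small plot is illustrative and complements the silhouette-based choice of k.
plt.figure()
plt.plot(range(1, 10), e, marker='o')
plt.xlabel('k')
plt.ylabel('Inertia')
plt.title('Elbow curve')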
# +
range_n_clusters = [2, 3, 4, 5]
for n_clusters in range_n_clusters:
fig, ax1 = plt.subplots()
fig.set_size_inches(18, 7)
ax1.set_xlim([-0.1, 1])
ax1.set_ylim([0, len(features) + (n_clusters + 1) * 10])
clusterer = KMeans(n_clusters=n_clusters, random_state=10,init='random')
cluster_labels = clusterer.fit_predict(feat_scaled)
silhouette_avg = silhouette_score(feat_scaled, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
sample_silhouette_values = silhouette_samples(feat_scaled, cluster_labels)
y_lower = 10
for i in range(n_clusters):
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([])
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# -
lab = kmeans_per_ke[1].predict(feat_scaled)
Z = lab[lab == 0].shape[0]
O = lab[lab == 1].shape[0]
plt.bar(['First Cluster','Second Cluster'],[Z,O],width=0.2)
plt.ylabel('# of Samples')
| 6-c.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building first level models using _nipype_ and _SPM12_
#
# ## Base functionality for _megameta_ project
#
# -------
# #### History
#
# * 3/28/19 mbod - update pipeline to include resampling to template & SPM path reference
# * 3/23/19 mbod - include contrast definition in the config JSON file
# * 3/9/19 mbod - updates from testing template with `darpa1`
# * 2/27/19 mbod - modify example notebook to make base functionality notebook
#
# -----
#
# ### Description
#
# * Set up a nipype workflow to use SPM12 to make first level models for _megameta_ task data (preprocessed using `batch8` SPM8 scripts) in BIDS derivative format
#
# -------------------
#
# ### Template variables
#
# * Specify the following values:
# 1. project name - should be name of folder under `/data00/project/megameta`, e.g. `project1`
# 2. filename for JSON model specification (should be inside `model_specification` folder), e.g. `p1_image_pmod_likeme.json`
# 3. TR value in seconds
#
#
#
# -------------------
#
# ### Setup
#
# * import required modules and define parameters
# +
import os # system functions
# NIYPE FUNCTIONS
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.spm as spm # spm
import nipype.interfaces.matlab as mlab # how to run matlab
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model specification
from nipype.interfaces.base import Bunch
from nipype.algorithms.misc import Gunzip
from itertools import combinations
from nilearn import plotting, image
from nistats import thresholding
from IPython.display import Image
import scipy.io as sio
import numpy as np
import json
import pandas as pd
# -
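# #### Template variable values
#
# * The cells below expect `PROJECT_NAME`, `MODEL_SPEC_FILE`, `TR` and `PATH_TO_SPM_FOLDER` to be defined before they are run
# * The values here are placeholders (the project name and JSON filename come from the examples above; the TR and SPM12 path are assumptions) and should be replaced with the real values for your project
PROJECT_NAME = 'project1'                      # e.g. folder under /data00/projects/megameta
MODEL_SPEC_FILE = 'p1_image_pmod_likeme.json'  # JSON model specification filename
TR = 2.0                                       # repetition time in seconds (placeholder)
PATH_TO_SPM_FOLDER = '/data00/tools/spm12'     # assumed SPM12 location, used by the MATLAB path cell below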
# #### Matlab path
#
# Set the way matlab should be called
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
# If SPM is not in your MATLAB path you should add it here
mlab.MatlabCommand.set_default_paths(PATH_TO_SPM_FOLDER)
# ### Parameters
#
# * These need to be reformatted to be consistent
# * as data is not smoothed commented out the `fwhm_size` param - but data probably has a value
# #### Load JSON model config
JSON_MODEL_FILE = os.path.join('/data00/projects/megameta/scripts/jupyter_megameta/first_level_models',
PROJECT_NAME, 'model_specifications',
MODEL_SPEC_FILE)
with open(JSON_MODEL_FILE) as fh:
model_def = json.load(fh)
TASK_NAME = model_def['TaskName']
RUNS = model_def['Runs']
MODEL_NAME = model_def['ModelName']
PROJECT_NAME = model_def['ProjectID']
# +
PROJECT_DIR = os.path.join('/data00/projects/megameta', PROJECT_NAME)
SUBJ_DIR = os.path.join(PROJECT_DIR, 'derivatives', 'batch8')
task_func_template = "{PID}_task-{TASK}_run-0{RUN}_space-MNI152-T1-1mm_desc-preproc_bold.nii.gz"
subject_list = [subj for subj in os.listdir(SUBJ_DIR)
if os.path.exists(os.path.join(SUBJ_DIR,subj,'func',
task_func_template.format(PID=subj, TASK=TASK_NAME, RUN=1)))]
output_dir = os.path.join(PROJECT_DIR,'derivatives', 'nipype','model_{}_{}'.format(TASK_NAME.upper(), MODEL_NAME)) # name of 1st-level output folder
working_dir = os.path.join(PROJECT_DIR, 'working',
'nipype', 'workingdir_model_{}_{}'.format(TASK_NAME.upper(), MODEL_NAME)) # name of 1st-level working directory
# +
# check to see if output and work directories exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(working_dir):
os.makedirs(working_dir)
# +
try:
subject_list = [ s for s in subject_list if s not in exclude_subjects ]
    print('\n\nApplied subject exclusion list:\n\t', ' '.join(exclude_subjects))
except:
print('\n\nNo subject exclusions applied')
try:
subject_list = [ s for s in subject_list if s in include_subjects ]
print('\n\nApplied subject inclusion list:\n\t',' '.join(include_subjects))
except:
print('\n\nNo subject inclusions applied')
print('\n\nSUBJECT LIST IS:\n\t', ' '.join(subject_list))
# -
# ### Utility functions for subject info and contrasts
# ### Setup design matrix data for subject
#
# * need a function to set up the nipype `Bunch` format used
# * https://nipype.readthedocs.io/en/latest/users/model_specification.html
# * read the onsets/dur/conditions from task logs and extract needed data
def get_subject_info(subject_id, model_path, DEBUG=False):
'''
1. load model specification from JSON spec file
2. get confound file for subject for task to add to design matrix
3. get task spec CSV for subject for task
4. setup subject info structure
'''
import os
import pandas as pd
import json
from nipype.interfaces.base import Bunch
def make_pmod(df, conditions, pmods={}, normalize='mean'):
pmod = []
for cond in conditions:
if not pmods.get(cond):
pmod.append(None)
else:
df2 = df[df.trial_type==cond]
pmod_name = pmods.get(cond)
#pmod = [pmod] if not type(pmods) is list else pmod
# MAKE SURE THERE IS VARIANCE IN PMOD VECTOR
if df2[pmod_name].var()==0:
df2[pmod_name]+=0.001
# APPLY NORMALIZATION
if normalize=='mean':
df2[pmod_name] = df2[pmod_name] - df2[pmod_name].mean()
pmod.append(Bunch(name=[pmod_name],
param=[df2[pmod_name].values.tolist()
],
poly=[1]
))
return pmod
def map_spec_to_model(spec_df,model):
"""
Maps spec trial names to model contrast trials.
Args:
spec: the events.tsv spec file
model: the model.json file
Returns:
pandas dataframe object
"""
spec=spec_df.copy()
for con in model['Conditions']:
spec_trials = model['Conditions'][con]
spec.loc[spec.trial_type.isin(spec_trials),'trial_type'] = con
spec.onset.sort_values()
return spec
with open(model_path) as fh:
model_def = json.load(fh)
pmod = None if not model_def.get('Modulators') else []
TASK_NAME = model_def['TaskName']
TASK_RUNS = model_def['Runs']
MODEL_NAME = model_def['ModelName']
PROJECT_ID = model_def['ProjectID']
condition_names = list(model_def['Conditions'].keys())
PROJECT_DIR = os.path.join('/data00/projects/megameta', PROJECT_ID)
SUBJ_DIR = os.path.join(PROJECT_DIR,'derivatives', 'batch8')
realign_files = []
subject_info = []
# check to see which runs exist for subject
# by looking for appropriate events.tsv files
# this could (should?) also include looking for the nifti file?
runs_for_subj = [run for run in TASK_RUNS
if
os.path.exists(os.path.join(SUBJ_DIR, subject_id, 'func',
'{}_task-{}_run-0{}_events.tsv'.format(subject_id,
TASK_NAME,
run)))
]
if DEBUG:
print("runs_for_subj", runs_for_subj)
print("checked paths:")
for run in TASK_RUNS:
print('\t', os.path.join(SUBJ_DIR, subject_id, 'func',
'{}_task-{}_run-0{}_events.tsv'.format(subject_id,
TASK_NAME,
run)))
print("TASK NAME", TASK_NAME)
print("pmod", pmod)
print("TASK_RUNS", TASK_RUNS)
print("subject_id", subject_id)
for run_num, _ in enumerate(runs_for_subj,1):
events_df = pd.read_csv(os.path.join(SUBJ_DIR, subject_id, 'func',
'{}_task-{}_run-0{}_events.tsv'.format(subject_id,
TASK_NAME,
run_num)),
sep='\t')
onsets_df = map_spec_to_model(events_df, model_def)
realign_file = os.path.join(PROJECT_DIR, 'working','nipype',
'workingdir_model_{}_{}'.format(TASK_NAME.upper(),MODEL_NAME),
'{}-run-0{}-realign.txt'.format(subject_id, run_num))
confound_file=os.path.join(SUBJ_DIR, subject_id, 'func',
'{}_task-{}_run-0{}_desc-confounds-regressors.tsv'.format(subject_id,
TASK_NAME,
run_num)
)
confound_df = pd.read_csv(confound_file, sep='\t')
cols_to_use = [ 'TransX','TransY', 'TransZ', 'RotX', 'RotY', 'RotZ']
confound_df[cols_to_use].to_csv(realign_file,
header=False,
index=False,
sep='\t')
realign_files.append(realign_file)
onsets = []
dur = []
for cond in model_def['Conditions']:
onsets.append(onsets_df[onsets_df.trial_type==cond].onset.values)
dur.append(onsets_df[onsets_df.trial_type==cond].duration.values)
#pmod = make_pmod(rdf, condition_names)
if model_def.get('Modulators'):
pmod = make_pmod(onsets_df, condition_names,
pmods=model_def['Modulators'])
subject_info.append(Bunch(conditions=condition_names,
onsets=onsets,
durations=dur,
amplitudes=None,
tmod=None,
pmod=pmod,
regressor_names=None,
regressors=None))
DM_regressors = []
for cond in condition_names:
DM_regressors.append(cond)
if pmod and model_def['Modulators'].get(cond):
DM_regressors.append('{}x{}^1'.format(cond, model_def['Modulators'].get(cond)))
return subject_info, realign_files, DM_regressors
# ### Set up contrasts
#
# * This part of the template needs work to provide a cleaner way to specify contrasts
# * Could use the same vector contrasts approach as we have in batch8 and then have a function to convert this into the list of list data structure that nipype spm contrasts node looks for
def make_contrast_list(subject_id, condition_names, model_path, DEBUG=False):
import json
condition_names.append('constant')
cont = []
for idx, cname in enumerate(condition_names):
ccode = [0 if pos!=idx else 1 for pos in range(len(condition_names))]
cont.append([cname, 'T', condition_names, ccode])
# add custom contrasts from the JSON model file
with open(model_path) as fh:
model_def = json.load(fh)
contrasts = model_def.get('Contrasts')
if not contrasts:
return cont
for contrast in contrasts:
cname = contrast['name']
pos_idx = [condition_names.index(p) for p in contrast['pos']]
neg_idx = [condition_names.index(n) for n in contrast['neg']]
pos_length = len(contrast['pos'])
neg_length = len(contrast['neg'])
ccode = []
for idx, _ in enumerate(condition_names):
if idx in pos_idx:
ccode.append(1/pos_length)
elif idx in neg_idx:
                ccode.append(-1/neg_length)
else:
ccode.append(0)
cont.append([cname, 'T', condition_names, ccode])
if DEBUG:
print(contrast)
print(ccode)
return cont
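# A concrete (hypothetical) illustration of the structure this function returns, using the
# nipype/SPM convention of [name, stat, condition_names, weights]:
example_conditions = ['image', 'control', 'constant']   # hypothetical condition names
example_contrasts = [
    ['image',         'T', example_conditions, [1, 0, 0]],
    ['control',       'T', example_conditions, [0, 1, 0]],
    ['constant',      'T', example_conditions, [0, 0, 1]],
    ['image>control', 'T', example_conditions, [1.0, -1.0, 0]],  # custom contrast from the JSON
]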
# ## Set up processing nodes for modeling workflow
# #### Specify model node
# SpecifyModel - Generates SPM-specific Model
modelspec = pe.Node(model.SpecifySPMModel(concatenate_runs=False,
                                          input_units='secs',
                                          output_units='secs',
                                          time_repetition=TR,
                                          high_pass_filter_cutoff=128),
                    name="modelspec")
# #### Level 1 Design node
#
# ** TODO -- get the right matching template file for fmriprep **
#
# * ??do we need a different mask than:
#
# `'/data00/tools/spm8/apriori/brainmask_th25.nii'`
# Level1Design - Generates an SPM design matrix
level1design = pe.Node(spm.Level1Design(bases={'hrf': {'derivs': [0, 0]}},
timing_units='secs',
interscan_interval=TR,
model_serial_correlations='none', #'AR(1)',
mask_image = '/data00/tools/spm8/apriori/brainmask_th25.nii',
global_intensity_normalization='none'
),
name="level1design")
# #### Estimate Model node
# EstimateModel - estimate the parameters of the model
level1estimate = pe.Node(spm.EstimateModel(estimation_method={'Classical': 1}),
name="level1estimate")
# #### Estimate Contrasts node
# EstimateContrast - estimates contrasts
conestimate = pe.Node(spm.EstimateContrast(), name="conestimate")
# ## Setup pipeline workflow for level 1 model
# +
# Initiation of the 1st-level analysis workflow
l1analysis = pe.Workflow(name='l1analysis')
# Connect up the 1st-level analysis components
l1analysis.connect([(modelspec, level1design, [('session_info',
'session_info')]),
(level1design, level1estimate, [('spm_mat_file',
'spm_mat_file')]),
(level1estimate, conestimate, [('spm_mat_file',
'spm_mat_file'),
('beta_images',
'beta_images'),
('residual_image',
'residual_image')])
])
# -
# ## Set up nodes for file handling and subject selection
# ### `getsubjectinfo` node
#
# * Use `get_subject_info()` function to generate spec data structure for first level model design matrix
# Get Subject Info - get subject specific condition information
getsubjectinfo = pe.Node(util.Function(input_names=['subject_id', 'model_path'],
output_names=['subject_info', 'realign_params', 'condition_names'],
function=get_subject_info),
name='getsubjectinfo')
makecontrasts = pe.Node(util.Function(input_names=['subject_id', 'condition_names', 'model_path'],
output_names=['contrasts'],
function=make_contrast_list),
name='makecontrasts')
# ### `infosource` node
#
# * iterate over list of subject ids and generate subject ids and produce list of contrasts for subsequent nodes
# +
# Infosource - a function free node to iterate over the list of subject names
infosource = pe.Node(util.IdentityInterface(fields=['subject_id', 'model_path']
),
name="infosource")
infosource.iterables = [('subject_id', subject_list),
('model_path', [JSON_MODEL_FILE]*len(subject_list))
]
# -
# ### `selectfiles` node
#
# * match template to find source files (functional) for use in subsequent parts of pipeline
# +
# SelectFiles - to grab the data (alternativ to DataGrabber)
## TODO: here need to figure out how to incorporate the run number and task name in call
templates = {'func': '{subject_id}/func/{subject_id}_task-'+TASK_NAME+'_run-0*_space-MNI152-T1-1mm_desc-preproc_bold.nii.gz'}
selectfiles = pe.Node(nio.SelectFiles(templates,
                                      base_directory='/data00/projects/megameta/{}/derivatives/batch8'.format(PROJECT_NAME)),
                      name="selectfiles")
# -
# ## Unzip and smoothing steps
#
# * BIDS derivatives folders contain unsmoothed functional NIFTI files in zipped (.nii.gz) format
# * This subflow adds three nodes:
# 1. gunzip
# 2. resample
# 3. smooth
# #### Specify unzip node
#
# * transform `.nii.gz` to `.nii`
gunzip = pe.MapNode(Gunzip(),name="gunzip", iterfield=['in_file'])
# #### Specify smoothing node
smooth = pe.Node(interface=spm.Smooth(), name="smooth")
#fwhmlist = [4,6,8]
fwhmlist = [8]
smooth.iterables = ('fwhm', fwhmlist)
# #### Specify resampling node
resample = pe.MapNode(interface=spm.utils.Reslice(),
name='resample',
iterfield=['in_file'])
resample.inputs.space_defining = '/data00/projects/megameta/templates/reference_medium_wad.nii'
# +
unzip_resample_and_smooth = pe.Workflow(name='unzip_resample_and_smooth')
unzip_resample_and_smooth.base_dir = os.path.join(SUBJ_DIR, working_dir)
unzip_resample_and_smooth.connect(
[
(gunzip, resample, [('out_file', 'in_file')]),
(resample, smooth, [('out_file', 'in_files')])
]
)
# -
# ### Specify datasink node
#
# * copy files to keep from various working folders to output folder for model for subject
# +
# Datasink - creates output folder for important outputs
datasink = pe.Node(nio.DataSink(base_directory=SUBJ_DIR,
parameterization=True,
#container=output_dir
),
name="datasink")
datasink.inputs.base_directory = output_dir
# Use the following DataSink output substitutions
substitutions = []
subjFolders = [('_model_path.*subject_id_%s/_fwhm_%s' % (sub,f), 'fwhm_%s' % (f))
for f in fwhmlist
for sub in subject_list]
substitutions.extend(subjFolders)
datasink.inputs.regexp_substitutions = substitutions
# -
# ---------
# ## Set up workflow for whole process
# +
pipeline = pe.Workflow(name='first_level_model_{}_{}'.format(TASK_NAME.upper(),MODEL_NAME))
pipeline.base_dir = os.path.join(SUBJ_DIR, working_dir)
pipeline.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
(infosource, getsubjectinfo, [('subject_id', 'subject_id'),
('model_path', 'model_path')
]),
(infosource, makecontrasts, [('subject_id', 'subject_id'),
('model_path', 'model_path')
]),
(getsubjectinfo, makecontrasts, [('condition_names', 'condition_names')]),
(getsubjectinfo, l1analysis, [('subject_info',
'modelspec.subject_info'),
('realign_params',
'modelspec.realignment_parameters')]),
(makecontrasts, l1analysis, [('contrasts',
'conestimate.contrasts')]),
(selectfiles, unzip_resample_and_smooth, [('func','gunzip.in_file')]),
(unzip_resample_and_smooth, l1analysis, [('smooth.smoothed_files',
'modelspec.functional_runs')]),
(infosource, datasink, [('subject_id','container')]),
(l1analysis, datasink, [('conestimate.spm_mat_file','@spm'),
('level1estimate.beta_images','@betas'),
('level1estimate.mask_image','@mask'),
('conestimate.spmT_images','@spmT'),
('conestimate.con_images','@con'),
('conestimate.spmF_images','@spmF')
])
]
)
# -
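# ---------
# ## Run the workflow
#
# * The connected pipeline is typically executed with one of nipype's execution plugins; the plugin and process count below are illustrative choices rather than project settings
# +
pipeline.write_graph(graph2use='colored')                     # optional: save a graph of the workflow
pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})  # run subjects in parallel
# -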
| archive/first_level.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, WeightedRandomSampler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# +
##############
## Column Names
##############
dtypes = {
"duration": np.int8,
"protocol_type": np.object,
"service": np.object,
"flag": np.object,
"src_bytes": np.int8,
"dst_bytes": np.int8,
"land": np.int8,
"wrong_fragment": np.int8,
"urgent": np.int8,
"hot": np.int8,
"m_failed_logins": np.int8,
"logged_in": np.int8,
"num_compromised": np.int8,
"root_shell": np.int8,
"su_attempted": np.int8,
"num_root": np.int8,
"num_file_creations": np.int8,
"num_shells": np.int8,
"num_access_files": np.int8,
"num_outbound_cmds": np.int8,
"is_host_login": np.int8,
"is_guest_login": np.int8,
"count": np.int8,
"srv_count": np.int8,
"serror_rate": np.float16,
"srv_serror_rate": np.float16,
"rerror_rate": np.float16,
"srv_rerror_rate": np.float16,
"same_srv_rate": np.float16,
"diff_srv_rate": np.float16,
"srv_diff_host_rate": np.float16,
"dst_host_count": np.int8,
"dst_host_srv_count": np.int8,
"dst_host_same_srv_rate": np.float16,
"dst_host_diff_srv_rate": np.float16,
"dst_host_same_src_port_rate": np.float16,
"dst_host_srv_diff_host_rate": np.float16,
"dst_host_serror_rate": np.float16,
"dst_host_srv_serror_rate": np.float16,
"dst_host_rerror_rate": np.float16,
"dst_host_srv_rerror_rate": np.float16,
"label": np.object
}
columns = ["duration","protocol_type","service","flag","src_bytes","dst_bytes","land","wrong_fragment","urgent","hot","m_failed_logins",
"logged_in", "num_compromised","root_shell","su_attempted","num_root","num_file_creations","num_shells","num_access_files",
"num_outbound_cmds","is_host_login","is_guest_login","count","srv_count","serror_rate","srv_serror_rate","rerror_rate","srv_rerror_rate",
"same_srv_rate","diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count","dst_host_same_srv_rate","dst_host_diff_srv_rate",
"dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate","dst_host_rerror_rate",
"dst_host_srv_rerror_rate","label"]
# +
##############
## Data Loading + Prepping Stage
##############
#df = pd.read_csv('/home/mheichler/venv/Datasets/UNSW_NB15_training-set.csv')
df = pd.read_csv('C:/Users/Michael/anac_enviro/Datasets/kddcup.data_10_percent', sep=",", names=columns, dtype=dtypes, index_col=None)
df.label = df.label.apply(lambda x: 'Normal' if x == 'normal.' else 'Attack')
class2idx = {
'Normal':0,
'Attack':1,
}
idx2class = {v: k for k, v in class2idx.items()}
df['label'].replace(class2idx, inplace=True)
#df = df.drop('label', axis = 1)
category_col = ["duration","protocol_type","service","flag","land","wrong_fragment","urgent","hot","m_failed_logins",
"logged_in", "num_compromised","root_shell","su_attempted","num_root","num_file_creations","num_shells","num_access_files",
"num_outbound_cmds","is_host_login","is_guest_login","count","srv_count",
"dst_host_count","dst_host_srv_count"]
numerical_col = ["src_bytes","dst_bytes","rerror_rate","srv_rerror_rate", "same_srv_rate","diff_srv_rate","srv_diff_host_rate", "dst_host_same_srv_rate","dst_host_diff_srv_rate",
"dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate","dst_host_rerror_rate",
"dst_host_srv_rerror_rate"]
num_feature = df.drop(category_col,axis = 1)
X = num_feature.iloc[:, 0:-1]
y = num_feature.iloc[:, -1]
# -
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
df.head()
# +
##############
## Unsupervised: Only training with normal data
##############
X_train = num_feature[num_feature.label==0].sample(10000)
y_train = X_train.iloc[:, -1]
X_train = X_train.drop('label', axis = 1)
##############
## Split into train+val and test
##############
X_val, X_test, y_val, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
# +
##############
## Min/Max Scaling
##############
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_val = scaler.transform(X_val)
X_test = scaler.transform(X_test)
X_train, y_train = np.array(X_train), np.array(y_train)
X_val, y_val = np.array(X_val), np.array(y_val)
X_test, y_test = np.array(X_test), np.array(y_test)
# +
def get_class_distribution(obj):
count_dict = {
'Normal':0,
'Attack':1,
}
for i in obj:
if i == 0:
count_dict['Normal'] += 1
elif i == 1:
count_dict['Attack'] += 1
else:
print("Check classes.")
return count_dict
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(25,7))# Train
sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_train)]).melt(), x = "variable", y="value", hue="variable", ax=axes[0]).set_title('Class Distribution in Train Set')# Validation
sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_val)]).melt(), x = "variable", y="value", hue="variable", ax=axes[1]).set_title('Class Distribution in Val Set')# Test
sns.barplot(data = pd.DataFrame.from_dict([get_class_distribution(y_test)]).melt(), x = "variable", y="value", hue="variable", ax=axes[2]).set_title('Class Distribution in Test Set')
# -
X_train = np.pad(X_train, ((0, 0), (0, 64 - len(X_train[0]))), 'constant').reshape(-1, 1, 8, 8)
X_val = np.pad(X_val, ((0, 0), (0, 64 - len(X_val[0]))), 'constant').reshape(-1, 1, 8, 8)
X_test = np.pad(X_test, ((0, 0), (0, 64 - len(X_test[0]))), 'constant').reshape(-1, 1, 8, 8)
# +
class ClassifierDataset(Dataset):
def __init__(self, X_data, y_data):
self.X_data = X_data
self.y_data = y_data
def __getitem__(self, index):
return self.X_data[index], self.y_data[index], index
def __len__ (self):
return len(self.X_data)
train_dataset = ClassifierDataset(torch.from_numpy(X_train).float(), torch.from_numpy(y_train).long())
val_dataset = ClassifierDataset(torch.from_numpy(X_val).float(), torch.from_numpy(y_val).long())
test_dataset = ClassifierDataset(torch.from_numpy(X_test).float(), torch.from_numpy(y_test).long())
# +
EPOCHS = 50
BATCH_SIZE = 32
LEARNING_RATE = 1e-3
NUM_FEATURES = len(X.columns)
NUM_CLASSES = 1
nu = 0.1
test_auc = None
test_scores = None
# +
train_loader = DataLoader(dataset=train_dataset,
batch_size=BATCH_SIZE
)
val_loader = DataLoader(dataset=val_dataset, batch_size=BATCH_SIZE)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE)
# -
class CNN(nn.Module):
def __init__(self, num_feature, num_class):
super(CNN, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(1, 6, 3, stride=1, padding=1),
nn.BatchNorm2d(6),
nn.ReLU(True),
nn.Conv2d(6, 16, 3, stride=1, padding=0),
nn.BatchNorm2d(16),
nn.ReLU(True),
nn.MaxPool2d(2, 2)
)
self.fc = nn.Sequential(
nn.Linear(144, 512),
nn.Linear(512, 256),
nn.Linear(256, num_class)
)
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
def load(self, model_path):
self.load_state_dict(torch.load(model_path))
self.eval()
    def predict(self, dataset):
        outputs = self(dataset)
        _, predicted = torch.max(outputs, 1)
        return predicted
# +
model = CNN(num_feature = NUM_FEATURES, num_class=NUM_CLASSES)
model.to(device)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE)
print(model)
# -
def init_center_c(dataloader, net, eps=0.1):
    """Initialize the hypersphere center c as the mean of the network outputs over the data."""
    n_samples = 0
    c = torch.zeros(NUM_CLASSES, device=device)
    net.eval()
    with torch.no_grad():
        for data, _, _ in dataloader:
            inputs = data.to(device)
            outputs = net(inputs)
            n_samples += outputs.shape[0]
            c += torch.sum(outputs, dim=0)
c /= n_samples
print(c)
c[(abs(c) < eps) & (c < 0)] = -eps
c[(abs(c) < eps) & (c > 0)] = eps
return c
# +
def get_radius(dist, nu):
return np.quantile(np.sqrt(dist.clone().data.cpu()), 1 - nu)
R = torch.tensor(0.0, device=device)  # radius, kept on the same device as the model
# -
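# `R` and `get_radius` are defined above but the training loop below keeps R fixed and uses the
# simple one-class objective. As a hedged sketch (an assumption, not part of this notebook's
# training loop), a soft-boundary variant would refresh R from the batch distances like this:
def update_radius(dist, nu, device=device):
    """Reset R to the (1 - nu) quantile of the current distances to the center c."""
    return torch.tensor(get_radius(dist, nu), device=device)
# e.g. inside the epoch loop:  R = update_radius(dist, nu)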
c = init_center_c(train_loader, model)
accuracy_stats = {
'train': [],
"val": []
}
loss_stats = {
'train': [],
"val": []
}
# +
print("Begin training.")
for e in tqdm(range(1, EPOCHS+1)):
# TRAINING
n_batches = 0
n_batches_val = 0
train_epoch_loss = 0
train_epoch_acc = 0
model.train()
for data in train_loader:
inputs, _, _ = data
inputs = inputs.to(device)
optimizer.zero_grad()
y_train_pred = model(inputs)
dist = torch.sum((y_train_pred - c) ** 2, dim=1)
train_loss = torch.mean(dist)
train_loss.backward()
optimizer.step()
train_epoch_loss += train_loss.item()
n_batches += 1
# VALIDATION
with torch.no_grad():
val_epoch_loss = 0
val_epoch_acc = 0
model.eval()
for data in val_loader:
inputs, label, _ = data
inputs = inputs.to(device)
y_val_pred = model(inputs)
dist_val = torch.sum((y_val_pred - c) ** 2, dim=1)
val_loss = torch.mean(dist_val)
val_epoch_loss += val_loss.item()
            n_batches_val += 1
loss_stats['train'].append(train_epoch_loss/len(train_loader))
loss_stats['val'].append(val_epoch_loss/len(val_loader))
print(f'Epoch {e+0:03}: | Train Loss: {train_epoch_loss/n_batches:.5f} | Val Loss: {val_epoch_loss/len(val_loader):.5f}')
# -
test_auc = None
idx_label_score = []
with torch.no_grad():
model.eval()
for data in test_loader:
inputs, labels, idx = data
inputs = inputs.to(device)
y_test_pred = model(inputs)
dist = torch.sum((y_test_pred - c)**2, dim=1)
scores_fin = dist
idx_label_score += list(zip(idx.cpu().data.numpy().tolist(),
labels.cpu().data.numpy().tolist(),
scores_fin.cpu().data.numpy().tolist()))
test_scores = idx_label_score
_, labels, scores = zip(*idx_label_score)
labels = np.array(labels)
scores = np.array(scores)
(unique, counts) = np.unique(labels, return_counts=True)
freq_test = np.asarray((unique, counts)).T
print(freq_test)
from sklearn.metrics import roc_auc_score
test_auc = roc_auc_score(labels, scores)
print('Test set AUC: {:.2f}%'.format(100. * test_auc))
# +
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(2):
fpr[i], tpr[i], _ = roc_curve(labels, scores)
roc_auc[i] = auc(fpr[i], tpr[i])
print (roc_auc_score(labels, scores))
plt.figure()
plt.plot(fpr[1], tpr[1])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.show()
# -
torch.save(model.state_dict(), "unsw_nb15")
model.load("unsw_nb15")
# +
#df_test = pd.read_csv('/home/mheichler/venv/Datasets/UNSW_NB15_testing-set.csv')
#df_test = pd.read_csv('C:/Users/Michael/anac_enviro/Datasets/111554_267091_compressed_UNSW_NB15_testing-set.csv')
df_test = pd.read_csv('C:/Users/Michael/anac_enviro/Datasets/UNSW_NB15_testing-set.csv')
df_test = df_test.drop('attack_cat', axis = 1)
pred_label = df_test.label
pred_label = pred_label.to_numpy()
df_test = df_test.drop('label', axis = 1)
num_feature_test = df_test.drop(category_col,axis = 1)
numeric_dataset = scaler.fit_transform(num_feature_test)
numeric_dataset = np.array(numeric_dataset)
numeric_dataset = np.pad(numeric_dataset, ((0, 0), (0, 64 - len(numeric_dataset[0]))), 'constant').reshape(-1, 1, 8, 8)
numeric_tensor = ClassifierDataset(torch.from_numpy(numeric_dataset).float(), torch.from_numpy(pred_label).long())
#numeric_tensor = torch.from_numpy(numeric_dataset).float().to(device)
pred_loader = DataLoader(dataset=numeric_tensor, batch_size=BATCH_SIZE)
model.eval()
pred_scores = []
with torch.no_grad():
    for data in pred_loader:
        inputs, _, _ = data  # ClassifierDataset returns (X, y, index)
        inputs = inputs.to(device)
        y_test_pred = model(inputs)
        dist = torch.sum((y_test_pred - c)**2, dim=1)
        pred_scores.extend(dist.cpu().data.numpy().tolist())
# -
pred_label
(unique, counts) = np.unique(pred_scores, return_counts=True)
freq_pred = np.asarray((unique, counts)).T
print(freq_pred)
pred_scores
# `predicted` is not produced by the model directly; as an assumption here, hard labels are
# derived by thresholding the anomaly scores at the (1 - nu) quantile, mirroring get_radius()
predicted = (np.array(pred_scores) > np.quantile(np.array(pred_scores), 1 - nu)).astype(int)
(unique, counts) = np.unique(predicted, return_counts=True)
frequencies = np.asarray((unique, counts)).T
print(frequencies)
#df_test2 = pd.read_csv('/home/mheichler/venv/Datasets/UNSW_NB15_testing-set.csv')
df_test2 = pd.read_csv('C:/Users/Michael/anac_enviro/Datasets/111554_267091_compressed_UNSW_NB15_testing-set.csv.zip')
df_test2['attack_cat'].replace(class2idx, inplace=True)
result = pd.DataFrame(data={'Id': df_test2['id'], 'attack_cat': df_test2['attack_cat'], 'predict': predicted})
result.to_csv(path_or_buf='submittion.csv', index = False, header = True)
sns.catplot(y="attack_cat", hue="predict", kind="count",
palette="pastel", edgecolor=".6",
data=result);
result.predict.unique()
result.attack_cat.unique()
result.Id.count()
| Kddcup99_Dataset-OneClass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-cesm2-marbl]
# language: python
# name: conda-env-miniconda3-cesm2-marbl-py
# ---
# +
# %load_ext autoreload
# %autoreload 2
import json
import os
import pprint
import random
import shutil
from functools import reduce, partial
from operator import mul
import yaml
import xarray as xr
from distributed import Client
from distributed.utils import format_bytes
from tqdm.auto import tqdm
import pandas as pd
from collections import Counter
import dask
import intake
from ncar_jobqueue import NCARCluster
from helpers import (create_grid_dataset, enforce_chunking, get_grid_vars,
print_ds_info, process_variables, save_data, zarr_store, fix_time, inspect_written_stores)
#dask.config.set({"distributed.dashboard.link": "/proxy/{port}/status"})
xr.set_options(keep_attrs=True)
import numpy as np
# -
cluster = NCARCluster(memory="10GB")
cluster.scale(20)
client = Client(cluster)
client
cluster
col = intake.open_esm_datastore(
"/glade/work/mgrover/intake-esm-catalogs/new-cesm2-le.json",
)
col
cluster
dirout = "/glade/scratch/abanihi/lens2-aws"
def _preprocess(ds, variables):
"""Drop all unnecessary variables and coordinates"""
vars_to_drop = [vname for vname in ds.data_vars if vname not in variables]
coord_vars = [
vname
for vname in ds.data_vars
if "time" not in ds[vname].dims or "bound" in vname or "bnds" in vname
]
ds_fixed = ds.set_coords(coord_vars)
data_vars_dims = []
for data_var in ds_fixed.data_vars:
data_vars_dims.extend(list(ds_fixed[data_var].dims))
coords_to_drop = [
coord for coord in ds_fixed.coords if coord not in data_vars_dims
]
grid_vars = list(
set(vars_to_drop + coords_to_drop)
- set(["time", "time_bound", "time_bnds", "time_bounds"])
)
ds_fixed = ds_fixed.drop(grid_vars).reset_coords()
if "history" in ds_fixed.attrs:
del ds_fixed.attrs["history"]
return ds_fixed
variables=['T']
# +
with open("test_config.yaml") as f:
config = yaml.safe_load(f)
print(config)
# -
run_config = []
variables = []
for component, stream_val in config.items():
for stream, v in stream_val.items():
frequency = v["frequency"]
freq = v["freq"]
time_bounds_dim = v["time_bounds_dim"]
variable_categories = list(v["variable_category"].keys())
for v_cat in variable_categories:
experiments = list(
v["variable_category"][v_cat]["experiment"].keys()
)
for exp in experiments:
chunks = v["variable_category"][v_cat]["experiment"][exp][
"chunks"
]
variable = v["variable_category"][v_cat]["variable"]
variables.extend(variable)
col_subset, query = process_variables(
col, variable, component, stream, exp
)
if not col_subset.df.empty:
d = {
"query": query,
"col": col_subset,
"chunks": chunks,
"frequency": frequency,
"freq": freq,
"time_bounds_dim": time_bounds_dim,
}
run_config.append(d)
run_config
def determine_chunk_size(ds):
ntime = len(ds.time) # the number of time slices
chunksize_optimal = 100e6 # desired chunk size in bytes
ncfile_size = ds.nbytes # the netcdf file size
chunksize = max(int(ntime* chunksize_optimal/ ncfile_size),1)
    target_chunks = dict(ds.dims)  # copy the size mapping so the dataset's own dims are not mutated
    target_chunks['time'] = chunksize
return target_chunks # a dictionary giving the chunk sizes in each dimension
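# A small self-contained check of `determine_chunk_size` on a synthetic dataset (illustrative
# only; the loop below applies it to the real CESM output, where the file is large enough that
# the resulting time chunk is much smaller than the full time axis):
_demo = xr.Dataset(
    {"T": (("time", "lat", "lon"), np.zeros((120, 4, 4)))},
    coords={"time": np.arange(120)},
)
print(determine_chunk_size(_demo))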
field_separator = '.'
for run in tqdm(run_config, desc="runs"):
print("*" * 120)
query = run["query"]
print(f"query = {query}")
frequency = run["frequency"]
chunks = run["chunks"]
cftime_freq = run["freq"]
time_bounds_dim = run["time_bounds_dim"]
#if query["experiment"] == "20C" and query["stream"] == "cice.h1":
# if query["component"] == "ice_sh":
# preprocess = _preprocess_ice_sh
# elif query["component"] == "ice_nh":
# preprocess = _preprocess_ice_nh
#elif query["component"] == "lnd":
# preprocess = _preprocess_lnd
#elif query["component"] == "atm":
# preprocess = _preprocess_atm
#print(preprocess.__name__)
with dask.config.set(**{'array.slicing.split_large_chunks': False}):
dsets = run["col"].to_dataset_dict(
cdf_kwargs={"chunks": chunks, "decode_times": True, "use_cftime": True},
progressbar=True,
)
dsets = enforce_chunking(dsets, chunks, field_separator)
for key, ds in tqdm(dsets.items(), desc="Saving zarr store"):
ds = ds.sortby('time')
ds = _preprocess(ds, query['variable'])
chunks = determine_chunk_size(ds)
print(ds.get_index("time").is_monotonic_increasing)
key = key.split(field_separator)
component = query['component']
experiment = query['experiment']
stream = query['stream']
forcing_variant = key[-2]
variable = key[-1]
if frequency != "hourly6":
if experiment == 'historical':
start_time = "1850-01"
end_time = "2015-01"
ds = fix_time(
ds,
start=start_time,
end=end_time,
freq=cftime_freq,
time_bounds_dim=time_bounds_dim,
)
store = zarr_store(experiment,
component,
frequency,
forcing_variant,
variable,
write=False,
dirout=dirout
)
save_data(ds, store)
elif experiment == 'ssp370':
start_time = "2015-01"
end_time = "2101-01"
ds = fix_time(
ds,
start=start_time,
end=end_time,
freq=cftime_freq,
time_bounds_dim=time_bounds_dim,
)
store = zarr_store(experiment,
component,
frequency,
forcing_variant,
variable,
write=False,
dirout=dirout
)
save_data(ds, store)
| zarrification/zarrification_lens2_process.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Exploring data with Pandas
#
# <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Agenda
#
# * What is *pandas*?
# * Data structures
# * Features
# * Integrations
# + [markdown] slideshow={"slide_type": "slide"}
# # [github.com/felipemfp/python-day-natal-2017](https://github.com/felipemfp/python-day-natal-2017)
# + [markdown] slideshow={"slide_type": "slide"}
# # What is *pandas*?
# + [markdown] slideshow={"slide_type": "subslide"}
# > pandas is an **open source**, **BSD-licensed** library providing **high-performance**, **easy-to-use data structures** and **data analysis tools** for the Python programming language
# + [markdown] slideshow={"slide_type": "subslide"}
# ## What is *pandas*?
#
# * Open-source library
# * **PAN**el **D**ata **S**ystem
# * +11k 🌟 on GitHub
# + [markdown] slideshow={"slide_type": "subslide"}
# ## *numpy*
#
# * *ndarray* and matrix
# * Access by index
# * Single data type
# + slideshow={"slide_type": "-"}
import numpy as np
arr = np.array([1, 3, 5, 7], dtype=np.int64)
arr
# + [markdown] slideshow={"slide_type": "-"}
# | index | 0 | 1 | 2 | 3 |
# | - | - | - | - | - |
# | **element** | 1 | 3 | 5 | 7 |
# + [markdown] slideshow={"slide_type": "subslide"}
# ## *pandas*
#
# * Built on top of *numpy*
# * Access by label (index or column)
# * Varied data types
# + [markdown] slideshow={"slide_type": "-"}
# ![DataFrame example](images/dataframe-example.png)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Why?
#
# * Applicable to the real world
# * Intuitive data structures
# * Batteries included for data preparation, analysis and exploration
# + [markdown] slideshow={"slide_type": "subslide"}
# ## *pandas* in the ecosystem
#
# ![](images/pandas-context-01.jpg)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## *pandas* in the ecosystem
#
# ![](images/pandas-context-02.jpg)
# + [markdown] slideshow={"slide_type": "slide"}
# # Data Structures
# -
import pandas as pd
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Data Structures
#
# ![](images/data-structures.png)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Series
#
# * 1-dimensional labelled list
# * Any data type
#   * integers
#   * strings
#   * floats
#   * objects
#
# ```python
# s = pd.Series(data, index=index)
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating Series
#
# ### From an `np.ndarray`
# + slideshow={"slide_type": "-"}
s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
s
# -
s.index
pd.Series(np.random.randn(5))
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating Series
#
# ### From a `dict`
# -
d = {'a' : 0., 'b' : 1., 'c' : 2.}
pd.Series(d)
pd.Series(d, index=['b', 'c', 'd', 'a'])
# > `NaN` means "Not a Number" and is the standard marker for missing values.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating Series
#
# ### From a `value`
# + slideshow={"slide_type": "-"}
pd.Series(5., index=['a', 'b', 'c', 'd', 'e'])
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Series are like `np.ndarray`
# -
s[0]
s[:3]
s[[4, 3, 1]]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Series are like `dict`
# -
s['a']
s['e'] = 12.
s
'e' in s
'f' in s
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operations with Series
# -
s + s
s * 3
s[1:] + s[:-1]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## DataFrame
#
# * Composition of `Series`
# * Columns with different types
#
# ```python
# df = pd.DataFrame(data, index=index)
# ```
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating DataFrames
#
# ### From a `dict`
# -
d = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating DataFrames
#
# ### From a `dict`
# -
pd.DataFrame(d, index=['d', 'b', 'a'])
pd.DataFrame(d, index=['d', 'b', 'a'], columns=['two', 'three'])
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating DataFrames
#
# ### From a list of `dict`
# -
data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]
pd.DataFrame(data2)
pd.DataFrame(data2, index=['first', 'second'])
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating DataFrames
#
# ### From a `Series`
#
# * Keeps the index
# * A single column named after the Series or the passed argument (a quick sketch follows)
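#
# For example (the values are illustrative):
# -
s_demo = pd.Series([1., 2., 3.], index=['a', 'b', 'c'], name='one')
pd.DataFrame(s_demo)        # a single column named 'one', the index is preserved
s_demo.to_frame('renamed')  # equivalent, choosing a different column name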
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operations with DataFrame
#
# ### Selection
# -
df['one']
# ### Addition
df['three'] = df['one'] * df['two']
df['flag'] = df['one'] > 2
df
df['foo'] = 'bar'
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operations with DataFrame
#
# ### Deletion
# -
del df['two']
three = df.pop('three')
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Panel
#
# * Composition of `DataFrames`
# * Deprecated
# + [markdown] slideshow={"slide_type": "slide"}
# # Features
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating objects
# -
s = pd.Series([1,3,5,np.nan,6,8])
s
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating objects
# -
dates = pd.date_range('20130101', periods=6)
dates
# + slideshow={"slide_type": "-"}
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Creating objects
# -
df2 = pd.DataFrame({ 'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo' })
df2
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Understanding the data
# -
df.head()
df.tail(3)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Understanding the data
# -
df.index
df.columns
df.values
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Understanding the data
# -
df.describe()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Understanding the data
# -
df.info()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Manipulating the data
#
# ### Getting the transpose
# -
df.T
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Manipulating the data
#
# ### Sorting by index
# -
df.sort_index(axis=1, ascending=False)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Manipulating the data
#
# ### Sorting by values
# -
df.sort_values(by='B')
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
# -
df['A']
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
# -
df[0:3]
df['20130102':'20130104']
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By label
# -
df.loc[dates[0]]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By label
# -
df.loc[:,['A','B']]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By label
# -
df.loc['20130102':'20130104',['A','B']]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By label
# -
df.loc['20130102',['A','B']]
df.loc[dates[0],'A']
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By position
# -
df.iloc[3]
df.iloc[3:5,0:2]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By position
# -
df.iloc[[1,2,4],[0,2]]
df.iloc[1:3,:]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By position
# -
df.iloc[:,1:3]
df.iloc[1,1]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### By condition
# -
df[df.A > 0]
df[df > 0]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Selecting data
#
# ### `isin()`
# -
df2 = df.copy()
df2['E'] = ['one', 'one','two','three','four','three']
df2
df2[df2['E'].isin(['two','four'])]
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Modifying the data
#
# ### Adding columns
# -
s1 = pd.Series([1,2,3,4,5,6], index=pd.date_range('20130102', periods=6))
s1
df['F'] = s1
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Modifying the data
#
# ### Setting values
# -
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Modifying the data
#
# ### Setting values
# -
df.at[dates[0],'A'] = 0
df.iat[0,1] = 0
df.loc[:,'D'] = np.array([5] * len(df))
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Modifying the data
#
# ### Setting values with conditions
# -
df2 = df.copy()
df2
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Modifying the data
#
# ### Setting values with conditions
# -
df2[df2 > 0] = -df2
df2
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Processing the data
# -
df1 = df.reindex(index=dates[0:4], columns=list(df.columns) + ['E'])
df1.loc[dates[0]:dates[1],'E'] = 1
df1
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Processing the data
# -
df1
pd.isnull(df1)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Processing the data
# -
df1.dropna(how='any')
df1.fillna(value=5)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Processing the data
#
# ### `apply()`
# -
df.apply(np.cumsum)
df.apply(lambda x: x.max() - x.min())
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Processing the data
#
# ### `apply()`
# -
df.apply(lambda x: [max(1, y) for y in x])
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operating on the data
#
# ### Statistics
# -
df.mean()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operating on the data
#
# ### Statistics
# -
df.mean(1)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operating on the data
#
# ### Statistics
# -
s = pd.Series(np.random.randint(0, 7, size=10))
s
s.value_counts()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Operating on the data
#
# ### Strings
# -
s = pd.Series(['A', 'B', 'C', 'Aaba', 'Baca', np.nan, 'CABA', 'dog', 'cat'])
s
s.str.lower()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Merge
# -
df = pd.DataFrame(np.random.randn(10, 4))
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Merge
# -
pieces = [df[:3], df[3:7], df[7:]]
pd.concat(pieces)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Join
# -
left = pd.DataFrame({'key': ['foo', 'foo'], 'lval': [1, 2]})
right = pd.DataFrame({'key': ['foo', 'foo'], 'rval': [4, 5]})
left
right
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Join
# -
pd.merge(left, right, on='key')
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Append
# -
df = pd.DataFrame(np.random.randn(8, 4), columns=['A','B','C','D'])
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Append
# -
s = df.iloc[3]
s
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Merging data
#
# ### Append
# -
df.append(s, ignore_index=True)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Grouping data
# -
df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B' : ['one', 'one', 'two', 'three',
'two', 'two', 'one', 'three'],
'C' : np.random.randn(8),
'D' : np.random.randn(8)})
df
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Grouping data
# -
df.groupby('A').sum()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Grouping data
# -
df.groupby(['A','B']).sum()
# + [markdown] slideshow={"slide_type": "slide"}
# # Integrations
# + slideshow={"slide_type": "skip"}
# %matplotlib inline
# + slideshow={"slide_type": "-"}
import matplotlib.pyplot as plt
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Matplotlib
# -
ts = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
ts.plot()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Matplotlib
# -
df = pd.DataFrame(np.random.randn(1000, 4), index=ts.index,
columns=['A', 'B', 'C', 'D'])
df = df.cumsum()
plt.figure(); df.plot(); plt.legend(loc='best')
# + [markdown] slideshow={"slide_type": "subslide"}
# ## CSV
# -
df.to_csv('data/foo.csv', index_label='date')
df = pd.read_csv('data/foo.csv', index_col='date')
df.head()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## HDF5
# -
df.to_hdf('data/foo.h5','df')
df = pd.read_hdf('data/foo.h5','df')
df.head()
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Excel
# -
df.to_excel('data/foo.xlsx', index_label='date', sheet_name='Sheet1')
df = pd.read_excel('data/foo.xlsx', 'Sheet1', index_col='date', na_values=['NA'])
df.head()
# + [markdown] slideshow={"slide_type": "slide"}
# # Thank you! Questions?
#
# <NAME>
# @felipemfp
# <EMAIL>
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Links
#
# - [Intro to Data Structures](http://pandas.pydata.org/pandas-docs/stable/dsintro.html)
# - [10 Minutes to pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html)
# - [PyConJP 2015: pandas internals by Sinhrks](https://speakerdeck.com/sinhrks/pyconjp-2015-pandas-internals)
# - [Pandas for Data Analysis by phanhoang17](https://speakerdeck.com/huyhoang17/pandas-for-data-analysis)
| explorando-dados-com-pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import nltk
nltk.download(['punkt', 'wordnet','averaged_perceptron_tagger'])
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.metrics import confusion_matrix, classification_report, f1_score, roc_auc_score
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, AdaBoostClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator, TransformerMixin
import pickle
# -
# load data from database
engine = create_engine('sqlite:///../data/DisasterResponse.db')
# engine.table_names()
df = pd.read_sql("SELECT * FROM messages", engine)
X = df['message']
y = df.iloc[:, 4:].values  # skip id, message, original and genre so y matches target_names below
def tokenize(text):
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
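# Quick sanity check of the tokenizer on an illustrative message (the sentence is made up):
tokenize("We need tents and water in the northern district!")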
# Build a custom transformer which will extract the starting verb of a sentence
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
    """
    Starting Verb Extractor class
    This class extracts the starting verb of a sentence,
    creating a new feature for the ML classifier
    """
    def starting_verb(self, text):
        sentence_list = nltk.sent_tokenize(text)
        for sentence in sentence_list:
            pos_tags = nltk.pos_tag(tokenize(sentence))
            # guard against sentences that tokenize to nothing
            if not pos_tags:
                continue
            first_word, first_tag = pos_tags[0]
            if first_tag in ['VB', 'VBP'] or first_word == 'RT':
                return True
        return False
    # Given it is a transformer we can simply return self
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
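# Minimal sanity check of the custom transformer on two illustrative sentences:
StartingVerbExtractor().transform(pd.Series(["Send help now", "The storm destroyed the bridge"]))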
pipeline2 = Pipeline([
('features', FeatureUnion([
('text_pipeline', Pipeline([
('count_vectorizer', CountVectorizer(tokenizer=tokenize)),
('tfidf_transformer', TfidfTransformer())
])),
('starting_verb_transformer', StartingVerbExtractor())
])),
('classifier', MultiOutputClassifier(RandomForestClassifier(n_estimators=200,min_samples_split=3)))
])
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.2)
pipeline2.fit(X_train, y_train)
y_pred = pipeline2.predict(X_test)
print(classification_report(y_test,y_pred,target_names=df.columns.values[4:]))
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Example-2-GP-BS-Derivatives
# Author: <NAME>
# Version: 1.0 (28.4.2020)
# License: MIT
# Email: <EMAIL>
# Notes: tested on Mac OS X running Python 3.6.9 with the following packages:
# scikit-learn=0.22.1, numpy=1.18.1, matplotlib=3.1.3
# Citation: Please cite the following reference if this notebook is used for research purposes:
# <NAME>. and <NAME>, Machine Learning in Finance: From Theory to Practice, Springer Graduate textbook Series, 2020.
# -
# # Calculating the Greeks
# # Overview
# The purpose of this notebook is to demonstrate the derivation of the greeks in a Gaussian Process Regression model (GP), fitted to option price data.
#
# In this notebook, European option prices are generated from the Black-Scholes model. The notebook begins by building a GP call model, where the input is the underlying price. The delta is then derived and compared with the Black-Scholes (BS)
# delta. The exercise is repeated, but using the volatility as the input instead of the underlying price. The vega of the GP is then derived and compared with the BS vega.
# +
from BlackScholes import bsformula
import numpy as np
import scipy as sp
from sklearn import gaussian_process
from sklearn.gaussian_process.kernels import RBF
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Black-Scholes Model
# First, set the model parameters
KC = 130 # Call strike
KP = 70 # Put strike
r = 0.002 # risk-free rate
sigma = 0.4 # implied volatility
T = 2.0 # Time to maturity
S0 = 100 # Underlying spot
lb = 0 # lower bound on domain
ub = 300 # upper bound on domain
training_number = 100 # Number of training samples
testing_number = 50 # Number of testing samples
sigma_n = 1e-8 # additive noise in GP
# Define the call and put prices using the BS model
call = lambda x, y: bsformula(1, lb+(ub-lb)*x, KC, r, T, y, 0)[0]
put = lambda x, y: bsformula(-1, lb+(ub-lb)*x, KP, r, T, y, 0)[0]
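# For instance, the BS call price at the spot S0 (with the input scaled to the unit domain) can be checked with:
call((S0 - lb) / (ub - lb), sigma)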
# # Delta
# Generate the training and testing data, where the input is the gridded underlying price and the outputs are the option prices.
# +
x_train = np.array(np.linspace(0.01, 1.2, training_number), dtype='float32').reshape(training_number, 1)
x_test = np.array(np.linspace(0.01, 1.0, testing_number), dtype='float32').reshape(testing_number, 1)
y_train = []
for idx in range(len(x_train)):
y_train.append(call(x_train[idx], sigma))
y_train = np.array(y_train)
# -
# Fit the GP model to the generated data
sk_kernel = RBF(length_scale=1.0, length_scale_bounds=(0.01, 10000.0))
gp = gaussian_process.GaussianProcessRegressor(kernel=sk_kernel, n_restarts_optimizer=20)
gp.fit(x_train, y_train)
# Get the model's predicted outputs for each of the test inputs
y_pred, sigma_hat = gp.predict(x_test, return_std=True)
# Derive the GP delta
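# The cell below uses the closed-form derivative of the GP posterior mean: with
# $\alpha = (K + \sigma_n^2 I)^{-1} \mathbf{y}$ the predictive mean is $\bar{f}(x_*) = k(x_*, X)\,\alpha$, so
# $\partial_{x_*}\bar{f}(x_*) = \partial_{x_*}k(x_*, X)\,\alpha$, and for the RBF kernel
# $\partial_{x_*}k(x_*, x_i) = \frac{x_i - x_*}{\ell^2}\,k(x_*, x_i)$.
# The final division by $(ub - lb)$ converts the derivative from the scaled input back to the underlying price.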
# +
l = gp.kernel_.length_scale
rbf = gaussian_process.kernels.RBF(length_scale=l)
Kernel = rbf(x_train, x_train)
K_y = Kernel + np.eye(training_number) * sigma_n
L = sp.linalg.cho_factor(K_y)
alpha_p = sp.linalg.cho_solve(L, y_train)  # cho_solve expects the (c, lower) pair returned by cho_factor
k_s = rbf(x_test, x_train)
k_s_prime = (x_train.T - x_test) * k_s / l**2
f_prime = np.dot(k_s_prime, alpha_p) / (ub - lb)
# -
# Calculate the BS delta
delta = lambda x, y: bsformula(1, lb+(ub-lb)*x, KC, r, T, y, 0)[1]
delta(x_test, sigma) - f_prime
# Compare the GP delta with the BS delta
plt.figure(figsize = (10,6),facecolor='white', edgecolor='black')
plt.plot(lb+(ub-lb)*x_test, delta(x_test,sigma), color = 'black', label = 'Exact')
plt.plot(lb+(ub-lb)*x_test, f_prime, color = 'red', label = 'GP')
plt.grid(True)
plt.xlabel('S')
plt.ylabel('$\Delta$')
plt.legend(loc = 'best', prop={'size':10});
# Show the error between the GP delta and the BS delta
plt.figure(figsize = (10,6),facecolor='white', edgecolor='black')
plt.plot(lb+(ub-lb)*x_test, delta(x_test,sigma) - f_prime, color = 'black', label = 'GP Error')
plt.grid(True)
plt.xlabel('S')
plt.ylabel('Error in $\Delta$')
plt.legend(loc = 'best', prop={'size':10});
# ## Vega
# Generate the training and testing data, where the input is now the gridded implied volatility and the outputs are the option prices. The underlying spot is held fixed and again scaled to the unit domain.
# +
x_train = np.array(np.linspace(0.01, 1.2, training_number), dtype='float32').reshape(training_number, 1)
x_test = np.array(np.linspace(0.01, 1.0, testing_number), dtype='float32').reshape(testing_number, 1)
y_train = []
for idx in range(len(x_train)):
y_train.append(call((S0-lb)/(ub-lb), x_train[idx]))
y_train = np.array(y_train)
# -
# Fit the GP model to the generated data
sk_kernel = RBF(length_scale=1.0, length_scale_bounds=(0.01, 10000.0))
gp = gaussian_process.GaussianProcessRegressor(kernel=sk_kernel, n_restarts_optimizer=20)
gp.fit(x_train, y_train)
# Get the model's predicted outputs for each of the test inputs
y_pred, sigma_hat = gp.predict(x_test, return_std=True)
# Derive the GP vega
# +
l = gp.kernel_.length_scale
rbf = gaussian_process.kernels.RBF(length_scale=l)
Kernel= rbf(x_train, x_train)
K_y = Kernel + np.eye(training_number) * sigma_n
L = sp.linalg.cho_factor(K_y)
alpha_p = sp.linalg.cho_solve(L, y_train)  # cho_solve expects the (c, lower) pair returned by cho_factor
k_s = rbf(x_test, x_train)
k_s_prime = np.zeros([len(x_test), len(x_train)])
for i in range(len(x_test)):
for j in range(len(x_train)):
k_s_prime[i, j] = (1.0/l**2) * (x_train[j] - x_test[i]) * k_s[i, j]
f_prime = np.dot(k_s_prime, alpha_p)
# -
# Calculate the BS vega
vega = lambda x, y: bsformula(1, lb + (ub-lb) * x, KC, r, T, y, 0)[2]
vega((S0-lb)/(ub-lb), x_test) - f_prime
# Compare the GP vega with the BS vega
#
plt.figure(figsize = (10,6), facecolor='white', edgecolor='black')
plt.plot(x_test, vega((S0-lb)/(ub-lb), x_test), color = 'black', label = 'Exact')
plt.plot(x_test, f_prime, color = 'red', label = 'GP')
plt.grid(True)
plt.xlabel('$\\sigma$')
plt.ylabel('$\\nu$')
plt.legend(loc = 'best', prop={'size':10});
# Plot the error between the GP vega and the BS vega
#
plt.figure(figsize = (10,6), facecolor='white', edgecolor='black')
plt.plot(x_test, vega((S0-lb)/(ub-lb), x_test)-f_prime, color = 'black', label = 'GP Error')
plt.grid(True)
plt.xlabel('$\\sigma$')
plt.ylabel('Error in $\\nu$')
plt.legend(loc = 'best', prop={'size':10});
# # Idea: Calculate Gamma and then explain what a Gamma Squeeze is
#
# https://www.fool.com/investing/2021/01/28/what-is-a-gamma-squeeze/
| Chapter3-GPs/.ipynb_checkpoints/Example-2-GP-BS-Derivatives-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Umer7W0VbITT"
# ## Setup Colab
# + colab_type="code" id="LO_MNEQ7Bhbw" colab={}
# %tensorflow_version 1.x
# !pip install tensorflow-compression
# ![[ -e tfc ]] || git clone https://github.com/tensorflow/compression tfc
# %cd tfc/examples
import tfci # Check if tfci.py is available.
# + [markdown] colab_type="text" id="op4hPwy_mkPm"
# ## Enabling GPU
#
# GPU should be enabled for this colab. If the next cell prints a warning, do the following:
# - Navigate to Edit→Notebook Settings
# - select GPU from the Hardware Accelerator drop-down
#
# + colab_type="code" id="x-yLUG_tmo3M" colab={}
import tensorflow as tf
if not tf.test.is_gpu_available():
print('WARNING: No GPU found. Might be slow!')
else:
print('Found GPU.')
# + [markdown] colab_type="text" id="rQ9-8ZsTf7Hj"
# ## Imports and Definitions
# + colab_type="code" id="vtd1l70Pf95V" colab={}
import os
import zipfile
from google.colab import files
import collections
from PIL import Image
from IPython.display import Image as DisplayImage
from IPython.display import Javascript
from IPython.core.display import display, HTML
import tfci
import urllib.request
tf.get_logger().setLevel('WARN') # Only show Warnings
FILES_DIR = '/content/files'
OUT_DIR = '/content/out'
DEFAULT_IMAGE_URL = ('https://storage.googleapis.com/hific/clic2020/'
'images/originals/ad249bba099568403dc6b97bc37f8d74.png')
MODEL = 'hific-lo'
TMP_OUT = 'out.tfci'
os.makedirs(FILES_DIR, exist_ok=True)
os.makedirs(OUT_DIR, exist_ok=True)
File = collections.namedtuple('File', ['full_path', 'num_bytes', 'bpp'])
def print_html(html):
display(HTML(html + '<br/>'))
def make_cell_large():
display(Javascript(
'''google.colab.output.setIframeHeight(0, true, {maxHeight: 5000})'''))
def get_default_image(output_dir):
output_path = os.path.join(output_dir, os.path.basename(DEFAULT_IMAGE_URL))
print('Downloading', DEFAULT_IMAGE_URL, '\n->', output_path)
urllib.request.urlretrieve(DEFAULT_IMAGE_URL, output_path)
print('Caching model...')
tfci.import_metagraph(MODEL)
print('Done')
# + [markdown] colab_type="text" id="4Ngs9WvmbTMH"
# ## Load files
# + colab_type="code" id="NgtIlL2ADCI2" colab={}
#@title Loading Images { vertical-output: false, run: "auto", display-mode: "form" }
#@markdown Tick the following if you want to upload your own images to compress.
#@markdown Otherwise, a default image will be used.
#@markdown
#@markdown **Note**: We support JPG and PNG (without alpha channels).
#@markdown
upload_custom_images = False #@param {type:"boolean"}
if upload_custom_images:
uploaded = files.upload()
for name, content in uploaded.items():
with open(os.path.join(FILES_DIR, name), 'wb') as fout:
print('Writing', name, '...')
fout.write(content)
# + id="e0C4vMqZsnqA" colab_type="code" colab={}
all_files = os.listdir(FILES_DIR)
if not upload_custom_images or not all_files:
print('Downloading default...')
get_default_image(FILES_DIR)
print()
all_files = os.listdir(FILES_DIR)
print(f'Got following files ({len(all_files)}):')
for file_name in all_files:
img = Image.open(os.path.join(FILES_DIR, file_name))
w, h = img.size
img = img.resize((w // 15, h // 15))
print('- ' + file_name + ':')
display(img)
# + [markdown] id="guX3Q_AsTE7-" colab_type="text"
# # Compress images
# + colab_type="code" id="kd02HOhLBj6e" colab={}
SUPPORTED_EXT = {'.png', '.jpg'}
all_files = os.listdir(FILES_DIR)
if not all_files:
raise ValueError("Please upload images!")
def get_bpp(image_dimensions, num_bytes):
w, h = image_dimensions
return num_bytes * 8 / (w * h)
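# Quick check with illustrative numbers: a 1920x1080 image stored in 50 kB is ~0.19 bits per pixel.
print('example bpp:', get_bpp((1920, 1080), 50_000))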
def has_alpha(img_p):
im = Image.open(img_p)
return im.mode == 'RGBA'
all_outputs = []
for file_name in all_files:
if os.path.isdir(file_name):
continue
if not any(file_name.endswith(ext) for ext in SUPPORTED_EXT):
print('Skipping', file_name, '...')
continue
full_path = os.path.join(FILES_DIR, file_name)
if has_alpha(full_path):
print('Skipping because of alpha channel:', file_name)
continue
file_name, _ = os.path.splitext(file_name)
output_path = os.path.join(OUT_DIR, f'{file_name}.png')
if os.path.isfile(output_path):
print('Skipping', output_path, '-- exists already.')
continue
print('Compressing', file_name, '...')
tfci.compress(MODEL, full_path, TMP_OUT)
num_bytes = os.path.getsize(TMP_OUT)
print('Decompressing...')
tfci.decompress(TMP_OUT, output_path)
all_outputs.append(
File(output_path, num_bytes,
get_bpp(Image.open(full_path).size, num_bytes)))
print('All done!')
# + [markdown] id="HQhQQs-CTkgy" colab_type="text"
# # Show output
# + colab_type="code" id="3nVCPeDnskD8" colab={}
make_cell_large() # Larger output window.
for file in all_outputs:
print_html('<hr/>')
print(f'Showing {file.full_path} | {file.num_bytes//1000}kB | {file.bpp:.4f}bpp')
display(Image.open(file.full_path))
print_html('<hr/>')
# + [markdown] id="4b-wkBnyrTAR" colab_type="text"
# ### Download all compressed images.
#
# To download all images, run the following cell.
#
# You can also use the _Files_ tab on the left to manually select images.
#
# ---
#
# #### **Note**: the images are saved as PNGs and thus very large. The bitrate used by HiFiC is given in the name.
# + id="9BKccvcTpj1k" colab_type="code" colab={}
ZIP = '/content/images.zip'
with zipfile.ZipFile(ZIP, 'w') as zf:
for f in all_outputs:
path_with_bpp = f.full_path.replace('.png', f'-{f.bpp:.3f}bpp.png')
zf.write(f.full_path, os.path.basename(path_with_bpp))
files.download(ZIP)
| models/hific/colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pprint
import sys
if "../" not in sys.path:
sys.path.append("../")
from lib.envs.gridworld import GridworldEnv
pp = pprint.PrettyPrinter(indent=2)
env = GridworldEnv()
def value_iteration(env, theta=0.0001, discount_factor=1.0):
"""
Value Iteration Algorithm.
Args:
env: OpenAI env. env.P represents the transition probabilities of the environment.
env.P[s][a] is a list of transition tuples (prob, next_state, reward, done).
        env.nS is the number of states in the environment.
        env.nA is the number of actions in the environment.
theta: We stop evaluation once our value function change is less than theta for all states.
discount_factor: Gamma discount factor.
Returns:
A tuple (policy, V) of the optimal policy and the optimal value function.
"""
def one_step_lookahead(state, V):
"""
        Helper function to calculate the value for all actions in a given state.
Args:
state: The state to consider (int)
V: The value to use as an estimator, Vector of length env.nS
Returns:
A vector of length env.nA containing the expected value of each action.
"""
A = np.zeros(env.nA)
for a in range(env.nA):
for prob, next_state, reward, done in env.P[state][a]:
A[a] += prob * (reward + discount_factor * V[next_state])
return A
V = np.zeros(env.nS)
while True:
# Stopping condition
delta = 0
# Update each state...
for s in range(env.nS):
# Do a one-step lookahead to find the best action
A = one_step_lookahead(s, V)
best_action_value = np.max(A)
# Calculate delta across all states seen so far
delta = max(delta, np.abs(best_action_value - V[s]))
# Update the value function. Ref: Sutton book eq. 4.10.
V[s] = best_action_value
# Check if we can stop
if delta < theta:
break
# Create a deterministic policy using the optimal value function
policy = np.zeros([env.nS, env.nA])
for s in range(env.nS):
# One step lookahead to find the best action for this state
A = one_step_lookahead(s, V)
best_action = np.argmax(A)
# Always take the best action
policy[s, best_action] = 1.0
return policy, V
# +
policy, v = value_iteration(env)
print("Policy Probability Distribution:")
print(policy)
print("")
print("Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):")
print(np.reshape(np.argmax(policy, axis=1), env.shape))
print("")
print("Value Function:")
print(v)
print("")
print("Reshaped Grid Value Function:")
print(v.reshape(env.shape))
print("")
# -
# Test the value function
expected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])
np.testing.assert_array_almost_equal(v, expected_v, decimal=2)
| DP/Value Iteration Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# %env CUDA_VISIBLE_DEVICES=[]
# ### Crabeater seal vocalizations
# Source:
# - https://www.mobysound.org/
# - http://localhost:8186/tree/Datasets/mobysound/leopard_seal
#
#
# **Note**: This dataset is very noisy
from pathlib2 import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm.autonotebook import tqdm
from librosa.core.time_frequency import mel_frequencies
from avgn_paper.signalprocessing.spectrogramming_tf import spectrogram_tensorflow
from avgn_paper.utils.audio import load_wav, float32_to_int16, int16_to_float32, write_wav
from avgn_paper.visualization.spectrogram import visualize_spec, plot_spec
from avgn_paper.utils.general import HParams
from avgn_paper.signalprocessing.filtering import butter_bandpass_filter
# ### data locations
DSLOC = Path('/mnt/cube/Datasets/mobysound/crabeater_seal/')
readme = DSLOC/'readme_1st.txt'
readme = [line for line in open(readme, 'r')]
DSLOC
# ### Load labels
LABEL_LOC = DSLOC / 'low_moan.xls'
rate = 1000
def get_sec(time_str):
if '.' in time_str:
time_str, ms = time_str.split('.')
else:
ms = '0'
h, m, s = time_str.split(':')
return int(h) * 3600 + int(m) * 60 + int(s) + float('.'+ms)
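# Quick check of the helper on an illustrative timestamp:
get_sec('01:02:03.5')  # -> 3723.5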
label_df = pd.read_excel(LABEL_LOC)
label_df['start_time_s'] = [get_sec(time_str) for time_str in label_df['Start time [hh:mm:ss.ss]'].values]
label_df['end_time_s'] = [get_sec(time_str) for time_str in label_df['End time [hh:mm:ss.ss]'].values]
len(label_df)
label_df[:3]
# ### Make wav dataset
wavs = list((DSLOC / 'data').glob('2*.wav'))
wavs.sort()
len(wavs)
wavs[:3]
long_wav = np.concatenate([load_wav(wav_loc)[1] for wav_loc in tqdm(wavs)])
len(long_wav)
wav_ds = pd.DataFrame(columns = ['rate', 'data', 'file', 'wloc', 'wav_st', 'wav_et', 'ds_len'])
st = 0
for wav_loc in tqdm(wavs):
rate, data = load_wav(wav_loc)
ds_len = len(data)/rate
wav_ds.loc[len(wav_ds)] = [rate, data, wav_loc.stem, wav_loc, st, st+ds_len, ds_len]
st+=ds_len
np.unique(wav_ds.rate.values)
wav_ds[:5]
# ### grab a single test syllable
# +
win_length_ms = 2000
hop_length_ms = 10
print(win_length_ms / 1000 * rate)
hparams = HParams(
# spectrogramming
win_length=int(rate / 1000 * win_length_ms),
n_fft=4096 * 2,
hop_length=int(rate / 1000 * hop_length_ms),
ref_level_db=20,
min_level_db=-70,
# mel scaling
num_mel_bins=128,
mel_lower_edge_hertz=100,
mel_upper_edge_hertz=500,
# inversion
power=1.5, # for spectral inversion
griffin_lim_iters=50,
pad=True,
#
)
# create a filter to convolve with the spectrogram
mel_matrix = tf.signal.linear_to_mel_weight_matrix(
num_mel_bins=hparams.num_mel_bins,
num_spectrogram_bins=int(hparams.n_fft / 2) + 1,
sample_rate=rate,
lower_edge_hertz=hparams.mel_lower_edge_hertz,
upper_edge_hertz=hparams.mel_upper_edge_hertz,
dtype=tf.dtypes.float32,
name=None,
)
# gets the center frequencies of mel bands
mel_f = mel_frequencies(
n_mels=hparams.num_mel_bins + 2,
fmin=hparams.mel_lower_edge_hertz,
fmax=hparams.mel_upper_edge_hertz,
)
# Slaney-style mel is scaled to be approx constant energy per channel (from librosa)
enorm = tf.dtypes.cast(
tf.expand_dims(
tf.constant(
2.0 / (mel_f[2 : hparams.num_mel_bins + 2] - mel_f[: hparams.num_mel_bins])
),
0,
),
tf.float32,
)
mel_matrix = tf.multiply(mel_matrix, enorm)
mel_matrix = tf.divide(mel_matrix, tf.reduce_sum(mel_matrix, axis=0))
# -
len(label_df)
nex = 2000
syll_row = label_df.iloc[nex]
syll_data = long_wav[int(syll_row.start_time_s*rate):int(syll_row.end_time_s*rate)]
#syll_data = butter_bandpass_filter(syll_data, lowcut = syll_row['Low frequency [Hz]'], highcut=syll_row['High frequency [Hz]'], fs=rate, order=2)
spectrogram = spectrogram_tensorflow(int16_to_float32(syll_data), hparams)
mel_spectrogram = tf.tensordot(spectrogram,mel_matrix, 1)
fig, ax = plt.subplots(figsize=(10,5))
ax.matshow(mel_spectrogram.numpy().T, origin='lower', aspect='auto', cmap = plt.cm.afmhot)
ax.axis('off')
| notebooks/00.1-data-exploration/crabeater_seal/0.0-Crabeater-seal-vocalization-exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/FairozaAmira/AI-programming-1-a/blob/master/Lecture10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="OWsHpW6e3moP" colab_type="text"
# # Example Solutions to the In-Class Exercises of Lecture 10
#
# ## Iterators
#
# 1. Using `range`, write a loop that prints the numbers from 1 to 10.
# + id="OUAkPnhi3SlE" colab_type="code" outputId="89ff5fd8-b042-4bbb-b49d-e7928cb0be69" colab={"base_uri": "https://localhost:8080/", "height": 184}
for i in range (1,11):
print(i)
# + [markdown] id="wyrLspqY3_a1" colab_type="text"
# 2. Write a loop that prints the numbers from the list below one at a time.
#
# `[1,2,3,4,5]`
# + id="bSJUYOpc5JiS" colab_type="code" outputId="300cea38-5ba0-4c98-df09-83a1c4b4f0fc" colab={"base_uri": "https://localhost:8080/", "height": 100}
for number in [1, 2, 3, 4, 5]:
print (number)
# + [markdown] id="utbVnpu-5R-_" colab_type="text"
# 3. Using the `iter` and `next` functions, print each number of the list from exercise 2.
# + id="TKtuhDri5eoy" colab_type="code" outputId="854b434e-743e-44b9-8a2f-7a15b95950dd" colab={"base_uri": "https://localhost:8080/", "height": 100}
I = iter([1,2,3,4,5])
print(next(I))
print(next(I))
print(next(I))
print(next(I))
print(next(I))
# + [markdown] id="e_iSdeay5k8v" colab_type="text"
# 4. Try the following commands.
#
# `range(10)` <br/>
# `iter(range(10))`
# + id="gHNxpBzp5v0e" colab_type="code" outputId="6688944d-510f-4bf7-a54a-b997861c4a6d" colab={"base_uri": "https://localhost:8080/", "height": 33}
range(10)
# + id="41ziCvtP5zd6" colab_type="code" outputId="466a9977-c484-4332-bee4-35d39be09341" colab={"base_uri": "https://localhost:8080/", "height": 33}
iter(range(10))
# + [markdown] id="DZCNQ-Gn6QEt" colab_type="text"
# 5. Using the `enumerate` function, loop over the list below and print each index and value.
#
# `L = [1,2,3,4,5]`
# + id="ECMwssWG6fYV" colab_type="code" outputId="dcd98fa8-447f-4159-bc24-9e36051d1509" colab={"base_uri": "https://localhost:8080/", "height": 100}
L = [1,2,3,4,5]
for index, value in enumerate(L):
print(index, value)
# + id="WKpuIB4r6yaV" colab_type="code" outputId="5faac429-8311-49f3-e9aa-14e577970019" colab={"base_uri": "https://localhost:8080/", "height": 100}
for i in range(len(L)):
print(i, L[i])
# + [markdown] id="tN0xhubi61b1" colab_type="text"
# 6. Given the two lists below, print each value of list 2 to the right of the corresponding value of list 1.
#
# `L1 = [1,2,3,4,5]` <br/>
# `L2 = [6,7,8,9,10]`
#
# Hint: use the `zip` function.
# + id="18Q_259E7pnJ" colab_type="code" outputId="5787e9aa-a843-4543-a9fd-bb3dcc4628da" colab={"base_uri": "https://localhost:8080/", "height": 100}
L1 = [1,2,3,4,5]
L2 = [6,7,8,9,10]
for val_1, val_2 in zip(L1,L2):
print(val_1,val_2)
# + [markdown] id="LoQaSyFq7tK_" colab_type="text"
# 7. An iterable can also be expanded with `*`.
# Example: `*range()`
# + id="vUMljgex8MjQ" colab_type="code" outputId="43d34e56-4a4b-49c3-faf5-fdb71e283c51" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(*range(10))
# + [markdown] id="DEb7Rdkx8QSi" colab_type="text"
# 8. The `itertools` module can also be used for mathematical functions.
# Example:
# ```
# from itertools import permutations
# p = permutations(range(3))
# print(*p)
# ```
# This computes and displays the permutations of 3 elements.
# + id="pnsfYg5N8xHb" colab_type="code" outputId="dfa68dea-642c-4884-bf56-34323dcf5efb" colab={"base_uri": "https://localhost:8080/", "height": 33}
from itertools import permutations
p = permutations(range(3))
print(*p)
# + [markdown] id="ytGq6meT81tJ" colab_type="text"
# 9. Let's use the `zip` function with dictionaries. Create a dictionary from the lists below.
#
# `names = ["Barney", "Robin", "Ted", "Lily", "Marshall"]` <br/>
# `age = [16, 20, 24, 18, 30]`
# + id="AZ-wTUAR9TPU" colab_type="code" outputId="a5a996f0-fb69-40ce-dcd5-e93ef39335be" colab={"base_uri": "https://localhost:8080/", "height": 33}
names = ["Barney", "Robin", "Ted", "Lily", "Marshall"]
age = [16, 20, 24, 18, 30]
people = dict(zip(names, age))
print(people)
# + [markdown] id="W-kZDjSO9WF_" colab_type="text"
# 10. Build a tuple of pairs from the dictionary created in exercise 9 and unpack (unzip) it.
# + id="pSuj316E9JGR" colab_type="code" outputId="c92d7008-709a-49f0-c0fb-11e09520fa85" colab={"base_uri": "https://localhost:8080/", "height": 50}
cast = (("Barney", 16), ("Robin", 20), ("Ted", 24), ("Lily", 18), ("Marshall", 30))
# define names and ages here
names, age = zip(*cast)
print(names)
print(age)
# + [markdown] id="K5pKTt21_c9k" colab_type="text"
# ## List Comprehensions
#
# 1. Using a loop, create a list of the even numbers from 1 to 200.
# + id="2y8-P4eo5hk-" colab_type="code" outputId="a275c611-57e1-4563-8a2e-d959558b5a7d" colab={"base_uri": "https://localhost:8080/", "height": 53}
numbers = []
for number in range(1,200):
if number % 2 == 0:
numbers.append(number)
print(numbers)
# + [markdown] id="CFLHsDNL6Eh7" colab_type="text"
# 2. Using a list comprehension, write code that produces the same result as exercise 1.
# + id="lZzQQhNU62za" colab_type="code" outputId="b9d82694-922e-4a96-8d0f-e69ed1f227aa" colab={"base_uri": "https://localhost:8080/", "height": 53}
numbers = [number for number in range(1,200) if number % 2 == 0]
print(numbers)
# + [markdown] id="ODZH57F366uY" colab_type="text"
# 3. Using a list comprehension, write code that produces the following output.
#
#
# ```
# [(0,0), (0,1), (1,0), (1,1)]
# ```
#
#
# + id="GRyJiqQa7NOT" colab_type="code" outputId="79fd8c08-9307-459c-fe86-89cb3df1bafa" colab={"base_uri": "https://localhost:8080/", "height": 33}
L = [(i,j) for i in range(2) for j in range(2)]
print(L)
# + [markdown] id="eQhf8FwI7btX" colab_type="text"
# 4. Using a list comprehension, display the values of $n^2$ <br/>
# for $n = 1, \dots, 10$, <br/>
# i.e. each $n$ is replaced by $n^2$.
# + id="onpCJfI673uf" colab_type="code" outputId="9219cf90-5686-440d-fdde-eeaa21fcc400" colab={"base_uri": "https://localhost:8080/", "height": 33}
squares = [n ** 2 for n in range(11)]
print(squares)
# + [markdown] id="QZ-yD2Sk8c3M" colab_type="text"
# 5. Based on the dictionary below, use a list comprehension to create a list of the people whose score is at least 60.
#
#
# ```
# scores = {
# "Tanaka": 20,
# "Kimura": 60,
# "Li": 89,
# "Kim": 78,
# "Albert": 73
# }
# ```
#
#
# + id="gIeUUGKq9CPM" colab_type="code" outputId="9e78177d-efc7-449d-8709-fc3ecb5ef5cf" colab={"base_uri": "https://localhost:8080/", "height": 33}
scores = {
"Tanaka": 20,
"Kimura": 60,
"Li": 89,
"Kim": 78,
"<NAME>": 98
}
passed = [name for name, score in scores.items() if score >= 60]
print(passed)
# + [markdown] id="rWBi8yoO-Qz5" colab_type="text"
# ## Generators
#
# 1. Using a list comprehension and a generator expression, create the list below.
# The list consists of the multiples of 4 from 1 to 40.
#
# ```
# [4, 8, 12, 16, 20, 24, 28, 32, 36, 40]
# ```
#
#
# + id="KZzXwy6I-3gI" colab_type="code" outputId="d7127443-a2c3-487b-d22c-658676731d04" colab={"base_uri": "https://localhost:8080/", "height": 33}
numbers = [n * 4 for n in range(1,11)]
print(numbers)
# + id="3hJ1-iHQ_Dmz" colab_type="code" outputId="e1571d52-0fb6-4c32-f1c9-f79c804bac04" colab={"base_uri": "https://localhost:8080/", "height": 50}
numbers = (n * 4 for n in range(1,11))
print(numbers)
print (list(numbers))
# + [markdown] id="yu287sKO_HNZ" colab_type="text"
# 2. Using a generator, loop from 1 to 20, pause once after 10, and then continue the loop.
# The output should look like the following.
#
#
# ```
# 1 2 3 4 5 6 7 8 9 10
# いったん止める
# 11 12 13 14 15 16 17 18 19 20
# ```
#
#
# + id="nM6_HWC1_qrv" colab_type="code" outputId="a543ddd7-181f-4bee-8489-85e88e004f8d" colab={"base_uri": "https://localhost:8080/", "height": 67}
G = (n for n in range(1,21))
for n in G:
print (n, end=' ')
if n >= 10:
break
print("\nいったん止める")
for n in G:
print(n, end=' ')
# + [markdown] id="YHbW2AYE_xDh" colab_type="text"
# 3. Using `yield`, write a second generator function that is equivalent to the first generator expression you wrote.
# The output will be as follows.
#
#
# ```
# [4, 8, 12, 16, 20, 24, 28, 32, 36, 40]
# [4, 8, 12, 16, 20, 24, 28, 32, 36, 40]
# ```
# Also write the same result with a list comprehension and compare it with the generator function above.
#
# + id="fTTDQRDvBFGl" colab_type="code" outputId="dd159ad7-4a15-48fe-a293-4f41e67e279a" colab={"base_uri": "https://localhost:8080/", "height": 50}
numbers_1 = (n * 4 for n in range(1,11))
def gen():
for n in range(1,11):
yield n * 4
numbers_2 = gen()
a = list(numbers_1)
b = list(numbers_2)
print(a)
print(b)
# + id="uEMsTy_iBpTg" colab_type="code" outputId="3aaa7e1e-ca1f-4ff0-fa9f-c94f652f12a8" colab={"base_uri": "https://localhost:8080/", "height": 50}
L1 = [n * 4 for n in range(1,11)]
L2 = []
for n in range(1,11):
L2.append(n * 4)
print(L1)
print(L2)
# + [markdown] id="X142Wo7RCtHM" colab_type="text"
# 4. Write a generator function that produces the primes up to 100. A prime is a natural number greater than 1 whose only positive divisors are 1 and itself. Then display the primes up to 100 using `if` and `for` loops.
# + id="dh8DFh-aDJBh" colab_type="code" outputId="52fba208-92a2-4a50-b603-5c368e25e0b4" colab={"base_uri": "https://localhost:8080/", "height": 53}
def gen_primes(N):
primes = set()
for n in range(2, N):
if all(n % p > 0 for p in primes):
primes.add(n)
yield n
print(list(gen_primes(100)))
# + id="hWbWd0HuDXRG" colab_type="code" outputId="2edbab6c-da08-403b-9b10-49d6e6747917" colab={"base_uri": "https://localhost:8080/", "height": 53}
primes = []
for possiblePrime in range(2, 101):
# Assume number is prime until shown it is not.
isPrime = True
for num in range(2, possiblePrime):
if possiblePrime % num == 0:
isPrime = False
if isPrime:
primes.append(possiblePrime)
print(primes)
| Lecture10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Configurations for Colab
# +
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
# !apt-get install -y xvfb python-opengl > /dev/null 2>&1
# !pip install gym pyvirtualdisplay > /dev/null 2>&1
# !pip install JSAnimation==0.1
# !pip install pyglet==1.3.2
from pyvirtualdisplay import Display
# Start virtual display
dis = Display(visible=0, size=(400, 400))
dis.start()
# -
# # 04. Dueling Network
#
# [<NAME> al., "Dueling Network Architectures for Deep Reinforcement Learning." arXiv preprint arXiv:1511.06581, 2015.](https://arxiv.org/pdf/1511.06581.pdf)
#
# The proposed network architecture, which is named *dueling architecture*, explicitly separates the representation of state values and (state-dependent) action advantages.
#
# ![fig1](https://user-images.githubusercontent.com/14961526/60322956-c2f0b600-99bb-11e9-9ed4-443bd14bc3b0.png)
#
# The dueling network automatically produces separate estimates of the state value function and advantage function, without any extra supervision. Intuitively, the dueling architecture can learn which states are (or are not) valuable, without having to learn the effect of each action for each state. This is particularly useful in states where its actions do not affect the environment in any relevant way.
#
# The dueling architecture represents both the value $V(s)$ and advantage $A(s, a)$ functions with a single deep model whose output combines the two to produce a state-action value $Q(s, a)$. Unlike in advantage updating, the representation and algorithm are decoupled by construction.
#
# $$A^\pi (s, a) = Q^\pi (s, a) - V^\pi (s).$$
#
# The value function $V$ measures how good it is to be in a particular state $s$. The $Q$ function, however, measures the value of choosing a particular action when in this state. Now, using the definition of advantage, we might be tempted to construct the aggregating module as follows:
#
# $$Q(s, a; \theta, \alpha, \beta) = V (s; \theta, \beta) + A(s, a; \theta, \alpha),$$
#
# where $\theta$ denotes the parameters of the convolutional layers, while $\alpha$ and $\beta$ are the parameters of the two streams of fully-connected layers.
#
# Unfortunately, the above equation is unidentifiable in the sense that given $Q$ we cannot recover $V$ and $A$ uniquely; for example, there are uncountable pairs of $V$ and $A$ that make $Q$ values to zero. To address this issue of identifiability, we can force the advantage function estimator to have zero advantage at the chosen action. That is, we let the last module of the network implement the forward mapping.
#
# $$
# Q(s, a; \theta, \alpha, \beta) = V (s; \theta, \beta) + \big( A(s, a; \theta, \alpha) - \max_{a' \in |\mathcal{A}|} A(s, a'; \theta, \alpha) \big).
# $$
#
# This formula guarantees that we can recover the unique $V$ and $A$, but the optimization is not so stable because the advantages have to compensate any change to the optimal action’s advantage. Due to the reason, an alternative module that replaces the max operator with an average is proposed:
#
# $$
# Q(s, a; \theta, \alpha, \beta) = V (s; \theta, \beta) + \big( A(s, a; \theta, \alpha) - \frac{1}{|\mathcal{A}|} \sum_{a'} A(s, a'; \theta, \alpha) \big).
# $$
#
# Unlike the max advantage form, in this formula, the advantages only need to change as fast as the mean, so it increases the stability of optimization.
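# As a small, self-contained illustration (not part of the original tutorial): with the mean-subtracted
# aggregation, $V$ can be recovered from $Q$ alone, because averaging $Q$ over actions returns $V$.
# +
import numpy as np

rng = np.random.default_rng(0)
V = 3.0                          # scalar state value
A = rng.normal(size=5)           # raw advantages for 5 actions
Q = V + (A - A.mean())           # dueling aggregation with mean subtraction
print(np.isclose(Q.mean(), V))                   # True: V = mean_a Q(s, a)
print(np.allclose(Q - Q.mean(), A - A.mean()))   # True: centered advantages are recovered
# -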
# +
import os
from typing import Dict, List, Tuple
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from IPython.display import clear_output
from torch.nn.utils import clip_grad_norm_
# -
# ## Replay buffer
#
# Please see *01.dqn.ipynb* for detailed description.
class ReplayBuffer:
"""A simple numpy replay buffer."""
def __init__(self, obs_dim: int, size: int, batch_size: int = 32):
self.obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.next_obs_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size], dtype=np.float32)
self.rews_buf = np.zeros([size], dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.max_size, self.batch_size = size, batch_size
self.ptr, self.size, = 0, 0
def store(
self,
obs: np.ndarray,
act: np.ndarray,
rew: float,
next_obs: np.ndarray,
done: bool,
):
self.obs_buf[self.ptr] = obs
self.next_obs_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample_batch(self) -> Dict[str, np.ndarray]:
idxs = np.random.choice(self.size, size=self.batch_size, replace=False)
return dict(obs=self.obs_buf[idxs],
next_obs=self.next_obs_buf[idxs],
acts=self.acts_buf[idxs],
rews=self.rews_buf[idxs],
done=self.done_buf[idxs])
def __len__(self) -> int:
return self.size
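# A minimal usage sketch of the buffer (illustrative shapes and values):
# +
_buf = ReplayBuffer(obs_dim=4, size=100, batch_size=2)
for _ in range(3):
    _buf.store(np.zeros(4), 0, 1.0, np.zeros(4), False)
print(len(_buf), _buf.sample_batch()["obs"].shape)  # 3 (2, 4)
# -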
# ## Dueling Network
#
# Carefully take a look at advantage and value layers separated from feature layer.
class Network(nn.Module):
def __init__(self, in_dim: int, out_dim: int):
"""Initialization."""
super(Network, self).__init__()
# set common feature layer
self.feature_layer = nn.Sequential(
nn.Linear(in_dim, 128),
nn.ReLU(),
)
# set advantage layer
self.advantage_layer = nn.Sequential(
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, out_dim),
)
# set value layer
self.value_layer = nn.Sequential(
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, 1),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward method implementation."""
feature = self.feature_layer(x)
value = self.value_layer(feature)
advantage = self.advantage_layer(feature)
q = value + advantage - advantage.mean(dim=-1, keepdim=True)
return q
# ## DQN + DuelingNet Agent (w/o Double-DQN & PER)
#
# Here is a summary of DQNAgent class.
#
# | Method | Note |
# | --- | --- |
# |select_action | select an action from the input state. |
# |step | take an action and return the response of the env. |
# |compute_dqn_loss | return dqn loss. |
# |update_model | update the model by gradient descent. |
# |target_hard_update| hard update from the local model to the target model.|
# |train | train the agent during num_frames. |
# |test | test the agent (1 episode). |
# |plot | plot the training progresses. |
#
#
# Aside from the dueling network architecture, the authors suggest to use Double-DQN and Prioritized Experience Replay as extra components for better performance. However, we don't implement them to simplify the tutorial. Here, DQNAgent is totally same as the one from *01.dqn.ipynb*.
class DQNAgent:
"""DQN Agent interacting with environment.
Attribute:
env (gym.Env): openAI Gym environment
memory (ReplayBuffer): replay memory to store transitions
batch_size (int): batch size for sampling
epsilon (float): parameter for epsilon greedy policy
epsilon_decay (float): step size to decrease epsilon
max_epsilon (float): max value of epsilon
min_epsilon (float): min value of epsilon
target_update (int): period for target model's hard update
gamma (float): discount factor
dqn (Network): model to train and select actions
dqn_target (Network): target model to update
optimizer (torch.optim): optimizer for training dqn
transition (list): transition information including
state, action, reward, next_state, done
"""
def __init__(
self,
env: gym.Env,
memory_size: int,
batch_size: int,
target_update: int,
epsilon_decay: float,
max_epsilon: float = 1.0,
min_epsilon: float = 0.1,
gamma: float = 0.99,
):
"""Initialization.
Args:
env (gym.Env): openAI Gym environment
memory_size (int): length of memory
batch_size (int): batch size for sampling
target_update (int): period for target model's hard update
epsilon_decay (float): step size to decrease epsilon
max_epsilon (float): max value of epsilon
min_epsilon (float): min value of epsilon
gamma (float): discount factor
"""
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
self.env = env
self.memory = ReplayBuffer(obs_dim, memory_size, batch_size)
self.batch_size = batch_size
self.epsilon = max_epsilon
self.epsilon_decay = epsilon_decay
self.max_epsilon = max_epsilon
self.min_epsilon = min_epsilon
self.target_update = target_update
self.gamma = gamma
# device: cpu / gpu
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
print(self.device)
# networks: dqn, dqn_target
self.dqn = Network(obs_dim, action_dim).to(self.device)
self.dqn_target = Network(obs_dim, action_dim).to(self.device)
self.dqn_target.load_state_dict(self.dqn.state_dict())
self.dqn_target.eval()
# optimizer
self.optimizer = optim.Adam(self.dqn.parameters())
# transition to store in memory
self.transition = list()
# mode: train / test
self.is_test = False
def select_action(self, state: np.ndarray) -> np.ndarray:
"""Select an action from the input state."""
# epsilon greedy policy
if self.epsilon > np.random.random():
selected_action = self.env.action_space.sample()
else:
selected_action = self.dqn(
torch.FloatTensor(state).to(self.device)
).argmax()
selected_action = selected_action.detach().cpu().numpy()
if not self.is_test:
self.transition = [state, selected_action]
return selected_action
def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:
"""Take an action and return the response of the env."""
next_state, reward, done, _ = self.env.step(action)
if not self.is_test:
self.transition += [reward, next_state, done]
self.memory.store(*self.transition)
return next_state, reward, done
def update_model(self) -> torch.Tensor:
"""Update the model by gradient descent."""
samples = self.memory.sample_batch()
loss = self._compute_dqn_loss(samples)
self.optimizer.zero_grad()
loss.backward()
# we clip the gradients to have their norm less than or equal to 10.
clip_grad_norm_(self.dqn.parameters(), 10.0)
self.optimizer.step()
return loss.item()
def train(self, num_frames: int, plotting_interval: int = 200):
"""Train the agent."""
self.is_test = False
state = self.env.reset()
update_cnt = 0
epsilons = []
losses = []
scores = []
score = 0
for frame_idx in range(1, num_frames + 1):
action = self.select_action(state)
next_state, reward, done = self.step(action)
state = next_state
score += reward
# if episode ends
if done:
state = env.reset()
scores.append(score)
score = 0
# if training is ready
if len(self.memory) >= self.batch_size:
loss = self.update_model()
losses.append(loss)
update_cnt += 1
# linearly decrease epsilon
self.epsilon = max(
self.min_epsilon, self.epsilon - (
self.max_epsilon - self.min_epsilon
) * self.epsilon_decay
)
epsilons.append(self.epsilon)
# if hard update is needed
if update_cnt % self.target_update == 0:
self._target_hard_update()
# plotting
if frame_idx % plotting_interval == 0:
self._plot(frame_idx, scores, losses, epsilons)
self.env.close()
def test(self) -> List[np.ndarray]:
"""Test the agent."""
self.is_test = True
state = self.env.reset()
done = False
score = 0
frames = []
while not done:
frames.append(self.env.render(mode="rgb_array"))
action = self.select_action(state)
next_state, reward, done = self.step(action)
state = next_state
score += reward
print("score: ", score)
self.env.close()
return frames
def _compute_dqn_loss(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:
"""Return dqn loss."""
device = self.device # for shortening the following lines
state = torch.FloatTensor(samples["obs"]).to(device)
next_state = torch.FloatTensor(samples["next_obs"]).to(device)
action = torch.LongTensor(samples["acts"].reshape(-1, 1)).to(device)
reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device)
done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
# G_t = r + gamma * v(s_{t+1}) if state != Terminal
# = r otherwise
curr_q_value = self.dqn(state).gather(1, action)
next_q_value = self.dqn_target(next_state).max(
dim=1, keepdim=True
)[0].detach()
mask = 1 - done
target = (reward + self.gamma * next_q_value * mask).to(self.device)
# calculate dqn loss
loss = F.smooth_l1_loss(curr_q_value, target)
return loss
def _target_hard_update(self):
"""Hard update: target <- local."""
self.dqn_target.load_state_dict(self.dqn.state_dict())
def _plot(
self,
frame_idx: int,
scores: List[float],
losses: List[float],
epsilons: List[float],
):
"""Plot the training progresses."""
clear_output(True)
plt.figure(figsize=(20, 5))
plt.subplot(131)
plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))
plt.plot(scores)
plt.subplot(132)
plt.title('loss')
plt.plot(losses)
plt.subplot(133)
plt.title('epsilons')
plt.plot(epsilons)
plt.show()
# ## Environment
#
# You can see the [code](https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py) and [configurations](https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L53) of CartPole-v0 from OpenAI's repository.
# environment
env_id = "CartPole-v0"
env = gym.make(env_id)
# ## Set random seed
# +
seed = 777
def seed_torch(seed):
torch.manual_seed(seed)
if torch.backends.cudnn.enabled:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(seed)
seed_torch(seed)
env.seed(seed)
# -
# ## Initialize
# +
# parameters
num_frames = 10000
memory_size = 1000
batch_size = 32
target_update = 200
epsilon_decay = 1 / 2000
# train
agent = DQNAgent(env, memory_size, batch_size, target_update, epsilon_decay)
# -
# ## Train
agent.train(num_frames)
# ## Test
#
# Run the trained agent (1 episode).
frames = agent.test()
# ## Render
# +
# Imports specifically so we can render outputs in Colab.
from matplotlib import animation
from JSAnimation.IPython_display import display_animation
from IPython.display import display
def display_frames_as_gif(frames):
"""Displays a list of frames as a gif, with controls."""
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(
plt.gcf(), animate, frames = len(frames), interval=50
)
display(display_animation(anim, default_mode='loop'))
# display
display_frames_as_gif(frames)
| 04.dueling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DiorgenesSilva/--INTRODU--O---PROGRAMA--O-ORIENTADA-A-OBJETOS--POO-/blob/main/Aula_1_Gabarito.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Gk3gnCgBFRrx"
# # Automating Systems and Processes with Python
#
# ### Challenge:
#
# Every day, our system updates the previous day's sales.
# Your daily job, as an analyst, is to send an e-mail to the board as soon as you start working, with the revenue and the number of products sold on the previous day
#
# Board e-mail: <EMAIL><br>
# Location where the system makes the previous day's sales available: https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing
#
# To solve this, we will use pyautogui, a library for automating mouse and keyboard commands
#
# pyautogui commands: https://pyautogui.readthedocs.io/en/latest/quickstart.html
# + id="_I_GTSkVFRr3"
import pyautogui
import pyperclip
import time
pyautogui.PAUSE = 1
# Step 1 - Open our system (go to the link: https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing)
pyautogui.hotkey("ctrl", "t")
link = "https://drive.google.com/drive/folders/149xknr9JvrlEnhNWO49zPcw0PW5icxga?usp=sharing"
pyperclip.copy(link)
pyautogui.hotkey("ctrl", "v")
pyautogui.press("enter")
# Step 2 - Navigate the system (open the Exportar folder)
time.sleep(5)  # wait 5 seconds
pyautogui.click(x=478, y=317, clicks=2)
# Step 3 - Download the sales file
time.sleep(2)
pyautogui.click(x=524, y=370)
pyautogui.click(x=1110, y=182)
pyautogui.click(x=968, y=724)
time.sleep(3)  # wait for the download to finish
# + [markdown] id="_4zUIWoTFRr8"
# ### Now let's read the downloaded file to get the indicators
#
# - Revenue
# - Number of products sold
# + id="Ek7nKcGHFRr_" outputId="223a1fcf-3ae2-46d4-eb3b-b2a1733bdc1f"
# Step 4 - Compute the revenue and the number of products sold
import pandas as pd
tabela = pd.read_excel(r"C:\Users\alonp\Downloads\Vendas - Dez.xlsx")
faturamento = tabela["Valor Final"].sum()
quantidade = tabela["Quantidade"].sum()
display(tabela)
# + [markdown] id="MUc7LsqlFRsE"
# ### Now let's send an e-mail through Gmail
# + id="yEY8HNqpFRsH"
# Step 5 - Send the e-mail to the board
pyautogui.hotkey("ctrl", "t")  # open a new tab
link = "https://mail.google.com/"
pyperclip.copy(link)
pyautogui.hotkey("ctrl", "v")
pyautogui.press("enter")
# click the "Compose" button
time.sleep(5)
pyautogui.click(x=95, y=185)
# type the recipient of the e-mail
pyautogui.write("<EMAIL>")
pyautogui.press("tab")  # select the e-mail address
pyautogui.press("tab")  # move to the subject field
# write the subject
assunto = "Relatório de Vendas"
pyperclip.copy(assunto)
pyautogui.hotkey("ctrl", "v")
pyautogui.press("tab")  # move to the e-mail body
# write the e-mail body
texto = f"""
Prezados, bom dia
O faturamento foi de R${faturamento:,.2f}
A quantidade de produtos foi de {quantidade:,}
Abs
Lira Python"""
pyautogui.write(texto)
# send the e-mail
pyautogui.hotkey("ctrl", "enter")
# + [markdown] id="x9soLvTOFRsL"
# #### Use this code to find out the position of an item you want to click
#
# - Remember: the position on your screen is different from the position on my screen
# + id="xZgAst9TFRsO" outputId="86100e40-b83a-47be-e8bc-5d17f58bc9c3"
import time
time.sleep(5)
pyautogui.position()
# + id="QtrwFrjyFRsS"
| Aula_1_Gabarito.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mnslarcher/cs224w-slides-to-code/blob/main/notebooks/05-label-propagation-for-node-classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8XSSJBHExyzl"
# # Label Propagation for Node Classification
# + id="VoV5Qcy6xql5"
import random
from typing import Optional, Tuple
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
# + id="FyzWARMcxtb9"
def seed_everything(seed: Optional[int] = None) -> None:
random.seed(seed)
np.random.seed(seed)
seed_everything(42)
# + [markdown] id="WGDZBK8Wt7WB"
# # Probabilistic Relational Classifier
# + id="SPOBiq_W54aR"
class ProbabilisticRelationalClassifier:
def __init__(
self,
max_iter: int = 10,
threshold: float = 0.01,
node_size: int = 2500,
node_font_size: int = 10,
title_font_size: int = 16,
fig_size: Optional[Tuple[int, int]] = None,
seed: int = 42,
) -> None:
self.max_iter = max_iter
self.threshold = threshold
self.node_size = node_size
self.node_font_size = node_font_size
self.title_font_size = title_font_size
self.fig_size = fig_size
self.seed = seed
def _iteration(self) -> None:
for node in self._G.nodes(data=True):
if node[1]["label"] is None:
prob = np.mean([self._G.nodes[neighbor]["prob"] for neighbor in self._G.neighbors(node[0])])
if abs(node[1]["prob"] - prob) < self.threshold:
node[1]["label"] = round(prob)
node[1]["prob"] = prob
def _initialization(self):
for node in self._G.nodes(data=True):
node[1]["prob"] = 0.5 if node[1]["label"] is None else 1.0 if node[1]["label"] else 0.0
def _is_converged(self) -> bool:
return not any(label is None for _, label in nx.get_node_attributes(self._G, "label").items())
def _draw(self) -> None:
node_colors = [
"tab:gray" if node[1]["label"] is None else "tab:blue" if node[1]["label"] else "tab:orange"
for node in self._G.nodes(data=True)
]
node_lables = {node[0]: f"{node[0]}\nP={node[1]['prob']:.2f}" for node in self._G.nodes(data=True)}
pos = nx.spring_layout(self._G, seed=self.seed)
nx.draw(
self._G,
pos=pos,
node_color=node_colors,
labels=node_lables,
node_size=self.node_size,
font_size=self.node_font_size,
with_labels=True,
)
def predict(self, G: nx.Graph, display: bool = True) -> nx.Graph:
self._G = G.copy()
num_iter = 0
self._initialization()
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title("Initialization", fontsize=self.title_font_size, loc="left")
plt.show()
while not self._is_converged() and num_iter < self.max_iter:
self._iteration()
num_iter += 1
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title(f"After iteration {num_iter}", fontsize=self.title_font_size, loc="left")
plt.show()
return self._G
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="rfQnA7Leu2hV" outputId="598eb959-9e3c-4373-bc79-1d25d2a6cd6e"
edge_list = [
(1, 2),
(1, 3),
(1, 4),
(2, 3),
(3, 4),
(4, 5),
(4, 6),
(5, 6),
(5, 7),
(5, 8),
(6, 7),
(6, 8),
(7, 8),
(7, 9),
]
G = nx.Graph(edge_list)
attrs = {
1: {"label": 0},
2: {"label": 0},
3: {"label": None},
4: {"label": None},
5: {"label": None},
6: {"label": 1},
7: {"label": 1},
8: {"label": None},
9: {"label": None},
}
nx.set_node_attributes(G, attrs)
clf = ProbabilisticRelationalClassifier()
_ = clf.predict(G)
# + [markdown] id="btNz4oiVPAOI"
# # Iterative Classification
# + id="J7nvNeMknVg5"
class IterativeClassifier:
def __init__(
self,
max_iter: int = 10,
edge_width: float = 2.0,
node_size: int = 5000,
node_edge_widths: int = 4,
node_font_size: int = 8,
title_font_size: int = 16,
fig_size: Optional[Tuple[int, int]] = None,
seed: Optional[int] = 42,
) -> None:
self.max_iter = max_iter
self.edge_width = edge_width
self.node_size = node_size
self.node_edge_widths = node_edge_widths
self.node_font_size = node_font_size
self.title_font_size = title_font_size
self.fig_size = fig_size
self.seed = seed
@staticmethod
def phi1(fv: np.ndarray) -> int:
return int(fv[0] == 0)
@staticmethod
def phi2(fv: np.ndarray, zv: np.ndarray) -> int:
return 1 if fv[0] == 0 or zv[:, 1].sum() > 1 else 0
def _apply_classifier_to_unlabeled_set(self) -> None:
for node in self._G.nodes(data=True):
node[1]["Yv"] = self.phi1(node[1]["fv"]) if node[1]["Yv"] is None else node[1]["Yv"]
def _update_relational_features(self) -> None:
for node in self._G.nodes():
zv = np.zeros((2, 2), dtype=int)
for predecessor in self._G.predecessors(node):
Yv = self._G.nodes[predecessor]["Yv"]
if Yv is not None:
zv[0, Yv] += 1
for successor in self._G.successors(node):
Yv = self._G.nodes[successor]["Yv"]
if Yv is not None:
zv[1, Yv] += 1
self._G.nodes[node]["zv"] = zv
def _update_labels(self) -> None:
for node in self._G.nodes(data=True):
node[1]["Yv"] = self.phi2(node[1]["fv"], node[1]["zv"])
def _iteration(self) -> bool:
self._update_relational_features()
old_Yv = nx.get_node_attributes(self._G, "Yv")
self._update_labels()
return old_Yv == nx.get_node_attributes(self._G, "Yv")
def _draw(self) -> None:
node_colors = [
"tab:gray" if node[1]["Yv"] is None else "tab:blue" if node[1]["Yv"] else "tab:orange"
for node in self._G.nodes(data=True)
]
node_edge_colors = ["tab:blue" if node[1]["gt"] else "tab:orange" for node in self._G.nodes(data=True)]
label = "Gt: {}, Yv: {}\nfv {}\nI {}\nO {}"
node_lables = {
node[0]: label.format(node[1]["gt"], node[1]["Yv"], node[1]["fv"], node[1]["zv"][0], node[1]["zv"][1])
for node in self._G.nodes(data=True)
}
pos = nx.spring_layout(self._G, k=1.0, seed=self.seed)
nx.draw(
self._G,
pos=pos,
width=self.edge_width,
node_color=node_colors,
edgecolors=node_edge_colors,
linewidths=self.node_edge_widths,
labels=node_lables,
node_size=self.node_size,
font_size=self.node_font_size,
with_labels=True,
)
def predict(self, G: nx.Graph, display: bool = True) -> nx.Graph:
self._G = G.copy()
self._update_relational_features()
self._apply_classifier_to_unlabeled_set()
num_iter = 0
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title("Apply classifier to unlabeled set", fontsize=self.title_font_size, loc="left")
plt.show()
is_converged = False
while not is_converged and num_iter < self.max_iter:
is_converged = self._iteration()
num_iter += 1
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title(f"After iteration {num_iter}", fontsize=self.title_font_size, loc="left")
plt.show()
return self._G
# + colab={"base_uri": "https://localhost:8080/", "height": 977} id="05mshu9hlGMQ" outputId="e7829848-a939-4d24-e681-5fa9023e8b08"
edge_list = [(1, 2), (2, 3), (3, 1), (3, 4)]
G = nx.DiGraph(edge_list)
attrs = {
1: {"fv": np.array([0, 1]), "Yv": 1, "gt": 1},
2: {"fv": np.array([0, 0]), "Yv": 1, "gt": 1},
3: {"fv": np.array([1, 0]), "Yv": None, "gt": 1},
4: {"fv": np.array([1, 1]), "Yv": 0, "gt": 0},
}
nx.set_node_attributes(G, attrs)
clf = IterativeClassifier()
_ = clf.predict(G)
# + [markdown] id="KQ58IZ09y_BC"
# # Collective Classification: Correct & Smooth
# + id="7KAnvYa02Kv8"
def get_normalized_diffusion_matrix(G: nx.Graph) -> np.ndarray:
A = np.asarray(nx.adjacency_matrix(G).todense())
degrees = np.array([degree for _, degree in G.degree()])
# Add self-loops
A[range(len(A)), range(len(A))] = 1.0
degrees += 1
# Compute D^-1/2
Dm12 = np.diag(1 / np.sqrt(degrees))
return Dm12.dot(A).dot(Dm12)
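# For instance, on a 3-node path graph the normalized matrix D^{-1/2}(A + I)D^{-1/2} looks like this (illustrative check):
print(get_normalized_diffusion_matrix(nx.path_graph(3)))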
class CorrectAndSmooth:
def __init__(
self,
correct_alpha: float = 0.8,
smooth_alpha: float = 0.8,
smooth_s: float = 2.0,
num_correct_iters: int = 3,
num_smooth_iters: int = 3,
edge_width: float = 2.0,
node_size: int = 2500,
node_edge_widths: int = 4,
node_font_size: int = 12,
title_font_size: int = 16,
fig_size: Optional[Tuple[int, int]] = None,
seed: Optional[int] = 42,
) -> None:
self.correct_alpha = correct_alpha
self.smooth_alpha = smooth_alpha
self.smooth_s = smooth_s
self.num_correct_iters = num_correct_iters
self.num_smooth_iters = num_smooth_iters
self.edge_width = edge_width
self.node_size = node_size
self.node_edge_widths = node_edge_widths
self.node_font_size = node_font_size
self.title_font_size = title_font_size
self.fig_size = fig_size
self.seed = seed
def _correct_step(self) -> None:
E = self._compute_train_errs()
for _ in range(self.num_correct_iters):
E = self._diffuse(E, self.correct_alpha)
for idx, (_, attrs) in enumerate(self._G.nodes(data=True)):
attrs["soft_label"] += self.smooth_s * E[idx]
def _smooth_step(self) -> None:
Z = self._get_labels()
for _ in range(self.num_smooth_iters):
Z = self._diffuse(Z, self.smooth_alpha)
for idx, (_, attrs) in enumerate(self._G.nodes(data=True)):
attrs["soft_label"] = Z[idx]
def _compute_train_errs(self) -> np.ndarray:
E = np.zeros((self._G.number_of_nodes(), 2))
for idx, (_, attrs) in enumerate(self._G.nodes(data=True)):
gt = attrs["ground_truth"]
if gt is not None:
E[idx] = gt - attrs["soft_label"]
return E
def _get_labels(self) -> np.ndarray:
Z = np.zeros((self._G.number_of_nodes(), 2))
for idx, (_, attrs) in enumerate(self._G.nodes(data=True)):
gt = attrs["ground_truth"]
if gt is None:
Z[idx] = attrs["soft_label"]
else:
Z[idx] = gt
return Z
def _diffuse(self, X, alpha) -> np.ndarray:
return (1 - alpha) * X + alpha * self._A_tilde.dot(X)
def _draw(self) -> None:
node_colors = [
"tab:orange" if np.argmax(node[1]["soft_label"]) else "tab:blue" for node in self._G.nodes(data=True)
]
node_lables = {
node[0]: f"[{node[0]}]\n{node[1]['soft_label'][0]:.2f}\n{node[1]['soft_label'][1]:.2f}"
for node in self._G.nodes(data=True)
}
pos = nx.spring_layout(self._G, seed=self.seed)
nx.draw(
self._G,
pos=pos,
width=self.edge_width,
node_color=node_colors,
linewidths=self.node_edge_widths,
labels=node_lables,
node_size=self.node_size,
font_size=self.node_font_size,
with_labels=True,
)
def predict(self, G: nx.Graph, display: bool = True) -> nx.Graph:
self._G = G.copy()
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title("Initial soft labels", fontsize=self.title_font_size, loc="left")
plt.show()
self._A_tilde = get_normalized_diffusion_matrix(self._G)
self._correct_step()
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title("After correct step", fontsize=self.title_font_size, loc="left")
plt.show()
self._smooth_step()
if display:
plt.figure(figsize=self.fig_size)
self._draw()
plt.title("After smooth step", fontsize=self.title_font_size, loc="left")
plt.show()
return self._G
# + id="sjp0MzrbzHG_" colab={"base_uri": "https://localhost:8080/", "height": 977} outputId="c56160c0-5e96-425f-df8c-814b8cc2fbd8"
edge_list = [
(1, 2),
(1, 3),
(1, 4),
(2, 3),
(3, 4),
(4, 5),
(4, 6),
(5, 6),
(5, 7),
(6, 8),
(7, 8),
(7, 9),
]
G = nx.Graph(edge_list)
attrs = {
1: {"soft_label": np.array([0.05, 0.95]), "ground_truth": np.array([0.0, 1.0])},
2: {"soft_label": np.array([0.30, 0.70]), "ground_truth": np.array([0.0, 1.0])},
3: {"soft_label": np.array([0.60, 0.40]), "ground_truth": None},
4: {"soft_label": np.array([0.20, 0.80]), "ground_truth": None},
5: {"soft_label": np.array([0.90, 0.10]), "ground_truth": None},
6: {"soft_label": np.array([0.60, 0.40]), "ground_truth": np.array([1.0, 0.0])},
7: {"soft_label": np.array([0.95, 0.05]), "ground_truth": np.array([1.0, 0.0])},
8: {"soft_label": np.array([0.40, 0.60]), "ground_truth": None},
9: {"soft_label": np.array([0.80, 0.20]), "ground_truth": None},
}
nx.set_node_attributes(G, attrs)
clf = CorrectAndSmooth()
_ = clf.predict(G)
| notebooks/05-label-propagation-for-node-classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.10 64-bit
# name: python3710jvsc74a57bd0a99ace1d12d52a1bbe3d6f19578e1877f6a52f23b84e79f24ee865afab45b587
# ---
# +
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# To get smooth animations
import matplotlib.animation as animation
mpl.rc('animation', html='jshtml')
import tensorflow as tf
from tensorflow import keras
import gym
import pyvirtualdisplay
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
# + tags=["outputPrepend"]
for env in gym.envs.registry.all():
print(env)
# -
env = gym.make("CartPole-v1")
obs = env.reset()
obs
env.render()
img = env.render(mode="rgb_array")
img.shape
def plot_environment(env, figsize=(5,4)):
plt.figure(figsize=figsize)
img = env.render(mode="rgb_array")
plt.imshow(img)
plt.axis("off")
return img
plot_environment(env)
plt.show()
env.action_space
action = 1
obs, reward, done, info = env.step(action)
obs
print(reward)
print(done)
print(info)
def base_policy(obs):
angle = obs[2]
return 0 if angle < 0 else 1
obs = env.reset()
totals = []
for episode in range(500):
episode_rewards = 0
obs = env.reset()
for step in range(200):
action = base_policy(obs)
obs, reward, done, info = env.step(action)
episode_rewards += reward
if done:
break
totals.append(episode_rewards)
np.mean(totals), np.std(totals), np.min(totals), np.max(totals)
# +
env.seed(42)
frames = []
obs = env.reset()
for step in range(200):
img = env.render(mode="rgb_array")
frames.append(img)
action = base_policy(obs)
obs, reward, done, info = env.step(action)
if done:
break
# +
def update_scene(num, frames, patch):
patch.set_data(frames[num])
return patch,
def plot_animation(frames, repeat=False, interval=40):
fig = plt.figure()
patch = plt.imshow(frames[0])
plt.axis('off')
anim = animation.FuncAnimation(fig, update_scene, fargs=(frames, patch),
frames=len(frames), repeat=repeat, interval=interval)
plt.close()
return anim
# -
plot_animation(frames)
# # Neural Network Policies:
# +
n_inputs = 4
model = keras.models.Sequential([
keras.layers.Dense(5, activation="elu", input_shape=[n_inputs]),
keras.layers.Dense(1, activation="sigmoid"),
])
# -
def render_policy_net(model, n_max_steps=200, seed=42):
frames = []
env = gym.make("CartPole-v1")
env.seed(seed)
np.random.seed(seed)
obs = env.reset()
for step in range(n_max_steps):
frames.append(env.render(mode="rgb_array"))
left_proba = model.predict(obs.reshape(1, -1))
action = int(np.random.rand() > left_proba)
obs, reward, done, info = env.step(action)
if done:
break
env.close()
return frames
frames = render_policy_net(model)
plot_animation(frames)
# +
n_environments = 50
n_iterations = 5000
envs = [gym.make("CartPole-v1") for _ in range(n_environments)]
for index, env in enumerate(envs):
env.seed(index)
np.random.seed(42)
observations = [env.reset() for env in envs]
optimizer = keras.optimizers.RMSprop()
loss_fn = keras.losses.binary_crossentropy
for iteration in range(n_iterations):
# if angle < 0, we want proba(left) = 1., or else proba(left) = 0.
target_probas = np.array([([1.] if obs[2] < 0 else [0.])
for obs in observations])
with tf.GradientTape() as tape:
left_probas = model(np.array(observations))
loss = tf.reduce_mean(loss_fn(target_probas, left_probas))
print("\rIteration: {}, Loss: {:.3f}".format(iteration, loss.numpy()), end="")
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
actions = (np.random.rand(n_environments, 1) > left_probas.numpy()).astype(np.int32)
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(actions[env_index][0])
observations[env_index] = obs if not done else env.reset()
for env in envs:
env.close()
# -
frames = render_policy_net(model)
plot_animation(frames)
def play_one_step(env, obs, model, loss_fn):
with tf.GradientTape() as tape:
left_proba = model(obs[np.newaxis])
action = (tf.random.uniform([1, 1]) > left_proba)
y_target = tf.constant([[1.]]) - tf.cast(action, tf.float32)
loss = tf.reduce_mean(loss_fn(y_target, left_proba))
grads = tape.gradient(loss, model.trainable_variables)
obs, rewards, done, info = env.step(int(action[0, 0].numpy()))
return obs, rewards, done, grads
def play_multiple_episodes(env, n_episodes, n_max_steps, model, loss_fn):
all_rewards = []
all_grads = []
for episode in range(n_episodes):
current_rewards = []
current_grads = []
obs = env.reset()
for step in range(n_max_steps):
obs, reward, done, grads = play_one_step(env, obs, model, loss_fn)
current_rewards.append(reward)
current_grads.append(grads)
if done:
break
all_rewards.append(current_rewards)
all_grads.append(current_grads)
return all_rewards, all_grads
# +
def discount_rewards(rewards, discount_factor):
discounted = np.array(rewards)
for step in range(len(rewards) - 2, -1, -1):
discounted[step] += discounted[step + 1] * discount_factor
return discounted
def discount_and_normalize_rewards(all_rewards, discount_factor):
all_discounted_rewards = [discount_rewards(rewards, discount_factor)
for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
rewards_mean = flat_rewards.mean()
rewards_std = flat_rewards.std()
return [(discounted_rewards - rewards_mean) / rewards_std
for discounted_rewards in all_discounted_rewards]
# -
discount_rewards([10, 0, -50], discount_factor=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]],
discount_factor=0.8)
# +
n_iterations = 150
n_episodes_per_update = 10
n_max_steps = 200
discount_factor = 0.95
optimizer = keras.optimizers.Adam(learning_rate=0.01)
loss_fn = keras.losses.binary_crossentropy
# +
env = gym.make("CartPole-v1")
env.seed(42)
for iteration in range(n_iterations):
all_rewards, all_grads = play_multiple_episodes(
env, n_episodes_per_update, n_max_steps, model, loss_fn)
total_rewards = sum(map(sum, all_rewards))
print("\rIteration: {}, mean rewards: {:.1f}".format(
iteration, total_rewards / n_episodes_per_update), end="")
all_final_rewards = discount_and_normalize_rewards(all_rewards,
discount_factor)
all_mean_grads = []
for var_index in range(len(model.trainable_variables)):
mean_grads = tf.reduce_mean(
[final_reward * all_grads[episode_index][step][var_index]
for episode_index, final_rewards in enumerate(all_final_rewards)
for step, final_reward in enumerate(final_rewards)], axis=0)
all_mean_grads.append(mean_grads)
optimizer.apply_gradients(zip(all_mean_grads, model.trainable_variables))
env.close()
# -
frames = render_policy_net(model)
plot_animation(frames)
# +
# shape=[s, a, s']
transition_probabilities = [[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]],
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None]]
# shape=[s, a, s']
rewards = [[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]]]
possible_actions = [[0, 1, 2], [0, 2], [1]]
# -
len(transition_probabilities), len(transition_probabilities[0]), len(transition_probabilities[0][0])
np.full((3, 3), -np.inf)
# +
Q_values = np.full((3, 3), -np.inf) # -np.inf for impossible actions
for state, actions in enumerate(possible_actions):
Q_values[state, actions] = 0.0 # for all possible actions
Q_values
# +
gamma = 0.90
for iteration in range(50):
Q_prev = Q_values.copy()
for s in range(3):
for a in possible_actions[s]:
Q_values[s, a] = np.sum([
transition_probabilities[s][a][sp]
* (rewards[s][a][sp] + gamma * np.max(Q_prev[sp]))
for sp in range(3)])
# -
Q_values
np.argmax(Q_values, axis=1)
def step(state, action):
probas = transition_probabilities[state][action]
next_state = np.random.choice([0, 1, 2], p=probas)
reward = rewards[state][action][next_state]
return next_state, reward
def exploration_policy(state):
return np.random.choice(possible_actions[state])
# +
alpha0 = 0.05
decay = 0.005
gamma = 0.90
state = 0
for iteration in range(10000):
action = exploration_policy(state)
next_state, reward = step(state, action)
next_value = np.max(Q_values[next_state])
alpha = alpha0 / (1 + iteration * decay)
Q_values[state, action] *= 1 - alpha
Q_values[state, action] += alpha * (reward + gamma * next_value)
state = next_state
# +
env = gym.make("CartPole-v0")
input_shape = [4]
n_outputs = 2
model = keras.models.Sequential([
keras.layers.Dense(32, activation="elu", input_shape=input_shape),
keras.layers.Dense(32, activation="elu"),
keras.layers.Dense(n_outputs)
])
# -
def epsilon_greedy_policy(state, epsilon=0):
if np.random.rand() < epsilon:
return np.random.randint(2)
else:
Q_values = model.predict(state[np.newaxis])
return np.argmax(Q_values[0])
# +
from collections import deque
replay_buffer = deque(maxlen=2000)
# -
def sample_experiences(batch_size):
indices = np.random.randint(len(replay_buffer), size=batch_size)
batch = [replay_buffer[index] for index in indices]
states, actions, rewards, next_states, dones = [
np.array([experience[field_index] for experience in batch])
for field_index in range(5)]
return states, actions, rewards, next_states, dones
def play_one_step(env, state, epsilon):
action = epsilon_greedy_policy(state, epsilon)
next_state, reward, done, info = env.step(action)
replay_buffer.append((state, action, reward, next_state, done))
return next_state, reward, done, info
# +
batch_size = 32
discount_factor = 0.95
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
loss_fn = keras.losses.mean_squared_error
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
next_Q_values = model.predict(next_states)
max_next_Q_values = np.max(next_Q_values, axis=1)
target_Q_values = (rewards + (1 - dones) * discount_factor * max_next_Q_values)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# -
for episode in range(600):
obs = env.reset()
for step in range(200):
epsilon = max(1 - episode / 500, 0.01)
obs, reward, done, info = play_one_step(env, obs, epsilon)
if done:
break
if episode > 50:
training_step(batch_size)
# # Deep Q-Variants:
# ## Fixed Q-value Targets:
temp_model = keras.models.clone_model(model)
temp_model.set_weights(model.get_weights())
# +
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
next_Q_values = target.predict(next_states)
max_next_Q_values = np.max(next_Q_values, axis=1)
target_Q_values = (rewards + (1 - dones) * discount_factor * max_next_Q_values)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
all_Q_values = temp_model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, temp_model.trainable_variables)
optimizer.apply_gradients(zip(grads, temp_model.trainable_variables))
for episode in range(600):
obs = env.reset()
for step in range(200):
epsilon = max(1 - episode / 500, 0.01)
obs, reward, done, info = play_one_step(env, obs, epsilon)
if done:
break
if episode > 50:
training_step(batch_size)
if episode % 50 == 0:
target.set_weights(temp_model.get_weights())
# -
# ## Double DQN:
temp_model = keras.models.clone_model(model)
temp_model.set_weights(model.get_weights())
# +
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
def training_step(batch_size, episode=None):
print("\n\nEpisode:", episode)
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
print("Next States:\n", next_states)
next_Q_values = temp_model.predict(next_states)
print("Next Q Values:\n", next_Q_values)
best_next_actions = np.argmax(next_Q_values, axis=1)
print("Best Next Actions:\n", best_next_actions)
next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()
print("Next Mask:\n", next_mask)
print("Target Predict:\n", target.predict(next_states))
next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)
print("Next Best Q Values:\n", next_best_Q_values)
target_Q_values = (rewards + (1 - dones) * discount_factor * next_best_Q_values)
print("Target Q Values:\n", target_Q_values)
mask = tf.one_hot(actions, n_outputs)
print("Mask:", mask)
if episode == 52:
        raise RuntimeError("debug stop after inspecting episode 52")
with tf.GradientTape() as tape:
all_Q_values = temp_model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, temp_model.trainable_variables)
optimizer.apply_gradients(zip(grads, temp_model.trainable_variables))
for episode in range(600):
obs = env.reset()
for step in range(200):
epsilon = max(1 - episode / 500, 0.01)
obs, reward, done, info = play_one_step(env, obs, epsilon)
if done:
break
if episode > 50:
training_step(batch_size, episode)
if episode % 50 == 0:
target.set_weights(temp_model.get_weights())
# -
# ## Dueling DQN:
K = keras.backend
input_states = keras.layers.Input(shape=[4])
hidden1 = keras.layers.Dense(32, activation="elu")(input_states)
hidden2 = keras.layers.Dense(32, activation="elu")(hidden1)
state_values = keras.layers.Dense(1)(hidden2)
raw_advantages = keras.layers.Dense(n_outputs)(hidden2)
advantages = raw_advantages - K.max(raw_advantages, axis=1, keepdims=True)
Q_values = state_values + advantages
model = keras.Model(inputs=[input_states], outputs=[Q_values])
| notebooks/Reinforcement_Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from rsna_retro.imports import *
from rsna_retro.metadata import *
from rsna_retro.preprocess import *
from rsna_retro.train import *
torch.cuda.set_device(1)
Meta.df_comb.head()
# ## Training
name = 'baseline_full'
dbch = get_data(512, 128)
learn = get_learner(dbch, xresnet34)
do_fit(learn, 20, 4e-2, splits=Meta.splits)
learn.save(f'runs/{name}-1')
learn.dls = get_data(512, 224, splits=Meta.splits)
do_fit(learn, 12, 5e-3, freeze=False)
learn.save(f'runs/{name}-2')
learn.dls = get_data(256, 384, splits=Meta.splits)
do_fit(learn, 4, 1e-3, freeze=False)
learn.save(f'runs/{name}-3')
# ## Submission
learn.load(f'runs/{name}-2')
# +
# tst_fns = df_tst.index.values
# -
sub_fn = f'subm/{name}-2'
learn.dls = get_test_data(meta.df_tst, bs=512, sz=256)
# +
# tst_bs = 256
# tst_sz = 384
# tst_splits = [L.range(tst_fns), L.range(tst_fns)]
# tst_dbch = get_data_gen(tst_fns, bs=tst_bs, img_tfm=get_pil_fn(path/'tst_jpg'), sz=tst_sz, splits=tst_splits, test=True)
# learn.dls = tst_dbch
# -
preds,targs = learn.get_preds()
pred_csv = submission(meta.df_tst, preds, fn=sub_fn)
FileLink(f'{sub_fn}.csv')
api.competition_submit(f'{sub_fn}.csv', 'rsna_retro - full - 256 size. Compare to 384 0.064', 'rsna-intracranial-hemorrhage-detection')
api.competitions_submissions_list('rsna-intracranial-hemorrhage-detection')[0]
| 02_train_experiments/02_train_01_full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ![title](img/cover4.png)
# <center><em>Copyright: this material is protected, please do not copy or distribute. By: <NAME></em></center>
# ***
# <h1 align="center">Udemy course: Python Bootcamp for Data Science 2021 Numpy Pandas & Seaborn</h1>
#
# ***
# ## 14.7 Resampling and Frequency Conversion
# First, we import the pandas library:
# + hide_input=false
import pandas as pd
# -
# Let's read this time series from a CSV file using the function **pd.read_csv()**:
# + hide_input=false
df = pd.read_csv('data/apple.csv', index_col = 'Date', parse_dates = True)
df.head()
# -
# We can resample this time series into the **weekly average prices** using the function **resample('W')**:
# + hide_input=false
df.resample('W').mean()
# -
# We can resample this time series into a monthly frequency by passing the argument **'M'**:
# + hide_input=false
df.resample('M').mean()
# -
# For a monthly frequency we can display only the month (without the day) using the argument **kind='period'**:
# + hide_input=false
df.resample('M', kind = 'period').mean()
# -
# Let's look at another example; consider this time series:
# + hide_input=false
sales = pd.read_csv('data/sales.csv', index_col = 'Date', parse_dates = True)
sales.head()
# -
# We can calculate the total monthly sales using the function **resample()**:
# + hide_input=false
sales.resample('M', kind = 'period').sum()
# -
# ***
#
# <h1 align="center">Thank You</h1>
#
# ***
| 14.7 Resampling and Frequency Conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from straw import straw
help(straw)
result = straw.straw('NONE', 'https://hicfiles.s3.amazonaws.com/hiseq/gm12878/in-situ/HIC001.hic', 'X', 'X', 'BP', 1000000)
print("{0} {1} {2}".format(result[0][0], result[1][0], result[2][0]))
print("{0} {1} {2}".format(result[0][1], result[1][1], result[2][1]))
| python/straw_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Erasmus+ ICCT project (2018-1-SI01-KA203-047081)
# Toggle cell visibility
from IPython.display import HTML
tag = HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide()
} else {
$('div.input').show()
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''')
display(tag)
# Hide the code completely
# from IPython.display import HTML
# tag = HTML('''<style>
# div.input {
# display:none;
# }
# </style>''')
# display(tag)
# + [markdown] lang="it"
# ## From differential equations to state-space form
#
# ### Modelling a Mass-Spring-Damper system
#
# This example lets you experiment with the effects of parameter changes on the free and/or forced response of the mass-spring-damper system presented in the textbook.
#
# <img src="Images\mass-spring-damper.png" alt="drawing" width="300">
#
# The equation describing the system is:
#
# $$m\ddot{x}=-kx-c\dot{x}+F(t),$$
#
# where $x$ is the position of the mass along its degree of freedom, $m$ is its mass, $k$ is the spring stiffness (so $kx$ is the force exerted by the spring on the mass), $c$ is the viscous friction constant (so $c\dot{x}$ is the force exerted by the damper on the mass), and $F(t)$ is the external force applied to the mass (it represents the actual input of the system).
#
# Defining the state vector $\textbf{x}=[x_1, x_2]^T$, where $x_1=x$ and $x_2=\dot{x}$, and the input as $u(t)=F(t)$, the behaviour of the system can be described by the two equations:
# $$
# \begin{cases}
# \dot{x_2}=-\frac{k}{m}x_1-\frac{c}{m}x_2+\frac{1}{m}u(t) \\
# \dot{x_1}=x_2
# \end{cases}
# $$
# (note that $\dot{x_2}=\ddot{x}$). In matrix form:
# $$
# \begin{bmatrix}
# \dot{x_1} \\
# \dot{x_2}
# \end{bmatrix}=\underbrace{\begin{bmatrix}
# 0 & 1 \\
# -\frac{k}{m} & -\frac{c}{m}
# \end{bmatrix}}_{A}\begin{bmatrix}
# x_1 \\
# x_2
# \end{bmatrix}+\underbrace{\begin{bmatrix}
# 0 \\
# \frac{1}{m}
# \end{bmatrix}}_{B}u.
# $$
#
# ### How to use this notebook?
#
# - Explore the different responses of the system to different initial conditions and inputs.
# - Note how the value of the damping coefficient $c$ determines whether or not the position of the mass oscillates.
#
# Try to let the mass oscillate forever, or make it move quickly from one point to another without oscillations.
#
# Try to find, if you can, a set of parameters ($m$, $k$ and $c$) and an input function that make the amplitude of the oscillations grow towards infinity over time. Is it possible?
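#
# *Optional sketch:* as a quick numerical check of the last question, the cell below builds the $A$ matrix for two illustrative parameter sets and prints its eigenvalues: with $m, k, c > 0$ their real parts are negative (decaying oscillations), and with $c = 0$ they are purely imaginary (sustained, but not growing, free oscillations).
# +
import numpy as np

def eigenvalues_mass_spring_damper(m, k, c):
    # Eigenvalues of the state matrix A of the mass-spring-damper system (illustrative values below)
    A = np.array([[0.0, 1.0],
                  [-k / m, -c / m]])
    return np.linalg.eigvals(A)

for m_, k_, c_ in [(5.0, 1.0, 0.5), (5.0, 1.0, 0.0)]:
    print(f"m={m_}, k={k_}, c={c_} -> eigenvalues:", eigenvalues_mass_spring_damper(m_, k_, c_))
# -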
# +
#Preparatory Cell
# %matplotlib notebook
import control
import numpy
from IPython.display import display, Markdown
import ipywidgets as widgets
import matplotlib.pyplot as plt
from matplotlib import animation
# %matplotlib inline
#print a matrix latex-like
def bmatrix(a):
"""Returns a LaTeX bmatrix - by <NAME> (ICCT project)
:a: numpy array
:returns: LaTeX bmatrix as a string
"""
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{bmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{bmatrix}']
return '\n'.join(rv)
# Display formatted matrix:
def vmatrix(a):
if len(a.shape) > 2:
raise ValueError('bmatrix can at most display two dimensions')
lines = str(a).replace('[', '').replace(']', '').splitlines()
rv = [r'\begin{vmatrix}']
rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines]
rv += [r'\end{vmatrix}']
return '\n'.join(rv)
#matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value !
class matrixWidget(widgets.VBox):
def updateM(self,change):
for irow in range(0,self.n):
for icol in range(0,self.m):
self.M_[irow,icol] = self.children[irow].children[icol].value
#print(self.M_[irow,icol])
self.value = self.M_
def dummychangecallback(self,change):
pass
def __init__(self,n,m):
self.n = n
self.m = m
self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))
self.value = self.M_
widgets.VBox.__init__(self,
children = [
widgets.HBox(children =
[widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]
)
for j in range(n)
])
#fill in widgets and tell interact to call updateM each time a children changes value
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].value = self.M_[irow,icol]
self.children[irow].children[icol].observe(self.updateM, names='value')
#value = Unicode('<EMAIL>', help="The email value.").tag(sync=True)
self.observe(self.updateM, names='value', type= 'All')
def setM(self, newM):
#disable callbacks, change values, and reenable
self.unobserve(self.updateM, names='value', type= 'All')
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].unobserve(self.updateM, names='value')
self.M_ = newM
self.value = self.M_
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].value = self.M_[irow,icol]
for irow in range(0,self.n):
for icol in range(0,self.m):
self.children[irow].children[icol].observe(self.updateM, names='value')
self.observe(self.updateM, names='value', type= 'All')
#self.children[irow].children[icol].observe(self.updateM, names='value')
#overload of the StateSpace class so that "useless" states are NOT removed (what professor of automatic control would do that?)
class sss(control.StateSpace):
def __init__(self,*args):
#call base class init constructor
control.StateSpace.__init__(self,*args)
#disable function below in base class
def _remove_useless_states(self):
pass
# +
#define matrixes
C = numpy.matrix([[1,0],[0,1]])
D = numpy.matrix([[0],[0]])
X0 = matrixWidget(2,1)
m = widgets.FloatSlider(
value=5,
min=0.1,
max=10.0,
step=0.1,
description='m [kg]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
k = widgets.FloatSlider(
value=1,
min=0,
max=10.0,
step=0.1,
description='k [N/m]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
c = widgets.FloatSlider(
value=0.5,
min=0,
max=10.0,
step=0.1,
description='c [Ns/m]:',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
u = widgets.FloatSlider(
value=1,
min=0,
max=10.0,
step=0.1,
description='',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
omega = widgets.FloatSlider(
value=5,
min=0,
max=10.0,
step=0.1,
description='',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
# +
def main_callback(X0, m, k, c, u, selu, omega):#m, k, c, u, selu, DW
a = numpy.matrix([[0,1],[-k/m,-c/m]])
b = numpy.matrix([[0],[1/m]])
eig = numpy.linalg.eig(a)
sys = sss(a,b,C,D)
if min(numpy.real(abs(eig[0]))) != 0:
T = numpy.linspace(0,100/min(numpy.real(abs(eig[0]))),1000)
else:
if max(numpy.real(abs(eig[0]))) != 0:
T = numpy.linspace(0,100/max(numpy.real(abs(eig[0]))),1000)
else:
T = numpy.linspace(0,1000,1000)
if selu == 'impulse': #selu
U = [0 for t in range(0,len(T))]
U[0] = u
y = control.forced_response(sys,T,U,X0)
if selu == 'step':
U = [u for t in range(0,len(T))]
y = control.forced_response(sys,T,U,X0)
if selu == 'sinusoid':
U = u*numpy.sin(omega*T)
y = control.forced_response(sys,T,U,X0)
fig=plt.figure(num=1,figsize=[15, 4])
fig.add_subplot(121)
plt.plot(T,y[1][0])
plt.grid()
plt.xlabel('t [s]')
    plt.ylabel('position [m]')
fig.add_subplot(122)
plt.plot(T,y[1][1])
plt.grid()
plt.xlabel('t [s]')
    plt.ylabel('velocity [m/s]')
#display(Markdown('The A matrix is: $%s$ and the eigenvalues are: $%s$' % (bmatrix(a),eig[0])))
#create dummy widget
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))
#create button widget
START = widgets.Button(
description='Test',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Test',
icon='check'
)
def on_start_button_clicked(b):
#This is a workaround to have intreactive_output call the callback:
# force the value of the dummy widget to change
if DW.value> 0 :
DW.value = -1
else:
DW.value = 1
pass
START.on_click(on_start_button_clicked)
#define the type of input
SELECT = widgets.Dropdown(
options=[('impulso','impulse'), ('gradino','step'), ('sinusoide','sinusoid')],
value='impulse',
description='',
disabled=False
)
#create a graphic structure to hold all widgets
alltogether = widgets.VBox([widgets.HBox([widgets.VBox([m,
k,
c]),
widgets.HBox([widgets.VBox([widgets.Label('seleziona il tipo di input:',border=3),
widgets.Label('u [N]:',border=3),
widgets.Label('omega [rad/s]:',border=3)]),
widgets.VBox([SELECT,u,omega])])]),
widgets.HBox([widgets.Label('Stato iniziale X0:',border=3),X0])])
out = widgets.interactive_output(main_callback,{'X0':X0, 'm': m, 'k': k, 'c': c, 'u': u, 'selu': SELECT, 'omega':omega})
#out.layout.height = '300px'
display(out,alltogether)
| ICCT_it/examples/04/SS-07-Dalle_equazioni_differenziali_allo_spazio_di_stato.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# A. Using the **same Amazon product dataset from HW1**, **process the reviews** however you deem appropriate using the tools we have learned today (tokenizing, stemming, lemmatization, removing stopwords), and **produce a correlation matrix** of the top 500 words by frequency. Then, **sample your dataframe for only the top 200 words, and identify the two reviews that are the most "similar" based upon cosine similarity**. (7 pts)
# +
files = ["good_amazon_toy_reviews.txt", "poor_amazon_toy_reviews.txt"]
corpus = []
for file in files:
corpus += open(file, "r").readlines()
# -
# replace backslashes and new line carriage symbols
corpus = list(map(lambda review: review.replace('\n', '').replace('\\', ''), corpus))[:10000]
# +
from nltk.stem import WordNetLemmatizer
from nltk import word_tokenize
import string
lemmatizer = WordNetLemmatizer()
# make my own function that takes in a full sentence, tokenizes it, lemmatizes the words, then joins it back
# on white space
def lemmatize_sentence(sentence):
words = word_tokenize(sentence)
res_words = []
for word in words:
res_words.append(lemmatizer.lemmatize(word).strip(string.punctuation))
return " ".join(res_words)
# +
from nltk.stem import WordNetLemmatizer
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
# iterate through the corpus, lemmatizing each sentence (this is a substitute for a for loop!)
lemmatized_corpus = map(lemmatize_sentence, corpus)
vectorizer = CountVectorizer(stop_words="english")
# vectorize the corpus
vector = vectorizer.fit_transform(lemmatized_corpus)
# convert into Pandas dataframe
count_df = pd.DataFrame(vector.toarray(), columns=vectorizer.get_feature_names())
# -
# find the top 200 words by first summing along the columns, sorting the values descending order, picking
# the top 200, and grabbing the indices (words)
top_200_words = count_df.sum(axis=0).sort_values(ascending=False)[:200].index.values
# # Correlation Matrix
top_200_count_df = count_df[top_200_words]
top_200_count_df.corr()
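# +
# Optional sketch: the prompt above asks for the correlation matrix of the top 500
# words by frequency, while the cell above shows it for the top 200; the same pattern
# extends directly to 500 columns.
top_500_words = count_df.sum(axis=0).sort_values(ascending=False)[:500].index.values
top_500_corr = count_df[top_500_words].corr()
top_500_corr.shape
# -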
# +
from itertools import product
from scipy.spatial.distance import cosine
# sample only 200 random reviews to save time
# save the original review text in this df, so we can go back and inspect the most similar reviews
top_200_count_df["text"] = corpus
top_200_count_df = top_200_count_df.sample(200)
# -
review_lookup = pd.DataFrame(columns=["review_text"])
review_lookup["review_text"] = top_200_count_df["text"].values
review_lookup.index = top_200_count_df["text"].index.values
top_200_count_df.drop(columns=["text"], inplace=True)
# # Computing Similarity
#
# ### Option 1: Using For Loops
# +
computed = set() # create a set to store computed values to minimize calculations
results = [] # store the results here
for idx, (reviewA, reviewB) in enumerate(list(product(top_200_count_df.index.values, repeat=2))):
if idx % 1000 == 0: # print out progress
print(f"Done with {idx}")
if reviewA == reviewB:
continue
if (reviewA, reviewB) in computed or (reviewB, reviewA) in computed: # if these reviews are already computed
continue
reviewA_vector = top_200_count_df.loc[reviewA].values
reviewB_vector = top_200_count_df.loc[reviewB].values
similarity = 1 - cosine(reviewA_vector, reviewB_vector)
computed.add((reviewA, reviewB))
results.append((reviewA, reviewB, similarity))
# -
similarities = pd.DataFrame(results, columns=["review A", "review B", "similarity"])
top_50_similar = similarities.sort_values(by="similarity", ascending=False).head(50) # get top 50
# use pandas' iterrows() to quickly iterate through rows, and print the most similar reviews
for idx, row in top_50_similar.iterrows():
a_index = row["review A"]
b_index = row["review B"]
a_text = review_lookup.loc[a_index]["review_text"]
b_text = review_lookup.loc[b_index]["review_text"]
print(f"({row['similarity']})\n{a_text}\n{b_text}\n\n")
# ## Option B (More Efficient): Using Sklearn's Cosine Similarity Functions (Student Answer)
from sklearn.metrics.pairwise import cosine_similarity
# iterate through the corpus, lemmatizing each sentence (this is a substitute for a for loop!)
lemmatized_corpus = map(lemmatize_sentence, corpus)
vectorizer = CountVectorizer(stop_words="english")
# vectorize the corpus
vector = vectorizer.fit_transform(lemmatized_corpus)
similarity_matrix = cosine_similarity(vector.toarray())
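# +
# Option B above stops at the full pairwise similarity matrix; one possible way to
# finish the task (identify the two most similar reviews) is sketched below: blank
# out the diagonal in place, then take the argmax of what remains.
import numpy as np
np.fill_diagonal(similarity_matrix, -1.0)  # each review is trivially identical to itself
i, j = np.unravel_index(np.argmax(similarity_matrix), similarity_matrix.shape)
print(f"Most similar pair (cosine similarity {similarity_matrix[i, j]:.3f}):")
print(corpus[i])
print(corpus[j])
# -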
| solutions/HW2 (Instructor Solution).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="BWgZD6aDxeh_"
# # Imports
# + colab={"base_uri": "https://localhost:8080/"} id="6950f642-0c11-4671-bfa4-52c2a3f0ff4f" outputId="5581e07d-2f54-471f-9d52-57c735e2e04d"
import pandas as pd
import glob
import numpy as np
from functools import reduce
import time
import datetime
import math
pd.set_option("display.max_columns", None)
# Code to read csv file into Colaboratory:
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Imports and upgrades for visualisation
import matplotlib.pyplot as plt
import seaborn as sns
# !pip install seaborn --upgrade
# !pip install matplotlib --upgrade
# + id="PKcI8GJipAuy"
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
link = 'https://drive.google.com/file/d/18B4656TSdWoJfqP3LE56nrg7QyiMNmJR'
downloaded = drive.CreateFile({'id':'18B4656TSdWoJfqP3LE56nrg7QyiMNmJR'})
downloaded.GetContentFile('input.zip')
# + colab={"base_uri": "https://localhost:8080/"} id="7pylfSYrpKsr" outputId="1892734f-20db-424c-9fe7-f667a282fbb8"
# !unzip input.zip
# + id="ef3746c7-01b7-43bb-a973-3cde94d2b44e"
path = 'input/tasutud_maksud' # use your path
all_files = glob.glob(path + "/*.xlsx")
all_files.sort()
# + [markdown] id="kYKR4o9kPkqK"
# # Preprocess
# + id="dffc36ef-1616-4013-9812-914e8e9594d0"
def mod_df(path):
file_name = path.split("/")[-1]
file_name = file_name.split("\\")[-1]
year = file_name.split("_")[2]
quater = file_name.split("_")[3]
suffix = "_{0}_{1}".format(year,quater)
df = pd.read_excel(path,usecols=['Nimi', 'Registrikood','Riiklikud Maksud', 'Tööjõumaksud Ja Maksed', 'Kaive', 'Tootajaid' ], dtype={'Registrikood': str}) #nrows=1000
#df['date'] = year
#df['type'] = "fast_grower"
df = df.rename(columns={'Nimi':"Nimi", 'Registrikood': "Registrikood", 'Riiklikud Maksud': 'Riiklikud Maksud'+suffix, 'Tööjõumaksud Ja Maksed': 'Tööjõumaksud Ja Maksed'+suffix, 'Kaive':'Kaive'+suffix, 'Tootajaid':'Tootajaid'+suffix,
"date": "date"+suffix, 'type':'type'+suffix})
return df
# + id="7090f607-f1ef-44c9-a14e-f29218bab7eb"
#all_files = all_files[6:10] # remove this row later
df_from_each_file = (mod_df(f) for f in all_files)
df_merged = reduce(lambda left,right: pd.concat([pd.merge(left,right,on=['Registrikood', 'Nimi'], #remove concat, if you want every company in 1 row
how='outer'), right]), df_from_each_file)
df_merged = df_merged.drop_duplicates()
#result = df_merged.copy()
# + colab={"base_uri": "https://localhost:8080/", "height": 806} id="00bde0ec-8fcb-4785-8cba-10aef36d27a1" outputId="ca41cf95-933f-4097-80f6-4aae0ec07cb3"
print(df_merged.shape)
print(df_merged.columns)
df_merged.sample(n = 3)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="KpdLJeTAosyL" outputId="40c933d5-4f19-47e5-f241-233f48f1219e"
df_merged.loc[df_merged['Registrikood'] == '14339972']
# + id="SpVcapuVosom"
debtors_df = pd.read_excel('input/maksuvolglaste_nimekiri.xlsx', dtype={'Kood': str})
debtors_df = debtors_df.drop(['Nimi'], axis=1)
reg_df = pd.read_excel('input/Ъksuste nimekiri 01.12.2020_revid.4.10.2021.xlsx', dtype={'Registrikood': str})
reg_df = reg_df.drop(['Nimi'], axis=1)
result = pd.merge(df_merged,debtors_df,left_on=['Registrikood'],right_on=['Kood'], how='left')
result = pd.merge(result,reg_df,on=['Registrikood'], how='left')
# drop rows where the company was registered before 2015
result = result[result['Üksuse registreerimise kuupäev'] >= pd.Timestamp('2015-01-01')]
# + [markdown] id="ko6kD60It2bA"
# Bankrupt (PANKROTIS) / in liquidation (LIKVIDEERIMISEL) flags:
# + colab={"base_uri": "https://localhost:8080/"} id="kW046zf0t0lr" outputId="4a14637e-eb7c-4c49-b707-aec2121d4498"
# pankrotis:
condition = [result['Nimi'].str.contains('(PANKROTIS)', regex=False)]
result['pankrotis'] = np.select(condition, [1], 0)
print(result['pankrotis'].value_counts())
# + colab={"base_uri": "https://localhost:8080/"} id="waNjtyV8t79p" outputId="562e9dbd-5d07-4582-82a3-843278652752"
# likvideeritud:
condition = [result['Nimi'].str.contains('(LIKVIDEERIMISEL)', regex=False)]
result['likvideeritud'] = np.select(condition, [1], 0)
print(result['likvideeritud'].value_counts())
# + colab={"base_uri": "https://localhost:8080/", "height": 487} id="8BwL3zMAur4F" outputId="ce05f5fe-e503-46df-d969-d428e0320522"
result.head()
# + [markdown] id="DAgVCbeZsvU-"
# Labelling formula(s):
# + id="tuR2noD-oslw"
# Average Increase:
def averageIncrease(list):
changes = []
for x1, x2 in zip(list[:-1], list[1:]):
try:
pct = (x2 - x1) * 100 / x1
except ZeroDivisionError:
pct = 0
changes.append(pct)
if len(changes) == 0:
return 0
avg = sum(changes)/len(changes)
return avg
# + id="TLA8I8tFdgpP"
# Compound Monthly Growth Rate - https://amplitude.com/blog/month-over-month-growth-rates
def CMGR(list):
    if len(list) == 0:
return 0
return ((((list[len(list) - 1]/list[0]) ** (1/len(list))) - 1) * 100).real
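# +
# Illustrative check of the two growth metrics above on a made-up turnover series
# (100 -> 110 -> 121, i.e. +10% at each step). Note that CMGR() uses the exponent
# 1/len(list) rather than 1/(len(list) - 1), so it reports a lower figure than the
# per-step growth rate.
print(averageIncrease([100, 110, 121]))  # ~10.0
print(CMGR([100, 110, 121]))             # ~6.6
# -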
# + id="DcIVgytQBp49"
# + colab={"base_uri": "https://localhost:8080/"} id="NyYdiGFUosix" outputId="e6d60034-93e7-4f47-dd3d-d1a2a89b1b3d"
label_calc = result.filter(regex=("Kaive_.*"))
#label_calc['label'] = label_calc.apply(lambda x: x[0],axis=1)
label_calc['kaive_list'] = label_calc.values.tolist()
#remove nan values from list
label_calc['kaive_list'] = label_calc['kaive_list'].apply(lambda asd: [x for x in asd if math.isnan(x) == False and x != 0.0])
label_calc['averageIncrease'] = label_calc['kaive_list'].apply(lambda asd: averageIncrease(asd))
label_calc['compoundMonthlyGrowthRate'] = label_calc['kaive_list'].apply(lambda asd: CMGR(asd))
# + id="26nzBo0josf9"
result = pd.concat([result, label_calc['averageIncrease']], axis=1)
result = pd.concat([result, label_calc['compoundMonthlyGrowthRate']], axis=1)
result['label_avg'] = result['averageIncrease'].apply(lambda x: 'not fast-growing' if x <= 30 else 'fast-growing')
result['label'] = result['compoundMonthlyGrowthRate'].apply(lambda x: 'not fast-growing' if x <= 30 else 'fast-growing')
# + id="S8z1daU1o1Hz"
# + colab={"base_uri": "https://localhost:8080/"} id="EPs7sc4so1FB" outputId="32191857-bde6-4943-8643-f0a035c5fc70"
result['label_avg'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="5mICZ3BdmMyP" outputId="33202835-9b19-4d0a-9efa-0a13f68fb750"
result['label'].value_counts()
# + id="167Qwc9Go_Ja"
#print(unique_registrikoodid.shape)
# + id="WmR3xXh0qaUy" colab={"base_uri": "https://localhost:8080/"} outputId="a381545f-7469-430c-f2fd-88045a8b59e1"
print(result.shape)
# + [markdown] id="cleyXYxnNj4u"
# Train, test and validation split:
#
# + id="ZOoFEP6JNjYJ"
# Find unique codes:
all_registrikoodid = result[['Registrikood']].values.ravel()
unique_registrikoodid = pd.unique(all_registrikoodid)
#print(unique_registrikoodid)
# How many lines before duplicating rows:
original_data_len = len(unique_registrikoodid)
# Create test, train, val indexes:
train_data_indx = [0] * int(original_data_len * 0.6)
test_data_indx = [1] * int(original_data_len * 0.2)
val_data_indx= [2] * int(original_data_len * 0.2)
# Sum indexes and shuffle:
split_indx_array = train_data_indx + test_data_indx + val_data_indx
np.random.seed(42)
np.random.shuffle(split_indx_array)
#print(split_indx_array)
# make sure unique_registrikoodid is the same length as split_indx_array:
unique_registrikoodid = unique_registrikoodid[:len(split_indx_array)]
# Merge codes and test, train, val indexes:
d = {'Registrikoodid': unique_registrikoodid, 'Train_test_val': split_indx_array}
split_df = pd.DataFrame(data=d)
# Merge split_indx_array to original df:
result2 = pd.merge(result, split_df ,left_on=['Registrikood'], right_on=['Registrikoodid'], how='left')
# Convert categorical variables to binary:
result2 = pd.get_dummies(result2, columns = ['EMTAK2008 Tähtkoodiga valdkond', 'Institutsionaalsete sektorite Eesti jaoks kohandatud klassifikaator 2010'])
# Drop certain columns that models don't use and fill NAs:
result2 = result2.drop(columns=['Nimi','Üksuse registreerimise kuupäev','Registrikood', 'Registrikoodid', 'Kood', 'Sh vaidlustatud', 'Maksuvõlg alates', 'Muudatuse või lisamise kuupäev','Üksuse registrist kustutamise kuupäev', 'averageIncrease', 'compoundMonthlyGrowthRate', 'label_avg'])
result2 = result2.fillna(0)
# Create dataframes (test, train, val):
train_data = result2.loc[result2['Train_test_val'] == 0]
test_data = result2.loc[result2['Train_test_val'] == 1]
val_data = result2.loc[result2['Train_test_val'] == 2]
# Create labels:
train_data_labels = train_data['label'].replace('fast-growing', 1).replace('not fast-growing', 0)
test_data_labels = test_data['label'].replace('fast-growing', 1).replace('not fast-growing', 0)
val_data_labels = val_data['label'].replace('fast-growing', 1).replace('not fast-growing', 0)
train_data = train_data.drop(columns=['label' , 'Train_test_val'])
test_data = test_data.drop(columns=['label' , 'Train_test_val'])
val_data = val_data.drop(columns=['label' , 'Train_test_val'])
# + id="x-YfjlevqhYH" colab={"base_uri": "https://localhost:8080/"} outputId="028bf5dd-0148-4202-b215-d3cbd2e21a35"
train_data.shape
# + id="uedTJWQ5qm-p" colab={"base_uri": "https://localhost:8080/", "height": 250} outputId="c9b8cf5b-9b4c-4ea4-9aea-227d861896f1"
test_data.head(3)
# + id="Ecroh6kwqmbA"
# + [markdown] id="aIJGm_yzPSX9"
# # Modeling
# + [markdown] id="i4mqV3-NBOL1"
# ## Logistic Regression
# + id="MZ3Lp_F3BW5B" colab={"base_uri": "https://localhost:8080/"} outputId="43c51b3a-3b48-4681-dc49-306f1ae49803"
from sklearn.linear_model import LogisticRegression
model_lr = LogisticRegression()
np.random.seed(1111)
model_lr.fit(train_data, train_data_labels)
score_model_lr = model_lr.score(test_data, test_data_labels)
print(f'LogisticRegression accuracy is {score_model_lr*100}%')
# + [markdown] id="V4g8roqhtbTn"
# ## ExtraTreesClassifier
# + colab={"base_uri": "https://localhost:8080/"} id="zs8J-S-bCyQb" outputId="c1248adb-5747-4718-8f72-021b909c3cd6"
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_score, GridSearchCV
model_etc = ExtraTreesClassifier(max_depth=10,n_jobs=16, n_estimators = 10)
np.random.seed(1111)
model_etc.fit(train_data, train_data_labels)
score_model_etc = model_etc.score(test_data, test_data_labels)
print(f'ExtraTreesClassifier accuracy is {score_model_etc*100}%')
# + [markdown] id="JJMG872h_DB2"
# ## **GridSearchCV🍩**
# + id="JQ9yozbk82YD"
# Grid search over 4 estimators, based on https://www.davidsbatista.net/blog/2018/02/23/model_optimization/
# class EstimatorSelectionHelper:
# def __init__(self, models, params):
# self.models = models
# self.params = params
# self.keys = models.keys()
# self.grid_searches = {}
# def fit(self, X, y, **grid_kwargs):
# for key in self.keys:
# print('Running GridSearchCV for %s.' % key)
# model = self.models[key]
# params = self.params[key]
# grid_search = GridSearchCV(model, params, **grid_kwargs)
# grid_search.fit(X, y)
# self.grid_searches[key] = grid_search
# print('Done.')
# def score_summary(self, sort_by='mean_test_score'):
# frames = []
# for name, grid_search in self.grid_searches.items():
# frame = pd.DataFrame(grid_search.cv_results_)
# frame = frame.filter(regex='^(?!.*param_).*$')
# frame['estimator'] = len(frame)*[name]
# frames.append(frame)
# df = pd.concat(frames)
# df = df.sort_values([sort_by], ascending=False)
# df = df.reset_index()
# df = df.drop(['rank_test_score', 'index'], 1)
# columns = df.columns.tolist()
# columns.remove('estimator')
# columns = ['estimator']+columns
# df = df[columns]
# return df
# + id="sCMkNqk285Op"
# from sklearn import datasets
# breast_cancer = datasets.load_breast_cancer()
# X_cancer = breast_cancer.data
# y_cancer = breast_cancer.target
# from sklearn.ensemble import ExtraTreesClassifier
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.ensemble import AdaBoostClassifier
# from sklearn.ensemble import GradientBoostingClassifier
# from sklearn.svm import SVC
# models1 = {
# 'ExtraTreesClassifier': ExtraTreesClassifier(),
# 'RandomForestClassifier': RandomForestClassifier(),
# 'AdaBoostClassifier': AdaBoostClassifier(),
# 'GradientBoostingClassifier': GradientBoostingClassifier()
# }
# params1 = {
# 'ExtraTreesClassifier': { 'n_estimators': [16, 32] },
# 'RandomForestClassifier': [
# { 'n_estimators': [16, 32] },
# {'criterion': ['gini', 'entropy'], 'n_estimators': [8, 16]}],
# 'AdaBoostClassifier': { 'n_estimators': [16, 32] },
# 'GradientBoostingClassifier': { 'n_estimators': [16, 32], 'learning_rate': [0.8, 1.0] }
# }
# + id="vvC2x7YwFAWj"
# helper1 = EstimatorSelectionHelper(models1, params1)
# helper1.fit(train_data, train_data_labels, scoring='accuracy', n_jobs=2)
# + id="N7dWVBU2Wrlo"
# helper1.score_summary(sort_by='mean_test_score')
# + [markdown] id="2U5Ad50Zteub"
# ## RandomForestClassifier
# + id="w37gcJH-CyTP" colab={"base_uri": "https://localhost:8080/"} outputId="28751e57-aa45-4763-93dc-ab58726bd384"
from sklearn.ensemble import RandomForestClassifier
model_rfc = RandomForestClassifier(max_depth=10,n_jobs=16, n_estimators = 10)
np.random.seed(1111)
model_rfc.fit(train_data, train_data_labels)
score_model_rfc = model_rfc.score(test_data, test_data_labels)
print(f'model_rfc accuracy is {score_model_rfc*100}%')
# + [markdown] id="txZJE5mVtnXk"
# ## Bagged DT
# + id="qYw9SLOjCyWQ" colab={"base_uri": "https://localhost:8080/"} outputId="31369198-9061-4537-c08d-ab76a68d3f4d"
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bagger = BaggingClassifier(base_estimator=DecisionTreeClassifier(), max_samples = 0.8, max_features = 0.8, n_estimators=9, random_state=1111)
bagger.fit(train_data, train_data_labels)
score_bagger = bagger.score(test_data, test_data_labels)
print(f'BaggingClassifier accuracy is {score_bagger*100}%')
# + [markdown] id="1meW_CiktrCg"
# ## XGBoost
# + id="gvnW1sktWT37" colab={"base_uri": "https://localhost:8080/"} outputId="bd6483f3-c990-44f1-c483-8f1280e83596"
import xgboost as xgb
# if nothing seems to improve for 50 rounds - stop
early_stopping = 50
# train for training and test for ... validation!
eval_list = [(train_data, train_data_labels), (test_data, test_data_labels)]
# 1,2,3.. go!
params={
'n_estimators':100,
'objective': 'binary:logistic',
'learning_rate': 0.15,
'gamma':0.1,
'subsample':0.8,
'colsample_bytree':0.3,
'min_child_weight':3,
'max_depth':10,
}
xgb_model = xgb.XGBClassifier(**params)
bst = xgb_model.fit(train_data, train_data_labels,eval_set=eval_list, early_stopping_rounds=early_stopping)
# + id="6DcheSgrWT6x" colab={"base_uri": "https://localhost:8080/"} outputId="443412c3-807d-48fc-bd29-47c05d8b026a"
ypred = bst.predict(test_data)
ypred = ypred.astype(int)
n_correct = np.sum(ypred == test_data_labels)
xgboost_accuracy = n_correct/len(test_data_labels)
print(f'Xgboost accuracy is {xgboost_accuracy}') # 0.9748992515831894
# + [markdown] id="k2fi-7XYLfsL"
# ## Deep learning
# + id="lzY0Vc88UogQ"
# Import `Sequential` from `keras.models`
from keras.models import Sequential
# Import `Dense` from `keras.layers`
from keras.layers import Dense
from tensorflow.keras.optimizers import Adam
# Initialize the constructor
model = Sequential()
# Add an input layer
model.add(Dense(12, activation='relu', input_shape=(111,)))
# Add one hidden layer
model.add(Dense(8, activation='relu'))
# Add an output layer
model.add(Dense(1, activation='sigmoid'))
# + id="LggFnsYqYDn0"
model.compile(loss='binary_crossentropy',
optimizer='adam',
#optimizer=Adam(learning_rate=0.001),
metrics=['accuracy'])
history = model.fit(train_data,train_data_labels,epochs=20, batch_size=100, verbose=0, validation_split=0.1)
# + id="-yiGQsh8hxvd" colab={"base_uri": "https://localhost:8080/"} outputId="e5931ec6-302f-4a60-bbd8-db012c356070"
# evaluate the model
scores = model.evaluate(test_data, test_data_labels)
print(f"Accuracy is {scores[1]*100}%")
# + [markdown] id="UnQIsALQ_tdw"
# # Evaluation
# + id="8JDRxKAS_gm0" colab={"base_uri": "https://localhost:8080/"} outputId="1cdbb13f-de0d-450c-ca14-e5391f82745f"
from sklearn import metrics
#val_data = val_data.drop(['lr', 'etc', 'rfc', 'bagger', 'lr_pred', 'etc_pred',
# 'rfc_pred', 'bagger_pred', 'xgboost_pred','xgboost', 'deep', 'deep_pred'], axis=1)
N = val_data.shape[1]
val_data['lr'] = model_lr.predict_proba(val_data.iloc[:, : N])[:,1]
val_data['etc'] = model_etc.predict_proba(val_data.iloc[:, : N])[:,1]
val_data['rfc'] = model_rfc.predict_proba(val_data.iloc[:, : N])[:,1]
val_data['bagger'] = bagger.predict_proba(val_data.iloc[:, : N])[:,1]
val_data['lr_pred'] = model_lr.predict(val_data.iloc[:, : N])
val_data['etc_pred'] = model_etc.predict(val_data.iloc[:, : N])
val_data['rfc_pred'] = model_rfc.predict(val_data.iloc[:, : N])
val_data['bagger_pred'] = bagger.predict(val_data.iloc[:, : N])
val_data['xgboost_pred'] = bst.predict(val_data.iloc[:, : N])
val_data['xgboost'] = bst.predict_proba(val_data.iloc[:, : N])[:,1]
val_data['deep'] = model.predict(val_data.iloc[:, : N])
val_data['deep_pred'] = (val_data['deep'] > 0.5).astype("int32")
# accuracy
print(f"LogisticRegression accuracy is {np.round(metrics.accuracy_score(val_data_labels, val_data['lr_pred']),3)}")
print(f"ExtraTreesClassifier accuracy is {np.round(metrics.accuracy_score(val_data_labels, val_data['etc_pred']),3)}")
print(f"RandomForestClassifier accuracy is {np.round(metrics.accuracy_score(val_data_labels, val_data['rfc_pred']),3)}")
print(f"BaggingClassifier decision tree accuracy is {np.round(metrics.accuracy_score(val_data_labels, val_data['bagger_pred']),3)}")
print(f"Deep learn accuracy is {np.round(metrics.accuracy_score(val_data_labels, val_data['deep_pred']),3)}")
print(f"Xgboost accuracy is {np.round(metrics.accuracy_score(val_data_labels, val_data['xgboost_pred']),3)}")
print("")
# AUC
print(f"AUC of LogisticRegression is {np.round(metrics.roc_auc_score(val_data_labels, val_data['lr']),3)}")
print(f"AUC of ExtraTreesClassifier is {np.round(metrics.roc_auc_score(val_data_labels, val_data['etc']),3)}")
print(f"AUC of RandomForestClassifier is {np.round(metrics.roc_auc_score(val_data_labels, val_data['rfc']),3)}")
print(f"AUC of BaggingClassifier decision tree is {np.round(metrics.roc_auc_score(val_data_labels, val_data['bagger']),3)}")
print(f"AUC of Deep learn is {np.round(metrics.roc_auc_score(val_data_labels, val_data['deep']),3)}")
print(f"AUC of Xgboost is {np.round(metrics.roc_auc_score(val_data_labels, val_data['xgboost']),3)}")
print("")
# F1
print(f"F1 weighted of LogisticRegression is {np.round(metrics.f1_score(val_data_labels, val_data['lr_pred'] , average='weighted'),3)}")
print(f"F1 weighted of ExtraTreesClassifier is {np.round(metrics.f1_score(val_data_labels, val_data['etc_pred'] , average='weighted'),3)}")
print(f"F1 weighted of RandomForestClassifier is {np.round(metrics.f1_score(val_data_labels, val_data['rfc_pred'] , average='weighted'),3)}")
print(f"F1 weighted of BaggingClassifier decision tree is {np.round(metrics.f1_score(val_data_labels, val_data['bagger_pred'] , average='weighted'),3)}")
print(f"F1 weighted of Deep learning is {np.round(metrics.f1_score(val_data_labels, val_data['deep_pred'] , average='weighted'),3)}")
print(f"F1 weighted of Xgboost is {np.round(metrics.f1_score(val_data_labels, val_data['xgboost_pred'] , average='weighted'),3)}")
# + [markdown] id="yLWRgX45esAu"
# # Visualization
# + id="tgaGadQEeyoa" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="8bae1f24-29c6-4e6f-e4a3-c172ee5ca8c5"
# accuracy
accuracies = [
np.round(metrics.accuracy_score(val_data_labels, val_data['lr_pred']),3),
np.round(metrics.accuracy_score(val_data_labels, val_data['etc_pred']),3),
np.round(metrics.accuracy_score(val_data_labels, val_data['rfc_pred']),3),
np.round(metrics.accuracy_score(val_data_labels, val_data['bagger_pred']),3),
np.round(scores[1],3),
np.round(xgboost_accuracy,3)
]
# hack to visualize without running everything
#accuracies = [0.917,0.917,0.919,0.959,0.917,0.956]
acc_vis_data = pd.DataFrame({"Accuracies":accuracies,"Algorithm":['Logistic regression', 'ExtraTreesClassifier', 'RandomForestClassifier', 'BaggingClassifier', 'Deep learn', 'Xgboost']})
acc_vis_data = acc_vis_data.sort_values(["Accuracies"], ascending=False)
g = sns.barplot(x="Accuracies", y="Algorithm", data=acc_vis_data, palette="Set3")
g.set_xlabel("Accuracy")
for container in g.containers:
g.bar_label(container, label_type='edge')
g = g.set_title("Accuracy scores")
sns.despine(bottom = True, left = True)
plt.show()
# + id="UgnS8pd6Cg_N" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="92af29e5-2b8d-48ee-c56d-5f534e4cf939"
# AUC
AUCs = [
np.round(metrics.roc_auc_score(val_data_labels, val_data['lr']),3),
np.round(metrics.roc_auc_score(val_data_labels, val_data['etc']),3),
np.round(metrics.roc_auc_score(val_data_labels, val_data['rfc']),3),
np.round(metrics.roc_auc_score(val_data_labels, val_data['bagger']),3),
np.round(metrics.roc_auc_score(val_data_labels, val_data['deep']),3),
np.round(metrics.roc_auc_score(val_data_labels, val_data['xgboost']),3),
]
# hack to visualize without running everything
#AUCs = [0.595,0.634,0.877,0.955,0.5]
AUCs_vis_data = pd.DataFrame({"AUCs":AUCs,"Algorithm":['Linear regression', 'ExtraTreesClassifier', 'RandomForestClassifier', 'BaggingClassifier', 'Deep learn', 'XGBoost']})
AUCs_vis_data = AUCs_vis_data.sort_values(["AUCs"], ascending=False)
g = sns.barplot("AUCs","Algorithm",data = AUCs_vis_data, palette="Set3")
g.set_xlabel("AUC")
for container in g.containers:
g.bar_label(container, label_type='edge')
g = g.set_title("AUC scores")
sns.despine(bottom = True, left = True)
plt.show()
# + id="arp-3AJiDliC" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="cff7e5f3-5f1e-435c-b2a8-1b2c037705af"
# F1
f1 = [
np.round(metrics.f1_score(val_data_labels, val_data['lr_pred'] , average='weighted'),3),
np.round(metrics.f1_score(val_data_labels, val_data['etc_pred'] , average='weighted'),3),
np.round(metrics.f1_score(val_data_labels, val_data['rfc_pred'] , average='weighted'),3),
np.round(metrics.f1_score(val_data_labels, val_data['bagger_pred'] , average='weighted'),3),
np.round(metrics.f1_score(val_data_labels, val_data['deep_pred'] , average='weighted'),3),
np.round(metrics.f1_score(val_data_labels, val_data['xgboost_pred'] , average='weighted'),3)
]
# hack to visualize without running everything
#f1 = [0.879,0.878,0.881,0.955,0.878,0.951]
f1_vis_data = pd.DataFrame({"F1":f1,"Algorithm":['Logistic regression', 'ExtraTreesClassifier', 'RandomForestClassifier', 'BaggingClassifier', 'Deep learning', 'XGBoost']})
f1_vis_data = f1_vis_data.sort_values(["F1"], ascending=False)
g = sns.barplot("F1","Algorithm",data = f1_vis_data, palette="Set3")
g.set_xlabel("F1")
for container in g.containers:
g.bar_label(container, label_type='edge')
g = g.set_title("F1 scores")
sns.despine(bottom = True, left = True)
plt.show()
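# +
# The three plotting cells above repeat the same barplot pattern. A small helper
# like this (a sketch added for illustration, not part of the original analysis;
# the function and argument names are made up here) could replace them.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

def plot_metric_bars(values, algorithms, metric_name):
    """Draw a sorted horizontal bar chart of one metric across all models."""
    vis_data = pd.DataFrame({metric_name: values, "Algorithm": algorithms})
    vis_data = vis_data.sort_values([metric_name], ascending=False)
    ax = sns.barplot(x=metric_name, y="Algorithm", data=vis_data, palette="Set3")
    for container in ax.containers:
        ax.bar_label(container, label_type='edge')
    ax.set_title(f"{metric_name} scores")
    sns.despine(bottom=True, left=True)
    plt.show()

# Example reuse with the lists built above:
# plot_metric_bars(f1, ['Logistic regression', 'ExtraTreesClassifier', 'RandomForestClassifier',
#                       'BaggingClassifier', 'Deep learning', 'XGBoost'], "F1")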
# + [markdown] id="yLjvuvVTPaMh"
# # Old crap
# + id="c6abe546-0261-44d3-8119-2bb05b058777" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="a6e3d750-fa80-4d57-e771-3687d8cc7a8f"
"""
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
test = result.drop(columns=["Registrikood"])
test = test.fillna(0)
scale= StandardScaler()
scaled_data = scale.fit_transform(test)
kmeans = KMeans(n_clusters=3, random_state=0).fit(scaled_data)
clusters = kmeans.labels_
count_arr = np.bincount(clusters)
count_arr
"""
# + id="p9LPPrncEXfy" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="fb75f958-01a2-4c49-f075-63d05ce43e4c"
"""
itemindex = np.where(clusters==2)
itemindex
"""
# + id="al6kmtzXIPUo" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="f48e48ff-86e3-4ec1-afc7-317b4f8e122f"
"""
result.loc[821]
"""
| fast_growing_comp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cp-sadag/deehive/blob/master/PyTorch_cifar10_tutorial_ROB313_2020.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] deletable=true editable=true id="4zsDWUk5cavn"
# # Deep Learning for Image Classification
#
# Welcome to the deep learning for image classification tutorial!
# **In this notebook, you will**:
# - Learn the basics of PyTorch, a powerful but easy-to-use package for scientific computing (and deep learning)
# - Learn how to build and train a convolutional neural network for image classification.
#
# If you have never used Jupyter or Colab notebooks, [here](https://colab.research.google.com/notebooks/welcome.ipynb) is a short intro.
#
#
# ## I. PyTorch Tutorial
#
# We will briefly go through the basics of the PyTorch package, playing with toy examples.
#
# If you already know how to use PyTorch, you can go directly to the second part of this tutorial.
#
# ## II. Training a classifier
#
# In this part, we will train a Convolutional Neural Network to classify images of 10 different classes (dogs, cats, car, ...) and see how our model performs on the test set.
#
#
# ## III. Exploring CNN Architectures
#
# This is the part where you get your hands dirty ;). Your mission is to experiment with different CNN architectures and tune hyperparameters in order to obtain the best accuracy on the test set!
#
# + [markdown] id="JgRltjas9PpN"
# The following command sets the backend of matplotlib to the 'inline' backend so that the output of plotting commands is displayed inline within frontends like the Jupyter notebook, directly below the code cell that produced it:
# + deletable=true editable=true id="GkjN23FKt2D-"
# %matplotlib inline
# + [markdown] id="YAz-fhRRdFaR"
# ### Plotting functions and useful imports
#
# You can skip this part
# + id="nnee2WPudA9K"
# Python 2/3 compatibility
from __future__ import print_function, division
import itertools
import time
import numpy as np
import matplotlib.pyplot as plt
# Colors from Colorbrewer Paired_12
colors = [[31, 120, 180], [51, 160, 44]]
colors = [(r / 255, g / 255, b / 255) for (r, g, b) in colors]
# functions to show an image
def imshow(img):
"""
:param img: (PyTorch Tensor)
"""
# unnormalize
img = img / 2 + 0.5
# Convert tensor to numpy array
npimg = img.numpy()
# Color channel first -> color channel last
plt.imshow(np.transpose(npimg, (1, 2, 0)))
def plot_losses(train_history, val_history):
x = np.arange(1, len(train_history) + 1)
plt.figure(figsize=(8, 6))
plt.plot(x, train_history, color=colors[0], label="Training loss", linewidth=2)
plt.plot(x, val_history, color=colors[1], label="Validation loss", linewidth=2)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.title("Evolution of the training and validation loss")
plt.show()
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
:param cm: (numpy matrix) confusion matrix
:param classes: [str]
:param normalize: (bool)
:param title: (str)
:param cmap: (matplotlib color map)
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# + [markdown] deletable=true editable=true id="aH_K9V7icav6"
# # I. What is PyTorch ?
#
# It’s a Python based scientific computing package targeted at two sets of audiences:
#
# - A replacement for numpy to use the power of GPUs
# - A deep learning research platform that provides maximum flexibility and speed
#
#
# ## PyTorch Basics
#
# In the next steps, we will briefly see how to use PyTorch and exploit its power:
#
# 1. PyTorch Installation
# 2. PyTorch Tensors
# 3. Numpy Bridge
# 4. Automatic differentiation
# 5. PyTorch and GPU (CUDA support)
#
#
# ### 1. Install PyTorch and Torchvision
#
#
# + id="e0y5PLM6ciB5" colab={"base_uri": "https://localhost:8080/"} outputId="f1fe85e1-e02d-40a6-bfd7-114e435e7914"
# !pip install torch #http://download.pytorch.org/whl/cu92/torch-0.4.1-cp36-cp36m-linux_x86_64.whl
# !pip install torchvision
# + deletable=true editable=true id="LcGVaagRcav8"
import numpy as np
# Import torch and create the alias "th"
# instead of writing torch.name_of_a_method() , we only need to write th.name_of_a_method()
# (similarly to numpy imported as np)
import torch as th
# + deletable=true editable=true id="g2-brDDHcawE" colab={"base_uri": "https://localhost:8080/"} outputId="a173ce78-7361-4f18-d92b-04c9099ff68b"
# Create tensor of ones (FloatTensor by default)
ones = th.ones(3, 2)
print(ones)
# + [markdown] deletable=true editable=true id="6RvPibnScawC"
# ### 2. PyTorch Tensors
#
# A `torch.Tensor` is a multi-dimensional matrix containing elements of a single data type.
#
# Tensors are similar to numpy’s ndarrays, but they have a super-power: Tensors can also be used on a GPU to accelerate computing.
# + [markdown] deletable=true editable=true id="QcJgJQERcawQ"
# #### Tensor Shape
# To know the shape of a given tensor, you can use the `.size()` method (the numpy equivalent is `.shape`)
# + deletable=true editable=true id="SI96-W9acawS" colab={"base_uri": "https://localhost:8080/"} outputId="c88269cb-ca04-48c2-b304-5ff13f3926e7"
# Display the shape of a tensor
# it can be used as a tuple
print("Tensor Shape: {}".format(ones.size()))
# + [markdown] deletable=true editable=true id="pUPWrNarcawZ"
# #### Reshape tensors
#
# To reshape tensors (e.g. flatten a 3D tensor to a 1D array), you can use the `.view()` method:
#
# - **x.view(new_shape)**: Returns a new tensor with the same data but different size. It is the equivalent of numpy function *reshape* (Gives a new shape to an array without changing its data.). You can read the full documentation [here.](http://pytorch.org/docs/master/tensors.html#torch.Tensor.view)
#
# [WARNING] when specifying a new shape, you have to make sure that the total number of elements stays the same.
# For example, a 2D matrix of size 3x3 can only be viewed as a 1D array of size $3 \cdot 3 = 9$
# + deletable=true editable=true id="vX-oxI6Vcawb" colab={"base_uri": "https://localhost:8080/"} outputId="244cbff1-8e19-4ff2-d4be-393e83b01c0b"
# Create a 3D tensor of size 3x2x2
zeros_3d_tensor = th.zeros(3, 2, 2)
print("Original size:", zeros_3d_tensor.size())
# Reshape it to a 1D array of size 3*2*2 = 12
zeros_1d_array = zeros_3d_tensor.view(3 * 2 * 2)
print("Reshaped tensor:", zeros_1d_array.size())
# Let's view our original tensor as a 2D matrix
# If you want PyTorch to guess one remaining dimension,
# you specify '-1' instead of the actual size
zeros_2d_matrix = zeros_3d_tensor.view(-1, 2 * 2)
print("Matrix shape:", zeros_2d_matrix.size())
# + [markdown] deletable=true editable=true id="kTO_FFswcawj"
# #### Basic Operations on tensors
#
# Tensors support all basic linear algebra operations. You can read the full documentation [here](http://pytorch.org/docs/master/tensors.html)
# + deletable=true editable=true id="Ay7LvYeVcawl" colab={"base_uri": "https://localhost:8080/"} outputId="e1e5601b-6e46-49a6-e681-33fa1e20d9b7"
2 * ones + 1
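# +
# A few more basic operations on the same tensors (a quick illustration added here,
# not from the original tutorial): element-wise product, reductions, transpose and
# matrix multiplication.
rand_tensor = th.rand(3, 2)                 # random tensor with the same shape as `ones`
print(ones * rand_tensor)                   # element-wise product
print(rand_tensor.sum(), rand_tensor.mean())# reductions
print(rand_tensor.t().size())               # transpose: 3x2 -> 2x3
print(th.mm(rand_tensor.t(), ones))         # matrix multiplication: (2x3) x (3x2) -> 2x2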
# + [markdown] deletable=true editable=true id="OD7ZOT4jcaws"
# PyTorch tensors also support numpy indexing:
# + deletable=true editable=true id="srzDzj_ocawu" colab={"base_uri": "https://localhost:8080/"} outputId="684c46de-18c9-4f14-935a-04c1c516cec8"
print("\n Indexing Demo:")
print(ones[:, 1])
# + [markdown] deletable=true editable=true id="xrjqKguqcaw0"
# ### 3. Numpy Bridge
# WARNING: PyTorch Tensors are different from numpy arrays
# even if they have a lot in common
#
# However, it is **easy with PyTorch to transform Tensors to Numpy arrays and vice versa**
# + [markdown] deletable=true editable=true id="gVAntrTVcaw3"
# #### Numpy <-> PyTorch
#
# Creating PyTorch tensors from numpy array is done via the `torch.from_numpy()` function
# (here `th.from_numpy()` because we renamed *torch* as *th*)
#
# To transform a PyTorch tensor to a numpy array, you can simply call `.numpy()` method.
# + deletable=true editable=true id="t2ENcAKOcaw5" colab={"base_uri": "https://localhost:8080/"} outputId="36c74338-1b12-475f-b81c-c63ef767dcd7"
# np.float32 -> th.FloatTensor
ones_matrix = np.ones((2, 2), dtype=np.float32)
# the matrix is passed by reference:
# if we modify the original numpy array, the tensor is also edited
ones_tensor = th.from_numpy(ones_matrix)
# Convert back to a numpy matrix
numpy_matrix = ones_tensor.numpy()
print("PyTorch Tensor:")
print(ones_tensor)
print("Numpy Matrix:")
print(numpy_matrix)
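# +
# Quick check of the shared memory (an added illustration, not from the original
# tutorial): modifying the numpy array in place also changes the tensor, and the
# array returned by .numpy() as well.
ones_matrix[0, 0] = 42.0
print(ones_tensor)
print(numpy_matrix)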
# + [markdown] deletable=true editable=true id="Y0Itjyg-caxD"
# ### 4. Automatic Differentiation
#
# PyTorch tensors can **automatically compute gradients**. That is particularly useful for backpropagation.
#
# Once you finish your computation you can call `.backward()` and have all the gradients computed automatically.
#
# You can access the gradient w.r.t. this variable using `.grad`.
#
# + deletable=true editable=true id="WrPNcIpYcaxK" colab={"base_uri": "https://localhost:8080/"} outputId="9112b95d-f99b-4d8d-9df2-a121cec32a95"
# We need to specify that we want to compute the gradient
# as it requires extra memory and computation
ones_tensor = th.ones(2,2, requires_grad=True)
print(ones_tensor)
# + [markdown] deletable=true editable=true id="IEZDUibxcaxj"
# To demonstrate automatic differentiation in action,
# let's define a simple linear transformation of a variable $x$:
#
# $$y = a \cdot x + b$$
#
# PyTorch will allow us to automatically compute $$\frac{dy}{dx}$$
# + deletable=true editable=true id="A4j85JjZcaxl" colab={"base_uri": "https://localhost:8080/"} outputId="009fef9e-ecf9-4a35-f86a-8fdf71a04eee"
# Create a tensor and tell PyTorch
# that we want to compute the gradient
x = th.ones(1, requires_grad=True)
# Transformation constants
a = 4
b = 5
# Define the transformation and store the result
# in a new variable
y = a * x + b
print(y)
# + [markdown] deletable=true editable=true id="_mxnlvwxcaxq"
# Let's backprop!
# + deletable=true editable=true id="X1i-pN-Fcaxs"
y.backward()
# + [markdown] deletable=true editable=true id="skgIGZdmcaxw"
# `x.grad` contains the gradient:
#
# $$\frac{dy}{dx} = a$$
#
# because:
#
# $$y = a \cdot x + b$$
# + deletable=true editable=true id="_TYbuwsXcaxx" colab={"base_uri": "https://localhost:8080/"} outputId="825d34c7-4560-442a-d0fa-ddf76f37afab"
x.grad
# + [markdown] deletable=true editable=true id="ggu-PBGvcax3"
# You can now change the values of $a$ and $b$ and see their effect on the gradient
# (HINT: `x.grad` only depends on the value of `a`)
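# +
# One way to try it (a small sketch, not part of the original exercise):
# rebuild the graph with different constants and inspect the gradient again.
x = th.ones(1, requires_grad=True)
a, b = 10, -3   # only `a` should appear in the gradient
y = a * x + b
y.backward()
print(x.grad)   # tensor([10.])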
# + [markdown] deletable=true editable=true id="8iPn0C59cax5"
# ### 5. PyTorch and GPU (CUDA support)
#
# Google colab provides a CUDA enabled GPU, so we are going to use its power.
# You can move tensors to the GPU by simply using the `to()` method.
# Otherwise, PyTorch will use the CPU.
#
# Here, we will demonstrate the usefulness of the GPU on a simple matrix multiplication:
# + id="EwF6ePTpeefQ" colab={"base_uri": "https://localhost:8080/"} outputId="70967875-3c90-42a6-cf86-d85e07ce2a44"
if th.cuda.is_available():
# Create tensors
x = th.ones(1000, 1000)
y = 2 * x + 3
# Do the calculation on cpu (default)
start_time = time.time()
# Matrix multiplication (for benchmark purpose)
results = th.mm(x, y)
time_cpu = time.time() - start_time
# Do the same calculation but on the gpu
# First move tensors to gpu
x = x.to("cuda")
y = y.to("cuda")
start_time = time.time()
# Matrix multiplication (for benchmark purpose)
results = th.mm(x, y)
time_gpu = time.time() - start_time
print("Time on CPU: {:.5f}s \t Time on GPU: {:.5f}s".format(time_cpu, time_gpu))
print("Speed up: Computation was {:.0f}X faster on GPU!".format(time_cpu / time_gpu))
else:
print("You need to enable GPU accelaration in colab (runtime->change runtime type)")
# + [markdown] id="E-AOzDy9lFwi"
# As expected, matrix multiplication is way faster on a GPU, so we'd better use it.
# + [markdown] deletable=true editable=true id="0kqEBjG6t2Eh"
#
# # II. Training a classifier
#
#
# For this tutorial, we will use the CIFAR10 dataset.
# There are 10 classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’,
# ‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of
# size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
#
#
# ![CIFAR10](http://pytorch.org/tutorials/_images/cifar10.png)
#
#
# Training an image classifier
# ----------------------------
#
# We will do the following steps in order:
#
# 1. Load and normalize the CIFAR10 training and test datasets using
# ``torchvision``
# 2. Define a Convolution Neural Network
# 3. Define a loss function
# 4. Train the network on the training data
# 5. Test the network on the test data
# + [markdown] deletable=true editable=true id="UWTdj2uYcax7"
# ### 1. Loading and normalizing CIFAR10 Dataset
#
# Using ``torchvision``, it’s extremely easy to load CIFAR10.
# + deletable=true editable=true id="KRrvrIi0t2Em"
import torch
import torchvision
import torchvision.transforms as transforms
# + [markdown] deletable=true editable=true id="iX2ltR_zcayA"
# Seed the random generator to have reproducible results:
# + deletable=true editable=true id="335xvR6acayB"
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
if th.cuda.is_available():
# Make CuDNN Determinist
th.backends.cudnn.deterministic = True
th.cuda.manual_seed(seed)
# Define default device, we should use the GPU (cuda) if available
device = th.device("cuda" if th.cuda.is_available() else "cpu")
# + [markdown] deletable=true editable=true id="7EzIeyD4cayG"
# ### Define subset of the dataset (so it is faster to train)
# + deletable=true editable=true id="Nwu-wWh3cayI"
from torch.utils.data.sampler import SubsetRandomSampler
n_training_samples = 20000 # Max: 50 000 - n_val_samples
n_val_samples = 5000
n_test_samples = 5000
train_sampler = SubsetRandomSampler(np.arange(n_training_samples, dtype=np.int64))
val_sampler = SubsetRandomSampler(np.arange(n_training_samples, n_training_samples + n_val_samples, dtype=np.int64))
test_sampler = SubsetRandomSampler(np.arange(n_test_samples, dtype=np.int64))
# (In the last case, the indices do not need to account for the training ones because the train=False parameter in datasets.CIFAR10 will select from the test set)
# + [markdown] deletable=true editable=true id="evFXNmbst2Ez"
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1]
#
#
# + deletable=true editable=true id="ZJ-hYN00t2E2" colab={"base_uri": "https://localhost:8080/", "height": 99, "referenced_widgets": ["38fc72a174824e3c8ff218f48949da0d", "63b1fe49eecc402782646ffda444f970", "66d03931cca744a795c4ebe5c0c836d7", "a07651a5b3a44436adbef486ecf99f3d", "3fdb78add0394ba5a4de876fbb596b70", "3c768c72053c491094d8b0697007714e", "b8f03628ce2f49e48dac36dc8148bf12", "2b956fec680f4cb7b71d80fb49021af0"]} outputId="f64809a0-920b-4eac-f783-4a40ea39ff15"
num_workers = 2
test_batch_size = 4
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))])
train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=test_batch_size, sampler=train_sampler,
num_workers=num_workers)
test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=test_batch_size, sampler=test_sampler,
num_workers=num_workers)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# + [markdown] deletable=true editable=true id="cGWVnBOft2FI"
# Let us show some of the training images, for fun.
#
#
# + deletable=true editable=true id="68OfC35ut2FM" colab={"base_uri": "https://localhost:8080/", "height": 155} outputId="e4e7ad3d-0a95-4a9e-f85e-26e6b61b76d7"
# get some random training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('{:>10}'.format(classes[labels[j]]) for j in range(test_batch_size)))
# + [markdown] deletable=true editable=true id="8ULHEu5Zt2Fa"
# ### 2. Define a Convolution Neural Network
#
# + deletable=true editable=true id="6k6rJyTTcayi"
# Useful imports
import torch.nn as nn
import torch.nn.functional as F
# + [markdown] deletable=true editable=true id="0JcmlEe8t2Fe"
# #### Forward propagation
#
# In PyTorch, there are built-in functions that carry out the convolution steps for you.
#
# - **nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0):** Convolution layer. You can read the full documentation [here](http://pytorch.org/docs/master/nn.html#conv2d)
#
# - **nn.MaxPool2d(kernel_size, stride=None, padding=0):** Max pooling layer. You can read the full documentation [here](http://pytorch.org/docs/master/nn.html#maxpool2d)
#
# - **F.relu(Z1):** computes the elementwise ReLU of Z1 (which can be any shape). You can read the full documentation [here.](http://pytorch.org/docs/master/nn.html#torch.nn.ReLU)
#
# - **x.view(new_shape)**: Returns a new tensor with the same data but different size. It is the equivalent of numpy function *reshape* (Gives a new shape to an array without changing its data). You can read the full documentation [here.](http://pytorch.org/docs/master/tensors.html#torch.Tensor.view)
#
# - **nn.Linear(in_features, out_features):** Applies a linear transformation to the incoming data: $y = Ax + b$, it is also called a fully connected layer. You can read the full documentation [here.](http://pytorch.org/docs/master/nn.html#linear-layers)
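# +
# A quick shape sanity check before building the network (an added illustration,
# not part of the original tutorial): the padded 3x3 convolution keeps the 32x32
# spatial size, then max pooling halves the height and width.
dummy = th.zeros(1, 3, 32, 32)   # (batch, channels, height, width)
conv = nn.Conv2d(3, 18, kernel_size=3, stride=1, padding=1)
pool = nn.MaxPool2d(kernel_size=2, stride=2)
print(conv(dummy).size())        # torch.Size([1, 18, 32, 32])
print(pool(conv(dummy)).size())  # torch.Size([1, 18, 16, 16])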
# + [markdown] deletable=true editable=true id="rbykSRDTcaym"
# #### Simple Convolutional Neural Network
#
# ConvNet with one convolution layer followed by a max pooling operation,
# one fully connected layer and an output layer
# + deletable=true editable=true id="X4pljAWycayn"
class SimpleConvolutionalNetwork(nn.Module):
def __init__(self):
super(SimpleConvolutionalNetwork, self).__init__()
self.conv1 = nn.Conv2d(3, 18, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
# cf comments in forward() to have step by step comments
# on the shape (how we pass from a 3x32x32 input image to a 18x16x16 volume)
self.fc1 = nn.Linear(18 * 16 * 16, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, x):
"""
Forward pass,
x shape is (batch_size, 3, 32, 32)
(color channel first)
in the comments, we omit the batch_size in the shape
"""
# shape : 3x32x32 -> 18x32x32
x = F.relu(self.conv1(x))
# 18x32x32 -> 18x16x16
x = self.pool(x)
# 18x16x16 -> 4608
x = x.view(-1, 18 * 16 * 16)
# 4608 -> 64
x = F.relu(self.fc1(x))
# 64 -> 10
# The softmax non-linearity is applied later (cf createLossAndOptimizer() fn)
x = self.fc2(x)
return x
# + [markdown] deletable=true editable=true id="4m-VHCtRcayr"
# #### Linear Classifier
# + deletable=true editable=true id="Rj-togN6cays"
class LinearClassifier(nn.Module):
"""
Linear Classifier
"""
def __init__(self):
super(LinearClassifier, self).__init__()
self.linear = nn.Linear(32 * 32 * 3, 10)
def forward(self, x):
# Flatten input 3x32x32 -> 3072
x = x.view(x.size(0), -1)
return self.linear(x)
# + [markdown] deletable=true editable=true id="2SQi9Xf-t2Fu"
# ### 3. Define a loss function and optimizer
#
# Let's use a Classification Cross-Entropy loss and ADAM (optionally, SGD with momentum). You can read more about [optimization methods](https://pytorch.org/docs/stable/optim.html).
#
#
# + deletable=true editable=true id="DOUiPtZQt2Fx"
import torch.optim as optim
def createLossAndOptimizer(net, learning_rate=0.001):
# it combines softmax with negative log likelihood loss
criterion = nn.CrossEntropyLoss()
#optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
optimizer = optim.Adam(net.parameters(), lr=learning_rate)
return criterion, optimizer
# + [markdown] deletable=true editable=true id="saJW5bKRt2F9"
# ### 4. Train the network
#
#
# This is when things start to get interesting.
# We simply have to loop over our data iterator, feed the inputs to the network, and optimize
#
#
# + [markdown] deletable=true editable=true id="mNf1e8QZcay1"
# #### Data loader
# + deletable=true editable=true id="EqDD8_z8cay2"
def get_train_loader(batch_size):
return torch.utils.data.DataLoader(train_set, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers)
# Use larger batch size for validation to speed up computation
val_loader = torch.utils.data.DataLoader(train_set, batch_size=128, sampler=val_sampler,
num_workers=num_workers)
# + [markdown] deletable=true editable=true id="yTDHHbLpcay5"
# #### Training loop
# The training script: it takes ~10s per epoch with batch_size = 32
# + deletable=true editable=true id="dATbDR5pt2GE"
def train(net, batch_size, n_epochs, learning_rate):
"""
Train a neural network and print statistics of the training
:param net: (PyTorch Neural Network)
:param batch_size: (int)
:param n_epochs: (int) Number of iterations on the training set
:param learning_rate: (float) learning rate used by the optimizer
"""
print("===== HYPERPARAMETERS =====")
print("batch_size=", batch_size)
print("n_epochs=", n_epochs)
print("learning_rate=", learning_rate)
print("=" * 30)
train_loader = get_train_loader(batch_size)
n_minibatches = len(train_loader)
criterion, optimizer = createLossAndOptimizer(net, learning_rate)
# Init variables used for plotting the loss
train_history = []
val_history = []
training_start_time = time.time()
best_error = np.inf
best_model_path = "best_model.pth"
# Move model to gpu if possible
net = net.to(device)
for epoch in range(n_epochs): # loop over the dataset multiple times
running_loss = 0.0
print_every = n_minibatches // 10
start_time = time.time()
total_train_loss = 0
for i, (inputs, labels) in enumerate(train_loader):
# Move tensors to correct device
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
total_train_loss += loss.item()
# print every 10th of epoch
if (i + 1) % (print_every + 1) == 0:
print("Epoch {}, {:d}% \t train_loss: {:.2f} took: {:.2f}s".format(
epoch + 1, int(100 * (i + 1) / n_minibatches), running_loss / print_every,
time.time() - start_time))
running_loss = 0.0
start_time = time.time()
train_history.append(total_train_loss / len(train_loader))
total_val_loss = 0
# Do a pass on the validation set
# We don't need to compute gradient,
# we save memory and computation using th.no_grad()
with th.no_grad():
for inputs, labels in val_loader:
# Move tensors to correct device
inputs, labels = inputs.to(device), labels.to(device)
# Forward pass
predictions = net(inputs)
val_loss = criterion(predictions, labels)
total_val_loss += val_loss.item()
val_history.append(total_val_loss / len(val_loader))
# Save model that performs best on validation set
if total_val_loss < best_error:
best_error = total_val_loss
th.save(net.state_dict(), best_model_path)
print("Validation loss = {:.2f}".format(total_val_loss / len(val_loader)))
print("Training Finished, took {:.2f}s".format(time.time() - training_start_time))
# Load best model
net.load_state_dict(th.load(best_model_path))
return train_history, val_history
# + deletable=true editable=true id="cJX2anB5cay_" colab={"base_uri": "https://localhost:8080/"} outputId="37170dbf-e26c-4352-ac61-de30499dd34a"
net = SimpleConvolutionalNetwork()
train_history, val_history = train(net, batch_size=32, n_epochs=10, learning_rate=0.001)
# + [markdown] deletable=true editable=true id="UkVKNPtccazC"
# Now, let's look at the evolution of the losses
# + deletable=true editable=true id="4CUQt-HJcazF" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="6238707a-cb8b-4222-d509-7c84640c3f12"
plot_losses(train_history, val_history)
# + [markdown] deletable=true editable=true id="O90WcUTwt2GU"
# ### 5. Test the network on the test data
#
#
# We have trained the network for 10 epochs over the training dataset.
# But we need to check if the network has learnt anything at all.
#
# We will check this by predicting the class label that the neural network
# outputs, and checking it against the ground-truth. If the prediction is
# correct, we add the sample to the list of correct predictions.
#
# Okay, first step. Let us display an image from the test set to get familiar.
#
#
# + deletable=true editable=true id="V4vljwBlt2GX" colab={"base_uri": "https://localhost:8080/", "height": 189} outputId="f1c808f4-27b9-4274-975c-1bb960c14fed"
try:
images, labels = next(iter(test_loader))
except EOFError:
pass
# print images
imshow(torchvision.utils.make_grid(images))
print("Ground truth:\n")
print(' '.join('{:>10}'.format(classes[labels[j]]) for j in range(test_batch_size)))
# + [markdown] deletable=true editable=true id="KpmaQT4Zt2Gn"
# Okay, now let us see what the neural network thinks these examples above are:
#
#
# + deletable=true editable=true id="utIfocFrt2Gs" colab={"base_uri": "https://localhost:8080/"} outputId="11d37608-ef31-4dc4-fe78-ad2dd7580ad2"
outputs = net(images.to(device))
print(outputs.size())
# + [markdown] deletable=true editable=true id="6mU42O0Gt2G2"
# The outputs are energies for the 10 classes.
# The higher the energy for a class, the more the network
# thinks that the image is from that particular class.
# So, let's get the index of the highest energy:
#
#
# + deletable=true editable=true id="IWTWHHs9t2G5" colab={"base_uri": "https://localhost:8080/", "height": 189} outputId="ea5cffa8-417c-497a-e08d-7a068d26575b"
_, predicted = torch.max(outputs, 1)
print("Predicted:\n")
imshow(torchvision.utils.make_grid(images))
print(' '.join('{:>10}'.format(classes[predicted[j]]) for j in range(test_batch_size)))
# + [markdown] deletable=true editable=true id="AUpCEAOTt2HK"
# The results seem pretty good.
#
# Let us look at how the network performs on the whole test set.
#
#
# + deletable=true editable=true id="LI6JtYwTt2HM" colab={"base_uri": "https://localhost:8080/"} outputId="bef99e0a-c24b-4b0f-9004-43100e3dc9fe"
def dataset_accuracy(net, data_loader, name=""):
net = net.to(device)
correct = 0
total = 0
for images, labels in data_loader:
images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
accuracy = 100 * float(correct) / total
print('Accuracy of the network on the {} {} images: {:.2f} %'.format(total, name, accuracy))
def train_set_accuracy(net):
dataset_accuracy(net, train_loader, "train")
def val_set_accuracy(net):
dataset_accuracy(net, val_loader, "validation")
def test_set_accuracy(net):
dataset_accuracy(net, test_loader, "test")
def compute_accuracy(net):
train_set_accuracy(net)
val_set_accuracy(net)
test_set_accuracy(net)
print("Computing accuracy...")
compute_accuracy(net)
# + [markdown] deletable=true editable=true id="iGGyra-4t2HW"
# That initial 59.78 % on the test set of images looks waaay better than chance, which is 10% accuracy (randomly picking
# a class out of 10 classes).
# Seems like the network learnt something.
# As a baseline, a linear model achieves around 30% accuracy.
#
# What are the classes that performed well, and the classes that did not perform well?
#
#
# + deletable=true editable=true id="rkim9_INt2HY" colab={"base_uri": "https://localhost:8080/"} outputId="ff26987d-4f29-417e-8dd4-2884299ee99a"
def accuracy_per_class(net):
net = net.to(device)
n_classes = 10
# (real, predicted)
confusion_matrix = np.zeros((n_classes, n_classes), dtype=np.int64)
for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
for i in range(test_batch_size):
confusion_matrix[labels[i], predicted[i]] += 1
label = labels[i]
print("{:<10} {:^10}".format("Class", "Accuracy (%)"))
for i in range(n_classes):
class_total = confusion_matrix[i, :].sum()
class_correct = confusion_matrix[i, i]
percentage_correct = 100.0 * float(class_correct) / class_total
print('{:<10} {:^10.2f}'.format(classes[i], percentage_correct))
return confusion_matrix
confusion_matrix = accuracy_per_class(net)
# + [markdown] deletable=true editable=true id="AZKLymOacazg"
# ### Confusion Matrix
# + [markdown] deletable=true editable=true id="ekJHz3vpcazg"
# Let's look at what type of error our networks makes...
# It seems that our network is pretty good at classifying ships,
# but has some difficulty differentiating cats and dogs.
# Also, it classifies a lot of trucks as cars.
# + deletable=true editable=true id="1aYMqD1Ocazi" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="69322c84-c555-4649-e34e-44ff918ebb9c"
# Plot normalized confusion matrix
plot_confusion_matrix(confusion_matrix, classes, normalize=True,
title='Normalized confusion matrix')
# Plot non-normalized confusion matrix
plot_confusion_matrix(confusion_matrix, classes,
title='Confusion matrix, without normalization')
# + [markdown] deletable=true editable=true id="MVv-mV8Pt2Hs"
# # III. Exploring CNN Architectures
#
# Now, it is your turn to build a Convolutional Neural Network. The goal of this section is to explore different CNN architectures and set hyperparameters in order to obtain the best accuracy on the **test** set!
#
# The network that you have to tweak is called **MyConvolutionalNetwork**.
#
# You can start by changing the batch_size and the number of epochs, and then try adding more convolutional layers.
# + [markdown] deletable=true editable=true id="h1blK9eicazo"
# ### PyTorch functions to build the network
# - **nn.Conv2d(in_channels, out_channels, kernel_size, stride=1, padding=0):** Convolution layer. You can read the full documentation [here](http://pytorch.org/docs/master/nn.html#conv2d)
#
# - **nn.MaxPool2d(kernel_size, stride=None, padding=0):** Max pooling layer. You can read the full documentation [here](http://pytorch.org/docs/master/nn.html#maxpool2d)
#
# - **F.relu(Z1):** computes the element-wise ReLU of Z1 (which can be of any shape). You can read the full documentation [here.](http://pytorch.org/docs/master/nn.html#torch.nn.ReLU)
#
# - **x.view(new_shape)**: Returns a new tensor with the same data but different size. It is the equivalent of numpy function *reshape* (Gives a new shape to an array without changing its data.). You can read the full documentation [here.](http://pytorch.org/docs/master/tensors.html#torch.Tensor.view)
#
# - **nn.Linear(in_features, out_features):** Applies a linear transformation to the incoming data: $y = Ax + b$, it is also called a fully connected (fc) layer. You can read the full documentation [here.](http://pytorch.org/docs/master/nn.html#linear-layers)
# + [markdown] deletable=true editable=true id="a8-lKBaacazp"
# **Convolution Formulas**:
#
# The formulas relating the output shape $(C_2, H_2, W_2)$ of the convolution to the input shape $(C_1, H_1, W_1)$ are:
#
#
# $$ H_2 = \lfloor \frac{H_1 - kernel\_size + 2 \times padding}{stride} \rfloor +1 $$
#
# $$ W_2 = \lfloor \frac{W_1 - kernel\_size + 2 \times padding}{stride} \rfloor +1 $$
#
# $$ C_2 = \text{number of filters used in the convolution}$$
#
# NOTE: $C_2 = C_1$ in the case of max pooling
#
# where:
# - $H_2$: height of the output volume
# - $W_2$: width of the output volume
# - $C_1$: in_channels, number of channels in the input volume
# - $C_2$: out_channels
# + deletable=true editable=true id="acppf3nkcazr"
def get_output_size(in_size, kernel_size, stride=1, padding=0):
"""
Get the output size given all the parameters of the convolution
:param in_size: (int) input size
:param kernel_size: (int)
:param stride: (int)
    :param padding: (int)
:return: (int)
"""
return int((in_size - kernel_size + 2 * padding) / stride) + 1
# + [markdown] deletable=true editable=true id="SEsbZoTOcazu"
# #### Example of use of helper method get_output_size()
#
# Let's assume you have an *input volume of size 3x32x32* (where 3 is the number of channels)
# and you use a 2D convolution with the following parameters:
#
# ```python
# conv1 = nn.Conv2d(3, 18, kernel_size=7, stride=2, padding=1)
# ```
# then, the size of the output volume is 18x?x? (because we have 18 filters) where ? is given by the convolution formulas (see above).
#
# The **get_output_size()** function lets you compute that size:
#
# ```
# out_size = get_output_size(in_size=32, kernel_size=7, stride=2, padding=1)
# print(out_size) # prints 14
# ```
#
# That is to say, *the output volume is 18x14x14*
# + deletable=true editable=true id="2JFQ1wgKcazv" colab={"base_uri": "https://localhost:8080/"} outputId="7dcceb6a-ba7f-4e70-b0b7-3121f3222057"
out_size = get_output_size(in_size=32, kernel_size=3, stride=1, padding=1)
print(out_size)
# + [markdown] deletable=true editable=true id="wviV5iQIcazz"
# Below is the neural network you have to edit:
# + deletable=true editable=true id="fnKUPUDTcaz1"
class MyConvolutionalNetwork(nn.Module):
def __init__(self):
super(MyConvolutionalNetwork, self).__init__()
self.conv1 = nn.Conv2d(3, 18, kernel_size=3, stride=1, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
#### START CODE: ADD NEW LAYERS ####
# (do not forget to update `flattened_size`:
# the input size of the first fully connected layer self.fc1)
# self.conv2 = ...
# Size of the output of the last convolution:
self.flattened_size = 18 * 16 * 16
### END CODE ###
self.fc1 = nn.Linear(self.flattened_size, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, x):
"""
Forward pass,
x shape is (batch_size, 3, 32, 32)
(color channel first)
in the comments, we omit the batch_size in the shape
"""
# shape : 3x32x32 -> 18x32x32
x = F.relu(self.conv1(x))
# 18x32x32 -> 18x16x16
x = self.pool(x)
#### START CODE: USE YOUR NEW LAYERS HERE ####
# x = ...
#### END CODE ####
# Check the output size
output_size = np.prod(x.size()[1:])
assert output_size == self.flattened_size,\
"self.flattened_size is invalid {} != {}".format(output_size, self.flattened_size)
# 18x16x16 -> 4608
x = x.view(-1, self.flattened_size)
# 4608 -> 64
x = F.relu(self.fc1(x))
# 64 -> 10
x = self.fc2(x)
return x
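# +
# For reference, one possible extension (shown only to illustrate the shape
# bookkeeping, not the required solution): a second convolution + pooling stage
# halves the spatial size again, so the flattened size becomes 36 * 8 * 8.
class TwoStageConvNetSketch(nn.Module):
    def __init__(self):
        super(TwoStageConvNetSketch, self).__init__()
        self.conv1 = nn.Conv2d(3, 18, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(18, 36, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.flattened_size = 36 * 8 * 8
        self.fc1 = nn.Linear(self.flattened_size, 64)
        self.fc2 = nn.Linear(64, 10)

    def forward(self, x):
        # 3x32x32 -> 18x32x32 -> 18x16x16
        x = self.pool(F.relu(self.conv1(x)))
        # 18x16x16 -> 36x16x16 -> 36x8x8
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, self.flattened_size)
        x = F.relu(self.fc1(x))
        return self.fc2(x)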
# + deletable=true editable=true id="ruLWyTSocaz5" colab={"base_uri": "https://localhost:8080/"} outputId="36855794-37c4-4d96-c90c-5f8e87060589"
net = MyConvolutionalNetwork()
train_history, val_history = train(net, batch_size=32, n_epochs=10, learning_rate=0.001)
# + [markdown] deletable=true editable=true id="u7cgVbkDcaz9"
# ### Losses Plot
# + deletable=true editable=true id="XtXu67qbcaz-" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="e0a2c885-618a-4826-e5f7-fc98d2f55228"
plot_losses(train_history, val_history)
# + [markdown] deletable=true editable=true id="TuGKgAMWcaz_"
# ### Accuracy of the trained model
# + deletable=true editable=true id="TWowqQhYca0B" colab={"base_uri": "https://localhost:8080/"} outputId="eeb7ade8-f016-4a86-d27c-e4fca1ead388"
compute_accuracy(net)
# + [markdown] deletable=true editable=true id="st9f_4opca0F"
# **Baseline: Simple Convolutional Neural Network (from Part II)**
#
# <table>
# <tr>
# <td>Accuracy on the test set:</td>
# <td>59.98 %</td>
# </tr>
# </table>
# + deletable=true editable=true id="OKvmb4p-ca0I" colab={"base_uri": "https://localhost:8080/"} outputId="cc38f6b9-7572-42f7-a41f-c9f1e5ad2a87"
confusion_matrix = accuracy_per_class(net)
# + deletable=true editable=true id="ih5Pj0WBca0L" colab={"base_uri": "https://localhost:8080/", "height": 585} outputId="fb058fab-9e5d-4208-aea0-beaa7731419a"
plot_confusion_matrix(confusion_matrix, classes,
title='Confusion matrix, without normalization')
# + [markdown] deletable=true editable=true id="7xzIkZqit2IA"
# ### Going further
#
# - [Coursera Course on CNN](https://www.coursera.org/learn/convolutional-neural-networks)
# - [Stanford Course](http://cs231n.stanford.edu/syllabus.html)
# - [PyTorch Tutorial](http://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html)
# - [How backpropagation works](http://michaelnielsen.org/blog/how-the-backpropagation-algorithm-works/) (<NAME>)
#
# If you feel like this was too easy peasy:
#
# - Investigate further [optimization methods](https://pytorch.org/docs/stable/optim.html) beyond SGD and Adam, and their parameters.
#
# - Look at ways to improve your network using regularization techniques.
#
# - Look at ways to visualize network activations for model interpretability.
#
# - Use transfer learning: reuse a torchvision model loaded with pretrained=True (a minimal sketch follows below).
#
#
# Acknowledgements:
# This tutorial is based on the [original PyTorch tutorial](https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html) and was adapted by [<NAME>](http://araffin.github.io/) for the ROB313 course at ENSTA Paris. Thanks to <NAME> for feedback!
#
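# +
# A minimal transfer-learning sketch (added for illustration; the model choice and
# the freezing strategy are assumptions, and recent torchvision versions use the
# `weights=` argument instead of `pretrained=`):
import torchvision.models as models

pretrained_net = models.resnet18(pretrained=True)
# Freeze the pretrained feature extractor
for param in pretrained_net.parameters():
    param.requires_grad = False
# Replace the final fully connected layer with a fresh 10-class head
pretrained_net.fc = nn.Linear(pretrained_net.fc.in_features, 10)
# Note: CIFAR-10 images are 32x32; resizing them (e.g. transforms.Resize(224))
# usually helps when reusing ImageNet models.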
# + [markdown] id="XEFW6M6jZtWk"
# ### More documentation/ questions to explore about Google Colab:
#
# - How to connect your Google Drive with Google Colab?
#
# - How to import a new notebook and save it to your GDrive?
#
# - How to use files which are contained in your GDrive?
#
# Some tips [here](https://medium.com/deep-learning-turkey/google-colab-free-gpu-tutorial-e113627b9f5d)
#
#
#
#
# ## Extras to read later
# ### Visualizing Convolution parameters:
# [A guide to convolution arithmetic for deep learning](https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md)
# by <NAME>, <NAME>
#
#
# ### Documentation of autograd and Function:
# [Autograd](http://pytorch.org/docs/autograd)
#
# + id="FoJh0SAfb7H8"
| PyTorch_cifar10_tutorial_ROB313_2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SQLAlchemy Joins
# ## Setup
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
engine = create_engine("sqlite:///../Resources/mammal_masses.sqlite", echo=False)
# Reflect Database into ORM classes
Base = automap_base()
Base.prepare(engine, reflect=True)
Base.classes.keys()
# Map Europe class
EA = Base.classes.ea
# Map North American class
NA = Base.classes.na
# create a session
session = Session(engine)
# ## Filtering Review
# Filters are the "WHERE" clause for your select statement.
# filter North American mammals whose genus is "Antilocapra"
# query, loop over and print out animals.
mammals = session.query(NA).filter(NA.genus == 'Antilocapra').all()
for mammal in mammals:
print("Family: {0}, Genus: {1}".format(mammal.family, mammal.genus))
# ## Joins
# A SQL join combines columns from one or more tables in a relational database.
#
# It creates a result set that can be saved as a table or used as is.
#
# A JOIN is a means of combining columns from one (a self-join) or more tables by using values common to each.
inspector = inspect(engine)
inspector.get_table_names()
# Get a list of column names and types
columns = inspector.get_columns('ea')
for c in columns:
print(c['name'], c["type"])
session.query(EA.sporder, NA.sporder).limit(100).all()
# +
same_sporder = session.query(EA, NA).filter(EA.sporder == NA.sporder).limit(10).all()
for record in same_sporder:
(ea, na) = record
print(ea.sporder, ea.species)
print(na.sporder, na.species)
# +
# Return all animals from EA and NA belonging to the same sporder.
# This JOINs the data in the two tables together into a single dataset (here in the form of a tuple).
# Note: We are going to limit the results to 10 for printing
sel = [EA.family, EA.genus, EA.species, NA.family, NA.genus, NA.species]
same_sporder = session.query(*sel).filter(EA.sporder == NA.sporder).limit(10).all()
for record in same_sporder:
(ea_fam, ea_gen, ea_spec, na_fam, na_gen, na_spec) = record
print(
f"The European animal '{ea_fam} {ea_gen} {ea_spec}'"
f"belongs to the same sporder as the North American animal '{na_fam} {na_gen} {na_spec}'.")
# -
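# +
# The same pairing can also be written with an explicit join() instead of filter().
# (Added sketch for illustration; the exact behaviour of query(EA, NA).join() can
# vary slightly between SQLAlchemy versions.)
explicit_join = session.query(EA, NA).join(NA, EA.sporder == NA.sporder).limit(10).all()
for ea, na in explicit_join:
    print(ea.sporder, ea.species, "<->", na.sporder, na.species)
# -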
| Joins.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](https://github.com/donnemartin). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Challenge Notebook
# ## Problem: Determine whether there is a path between two nodes in a graph.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# * [Solution Notebook](#Solution-Notebook)
# ## Constraints
#
# * Is the graph directed?
# * Yes
# * Can we assume we already have Graph and Node classes?
# * Yes
# * Can we assume this is a connected graph?
# * Yes
# * Can we assume the inputs are valid?
# * Yes
# * Can we assume this fits memory?
# * Yes
# ## Test Cases
#
# Input:
# * `add_edge(source, destination, weight)`
#
# ```
# graph.add_edge(0, 1, 5)
# graph.add_edge(0, 4, 3)
# graph.add_edge(0, 5, 2)
# graph.add_edge(1, 3, 5)
# graph.add_edge(1, 4, 4)
# graph.add_edge(2, 1, 6)
# graph.add_edge(3, 2, 7)
# graph.add_edge(3, 4, 8)
# ```
#
# Result:
# * search_path(start=0, end=2) -> True
# * search_path(start=0, end=0) -> True
# * search_path(start=4, end=5) -> False
# ## Algorithm
#
# Refer to the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/graph_path_exists/path_exists_solution.ipynb). If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
# ## Code
# %run ../graph/graph.py
# %load ../graph/graph.py
class GraphPathExists(Graph):
def path_exists(self, start, end):
# TODO: Implement me
pass
# ## Unit Test
# **The following unit test is expected to fail until you solve the challenge.**
# +
# # %load test_path_exists.py
import unittest
class TestPathExists(unittest.TestCase):
def test_path_exists(self):
nodes = []
graph = GraphPathExists()
for id in range(0, 6):
nodes.append(graph.add_node(id))
graph.add_edge(0, 1, 5)
graph.add_edge(0, 4, 3)
graph.add_edge(0, 5, 2)
graph.add_edge(1, 3, 5)
graph.add_edge(1, 4, 4)
graph.add_edge(2, 1, 6)
graph.add_edge(3, 2, 7)
graph.add_edge(3, 4, 8)
self.assertEqual(graph.path_exists(nodes[0], nodes[2]), True)
self.assertEqual(graph.path_exists(nodes[0], nodes[0]), True)
self.assertEqual(graph.path_exists(nodes[4], nodes[5]), False)
print('Success: test_path_exists')
def main():
test = TestPathExists()
test.test_path_exists()
if __name__ == '__main__':
main()
# -
# ## Solution Notebook
#
# Review the [Solution Notebook](http://nbviewer.ipython.org/github/donnemartin/interactive-coding-challenges/blob/master/graphs_trees/graph_path_exists/path_exists_solution.ipynb) for a discussion on algorithms and code solutions.
| graphs_trees/graph_path_exists/path_exists_challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
from collections import defaultdict, namedtuple
import pandas as pd
import numpy as np
from hypergraph.network import *
# -
pd.set_option('display.max_columns', 50)
data_s1 = pd.read_csv("Data S1.csv", low_memory=False)
data_s1.head(100)
# +
data = data_s1[["collection_no", "genus", "tID", "stage_no", "stage_name"]]
layer_start, layer_stop = 1, 77
selected_layers = data["stage_no"].between(layer_start, layer_stop)
data = data[selected_layers]
data.head(20), len(data)
# +
node_ids = defaultdict(lambda: len(node_ids) + 1)
nodes = {node_ids[genus]: Node(node_ids[genus], genus) for genus in data["genus"].unique()}
nodes[1], len(nodes)
# +
import numpy as np
num_collections = data.groupby("stage_name")["collection_no"].count()
num_collections.head(20), len(num_collections)
# -
group_by_genus = data.groupby(["stage_name", "genus"])["collection_no"]
group_by_genus.apply(list).head(40), len(group_by_genus)
# +
NamedHyperEdge = namedtuple("NamedHyperEdge", "id, nodes, omega, name")
edge_ids = defaultdict(lambda: len(edges) + 1)
edges = {}
weights = []
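# Build one named hyperedge per geological stage:
#   omega (edge weight) = total number of collection records in that stage
#   gamma (node weight within the edge) = the genus's share of those records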
for (edge_name, genus), collections in group_by_genus:
edge_id = edge_ids[edge_name]
collections_in_edge = num_collections[edge_name]
gamma = len(collections) / collections_in_edge
if edge_id not in edges:
omega = collections_in_edge
edges[edge_id] = NamedHyperEdge(edge_id, set(), omega, edge_name)
edge = edges[edge_id]
node_id = node_ids[genus]
node = nodes[node_id]
if node not in edge.nodes:
edge.nodes.add(node)
weights.append(Gamma(edge_id, node, gamma))
# +
hypergraph = HyperGraph(nodes.values(), edges.values(), weights)
with open(f"data/paleo-{layer_start}-{layer_stop}.txt", "w") as fp:
hypergraph.write(fp)
# -
with open("data/hyperedge-names.csv", "w") as fp:
for edge in edges.values():
fp.write(f"{edge.id},{edge.name}\n")
# +
import matplotlib.pyplot as plt
f, ax = plt.subplots()
num_collections.hist(ax=ax)
| notebooks/paleo data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preparing a state with antiferromagnetic order in the Ising model
#
# This notebook illustrates how to use Pulser to build a sequence for studying an antiferromagnetic state in an Ising-like model. It is based on [10.1103/PhysRevX.8.021070](https://journals.aps.org/prx/abstract/10.1103/PhysRevX.8.021070), in which arrays of Rydberg atoms were programmed and their correlations studied.
#
# We begin by importing some basic modules:
# +
import numpy as np
import matplotlib.pyplot as plt
import qutip
from pulser import Pulse, Sequence, Register
from pulser.simulation import Simulation
from pulser.waveforms import RampWaveform
from pulser.devices import Chadoq2
# -
# ## Waveforms
#
# We are realizing the following program
#
# <center>
# <img src="attachment:AF_Ising_program.png" alt="AF Pulse Sequence" width="300">
# </center>
# The pulse and the register are defined by the following parameters:
# +
# Parameters in rad/µs and ns
Omega_max = 2.3 * 2*np.pi
U = Omega_max / 2.3
delta_0 = -6 * U
delta_f = 2 * U
t_rise = 252
t_fall = 500
t_sweep = (delta_f - delta_0)/(2 * np.pi * 10) * 1000
R_interatomic = Chadoq2.rydberg_blockade_radius(U)
N_side = 3
reg = Register.square(N_side, R_interatomic, prefix='q')
print(f'Interatomic Radius is: {R_interatomic}µm.')
reg.draw()
# -
# ## Creating my sequence
# We compose our pulse with the following objects from Pulser:
rise = Pulse.ConstantDetuning(RampWaveform(t_rise, 0., Omega_max), delta_0, 0.)
sweep = Pulse.ConstantAmplitude(Omega_max, RampWaveform(t_sweep, delta_0, delta_f), 0.)
fall = Pulse.ConstantDetuning(RampWaveform(t_fall, Omega_max, 0.), delta_f, 0.)
# +
seq = Sequence(reg, Chadoq2)
seq.declare_channel('ising', 'rydberg_global')
seq.add(rise, 'ising')
seq.add(sweep, 'ising')
seq.add(fall, 'ising')
seq.draw()
# -
# ## Phase Diagram
# The pulse sequence travels through the following path in the phase diagram of the system (the shaded area represents the antiferromagnetic phase):
# +
delta = []
omega = []
for x in seq._schedule['ising']:
if isinstance(x.type,Pulse):
omega += list(x.type.amplitude.samples / U)
delta += list(x.type.detuning.samples / U)
fig, ax = plt.subplots()
ax.grid(True, which='both')
ax.set_ylabel(r"$\hbar\delta(t)/U$", fontsize=16)
ax.set_xlabel(r"$\hbar\Omega(t)/U$", fontsize=16)
ax.set_xlim(0, 3)
ax.axhline(y=0, color='k')
ax.axvline(x=0, color='k')
y = np.arange(0.0, 6, 0.01)
x = 1.522 * (1 - 0.25 * (y - 2)**2)
ax.fill_between(x, y, alpha=0.4)
ax.plot(omega,delta, 'red', lw=2)
plt.show()
# -
# ## Simulation: Spin-Spin Correlation Function
# We shall now evaluate the quality of the obtained state by calculating the *spin-spin correlation function*, defined as:
#
#
# $$g^c(k,l)= \frac{1}{N_{k,l}}\sum_{(i,j) = (kR,lR)} \left[ \langle n_i n_j \rangle - \langle n_i \rangle \langle n_j \rangle \right]$$
#
# where the $c$ indicates that we are calculating the *connected* part, and where the sum is over all pairs $(i,j)$ whose distance is ${\bf r}_i - {\bf r}_j = (k R,l R)$ in the atomic array coordinate (both $k$ and $l$ are positive or negative integers within the size of the array).
#
# We run a simulation of the sequence:
simul = Simulation(seq, sampling_rate=0.02)
results = simul.run(progress_bar=True)
# Sample from the final state using the `sample_final_state()` method:
# +
count = results.sample_final_state()
most_freq = {k:v for k,v in count.items() if v>10}
plt.bar(list(most_freq.keys()), list(most_freq.values()))
plt.xticks(rotation='vertical')
plt.show()
# -
# The observable to measure will be the occupation operator $|r\rangle \langle r|_i$ on each site $i$ of the register, where the Rydberg state $|r\rangle$ represents the excited state.
def occupation(j,N):
up = qutip.basis(2,0)
prod = [qutip.qeye(2) for _ in range(N)]
prod[j] = up * up.dag()
return qutip.tensor(prod)
occup_list = [occupation(j, N_side*N_side) for j in range(N_side*N_side)]
# We define a function that returns all couples $(i,j)$ for a given $(k,l)$:
def get_corr_pairs(k, l, register, R_interatomic):
corr_pairs = []
for i, qi in enumerate(register.qubits):
for j, qj in enumerate(register.qubits):
r_ij = register.qubits[qi]-register.qubits[qj]
distance = np.linalg.norm(r_ij - R_interatomic*np.array([k, l]))
if distance < 1:
corr_pairs.append([i, j])
return corr_pairs
# The correlation function is calculated with the following routines:
# +
def get_corr_function(k, l, reg, R_interatomic, state):
N_qubits = len(reg.qubits)
corr_pairs = get_corr_pairs(k, l, reg, R_interatomic)
operators = [occupation(j, N_qubits) for j in range(N_qubits)]
covariance = 0
for qi, qj in corr_pairs:
covariance += qutip.expect(operators[qi]*operators[qj], state)
covariance -= qutip.expect(operators[qi], state)*qutip.expect(operators[qj], state)
return covariance/len(corr_pairs)
def get_full_corr_function(reg, state):
N_qubits = len(reg.qubits)
correlation_function = {}
N_side = int(np.sqrt(N_qubits))
for k in range(-N_side+1, N_side):
for l in range(-N_side+1, N_side):
correlation_function[(k, l)] = get_corr_function(k, l, reg, R_interatomic, state)
return correlation_function
# -
# With these functions, we operate on the final state of evolution obtained by our simulation.
final = results.states[-1]
correlation_function = get_full_corr_function(reg, final)
expected_corr_function = {}
xi = 1 # Estimated Correlation Length
for k in range(-N_side+1,N_side):
for l in range(-N_side+1,N_side):
kk = np.abs(k)
ll = np.abs(l)
expected_corr_function[(k, l)] = (-1)**(kk + ll) * np.exp(-np.sqrt(k**2 + l**2)/xi)
# +
A = 4*np.reshape(list(correlation_function.values()), (2*N_side-1, 2*N_side-1))
A = A/np.max(A)
B = np.reshape(list(expected_corr_function.values()), (2*N_side-1, 2*N_side-1))
B = B*np.max(A)
for i, M in enumerate([A.copy(),B.copy()]):
M[N_side-1, N_side-1] = None
plt.figure(figsize=(3.5,3.5))
plt.imshow(M, cmap='coolwarm', vmin=-.6, vmax=.6)
plt.xticks(range(len(M)), [f'{x}' for x in range(-N_side + 1, N_side)])
plt.xlabel(r'$\mathscr{k}$', fontsize=22)
plt.yticks(range(len(M)), [f'{-y}' for y in range(-N_side + 1, N_side)])
plt.ylabel(r'$\mathscr{l}$', rotation=0, fontsize=22, labelpad=10)
plt.colorbar(fraction=0.047, pad=0.02)
if i == 0 :plt.title(r'$4\times\.g^{(2)}(\mathscr{k},\mathscr{l})$ after simulation', fontsize=14)
if i == 1 :plt.title(r'Exponential $g^{(2)}(\mathscr{k},\mathscr{l})$ expected', fontsize=14)
plt.show()
# -
# Note that the correlation function would follow an exponential decay (modulo finite-size effects), which is best observed at larger system sizes (see for example https://arxiv.org/pdf/2012.12268.pdf)
np.around(A, 4)
np.around(B, 4)
# ### Néel Structure Factor
# One way to explore the $\Omega = 0$ line on the phase diagram is to calculate the *Néel Structure Factor*, $S_{\text{Néel}}=4 \times \sum_{(k,l) \neq (0,0)} (-1)^{|k|+|l|} g^c(k,l)$, which should be highest when the state is more antiferromagnetic. We will sweep over different values of $\delta_{\text{final}}$ to show that the region $0<\hbar \delta_{\text{final}}/U<4$ is indeed where the antiferromagnetic phase takes place.
def get_neel_structure_factor(reg, R_interatomic, state):
N_qubits = len(reg.qubits)
N_side = int(np.sqrt(N_qubits))
st_fac = 0
for k in range(-N_side+1, N_side):
for l in range(-N_side+1, N_side):
kk = np.abs(k)
ll = np.abs(l)
if not (k == 0 and l == 0):
st_fac += 4 * (-1)**(kk + ll) * get_corr_function(k, l, reg, R_interatomic, state)
return st_fac
def calculate_neel(det, N, Omega_max = 2.3 * 2 * np.pi):
#Setup:
U = Omega_max / 2.3
delta_0 = -6 * U
delta_f = det * U
t_rise = 252
t_fall = 500
t_sweep = int((delta_f - delta_0)/(2 * np.pi * 10) * 1000)
t_sweep += 4 - t_sweep % 4 # To be a multiple of the clock period of Chadoq2 (4ns)
R_interatomic = Chadoq2.rydberg_blockade_radius(U)
reg = Register.rectangle(N, N, R_interatomic)
#Pulse Sequence
rise = Pulse.ConstantDetuning(RampWaveform(t_rise, 0., Omega_max), delta_0, 0.)
sweep = Pulse.ConstantAmplitude(Omega_max, RampWaveform(t_sweep, delta_0, delta_f), 0.)
fall = Pulse.ConstantDetuning(RampWaveform(t_fall, Omega_max, 0.), delta_f, 0.)
seq = Sequence(reg, Chadoq2)
seq.declare_channel('ising', 'rydberg_global')
seq.add(rise, 'ising')
seq.add(sweep, 'ising')
seq.add(fall, 'ising')
simul = Simulation(seq, sampling_rate=0.02)
results = simul.run()
final = results.states[-1]
return get_neel_structure_factor(reg, R_interatomic, final)
# +
N_side = 3
occup_list = [occupation(j, N_side*N_side) for j in range(N_side*N_side)]
detunings = np.linspace(-1, 5, 20)
results=[]
for det in detunings:
    print(f'Detuning = {np.round(det,3)} x 2π MHz.')
results.append(calculate_neel(det, N_side))
plt.xlabel(r'$\hbar\delta_{final}/U$')
plt.ylabel(r'Néel Structure Factor $S_{Neel}$')
plt.plot(detunings, results, 'o', ls='solid')
plt.show()
max_index = results.index(max(results))
print(f'Max S_Neel {np.round(max(results),2)} at detuning = {np.round(detunings[max_index],2)} x 2π MHz.')
| tutorials/applications/Preparing state with antiferromagnetic order in the Ising model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ideas
# - more generalizable version of _apply so that any op can be applied to obj, not just mapping some func on self.vals
# - inplace param also needs to generalize
# - figure out how I want decorator to work (or no decorator?). Don't love current implementation of having to return a lambda
# - maybe chainable should be a method of Chained?
# - still have to think about how this could be used: mixin class? Would a metaclass be useful here? Have to think.
from functools import wraps, partial
def chain(func):
@wraps(func)
def wrapped(instance, *args, **kwargs):
return instance._apply(func(*args, **kwargs))
return wrapped
class Chainable:
def __init__(self, vals):
self.vals = vals
self.ops = []
def _apply(self, func, mode='map'):
op = map if mode == 'map' else filter
self.ops.append((op, func))
return self
def exec(self, inplace=False):
new = self
for op, func in self.ops:
new = Chainable(list(op(func, new.vals)))
self.ops.clear()
if inplace:
self.__dict__ = new.__dict__
else:
return new
def double(self):
return self._apply(lambda x: x*2)
def add(self, n):
return self._apply(lambda x: x+n)
def stringify(self):
return self._apply(str)
def gt(self, n):
return self._apply(lambda x: x > n, 'filter')
def even_only(self):
return self._apply(lambda x: x % 2 == 0, 'filter')
@chain
def subtract(n):
return lambda x: x-n
def __repr__(self):
return f'Chainable({repr(self.vals)})'
c = Chainable([1, 3, 2, 44, -5, -7, 16, 6.6, 9, -3, 0.5, 1])
c
c.double().exec()
c.double().even_only().gt(-5).stringify().exec()
c.subtract(4).double().exec()
c
c.add(4).subtract(2).even_only().gt(-1).exec(inplace=True)
c
# ## Working on more generalized version
#
# Current implementation: user defines a staticmethod for each chainable op, then an instance method that calls that staticmethod. This works but is annoying to use.
from copy import copy, deepcopy
from functools import update_wrapper, wraps
import inspect
class Chainable():
def __init__(self, vals, num):
self.vals = vals
self.been_called = False
self.num = num
self.ops = []
def _apply(self, func):
self.ops.append(func)
return self
def exec(self, inplace=False):
new = deepcopy(self)
for func in self.ops:
new = func(copy(new))
# Clear ops list now that chain is complete.
new.ops.clear()
if inplace:
self.__dict__ = new.__dict__
else:
self.ops.clear()
return new
@staticmethod
def _double(instance):
instance.num *= 2
return instance
def double(self):
return self._apply(self._double)
@staticmethod
def _add(instance, n):
instance.num += n
return instance
def add(self, n):
return self._apply(partial(self._add, n=n))
@staticmethod
def _append(instance, n):
instance.vals.append(n)
return instance
def append(self, n):
return self._apply(partial(self._append, n=n))
@staticmethod
def _call(instance):
instance.been_called = True
return instance
def call(self):
return self._apply(self._call)
def __repr__(self):
return f'Chainable({repr(self.vals)}, {self.been_called}, {self.num}, {self.ops})'
c1 = Chainable([1, 3], 5)
c1
c2 = copy(c1)
c2
id(c1.vals), id(c2.vals)
c2.vals.append(3)
c1, c2
c1.double().exec(True)
c1, c2
c2.append(-1).exec(True)
c1, c2
# ### Testing basic func
vals = [5, 3, 1, 2, 4, 6]
n = 44
length = len(vals)
c = Chainable(vals, n)
c
c.add(3).double().exec()
# +
print(c)
assert c.vals == vals and not c.been_called and c.num == n and not c.ops
# -
c.add(3).double().exec(inplace=True)
print(c)
assert c.vals == vals and not c.been_called and c.num == (n+3)*2 and not c.ops
# ### Testing effect on mutable attrs
c.append(99).call().exec(False)
# Bool attr is unchanged (as desired), but list is changed.
print(c)
assert c.vals == vals and not c.been_called and c.num == (n+3)*2 and not c.ops
c.append(99).call().exec(True)
# Bool attr and list attr both changed.
print(c)
assert c.vals == vals+[99] and c.been_called and c.num == (n+3)*2 and not c.ops
assert len(vals) == length
# ## Testing meta magic
#
# Idea: can we make it simpler or more intuitive to define chainable methods?
# Status: Tentatively working, needs testing though.
# +
import types
from htools import hdir, debug
# +
# class ChainMethod:
# def __init__(self, func):
# wraps(func)(self)
# def __call__(self, *args, **kwargs):
# print('call', args, kwargs)
# return self.__wrapped__(*args, **kwargs)
# def __get__(self, instance, cls):
# if instance is None:
# return self
# return types.MethodType(self, instance)
def chain(func):
func._is_chainable = True
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
# def ChainMethod(func):
# # This version does attach _is_chainable attr but v.__name__
# # is now wrapper in metaclass
# @wraps(func)
# def wrapper(*args, **kwargs):
# print('CALL DECORATOR: args', args, '\nkwargs', kwargs)
# return func(*args, **kwargs)
# static = staticmethod(wrapper)
# static._is_chainable = True
# return static
# -
class ChainMeta(type):
def __new__(cls, name, bases, methods):
new_methods = {}
for k, v in methods.items():
try:
func = v.__get__(1)
assert func._is_chainable
except:
continue
public_name = k.lstrip('_')
# Capture args and kwargs passed to staticmethod (except for instance).
sig = inspect.signature(func)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
# Must use default args, otherwise func will always point to last method.
def make_public_method(func=func, private_name=k,
public_name=public_name, sig=sig):
def public(inst, *args, **kwargs):
bound = sig.bind(*args, **kwargs).arguments
new_method = partial(getattr(inst, private_name), **bound)
inst.ops.append(new_method)
return inst
public.__name__ = public_name
return public
new_methods[public_name] = make_public_method()
return type.__new__(cls, name, bases, {**methods, **new_methods})
class Chainable(metaclass=ChainMeta):
def __init__(self, vals, num):
self.vals = vals
self.been_called = False
self.num = num
self.ops = []
def _apply(self, func):
self.ops.append(func)
return self
def exec(self, inplace=False):
new = deepcopy(self)
for func in self.ops:
new = func(copy(new))
# Clear ops list now that chain is complete.
new.ops.clear()
if inplace:
self.__dict__ = new.__dict__
else:
self.ops.clear()
return new
@staticmethod
@chain
def _call(instance):
instance.been_called = True
return instance
@staticmethod
@chain
def _incr(instance):
instance.num += 1
return instance
@staticmethod
@chain
def _append(instance, n):
instance.vals.append(n)
return instance
def __repr__(self):
return f'Chainable({repr(self.vals)}, {self.been_called}, {self.num}, {self.ops})'
c1 = Chainable([1, 3], 5)
c1
assert not c1.been_called
_ = c1.call().exec(True)
assert _ is None
assert c1.been_called
assert c1.num == 5
c1.incr().exec()
assert c1.num == 5
_ = c1.incr().exec(True)
assert _ is None
assert c1.num == 6
c1 = Chainable([2, 4, 6], 111)
c1
# +
c2 = c1.incr().call().exec()
assert c2.been_called
assert c2.num == 112
assert not c1.been_called
assert c1.num == 111
# -
_ = c1.incr().call().append(n=99).exec(True)
assert _ is None
assert c1.vals == [2, 4, 6, 99]
assert c1.been_called
assert c1.num == 112
# ### Issue: only kwargs work atm, not args. Must map args to correct params in partial in metaclass
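# As a quick illustration of the mapping mentioned above: `inspect.signature(...).bind()` turns positional args into a name -> value dict, which is what the `sig.bind` call in the metaclass relies on when building the partial. (`_demo` below is just a hypothetical helper for illustration.)
# +
import inspect
def _demo(instance, n): return n
_demo_sig = inspect.signature(_demo)
# Drop the leading `instance` parameter, as the metaclass does.
_demo_sig = _demo_sig.replace(parameters=list(_demo_sig.parameters.values())[1:])
assert _demo_sig.bind(4).arguments == {'n': 4}  # positional arg mapped to its parameter name
# -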
c2 = c1.append(4).exec()
assert c2.vals[-1] == 4
assert c1.vals[-1] == 99
c1
c1.append(333).incr().call().exec()
c1
c1.append(333).incr().call().exec(True)
c1
# ## Testing finished lazy chainable
#
# Note: The library version has a few changes to formatting (line lengths) and docstrings, and removes the \_apply() method in an effort to enforce one correct way of making chainable methods. The chain decorator is also renamed to lazychain.
#
# ### IDEA: check if staticmethod is actually necessary now. May have been left over from a previous approach.
def chain(func):
"""Decorator to register a method as chainable within a
LazyChainable class.
"""
func._is_chainable = True
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
class LazyChainMeta(type):
"""Metaclass to create LazyChainable objects."""
def __new__(cls, name, bases, methods):
new_methods = {}
# Find chainable staticmethods and create public versions.
for k, v in methods.items():
try:
func = v.__get__(1)
assert func._is_chainable
except:
continue
public_name = k.lstrip('_')
# Capture args and kwargs passed to staticmethod (except for instance).
sig = inspect.signature(func)
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
# Must use default args, otherwise func will always point to last method.
def make_public_method(func=func, private_name=k,
public_name=public_name, sig=sig):
def public(inst, *args, **kwargs):
bound = sig.bind(*args, **kwargs).arguments
new_method = partial(getattr(inst, private_name), **bound)
inst.ops.append(new_method)
return inst
public.__name__ = public_name
return public
new_methods[public_name] = make_public_method()
return type.__new__(cls, name, bases, {**methods, **new_methods})
class LazyChainable(metaclass=LazyChainMeta):
"""Base class that allows children to lazily chain methods,
similar to a Spark RDD.
Chainable methods must be decorated with @staticmethod
and @chain and be named with a leading underscore. A public
method without the leading underscore will be created, so don't
overwrite this with another method. Chainable methods
accept an instance of the same class as the first argument,
process the instance in some way, then return it. A chain of
commands will be stored until the exec() method is called.
It can operate either in place or not.
Examples
--------
class Sequence(LazyChainable):
def __init__(self, numbers, counter, new=True):
super().__init__()
self.numbers = numbers
self.counter = counter
self.new = new
@staticmethod
@chain
def _sub(instance, n):
instance.counter -= n
return instance
@staticmethod
@chain
def _gt(instance, n=0):
instance.numbers = list(filter(lambda x: x > n, instance.numbers))
return instance
@staticmethod
@chain
def _call(instance):
instance.new = False
return instance
def __repr__(self):
pre, suf = super().__repr__().split('(')
argstrs = (f'{k}={repr(v)}' for k, v in vars(self).items())
return f'{pre}({", ".join(argstrs)}, {suf}'
>>> seq = Sequence([3, -1, 5], 0)
>>> output = seq.sub(n=3).gt(0).call().exec()
>>> output
Sequence(ops=[], numbers=[3, 5], counter=-3, new=False)
>>> seq # Unchanged because exec was not in place.
Sequence(ops=[], numbers=[3, -1, 5], counter=0, new=True)
>>> output = seq.sub(n=3).gt(-1).call().exec(inplace=True)
>>> output # None because exec was in place.
>>> seq # Changed
Sequence(ops=[], numbers=[3, -1, 5], counter=-3, new=False)
"""
def __init__(self):
self.ops = []
def _apply(self, func):
self.ops.append(func)
return self
def exec(self, inplace=False):
new = deepcopy(self)
for func in self.ops:
new = func(copy(new))
# Clear ops list now that chain is complete.
new.ops.clear()
if inplace:
self.__dict__ = new.__dict__
else:
self.ops.clear()
return new
def __repr__(self):
argstrs = (f'{k}={repr(v)}' for k, v in vars(self).items())
return f'{type(self).__name__}({", ".join(argstrs)})'
class Sequence(LazyChainable):
def __init__(self, numbers, counter, new=True):
super().__init__()
self.numbers = numbers
self.counter = counter
self.new = new
@staticmethod
@chain
def _sub(instance, n):
instance.counter -= n
return instance
@staticmethod
@chain
def _product(instance):
prod = 1
for num in instance.numbers:
prod *= num
instance.prod = prod
return instance
@staticmethod
@chain
def _gt(instance, n=0):
instance.numbers = list(filter(lambda x: x > n, instance.numbers))
return instance
@staticmethod
@chain
def _call(instance):
instance.new = False
return instance
def __repr__(self):
return super().__repr__()
seq = Sequence([1, 11, 99, 4, -3, -0.5, 1.5, -22.2], 4, new=True)
seq
seq.product().exec()
seq.gt(4).sub(3).product().exec(True)
seq
seq.ops
seq.sub(n=4).call().product().exec()
isinstance(seq, LazyChainable)
type(LazyChainable)
# ## Try another approach using \_\_getattr\_\_ instead of metaclass
def ChainMethod(func):
func._is_chainable = True
@wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
return wrapped
class Chainable:
def __init__(self, vals, num):
self.vals = vals
self.been_called = False
self.num = num
self.ops = []
def __getattr__(self, name, *args, **kwargs):
print('args, kwargs', args, kwargs)
private_name = '_' + name
method = super().__getattribute__(private_name)
if method._is_chainable:
self._apply(method)
return method
def _apply(self, func):
self.ops.append(func)
return self
def exec(self, inplace=False):
new = deepcopy(self)
for func in self.ops:
new = func(copy(new))
# Clear ops list now that chain is complete.
new.ops.clear()
if inplace:
self.__dict__ = new.__dict__
else:
self.ops.clear()
return new
@staticmethod
@ChainMethod
def _call(instance):
instance.been_called = True
return instance
@staticmethod
@ChainMethod
def _incr(instance):
instance.num += 1
return instance
@staticmethod
@ChainMethod
def _append(instance, n):
instance.vals.append(n)
return instance
def __repr__(self):
return f'Chainable({repr(self.vals)}, {self.been_called}, {self.num}, {self.ops})'
c = Chainable([2, 4, 0], 100)
c
c.incr()
c.append(n=3)
| notebooks/scratch_chainable.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning: Intermediate report
#
# *Write your name and your student ID below.*
#
# + Your name (Your ID)
# Prepare an environment for running Python code in a Jupyter notebook. The easiest way is to use [Google Colaboratory](https://colab.research.google.com/).
#
# Write code for the following three problems, and submit the notebook file (`.ipynb`) on OCW. *We do not accept reports in other formats (e.g., Word, PDF, HTML)*. Write your code in the specified cell in the notebook. One can add more cells if necessary.
#
# These are the links to the sample codes used in the lecture:
#
# + [Binary classification](https://github.com/chokkan/deeplearningclass/blob/master/mlp_binary.ipynb)
# + [MNIST](https://github.com/chokkan/deeplearningclass/blob/master/mnist.ipynb)
#
# *Please accept that your report may be shared among students who take this course.*
# ## 1. Multi-class classification on MNIST
#
# Train a model on the training set of MNIST, and report the performance of the model on the test set in the following evaluation measures:
#
# + Accuracy
# + Precision, recall, and F1 scores on each category (digit)
# + Macro-averaged precision, recall, and F1 scores (i.e., the averages of the above measures for all categories)
#
# One can use the same code shown in the lecture. Write your code here and show the output.
# ## 2. Confusion matrix
#
# Show a confusion matrix of the predictions of the model on the test set. This is an example of a confusion matrix.
#
# ![example](example-confusion-matrix.png)
#
# Write your code here and show the confusion matrix.
# ## 3. Top-3 easy and confusing examples
#
# Show the top three easy and the top three confusing images, i.e., images whose digits the model recognized with strong confidence. More specifically, let $y_n$ and $\hat{y}_n$ be the true and predicted digits, respectively, of the image $x_n$. We want to find the three images with the highest $P(\hat{y}_n | x_n)$ when $y_n = \hat{y}_n$ (easy examples) and when $y_n \neq \hat{y}_n$ (confusing examples).
#
# Please show $y_n$, $P(y_n | x_n)$, $\hat{y}_n$, and $P(\hat{y}_n | x_n)$. This is an example of an output for an image (you need this kind of output for the top-three easy and top-three confusing images).
#
# ![example](example-confusing-sample.png)
#
# Write your code here and show the output.
| assignment/(YourID)_report2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tree LSTM modeling for semantic relatedness
#
# Just five years ago, many of the most successful models for doing supervised learning with text
# ignored word order altogether.
# Some of the most successful models represented documents or sentences
# with the order-invariant *bag-of-words* representation.
# Anyone thinking hard should probably have realized that these models couldn't dominate forever.
# That's because we all know that word order actually does matter.
# Bag-of-words models, which ignored word order, left some information on the table.
#
# The recurrent neural networks that
# [we introduced in chapter 5](../chapter05_recurrent-neural-networks/simple-rnn.ipynb)
# model word order, by passing over the sequence of words in order,
# updating the model's representation of the sentence after each word.
# And, with LSTM recurrent cells and training on GPUs,
# even the straightforward LSTM far outpaces classical approaches,
# on a number of tasks, including language modeling,
# named entity recognition and more.
#
# But while those models are impressive, they still may be leaving some knowledge on the table.
# To begin with, we know a priori that sentences have a grammatical structure.
# And we already have some tools that are very good at recovering parse trees that reflect the grammatical structure of sentences.
# While it may be possible for an LSTM to learn this information implicitly,
# it's often a good idea to build known information into the structure of a neural network.
# Take for example convolutional neural networks.
# They build in the prior knowledge that low-level features should be translation-invariant.
# It's possible to come up with a fully connected net that does the same thing,
# but it would require many more nodes and would be much more susceptible to overfitting.
# In this case, we would like to build the grammatical tree structure of the sentences
# into the architecture of an LSTM recurrent neural network.
# This tutorial walks through *tree LSTMs*,
# an approach that does precisely that.
# The models here are based on the [tree-structured LSTM](https://nlp.stanford.edu/pubs/tai-socher-manning-acl2015.pdf)
# by <NAME>, <NAME>, and <NAME>.
# Our implementation borrows from [this Pytorch example](https://github.com/dasguptar/treelstm.pytorch).
#
#
# ### Sentences involving Compositional Knowledge
# This tutorial walks through training a child-sum Tree LSTM model for analyzing semantic relatedness of sentence pairs given their dependency parse trees.
#
# ### Preliminaries
# Before getting going, you'll probably want to note a couple preliminary details:
#
# * Use of GPUs is preferred if one wants to run the complete training to match the state-of-the-art results.
# * To show a progress meter, install `tqdm` ("progress" in Arabic) via `pip install tqdm`. Also install the `requests` HTTP library via `pip install requests`.
#
#
import mxnet as mx
from mxnet.gluon import Block, nn
from mxnet.gluon.parameter import Parameter
class Tree(object):
def __init__(self, idx):
self.children = []
self.idx = idx
def __repr__(self):
if self.children:
return '{0}: {1}'.format(self.idx, str(self.children))
else:
return str(self.idx)
tree = Tree(0)
tree.children.append(Tree(1))
tree.children.append(Tree(2))
tree.children.append(Tree(3))
tree.children[1].children.append(Tree(4))
print(tree)
# ### Model
# The model is based on [child-sum tree LSTM](https://nlp.stanford.edu/pubs/tai-socher-manning-acl2015.pdf). For each sentence, the tree LSTM model extracts information following the dependency parse tree structure, and produces the sentence embedding at the root of each tree. This embedding can be used to predict semantic similarity.
#
# #### Child-sum Tree LSTM
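# For reference, the update implemented by the cell below (following the child-sum Tree-LSTM formulation in the paper linked above) can be written roughly as follows for a node $j$ with children $C(j)$, input $x_j$, and element-wise product $\odot$:
#
# $$\tilde{h}_j = \sum_{k \in C(j)} h_k, \qquad i_j = \sigma(W^{(i)} x_j + U^{(i)} \tilde{h}_j + b^{(i)}), \qquad f_{jk} = \sigma(W^{(f)} x_j + U^{(f)} h_k + b^{(f)}),$$
# $$o_j = \sigma(W^{(o)} x_j + U^{(o)} \tilde{h}_j + b^{(o)}), \qquad u_j = \tanh(W^{(u)} x_j + U^{(u)} \tilde{h}_j + b^{(u)}),$$
# $$c_j = i_j \odot u_j + \sum_{k \in C(j)} f_{jk} \odot c_k, \qquad h_j = o_j \odot \tanh(c_j).$$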
class ChildSumLSTMCell(Block):
def __init__(self, hidden_size,
i2h_weight_initializer=None,
hs2h_weight_initializer=None,
hc2h_weight_initializer=None,
i2h_bias_initializer='zeros',
hs2h_bias_initializer='zeros',
hc2h_bias_initializer='zeros',
input_size=0, prefix=None, params=None):
super(ChildSumLSTMCell, self).__init__(prefix=prefix, params=params)
with self.name_scope():
self._hidden_size = hidden_size
self._input_size = input_size
self.i2h_weight = self.params.get('i2h_weight', shape=(4*hidden_size, input_size),
init=i2h_weight_initializer)
self.hs2h_weight = self.params.get('hs2h_weight', shape=(3*hidden_size, hidden_size),
init=hs2h_weight_initializer)
self.hc2h_weight = self.params.get('hc2h_weight', shape=(hidden_size, hidden_size),
init=hc2h_weight_initializer)
self.i2h_bias = self.params.get('i2h_bias', shape=(4*hidden_size,),
init=i2h_bias_initializer)
self.hs2h_bias = self.params.get('hs2h_bias', shape=(3*hidden_size,),
init=hs2h_bias_initializer)
self.hc2h_bias = self.params.get('hc2h_bias', shape=(hidden_size,),
init=hc2h_bias_initializer)
def forward(self, F, inputs, tree):
children_outputs = [self.forward(F, inputs, child)
for child in tree.children]
if children_outputs:
_, children_states = zip(*children_outputs) # unzip
else:
children_states = None
with inputs.context as ctx:
return self.node_forward(F, F.expand_dims(inputs[tree.idx], axis=0), children_states,
self.i2h_weight.data(ctx),
self.hs2h_weight.data(ctx),
self.hc2h_weight.data(ctx),
self.i2h_bias.data(ctx),
self.hs2h_bias.data(ctx),
self.hc2h_bias.data(ctx))
def node_forward(self, F, inputs, children_states,
i2h_weight, hs2h_weight, hc2h_weight,
i2h_bias, hs2h_bias, hc2h_bias):
# comment notation:
# N for batch size
# C for hidden state dimensions
# K for number of children.
# FC for i, f, u, o gates (N, 4*C), from input to hidden
i2h = F.FullyConnected(data=inputs, weight=i2h_weight, bias=i2h_bias,
num_hidden=self._hidden_size*4)
i2h_slices = F.split(i2h, num_outputs=4) # (N, C)*4
i2h_iuo = F.concat(*[i2h_slices[i] for i in [0, 2, 3]], dim=1) # (N, C*3)
if children_states:
# sum of children states, (N, C)
hs = F.add_n(*[state[0] for state in children_states])
# concatenation of children hidden states, (N, K, C)
hc = F.concat(*[F.expand_dims(state[0], axis=1) for state in children_states], dim=1)
# concatenation of children cell states, (N, K, C)
cs = F.concat(*[F.expand_dims(state[1], axis=1) for state in children_states], dim=1)
# calculate activation for forget gate. addition in f_act is done with broadcast
i2h_f_slice = i2h_slices[1]
f_act = i2h_f_slice + hc2h_bias + F.dot(hc, hc2h_weight) # (N, K, C)
forget_gates = F.Activation(f_act, act_type='sigmoid') # (N, K, C)
else:
            # for leaf nodes, the summation of children hidden states is zero.
hs = F.zeros_like(i2h_slices[0])
# FC for i, u, o gates, from summation of children states to hidden state
hs2h_iuo = F.FullyConnected(data=hs, weight=hs2h_weight, bias=hs2h_bias,
num_hidden=self._hidden_size*3)
i2h_iuo = i2h_iuo + hs2h_iuo
iuo_act_slices = F.SliceChannel(i2h_iuo, num_outputs=3) # (N, C)*3
i_act, u_act, o_act = iuo_act_slices[0], iuo_act_slices[1], iuo_act_slices[2] # (N, C) each
# calculate gate outputs
in_gate = F.Activation(i_act, act_type='sigmoid')
in_transform = F.Activation(u_act, act_type='tanh')
out_gate = F.Activation(o_act, act_type='sigmoid')
# calculate cell state and hidden state
next_c = in_gate * in_transform
if children_states:
next_c = F.sum(forget_gates * cs, axis=1) + next_c
next_h = out_gate * F.Activation(next_c, act_type='tanh')
return next_h, [next_h, next_c]
# #### Similarity regression module
# module for distance-angle similarity
class Similarity(nn.Block):
def __init__(self, sim_hidden_size, rnn_hidden_size, num_classes):
super(Similarity, self).__init__()
with self.name_scope():
self.wh = nn.Dense(sim_hidden_size, in_units=2*rnn_hidden_size)
self.wp = nn.Dense(num_classes, in_units=sim_hidden_size)
def forward(self, F, lvec, rvec):
# lvec and rvec will be tree_lstm cell states at roots
mult_dist = F.broadcast_mul(lvec, rvec)
abs_dist = F.abs(F.add(lvec,-rvec))
vec_dist = F.concat(*[mult_dist, abs_dist],dim=1)
out = F.log_softmax(self.wp(F.sigmoid(self.wh(vec_dist))))
return out
# #### Final model
# putting the whole model together
class SimilarityTreeLSTM(nn.Block):
def __init__(self, sim_hidden_size, rnn_hidden_size, embed_in_size, embed_dim, num_classes):
super(SimilarityTreeLSTM, self).__init__()
with self.name_scope():
self.embed = nn.Embedding(embed_in_size, embed_dim)
self.childsumtreelstm = ChildSumLSTMCell(rnn_hidden_size, input_size=embed_dim)
self.similarity = Similarity(sim_hidden_size, rnn_hidden_size, num_classes)
def forward(self, F, l_inputs, r_inputs, l_tree, r_tree):
l_inputs = self.embed(l_inputs)
r_inputs = self.embed(r_inputs)
# get cell states at roots
lstate = self.childsumtreelstm(F, l_inputs, l_tree)[1][1]
rstate = self.childsumtreelstm(F, r_inputs, r_tree)[1][1]
output = self.similarity(F, lstate, rstate)
return output
# ### Dataset classes
# #### Vocab
# +
import os
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import random
from tqdm import tqdm
import mxnet as mx
# class for vocabulary and the word embeddings
class Vocab(object):
# constants for special tokens: padding, unknown, and beginning/end of sentence.
PAD, UNK, BOS, EOS = 0, 1, 2, 3
PAD_WORD, UNK_WORD, BOS_WORD, EOS_WORD = '<blank>', '<unk>', '<s>', '</s>'
def __init__(self, filepaths=[], embedpath=None, include_unseen=False, lower=False):
self.idx2tok = []
self.tok2idx = {}
self.lower = lower
self.include_unseen = include_unseen
self.add(Vocab.PAD_WORD)
self.add(Vocab.UNK_WORD)
self.add(Vocab.BOS_WORD)
self.add(Vocab.EOS_WORD)
self.embed = None
for filename in filepaths:
logging.info('loading %s'%filename)
with open(filename, 'r') as f:
self.load_file(f)
if embedpath is not None:
logging.info('loading %s'%embedpath)
with open(embedpath, 'r') as f:
self.load_embedding(f, reset=set([Vocab.PAD_WORD, Vocab.UNK_WORD, Vocab.BOS_WORD,
Vocab.EOS_WORD]))
@property
def size(self):
return len(self.idx2tok)
def get_index(self, key):
return self.tok2idx.get(key.lower() if self.lower else key,
Vocab.UNK)
def get_token(self, idx):
if idx < self.size:
return self.idx2tok[idx]
else:
return Vocab.UNK_WORD
def add(self, token):
token = token.lower() if self.lower else token
if token in self.tok2idx:
idx = self.tok2idx[token]
else:
idx = len(self.idx2tok)
self.idx2tok.append(token)
self.tok2idx[token] = idx
return idx
def to_indices(self, tokens, add_bos=False, add_eos=False):
        vec = [Vocab.BOS] if add_bos else []
vec += [self.get_index(token) for token in tokens]
if add_eos:
            vec.append(Vocab.EOS)
return vec
def to_tokens(self, indices, stop):
tokens = []
for i in indices:
tokens += [self.get_token(i)]
if i == stop:
break
return tokens
def load_file(self, f):
for line in f:
tokens = line.rstrip('\n').split()
for token in tokens:
self.add(token)
def load_embedding(self, f, reset=[]):
vectors = {}
for line in tqdm(f.readlines(), desc='Loading embeddings'):
tokens = line.rstrip('\n').split(' ')
word = tokens[0].lower() if self.lower else tokens[0]
if self.include_unseen:
self.add(word)
if word in self.tok2idx:
vectors[word] = [float(x) for x in tokens[1:]]
        dim = len(next(iter(vectors.values())))  # dict views are not subscriptable in Python 3
def to_vector(tok):
if tok in vectors and tok not in reset:
return vectors[tok]
elif tok not in vectors:
return np.random.normal(-0.05, 0.05, size=dim)
else:
return [0.0]*dim
self.embed = mx.nd.array([vectors[tok] if tok in vectors and tok not in reset
else [0.0]*dim for tok in self.idx2tok])
# -
# #### Data iterator
# Iterator class for SICK dataset
class SICKDataIter(object):
def __init__(self, path, vocab, num_classes, shuffle=True):
super(SICKDataIter, self).__init__()
self.vocab = vocab
self.num_classes = num_classes
self.l_sentences = []
self.r_sentences = []
self.l_trees = []
self.r_trees = []
self.labels = []
self.size = 0
self.shuffle = shuffle
self.reset()
def reset(self):
if self.shuffle:
mask = list(range(self.size))
random.shuffle(mask)
self.l_sentences = [self.l_sentences[i] for i in mask]
self.r_sentences = [self.r_sentences[i] for i in mask]
self.l_trees = [self.l_trees[i] for i in mask]
self.r_trees = [self.r_trees[i] for i in mask]
self.labels = [self.labels[i] for i in mask]
self.index = 0
def next(self):
out = self[self.index]
self.index += 1
return out
def set_context(self, context):
self.l_sentences = [a.as_in_context(context) for a in self.l_sentences]
self.r_sentences = [a.as_in_context(context) for a in self.r_sentences]
def __len__(self):
return self.size
def __getitem__(self, index):
l_tree = self.l_trees[index]
r_tree = self.r_trees[index]
l_sent = self.l_sentences[index]
r_sent = self.r_sentences[index]
label = self.labels[index]
return (l_tree, l_sent, r_tree, r_sent, label)
# ### Training with autograd
# +
import argparse, pickle, math, os, random
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
# training settings and hyper-parameters
use_gpu = False
optimizer = 'AdaGrad'
seed = 123
batch_size = 25
training_batches_per_epoch = 10
learning_rate = 0.01
weight_decay = 0.0001
epochs = 1
rnn_hidden_size, sim_hidden_size, num_classes = 150, 50, 5
# initialization
context = [mx.gpu(0) if use_gpu else mx.cpu()]
# seeding
mx.random.seed(seed)
np.random.seed(seed)
random.seed(seed)
# read dataset
def verified(file_path, sha1hash):
import hashlib
sha1 = hashlib.sha1()
with open(file_path, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
matched = sha1.hexdigest() == sha1hash
if not matched:
logging.warn('Found hash mismatch in file {}, possibly due to incomplete download.'
.format(file_path))
return matched
data_file_name = 'tree_lstm_dataset-3d85a6c4.cPickle'
data_file_hash = '3d85a6c44a335a33edc060028f91395ab0dcf601'
if not os.path.exists(data_file_name) or not verified(data_file_name, data_file_hash):
from mxnet.test_utils import download
download('https://apache-mxnet.s3-accelerate.amazonaws.com/gluon/dataset/%s'%data_file_name,
overwrite=True)
with open('tree_lstm_dataset-3d85a6c4.cPickle', 'rb') as f:
train_iter, dev_iter, test_iter, vocab = pickle.load(f)
logging.info('==> SICK vocabulary size : %d ' % vocab.size)
logging.info('==> Size of train data : %d ' % len(train_iter))
logging.info('==> Size of dev data : %d ' % len(dev_iter))
logging.info('==> Size of test data : %d ' % len(test_iter))
# get network
net = SimilarityTreeLSTM(sim_hidden_size, rnn_hidden_size, vocab.size, vocab.embed.shape[1], num_classes)
# use pearson correlation and mean-square error for evaluation
metric = mx.metric.create(['pearsonr', 'mse'])
# the prediction from the network is a log-probability vector over the score classes,
# so use the following function to convert a scalar score to such a vector,
# e.g. 4.5 -> [0, 0, 0, 0.5, 0.5]
def to_target(x):
target = np.zeros((1, num_classes))
ceil = int(math.ceil(x))
floor = int(math.floor(x))
if ceil==floor:
target[0][floor-1] = 1
else:
target[0][floor-1] = ceil - x
target[0][ceil-1] = x - floor
return mx.nd.array(target)
# and use the following to convert log-probability vector to score
def to_score(x):
levels = mx.nd.arange(1, 6, ctx=x.context)
return [mx.nd.sum(levels*mx.nd.exp(x), axis=1).reshape((-1,1))]
# when evaluating in validation mode, check and see if pearson-r is improved
# if so, checkpoint and run evaluation on test dataset
def test(ctx, data_iter, best, mode='validation', num_iter=-1):
data_iter.reset()
samples = len(data_iter)
data_iter.set_context(ctx[0])
preds = []
labels = [mx.nd.array(data_iter.labels, ctx=ctx[0]).reshape((-1,1))]
for _ in tqdm(range(samples), desc='Testing in {} mode'.format(mode)):
l_tree, l_sent, r_tree, r_sent, label = data_iter.next()
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
preds.append(z)
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info(mode+' acc: %s=%f'%(name, acc))
if name == 'pearsonr':
test_r = acc
if mode == 'validation' and num_iter >= 0:
if test_r >= best:
best = test_r
logging.info('New optimum found: {}.'.format(best))
return best
def train(epoch, ctx, train_data, dev_data):
# initialization with context
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx[0])
net.embed.weight.set_data(vocab.embed.as_in_context(ctx[0]))
train_data.set_context(ctx[0])
dev_data.set_context(ctx[0])
# set up trainer for optimizing the network.
trainer = gluon.Trainer(net.collect_params(), optimizer, {'learning_rate': learning_rate, 'wd': weight_decay})
best_r = -1
Loss = gluon.loss.KLDivLoss()
for i in range(epoch):
train_data.reset()
num_samples = min(len(train_data), training_batches_per_epoch*batch_size)
# collect predictions and labels for evaluation metrics
preds = []
labels = [mx.nd.array(train_data.labels[:num_samples], ctx=ctx[0]).reshape((-1,1))]
for j in tqdm(range(num_samples), desc='Training epoch {}'.format(i)):
# get next batch
l_tree, l_sent, r_tree, r_sent, label = train_data.next()
# use autograd to record the forward calculation
with ag.record():
# forward calculation. the output is log probability
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
# calculate loss
loss = Loss(z, to_target(label).as_in_context(ctx[0]))
# backward calculation for gradients.
loss.backward()
preds.append(z)
# update weight after every batch_size samples
if (j+1) % batch_size == 0:
trainer.step(batch_size)
# translate log-probability to scores, and evaluate
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info('training acc at epoch %d: %s=%f'%(i, name, acc))
best_r = test(ctx, dev_data, best_r, num_iter=i)
train(epochs, context, train_iter, dev_iter)
# -
# ### Conclusion
# - Gluon offers great tools for modeling in an imperative way.
| chapter09_natural-language-processing/tree-lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Train classifier by layers
#
# This notebook trains a classifier that operates in two layers:
# - First we use an SVM classifier to label utterances with a high degree of certainty.
# - Afterwards we use heuristics to complete the labeling.
# ### Import and path definition
# +
import os
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import pickle
import sys
root_path = os.path.dirname(os.path.dirname(os.path.abspath(os.getcwd())))
sys.path.append(root_path)
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from src import phase_classification as pc
data_path = os.path.join(root_path,'data')
tables_path = os.path.join(data_path,'tables')
# -
# ### Load data
WITH_STEMMING = True
#REMOVE_STOPWORDS = True
SEED = 10
NUM_TOPICS = 60
random.seed(SEED)
file_name = '[train]IBL_topic_distribution_by_utterance_minimum_5_words_with_stemming_{}_{}.xlsx'.format(WITH_STEMMING,NUM_TOPICS)
df_data = pd.read_excel(os.path.join(tables_path,'train',file_name))
df_data.head()
the_keys = list(set(df_data['phase']))
total_samples = 0
class_samples = {}
for key in the_keys:
n = list(df_data.phase.values).count(key)
#print("key {}, total {}".format(key,n))
total_samples += n
class_samples[key] = n
print(total_samples)
for key in the_keys:
print("key {}, samples: {}, prop: {}".format(key,class_samples[key],round(class_samples[key]*1.0/total_samples,2)))
# ### split data
filter_rows = list(range(60))+[67,68]
row_label = 60
dfs_train,dfs_val = pc.split_df_discussions(df_data,.2,SEED)
X_train,y_train = pc.get_joined_data_from_df(dfs_train,filter_rows,row_label)
X_val,y_val = pc.get_joined_data_from_df(dfs_val,filter_rows,row_label)
len(X_train)
dfs_all,_ = pc.split_df_discussions(df_data,.0,SEED)
X_all,y_all = pc.get_joined_data_from_df(dfs_all,filter_rows,row_label)
# ### Classify first layer
class_weight = {}
for key in the_keys:
class_weight[key] = 1000.0/class_samples[key]
svc = SVC(kernel='linear',random_state=SEED,max_iter=3000,probability=True,class_weight=class_weight)#
svc.fit(X_train, y_train)
print('Accuracy of SVM classifier on training set: {:.2f}'
.format(svc.score(X_train, y_train)))
print('Accuracy of SVM classifier on validation set: {:.2f}'
.format(svc.score(X_val, y_val)))
pred = svc.predict(X_val)
labels = ["Phase {}".format(i) for i in range(1,6)]
df = pd.DataFrame(confusion_matrix(y_val, pred),columns=["Predicted {}".format(i) for i in labels])
df.index = labels
#print(" ")
print(classification_report(y_val, pred))
df
# ### Find threshold
# Look for the distance to the second max
pred_val = svc.predict_proba(X_val)
prob_pred = [v[int(y_val[i]-1)] for i,v in enumerate(pred_val)]
plt.hist(prob_pred)
tuple_winner_pred = [(np.max(v),prob_pred[i],np.max(v)==prob_pred[i]) for i,v in enumerate(pred_val)]
values_ok = [v[0] for i,v in enumerate(tuple_winner_pred) if v[2]==True]
values_not_ok = [v[0] for i,v in enumerate(tuple_winner_pred) if v[2]==False]
values_dist_not_ok = [v[0]-v[1] for i,v in enumerate(tuple_winner_pred) if v[2]==False]
plt.hist(values_ok,np.arange(0.2,.95,.01))
plt.hist(values_not_ok,np.arange(0.2,.95,.01))
plt.scatter(values_dist_not_ok,values_not_ok)
# Define Threshold
t = 0.55
output_first_layer = pc.first_layer_classifier(X_all,t,svc)
comparison = list(zip(output_first_layer,y_all))
df_data['first_layer'] = output_first_layer
# ### Second layer
pred_val = svc.predict_proba([X_all[3]])
pred_val
second_layer = pc.second_layer_classifier_max_border(X_all,df_data,svc)
df_data['second_layer'] = second_layer
df_data.to_excel(os.path.join(tables_path,'[second_layer]'+file_name))
second_aux = []
y_all_aux = []
for i,e in enumerate(second_layer):
if e!=-1:
second_aux.append(second_layer[i])
y_all_aux.append(y_all[i])
confusion_matrix(y_all_aux, second_aux)
"Number of not predicted {}".format(second_layer.count(-1))
df = pd.DataFrame(confusion_matrix(y_all_aux, second_aux),columns=["Predicted {}".format(i) for i in labels])
df.index = labels
print(classification_report(y_all_aux, second_aux))
df
with open(os.path.join(data_path,'classifier_svm_single_best_of_two.pickle'),'wb') as f:
pickle.dump(svc,f)
print('Agreement of SVM classifier with second-layer labels: {:.2f}'
.format(svc.score(X_all, second_layer)))
| notebooks/2. Train classifier (best of two)/2. Train classifier by layers [utterance level][60t].ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
provider = pd.read_csv('PartD_Prescriber_PUF_NPI_17.txt', sep='\t')
#Confirming dataset seemed to read in correctly
provider.head()
#Looking at columns for unneeded columns
provider.columns
#Dropping a few unneeded columns since it's a large dataset and these don't connect to our other exclusions data
provider_2017 = provider.drop(columns = ['nppes_provider_street2', 'nppes_provider_zip4'])
#Confirming columns dropped
provider_2017.shape
#Looking at data types
print(provider_2017.dtypes)
#Looking at null values and where we may need to impute or drop values
provider_2017.isnull().sum()
#Due to the high number of null values, removing the columns specifying beneficiary age and race
provider_2017_clean = provider_2017.drop(columns = ['beneficiary_age_less_65_count', 'beneficiary_age_65_74_count', 'beneficiary_age_75_84_count', 'beneficiary_age_greater_84_count', 'beneficiary_race_white_count', 'beneficiary_race_black_count', 'beneficiary_race_asian_pi_count', 'beneficiary_race_hispanic_count', 'beneficiary_race_nat_ind_count', 'beneficiary_race_other_count'])
#Checking columns dropped as columns should now number 70
provider_2017_clean.shape
# According to the documentation for the dataset, if any of the following counts is less than 11, the value is suppressed, resulting in null values: bene_count, total_claim_count, opioid_claim_count, la_opioid_claim_count, antibiotic_claim_count, opioid_bene_count, and la_opioid_bene_count. Therefore, to use these as features in our model, we can either impute the null values with 5 (a reasonable stand-in for a suppressed count between 1 and 10) or bin the counts into value ranges. After combining the data we can look more into the options for managing these.
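# As a rough sketch of the first option (the column names are taken from the list above, and the fill value of 5 is only the stand-in discussed there, not a final decision):
# +
# Sketch: impute suppressed counts (reported as null when < 11) with 5.
suppressed_cols = ['bene_count', 'total_claim_count', 'opioid_claim_count',
                   'la_opioid_claim_count', 'antibiotic_claim_count',
                   'opioid_bene_count', 'la_opioid_bene_count']
provider_2017_imputed = provider_2017_clean.copy()
provider_2017_imputed[suppressed_cols] = provider_2017_imputed[suppressed_cols].fillna(5)
# -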
#Saving as new file
provider_2017_clean.to_csv('Provider_2017_72Columns.txt', sep='\t', index = False)  # to_csv returns None, so don't overwrite the DataFrame variable with it
| Cleaning_and_Merging/Cleaning 2017 Provider Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("test2.jpeg", cv2.IMREAD_GRAYSCALE)
img.shape
# +
img[img >= 127] = 255
img[img < 127] = 0
plt.imshow(img, cmap="gray")
plt.title("Original Image")
plt.show()
# -
def replace(img):
    # Invert a binarized image: swap 0 and 255 so the dark shapes become the "on" pixels.
ret = np.array(img)
for i in range(img.shape[0]):
for j in range(img.shape[1]):
ret[i,j] = 255 if img[i,j] == 0 else 0
return ret
def dilation(img):
    # Morphological dilation of the dark regions, using a 2x2 structuring element of ones.
dil = np.zeros(img.shape)
elem = np.array([[1,1],[1,1]])
img = replace(img)
for i in range(img.shape[0]-1):
for j in range(img.shape[1] - 1):
k = np.array(img[i:i+2,j:j+2]) * elem
if np.sum(k) > 0:
dil[i,j] = 255
else:
dil[i,j] = 0
dil = replace(dil)
# img = replace(img)
return dil
def erosion(img):
    # Morphological erosion of the dark regions, using a 2x2 structuring element of ones.
ero = np.zeros(img.shape)
elem = np.array([[1,1],[1,1]])
img = replace(img)
for i in range(img.shape[0]-1):
for j in range(img.shape[1] - 1):
k = np.array(img[i:i+2,j:j+2]) * elem
if np.sum(k) == np.sum(elem) * 255:
ero[i,j] = 255
else:
ero[i,j] = 0
ero = replace(ero)
return ero
# ## Output
# +
fig = plt.figure(figsize=(15, 10))
dil = dilation(img)
fig.add_subplot(121)
plt.imshow(dil, cmap="gray")
plt.title("Dilation")
ero = erosion(img)
fig.add_subplot(122)
plt.imshow(ero, cmap="gray")
plt.title("Erosion")
plt.show()
# -
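# For a rough cross-check, OpenCV's built-in morphology operations should give similar results. Note that the functions above treat the *black* pixels as the foreground, while `cv2.dilate`/`cv2.erode` grow/shrink the *white* regions, so the two calls are swapped relative to the names used here (a sketch only; the 2x2 kernel mirrors the structuring element above).
# +
kernel = np.ones((2, 2), np.uint8)
cv_dil = cv2.erode(img, kernel)   # grows the black regions, like dilation(img) above
cv_ero = cv2.dilate(img, kernel)  # shrinks the black regions, like erosion(img) above
# -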
replace(dil)
| all final/Dilation and Erosion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="77gENRVX40S7"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="d8jyt37T42Vf"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" colab={} colab_type="code" id="aPxHdjwW5P2j"
#@title MIT License
#
# Copyright (c) 2017 <NAME> # IGNORE_COPYRIGHT: cleared by OSS licensing
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="hRTa3Ee15WsJ"
# # Transfer learning with a pretrained ConvNet
# + [markdown] colab_type="text" id="dQHMcypT3vDT"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/images/transfer_learning"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/images/transfer_learning.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/images/transfer_learning.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/images/transfer_learning.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="2X4KyhORdSeO"
# In this tutorial, you will learn how to classify images of cats and dogs by using transfer learning from a pre-trained network.
#
# A pre-trained model is a saved network that was previously trained on a large dataset, typically on a large-scale image-classification task. You either use the pretrained model as is or use transfer learning to customize this model to a given task.
#
# The intuition behind transfer learning for image classification is that if a model is trained on a large and general enough dataset, this model will effectively serve as a generic model of the visual world. You can then take advantage of these learned feature maps without having to start from scratch by training a large model on a large dataset.
#
# In this notebook, you will try two ways to customize a pretrained model:
#
# 1. Feature Extraction: Use the representations learned by a previous network to extract meaningful features from new samples. You simply add a new classifier, which will be trained from scratch, on top of the pretrained model so that you can repurpose the feature maps learned previously for the dataset.
#
# You do not need to (re)train the entire model. The base convolutional network already contains features that are generically useful for classifying pictures. However, the final, classification part of the pretrained model is specific to the original classification task, and consequently specific to the set of classes on which the model was trained.
#
# 1. Fine-Tuning: Unfreeze a few of the top layers of a frozen model base and jointly train both the newly-added classifier layers and the last layers of the base model. This allows us to "fine-tune" the higher-order feature representations in the base model in order to make them more relevant for the specific task.
#
# You will follow the general machine learning workflow.
#
# 1. Examine and understand the data
# 1. Build an input pipeline, in this case using Keras ImageDataGenerator
# 1. Compose the model
# * Load in the pretrained base model (and pretrained weights)
# * Stack the classification layers on top
# 1. Train the model
# 1. Evaluate model
#
# + colab={} colab_type="code" id="iBMcobPHdD8O"
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import matplotlib.pyplot as plt
# + colab={} colab_type="code" id="TqOt6Sv7AsMi"
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
# + [markdown] colab_type="text" id="v77rlkCKW0IJ"
# ## Data preprocessing
# + [markdown] colab_type="text" id="0GoKGm1duzgk"
# ### Data download
# + [markdown] colab_type="text" id="vHP9qMJxt2oz"
# Use [TensorFlow Datasets](http://tensorflow.org/datasets) to load the cats and dogs dataset.
#
# This `tfds` package is the easiest way to load pre-defined data. If you have your own data and are interested in using it with TensorFlow, see [loading image data](../load_data/images.ipynb).
#
# + colab={} colab_type="code" id="KVh7rDVAuW8Y"
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# + [markdown] colab_type="text" id="Nsoic6bGuwQ-"
# The `tfds.load` method downloads and caches the data, and returns a `tf.data.Dataset` object. These objects provide powerful, efficient methods for manipulating data and piping it into your model.
#
# Since `"cats_vs_dogs"` doesn't define standard splits, use the subsplit feature to divide it into (train, validation, test) with 80%, 10%, and 10% of the data respectively.
# + colab={} colab_type="code" id="ro4oYaEmxe4r"
(raw_train, raw_validation, raw_test), metadata = tfds.load(
'cats_vs_dogs',
split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
with_info=True,
as_supervised=True,
)
# + [markdown] colab_type="text" id="o29EfE-p0g5X"
# The resulting `tf.data.Dataset` objects contain `(image, label)` pairs where the images have variable shape and 3 channels, and the label is a scalar.
# + colab={} colab_type="code" id="GIys1_zY1S9b"
print(raw_train)
print(raw_validation)
print(raw_test)
# + [markdown] colab_type="text" id="yO1Q2JaW5sIy"
# Show the first two images and labels from the training set:
# + colab={} colab_type="code" id="K5BeQyKThC_Y"
get_label_name = metadata.features['label'].int2str
for image, label in raw_train.take(2):
plt.figure()
plt.imshow(image)
plt.title(get_label_name(label))
# + [markdown] colab_type="text" id="wvidPx6jeFzf"
# ### Format the Data
#
# Use the `tf.image` module to format the images for the task.
#
# Resize the images to a fixed input size, and rescale the input channels to a range of `[-1,1]`
#
# <!-- TODO(markdaoust): fix the keras_applications preprocessing functions to work in tf2 -->
# + colab={} colab_type="code" id="y3PM6GVHcC31"
IMG_SIZE = 160 # All images will be resized to 160x160
def format_example(image, label):
image = tf.cast(image, tf.float32)
image = (image/127.5) - 1
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image, label
# + [markdown] colab_type="text" id="i2MRh_AeBtOM"
# Apply this function to each item in the dataset using the map method:
# + colab={} colab_type="code" id="SFZ6ZW7KSXP9"
train = raw_train.map(format_example)
validation = raw_validation.map(format_example)
test = raw_test.map(format_example)
# + [markdown] colab_type="text" id="E5ifgXDuBfOC"
# Now shuffle and batch the data.
# + colab={} colab_type="code" id="Yic-I66m6Isv"
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 1000
# + colab={} colab_type="code" id="p3UUPdm86LNC"
train_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_batches = validation.batch(BATCH_SIZE)
test_batches = test.batch(BATCH_SIZE)
# + [markdown] colab_type="text" id="02rJpcFtChP0"
# Inspect a batch of data:
# + colab={} colab_type="code" id="iknFo3ELBVho"
for image_batch, label_batch in train_batches.take(1):
pass
image_batch.shape
# + [markdown] colab_type="text" id="OkH-kazQecHB"
# ## Create the base model from the pre-trained convnets
# You will create the base model from the **MobileNet V2** model developed at Google. This is pre-trained on the ImageNet dataset, a large dataset consisting of 1.4M images and 1000 classes. ImageNet is a research training dataset with a wide variety of categories like `jackfruit` and `syringe`. This base of knowledge will help us classify cats and dogs from our specific dataset.
#
# First, you need to pick which layer of MobileNet V2 you will use for feature extraction. The very last classification layer (on "top", as most diagrams of machine learning models go from bottom to top) is not very useful. Instead, you will follow the common practice of depending on the very last layer before the flatten operation. This layer is called the "bottleneck layer". The bottleneck layer features retain more generality as compared to the final/top layer.
#
# First, instantiate a MobileNet V2 model pre-loaded with weights trained on ImageNet. By specifying the **include_top=False** argument, you load a network that doesn't include the classification layers at the top, which is ideal for feature extraction.
# + colab={} colab_type="code" id="19IQ2gqneqmS"
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
# + [markdown] colab_type="text" id="AqcsxoJIEVXZ"
# This feature extractor converts each `160x160x3` image into a `5x5x1280` block of features. See what it does to the example batch of images:
# + colab={} colab_type="code" id="Y-2LJL0EEUcx"
feature_batch = base_model(image_batch)
print(feature_batch.shape)
# + [markdown] colab_type="text" id="rlx56nQtfe8Y"
# ## Feature extraction
# In this step, you will freeze the convolutional base created from the previous step and to use as a feature extractor. Additionally, you add a classifier on top of it and train the top-level classifier.
# + [markdown] colab_type="text" id="CnMLieHBCwil"
# ### Freeze the convolutional base
#
# It is important to freeze the convolutional base before you compile and train the model. Freezing (by setting layer.trainable = False) prevents the weights in a given layer from being updated during training. MobileNet V2 has many layers, so setting the entire model's trainable flag to False will freeze all the layers.
# + colab={} colab_type="code" id="OTCJH4bphOeo"
base_model.trainable = False
# + colab={} colab_type="code" id="KpbzSmPkDa-N"
# Let's take a look at the base model architecture
base_model.summary()
# + [markdown] colab_type="text" id="wdMRM8YModbk"
# ### Add a classification head
# + [markdown] colab_type="text" id="QBc31c4tMOdH"
# To generate predictions from the block of features, average over the `5x5` spatial locations, using a `tf.keras.layers.GlobalAveragePooling2D` layer to convert the features to a single 1280-element vector per image.
# + colab={} colab_type="code" id="dLnpMF5KOALm"
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
# + [markdown] colab_type="text" id="O1p0OJBR6dOT"
# Apply a `tf.keras.layers.Dense` layer to convert these features into a single prediction per image. You don't need an activation function here because this prediction will be treated as a `logit`, or a raw prediction value. Positive numbers predict class 1, negative numbers predict class 0.
# + colab={} colab_type="code" id="Wv4afXKj6cVa"
prediction_layer = tf.keras.layers.Dense(1)
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
# + [markdown] colab_type="text" id="0iqnBeZrfoIc"
# Now stack the feature extractor, and these two layers using a `tf.keras.Sequential` model:
# + colab={} colab_type="code" id="eApvroIyn1K0"
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer
])
# + [markdown] colab_type="text" id="g0ylJXE_kRLi"
# ### Compile the model
#
# You must compile the model before training it. Since there are two classes, use a binary cross-entropy loss with `from_logits=True` since the model provides a linear output.
# + colab={} colab_type="code" id="RpR8HdyMhukJ"
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
# + colab={} colab_type="code" id="I8ARiyMFsgbH"
model.summary()
# + [markdown] colab_type="text" id="lxOcmVr0ydFZ"
# The 2.5M parameters in MobileNet are frozen, but there are 1.2K _trainable_ parameters in the Dense layer. These are divided between two `tf.Variable` objects, the weights and biases.
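# (Concretely, the `Dense` layer maps the 1280-element feature vector to one logit, so it holds 1280 × 1 = 1280 weights plus 1 bias, i.e. 1281 ≈ 1.2K trainable parameters.)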
# + colab={} colab_type="code" id="krvBumovycVA"
len(model.trainable_variables)
# + [markdown] colab_type="text" id="RxvgOYTDSWTx"
# ### Train the model
#
# After training for 10 epochs, you should see ~96% accuracy.
#
# + colab={} colab_type="code" id="Om4O3EESkab1"
initial_epochs = 10
validation_steps=20
loss0,accuracy0 = model.evaluate(validation_batches, steps = validation_steps)
# + colab={} colab_type="code" id="8cYT1c48CuSd"
print("initial loss: {:.2f}".format(loss0))
print("initial accuracy: {:.2f}".format(accuracy0))
# + colab={} colab_type="code" id="JsaRFlZ9B6WK"
history = model.fit(train_batches,
epochs=initial_epochs,
validation_data=validation_batches)
# + [markdown] colab_type="text" id="Hd94CKImf8vi"
# ### Learning curves
#
# Let's take a look at the learning curves of the training and validation accuracy/loss when using the MobileNet V2 base model as a fixed feature extractor.
# + colab={} colab_type="code" id="53OTCh3jnbwV"
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# + [markdown] colab_type="text" id="foWMyyUHbc1j"
# Note: If you are wondering why the validation metrics are clearly better than the training metrics, the main factor is because layers like `tf.keras.layers.BatchNormalization` and `tf.keras.layers.Dropout` affect accuracy during training. They are turned off when calculating validation loss.
#
# To a lesser extent, it is also because training metrics report the average for an epoch, while validation metrics are evaluated after the epoch, so validation metrics see a model that has trained slightly longer.
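#
# A minimal sketch (not part of the original tutorial) of this behaviour: the same
# `tf.keras.layers.Dropout` layer zeroes activations only when it is called with
# `training=True`, which is what happens during `fit` but not during evaluation.
# +
dropout_demo = tf.keras.layers.Dropout(0.5)
demo_inputs = tf.ones((1, 10))
print(dropout_demo(demo_inputs, training=True))   # about half the values zeroed, the rest scaled by 2
print(dropout_demo(demo_inputs, training=False))  # unchanged input, as during validation
# -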
# + [markdown] colab_type="text" id="CqwV-CRdS6Nv"
# ## Fine tuning
# In the feature extraction experiment, you were only training a few layers on top of a MobileNet V2 base model. The weights of the pre-trained network were **not** updated during training.
#
# One way to increase performance even further is to train (or "fine-tune") the weights of the top layers of the pre-trained model alongside the training of the classifier you added. The training process will force the weights to be tuned from generic feature maps to features associated specifically with the dataset.
#
# Note: This should only be attempted after you have trained the top-level classifier with the pre-trained model set to non-trainable. If you add a randomly initialized classifier on top of a pre-trained model and attempt to train all layers jointly, the magnitude of the gradient updates will be too large (due to the random weights from the classifier) and your pre-trained model will forget what it has learned.
#
# Also, you should try to fine-tune a small number of top layers rather than the whole MobileNet model. In most convolutional networks, the higher up a layer is, the more specialized it is. The first few layers learn very simple and generic features that generalize to almost all types of images. As you go higher up, the features are increasingly more specific to the dataset on which the model was trained. The goal of fine-tuning is to adapt these specialized features to work with the new dataset, rather than overwrite the generic learning.
# + [markdown] colab_type="text" id="CPXnzUK0QonF"
# ### Un-freeze the top layers of the model
#
# + [markdown] colab_type="text" id="rfxv_ifotQak"
# All you need to do is unfreeze the `base_model` and set the bottom layers to be un-trainable. Then, you should recompile the model (necessary for these changes to take effect), and resume training.
# + colab={} colab_type="code" id="4nzcagVitLQm"
base_model.trainable = True
# + colab={} colab_type="code" id="-4HgVAacRs5v"
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
# + [markdown] colab_type="text" id="4Uk1dgsxT0IS"
# ### Compile the model
#
# Compile the model using a much lower learning rate.
# + colab={} colab_type="code" id="NtUnaz0WUDva"
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
optimizer = tf.keras.optimizers.RMSprop(lr=base_learning_rate/10),
metrics=['accuracy'])
# + colab={} colab_type="code" id="WwBWy7J2kZvA"
model.summary()
# + colab={} colab_type="code" id="bNXelbMQtonr"
len(model.trainable_variables)
# + [markdown] colab_type="text" id="4G5O4jd6TuAG"
# ### Continue training the model
# + [markdown] colab_type="text" id="0foWUN-yDLo_"
# If you trained to convergence earlier, this step will improve your accuracy by a few percentage points.
# + colab={} colab_type="code" id="ECQLkAsFTlun"
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
history_fine = model.fit(train_batches,
epochs=total_epochs,
initial_epoch = history.epoch[-1],
validation_data=validation_batches)
# + [markdown] colab_type="text" id="TfXEmsxQf6eP"
# Let's take a look at the learning curves of the training and validation accuracy/loss when fine-tuning the last few layers of the MobileNet V2 base model and training the classifier on top of it. The validation loss is much higher than the training loss, so you may get some overfitting.
#
# You may also get some overfitting as the new training set is relatively small and similar to the original MobileNet V2 datasets.
#
# + [markdown] colab_type="text" id="DNtfNZKlInGT"
# After fine-tuning, the model nearly reaches 98% accuracy.
# + colab={} colab_type="code" id="PpA8PlpQKygw"
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
# + colab={} colab_type="code" id="chW103JUItdk"
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# + [markdown] colab_type="text" id="_TZTwG7nhm0C"
# ## Summary:
#
# * **Using a pre-trained model for feature extraction**: When working with a small dataset, it is a common practice to take advantage of features learned by a model trained on a larger dataset in the same domain. This is done by instantiating the pre-trained model and adding a fully-connected classifier on top. The pre-trained model is "frozen" and only the weights of the classifier get updated during training.
# In this case, the convolutional base extracted all the features associated with each image and you just trained a classifier that determines the image class given that set of extracted features.
#
# * **Fine-tuning a pre-trained model**: To further improve performance, one might want to repurpose the top-level layers of the pre-trained models to the new dataset via fine-tuning.
# In this case, you tuned your weights such that your model learned high-level features specific to the dataset. This technique is usually recommended when the training dataset is large and very similar to the original dataset that the pre-trained model was trained on.
#
| site/en/tutorials/images/transfer_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv('data_1000.csv')
data=df[['correct_answ','bleu_score','levenstein_sim','cosine_sim','jaccard_sim']]
data.head(10)
data.describe()
data.boxplot(by='correct_answ', column=['bleu_score', 'levenstein_sim', 'cosine_sim', 'jaccard_sim'],
grid=True, figsize=(15,15))
X=data[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim']]
y=data['correct_answ']
X.hist(bins=50,figsize=(20,15))
from pandas.plotting import scatter_matrix
scatter_matrix(X, figsize=(14, 10))
corr_matrix = data.corr()
corr_matrix["correct_answ"].sort_values(ascending=False)
corr_matrix
# +
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import precision_score, recall_score,accuracy_score
from sklearn.base import clone
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import numpy as np
features=['bleu_score','levenstein_sim','cosine_sim','jaccard_sim']
plt.figure(figsize=(20,30))
count=0
feature_combination=[]
training_f1_scores=[]
training_accuracy=[]
training_precision=[]
training_confusion=[]
test_f1_scores=[]
test_accuracy=[]
test_precision=[]
test_confusion=[]
best_neighbors=[]
#feature combination
for i in range(len(features)):
for j in range(i+1,4):
feature=[features[i],features[j]]
X=data[feature]
y=data['correct_answ']
feature_combination.append(feature)
count+=1
        # for each combination, hold out 20% of the data as a test set; the remaining 80% is used for 3-fold cross-validation below
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["correct_answ"]):
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]
training = strat_train_set.copy()
testing=strat_test_set.copy()
X_training=training[feature]
y_training=training[['correct_answ']]
X_testing=testing[feature]
y_testing=testing[['correct_answ']]
        skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)
neighbors = list(range(1,50,2))
#create list to store result
accuracy_scores = []
recall_scores=[]
precision_scores=[]
f1_scores=[]
        # for each combination, sweep K over range(1, 50, 2)
for K in neighbors:
temp_accuracy=[]
temp_recall=[]
temp_precision=[]
temp_f1=[]
for train_index, test_index in skfolds.split(X_training, y_training):
knn = KNeighborsClassifier(n_neighbors = K)
clone_clf = clone(knn)
X_train_folds = X_training.iloc[train_index]
y_train_folds = y_training.iloc[train_index]
X_test_fold = X_training.iloc[test_index]
y_test_fold = y_training.iloc[test_index]
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
recall_score(y_test_fold,y_pred)
temp_accuracy.append(accuracy_score(y_test_fold,y_pred))
temp_precision.append(precision_score(y_test_fold,y_pred))
temp_recall.append(recall_score(y_test_fold,y_pred))
temp_f1.append(f1_score(y_test_fold, y_pred))
#store result of the mean score of three splits
accuracy_scores.append(np.mean(temp_accuracy))
recall_scores.append(np.mean(temp_recall))
precision_scores.append(np.mean(temp_precision))
f1_scores.append(np.mean(temp_f1))
        # for each combination, plot how the scores change with K
ax=plt.subplot(3,2,count)
plt.plot([k for k in range(1,50,2)],accuracy_scores,'g-',label='accuracy')
plt.plot([k for k in range(1,50,2)],f1_scores,'r-',label='f1_score')
plt.plot([k for k in range(1,50,2)],recall_scores,'y-',label='recall_score')
plt.plot([k for k in range(1,50,2)],precision_scores,'b-',label='precision')
plt.xlabel('K')
plt.ylabel('score')
plt.legend()
ax.title.set_text(str(feature))
plt.show
        # for each combination, identify the best K (index i in the score list corresponds to K = 2*i + 1)
best_neighbor= 2*f1_scores.index(max(f1_scores))+1
best_neighbors.append(best_neighbor)
        # for each combination, refit with the best K and evaluate on the held-out test set
best_knn = KNeighborsClassifier(n_neighbors = best_neighbor)
best_knn.fit(X_training,y_training)
best_knn_prediction=best_knn.predict(X_testing)
test_accuracy.append(accuracy_score(y_testing,best_knn_prediction))
test_precision.append(precision_score(y_testing,best_knn_prediction))
        test_f1_scores.append(f1_score(y_testing,best_knn_prediction))
test_confusion.append(confusion_matrix(y_testing,best_knn_prediction))
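# -
# A more compact sketch of the same K sweep (an illustration, not part of the original
# notebook): for each feature pair, `cross_val_score` replaces the manual StratifiedKFold
# loop. It assumes `data`, `features`, `KNeighborsClassifier` and `np` from the cells above.
# +
from itertools import combinations
from sklearn.model_selection import cross_val_score

for pair in combinations(features, 2):
    X_pair = data[list(pair)]
    y_pair = data['correct_answ']
    mean_f1 = [cross_val_score(KNeighborsClassifier(n_neighbors=k),
                               X_pair, y_pair, cv=3, scoring='f1').mean()
               for k in range(1, 50, 2)]
    best_k = 2 * int(np.argmax(mean_f1)) + 1
    print(pair, 'best K =', best_k, 'mean F1 = %.3f' % max(mean_f1))
# -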
# +
for i in range(6):
print('feature:',feature_combination[i])
print('best_neighbors:',best_neighbors[i])
print('test_f1_scores:',test_f1_scores[i])
print('test_accuracy:',test_accuracy[i])
print('test_precision:',test_precision[i])
print('test_confusion:\n',test_confusion[i])
print('\n')
plt.figure(figsize=(10,3))
x=np.arange(6)
bar_width=0.3
tick_label=feature_combination
a=plt.bar(x,test_f1_scores,bar_width,color='green',label='f1_score')
b=plt.bar(x+bar_width,test_accuracy,bar_width,color='red',label='accuracy')
c=plt.bar(x+bar_width*2,test_precision,bar_width,color='blue',label='precision')
plt.legend()
plt.xticks(x+bar_width/3,tick_label)
plt.xticks(rotation=270)
plt.show()
# +
X_training=training[['levenstein_sim','cosine_sim']]
y_training=training[['correct_answ']]
X_testing=testing[['levenstein_sim','cosine_sim']]
y_testing=testing[['correct_answ']]
best_knn = KNeighborsClassifier(n_neighbors = 1)
best_knn.fit(X_training,y_training)
best_knn_prediction=best_knn.predict(X_testing)
fig=plt.figure(figsize=(10,10))
plt.scatter(x=X_training['levenstein_sim'],y=X_training['cosine_sim'],c=training['correct_answ'],cmap='viridis',alpha=0.8,label='training')
plt.scatter(x=X_testing['levenstein_sim'],y=X_testing['cosine_sim'],marker='s',c=testing['correct_answ'],cmap='viridis',alpha=0.8,label='testing')
plt.scatter(x=X_testing['levenstein_sim'],y=X_testing['cosine_sim'],marker='*',c=best_knn_prediction,cmap='viridis',alpha=0.8,label='prediction',edgecolors='red')
plt.legend()
# -
"""
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["correct_answ"]):
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]"""
"""print(data["correct_answ"].value_counts() / len(data))
print('------------')
print(strat_train_set["correct_answ"].value_counts() / len(strat_train_set))
print('------------')
print(strat_test_set["correct_answ"].value_counts() / len(strat_test_set))"""
"""training = strat_train_set.copy()
testing=strat_test_set.copy()"""
"""X_training=training[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim']]
y_training=training[['correct_answ']]
X_testing=testing[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim']]
y_testing=testing[['correct_answ']]"""
# +
"""# iteration of different k
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import precision_score, recall_score,accuracy_score
from sklearn.base import clone
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.model_selection import cross_val_score
import numpy as np
skfolds = StratifiedKFold(n_splits=3, random_state=42)
neighbors = list(range(1,50,2))
accuracy_scores = []
recall_scores=[]
precision_scores=[]
f1_scores=[]
for K in neighbors:
temp_accuracy=[]
temp_recall=[]
temp_precision=[]
temp_f1=[]
for train_index, test_index in skfolds.split(X_training, y_training):
knn = KNeighborsClassifier(n_neighbors = K)
clone_clf = clone(knn)
X_train_folds = X_training.iloc[train_index]
y_train_folds = y_training.iloc[train_index]
X_test_fold = X_training.iloc[test_index]
y_test_fold = y_training.iloc[test_index]
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
recall_score(y_test_fold,y_pred)
temp_accuracy.append(accuracy_score(y_test_fold,y_pred))
temp_precision.append(precision_score(y_test_fold,y_pred))
temp_recall.append(recall_score(y_test_fold,y_pred))
temp_f1.append(f1_score(y_test_fold, y_pred))
accuracy_scores.append(np.mean(temp_accuracy))
recall_scores.append(np.mean(temp_recall))
precision_scores.append(np.mean(temp_precision))
f1_scores.append(np.mean(temp_f1))"""
""" print('-------------')
print('K=',K)
print(confusion_matrix(y_test_fold,y_pred))
print('precision_score: ',precision_score(y_test_fold,y_pred))
print('recall_score: ',recall_score(y_test_fold,y_pred))
print('f1_socre: ',f1_score(y_test_fold, y_pred))
print('')
print('\n')"""
# +
# -
"""# visulize the optimization process
import matplotlib.pyplot as plt
plt.title='optimization analysis'
plt.plot([k for k in range(1,50,2)],accuracy_scores,'g-',label='accuracy')
plt.plot([k for k in range(1,50,2)],f1_scores,'r-',label='f1_score')
plt.plot([k for k in range(1,50,2)],recall_scores,'y-',label='recall_score')
plt.plot([k for k in range(1,50,2)],precision_scores,'b-',label='precision')
plt.xlabel('K')
plt.ylabel('score')
plt.legend()
plt.show"""
"""#verify the performance on test datasets
best_knn = KNeighborsClassifier(n_neighbors = 1)
best_knn.fit(X_training,y_training)
best_knn_prediction=best_knn.predict(X_testing)
print('accuracy:',accuracy_score(y_testing,best_knn_prediction))
print('f1:',f1_score(y_testing,best_knn_prediction))
print('precision:',precision_score(y_testing,best_knn_prediction))
print('recall:',recall_score(y_testing,best_knn_prediction))"""
"""print(confusion_matrix(y_testing,best_knn_prediction))"""
"""best_knn = KNeighborsClassifier(n_neighbors = 50)
best_knn.fit(X_training,y_training)
best_knn_prediction=best_knn.predict(X_testing)
print('accuracy:',accuracy_score(y_testing,best_knn_prediction))
print('f1:',f1_score(y_testing,best_knn_prediction))
print('precision:',precision_score(y_testing,best_knn_prediction))
print('recall:',recall_score(y_testing,best_knn_prediction))
print(confusion_matrix(y_testing,best_knn_prediction))"""
| Notebooks/machine_learning/optimization/knn_classifer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .venv
# language: python
# name: .venv
# ---
# +
import os
import sys
import pickle
from inspect import signature
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
import sklearn.metrics as metrics
from sklearn.preprocessing import normalize
get_dir = os.path.dirname
PROJ_ROOT = get_dir(get_dir(os.path.abspath('__file__')))
print(PROJ_ROOT)
sys.path.append(os.path.join(PROJ_ROOT, 'src'))
from data import dataset
from model import model
# +
with open(os.path.join(dataset.DEFAULT_DATA_MODEL_DIRECTORY, 'data_model.pickle'), 'rb') as f:
dat_mod = pickle.load(f)
class_params = model.DEFAULT_CLASSIFIER_SETTINGS
vectorizer_settings = dataset.DEFAULT_VECTORIZER_SETTINGS
pred_model = model.build_mmdisambiguator(
data_model_params=vectorizer_settings,
data_model_path=os.path.join(dataset.DEFAULT_DATA_MODEL_DIRECTORY, 'data_model.pickle'),
classificator_parameters=class_params
)
# -
# train data
FEATURES_PATH = dataset.DEFAULT_FEATURES_DIRECTORY
TRAIN_FEATURES_PATH = os.path.join(FEATURES_PATH, 'train.npy')
data = np.load(TRAIN_FEATURES_PATH, allow_pickle=True)
# train the classifier from features and print report
features_trained_report = pred_model.train(data=data[:,:-1],classes=data[:,-1], report=True, source='features')
print(features_trained_report)
# load train text data:
TEXT_PATH = dataset.DEFAULT_PROCESSED_TEXT_DATA_DIRECTORY
TRAIN_TEXT_PATH = os.path.join(TEXT_PATH, 'train.csv')
textdata = pd.read_csv(TRAIN_TEXT_PATH, sep=';')
textdata.head()
# train the classifier on text and expect identical results
text_trained_report = pred_model.train(data=textdata.iloc[:,0],classes=textdata.iloc[:,1], report=True, source='text')
print(text_trained_report)
# load validation features and text
VALIDATION_FEATURES_PATH = os.path.join(FEATURES_PATH, 'validation.npy')
VALIDATION_TEXT_PATH = os.path.join(dataset.DEFAULT_PROCESSED_TEXT_DATA_DIRECTORY, 'validation.csv')
validation = np.load(VALIDATION_FEATURES_PATH, allow_pickle=True)
validation_text = pd.read_csv(VALIDATION_TEXT_PATH, sep=';')
# +
# predict on validation features and text and expect the same results
predict_on_features = pred_model.predict(validation[:,:-1])
predict_on_text = pred_model.predict(validation_text.iloc[:,0], source='text')
assert (predict_on_features[:,0] == predict_on_text[:,0]).all()
# +
predicted_valid = pred_model.predict(validation[:,:-1], format='binary', mode='prediction')
print(validation[:,:-1].sum(axis=1, keepdims=True))
print(validation_text.iloc[1,0])
to_print = 1
predicted_classes = pred_model.predict(validation[:,:-1], format='text')[:,0]
summary = [(sentence, real, probs, pred) for sentence, real, probs, pred in zip(
validation_text.iloc[:to_print,0], validation_text.iloc[:to_print,1],
predicted_valid[:to_print], predicted_classes[:to_print])]
for row in summary:
print(row)
pd.options.display.max_rows = 4000
# print(validation_text.iloc[:,1] != predicted_classes)
print(validation_text.iloc[[12, 24, 34, 35, 45, 50, 54, 57, 59, 62], 0])
print(predicted_valid[[12, 24, 34, 35, 45, 50, 54, 57, 59, 62]])
# print(validation_text.iloc[:to_print,0])
# -
| notebooks/06_training_and_evaluating_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # My first Jupyter notebook
# ## Browser compatibility
#
# Current versions of the following browsers are recommended:
# * Firefox
# * Chrome
# * Safari
# ## Cells
# A Jupyter notebook is organised into cells that can be run individually. There are different cell types.
# **Markdown** cells: used to add explanatory text
# * or [links](https:ki-campus.org)
# * or images
# ![edit_mode](https://ki-campus.org/sites/default/files/styles/front_about_icon/public/kic_front/about/KI-Campus__Einsatz_KI.png?itok=g-ZmQYv1)
# **Code** cells:
a='Hello ';
b='World';
c=a + b;
c
# Cells are added with the plus icon in the menu bar or with ESC + b
# ## Example: creating an interactive plot
# We consider the function
#
# \begin{align*}
# y = \sin(a x) + \cos(x) && \text{for } x \in [0, 10\pi)
# \end{align*}
#
# and want to observe the influence of the parameter $a$.
#
# First, we import some required libraries
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ### Function definition
#
# To illustrate the different approaches, we define
def myplot(a):
x = np.linspace(0, 10*np.pi, 10**3);
y = np.sin(a*x) + np.cos(x);
plt.plot(x,y);
plt.show();
# ### Standard plot
#
# If we change the value of $a$ in the cell below, save, and run the cell again, the plot changes.
a=1;
myplot(a)
# ### Interactive plot
#
#
# It is nicer, however, to create a slider and follow the influence of the parameter directly. For this we need the _interactive_ function from the ipywidgets library.
from ipywidgets import interactive
# Now we can create an interactive plot
interactive_plot=interactive(myplot, a = (-20,20, 0.1));
interactive_plot
| index_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# -
input_df = pd.read_csv("tati_pariuri_actualizat.csv")
input_df = input_df.replace('<NAME>', '<NAME>')
input_df = input_df.replace('<NAME>', '<NAME>')
# +
#input_df = input_df.drop([1464])
# -
input_df.shape
tmp_df = input_df.groupby(['DATA', 'CINE', 'CAMP']).count()
tmp_df[tmp_df.U1 != 2].shape
tmp_df = tmp_df[tmp_df.U1 == 2]
# +
merged_df = []
for i in tmp_df.index:
idf = input_df[(input_df.DATA == i[0]) & (input_df.CINE == i[1]) & (input_df.CAMP == i[2])]
if pd.isna(idf.iloc[0].GAZDE):
dest = idf.iloc[1].copy()
source = idf.iloc[0].copy()
else:
dest = idf.iloc[0].copy()
source = idf.iloc[1].copy()
dest['LOC_home'] = source['LOC']
dest['ZI_home'] = source['ZI']
dest['U1_home'] = source['U1']
dest['U2_home'] = source['U2']
dest['U3_home'] = source['U3']
dest['U4_home'] = source['U4']
dest['U5_home'] = source['U5']
dest['LOC.1_away'] = source['LOC.1']
dest['ZI.1_away'] = source['ZI.1']
dest['U1.1_away'] = source['U1.1']
dest['U2.1_away'] = source['U2.1']
dest['U3.1_away'] = source['U3.1']
dest['U4.1_away'] = source['U4.1']
dest['U5.1_away'] = source['U5.1']
merged_df.append(dest)
# -
merged = pd.DataFrame(np.array(merged_df))
merged.columns = merged_df[0].index
merged.to_csv('preprocessed_data.csv')
merged = pd.read_csv("preprocessed_data.csv")
merged.head()
merged = merged.rename(columns={"LOC":"G_LOC_TOTAL", "ZI": "G_ZI_TOTAL", "U1": "G_U1_TOTAL", "U2":"G_U2_TOTAL", "U3":"G_U3_TOTAL", "U4":"G_U4_TOTAL", "U5":"G_U5_TOTAL",
"LOC.1":"O_LOC_TOTAL", "ZI.1":"O_ZI_TOTAL", "U1.1":"O_U1_TOTAL", "U2.1":"O_U2_TOTAL", "U3.1":"O_U3_TOTAL", "U4.1":"O_U4_TOTAL", "U5.1":"O_U5_TOTAL",
"LOC_home":"G_LOC_ACASA", "ZI_home":"G_ZI_ACASA", "U1_home":"G_U1_ACASA", "U2_home":"G_U2_ACASA", "U3_home":"G_U3_ACASA", "U4_home":"G_U4_ACASA", "U5_home":"G_U5_ACASA",
"LOC.1_away":"O_LOC_DEPL", "ZI.1_away":"O_ZI_DEPL", "U1.1_away":"O_U1_DEPL", "U2.1_away":"O_U2_DEPL", "U3.1_away":"O_U3_DEPL", "U4.1_away":"O_U4_DEPL", "U5.1_away":"O_U5_DEPL"
})
merged.to_csv('preprocessed_data.csv')
merged = pd.read_csv('prep_data_input.csv')
# # Computing distance between historical results
Y = merged[[
'G_U1_TOTAL', 'G_U2_TOTAL', 'G_U3_TOTAL', 'G_U4_TOTAL', 'G_U5_TOTAL',
'O_U1_TOTAL', 'O_U2_TOTAL', 'O_U3_TOTAL', 'O_U4_TOTAL', 'O_U5_TOTAL',
'G_U1_ACASA', 'G_U2_ACASA', 'G_U3_ACASA', 'G_U4_ACASA', 'G_U5_ACASA',
'O_U1_DEPL', 'O_U2_DEPL', 'O_U3_DEPL', 'O_U4_DEPL', 'O_U5_DEPL']].fillna('NAN').values
Y_arr = pd.DataFrame(Y.reshape(-1), columns=['result'])
Y_arr = Y_arr.replace('ABANDON', 'AB')
from sklearn.preprocessing import LabelEncoder
# creating instance of labelencoder
labelencoder = LabelEncoder()
# Assigning numerical values and storing in another column
Y_arr['result_Cat'] = labelencoder.fit_transform(Y_arr['result'])
# +
from sklearn.preprocessing import OneHotEncoder
# creating instance of one-hot-encoder
enc = OneHotEncoder(handle_unknown='ignore')
# one-hot encode the label-encoded result categories
enc_df = pd.DataFrame(enc.fit_transform(Y_arr[['result_Cat']]).toarray())
# join the one-hot columns back onto the results dataframe
Y_arr = Y_arr.join(enc_df)
# -
enc_df
Y_oh_encoded = enc_df.values.reshape((Y.shape[0],Y.shape[1] * 5))
Y[120]
Y_oh_encoded.shape
from sklearn.metrics.pairwise import cosine_similarity
q = Y_oh_encoded[322]
rez = cosine_similarity(Y_oh_encoded, [q])
pd.DataFrame({'cosine':rez.squeeze()}).sort_values('cosine', ascending=False).head(10)
Y[322]
Y[870]
merged.iloc[322]
merged.iloc[870]
# # Computing distance between odds
X = merged[['1COTA', 'XCOTA', '2COTA']].fillna(-1).values
X
X[150]
from sklearn.metrics.pairwise import cosine_similarity
query = [2.3, 3.5, 3]
rez = cosine_similarity(X, [query])
pd.DataFrame({'cosine':rez.squeeze()}).sort_values('cosine', ascending=False)
query
X[1761]
merged.iloc[150]
merged.iloc[243]
| EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
#df = pd.read_csv(".\\Data_USD.csv", header=None,skiprows=1)
df = pd.read_csv(".\\Data_USD.csv")
df.head().to_csv(".\\test.csv")
T=df.groupby("SEX")
T.describe()
df.tail()
# X = df.drop('Y_Value',axis =1).values
# y = df['Y_Value'].values
X = df.drop('DEFAULT_PAYMENT_NEXT_MO',axis =1).values
X[2999,0]
X.shape
y = df['DEFAULT_PAYMENT_NEXT_MO'].values
#y.reshape(-1,1)
#print(X.shape)
X.shape
#print(y.shape)
y.shape
X_train, X_test, y_train, y_test = train_test_split (X,y,test_size=0.25, random_state=7)
y_test.T
X_test.shape
# +
from sklearn.preprocessing import StandardScaler
X_scaler = StandardScaler().fit(X_train)
# -
X_scaler
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
X_train_scaled
y_train_categorical = to_categorical(y_train)
y_test_categorical = to_categorical(y_test)
# +
from keras.models import Sequential
#instantiate
model = Sequential()
# +
from keras.layers import Dense
number_inputs = 50
number_hidden = 60
model.add(Dense(units = number_hidden, activation ='relu', input_dim=number_inputs))
model.add(Dense(units = 50, activation ='relu')) # second hidden layer
model.add(Dense(units = 40, activation ='relu')) # third hidden layer
model.add(Dense(units = 30, activation ='relu')) # fourth hidden layer
model.add(Dense(units = 15, activation ='relu')) # fifth hidden layer
model.add(Dense(units = 5, activation ='relu')) # sixth hidden layer
# -
number_classes =2 ## yes or no
model.add(Dense(units = number_classes, activation = 'sigmoid'))
model.summary()
#compile the model
model.compile(optimizer = 'sgd' ,
loss = 'categorical_crossentropy',
metrics =['accuracy'])
# +
#train the model
model.fit(X_train_scaled, y_train_categorical, epochs=100,shuffle = True,verbose =2)
# -
model.save("ccneuralnetwork.h5")
#quantify the model
model_loss, model_accuracy = model.evaluate(X_test_scaled,y_test_categorical,verbose =2)
print( model_loss )
print (model_accuracy)
# F1, Precision Recall, and Confusion Matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
# predict on the scaled test set, since the model was trained on scaled inputs
y_prediction = model.predict_classes(X_test_scaled)
y_prediction.reshape(-1,1)
print("Recall score:"+ str(recall_score(y_test, y_prediction)))
print(classification_report(y_test, y_prediction,
target_names=["default", "non_default"]))
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
# +
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="red" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_prediction)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Default', 'Non_default'],
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Default', 'Non_default'], normalize=True,
title='Normalized confusion matrix')
plt.show()
# + jupyter={"outputs_hidden": true}
| Model/3-NeuralNetwork6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SSIM
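# For reference, SSIM between two image patches $x$ and $y$ is defined as
#
# $$\mathrm{SSIM}(x,y)=\frac{(2\mu_x\mu_y+C_1)(2\sigma_{xy}+C_2)}{(\mu_x^2+\mu_y^2+C_1)(\sigma_x^2+\sigma_y^2+C_2)},$$
#
# where $\mu$, $\sigma^2$ and $\sigma_{xy}$ are local means, variances and covariance, and
# $C_1$, $C_2$ are small constants that stabilise the division; a value of 1 means the two
# images are identical.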
# +
import numpy as np
import matplotlib.pyplot as plt
from skimage import img_as_float#data,
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error
from skimage.color import rgb2gray
import skimage.io
import os
# -
PATH = '/home/ning_a/Desktop/CAPTCHA/dataset_ablation/len_8/'
img = skimage.io.imread(fname=PATH+"clean/00foN9IM_95.png")
img = rgb2gray(img)
img_noise = skimage.io.imread(fname=PATH+"org/00foN9IM_95.png")
img_noise = rgb2gray(img_noise)
img_const = skimage.io.imread(fname=PATH+"GAN/00foN9IM_95.png")
img_const = rgb2gray(img_const)
def get_similarity(path, file_name):
PATH = path
img = skimage.io.imread(fname=PATH+"clean/"+file_name)
img = rgb2gray(img)
img_noise = skimage.io.imread(fname=PATH+"org/"+file_name)
img_noise = rgb2gray(img_noise)
img_const = skimage.io.imread(fname=PATH+"GAN/"+file_name)
img_const = rgb2gray(img_const)
# img = img_as_float(data.camera())
rows, cols = img.shape
noise = np.ones_like(img) * 0.2 * (img.max() - img.min())
noise[np.random.random(size=noise.shape) > 0.5] *= -1
mse_none = mean_squared_error(img, img)
ssim_none = ssim(img, img, data_range=img.max() - img.min())
mse_noise = mean_squared_error(img, img_noise)
ssim_noise = ssim(img, img_noise,
data_range=img_noise.max() - img_noise.min())
mse_const = mean_squared_error(img, img_const)
ssim_const = ssim(img, img_const,
data_range=img_const.max() - img_const.min())
return ssim_none, ssim_noise, ssim_const
get_similarity(PATH, '00foN9IM_95.png')
PATH = '/home/ning_a/Desktop/CAPTCHA/dataset_ablation/len_8/'
file_list = os.listdir(PATH+'org/')
# Average the SSIM of the original and GAN-cleaned images against the clean references
total_org = 0
total_gan = 0
for file_name in os.listdir(PATH+'org/'):
_, temp_org, temp_gan = get_similarity(PATH, file_name)
total_org += temp_org
total_gan += temp_gan
print("total_org:",total_org/500)
print("total_gan:",total_gan/500)
| Example_and_Record/ablation_experiment/Img_Similarity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mooglol/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="NMEswXWh9mqw"
# # ASSIGNMENT
#
# ### 1) Replicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit).
#
# Get caught up to where we got our example in class and then try and take things further. How close to "pixel perfect" can you make the lecture graph?
#
# Once you have something that you're proud of, share your graph in the cohort channel and move on to the second exercise.
#
# ### 2) Reproduce another example from [FiveThirtyEight's shared data repository](https://data.fivethirtyeight.com/).
#
# **WARNING**: There are a lot of very custom graphs and tables at the above link. I **highly** recommend not trying to reproduce any that look like a table of values or something really different from the graph types that we are already familiar with. Search through the posts until you find a graph type that you are more or less familiar with: histogram, bar chart, stacked bar chart, line chart, [seaborn relplot](https://seaborn.pydata.org/generated/seaborn.relplot.html), etc. Recreating some of the graphics that 538 uses would be a lot easier in Adobe photoshop/illustrator than with matplotlib.
#
# - If you put in some time to find a graph that looks "easy" to replicate you'll probably find that it's not as easy as you thought.
#
# - If you start with a graph that looks hard to replicate you'll probably run up against a brick wall and be disappointed with your afternoon.
#
#
#
#
#
#
#
#
#
#
#
# + id="7SY1ZHawyZvz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 355} outputId="436772fa-d6a3-489c-9435-6b843841bd94"
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, FuncFormatter
from IPython.display import display, Image
url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png'
example = Image(url=url, width=400)
display(example)
# + id="OC9CsePzGRyn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 271} outputId="968c49a5-0670-4cfc-e407-a4605616c376"
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33],
index=range(1,11))
fake.plot.bar(color='C1', width=0.9);
# + id="yyNWxQ35IL0T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 271} outputId="235ed1d3-79fc-4d35-afc1-711b8319ebb4"
fake2 = pd.Series(
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
2, 2, 2,
3, 3, 3,
4, 4,
5, 5, 5,
6, 6, 6, 6,
7, 7, 7, 7, 7,
8, 8, 8, 8,
9, 9, 9, 9,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10])
ax = fake2.value_counts().sort_index().plot.bar(color='C1', width=0.9);
ax.grid(True)
# + id="iQgCP_yuIqXI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 334} outputId="bb0dd228-c53a-4fa7-f764-af4bfccb8960"
# From lesson
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
fig = plt.figure()
fig.patch.set(facecolor='white')
ax = fake.plot.bar(color="#ED713A", width=0.9)
ax.set(facecolor='white')
ax.text(x=-1.8, y=44, s="'An Inconvenient Sequel: Truth To Power' is divisive",
fontweight='bold', fontsize=12);
ax.text(x=-1.8, y=41.5, s="IMDb ratings for the film as of Aug. 29",
fontsize=11)
ax.set_ylabel("Percent of total votes", fontsize=9, fontweight='bold',
labelpad=10)
ax.set_xlabel("Rating", fontsize=9, fontweight='bold', labelpad=10)
ax.set_xticklabels(range(1,11), rotation=0)
ax.set_yticks(range(0,50,10))
ax.set_yticklabels(range(0, 50, 10))
plt.show()
# + id="eOxCaiQEUZ4I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 355} outputId="4e0288de-eeb5-4bdf-8e43-45af4af3808c"
display(example)
# + id="bnhOSTIkVh6k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 321} outputId="3e7f74da-0a81-40f9-82cc-021d2e85c99c"
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(5.47,3.75))
fig.patch.set(facecolor='white')
plt.rcParams["font.family"] = "Atlas Grotesk"
ax = fake.plot.bar(color="#ED713A", width=0.9)
ax.set(facecolor='white')
ax.set_ylim(bottom=-1.5)
ax.set_ylim(top=40.5)
ax.text(x=-1.8, y=46, s="'An Inconvenient Sequel: Truth To Power' is divisive",
fontweight='bold', fontsize=12);
ax.text(x=-1.8, y=43.5, s="IMDb ratings for the film as of Aug. 29",
fontsize=11)
ax.set_ylabel("Percent of total votes", fontsize=9.7, fontweight='bold',
labelpad=10, fontname="Atlas Grotesk")
ax.set_xlabel("Rating", fontsize=9.7, fontweight='bold', labelpad=10)
ax.set_xticklabels(range(1,11), rotation=0)
yt1 = [0,10,20,30,40]
yt2 = ['0 ', '10 ', '20 ', '30 ', '40%']
ax.set_yticks(yt1)
ax.set_yticklabels(yt2)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.tick_params(labelsize=9.42, grid_alpha=0.75, labelcolor = '#bfbfbf')
plt.show()
# + id="eqyRJO8mYziZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 355} outputId="3f143b56-353c-4f7e-ce84-bf45ecb6d6f6"
display(example)
# + id="1bjV7sl1kMr2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="83db8e96-923c-4fa8-c9e8-a40fbd666067"
url2 = 'https://fivethirtyeight.com/wp-content/uploads/2015/04/barry-jester-datalab-boomersdruguse-actual.png?w=575'
example2 = Image(url=url2, width=400)
display(example2)
# + id="v2zAqT8LmLyn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="30f543d7-b2ef-474c-e3ae-0ca58b8ec9f4"
df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/drug-use-by-age/drug-use-by-age.csv')
df.round(2)
df.tail(5)
# + id="jOfsFrLooilY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="a38c6479-3296-4369-9e41-cfe80c0f241a"
boomer_drug = df.loc[[15]]
boomer_drug
# + id="LsL9uodSq-6B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="053588a9-717e-4898-a805-bd19064e4a3c"
boomer_drug.drop([col for col in df.columns if 'frequency' in col],axis=1,inplace=True)
boomer_drug
# + id="p3eruQ2GtA1I" colab_type="code" colab={}
boomer_drug.drop('alcohol-use', axis=1, inplace=True)
# + id="Gs_0HPONtGiP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 131} outputId="e4f36463-9391-484b-b32e-fb624d5bbfe4"
boomer_drug
# + id="Sh-Hx3LguGca" colab_type="code" colab={}
boomer_drug = boomer_drug.T
# + id="g8ECqaW3xtCi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 483} outputId="fe3f9f1a-e8ee-4e2c-c74e-078d99012631"
boomer_drug
# + id="6n11vGK0x5Bk" colab_type="code" colab={}
boomer_drug = boomer_drug.drop(['age', 'n'])
# + id="hXA55l5fyCKg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="5831572b-ec5b-4348-aa17-ae0d753739cf"
boomer_drug
# + id="_Lxx5h-mympE" colab_type="code" colab={}
boomer_drug.rename(index = {'marijuana-use': 'Marijuana',
'cocaine-use': 'Cocaine',
'crack-use': 'Crack',
'heroin-use': 'Heroin',
'hallucinogen-use': 'Hallucinogen',
'inhalant-use': 'Inhalant',
'pain-releiver-use': 'Pain reliever',
'oxycontin-use': 'OxyContin',
'tranquilizer-use': 'Tranquilizer',
'stimulant-use': 'Stimulant',
'meth-use': 'Meth',
'sedative-use': 'Sedative'},
inplace = True)
# + id="cU-ua6OX0jzV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="249fed0b-715a-4e29-a247-6df34acd6287"
boomer_drug
# + id="JZm0yvqY076j" colab_type="code" colab={}
boomer_drug.columns = boomer_drug.columns.astype(str)
# + id="f484RmMb1_Hn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="55b51adc-78f1-461b-fffe-37f3db94b490"
boomer_drug
# + id="yqJbmXGK2QpJ" colab_type="code" colab={}
boomer_drug = boomer_drug.sort_values(by=['15'])
# + id="tyqUrKkH2WNt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 421} outputId="ccedaf34-5558-4042-93f9-bc4524de22bb"
boomer_drug
# + id="jYAwKlo6oztA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 318} outputId="02cecdd3-699d-4cc6-cbdc-6a227237e59d"
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(5.47,3.75))
fig.patch.set(facecolor='white')
ax = boomer_drug.plot.barh(color='r', width=0.9)
ax.set(facecolor='white')
ax.text(x=-2, y=12.2, s="Percentage of Americans aged 50-64 who said in a 2012 survey \n that they had used the following drugs in the past year",
fontsize=14);
y = [0.05, 0.15, 0.15, 0.23 ,0.25, 0.28, 0.36, 0.36, 0.87, 1.43, 2.52, 7.29]
for i, v in enumerate(y):
ax.text(v + 0.15, i + -0.18, str(v), color='black', size='12')
ax.grid(False)
ax.axes.get_xaxis().set_visible(False)
ax.get_legend().remove()
plt.show();
# + id="MaVVHz9TW1F7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="415e2540-e717-4903-d173-8b38384671ed"
display(example2)
# + id="N9_r-0nXW7DU" colab_type="code" colab={}
# Order of values are different because the data 538 used for their plot was with 2 decimal points,
# but the data they uploaded on github only uses 1 decimal place.
# I am fairly certain I put in the code for facecolor correctly, but the background color will not change from gray.
# Unsure about how to add a % next to 7.29 without breaking the code, gives errors when I try to.
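# One possible answer to the '%' question above (an illustration, not part of the
# original submission): build the label strings first and append the percent sign
# only to the last (largest) value, then use them inside the annotation loop of the
# plotting cell above. Assumes the `y` list from that cell is still in scope.
labels = [f"{v}%" if i == len(y) - 1 else str(v) for i, v in enumerate(y)]
print(labels)
# e.g. in the loop: ax.text(v + 0.15, i - 0.18, labels[i], color='black', size='12')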
# + [markdown] id="0wSrBzmJyWaV" colab_type="text"
# # STRETCH OPTIONS
#
# ### 1) Reproduce one of the following using the matplotlib or seaborn libraries:
#
# - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/)
# - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/)
# - or another example of your choice!
#
# ### 2) Make more charts!
#
# Choose a chart you want to make, from [Visual Vocabulary - Vega Edition](http://ft.com/vocabulary).
#
# Find the chart in an example gallery of a Python data visualization library:
# - [Seaborn](http://seaborn.pydata.org/examples/index.html)
# - [Altair](https://altair-viz.github.io/gallery/index.html)
# - [Matplotlib](https://matplotlib.org/gallery.html)
# - [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html)
#
# Reproduce the chart. [Optionally, try the "<NAME>."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes.
#
# Take notes. Consider sharing your work with your cohort!
# + id="dRJkKftiy5BJ" colab_type="code" colab={}
# More Work Here
| module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="g_nWetWWd_ns"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab_type="code" id="2pHVBk_seED1" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" colab_type="code" id="N_fMsQ-N8I7j" colab={}
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="pZJ3uY9O17VN"
# # Save and restore models
# + [markdown] colab_type="text" id="M4Ata7_wMul1"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/keras/save_and_restore_models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/keras/save_and_restore_models.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="mBdde4YJeJKF"
# Model progress can be saved during—and after—training. This means a model can resume where it left off and avoid long training times. Saving also means you can share your model and others can recreate your work. When publishing research models and techniques, most machine learning practitioners share:
#
# * code to create the model, and
# * the trained weights, or parameters, for the model
#
# Sharing this data helps others understand how the model works and try it themselves with new data.
#
# Caution: Be careful with untrusted code—TensorFlow models are code. See [Using TensorFlow Securely](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for details.
#
# ### Options
#
# There are different ways to save TensorFlow models—depending on the API you're using. This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow. For other approaches, see the TensorFlow [Save and Restore](https://www.tensorflow.org/guide/saved_model) guide or [Saving in eager](https://www.tensorflow.org/guide/eager#object-based_saving).
# + [markdown] colab_type="text" id="xCUREq7WXgvg"
# ## Setup
#
# ### Installs and imports
# + [markdown] colab_type="text" id="7l0MiTOrXtNv"
# Install and import TensorFlow and dependencies:
# + colab_type="code" id="RzIOVSdnMYyO" colab={}
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
# !pip install pyyaml h5py # Required to save models in HDF5 format
# + colab_type="code" id="7Nm7Tyb-gRt-" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow import keras
print(tf.version.VERSION)
# + [markdown] colab_type="text" id="SbGsznErXWt6"
# ### Get an example dataset
#
# To demonstrate how to save and load weights, you'll use the [MNIST dataset](http://yann.lecun.com/exdb/mnist/). To speed up these runs, use the first 1000 examples:
# + colab_type="code" id="9rGfFwE9XVwz" colab={}
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
# + [markdown] colab_type="text" id="anG3iVoXyZGI"
# ### Define a model
# + [markdown] colab_type="text" id="wynsOBfby0Pa"
# Start by building a simple sequential model:
# + colab_type="code" id="0HZbJIjxyX1S" colab={}
# Define a simple sequential model
def create_model():
model = tf.keras.models.Sequential([
keras.layers.Dense(512, activation='relu', input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
# Create a basic model instance
model = create_model()
# Display the model's architecture
model.summary()
# + [markdown] colab_type="text" id="soDE0W_KH8rG"
# ## Save checkpoints during training
# + [markdown] colab_type="text" id="mRyd5qQQIXZm"
# You can use a trained model without having to retrain it, or pick up training where you left off—in case the training process was interrupted. The `tf.keras.callbacks.ModelCheckpoint` callback allows you to continually save the model both *during* and at *the end* of training.
#
# ### Checkpoint callback usage
#
# Create a `tf.keras.callbacks.ModelCheckpoint` callback that saves weights only during training:
# + colab_type="code" id="IFPuhwntH8VH" colab={}
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,
verbose=1)
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=10,
validation_data=(test_images,test_labels),
callbacks=[cp_callback]) # Pass callback to training
# This may generate warnings related to saving the state of the optimizer.
# These warnings (and similar warnings throughout this notebook)
# are in place to discourage outdated usage, and can be ignored.
# + [markdown] colab_type="text" id="rlM-sgyJO084"
# This creates a single collection of TensorFlow checkpoint files that are updated at the end of each epoch:
# + colab_type="code" id="gXG5FVKFOVQ3" colab={}
# !ls {checkpoint_dir}
# + [markdown] colab_type="text" id="wlRN_f56Pqa9"
# Create a new, untrained model. When restoring a model from weights-only, you must have a model with the same architecture as the original model. Since it's the same model architecture, you can share weights despite it being a different *instance* of the model.
#
# Now rebuild a fresh, untrained model, and evaluate it on the test set. An untrained model will perform at chance levels (~10% accuracy):
# + colab_type="code" id="Fp5gbuiaPqCT" colab={}
# Create a basic model instance
model = create_model()
# Evaluate the model
loss, acc = model.evaluate(test_images, test_labels)
print("Untrained model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="1DTKpZssRSo3"
# Then load the weights from the checkpoint and re-evaluate:
# + colab_type="code" id="2IZxbwiRRSD2" colab={}
# Loads the weights
model.load_weights(checkpoint_path)
# Re-evaluate the model
loss,acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="bpAbKkAyVPV8"
# ### Checkpoint callback options
#
# The callback provides several options to provide unique names for checkpoints and adjust the checkpointing frequency.
#
# Train a new model, and save uniquely named checkpoints once every five epochs:
# + colab_type="code" id="mQF_dlgIVOvq" colab={}
# Include the epoch in the file name (uses `str.format`)
checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create a callback that saves the model's weights every 5 epochs
cp_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_path,
verbose=1,
save_weights_only=True,
period=5)
# Create a new model instance
model = create_model()
# Save the weights using the `checkpoint_path` format
model.save_weights(checkpoint_path.format(epoch=0))
# Train the model with the new callback
model.fit(train_images,
train_labels,
epochs=50,
callbacks=[cp_callback],
validation_data=(test_images,test_labels),
verbose=0)
# + [markdown] colab_type="text" id="1zFrKTjjavWI"
# Now, look at the resulting checkpoints and choose the latest one:
# + colab_type="code" id="p64q3-V4sXt0" colab={}
# ! ls {checkpoint_dir}
# + colab_type="code" id="1AN_fnuyR41H" colab={}
latest = tf.train.latest_checkpoint(checkpoint_dir)
latest
# + [markdown] colab_type="text" id="Zk2ciGbKg561"
# Note: the default tensorflow format only saves the 5 most recent checkpoints.
#
# To test, reset the model and load the latest checkpoint:
# + colab_type="code" id="3M04jyK-H3QK" colab={}
# Create a new model instance
model = create_model()
# Load the previously saved weights
model.load_weights(latest)
# Re-evaluate the model
loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="c2OxsJOTHxia"
# ## What are these files?
# + [markdown] colab_type="text" id="JtdYhvWnH2ib"
# The above code stores the weights to a collection of [checkpoint](https://www.tensorflow.org/guide/saved_model#save_and_restore_variables)-formatted files that contain only the trained weights in a binary format. Checkpoints contain:
# * One or more shards that contain your model's weights.
# * An index file that indicates which weights are stored in which shard.
#
# If you are only training a model on a single machine, you'll have one shard with the suffix: `.data-00000-of-00001`
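# + [markdown]
# A small, hedged sketch (added, not part of the original tutorial): `tf.train.list_variables`
# reads the index file and shards of a checkpoint and returns the stored (name, shape) pairs,
# which is a quick way to see what these files contain.
# +
# List the variables stored in the most recent checkpoint under `checkpoint_dir`
for name, shape in tf.train.list_variables(checkpoint_dir):
    print(name, shape)
# -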
# + [markdown] colab_type="text" id="S_FA-ZvxuXQV"
# ## Manually save weights
#
# You saw how to load the weights into a model. Manually saving them is just as simple with the `Model.save_weights` method. By default, `tf.keras`—and `save_weights` in particular—uses the TensorFlow [checkpoints](../../guide/keras/checkpoints) format with a `.ckpt` extension (saving in [HDF5](https://js.tensorflow.org/tutorials/import-keras.html) with a `.h5` extension is covered in the [Save and serialize models](../../guide/keras/saving_and_serializing#weights-only_saving_in_savedmodel_format) guide):
# + colab_type="code" id="R7W5plyZ-u9X" colab={}
# Save the weights
model.save_weights('./checkpoints/my_checkpoint')
# Create a new model instance
model = create_model()
# Restore the weights
model.load_weights('./checkpoints/my_checkpoint')
# Evaluate the model
loss,acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="kOGlxPRBEvV1"
# ## Save the entire model
#
# The model and optimizer can be saved to a file that contains both their state (weights and variables) and the model configuration. This allows you to export a model so it can be used without access to the original Python code. Since the optimizer-state is recovered, you can resume training from exactly where you left off.
#
# Saving a fully functional model is very useful—you can load it in TensorFlow.js ([HDF5](https://js.tensorflow.org/tutorials/import-keras.html), [Saved Model](https://js.tensorflow.org/tutorials/import-saved-model.html)) and then train and run it in web browsers, or convert it to run on mobile devices using TensorFlow Lite ([HDF5](https://www.tensorflow.org/lite/convert/python_api#exporting_a_tfkeras_file_), [Saved Model](https://www.tensorflow.org/lite/convert/python_api#exporting_a_savedmodel_)).
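# + [markdown]
# A hedged sketch (added, not from the original tutorial): one possible TensorFlow Lite
# conversion path for the model trained above. The `TFLiteConverter.from_keras_model`
# entry point is assumed to be available in this TensorFlow 2.x build; see the TF Lite
# links above for the version-specific API.
# +
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('my_model.tflite', 'wb') as f:
    f.write(tflite_model)
# -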
# + [markdown] colab_type="text" id="SkGwf-50zLNn"
# ### Save model as an HDF5 file
#
# Keras also provides a basic save format using the [HDF5](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) standard. For our purposes, the saved model can be treated as a single binary blob:
# + colab_type="code" id="m2dkmJVCGUia" colab={}
# Create a new model instance
model = create_model()
# Train the model
model.fit(train_images, train_labels, epochs=5)
# Save the entire model to an HDF5 file
model.save('my_model.h5')
# + [markdown] colab_type="text" id="GWmttMOqS68S"
# Now, recreate the model from that file:
# + colab_type="code" id="5NDMO_7kS6Do" colab={}
# Recreate the exact same model, including its weights and the optimizer
new_model = keras.models.load_model('my_model.h5')
# Show the model architecture
new_model.summary()
# + [markdown] colab_type="text" id="JXQpbTicTBwt"
# Check its accuracy:
# + colab_type="code" id="jwEaj9DnTCVA" colab={}
loss, acc = new_model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# + [markdown] colab_type="text" id="dGXqd4wWJl8O"
# This technique saves everything:
#
# * The weight values
# * The model's configuration (architecture)
# * The optimizer configuration
#
# Keras saves models by inspecting the architecture. Currently, it is not able to save TensorFlow optimizers (from `tf.train`). When using those you will need to re-compile the model after loading, and you will lose the state of the optimizer.
#
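# + [markdown]
# A hedged sketch (added): if the HDF5 file was saved from a model compiled with a `tf.train`
# optimizer, reload it without compiling and attach a fresh Keras optimizer afterwards; as
# noted above, the optimizer state is lost in that case.
# +
reloaded = keras.models.load_model('my_model.h5', compile=False)
reloaded.compile(optimizer='adam',
                 loss='sparse_categorical_crossentropy',
                 metrics=['accuracy'])
reloaded.summary()
# -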
# + [markdown] colab_type="text" id="kPyhgcoVzqUB"
# ### As a `saved_model`
# + [markdown] colab_type="text" id="LtcN4VIb7JkK"
# Caution: This method of saving a `tf.keras` model is experimental and may change in future versions.
#
# Build a new model, then train it:
# + colab_type="code" id="sI1YvCDFzpl3" colab={}
model = create_model()
model.fit(train_images, train_labels, epochs=5)
# + [markdown] colab_type="text" id="iUvT_3qE8hV5"
# Create a `saved_model`, and place it in a time-stamped directory with `tf.keras.experimental.export_saved_model`:
# + colab_type="code" id="sq8fPglI1RWA" colab={}
import time
saved_model_path = "./saved_models/{}".format(int(time.time()))
tf.keras.experimental.export_saved_model(model, saved_model_path)
saved_model_path
# + [markdown] colab_type="text" id="MjpmyPfh8-1n"
# List your saved models:
# + colab_type="code" id="ZtOvxA7V0iTv" colab={}
# !ls saved_models/
# + [markdown] colab_type="text" id="B7qfpvpY9HCe"
# Reload a fresh Keras model from the saved model:
# + colab_type="code" id="0YofwHdN0pxa" colab={}
new_model = tf.keras.experimental.load_from_saved_model(saved_model_path)
# Check its architecture
new_model.summary()
# + [markdown] colab_type="text" id="uWwgNaz19TH2"
# Run a prediction with the restored model:
# + colab_type="code" id="Yh5Mu0yOgE5J" colab={}
model.predict(test_images).shape
# + colab_type="code" id="Pc9e6G6w1AWG" colab={}
# The model has to be compiled before evaluating.
# This step is not required if the saved model is only being deployed.
new_model.compile(optimizer=model.optimizer, # Keep the optimizer that was loaded
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Evaluate the restored model
loss, acc = new_model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
| site/en/r2/tutorials/keras/save_and_restore_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#source: https://www.kaggle.com/philculliton/nlp-getting-started-tutorial
import numpy as np #linear algebra
import pandas as pd #data processing, CSV file I/O
from sklearn import feature_extraction, linear_model, model_selection, preprocessing
train_df = pd.read_csv("data/train.csv")
test_df = pd.read_csv("data/test.csv")
train_df[train_df['target'] == 0]['text'].values[1] # example of a tweet that is not about a real disaster
train_df[train_df['target'] == 1]["text"].values[1] # example of a tweet that is about a real disaster
| experimental/NLP_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Object Detection API TFRecord Generation
#
# This notebook generates TFRecords, that are needed to use custom datasets with the TensorFlow Object Detection API.
# These TFRecords can then be used to configure the training and the validation of the gesture detection model.
# The documentation can be found on the official [TensorFlow Object Detection API Repository](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/using_your_own_dataset.md#conversion-script-outline).
#
# The [TFRecord generation notebook by Dat Tran](https://github.com/datitran/raccoon_dataset/blob/master/generate_tfrecord.py) for his Raccoon Dataset proved to be a valuable resource.
# It provides an implementation of the TFRecord format that fits the goal of this notebook very well.
#
# Some lines of code, such as file paths, have to be adjusted for your setup. All required adjustments are marked with "Todo".
# +
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple
# -
# ## Helper methods
def split(df, group):
""" Groups same image names together.
One image can contain multiple hands. With this method those hands are grouped together
and attached to one image object.
"""
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
# ## Main method for TFRecord creation
# + pycharm={"name": "#%%\n"}
def create_tf_example(group, path):
""" Create the TFRecord.
    This method builds one TFRecord example from the input data and specifies what the data looks like.
"""
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
# Todo: Change file format if needed. Alternative: b'png'
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
# Todo: Map class to label
# Should you need multiple labels a dedicated label mapping file can be used.
# See https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
classes_text.append('hand'.encode('utf8'))
classes.append(1)
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
# -
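# A hedged sketch (added): if your dataset has more than one class, a small name-to-id
# mapping can replace the hard-coded 'hand'/1 pair used in create_tf_example above.
# The class names and ids below are placeholders.
LABEL_MAP = {'hand': 1}  # e.g. extend to {'hand': 1, 'fist': 2, ...}
def class_text_to_int(row_label):
    """Map a class name to its integer label id (hypothetical helper, not in the original script)."""
    return LABEL_MAP[row_label]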
# ## TFRecord writer methods
# After executing the data preprocessing script all files can be found under your defined folder name.
# A folder will be generated with the following structure:
#
# <pre>
# ego_lared_tiny/
# ├── images/
# │ ├── aishwaryfist000000109.jpg
# │ ├── aishwaryfist000000611.jpg
# │ ├── aishwaryfist000000782.jpg
# │ ├── ...
# │ ├── aishwaryfist000001356.jpg
# │ └── ...
# ├── train/
# │ ├── aishwaryfist000000109.jpg
# │ ├── aishwaryfist000000782.jpg
# │ └── ...
# ├── val/
# │ ├── aishwaryfist000000611.jpg
# │ ├── aishwaryfist000001356.jpg
# │ └── ...
# ├── labels_all.csv
# ├── labels_train.csv
# └── labels_val.csv
# </pre>
#
# Please have a look at the following cells to see how these folders have to be placed inside your detection_training/ folder.
# + pycharm={"name": "#%%\n"}
# Write training TFRecord
# Todo: Change file paths
writer = tf.python_io.TFRecordWriter("/home/jetbot/Documents/detection_training/train.record")
path = "/home/jetbot/Documents/detection_training/images_train/" # Path to training images
examples = pd.read_csv("/home/jetbot/Documents/detection_training/labels_train.csv") # Path to training labels
grouped = split(examples, 'frame')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
print("done")
# + pycharm={"name": "#%%\n"}
# Write validation TFRecord
# Todo: Change file paths
writer = tf.python_io.TFRecordWriter("/home/jetbot/Documents/detection_training/val.record")
path = "/home/jetbot/Documents/detection_training/images_val/" # Path to validation images
examples = pd.read_csv("/home/jetbot/Documents/detection_training/labels_val.csv") # Path to validation labels
grouped = split(examples, 'frame')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
print("done")
# -
# ## Output TFRecords to console for verification
#
# Outputting the records to the console allows manual verification of their correctness.
# + pycharm={"name": "#%%\n"}
# Write training record to console
# Todo: Change file path
i = 1
for example in tf.python_io.tf_record_iterator("/home/jetbot/Documents/detection_training/train.record"):
example = tf.train.Example.FromString(example)
print(example)
if i % 3 == 0:
break
i = i + 1
# + pycharm={"name": "#%%\n"}
# Write validation record to console
# Todo: Change file path
i = 1
for example in tf.python_io.tf_record_iterator("/home/jetbot/Documents/detection_training/val.record"):
example = tf.train.Example.FromString(example)
print(example)
if i % 3 == 0:
break
i = i + 1
| howto/2_detection/generate_tfrecords.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # String Formatting
# ## Anatomy of Strings
# - Single Quote
# - Double Quotes
# - Single Quote `VS` Double Quote (**Problems & Advantage**)
# - Triple Quotes (**also used for docstrings**)
# - New Line
# - Tab
str1 = 'Hello Bangladesh'
str2 = "<NAME>"
print(str2)
# The next line is intentionally broken: the apostrophe in Rahim's closes the
# single-quoted string early and raises a SyntaxError. Uncomment it to see the problem.
# str3 = 'Rahim's Name
# print(str3)
# ## Escape Sequence & Comments
str4 = "Badol's Name"
print(str4)
doc = """doctsring"""
print(doc)
"""This is a comment"""
print("I Love \nBangladesh")
# I Love <br>
# Bangladesh
print("I Love\tBangladesh")
# I Love Bangladesh
# ## Printing Message / String Formatting
# ### Printing any variable without message
#
# #### Syntax
# ```python
# print(name_of_variable)
# ```
num1 = 10
print(num1)
pi = 3.1416
print(pi)
p = 4.231236700912
print(round(p, 2))
num4 = 3 + 2j
print(num4)
name = "<NAME>"
print(name)
# ### Printing variable with message
# #### Syntax
# ```python
# print("message",variable)
# ```
pi = 3.1416
print("The value of pi is", pi)
g = 9.8
print("The value of g is", g)
a = 10
print(a, "is a integer value")
name = "<NAME>"
print("Hello", name)
# ### String Interpolation / f-Strings (Python 3.6+)
# #### Syntax
# ```python
# print(f"Message {variable_name}")
# ```
pi = 3.1416
print(f"The value of pi is {pi}")
name = "<NAME>"
print(f"My name is {name}")
name = "John"
age = 23
print(f"My name is {name} and I'm {age}")
a = 10
b = 20
total = a+b
print(f"The sum of {a} and {b} is {total}")
| book/python/01-string_formatting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="JSjG64ra4aFu" outputId="b24eeca0-6414-4d1f-de5d-ac7997770520" colab={"base_uri": "https://localhost:8080/", "height": 36}
from google.colab import drive
drive.mount('/content/drive')
# + id="V8-7SARDZErK"
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from matplotlib import pyplot as plt
import copy
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# + id="acRFqJNrZErV" outputId="213ff057-a494-4603-ca2f-c44eeb62a11f" colab={"base_uri": "https://localhost:8080/", "height": 105, "referenced_widgets": ["2dd73283065a47ceb7a108375459252e", "eefe8a13f7814fe9a0ba320f96d701ec", "c98d6bd086874285a3b6e8db7de60324", "f9e021cf5dc84fc0bcebac9a9487c5ec", "b6563e26ae0d424bbe6b41253db245d4", "e356209237d54a4283a82c1d81429761", "4f490069f74b4af1aab42f609f04cc48", "8974e531aa734e398ff68760647876cb"]}
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
# + id="gh5DXuAV1tp5"
# trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=True)
# testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# foreground_classes = {'plane', 'car', 'bird'}
# background_classes = {'cat', 'deer', 'dog', 'frog', 'horse','ship', 'truck'}
# fg1,fg2,fg3 = 0,1,2
# + id="EQOH55xR6INA"
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,shuffle=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=256,shuffle=False)
# + id="SadRzWBBZEsP"
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0)
self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0)
self.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0)
self.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
self.conv5 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0)
self.conv6 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv7 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.conv8 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.batch_norm1 = nn.BatchNorm2d(32, track_running_stats = False)
self.batch_norm2 = nn.BatchNorm2d(128, track_running_stats = False)
self.dropout1 = nn.Dropout2d(p=0.05)
self.dropout2 = nn.Dropout2d(p=0.1)
self.fc1 = nn.Linear(128,64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(self.batch_norm1(x))
x = (F.relu(self.conv2(x)))
x = self.pool(x)
x = self.conv3(x)
x = F.relu(self.batch_norm2(x))
x = (F.relu(self.conv4(x)))
x = self.pool(x)
x = self.dropout1(x)
x = self.conv5(x)
x = F.relu(self.batch_norm2(x))
x = (F.relu(self.conv6(x)))
x = self.pool(x)
x = self.conv7(x)
x = F.relu(self.batch_norm2(x))
x = self.conv8(x)
x = F.relu(self.batch_norm2(x))
x = x.view(x.size(0), -1)
x = self.dropout2(x)
# print(x.shape)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.dropout2(x)
x = self.fc3(x)
return x
# + id="1GvXR1zV5n4w"
cnn_net = CNN()#.double()
cnn_net = cnn_net.to("cuda")
# + id="cO37uxDTXDBV" outputId="16ee111a-5456-477c-de2c-d069ed4d7ad8" colab={"base_uri": "https://localhost:8080/", "height": 353}
print(cnn_net)
# + id="zfFdFpVOkBVl" outputId="d69f3403-9c95-42c4-cde4-06020917fa24" colab={"base_uri": "https://localhost:8080/", "height": 503}
for i,j in cnn_net.state_dict().items():
print(i)
# + id="aIeDTIPhknNt" outputId="fd5f0590-0148-4938-8df0-1ec9f1b0de90" colab={"base_uri": "https://localhost:8080/", "height": 73}
for i,j in cnn_net.state_dict().items():
if i == 'batch_norm1.weight':
print(i,j)
# + id="fek-B_uRlpLe" outputId="3b50daf8-2ae4-4962-f0b1-0c58aca3714e" colab={"base_uri": "https://localhost:8080/", "height": 36}
cnn_net.load_state_dict(torch.load("/content/drive/My Drive/Research/train_begining_layers_vs_last_layers/"+"cnn_net_8layer"+".pt"))
# + id="OqPjz2wXmOuo" outputId="b6831070-d63b-4606-8ff9-f5ea52f1166c" colab={"base_uri": "https://localhost:8080/", "height": 54}
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))
print(total,correct)
# + id="jsYFqNdvmOu0" outputId="83d747d7-47fb-48b8-aabe-564bda4ee92e" colab={"base_uri": "https://localhost:8080/", "height": 54}
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print(total,correct)
# + id="1DIuhJR1mOu8" outputId="15c474ad-46fa-4e3c-fc13-324c6c9aa755" colab={"base_uri": "https://localhost:8080/", "height": 204}
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
# + id="apTIj9b2mSOV"
cnn_net.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, padding=0).to("cuda")
cnn_net.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=0).to("cuda")
cnn_net.conv3 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=0).to("cuda")
cnn_net.conv4 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=0).to("cuda")
# + id="9VD5hmd2qPB4"
cnn_net.conv5.weight.requires_grad = False
cnn_net.conv5.bias.requires_grad = False
cnn_net.conv6.weight.requires_grad = False
cnn_net.conv6.bias.requires_grad = False
cnn_net.conv7.weight.requires_grad = False
cnn_net.conv7.bias.requires_grad = False
cnn_net.conv8.weight.requires_grad = False
cnn_net.conv8.bias.requires_grad = False
cnn_net.fc1.weight.requires_grad = False
cnn_net.fc1.bias.requires_grad = False
cnn_net.fc2.weight.requires_grad = False
cnn_net.fc2.bias.requires_grad = False
cnn_net.fc3.weight.requires_grad = False
cnn_net.fc3.bias.requires_grad = False
# + id="Xhi9rNWpqgrv" outputId="6bffca33-28a2-4b37-dc5f-a1b8ae13c313" colab={"base_uri": "https://localhost:8080/", "height": 503}
for param in cnn_net.parameters():
print(param.requires_grad)
# + id="YicAYgZAxHUI" outputId="cce7c906-06df-47be-e25c-863d3b11fe81" colab={"base_uri": "https://localhost:8080/", "height": 54}
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))
print(total,correct)
# + id="W8ACz9AlxHUX" outputId="7a5524ff-cf08-4e44-e1a6-35c0c7bbb8d4" colab={"base_uri": "https://localhost:8080/", "height": 54}
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print(total,correct)
# + id="P_0ckt3pxHUf" outputId="86b74353-84d8-4488-dced-e237390ff435" colab={"base_uri": "https://localhost:8080/", "height": 204}
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
# + id="n5g3geNJ5zEu"
import torch.optim as optim
criterion_cnn = nn.CrossEntropyLoss()
optimizer_cnn = optim.SGD(cnn_net.parameters(), lr=0.01, momentum=0.9)
# + id="tFfAJZkcZEsY" outputId="c4cc39bf-19a6-4357-eb74-0280b7f3516c" colab={"base_uri": "https://localhost:8080/", "height": 1000}
acti = []
loss_curi = []
epochs = 300
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_cnn.zero_grad()
# forward + backward + optimize
outputs = cnn_net(inputs)
loss = criterion_cnn(outputs, labels)
loss.backward()
optimizer_cnn.step()
# print statistics
running_loss += loss.item()
mini_batch = 50
if i % mini_batch == mini_batch-1: # print every 50 mini-batches
print('[%d, %5d] loss: %.3f' %(epoch + 1, i + 1, running_loss / mini_batch))
ep_lossi.append(running_loss/mini_batch) # loss per minibatch
running_loss = 0.0
if(np.mean(ep_lossi) <= 0.01):
break;
loss_curi.append(np.mean(ep_lossi)) #loss per epoch
print('Finished Training')
# + id="WIAJ3UZN8rPE"
torch.save(cnn_net.state_dict(),"/content/drive/My Drive/Research/train_begining_layers_vs_last_layers/weights"+"CIFAR10_last4layer_fixed_cnn8layer"+".pt")
# + id="an7qmNLB-Ilb" outputId="787ad0d3-c803-4f6a-f193-4875561f06fa" colab={"base_uri": "https://localhost:8080/", "height": 54}
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %d %%' % (total, 100 * correct / total))
print(total,correct)
# + id="3WqTm-aW-TGU" outputId="94499d35-71e2-4767-d8cb-e28099880fa4" colab={"base_uri": "https://localhost:8080/", "height": 54}
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= cnn_net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total))
print(total,correct)
# + id="oQ1yPyXRBNT4" outputId="fbfc8e74-7cbe-4775-efd5-607ce556ef1a" colab={"base_uri": "https://localhost:8080/", "height": 204}
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
outputs = cnn_net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(4):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
# + id="bzU_HuQnEB29"
| 8_train_begining_layers_vs_end_layers/CIFAR10/CIFAR10_last4layer_fixed_cnn8layer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# <br />
# <div style="text-align: center;">
# <span style="font-weight: bold; color:#6dc; font-family: 'Arial'; font-size: 2.5em;">MODIS Normalized Difference Vegetation Index</span>
# </div>
import numpy as np
import os
import rasterio
import urllib2
import shutil
from contextlib import closing
from netCDF4 import Dataset
import datetime
import tinys3
def dataDownload():
now = datetime.datetime.now()
    year = now.year
    # Go back four months; wrap into the previous year when the offset crosses January.
    month = now.month - 4
    if month <= 0:
        month += 12
        year -= 1
print now
print year
print month
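    # Note: this base URL is the CHIRPS precipitation archive; the MOD13A2_M_NDVI file
    # requested below is unlikely to be hosted there, so adjust remote_path to the actual
    # MODIS NDVI source before running.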
remote_path = 'ftp://chg-ftpout.geog.ucsb.edu/pub/org/chg/products/CHIRPS-2.0/global_daily/tifs/p05/2017/'
last_file = 'MOD13A2_M_NDVI_'+str(year)+'-'+"%02d" % (month,)+'.TIFF'
local_path = os.getcwd()
print remote_path
print last_file
print local_path
with closing(urllib2.urlopen(remote_path+last_file)) as r:
with open(last_file, 'wb') as f:
shutil.copyfileobj(r, f)
print local_path+'/'+last_file
with rasterio.open(local_path+'/'+last_file) as src:
npixels = src.width * src.height
for i in src.indexes:
band = src.read(i)
print(i, band.min(), band.max(), band.sum()/npixels)
return last_file
def tiffile(dst,outFile):
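    # (Comment added) Rewrites the GeoTIFF block-by-block: negative (nodata) source values
    # are clamped to zero, every pixel is scaled by CM_IN_FOOT, and the result is written
    # as a single-band, LZW-compressed float64 raster. Note that as written the function
    # reads and rewrites outFile in place and never uses the dst argument; in this notebook
    # both names refer to the same file.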
CM_IN_FOOT = 30.48
with rasterio.open(outFile) as src:
kwargs = src.meta
kwargs.update(
driver='GTiff',
dtype=rasterio.float64, #rasterio.int16, rasterio.int32, rasterio.uint8,rasterio.uint16, rasterio.uint32, rasterio.float32, rasterio.float64
count=1,
compress='lzw',
nodata=0,
        bigtiff='NO' # set to 'YES' if the output will be larger than 4GB
)
windows = src.block_windows(1)
with rasterio.open(outFile,'w',**kwargs) as dst:
for idx, window in windows:
src_data = src.read(1, window=window)
# Source nodata value is a very small negative number
# Converting in to zero for the output raster
np.putmask(src_data, src_data < 0, 0)
dst_data = (src_data * CM_IN_FOOT).astype(rasterio.float64)
dst.write_band(1, dst_data, window=window)
def s3Upload(outFile):
# Push to Amazon S3 instance
conn = tinys3.Connection(os.getenv('S3_ACCESS_KEY'),os.getenv('S3_SECRET_KEY'),tls=True)
f = open(outFile,'rb')
conn.upload(outFile,f,os.getenv('BUCKET'))
# +
# Execution
now = datetime.datetime.now()
year = now.year
# Keep the same four-month offset as dataDownload(), wrapping the year if needed.
month = now.month - 4
if month <= 0:
    month += 12
    year -= 1
outFile ='MOD13A2_M_NDVI_'+str(year)+'-'+"%02d" % (month,)+'.TIFF'
print 'starting'
file = dataDownload()
print 'downloaded'
tiffile(file,outFile)
print 'converted'
#s3Upload(outFile)
print 'finish'
# -
# <span style="color:#6dc; font-family: 'Arial'; font-size: 2em;">
# **Checking what the data looks like**</span>
# +
src = rasterio.open('./'+outFile)
print 'Source: ',src
print 'Source mode: ',src.mode
array = src.read(1)
print '.TIF Shape: ',array.shape
print 'Source type:',src.dtypes
print(src.crs)
print(src.transform)
from matplotlib import pyplot
pyplot.imshow(array, cmap='gist_earth')
pyplot.show()
# -
| ResourceWatch/data_management/3) MODIS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
# $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
# $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
# $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
# $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
# <font style="font-size:28px;" align="left"><b>Vectors: One Dimensional Lists</b></font>
# <br>
# _prepared by <NAME>_
# <br><br>
# A <b>vector</b> is a list of numbers.
#
# Vectors are very useful to describe the state of a system, as we will see in the main tutorial.
#
# A list is a single object in python.
#
# Similarly, a vector is a single mathematical object.
#
# The number of elements in a list is its size or length.
#
# Similarly, the number of entries in a vector is called the <b>size</b> or <b>dimension</b> of the vector.
# consider the following list with 4 elements
L = [1,-2,0,5]
print(L)
# Vectors can be in horizontal or vertical shape.
#
# We show this list as a <i><u>four dimensional</u></i> <b>row vector</b> (horizontal) or a <b>column vector</b> (vertical):
#
# $$
# u = \mypar{1~~-2~~0~~5} ~~~\mbox{ or }~~~ v =\mymatrix{r}{1 \\ -2 \\ 0 \\ 5}, ~~~\mbox{ respectively.}
# $$
#
# Remark that we do not need to use any comma in vector representation.
# <h3> Multiplying a vector with a number</h3>
#
# A vector can be multiplied by a number.
#
# Multiplication of a vector with a number is also a vector: each entry is multiplied by this number.
#
# $$
# 3 \cdot v = 3 \cdot \mymatrix{r}{1 \\ -2 \\ 0 \\ 5} = \mymatrix{r}{3 \\ -6 \\ 0 \\ 15}
# ~~~~~~\mbox{ or }~~~~~~
# (-0.6) \cdot v = (-0.6) \cdot \mymatrix{r}{1 \\ -2 \\ 0 \\ 5} = \mymatrix{r}{-0.6 \\ 1.2 \\ 0 \\ -3}.
# $$
#
# We may think of this as enlarging or shrinking the entries of a vector.
#
# We verify our calculations in python.
# +
# 3 * v
v = [1,-2,0,5]
print("v is",v)
# we use the same list for the result
for i in range(len(v)):
v[i] = 3 * v[i]
print("3v is",v)
# -0.6 * v
# reinitialize the list v
v = [1,-2,0,5]
for i in range(len(v)):
v[i] = -0.6 * v[i]
print("0.6v is",v)
# -
# <h3> Summation of vectors</h3>
#
# Two vectors (with same dimension) can be summed up.
#
# The summation of two vectors is a vector: the numbers on the same entries are added up.
#
# $$
# u = \myrvector{-3 \\ -2 \\ 0 \\ -1 \\ 4} \mbox{ and } v = \myrvector{-1\\ -1 \\2 \\ -3 \\ 5}.
# ~~~~~~~ \mbox{Then, }~~
# u+v = \myrvector{-3 \\ -2 \\ 0 \\ -1 \\ 4} + \myrvector{-1\\ -1 \\2 \\ -3 \\ 5} =
# \myrvector{-3+(-1)\\ -2+(-1) \\0+2 \\ -1+(-3) \\ 4+5} = \myrvector{-4\\ -3 \\2 \\ -4 \\ 9}.
# $$
#
# We do the same calculations in Python.
# +
u = [-3,-2,0,-1,4]
v = [-1,-1,2,-3,5]
result=[]
for i in range(len(u)):
result.append(u[i]+v[i])
print("u+v is",result)
# print the result vector similarly to a column vector
print() # print an empty line
print("the elements of u+v are")
for j in range(len(result)):
print(result[j])
# -
# <h3> Task 1 </h3>
#
# Create two 7-dimensional vectors $u$ and $ v $ as two different lists in Python having entries randomly picked between $-10$ and $10$.
#
# Print their entries.
# +
from random import randrange
#
# your solution is here
#
#r=randrange(-10,11) # randomly pick a number from the list {-10,-9,...,-1,0,1,...,9,10}
# -
# <a href="Math20_Vectors_Solutions.ipynb#task1">click for our solution</a>
# <h3> Task 2 </h3>
#
# By using the same vectors, find the vector $ (3 u-2 v) $ and print its entries. Here $ 3u $ and $ 2v $ mean that $u$ and $v$ are multiplied by $3$ and $2$, respectively.
#
# your solution is here
#
# <a href="Math20_Vectors_Solutions.ipynb#task2">click for our solution</a>
# <h3> Visualization of vectors </h3>
#
# We can visualize the vectors with dimension at most 3.
#
# For simplicity, we give examples of 2-dimensional vectors.
#
# Consider the vector $ v = \myvector{1 \\ 2} $.
#
# A 2-dimensional vector can be represented on the two-dimensional plane by an arrow starting from the origin $ (0,0) $ to the point $ (1,2) $.
# %run math.py
visualize_vectors("example1")
# We represent the vectors $ 2v = \myvector{2 \\ 4} $ and $ -v = \myvector{-1 \\ -2} $ below.
# %run math.py
visualize_vectors("example2")
# As we can observe, multiplying by 2 enlarges the vector, while multiplying by $(-1)$ keeps its length the same but reverses its direction.
# <h3> The length of a vector </h3>
#
# The length of a vector is the (straight-line) distance from the point represented by its entries to the origin $(0,0)$.
#
# The length of a vector can be calculated by using Pythagoras Theorem.
#
# We visualize a vector, its length, and the contributions of each entry to the length.
#
# Consider the vector $ u = \myrvector{-3 \\ 4} $.
# %run math.py
visualize_vectors("example3")
# The length of $ u $ is denoted as $ \norm{u} $, and it is calculated as $ \norm{u} =\sqrt{(-3)^2+4^2} = 5 $.
#
# Here each entry contributes with its square value. All contributions are summed up. Then, we obtain the square of the length.
#
# This formula is generalized to any dimension.
#
# We find the length of the following vector by using Python:
#
# $$
# v = \myrvector{-1 \\ -3 \\ 5 \\ 3 \\ 1 \\ 2}
# ~~~~~~~~~~
# \mbox{and}
# ~~~~~~~~~~
# \norm{v} = \sqrt{(-1)^2+(-3)^2+5^2+3^2+1^2+2^2} .
# $$
# <div style="font-style:italic;background-color:#fafafa;font-size:10pt;"> Remember: There is a short way of writing power operation in Python.
# <ul>
# <li> In its generic form: $ a^x $ can be denoted by $ a ** x $ in Python. </li>
# <li> The square of a number $a$: $ a^2 $ can be denoted by $ a ** 2 $ in Python. </li>
# <li> The square root of a number $ a $: $ \sqrt{a} = a^{\frac{1}{2}} = a^{0.5} $ can be denoted by $ a ** 0.5 $ in Python.</li>
# </ul>
# </div>
# +
v = [-1,-3,5,3,1,2]
length_square=0
for i in range(len(v)):
print(v[i],":square ->",v[i]**2) # print each entry and its square value
length_square = length_square + v[i]**2 # sum up the square of each entry
length = length_square ** 0.5 # take the square root of the summation of the squares of all entries
print("the summation is",length_square)
print("then the length is",length)
# for square root, we can also use built-in function math.sqrt
print() # print an empty line
from math import sqrt
print("the square root of",length_square,"is",sqrt(length_square))
# -
# <h3> Task 3 </h3>
#
# Let $ u = \myrvector{1 \\ -2 \\ -4 \\ 2} $ be a four dimensional vector.
#
# Verify that $ \norm{4 u} = 4 \cdot \norm{u} $ in Python.
#
# Remark that $ 4u $ is another vector obtained from $ u $ by multiplying it with 4.
#
# your solution is here
#
# <a href="Math20_Vectors_Solutions.ipynb#task3">click for our solution</a>
# <h3> Notes:</h3>
#
# When a vector is multiplied by a number, its length is also multiplied by the same number.
#
# But, we should be careful with the sign.
#
# Consider the vector $ -3 v $. It has the same length of $ 3v $, but its direction is opposite.
#
# So, when calculating the length of $ -3 v $, we use the absolute value of the number:
#
# $ \norm{-3 v} = |-3| \norm{v} = 3 \norm{v} $.
#
# Here $ |-3| $ is the absolute value of $ -3 $.
#
# The absolute value of a number is its distance to 0. So, $ |-3| = 3 $.
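# A small verification of the note above (added sketch): compute the length of v and of -3v and compare.
# +
v = [1,-2,0,5]
length_v = sum(x**2 for x in v) ** 0.5
length_minus_3v = sum((-3*x)**2 for x in v) ** 0.5
print("the length of v is", length_v)
print("the length of -3v is", length_minus_3v, "which is 3 times the length of v:", 3*length_v)
# -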
# <h3> Task 4 </h3>
#
# Let $ u = \myrvector{1 \\ -2 \\ -4 \\ 2} $ be a four dimensional vector.
#
# Randomly pick a number $r$ from $ \left\{ \dfrac{1}{10}, \dfrac{2}{10}, \cdots, \dfrac{9}{10} \right\} $.
#
# Find the vector $(-r)\cdot u$ and then its length.
#
# your solution is here
#
# <a href="Math20_Vectors_Solutions.ipynb#task4">click for our solution</a>
| math/Math20_Vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RachitBansal/AppliancePower_TimeSeries/blob/master/Ensemble.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="rU0SxZHFwyah" colab_type="code" outputId="242575b0-cc96-4664-fd1f-0cd4294732c0" colab={"base_uri": "https://localhost:8080/", "height": 136}
####code to upload the data and resample the data (day wise)
from sklearn.externals import joblib
from keras.layers import Dense ,LSTM
from keras.models import Sequential
from sklearn.metrics import mean_absolute_error,mean_squared_error
import pandas as pd
import numpy as np
from math import sqrt
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from matplotlib import pyplot
from keras.layers import Dropout
from keras.layers import LeakyReLU
from keras.layers import LSTM
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers import Flatten
from sklearn.metrics import r2_score as r2
import matplotlib.pyplot as plt
import tensorflow as tf
# import r2
# import mane
import keras.backend as K
import os
# + id="vwHoyoEboN9L" colab_type="code" outputId="6b599e40-00ec-419e-e1f1-ac8b501496f7" colab={"base_uri": "https://localhost:8080/", "height": 124}
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
# + id="0d5_jy4MoKWz" colab_type="code" colab={}
file_path = './drive/My Drive/final_resampled_data_5min.pkl'
data = joblib.load(file_path)
# + id="JdJJrDjhw1En" colab_type="code" outputId="bd38429b-f048-4098-a03a-8bc1dd4536ef" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls
# + id="PJUZ2WAuwyat" colab_type="code" colab={}
import keras.backend as K
def r2_score(y_true, y_pred):
SS_res = K.sum(K.square(y_true-y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res/(SS_tot + K.epsilon()))
# + id="46uhLAGjwya1" colab_type="code" colab={}
# mane_loss = mane.mane
# + id="jh3Tf2VKwya8" colab_type="code" colab={}
## Data Loading and Preprocessing:
# + id="CQDj92qzwya-" colab_type="code" outputId="4f71dfb7-17f4-4a81-f088-bc20efaac15f" colab={"base_uri": "https://localhost:8080/", "height": 52}
print(data[:10,0])
# + id="0ctodNn1wybD" colab_type="code" colab={}
# ### importing the data files
# # x_year = joblib.load("hour_resampled_data.pkl")
# # x_year = x_year.values
# # y_year = joblib.load("y_year_values.pkl")
# ###slicing the tiestamp from x_year
# # timestamps = data[:,0]
# #preparing data file for rnn
# # rnn_data = x_year
# '''
# file = open(".dat")
# data = file.readlines()
# rnn_data=[]
# for i in data:
# temp = i.split(" ")
# rnn_data.append([int(x) for x in temp])
# '''
# ###changing the data file to dataframe
# rnn_data = pd.DataFrame(rnn_data)
# final_data = rnn_data.values
# # rnn_data = rnn_data.set_index([0])
# # rnn_data.index = pd.to_datetime(rnn_data.index,unit = "s")
# # #resampling the data hourwise
# # resampler_day = rnn_data.resample("D")
# # day_resampled_data = resampler_day.sum()
# #saving the resampled file in the pickle format
# #joblib.dump(day_resampled_data,"day_resampled_data.pkl")
# #changing the dataframe to array
# # final_day_array = np.asarray(day_resampled_data)
# #selecting the size of
# # print()
# training_fraction = float(input("Enter the fraction of data to be sent to training :"))
# length = len(rnn_data)
# train_size = int(training_fraction*length)
# errors_saver = {}
# + id="2GkHRR5MoxqX" colab_type="code" outputId="df0aa6b5-2e59-4890-8acd-fddfab8c75bf" colab={"base_uri": "https://localhost:8080/", "height": 34}
training_fraction = float(input("Enter the fraction of data to be sent to training :"))
length = len(data)
train_size = int(training_fraction*length)
# + id="SeGk-eropDc1" colab_type="code" colab={}
final_data = data/300
# + id="9owirsmJTUNE" colab_type="code" outputId="766c3743-9eff-4179-f24a-9d55c031af1a" colab={"base_uri": "https://localhost:8080/", "height": 34}
final_data.shape
# + id="WfbXbhRfwybJ" colab_type="code" colab={}
def return_data(final_data, i):
copy = final_data[:,i].reshape(-1,1)
sc = MinMaxScaler()
copy = sc.fit_transform(copy)
#now we are going to divide into training and test set
x_train = final_data[:train_size, i]
y_train = final_data[:train_size, i]
#x_train = x_train.reshape((-1,1,1))
train_x = []
train_y = []
print("enter n steps : ")
n_steps = int(input())
list_i = 0
print("Entering loop")
print(x_train.shape)
print(y_train.shape)
k=0
while k<x_train.shape[0]-n_steps-1:
train = x_train[k:k+n_steps]
train_x.append([float(x) for x in train])
train_y.append(y_train[k+n_steps])
k= k+1
train_x = np.asarray(train_x)
train_y = np.asarray(train_y)
train_x = train_x.reshape((-1,n_steps,1))
train_y = train_y.reshape((-1,1,1))
x_test = final_data[train_size:, i]
y_test = final_data[train_size:, i]
#x_test = x_test.reshape((-1,1,1))
test_x = []
test_y = []
k=0
while k < x_test.shape[0]-n_steps-1:
test= x_test[k:k+n_steps]
test_x.append([x for x in test])
test_y.append(y_test[k+n_steps])
k = k+1
test_x = np.asarray(test_x)
test_y = np.asarray(test_y)
test_x = test_x.reshape((-1,n_steps,1))
test_y = test_y.reshape((-1,1,1))
'''
regressor = Sequential()
regressor.add(LSTM(units = 10,activation = "relu",input_shape= (None,1)))
regressor.add(Dense(units = 1))
regressor.compile(optimizer = "adam",loss = "mean_squared_error",metrics = ["accuracy"])
regressor.fit(train_x,train_y,epochs = 100 ,batch_size = 7,validation_split = 0.1)
'''
# print("Enter 1 if you want to process next equipment and 0 if not :")
# decision = int(input())
# if decision==1:
# pass
# else:
# break
return train_x, test_x, train_y, test_y
# + id="N_cr0a9KjX57" colab_type="code" outputId="8aab9091-7cb4-4aec-b712-0d5cdae90dfc" colab={"base_uri": "https://localhost:8080/", "height": 121}
equipment = int(input("Equipment: "))
train_x, test_x, train_y, test_y = return_data(final_data, equipment-1)
# + id="x3LEcdkykrwW" colab_type="code" outputId="bb506435-773e-4e95-ac23-7441096c27b5" colab={"base_uri": "https://localhost:8080/", "height": 52}
print(np.mean(train_x.reshape(-1)), np.mean(train_y.reshape(-1)))
print(np.mean(test_x.reshape(-1)), np.mean(test_y.reshape(-1)))
# + id="BWCAMPYdwybO" colab_type="code" outputId="85e12072-5353-460b-bb34-04386f588307" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(train_x.shape, train_y.shape, test_x.shape, test_y.shape)
# + id="l7xRbDfsAxAm" colab_type="code" colab={}
# np.save(file='./drive/My Drive/train_x.npy', arr=train_x)
# np.save(file='./drive/My Drive2/train_y.npy', arr=train_y)
# np.save(file='./drive/My Drive/test_x.npy', arr=test_x)
# np.save(file='./drive/My Drive/test_y.npy', arr=test_y)
# + [markdown] id="QXa7rGl4wybT" colab_type="text"
# ## Ensemble:
# + [markdown] id="evhtKS4dwybU" colab_type="text"
# ### Sub-Models:
# - Using 3 CNN_LSTMs
# + id="J3hySRFgwybW" colab_type="code" outputId="19c9b160-23f5-4877-8540-4c7cb146fc63" colab={"base_uri": "https://localhost:8080/", "height": 816}
def fit_cnn_lstm(x_train, y_train):
n_timesteps, n_features = x_train.shape[1], x_train.shape[2]
model = Sequential()
model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(n_timesteps,1)))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(RepeatVector(1))
model.add(LSTM(200, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(100, activation='relu')))
model.add(TimeDistributed(Dense(1)))
model.summary()
model.compile(loss='mse', optimizer='adam',metrics = ["accuracy"])
model.fit(x_train, y_train, epochs=10, batch_size=128, verbose = 1)
return model
# os.makedirs('ensemble')
# fit and save models
n_members = 3
for i in range(n_members):
model = fit_cnn_lstm(train_x, train_y)
filename = './drive/My Drive/model_' + str(i + 1) + '.h5'
model.save(filename)
print('>>> Saved %s' % filename)
# + id="Fb-kn9OWwybc" colab_type="code" outputId="bcda4239-a5a6-456f-ed6b-fa44d4cb6956" colab={"base_uri": "https://localhost:8080/", "height": 590}
from keras.models import load_model
def load_all_models(n_models):
all_models = list()
for i in range(n_models):
# define filename for this ensemble
filename = './drive/My Drive/model_' + str(i + 1) + '.h5'
# load model from file
model = load_model(filename)
# add to list of members
all_models.append(model)
print('>>> loaded %s' % filename)
return all_models
n_members = 3
members = load_all_models(n_members)
print('Loaded %d models' % len(members))
# + id="cMxJxeZ6wybh" colab_type="code" colab={}
#### Evaluating the 3 CNN_LSTMS Individually
# + id="N0J5Ij0Twybn" colab_type="code" outputId="b20989f1-f495-4dcf-a272-6d50420ec640" colab={"base_uri": "https://localhost:8080/", "height": 69}
for model in members:
# testy_enc = to_categorical(testy)
_, acc = model.evaluate(test_x, test_y, verbose=0)
print('Model Accuracy: %.3f' % acc)
# + id="2F48COSorqVg" colab_type="code" colab={}
preds = []
for j in range(n_members):
model_preds = []
for i in range(1000):
model_preds.append(members[j].predict(test_x[i].reshape(1,-1,1))[0][0][0])
preds.append(model_preds)
# + id="A7CVOG3crwon" colab_type="code" outputId="f3a5e8f4-e5f2-4536-8646-475b882a40a7" colab={"base_uri": "https://localhost:8080/", "height": 265}
plt.plot(test_y[:1000].reshape(-1), c = 'orange')
list_colrs = ['red', 'blue', 'green']
for j in range(n_members):
plt.plot(preds[j], c = list_colrs[j])
plt.show()
# + [markdown] id="DS8Dza4mwybq" colab_type="text"
# ### Creating Dataset to go into XGBoost
# - Outputs from the 3 sub models flow into XGBoost
# + id="yvlHse1vwybs" colab_type="code" colab={}
def stacked_dataset(members, inputX):
stackX = None
for model in members:
# make prediction
yhat = model.predict(inputX, verbose=0)
        # stack each model's predictions along a new last axis: [rows, 1, members]
if stackX is None:
stackX = yhat
else:
stackX = np.dstack((stackX, yhat))
    # flatten to [rows, members] so each model's prediction becomes one input feature
stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2]))
return stackX
# + id="H9vulhTywybw" colab_type="code" colab={}
from xgboost import XGBClassifier
import xgboost
# + id="VfAGnlgSwybz" colab_type="code" colab={}
def fit_stacked_model(members, inputX, inputy, n_est=45, alpha = 10):
# create dataset using ensemble
stackedX = stacked_dataset(members, inputX)
# fit standalone model
model = xgboost.XGBRegressor(objective ='reg:linear', colsample_bytree = 0.3, learning_rate = 0.1, max_depth = 5, alpha = alpha, n_estimators = n_est)
model.fit(stackedX, inputy)
return model
# + id="dwOXaNTUwyb4" colab_type="code" outputId="37eccbab-f7a1-48ec-f115-6518ce34a00f" colab={"base_uri": "https://localhost:8080/", "height": 34}
model = fit_stacked_model(members, test_x, test_y)
# + [markdown] id="xab7e8jVwyb7" colab_type="text"
# ### Compiling Ensemble Model
# + id="KMVRTOGjwyb9" colab_type="code" colab={}
def stacked_prediction(members, model, inputX):
# create dataset using ensemble
stackedX = stacked_dataset(members, inputX)
# make a prediction
yhat = model.predict(stackedX)
return yhat
# + [markdown] id="C3nIDeYFwycC" colab_type="text"
# ### Evaluating
# + id="IzbHSGRewycD" colab_type="code" colab={}
# yhat = stacked_prediction(members, model, test_x)
# acc = r2_score(testy, yhat)
# print('--> Stacked Test Accuracy: %.3f' % acc)
# + id="X15ccjv-v5FC" colab_type="code" colab={}
# for i in range(1000):
# print(test_x[i].reshape(1,-1,1).shape)
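# +
# Hedged sketch (added): score the stacked ensemble on the test set with the sklearn
# metrics imported at the top of the notebook (`r2`, `mean_squared_error`).
yhat_stacked = stacked_prediction(members, model, test_x)
print('Stacked ensemble R2  :', r2(test_y.reshape(-1), yhat_stacked.reshape(-1)))
print('Stacked ensemble RMSE:', sqrt(mean_squared_error(test_y.reshape(-1), yhat_stacked.reshape(-1))))
# -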
# + id="mS2wOMaPtoZd" colab_type="code" outputId="5eb654e6-f678-49a0-f683-40f07edb53aa" colab={"base_uri": "https://localhost:8080/", "height": 1000}
n_e_list = [15,20,25,30,35] # number of estimators going into xgboost
for n_e in n_e_list:
model = fit_stacked_model(members, test_x, test_y, n_e)
preds = stacked_prediction(members, model, test_x[:1000])
plt.plot(preds)
plt.plot(test_y[:1000].reshape(-1))
plt.show()
# + id="wKhzi7M1yxiU" colab_type="code" outputId="3d329795-816c-4129-c224-1a9a390807ea" colab={"base_uri": "https://localhost:8080/", "height": 282}
model = fit_stacked_model(members, test_x, test_y, 40)
preds = stacked_prediction(members, model, test_x[:1000])
plt.plot(preds)
plt.plot(test_y[:1000].reshape(-1))
plt.show()
# + id="fwYCG-h8zNIQ" colab_type="code" outputId="a6590fe3-4b98-4cc7-f9c8-99585a2eb633" colab={"base_uri": "https://localhost:8080/", "height": 282}
model = fit_stacked_model(members, test_x, test_y, 50)
preds = stacked_prediction(members, model, test_x[:1000])
plt.plot(preds)
plt.plot(test_y[:1000].reshape(-1))
plt.show()
# + id="7sZgTQ-41BhK" colab_type="code" outputId="6c1a3097-ce99-478a-8f7d-0b0bc540e1b9" colab={"base_uri": "https://localhost:8080/", "height": 282}
model = fit_stacked_model(members, test_x, test_y, 60)
preds = stacked_prediction(members, model, test_x[:1000])
plt.plot(preds)
plt.plot(test_y[:1000].reshape(-1))
plt.show()
# + id="QHaanLpB1g94" colab_type="code" outputId="b467a27e-1472-4497-8f97-65e27f35427a" colab={"base_uri": "https://localhost:8080/", "height": 282}
model = fit_stacked_model(members, test_x, test_y, 60)
preds = stacked_prediction(members, model, test_x[:200])
plt.plot(preds)
plt.plot(test_y[:200].reshape(-1))
plt.show()
# + id="MkffNoBE1w-7" colab_type="code" outputId="53f2de4e-04e4-47e0-fa6b-caefc0677e22" colab={"base_uri": "https://localhost:8080/", "height": 265}
# model = fit_stacked_model(members, test_x, test_y, 60)
preds = stacked_prediction(members, model, test_x[:200])
plt.plot(preds)
plt.plot(test_y[:200].reshape(-1))
plt.show()
# + id="7EXz4NzJ3K7E" colab_type="code" outputId="d74d1052-19fb-46f4-9e6d-ca5c105d4e12" colab={"base_uri": "https://localhost:8080/", "height": 34}
model = fit_stacked_model(members, test_x, test_y, 100, 20)
# + id="Y2T9phmu15Uf" colab_type="code" outputId="6345d7ac-f963-46b0-f707-48a796b41e13" colab={"base_uri": "https://localhost:8080/", "height": 266}
preds = []
for j in range(n_members):
model_preds = []
for i in range(50):
model_preds.append(members[j].predict(test_x[i].reshape(1,-1,1))[0][0][0])
preds.append(model_preds)
e_preds = stacked_prediction(members, model, test_x[:50])
preds.append(e_preds)
plt.plot(test_y[:50].reshape(-1), c = 'orange')
list_colrs = ['red', 'blue', 'green', 'black']
labels = ['cnn-lstm 1', 'cnn-lstm 2', 'cnn-lstm 3', 'xgboost ensemble']
for j in range(n_members+1):
plt.plot(preds[j], c = list_colrs[j], label=labels[j])
plt.legend()
plt.show()
# + id="UQRCNwZwF47h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="68a94d89-d0b4-46e1-8624-bcb6df7e0c73"
arima_preds = np.load('./drive/My Drive/arima_preds_200.npy')
preds = []
lstm_preds = []
for i in range(200):
lstm_preds.append(members[0].predict(test_x[i].reshape(1,-1,1))[0][0][0])
e_preds = stacked_prediction(members, model, test_x[:200])
preds.append(lstm_preds)
preds.append(e_preds)
preds.append(arima_preds)
print(len(preds))
plt.plot(test_y[:200].reshape(-1), c = 'orange', label='actual')
list_colrs = ['red', 'black', 'blue']
labels = ['cnn-lstm', 'xgboost ensemble', 'ARIMA']
for j in range(3):
plt.plot(preds[j], c = list_colrs[j], label=labels[j])
plt.legend()
plt.show()
# + id="oHado0yIKPwv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="a82a9897-eeaf-4f87-f828-7596f22a64dd"
arima_preds = np.load('./drive/My Drive/arima_preds_200.npy')
preds = []
lstm_preds = []
for i in range(50):
lstm_preds.append(members[0].predict(test_x[i].reshape(1,-1,1))[0][0][0])
e_preds = stacked_prediction(members, model, test_x[:50])
preds.append(lstm_preds)
preds.append(e_preds)
preds.append(arima_preds[:50])
print(len(preds))
plt.plot(test_y[:50].reshape(-1), c = 'orange', label='actual')
list_colrs = ['red', 'black', 'blue']
labels = ['cnn-lstm', 'xgboost ensemble', 'ARIMA']
for j in range(3):
plt.plot(preds[j], c = list_colrs[j], label=labels[j])
plt.legend()
plt.show()
# + id="3dsixwtjIJBQ" colab_type="code" colab={}
arima_preds = np.load('./drive/My Drive/arima_preds_200.npy')
# + id="J4TJVurFHZSP" colab_type="code" colab={}
def make_comparison_graph(n_test, idx=0):
preds = []
lstm_preds = []
for i in range(10):
lstm_preds.append(members[0].predict(test_x[i].reshape(1,-1,1))[0][0][0])
e_preds = stacked_prediction(members, model, test_x[idx:idx+n_test])
preds.append(lstm_preds)
preds.append(e_preds)
preds.append(arima_preds[idx:idx+n_test])
print(len(preds))
plt.plot(test_y[idx:idx+n_test].reshape(-1), c = 'orange', label='actual')
list_colrs = ['red', 'black', 'blue']
labels = ['cnn-lstm', 'xgboost ensemble', 'ARIMA']
for j in range(3):
plt.plot(preds[j], c = list_colrs[j], label=labels[j])
plt.legend()
plt.show()
# + id="wo4-VxhrI9ku" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="69374ff9-9982-4636-c058-fca64e8a2333"
for i in range(0, 50, 10):
make_comparison_graph(10, i)
# + id="bdTtyf3nKphT" colab_type="code" colab={}
arima_preds = np.load('./drive/My Drive/arima_preds_1000.npy')
preds = []
lstm_preds = []
for i in range(1000):
lstm_preds.append(members[0].predict(test_x[i].reshape(1,-1,1))[0][0][0])
e_preds = stacked_prediction(members, model, test_x[:1000])
preds.append(lstm_preds)
preds.append(e_preds)
preds.append(arima_preds)
print(len(preds))
plt.plot(test_y[:1000].reshape(-1), c = 'orange', label='actual')
list_colrs = ['red', 'black', 'blue']
labels = ['cnn-lstm', 'xgboost ensemble', 'ARIMA']
for j in range(3):
plt.plot(preds[j], c = list_colrs[j], label=labels[j])
plt.legend()
plt.show()
# + id="TWpkpPsN6kxF" colab_type="code" colab={}
import pickle
pickle.dump(model, open("ensemble_weights.dat", "wb"))
# + id="rM6ahGKGwycI" colab_type="code" colab={}
# model = Sequential()
# model.add(Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(28,28)))
# model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
# model.add(MaxPooling1D(pool_size=2))
# model.add(Flatten())
# model.add(RepeatVector(1))
# model.add(LSTM(200, activation='relu', return_sequences=True))
# model.add(TimeDistributed(Dense(100, activation='relu')))
# model.add(TimeDistributed(Dense(1)))
# model.compile(loss= mane_loss, optimizer='adam',metrics = ["accuracy", r2])
# # fit network
# history = model.fit(x_train, y_train, epochs=30, batch_size=5, validation_split = 0.1)
# pyplot.plot(history.history['loss'])
# pyplot.plot(history.history['val_loss'])
# pyplot.title('model train vs validation loss')
# pyplot.ylabel('loss')
# pyplot.xlabel('epoch')
# pyplot.legend(['train', 'validation'], loc='upper right')
# pyplot.show()
# + id="z-K24VZ1wycM" colab_type="code" colab={}
y_pred = model.predict(test_x)
y_pred = y_pred.reshape((-1,1))
test_y = test_y.reshape((-1,1))
mse = mean_squared_error(test_y,y_pred)
rms = sqrt(mse)
mae = mean_absolute_error(test_y,y_pred)
print("mean_squared_error : ",mse)
print("root_mean_squared_error : ",rms)
print("mean_absolute_error : ",mae)
#errors_saver["errors_equipment{}".format(i+1)] = {"mean_squared_error":mse,"mean_absolute_error":mae,"root_mean_squared_error":rms}
y_pred = sc.inverse_transform(y_pred)
test_y = sc.inverse_transform(test_y)
l=0
while l<len(test_y)-7:
plt.scatter([1,2,3,4,5,6,7],y_pred[l:l+7,:],color = "cyan",label = "predicted_day")
#plt.savefig("equipment2_day(prediction){}".format(i//288))
#plt.legend()
#plt.show()
plt.scatter([1,2,3,4,5,6,7],test_y[l:l+7,:],color = "blue",label = "real_values")
plt.legend()
plt.xlabel("day")
plt.ylabel("power_consumption")
#plt.savefig("equipment9_day{}.png".format(i//24))
plt.show()
l = l+7
| Notebooks/Ensemble.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:lens-py]
# language: python
# name: conda-env-lens-py-py
# ---
import numpy as np
import xarray as xr
import glob
import matplotlib
import matplotlib.pyplot as plt
import cftime
import dask
import os
import statsmodels.api as sm
from scipy import stats
from sklearn import linear_model
# %matplotlib inline
thedir = '/glade/scratch/djk2120/clm5ppe/ens002/hist/'
thefiles = sorted(glob.glob(thedir+'*.h0.*nc'))
ds = xr.open_mfdataset(thefiles,combine='nested',concat_dim='ens')
nmonths = len(ds.time)
yr0 = ds['time.year'][0].values
ds['time'] =xr.cftime_range(str(yr0),periods=nmonths,freq='MS')
params =['taulnir', 'dleaf', 'tkd_sand', 'bsw_sf', 'n_melt_coef', 'medlynslope',
'jmaxb1', 'kmax', 'dbh', 'grperc', 'FUN_fracfixers',
'froot_leaf', 'leaf_long', 'tau_cwd', 'k_nitr_max_perday', 'cli_scale', 'vcmaxha']
thedir = '/glade/scratch/djk2120/ctsm51c6_PPEn08ctsm51d023_2deg_GSWP3V1_Sparse400_2000/run/'
thefile = glob.glob(thedir+'*.h0.*')[0]
base = xr.open_dataset(thefile)
plt.plot(la,'.')
base.GPP.plot()
def month_wts(nyears):
days_pm = [31,28,31,30,31,30,31,31,30,31,30,31]
return xr.DataArray(np.tile(days_pm,nyears),dims='time')
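# Quick sanity check of the month weighting used for the annual averages below
# (illustrative; assumes the model's 365-day no-leap calendar)
print(int(month_wts(1).sum()))  # 365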
tmp = xr.open_dataset('sparsegrid_land_area.nc')
la = tmp.landarea
max(la/landarea)
plt.plot(ds.grid1d_lat.sel(ens=0),la,'.')
plt.ylabel('represented landarea (km2)')
plt.xlabel('centroid latitude');
gpp_avg = 1e-9*(la*month_wts(10)*24*60*60/10*ds.GPP).sum(dim=['time','gridcell']).compute()
et_avg = 1/la.sum()*(4e-7*24*60*60/10*la*month_wts(10)*ds.EFLX_LH_TOT).sum(dim=['time','gridcell']).compute()
wue = gpp_avg/(1e-9*la.sum()*et_avg)
ix1 = [0,*3+2*np.arange(15)]
x1 = [1,*3+np.arange(15)]
ix2 = [1,*2+2*np.arange(16)]
x2 = 1+np.arange(17)
plt.figure(figsize=[10,5])
plt.plot(x1,gpp_avg[ix1],'.')
plt.plot(x2,gpp_avg[ix2],'.')
plt.xlabel('parameter')
plt.ylabel('mean GPP (PgC/yr)')
plt.xticks(1+np.arange(17),params,rotation = 45)
plt.title('I2000 mini-ensemble: 2005-2014')
plt.legend(['low','high']);
plt.figure(figsize=[10,5])
plt.plot(x1,et_avg[ix1],'.')
plt.plot(x2,et_avg[ix2],'.')
plt.xlabel('parameter')
plt.ylabel('mean ET (mm/yr)')
plt.xticks(1+np.arange(17),params,rotation = 45)
plt.title('I2000 mini-ensemble: 2005-2014')
plt.legend(['low','high']);
plt.figure(figsize=[10,5])
plt.plot(x1,wue[ix1],'.')
plt.plot(x2,wue[ix2],'.')
plt.xlabel('parameter')
plt.ylabel('mean WUE (gC/kgH2O[ET])')
plt.xticks(1+np.arange(17),params,rotation = 45)
plt.title('I2000 mini-ensemble: 2005-2014')
plt.legend(['low','high']);
thedir = '/glade/scratch/djk2120/clm5ppe/ens002/histSTEP3/'
thefiles = sorted(glob.glob(thedir+'*.h0.*nc'))
step3 = xr.open_mfdataset(thefiles,combine='nested',concat_dim='ens')
thedir = '/glade/scratch/djk2120/clm5ppe/ens002/histSTEP4/'
thefiles = sorted(glob.glob(thedir+'*.h0.*nc'))
step4 = xr.open_mfdataset(thefiles,combine='nested',concat_dim='ens')
ix = step3['time.year']>1
tvc_avg_step3 = step3.TOTVEGC.isel(time=ix).mean(dim=['gridcell']).compute()
ix = step4['time.year']>1
tvc_avg_step4 = step4.TOTVEGC.isel(time=ix).mean(dim=['gridcell']).compute()
mwts = month_wts(10)/365
tvc_avg_prod = (mwts*ds.TOTVEGC).groupby('time.year').sum().mean(dim='gridcell')
ee=0
plt.plot(1+np.arange(80),tvc_avg_step3.sel(ens=ee),'.')
plt.plot(81+np.arange(40),tvc_avg_step4.sel(ens=ee),'.')
plt.plot(121+np.arange(10),tvc_avg_prod.sel(ens=ee),'.')
plt.legend(['step3','step4','prod'],loc=4)
plt.title('ensemble member '+str(ee+1))
plt.xlabel('years since AD mode')
plt.ylabel('avg TOTVEGC (gC/m2)');
ix = step3['time.year']>1
tvc_tot_step3 = 1e-9*(la*step3.TOTVEGC.isel(time=ix)).sum(dim=['gridcell']).compute()
ix = step4['time.year']>1
tvc_tot_step4 = 1e-9*(la*step4.TOTVEGC.isel(time=ix)).sum(dim=['gridcell']).compute()
mwts = month_wts(10)/365
tvc_prod = 1e-9*(la*mwts*ds.TOTVEGC).groupby('time.year').sum().sum(dim='gridcell').compute()
ee=0
plt.plot(1+np.arange(80),tvc_tot_step3.sel(ens=ee),'.')
plt.plot(81+np.arange(40),tvc_tot_step4.sel(ens=ee),'.')
plt.plot(121+np.arange(10),tvc_prod.sel(ens=ee),'.')
plt.legend(['step3','step4','prod'],loc=4)
plt.title('ensemble member '+str(ee+1))
plt.xlabel('years since AD mode')
plt.ylabel('avg TOTVEGC (PgC)');
ee = 0
dq = np.zeros(12);
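# Decades 0-6 compare consecutive decadal means within step3, decade 7 spans the step3 -> step4
# handoff, decades 8-10 lie within step4, and decade 11 compares the end of step4 against the
# production run (ds); dq counts gridcells whose decadal-mean TOTECOSYSC changes by more than 10 gC/m2.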
for i in range(12):
if i<7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and(step3['time.year']>11+10*i,step3['time.year']<=21+10*i)
x1 = step3.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = step3.TOTECOSYSC.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and( step4['time.year']>1+10*(i-7),step4['time.year']<=11+10*(i-7))
x1 = step3.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i>7:
ix1 = np.logical_and( step4['time.year']> 1+10*(i-8),step4['time.year']<=11+10*(i-8))
ix2 = np.logical_and( step4['time.year']>11+10*(i-8),step4['time.year']<=21+10*(i-8))
x1 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==11:
ix = step4['time.year']>31
x1 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
mwts = month_wts(10)/365
x2 = (mwts*ds.TOTECOSYSC.sel(ens=ee)).groupby('time.year').sum().mean(dim='year')
dtec = x1-x2
dq[i] = (abs(dtec)>10).sum()
plt.plot(1+np.arange(12),dq/400,'-x')
plt.ylabel('TOTECOSYSC diseq (1gC/m2)')
plt.xlabel('decade')
plt.ylim([0,1])
plt.title('ensemble1: num gridcells');
ee = 0
dq = np.zeros(12);
for i in range(12):
if i<7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and(step3['time.year']>11+10*i,step3['time.year']<=21+10*i)
x1 = step3.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = step3.TOTECOSYSC.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and( step4['time.year']>1+10*(i-7),step4['time.year']<=11+10*(i-7))
x1 = step3.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i>7:
ix1 = np.logical_and( step4['time.year']> 1+10*(i-8),step4['time.year']<=11+10*(i-8))
ix2 = np.logical_and( step4['time.year']>11+10*(i-8),step4['time.year']<=21+10*(i-8))
x1 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==11:
ix = step4['time.year']>31
x1 = step4.TOTECOSYSC.sel(ens=ee).isel(time=ix1).mean(dim='time')
mwts = month_wts(10)/365
x2 = (mwts*ds.TOTECOSYSC.sel(ens=ee)).groupby('time.year').sum().mean(dim='year')
dtec = x1-x2
ixdq = (abs(dtec)>10)
dq[i] = la[ixdq].sum()/la.sum()
plt.plot(1+np.arange(12),dq,'-x')
plt.ylabel('TOTECOSYSC diseq (1gC/m2)')
plt.xlabel('decade')
plt.ylim([0,1])
plt.title('ensemble1: represented area');
ee = 0
dq = np.zeros(12);
for i in range(12):
if i<7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and(step3['time.year']>11+10*i,step3['time.year']<=21+10*i)
tec = step3.TOTECOSYSC+step3.TOTSOMC_1m-step3.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = tec.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and( step4['time.year']>1+10*(i-7),step4['time.year']<=11+10*(i-7))
tec = step3.TOTECOSYSC+step3.TOTSOMC_1m-step3.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
tec = step4.TOTECOSYSC+step4.TOTSOMC_1m-step4.TOTSOMC
x2 = tec.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i>7:
ix1 = np.logical_and( step4['time.year']> 1+10*(i-8),step4['time.year']<=11+10*(i-8))
ix2 = np.logical_and( step4['time.year']>11+10*(i-8),step4['time.year']<=21+10*(i-8))
tec = step4.TOTECOSYSC+step4.TOTSOMC_1m-step4.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = tec.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==11:
ix = step4['time.year']>31
tec = step4.TOTECOSYSC+step4.TOTSOMC_1m-step4.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
mwts = month_wts(10)/365
        tec = ds.TOTECOSYSC+ds.TOTSOMC_1m-ds.TOTSOMC
x2 = (mwts*tec.sel(ens=ee)).groupby('time.year').sum().mean(dim='year')
dtec = x1-x2
ixdq = (abs(dtec)>10)
dq[i] = la[ixdq].sum()/la.sum()
plt.plot(1+np.arange(12),dq,'-x')
plt.ylabel('TOTECOSYSC* diseq (1gC/m2)')
plt.xlabel('decade')
plt.ylim([0,1])
plt.title('ensemble1: represented area');
landarea = np.zeros([400])
ixy = step3['grid1d_ixy'].sel(ens=0).values
jxy = step3['grid1d_jxy'].sel(ens=0).values
k = -1
for i,j in zip(ixy,jxy):
i = int(i)-1
j = int(j)-1
k +=1
landarea[k] = step3['area'].sel(ens=0)[j,i]*step3['landfrac'].sel(ens=0)[j,i]
plt.plot(step3['grid1d_lat'].sel(ens=0).values,landarea,'.')
ee = 0
dq = np.zeros(12);
for i in range(12):
if i<7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and(step3['time.year']>11+10*i,step3['time.year']<=21+10*i)
tec = step3.TOTECOSYSC+step3.TOTSOMC_1m-step3.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = tec.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==7:
ix1 = np.logical_and( step3['time.year']>1+10*i,step3['time.year']<=11+10*i)
ix2 = np.logical_and( step4['time.year']>1+10*(i-7),step4['time.year']<=11+10*(i-7))
tec = step3.TOTECOSYSC+step3.TOTSOMC_1m-step3.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
tec = step4.TOTECOSYSC+step4.TOTSOMC_1m-step4.TOTSOMC
x2 = tec.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i>7:
ix1 = np.logical_and( step4['time.year']> 1+10*(i-8),step4['time.year']<=11+10*(i-8))
ix2 = np.logical_and( step4['time.year']>11+10*(i-8),step4['time.year']<=21+10*(i-8))
tec = step4.TOTECOSYSC+step4.TOTSOMC_1m-step4.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
x2 = tec.sel(ens=ee).isel(time=ix2).mean(dim='time')
if i==11:
ix = step4['time.year']>31
tec = step4.TOTECOSYSC+step4.TOTSOMC_1m-step4.TOTSOMC
x1 = tec.sel(ens=ee).isel(time=ix1).mean(dim='time')
mwts = month_wts(10)/365
tec = ds.TOTECOSYSC+ds.TOTSOMC_1m-ds.TOTSOMC
x2 = (mwts*tec.sel(ens=ee)).groupby('time.year').sum().mean(dim='year')
dtec = x1-x2
ixdq = (abs(dtec)>10)
dq[i] = landarea[ixdq].sum()/landarea.sum()
plt.plot(dq,'-x')
ix1 = np.logical_and(step4['time.year']>21,step4['time.year']<=31)
ix2 = np.logical_and(step4['time.year']>31,step4['time.year']<=41)
tec1 = step4.TOTECOSYSC.sel(ens=0).isel(time=ix1).mean(dim='time')
tec2 = step4.TOTECOSYSC.sel(ens=0).isel(time=ix2).mean(dim='time')
dtec = (tec2-tec1)/10
ixg = abs(dtec)>1
diseq = landarea[ixg].sum()/landarea.sum()
plt.plot(dtec,'.')
plt.ylabel('delta TEC (gC/m2/yr)')
plt.xlabel('gridcell')
ix1 = step4['time.year']>31
tec1 = step4.TOTECOSYSC.sel(ens=0).isel(time=ix1).mean(dim='time')
mwts = month_wts(10)/365
tec2 = (mwts*ds.TOTECOSYSC).sel(ens=0).groupby('time.year').sum().mean(dim='year')
dtec = (tec2-tec1)/10
ixg = abs(dtec)>1
diseq = landarea[ixg].sum()/landarea.sum()
plt.plot(dtec,'.')
plt.ylabel('delta TEC (gC/m2/yr)')
plt.xlabel('gridcell');
| pyth/ens002.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data and cleaning
# +
import pandas as pd
data = pd.read_html('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M',skiprows=1)
df = data[0]
df.columns=['PostalCode','Borough','Neighborhood']
df.head()
# -
# Remove rows where the borough value is "Not assigned".
df = df[df.Borough != 'Not assigned']
df.head(7)
# Find the rows that have a "Not assigned" value in the neighborhood column and then assign the borough value to the neighborhood column.
# +
df[df['Neighborhood'] == 'Not assigned']
def replace(row):
if row['Neighborhood'] == 'Not assigned':
row['Neighborhood'] = row['Borough']
return row
df = df.apply(replace, axis = 1)
df.head(7)
# -
# For rows with the same postal code (and borough), aggregate their neighborhood values together, separated with ","
df = df.groupby(['PostalCode','Borough']).apply(lambda group: ','.join(group['Neighborhood']))
df = df.to_frame()
df = df.rename(columns = {0: 'Neighborhood'}).reset_index()
df.head()
df.shape
# # Use location data
# !pip install geocoder
# +
# import geocoder
# # initialize your variable to None
# lat_lng_coords = None
# # loop until you get the coordinates
# while(lat_lng_coords is None):
# g = geocoder.google('{}, Toronto, Ontario'.format('M1B'))
# lat_lng_coords = g.latlng
# latitude = lat_lng_coords[0]
# longitude = lat_lng_coords[1]
# -
# The above code runs for a very long time and cannot fetch the coordinates via geocoder, so the coordinates are read instead from the CSV file available online.
df2 = pd.read_csv('http://cocl.us/Geospatial_data')
df2.head()
df3 = pd.merge(df, df2, left_on = 'PostalCode', right_on = 'Postal Code', how = 'left')
df3 = df3.drop(columns = ['Postal Code'])
df3.head()
# The analysis will be done only on boroughs whose names contain 'Toronto'
df4 = df3[df3['Borough'].str.contains('Toronto')]
df4.head()
# +
import numpy as np
import json
import requests
from pandas.io.json import json_normalize
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
# import k-means from clustering stage
from sklearn.cluster import KMeans
# #!conda install -c conda-forge folium=0.5.0 --yes # uncomment this line if you haven't completed the Foursquare API lab
import folium
print('Libraries imported.')
# -
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
# +
CLIENT_ID = 'F4W32SODNFMMF41X5J1VSEEBIQKLBTWVC4F4ELCU2SGHXP4B' # your Foursquare ID
CLIENT_SECRET = '<KEY>' # your Foursquare Secret
VERSION = '20190126' # Foursquare API version
print('Your credentails:')
print('CLIENT_ID: ' + CLIENT_ID)
print('CLIENT_SECRET:' + CLIENT_SECRET)
LIMIT = 100
radius = 500
# -
# Query Foursquare to explore the venues around the places in the dataframe.
def getNearbyVenues(names, latitudes, longitudes, radius=500):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
# create the API request URL
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
LIMIT)
# make the GET request
results = requests.get(url).json()["response"]['groups'][0]['items']
# return only relevant information for each nearby venue
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighborhood',
'Neighborhood Latitude',
'Neighborhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Category']
return(nearby_venues)
toronto_venues = getNearbyVenues(names=df4['Neighborhood'],
latitudes=df4['Latitude'],
longitudes=df4['Longitude']
)
print(toronto_venues.shape)
toronto_venues.head()
toronto_venues.groupby('Neighborhood').count()
print('There are {} unique categories.'.format(len(toronto_venues['Venue Category'].unique())))
# # One hot encoding
# +
# one hot encoding
toronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix="", prefix_sep="")
# add neighborhood column back to dataframe
toronto_onehot['Neighborhood'] = toronto_venues['Neighborhood']
# move neighborhood column to the first column
fixed_columns = [toronto_onehot.columns[-1]] + list(toronto_onehot.columns[:-1])
toronto_onehot = toronto_onehot[fixed_columns]
toronto_onehot.head()
# -
toronto_grouped = toronto_onehot.groupby('Neighborhood').mean().reset_index()
toronto_grouped.head()
toronto_grouped.shape
def return_most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
# +
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighborhood'] = toronto_grouped['Neighborhood']
for ind in np.arange(toronto_grouped.shape[0]):
neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)
neighborhoods_venues_sorted.head()
# -
# # Clustering
# Use k-means clustering to group the neighborhoods based on the most common venues near each neighborhood.
# +
# set number of clusters
kclusters = 5
toronto_grouped_clustering = toronto_grouped.drop('Neighborhood', axis=1)
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0).fit(toronto_grouped_clustering)
# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:100]
# -
# add clustering labels
neighborhoods_venues_sorted.insert(0, 'Cluster Labels', kmeans.labels_)
# +
toronto_merged = df4
# merge toronto_grouped with toronto_data to add latitude/longitude for each neighborhood
toronto_merged = toronto_merged.join(neighborhoods_venues_sorted.set_index('Neighborhood'), on='Neighborhood')
toronto_merged.head() # check the last columns!
# -
# # Display clustering result on the map
# +
# create map
latitude = 43.657952
longitude = -79.387383
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(toronto_merged['Latitude'], toronto_merged['Longitude'], toronto_merged['Neighborhood'], toronto_merged['Cluster Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
map_clusters
| Toronto-neighborhood-clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lab 4
# ## CNN for classifying the CIFAR-10 dataset
#
# This lab uses CIFAR-10, which contains 60000 32x32 colour images in 10 classes (6000 images per class). The training set has 50000 images and the test set 10000 images.
#
# The dataset is split into 6 batches: 5 training batches and 1 test batch. The test batch contains 1000 randomly selected images from each class. The training batches contain the remaining images in random order. Overall the training set contains exactly 5000 images of each class, but an individual batch is not necessarily balanced (it may contain more images of one class than of another).
#
#
# ## Step 0: Getting the data
#
# **Download the data from https://www.cs.toronto.edu/~kriz/cifar.html (CIFAR-10 python version). Read the data description.**
# Specify the path to the data files
CIFAR_DIR = 'cifar-10-batches-py/'
# The downloaded archive contains the files data_batch_1, data_batch_2, ..., data_batch_5 and test_batch, which are Python dictionaries serialized with the cPickle module.
#
# ** Load all the data. Use the function provided on the website. **
# This function is from the download site, it is a custom function that goes hand-in-hand with the data
def unpickle(file):
import pickle
with open(file, 'rb') as fo:
cifar_dict = pickle.load(fo, encoding='bytes')
return cifar_dict
dirs = ['batches.meta','data_batch_1','data_batch_2',
'data_batch_3','data_batch_4','data_batch_5','test_batch']
all_data = [0,1,2,3,4,5,6]
for i,direc in zip(all_data,dirs):
all_data[i] = unpickle(CIFAR_DIR+direc)
batch_meta = all_data[0]
data_batch1 = all_data[1]
data_batch2 = all_data[2]
data_batch3 = all_data[3]
data_batch4 = all_data[4]
data_batch5 = all_data[5]
test_batch = all_data[6]
batch_meta
data_batch1.keys()
# ### Display one image with matplotlib.
#
# ** Use plt.imshow(). You need to use the reshape and transpose functions to obtain two-dimensional RGB images. Explain the solution below.**
#
# #X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("uint8")
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
X = data_batch1[b"data"]
# The input data is an array with 10000 rows (one per image) and 3072 columns of pixel values
X.shape
# Need to reshape the 2nd dimension into a higher order tensor in order to deal with an image for plotting
X = X.reshape(10000, 3, 32, 32).transpose(0,2,3,1).astype("uint8")
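# Each 3072-value row is stored channel-major (the first 1024 values are red, the next 1024 green,
# the last 1024 blue), so the reshape recovers (N, channels, 32, 32) and transpose(0,2,3,1) moves the
# channel axis last, giving (N, 32, 32, 3) arrays that plt.imshow can display directly.
print(X.shape)  # (10000, 32, 32, 3)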
# Check the pixel value range (imshow accepts uint8 in 0-255 or floats in 0-1)
X[0].max()
(X[0]/255).max()
plt.imshow(X[0])
plt.imshow(X[1])
plt.imshow(X[4])
# # Functions for working with the data
#
# **Explain how the code below works and what it is used for.**
def one_hot_encode(vec, vals=10):
'''
10- possible labels
'''
n = len(vec)
out = np.zeros((n, vals))
out[range(n), vec] = 1
return out
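# Quick illustration of the encoder on made-up labels: each row gets a single 1 in the
# column of its class index.
demo = one_hot_encode(np.array([0, 2, 9]))
print(demo.shape, demo.argmax(axis=1))  # (3, 10) [0 2 9]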
class CifarHelper():
def __init__(self):
self.i = 0
# Grabs a list of all the data batches for training
self.all_train_batches = [data_batch1,data_batch2,data_batch3,data_batch4,data_batch5]
# Grabs a list of all the test batches
self.test_batch = [test_batch]
# Intialize some empty variables
self.training_images = None
self.training_labels = None
self.test_images = None
self.test_labels = None
def set_up_images(self):
print("Setting Up Training Images and Labels")
# Vertically stacks the training images
self.training_images = np.vstack([d[b"data"] for d in self.all_train_batches])
train_len = len(self.training_images)
# Reshapes and normalizes training images
self.training_images = self.training_images.reshape(train_len,3,32,32).transpose(0,2,3,1)/255
# One hot Encodes the training labels ([0,0,0,1,0,0,0,0,0,0])
self.training_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.all_train_batches]), 10)
print("Setting Up Test Images and Labels")
# Vertically stacks the test images
self.test_images = np.vstack([d[b"data"] for d in self.test_batch])
test_len = len(self.test_images)
# Reshapes and normalizes test images
self.test_images = self.test_images.reshape(test_len,3,32,32).transpose(0,2,3,1)/255
# One hot Encodes the test labels (e.g. [0,0,0,1,0,0,0,0,0,0])
self.test_labels = one_hot_encode(np.hstack([d[b"labels"] for d in self.test_batch]), 10)
def next_batch(self, batch_size):
        # reshape the slice to (batch_size, 32, 32, 3) so it matches the model input
        x = self.training_images[self.i:self.i+batch_size].reshape(batch_size,32,32,3)
y = self.training_labels[self.i:self.i+batch_size]
self.i = (self.i + batch_size) % len(self.training_images)
return x, y
# **Create an instance of the CifarHelper class and call its set_up_images() method. Explain why this step is needed.**
# +
# Before Your tf.Session run these two lines
ch = CifarHelper()
ch.set_up_images()
# During your session to grab the next batch use this line
# (Just like we did for mnist.train.next_batch)
# batch = ch.next_batch(100)
# -
# ## Building the model
#
#
import tensorflow as tf
# **Create 2 placeholders for x and y_true, as well as a hold_prob placeholder that will hold the dropout keep probability.**
#
# Place holders for x and y_true to be filled in later
x = tf.placeholder(tf.float32,shape=[None,32,32,3])
y_true = tf.placeholder(tf.float32,shape=[None,10])
#
# hold_prob is the dropout keep probability: a regularization knob that keeps the network from relying on just a few neurons
hold_prob = tf.placeholder(tf.float32)
# ### Helper functions
#
# ** Take the following functions from the example:**
#
# * init_weights
# * init_bias
# * conv2d
# * max_pool_2by2
# * convolutional_layer
# * normal_full_layer
#
# What are they for?
# +
# Initialize weights as variables
def init_weights(shape):
init_random_dist = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(init_random_dist)
# Initialize biases as variables
def init_bias(shape):
init_bias_vals = tf.constant(0.1, shape=shape)
return tf.Variable(init_bias_vals)
# Create a 2d convolution layer that only does one step in W,H dimensions
# Remember strides [batches, H, W, channels]
# batches and channels are virtually always 1, you don't want to skip any observations or channels
# We can vary H,W if we like. 1,1 is relatively standard but 2,2 is used as well to streamline the feature gen.
# padding = 'SAME'
# https://stackoverflow.com/questions/37674306/what-is-the-difference-between-same-and-valid-padding-in-tf-nn-max-pool-of-t
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
#
def max_pool_2by2(x):
# ksize = 1,2,2,1 reduces the size of H, W by 1/2
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
def convolutional_layer(input_x, shape):
W = init_weights(shape)
b = init_bias([shape[3]])
return tf.nn.relu(conv2d(input_x, W) + b)
def normal_full_layer(input_layer, size):
input_size = int(input_layer.get_shape()[1])
W = init_weights([input_size, size])
b = init_bias([size])
return tf.matmul(input_layer, W) + b
# -
# ### Creating the layers
#
# ** Create the first convolutional layer and the pooling layer that follows it. Use a convolution kernel size of 4.**
#
# Will compute 32 features for each 4,4 patch
# 4,4 is patch size,
# next is input channels
# last number is features to compute, number of output channels,
convo_1 = convolutional_layer(x,shape=[4,4,3,32])
convo_1_pooling = max_pool_2by2(convo_1)
# ** Create the next convolutional layer and pooling layer. **
convo_2 = convolutional_layer(convo_1_pooling,shape=[4,4,32,64])
convo_2_pooling = max_pool_2by2(convo_2)
# ** Create the first fully connected layer; vary the number of output neurons.**
8*8*64
# Reshape it for the last DNN layer
convo_2_flat = tf.reshape(convo_2_pooling,[-1,8*8*64])
#
# Run a DNN activation on the last layer
full_layer_one = tf.nn.relu(normal_full_layer(convo_2_flat,1024))
# **Create a dropout layer. **
# To prevent overfitting, randomly drop some of the neurons so the network cannot rely on just a few of them.
full_one_dropout = tf.nn.dropout(full_layer_one,keep_prob=hold_prob)
# ** Create the final fully connected layer.**
# Define y_pred
y_pred = normal_full_layer(full_one_dropout,10)
# ### Loss Function
#
# ** Create the cross_entropy loss function **
# Put in a loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_true,logits=y_pred))
# ### Optimizer
# ** Create an Adam optimizer. **
# Adam: gradient-based optimization with an adaptive per-parameter learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
# ## Graph Session
#
# ** Train and test the network; during training, periodically print the test results.**
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(2000):
batch = ch.next_batch(100)
sess.run(train, feed_dict={x: batch[0], y_true: batch[1], hold_prob: 0.5})
# PRINT OUT A MESSAGE EVERY 100 STEPS
if i%100 == 0:
print('Currently on step {}'.format(i))
print('Accuracy is:')
# Test the Train Model
matches = tf.equal(tf.argmax(y_pred,1),tf.argmax(y_true,1))
acc = tf.reduce_mean(tf.cast(matches,tf.float32))
print(sess.run(acc,feed_dict={x:ch.test_images,y_true:ch.test_labels,hold_prob:1.0}))
print('\n')
| Lab_3_CNN_CIFAR_10_ZIADE (2).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Lecture 14: Marking different numbers on the number line (第14讲 在数轴上标记不同的数)
# + [markdown] heading_collapsed=true
# ### Assignments 作业
# -
# 1. **Mass** is the amount of "matter" in an object (though "matter" may be difficult to define), whereas **weight** is the force exerted on an object by gravity. In other words, an object with a mass of 1.0 kilogram weighs approximately 9.81 newtons on the surface of the Earth, which is its mass multiplied by the gravitational field strength. The object's weight is less on Mars, where gravity is weaker, and more on Saturn, and very small in space when far from any significant source of gravity, but it always has the same mass.
#
# **质量**是物体中“物质”的数量(尽管“物质”可能难以定义),而**重量**是地球吸引物体的力量,也就是重力作用在物体上的力,。 换句话说,一个质量为 1.0 千克的物体在地球表面上的重量约为 9.81 牛顿,即其质量乘以引力场强度。该物体的重量在火星上较小,那里的重力较弱,而在土星上则较大,并且在远离任何重要重力源的空间中非常小,但它的质量始终相同。
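#
# For example (illustrative numbers): a student with a mass of 40 kg is pulled toward the Earth with a force of about 40 × 9.81 ≈ 392 newtons, while on Mars (g ≈ 3.7 newtons per kilogram) the same 40 kg mass would weigh only about 148 newtons.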
#
# Answer the following question step by step:
# 依次回答下面的问题:
#     1. Measure your own mass on a scale at home with the help of your Mom or Dad, and observe the reading of the scale and its unit. 在爸爸妈妈的帮助下用家里的秤称一称自己,看看上面的读数是多少,单位是什么。
#     2. Then write a program to calculate the force, in newtons, with which the Earth attracts you. 编程计算你受到的球的吸引力有多少牛顿。
#
# +
def my_weight():
    mass = int(input("Your mass in kilograms, please (enter an integer): "))
    weight = 9.81 * mass
    print("Your mass is {} kg and your weight is {} newtons.".format(mass, weight))
    print("The equation is: {} kg (your mass) x 9.81 (newtons per kilogram).".format(mass))
# -
my_weight()
# + [markdown] hidden=true
# 2. Write a method to compare two numbers and tell which number is larger. Requirements:
# 创建一个方法用来判断给定的两个数哪一个大。要求:
# 1. the method name is `larger_num`, 方法名为`larger_num`;
# 2. the method accepts two parameters, the names of which is up to you. 方法接受两个整数型参数,参数名可自由设定
# 3. within the method, compare the values of the two parameters, output the larger one with the format of "XX is larger.". If two numbers are euqal output: "Two numbers are equal." 在方法内部,比较这两个数,打印输出较大的那个数,如果两个数相等则打印输出“两个数相等”。
#
#     Run and test your method 5 times with the following parameter values. Observe whether your method's output is the same as the output provided below. 创建好这个方法后,依次用下面的两组数来测试你创建的方法,观察输出的结果是否与预期的结果相同:
#
# | num1 | num2 | Output |
# | ----------- |:------------:| ------------------:|
# | 12 | 12 | Two numbers are equal. |
# | 18 | 14 | 18 is larger. |
# | 0 | 4 | 4 is larger. |
# | 1091 | 0 | 1091 is larger. |
# | 0 | 0 | Two numbers are equal. |
#
# -
def larger_num(num1, num2):
if num1 > num2:
print("{} is larger.".format(num1))
elif num1 == num2:
print("Two numbers are equal.")
elif num1 < num2:
print("{} is larger.".format(num2))
return
larger_num(12, 12)
larger_num(18, 14)
larger_num(0, 4)
larger_num(1091, 0)
larger_num(0, 0)
# + [markdown] hidden=true
# 3. Write a method to calculate and print out the sum of two integers that you input from the keyboard. Requirements:
# 创建一个方法来计算两个整数的和并输出结果。要求:
#     1. The method is named `get_sum` and does not take any parameters. 方法名为`get_sum`,该方法不接受任何参数。
# 2. within the method, write codes to receive two number strings from keyboard 你的代码能够从键盘接受两个数字字符串
# 3. within the method, convert these two strings to two Integer numbers, assign them to two variables. The names of the two Integer variables can be decided as you like 将这两个字符串转化为两个整数型数字并将其赋值给两个变量,变量名可以根据你自己的喜好来设定
# 4. within the method, calculate the sum of the two Integer variables, assign the result to a new Variable named `result` 计算这两个整型变量的和,并将结果赋值给一个叫`result`的变量
# 5. within the method, print out the type of the `result` variable 打印输出变量`result`的数据类型
# 6. within the method, display the result on screen with a readable sentence. 打印输出一个通俗易懂的句子来显示你计算得到的结果
#
#     Run and test your method 5 times. Each time inputs from the keyboard are required, provide the following values for the two numbers. Observe whether the output of the method is the same as the Sum for each test 运行5次你创建的方法,分别使用下表中的num1,num2的值作为键盘输入运行并测试你的代码共计5次,观察结果是否与对应的Sum一样
#
# **You should run the method on all test data; that being said, you may not change the code between runs.**
# **你应该用同样的代码来运行所有的测试数据,也就是说,你不应该在两次测试间隙修改你的代码**
#
#
# | num1 | num2 | Output |
# | ----------- |:------------:| ------------------:|
# | 12 | 12 | The sum is 24 |
# | 18 | 14 | The sum is 32 |
# | 0 | 4 | The sum is 4 |
# | 1091 | 0 | The sum is 1091 |
# | 0 | 0 | The sum is 0 |
#
# -
def get_sum():
num1 = int(input("num1 please"))
num2 = int(input("num2 please"))
result = num1 + num2
print(type(result))
print("The sum is {}".format(result))
# TODO: execute your methods here 5 times with values of parameters provided.
get_sum()
get_sum()
get_sum()
get_sum()
get_sum()
# 4. Write a method to calculate and print out the perimeter and area of a rectangle with the length and width provided from the keyboard. 创建一个方法来计算一个长方形的周长和面积,确定这个长方形的长和宽从键盘输入得到。Requirements: 要求:
#     1. The method is named `perimeter_and_area` and accepts two integer parameters that represent the two side lengths of the rectangle. 方法名为`perimeter_and_area`, 该方法接受两个整数型参数,试图代表矩形的两个边长。
# 2. within the method, verify whether two parameters provided can form a rectangle or not. If not, print out "Not a rectangle"; otherwise, continue the following steps
# 3. within the method, calculate the perimiter and the area of this rectangle, assign the result to `perimeter` and `area` variable 计算这个长方形的周长和面积,并将结果分别赋值给名为`perimeter`和`area`的变量
# 4. within the method, print out the value of the `perimeter`和`area` variables 打印输出变量`perimeter`和`area`的结果
#
#     Run and test your method 5 times with the following inputs (length, width) for each test. Observe whether the results are equal to the Perimeter and Area for each test 分别使用下表中的length和width的值作为键盘输入运行并测试你的代码共计5次,观察结果是否与对应的Perimeter和Area值一样
#
# **You should run the method on all test data.**
# **你应该用同样的代码来运行所有的测试数据。**
#
#
#
# | length | width | output |
# | --------- |:----------:|:-------------------------------:|
# | 12 | 12 | perimeter is: 48, area is: 144 |
# | 18 | 14 | perimeter is: 64, area is: 252 |
# | 0 | 4 | Not a rectangle |
# | 1091 | 0 | Not a rectangle |
# | 0 | 0 | Not a rectangle |
def perimeter_and_area(length, width):
    side1 = int(length)
    side2 = int(width)
if side1 == 0 or side2 == 0:
print("Not a rectangle")
else:
perimeter = (side1 + side2) * 2
area = side1 * side2
print("perimeter is:{}, area is: {}".format(perimeter, area))
perimeter_and_area(12, 12)
perimeter_and_area(18, 14)
perimeter_and_area(0, 4)
perimeter_and_area(1091, 0)
perimeter_and_area(0, 0)
| source/2021/500Answer/content/014_locate_points_answer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## _*VQE; using its callback to monitor optimization progress*_
#
# This notebook demonstrates using Qiskit Aqua's VQE algorithm to plot graphs of the convergence path to ground state energy with different optimizers.
#
# This notebook uses the callback capability of VQE to capture information at each objective function evaluation, where it computes the energy using the parameterized variational form. While the params themselves are also part of the callback, we are only interested in the energy value here to plot the convergence.
#
# Note: other variational algorithms such as QAOA and QSVM have similar callbacks.
# +
import numpy as np
import pylab
from qiskit import BasicAer
from qiskit.aqua import Operator, QuantumInstance, aqua_globals
from qiskit.aqua.algorithms.adaptive import VQE
from qiskit.aqua.algorithms.classical import ExactEigensolver
from qiskit.aqua.components.initial_states import Zero
from qiskit.aqua.components.optimizers import COBYLA, L_BFGS_B, SLSQP
from qiskit.aqua.components.variational_forms import RY
# -
# First we create a qubit operator for VQE. Here we have taken a set of paulis that were originally computed by qiskit-chemistry for an H2 molecule.
# +
pauli_dict = {
'paulis': [{"coeff": {"imag": 0.0, "real": -1.052373245772859}, "label": "II"},
{"coeff": {"imag": 0.0, "real": 0.39793742484318045}, "label": "ZI"},
{"coeff": {"imag": 0.0, "real": -0.39793742484318045}, "label": "IZ"},
{"coeff": {"imag": 0.0, "real": -0.01128010425623538}, "label": "ZZ"},
{"coeff": {"imag": 0.0, "real": 0.18093119978423156}, "label": "XX"}
]
}
qubit_op = Operator.load_from_dict(pauli_dict)
# -
# Now we loop over the set of optimizers. The default maxiters/evals for the respective optimizers are more than sufficient to converge the above H2 problem, so we do not need to add any logic to set them explicitly.
# +
optimizers = [COBYLA, L_BFGS_B, SLSQP]
converge_cnts = np.empty([len(optimizers)], dtype=object)
converge_vals = np.empty([len(optimizers)], dtype=object)
num_qubits = qubit_op.num_qubits
for i in range(len(optimizers)):
aqua_globals.random_seed = 250
optimizer = optimizers[i]()
print('\rOptimizer: {} '.format(type(optimizer).__name__), end='')
init_state = Zero(num_qubits)
var_form = RY(num_qubits, initial_state=init_state)
counts = []
values = []
def store_intermediate_result(eval_count, parameters, mean, std):
counts.append(eval_count)
values.append(mean)
algo = VQE(qubit_op, var_form, optimizer, 'matrix', callback=store_intermediate_result)
backend = BasicAer.get_backend('statevector_simulator')
quantum_instance = QuantumInstance(backend=backend)
algo_result = algo.run(quantum_instance)
converge_cnts[i] = np.asarray(counts)
converge_vals[i] = np.asarray(values)
print('\rOptimization complete ');
# -
# Now, from the callback data we stored, we can plot the energy value at each objective function call each optimizer makes. An optimizer using a finite-difference method for computing the gradient has the characteristic step-like plot, where for a number of evaluations it computes the value at nearby points to establish a gradient (the nearby points having very similar values whose difference cannot be seen on the scale of the graph here).
pylab.rcParams['figure.figsize'] = (12, 8)
for i in range(len(optimizers)):
pylab.plot(converge_cnts[i], converge_vals[i], label=optimizers[i].__name__)
pylab.xlabel('Eval count')
pylab.ylabel('Energy')
pylab.title('Energy convergence for various optimizers')
pylab.legend(loc='upper right')
# Finally, since the above problem is still easily tractable classically, we can use ExactEigensolver to compute a reference value for the solution. We can then plot the difference from the exact solution as the VQE energy converges towards the minimum value, which should be that exact classical solution.
ee = ExactEigensolver(qubit_op)
result = ee.run()
ref = result['energy']
print('Reference value: {}'.format(ref))
pylab.rcParams['figure.figsize'] = (12, 8)
for i in range(len(optimizers)):
pylab.plot(converge_cnts[i], abs(ref - converge_vals[i]), label=optimizers[i].__name__)
pylab.xlabel('Eval count')
pylab.ylabel('Energy difference from solution reference value')
pylab.title('Energy convergence for various optimizers')
pylab.yscale('log')
pylab.legend(loc='upper right')
| aqua/vqe_convergence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/WEBSTERMASTER777/siamese-triplet/blob/master/KNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="9Lm6xAKTjcUE" colab_type="code" outputId="d3d86514-8279-439a-8191-a7f39e1a2cd9" colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/gdrive')
# + id="zGvBuF6Sjhj_" colab_type="code" outputId="2ee86f86-401f-4eb8-ddd4-56fbf4ead56d" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %cd /content/gdrive/My\ Drive/Colab Notebooks
# !ls
# + id="c7Eos4pljoDE" colab_type="code" colab={}
# # !pip3 install triplettorch
# + id="u6SuGOWBYvGz" colab_type="code" colab={}
# + id="BTnuUTtajp5f" colab_type="code" colab={}
import numpy as np
import torch
import time
import os
from torch.utils.data import DataLoader
from torchvision.models import mobilenet_v2
from torchvision import transforms
from torch import nn
# from triplettorch import HardNegativeTripletMiner
# from triplettorch import AllTripletMiner
# from torch.utils.data import DataLoader
# from triplettorch import TripletDataset
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt
import torch.nn as nn
import numpy as np
import torch
# + id="ydQ_W1e7YyhX" colab_type="code" colab={}
import random
random.seed(0);
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic=True
# + id="TTjpVwsGkn_C" colab_type="code" colab={}
# # !wget http://pdd.jinr.ru/archive_full.zip
# + id="OG2n34_HkrvG" colab_type="code" outputId="86b7b447-961b-45e6-b6ce-3857e3482660" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# !unzip archive_full.zip -d pdd
# + id="X3jNcyHCkuXL" colab_type="code" outputId="5c8ac53d-db50-4dd3-9446-7f5ef02e24d3" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls pdd
# + id="yRUB_o1Wky38" colab_type="code" colab={}
import numpy as np
import os
from torch.utils.data import Dataset
from torch.utils.data import Sampler
from torchvision.datasets import ImageFolder
class AllCropsDataset(Dataset):
def __init__(self, image_folder, subset='', transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
# data subset (train, test)
self.subset = subset
# store each crop data
self.datasets = []
self.crops = []
self.samples = []
self.imgs = []
self.classes = []
self.targets = []
self.class_to_idx = {}
# iterate over all folders
# with all crops
for i, d in enumerate(os.listdir(image_folder)):
self.crops.append(d)
# full path to the folder
d_path = os.path.join(image_folder, d, self.subset)
# attribute name to set attribute
attr_name = '%s_ds' % d.lower()
print("Load '%s' data" % attr_name)
# set the attribute with the specified name
setattr(self, attr_name, ImageFolder(d_path))
# add the dataset to datasets list
self.datasets.append(getattr(self, attr_name))
# get dataset attribute
ds = getattr(self, attr_name)
# add attr targets to the global targets
ds_targets = [x+len(self.classes) for x in ds.targets]
self.targets.extend(ds_targets)
# add particular classes to the global classes' list
ds_classes = []
for c in ds.classes:
new_class = '__'.join([d, c])
self.class_to_idx[new_class] = len(self.classes) + ds.class_to_idx[c]
ds_classes.append(new_class)
self.classes.extend(ds_classes)
# imgs attribute has form (file_path, target)
ds_imgs, _ = zip(*ds.imgs)
# images and samples are equal
self.imgs.extend(list(zip(ds_imgs, ds_targets)))
self.samples.extend(list(zip(ds_imgs, ds_targets)))
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
path, target = self.samples[idx]
img = self.datasets[0].loader(path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
# + id="uZ7yk2gPk1el" colab_type="code" colab={}
DATA_PATH = 'pdd'
def prepare_datasets():
train_ds = AllCropsDataset(
DATA_PATH,
subset='train',
transform=transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
# transforms.Normalize([0.4352, 0.5103, 0.2836], [0.2193, 0.2073, 0.2047])]),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
target_transform=torch.tensor)
test_ds = AllCropsDataset(
DATA_PATH,
subset='test',
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])]),
target_transform=torch.tensor)
# print statistics
print('Train size:', len(train_ds))
print('Test size:', len(test_ds))
print('Number of samples in the dataset:', len(train_ds))
print('Crops in the dataset:', train_ds.crops)
print('Total number of classes in the dataset:', len(train_ds.classes))
print('Classes with the corresponding targets:')
print(train_ds.class_to_idx)
return train_ds, test_ds
import numpy as np
import shutil
import os
from glob import glob
from tqdm import tqdm
# from tqdm.notebook import tqdm
TEST_SIZE = 0.2
RS = 42
def _remove_path_if_exists(path):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
def _makedir_and_copy2(path, dirname, fnames):
path_for_saving_files = os.path.join(path, dirname)
os.makedirs(path_for_saving_files)
for fname in fnames:
shutil.copy2(fname, path_for_saving_files)
def datadir_train_test_split(origin_path, test_size, random_state=0):
"""Splits the data in directory on train and test.
# Arguments
origin_path: path to the original directory
test_size: the size of test data fraction
# Returns
Tuple of paths: `(train_path, test_path)`.
"""
print("\n\nSplit `%s` directory" % origin_path)
print("Test size: %.2f" % test_size)
print("Random state: {}".format(random_state))
train_path = os.path.join(origin_path, 'train')
test_path = os.path.join(origin_path, 'test')
_remove_path_if_exists(train_path)
_remove_path_if_exists(test_path)
try:
subfolders = glob(os.path.join(origin_path, "*", ""))
# if train/test split is already done
if set(subfolders) == set(['train', 'test']):
return (train_path, test_path)
# if train/test split is required
# recreate train/test folders
os.makedirs(train_path)
os.makedirs(test_path)
for folder in tqdm(subfolders, total=len(subfolders), ncols=57):
# collect all images
img_fnames = []
for ext in ["*.jpg", "*.png", "*jpeg"]:
img_fnames.extend(
glob(os.path.join(folder, ext)))
# set random state parameter
rs = np.random.RandomState(random_state)
# shuffle array
rs.shuffle(img_fnames)
# split on train and test
n_test_files = int(len(img_fnames)*test_size)
test_img_fnames = img_fnames[:n_test_files]
train_img_fnames = img_fnames[n_test_files:]
# copy train files into `train_path/folder`
folder_name = os.path.basename(os.path.dirname(folder))
_makedir_and_copy2(train_path, folder_name, train_img_fnames)
# copy test files into `test_path/folder`
_makedir_and_copy2(test_path, folder_name, test_img_fnames)
for folder in subfolders:
shutil.rmtree(folder)
except:
_remove_path_if_exists(train_path)
_remove_path_if_exists(test_path)
raise
return (train_path, test_path)
def split_on_train_and_test():
for crop in os.listdir('pdd'):
crop_path = os.path.join('pdd', crop)
_ = datadir_train_test_split(crop_path,
test_size=0.2,
random_state=42)
# + id="QExEx-yPk4Vc" colab_type="code" outputId="60967336-335b-4c93-bb52-f9d2bfff3ee8" colab={"base_uri": "https://localhost:8080/", "height": 316}
split_on_train_and_test()
# + id="SpihyboSlIoT" colab_type="code" outputId="73f046f5-72f2-4390-b12d-fdf83a8f4789" colab={"base_uri": "https://localhost:8080/", "height": 253}
BATCH_SIZE = 16
train_ds, test_ds = prepare_datasets()
train_loader = torch.utils.data.DataLoader(train_ds, pin_memory=True, batch_size=BATCH_SIZE, shuffle=True, num_workers=BATCH_SIZE)
test_loader = torch.utils.data.DataLoader(test_ds, pin_memory=True, batch_size=BATCH_SIZE, shuffle=True, num_workers=BATCH_SIZE)
# + id="q26X28r5rzqx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="f3b9eb7f-c343-4035-a0c2-1d5fe637f6af"
type(train_ds)
# + id="qiqzEGFmlOmj" colab_type="code" outputId="67a552ae-465f-4a4e-c7ae-888281acdb9c" colab={"base_uri": "https://localhost:8080/", "height": 302}
plt.imshow(train_ds[12][0].permute(1,2,0))
# + id="CYNxwx0aMKJ7" colab_type="code" colab={}
# + [markdown] id="xZ96b80UMKy5" colab_type="text"
# # Plain CNN with 1024 features
# + id="F_UR6JzClQh6" colab_type="code" colab={}
def simple_conv_block(in_channels,
out_channels,
kernel_size,
stride,
padding,
pool_size,
pool_stride):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
nn.ReLU(),
nn.BatchNorm2d(out_channels),
nn.MaxPool2d(pool_size, pool_stride))
import torch.nn.functional as F
class Model(nn.Module):
'''Feature extractor'''
def __init__(self, output_dim=1024):
super(Model, self).__init__()
self.output_dim = output_dim
self.cnn1 = simple_conv_block(3, 32, 10, 1, 1, 2, 2)
self.cnn2 = simple_conv_block(32, 64, 7, 1, 1, 2, 2)
self.cnn3 = simple_conv_block(64, 128, 5, 1, 1, 2, 2)
self.cnn4 = simple_conv_block(128, 256, 3, 1, 1, 2, 2)
self.cnn5 = simple_conv_block(256, 512, 3, 1, 1, 2, 2)
self.feature_proj = nn.Sequential(
nn.Flatten(),
nn.Linear(512*7*7, self.output_dim),
nn.ReLU()
)
self.mlp = nn.Sequential(
nn.Linear(self.output_dim, 512),
nn.ReLU(),
nn.Linear(512,256),
nn.ReLU()
)
self.fc = nn.Sequential(
# nn.Linear(self.output_dim, 15),
nn.Linear(256, 15),
nn.LogSoftmax()
)
def forward(self, x):
x = self.cnn1(x)
x = self.cnn2(x)
x = self.cnn3(x)
x = self.cnn4(x)
x = self.cnn5(x)
x = self.feature_proj(x)
x=self.mlp(x)
x = self.fc(x)
# print(x.shape)
# x = self.cnn1(x)
# x = self.cnn2(x)
# x = self.cnn3(x)
# x = self.cnn4(x)
# x = self.cnn5(x)
# print(x.shape)
# x = x.view(x.size()[0], -1)
# print(x.shape)
# x = F.relu(self.feature_proj(x))
# print(x.shape)
# x = F.log_softmax(self.fc(x), dim=1)
# x = x.view(x.size()[0], -1)
# # x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
# x = self.fc1(x)
# x = self.act3(x)
# x = self.fc2(x)
# x = self.act4(x)
# x = self.fc3(x)
# x=self.sm(x)
return x
# + [markdown] id="uDD8XQ2bMRXC" colab_type="text"
# # Plain CNN with 2048 features
#
# + id="s0zulQG2AJj4" colab_type="code" colab={}
def simple_conv_block(in_channels,
out_channels,
kernel_size,
stride,
padding,
pool_size,
pool_stride):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding),
nn.ReLU(),
nn.BatchNorm2d(out_channels),
nn.MaxPool2d(pool_size, pool_stride))
import torch.nn.functional as F
class Model(nn.Module):
'''Feature extractor'''
def __init__(self, output_dim=2048):
super(Model, self).__init__()
self.output_dim = output_dim
self.cnn1 = simple_conv_block(3, 32, 10, 1, 1, 2, 2)
self.cnn2 = simple_conv_block(32, 64, 7, 1, 1, 2, 2)
self.cnn3 = simple_conv_block(64, 128, 5, 1, 1, 2, 2)
self.cnn4 = simple_conv_block(128, 256, 3, 1, 1, 2, 2)
self.cnn5 = simple_conv_block(256, 512, 3, 1, 1, 2, 2)
self.cnn6 = simple_conv_block(512, 1024, 3, 1, 1, 2, 2)
self.cnn7 = simple_conv_block(1024, output_dim, 3, 1, 1, 2, 2)
# self.feature_proj = nn.Sequential(
# nn.Flatten(),
# nn.Linear(512*7*7, self.output_dim),
# nn.ReLU()
# )
# self.mlp = nn.Sequential(
# nn.Linear(self.output_dim, 512),
# nn.ReLU(),
# nn.Linear(512,256),
# nn.ReLU()
# )
self.fc = nn.Sequential(
# nn.Linear(1, 15),
# nn.Linear(256, 15),
# nn.Conv2d(self.output_dim, 15, 1, 1),
# nn.ReLU(),
# # nn.Linear(512*7*7, self.output_dim),
nn.Flatten(),
nn.Linear(self.output_dim, 15),
nn.LogSoftmax(dim=1)
)
def forward(self, x):
x = self.cnn1(x)
x = self.cnn2(x)
x = self.cnn3(x)
x = self.cnn4(x)
x = self.cnn5(x)
x = self.cnn6(x)
x = self.cnn7(x)
# x = self.feature_proj(x)
# x=self.mlp(x)
x = self.fc(x)
return x
# + [markdown] id="c9LHmfoTMc15" colab_type="text"
# # Transfer learning
# + id="5ktwfpyYMFtj" colab_type="code" colab={}
# + [markdown] id="LMzSGWdOAaVn" colab_type="text"
#
# + id="WUklkqrDnZp2" colab_type="code" outputId="23f4e7e7-5c93-4b71-851b-ed372d76ddce" colab={"base_uri": "https://localhost:8080/", "height": 33}
try:
import torchbearer
except:
# !pip install -q torchbearer
import torchbearer
print(torchbearer.__version__)
# + id="RGrliY89RXYT" colab_type="code" colab={}
try:
import pycm
except:
# !pip install -q pycm
import pycm
# + id="H2p12Y9jn-0u" colab_type="code" colab={}
import torchbearer
from torchbearer.callbacks import imaging
inv_normalize = transforms.Normalize(
    mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
    std=[1/0.229, 1/0.224, 1/0.225]
)
make_grid = imaging.MakeGrid(torchbearer.INPUT, num_images=64, nrow=8, transform=inv_normalize)
make_grid = make_grid.on_test().to_pyplot().to_file('sample.png')
# + id="OBw-8thElcmY" colab_type="code" outputId="14774bd5-d09b-4109-ff78-5487299c406b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# model=Model()
# model.state_dict=Model().load_state_dict(torch.load('CNNmodelNLLloss.pt'))
# model = models.resnet50(pretrained=True)
# # Disable grad for all conv layers
# for param in model.parameters():
# param.requires_grad = False
from torchvision import datasets, models, transforms
model =models.mobilenet_v2(pretrained=True)
for param in model.parameters():
param.requires_grad = False
model.classifier[0] = nn.Linear(model.last_channel, 15)
model.classifier[1]=nn.LogSoftmax(dim=1)
from torchbearer.callbacks import EarlyStopping
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)
loss = torch.nn.NLLLoss()
# loss=torch.nn.BCELoss()
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
import torchbearer
from torchbearer import Trial
from torchbearer.callbacks import Best
import sys
# if 'tensorboardX' in sys.modules:
# import tensorboardX
# from torchbearer.callbacks import TensorBoard
# callbacks = [TensorBoard(write_batch_metrics=True)]
# else:
# callbacks = []
checkpoint = Best('bestmodel.pt', monitor='val_acc', mode='max')
# callbacks.append(make_grid)
stopping = EarlyStopping(monitor='val_acc', patience=5, mode='max')
from torchbearer.callbacks import PyCM
cm = PyCM().on_val().to_pyplot( title='Confusion Matrix: {epoch}')
# print_normalized_matrix()
# to_pyplot(normalize=True,)
#
# Decay LR by a factor of 0.1 every 7 epochs
# scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
from torchsummary import summary
summary(model, input_size=(3, 224, 224))
# + id="pGYYWPqG3qDE" colab_type="code" outputId="0c5fd0cf-288f-4e3d-d7f4-33908887df1b" colab={"base_uri": "https://localhost:8080/", "height": 183}
help(models.mobilenet_v2)
# + id="x2kQroMW_A6z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="1bfad5f0-a10c-4fc7-81ad-74b154508808"
print(model.last_channel)
# + id="-Rww0SpayYG8" colab_type="code" colab={}
# + id="EVuAGXPllmgI" colab_type="code" outputId="9cbe4c65-1714-4194-a639-433f20220e90" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["99523dbf2a3244cb81afb4da40f6662a", "<KEY>", "<KEY>", "e5fc85f2634f4d4785e9719b90aeded0", "ede1f11fe016402c9130699220e53a04", "64eff83152444955ac9963d487fa0d4e", "<KEY>", "f7ecb82512114a02be55ffc2765062a0", "932a61b1777e44828d43c67bb28a28d2", "aa8a77aa509f481488e766a75f34fc25", "4d00f26ce3a54f7dac3c438d95532c35", "ad8183be01c04df59ce73213a39010df", "<KEY>", "<KEY>", "5a9d0df67fda401fb108eed078fa9b83", "893502d7ca8b4c1993a39b989418c8c4", "<KEY>", "<KEY>", "2ef4b1ed4c8a4bc783bef06ca47c054f", "e50a5cddc12643c48c2425a03bd61ab6", "d33376e17db243ce8dac3459e22dc7c5", "9004d20be62b4214b73169105efbfd8a", "cdb50597287541c1857c741c96ad0b3a", "38127d385df6442fb7e8637a4161f56b", "2d022cb1bad7418185698edac2938d69", "<KEY>", "702f1b4f59b847a18ce3b2e3373c4865", "<KEY>", "6e6fc507d8f84e6695784091d1d0b053", "<KEY>", "<KEY>", "2784c65032ae43b2a2e0c750e7e32794", "<KEY>", "<KEY>", "e4dac4b4af8040ffb59e441e92e6e934", "<KEY>", "<KEY>", "<KEY>", "1eafe12d81e2486c890463d4b42a5bb6", "<KEY>"]}
trial = Trial(model, optimizer, loss, metrics=['acc', 'loss'], callbacks=[checkpoint,cm]).to(device)
trial.with_train_generator(train_loader).with_val_generator(test_loader)
trial.to(device)
history = trial.run(epochs=70, verbose=2)
# + [markdown] id="RiEUc_qqk7CA" colab_type="text"
# # Test
# + id="xdMI2t5bdUUt" colab_type="code" colab={}
model_test1 =models.mobilenet_v2(pretrained=True)
model_test1 = torch.nn.Sequential(*(list(model_test1.children())[:-1]))
# model_test1.classifier[1] = nn.Linear(model_test1.last_channel, 15)
for param in model_test1.parameters():
param.requires_grad = False
model_test1.to(device)
model_test1.eval()
test_x_numpy=[]
test_x1_numpy=[]
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model_test1(inputs).detach().cpu().numpy()
targets= targets.detach().cpu().numpy()
if (outputs.shape[0]==16):
test_x_numpy.append(outputs)
test_x_numpy=np.vstack(test_x_numpy)
print(test_x_numpy.shape)
# model_test1.fc = nn.Sequential(
# nn.Linear(1280, 15),
# nn.LogSoftmax(dim=1))
model_test1 =models.mobilenet_v2(pretrained=True)
# model_test1.classifier[0] = nn.Linear(model_test1.last_channel, 15)
# model_test1.classifier[1]=nn.LogSoftmax(dim=1)
model_test1.train()
trial = Trial(model_test1, optimizer, loss, metrics=['acc', 'loss'], callbacks=[checkpoint]).to(device)
trial.with_train_generator(train_loader).with_val_generator(test_loader)
trial.to(device)
history = trial.run(epochs=1, verbose=2)
model_test1 = torch.nn.Sequential(*(list(model_test1.children())[:-1]))
model_test1.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model_test1(inputs).detach().cpu().numpy()
targets= targets.detach().cpu().numpy()
if (outputs.shape[0]==16):
test_x1_numpy.append(outputs)
test_x1_numpy=np.vstack(test_x1_numpy)
print(test_x1_numpy.shape)
np.testing.assert_allclose(test_x_numpy,test_x1_numpy)
# + [markdown] id="svvvHGXgqu0n" colab_type="text"
# # Transfer Learning with Batch Norm
# + id="88wpVQieqp_a" colab_type="code" colab={}
model_test1 =models.mobilenet_v2(pretrained=True)
model_test1.classifier[0] = nn.Linear(model_test1.last_channel, 15)
model_test1.classifier[1]=nn.LogSoftmax(dim=1)
model_test1.to(device)
# model_test1.train()
# trial = Trial(model_test1, optimizer, loss, metrics=['acc', 'loss'], callbacks=[checkpoint]).to(device)
# trial.with_train_generator(train_loader).with_val_generator(test_loader)
# trial.to(device)
# history = trial.run(epochs=1, verbose=2)
model_test1 = torch.nn.Sequential(*(list(model_test1.children())[:-1]))
model_test1.eval()
test_x_numpy=[]
train_x_numpy=[]
test_y_numpy=[]
train_y_numpy=[]
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model_test1(inputs).detach().cpu().numpy()
targets= targets.detach().cpu().numpy()
if (outputs.shape[0]==16):
train_x_numpy.append(outputs)
train_y_numpy.append(targets)
train_x_numpy=np.vstack(train_x_numpy)
train_y_numpy=np.hstack(train_y_numpy)
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model_test1(inputs).detach().cpu().numpy()
targets= targets.detach().cpu().numpy()
if (outputs.shape[0]==16):
test_x_numpy.append(outputs)
test_y_numpy.append(targets)
test_x_numpy=np.vstack(test_x_numpy)
test_y_numpy=np.hstack(test_y_numpy)
x_train=torch.FloatTensor(train_x_numpy)
x_test=torch.FloatTensor(test_x_numpy)
y_train=torch.FloatTensor(train_y_numpy)
y_test=torch.FloatTensor(test_y_numpy)
# classifier = nn.Sequential(OrderedDict([
# ('fc1', nn.Linear(25088, 4096)),
# ('relu', nn.ReLU()),
# ('fc2', nn.Linear(4096, 102)),
# ('output', nn.LogSoftmax(dim=1))
# ]))
# classifier = nn.Sequential(
# nn.Linear(1280, 15),
# nn.LogSoftmax(dim=1))
# trial.with_train_generator(train_loader).with_val_generator(test_loader)
# trial.to(device)
# history = trial.run(epochs=70, verbose=2)
# + id="7Qf--G__MhUZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 103} outputId="ccfc4af6-083d-4d86-a644-53b94df1b5b3"
y_train = torch.tensor(y_train, dtype=torch.long)
y_test = torch.tensor(y_test, dtype=torch.long)
# + id="E6E9LTtABhaf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 526, "referenced_widgets": ["ff9dc44f25d54f389d25545efa079556", "9a656d9559154ecf85da1c7d0f562833", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e8e9fec414ec4f8b81706730129e0f76", "8e856b92a4ac4ee4a172ecfbe32feefa", "<KEY>", "<KEY>", "71d869b7eac24e31b34477b30a9a0b28", "b7d7bfba935e4cd5b9063814558bf93c", "<KEY>", "625ecd6e1a514b34a6f6140406d9de97", "<KEY>", "<KEY>", "deceb3a586a24ee0980ed15b1cc3fb09", "d6ac4eb3e92f46c5834ce5e8ea2f4221", "<KEY>", "c041db8a025b4a17943c0c65059e5380", "<KEY>", "c946a1e36a374d3d9c92d1c39af77d79", "<KEY>", "03041cc1d39845e2ab3aa9a0d12deab1", "41e62406982c456792e7d866c5212258", "<KEY>", "<KEY>", "ca1b54e1b1fa45f78f9661ce9355360c", "<KEY>", "<KEY>", "025a01a185a84caf8788a9ba6b6e9612", "b55e40b7383b483d9a9103069bec44a7", "f3e5ef8f3e254af3b3a97667cd870d55", "<KEY>", "<KEY>", "b84555ca09d64e43af4cb6dcee7b44be", "5c2392576c89436494009be0da0823dd", "<KEY>", "<KEY>", "e4a8f433fb60452a87077b4065df6e2b", "<KEY>", "4840773df1d048a98f0f0e835abf1214", "<KEY>", "a9b1e4d79d4a4ff78501b8bfe830781a"]} outputId="f5d81c8b-74f3-444b-96b8-7e57abf131d8"
trial = Trial(cla, optimizer, loss, metrics=['acc', 'loss'], callbacks=[checkpoint]).to(device)
trial.with_train_data(x_train, y_train).with_val_data(x_test,y_test)
trial.to(device)
history = trial.run(epochs=50, verbose=2)
# + id="RwkGyCtM3Wt4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 133} outputId="2df00955-9d08-47fd-cc0d-e0223098a903"
class cl(torch.nn.Module):
def __init__(self):
super(cl,self).__init__()
self.fc = nn.Sequential(
nn.Flatten(),
        nn.Linear(1280, 15),  # mobilenet_v2 features give 1280 channels after the mean pooling in forward()
nn.LogSoftmax(dim=1)
)
def forward(self,x):
x = x.mean(3).mean(2)
x = self.fc(x)
return x
cla=cl()
cla.to(device)
# + id="_jnRK0D-QCk_" colab_type="code" colab={}
# + id="MstBm-kv5P06" colab_type="code" colab={}
torch.save(model,'CNNmodelNLLloss.pt')               # saves the full model object
torch.save(model.state_dict(),'CNNmodelNLLloss.pt')  # note: overwrites the file above with just the state dict
# + id="n59j2jk64g8p" colab_type="code" colab={}
model.load_state_dict(torch.load('bestmodel.pt'))  # load_state_dict updates the model in place; it does not return the model
# + id="_cG93zCRkTat" colab_type="code" colab={}
model.eval()
# + id="0fe-v50I6e3j" colab_type="code" colab={}
model = torch.nn.Sequential(*(list(model.children())[:-1]))
# + id="U6XcwDVg60f5" colab_type="code" colab={}
model
# from torchsummary import summary
# summary(model, input_size=(3, 256, 256))
# model(torch.rand(1, 3, 256, 256).to(device)).shape
# + id="GB5qealipGdI" colab_type="code" colab={}
print(history)
# + [markdown] id="2NlgNkFIuka3" colab_type="text"
# # Converting to NumPy
# + id="TKqYJ1JOii6z" colab_type="code" colab={}
# for img in train_ds:
# print(img)
# ipt=torch.FloatTensor(img)
# # ipt.unsqueeze_(0)
from tqdm import tqdm
# from tqdm.notebook import tqdm
# i=0
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
count=0
scorsum=0
train_x_numpy=[]
train_y_numpy=[]
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(train_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs).detach().cpu().numpy()
targets= targets.detach().cpu().numpy()
if (outputs.shape[0]==16):
print(outputs.shape)
print(outputs.reshape(2048,16).shape)
print(targets.shape)
# knn=KNeighborsClassifier(n_neighbors=1)
# knn.fit(outputs,targets)
train_x_numpy.append(outputs.reshape(2048,16).transpose())
train_y_numpy.append(targets)
test_x_numpy=[]
test_y_numpy=[]
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs).detach().cpu().numpy()
targets= targets.detach().cpu().numpy()
# y_pred=knn.predict(outputs)
# scor=metrics.accuracy_score(targets,y_pred)
# scorsum=scorsum+scor
# count=count+1
if (outputs.shape[0]==16):
test_x_numpy.append(outputs.reshape(2048,16).transpose())
test_y_numpy.append(targets)
# print(scorsum/count)
# for b, batch in enumerate(train_loader):
# labels, data =
# data = torch.cat( [ datum for datum in data ], axis = 0 )
# labels = torch.cat( [ label for label in labels ], axis = 0 )
# embeddings = model( data.cuda( ) ).detach( ).cpu( ).numpy( )
# labels = labels.numpy( )
# test_embeddings.append( embeddings )
# test_labels.append( labels )
# while i < len(train_ds):
# ipt= torch.FloatTensor(train_ds[i][0]).to(device)
# ipt.unsqueeze_(0)
# probs = torch.exp(model.forward(ipt))
# probsTrainNP=probs.cpu().detach().numpy()
# TrainNP=np.append(TrainNP,probsTrainNP)
# # print(probsTrainNP)
# i=i+1
# + id="TkQoqEEggqfm" colab_type="code" colab={}
print(len(train_loader))
# type(like_x_list)
# outputs.shape
# outputs.reshape(1024,5).shape
print(len(train_ds))
outputs.shape[0]
# + id="ZqkATlIKtjhP" colab_type="code" colab={}
from sklearn.preprocessing import normalize
import sklearn.preprocessing
train_x_numpy=normalize(np.vstack(train_x_numpy),norm='l2')
train_y_numpy=np.hstack(train_y_numpy)
test_y_numpy=np.hstack(test_y_numpy)
test_x_numpy=normalize(np.vstack(test_x_numpy),norm='l2')
print(train_y_numpy.shape)
print(train_x_numpy.shape)
print(test_y_numpy.shape)
print(test_x_numpy.shape)
# X = normalize(numpy.vstack([X_0, X_1]), norm='l2')
# + id="OVKorVpVzZCD" colab_type="code" colab={}
# from numpy import array
# data = [[[[11, 22],
# [33, 44],
# [55, 66]]]]
# data=array(data)
# data.shape
# data.reshape(3,2).sh
print(type(train_y_numpy))
print(train_y_numpy.shape)
train_y_numpy
# + id="XJKzxuihg94t" colab_type="code" colab={}
# like_x_list = [train_x_numpy(BATCH_SIZE, 2048).astype('float32') for _ in range(len(train_loader))]
# + id="Rf6v0upWkcW_" colab_type="code" colab={}
# like_x_list = [np.random.rand(1, 1024).astype('float32') for _ in range(100)]
# + id="zBtP60SSruSn" colab_type="code" colab={}
print (train_x_numpy.shape)
# print (train_x_numpy.reshape(-1,1).shape)
xreshpe=train_x_numpy.reshape(-1,1)
print(xreshpe.shape)
# + id="tDaezXTOvWWa" colab_type="code" colab={}
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics.pairwise import cosine_similarity
# train_x_numpy=[np.random.rand(, 1024).astype('float32') for _ in range(100)]
# x_train_reshape=train_x_numpy.numpy().reshape(-1,1)
# x_train_reshape=train_x_numpy.
# y_train_reshape=train_y_numpy.reshape(-1,1)
# np.asarray(train_x_numpy).reshape(-1,1)
from sklearn import metrics
# k_range=range(1,26)
k=1
scores={}
scores_list=[]
# for k in k_range:
# cosine_similarity is a similarity (and expects 2-D inputs), so wrap it into a distance callable for the classifier
cosine_distance = lambda a, b: 1 - cosine_similarity(a.reshape(1, -1), b.reshape(1, -1))[0, 0]
knn=KNeighborsClassifier(n_neighbors=k, metric=cosine_distance)
# knn.fit(np.asarray(train_x_numpy).reshape(-1,1),np.asarray (train_y_numpy).reshape(-1,1))
knn.fit(train_x_numpy, train_y_numpy)  # y should be 1-D
y_pred=knn.predict(test_x_numpy)
scores[k]=metrics.accuracy_score(test_y_numpy,y_pred)
print(scores[k])
# + id="l2n8dxGUQD0S" colab_type="code" colab={}
from sklearn.neighbors import KNeighborsClassifier
from scipy.spatial.distance import cosine
# train_x_numpy=[np.random.rand(, 1024).astype('float32') for _ in range(100)]
# x_train_reshape=train_x_numpy.numpy().reshape(-1,1)
# x_train_reshape=train_x_numpy.
# y_train_reshape=train_y_numpy.reshape(-1,1)
# np.asarray(train_x_numpy).reshape(-1,1)
from sklearn import metrics
# k_range=range(1,26)
k=1
scores={}
scores_list=[]
# for k in k_range:
knn=KNeighborsClassifier(n_neighbors=k,metric=cosine)
# knn.fit(np.asarray(train_x_numpy).reshape(-1,1),np.asarray (train_y_numpy).reshape(-1,1))
knn.fit(train_x_numpy,train_y_numpy)
y_pred=knn.predict(test_x_numpy)
scores[k]=metrics.accuracy_score(test_y_numpy,y_pred)
print(scores[k])
# + id="6xLzHrF9642I" colab_type="code" colab={}
scores
# + id="t0s15uQn7txv" colab_type="code" colab={}
from sklearn.ensemble import GradientBoostingRegressor
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from sklearn.metrics import mean_absolute_error
# + id="x7XiRBn7-Ubm" colab_type="code" colab={}
from sklearn.ensemble import GradientBoostingClassifier
boost = GradientBoostingClassifier()
boost.fit(train_x_numpy,train_y_numpy)
y_pred = boost.predict(test_x_numpy)
acc=metrics.accuracy_score(test_y_numpy,y_pred)
# + id="NP0k748v-jeX" colab_type="code" colab={}
print(acc)
# + id="wNvKCkFi_X2P" colab_type="code" colab={}
from sklearn.metrics import confusion_matrix
confusion_matrix(test_y_numpy, y_pred)
# + id="jic5MoxK_cxY" colab_type="code" colab={}
# + id="b_TZ81Yg_n1-" colab_type="code" colab={}
| KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ___
#
# <a href='http://www.pieriandata.com'><img src='../Pierian_Data_Logo.png'/></a>
# ___
# <center><em>Copyright by Pierian Data Inc.</em></center>
# <center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>
# # Time Methods
# ## Python Datetime Review
#
# Basic Python outside of Pandas contains a datetime library:
from datetime import datetime
# To illustrate the order of arguments
my_year = 2017
my_month = 1
my_day = 2
my_hour = 13
my_minute = 30
my_second = 15
# January 2nd, 2017
my_date = datetime(my_year,my_month,my_day)
# Defaults to 0:00
my_date
# January 2nd, 2017 at 13:30:15
my_date_time = datetime(my_year,my_month,my_day,my_hour,my_minute,my_second)
my_date_time
# You can grab any part of the datetime object you want
my_date.day
my_date_time.hour
# # Pandas
#
# # Converting to datetime
#
# Often when data sets are stored, the time component may be a string. Pandas easily converts strings to datetime objects.
import pandas as pd
myser = pd.Series(['Nov 3, 2000', '2000-01-01', None])
myser
myser[0]
# ### pd.to_datetime()
#
# https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#converting-to-timestamps
pd.to_datetime(myser)
pd.to_datetime(myser)[0]
obvi_euro_date = '31-12-2000'
pd.to_datetime(obvi_euro_date)
# 10th of Dec OR 12th of October?
# We may need to tell pandas
euro_date = '10-12-2000'
pd.to_datetime(euro_date)
pd.to_datetime(euro_date,dayfirst=True)
# ## Custom Time String Formatting
#
# Sometimes dates can have a non-standard format; luckily, you can always specify the format to pandas. Note that providing the format can also speed up the conversion, so it may be worth doing even when pandas can parse the string on its own.
# A full table of codes can be found here: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
style_date = '12--Dec--2000'
pd.to_datetime(style_date, format='%d--%b--%Y')
strange_date = '12th of Dec 2000'
pd.to_datetime(strange_date)
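# The same string can also be parsed with an explicit format (one possible spelling, shown as a quick sketch; as noted above, this can speed up parsing)
pd.to_datetime(strange_date, format='%dth of %b %Y')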
# ## Data
#
# Retail Sales: Beer, Wine, and Liquor Stores
#
# Units: Millions of Dollars, Not Seasonally Adjusted
#
# Frequency: Monthly
#
#
# U.S. Census Bureau, Retail Sales: Beer, Wine, and Liquor Stores [MRTSSM4453USN], retrieved from FRED, Federal Reserve Bank of St. Louis; https://fred.stlouisfed.org/series/MRTSSM4453USN, July 2, 2020.
sales = pd.read_csv('RetailSales_BeerWineLiquor.csv')
sales
sales.iloc[0]['DATE']
type(sales.iloc[0]['DATE'])
sales['DATE'] = pd.to_datetime(sales['DATE'])
sales
sales.iloc[0]['DATE']
type(sales.iloc[0]['DATE'])
# ------
# ## Attempt to Parse Dates Automatically
#
# **parse_dates** - bool or list of int or names or list of lists or dict, default False
# The behavior is as follows:
#
# boolean. If True -> try parsing the index.
#
# list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3 each as a separate date column.
#
# list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as a single date column.
#
# dict, e.g. {‘foo’ : [1, 3]} -> parse columns 1, 3 as date and call result ‘foo’
#
# If a column or index cannot be represented as an array of datetimes, say because of an unparseable value or a mixture of timezones, the column or index will be returned unaltered as an object data type. For non-standard datetime parsing, use pd.to_datetime after pd.read_csv. To parse an index or column with a mixture of timezones, specify date_parser to be a partially-applied pandas.to_datetime() with utc=True. See Parsing a CSV with mixed timezones for more.
# Parse Column at Index 0 as Datetime
sales = pd.read_csv('RetailSales_BeerWineLiquor.csv',parse_dates=[0])
sales
type(sales.iloc[0]['DATE'])
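# The dict and list-of-lists forms of **parse_dates** combine several columns into one parsed date column. A minimal sketch using a small hypothetical frame (not part of the retail dataset):
# +
# Write a tiny CSV with separate year/month/day columns, then combine them while reading
date_parts = pd.DataFrame({'year': [2000, 2001], 'month': [1, 2], 'day': [3, 4]})
date_parts.to_csv('date_parts.csv', index=False)
pd.read_csv('date_parts.csv', parse_dates={'full_date': ['year', 'month', 'day']})
# -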
# ## Resample
#
# A common operation with time series data is resampling based on the time series index. Let's see how to use the resample() method. [[reference](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html)]
# Our index
sales.index
# +
# Reset DATE to index
# -
sales = sales.set_index("DATE")
sales
# When calling `.resample()` you first need to pass in a **rule** parameter, then you need to call some sort of aggregation function.
#
# The **rule** parameter describes the frequency with which to apply the aggregation function (daily, monthly, yearly, etc.)<br>
# It is passed in using an "offset alias" - refer to the table below. [[reference](http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases)]
#
# The aggregation function is needed because, due to resampling, we need some sort of mathematical rule to join the rows (mean, sum, count, etc.)
# <table style="display: inline-block">
# <caption style="text-align: center"><strong>TIME SERIES OFFSET ALIASES</strong></caption>
# <tr><th>ALIAS</th><th>DESCRIPTION</th></tr>
# <tr><td>B</td><td>business day frequency</td></tr>
# <tr><td>C</td><td>custom business day frequency (experimental)</td></tr>
# <tr><td>D</td><td>calendar day frequency</td></tr>
# <tr><td>W</td><td>weekly frequency</td></tr>
# <tr><td>M</td><td>month end frequency</td></tr>
# <tr><td>SM</td><td>semi-month end frequency (15th and end of month)</td></tr>
# <tr><td>BM</td><td>business month end frequency</td></tr>
# <tr><td>CBM</td><td>custom business month end frequency</td></tr>
# <tr><td>MS</td><td>month start frequency</td></tr>
# <tr><td>SMS</td><td>semi-month start frequency (1st and 15th)</td></tr>
# <tr><td>BMS</td><td>business month start frequency</td></tr>
# <tr><td>CBMS</td><td>custom business month start frequency</td></tr>
# <tr><td>Q</td><td>quarter end frequency</td></tr>
# <tr><td></td><td><font color=white>intentionally left blank</font></td></tr></table>
#
# <table style="display: inline-block; margin-left: 40px">
# <caption style="text-align: center"></caption>
# <tr><th>ALIAS</th><th>DESCRIPTION</th></tr>
# <tr><td>BQ</td><td>business quarter end frequency</td></tr>
# <tr><td>QS</td><td>quarter start frequency</td></tr>
# <tr><td>BQS</td><td>business quarter start frequency</td></tr>
# <tr><td>A</td><td>year end frequency</td></tr>
# <tr><td>BA</td><td>business year end frequency</td></tr>
# <tr><td>AS</td><td>year start frequency</td></tr>
# <tr><td>BAS</td><td>business year start frequency</td></tr>
# <tr><td>BH</td><td>business hour frequency</td></tr>
# <tr><td>H</td><td>hourly frequency</td></tr>
# <tr><td>T, min</td><td>minutely frequency</td></tr>
# <tr><td>S</td><td>secondly frequency</td></tr>
# <tr><td>L, ms</td><td>milliseconds</td></tr>
# <tr><td>U, us</td><td>microseconds</td></tr>
# <tr><td>N</td><td>nanoseconds</td></tr></table>
# Yearly Means
sales.resample(rule='A').mean()
# Resampling rule 'A' takes all of the data points in a given year, applies the aggregation function (in this case we calculate the mean), and reports the result as the last day of that year. Note that 2020 was not complete in this data set.
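# The same pattern works with any offset alias and aggregation function, for example quarterly totals (a quick sketch)
sales.resample(rule='Q').sum()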
# # .dt Method Calls
#
# Once a column or index is in a datetime format, you can call a variety of methods off of the .dt accessor in pandas:
#
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.dt.html
sales = sales.reset_index()
sales
help(sales['DATE'].dt)
sales['DATE'].dt.month
sales['DATE'].dt.is_leap_year
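# A couple more .dt attributes and methods (a quick sketch)
sales['DATE'].dt.year
sales['DATE'].dt.day_name()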
| 08-Time-Methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Model 1
#
# A four-layer deep network based on the initial, eight-layer deep model.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.models import Sequential,Input,Model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv1D, MaxPooling1D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
# -
from glob import glob
# +
X = []
labels = []
for lcpath, truthpath in zip(sorted(glob('../data/parallel_normed/*_simulated_transit_lcs.npy')),
sorted(glob('../data/parallel_normed/*_simulated_spots_occulted.npy'))):
X.append(np.load(lcpath).T)
labels.append(np.load(truthpath))
X = np.concatenate(X)[:, :, np.newaxis]# - np.array(X).mean()
X -= X.mean()
X /= X.ptp()
# X += 0.5
labels = np.hstack(labels)#[:, np.newaxis]
# -
for x, l in zip(X[:100, :], labels):
plt.plot(x, color='r' if l else 'b', alpha=0.5)
# +
# X = np.random.randn(*X.shape)
# +
# X = np.load('data/simulated_transit_lcs.npy')[:, :, np.newaxis]
# labels = np.load('data/simulated_spots_occulted.npy').astype(int)#[:, np.newaxis]
# +
train_X = X
train_Y = labels
print('Training data shape : ', train_X.shape, train_Y.shape)
# print('Testing data shape : ', test_X.shape, test_Y.shape)
# -
# Find the unique numbers from the train labels
classes = np.unique(train_Y)
nClasses = len(classes)
print('Total number of outputs : ', nClasses)
print('Output classes : ', classes)
train_X = train_X.astype('float32')
# +
from keras.utils import to_categorical
train_Y_one_hot = to_categorical(train_Y)
# +
from sklearn.model_selection import train_test_split
train_X, valid_X, train_label, valid_label = train_test_split(train_X, train_Y_one_hot,
test_size=0.2, random_state=13)
# +
alpha = 0.1
kernel_size = 5
activation = 'linear'
padding = 'same'
batch_size = 128 #64
epochs = 30
num_classes = 2
model = Sequential()
# First layer
model.add(Conv1D(filters=32, kernel_size=kernel_size, activation=activation,
input_shape=(train_X.shape[1], 1), padding=padding))
model.add(LeakyReLU(alpha=alpha))
model.add(MaxPooling1D(2, padding=padding))
model.add(BatchNormalization())
model.add(Dropout(0.25))
# Second layer
model.add(Conv1D(filters=64, kernel_size=kernel_size,
activation=activation, padding=padding))
model.add(LeakyReLU(alpha=alpha))
model.add(MaxPooling1D(pool_size=2, padding=padding))
model.add(BatchNormalization())
model.add(Dropout(0.25))
# Third layer
model.add(Conv1D(filters=128, kernel_size=kernel_size,
activation=activation, padding=padding))
model.add(LeakyReLU(alpha=alpha))
model.add(MaxPooling1D(pool_size=2, padding=padding))
model.add(BatchNormalization())
model.add(Dropout(0.25))
# fourth layer
model.add(Conv1D(filters=128, kernel_size=kernel_size,
activation=activation, padding=padding))
model.add(LeakyReLU(alpha=alpha))
model.add(MaxPooling1D(pool_size=2, padding=padding))
model.add(BatchNormalization())
model.add(Dropout(0.25))
# Fully connected layer 1
model.add(Flatten())
model.add(Dense(128, activation=activation))
model.add(LeakyReLU(alpha=alpha))
# Fully connected layer 2
# model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
# -
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
# optimizer=keras.optimizers.SGD(lr=1e-4),
metrics=['accuracy'])
model.summary()
# model.load_weights('data/weights.hdf5')
train = model.fit(train_X, train_label, batch_size=batch_size,
epochs=epochs, validation_data=(valid_X, valid_label))
accuracy = train.history['acc']
val_accuracy = train.history['val_acc']
loss = train.history['loss']
val_loss = train.history['val_loss']
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, 'bo', label='Training accuracy')
plt.plot(epochs, val_accuracy, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
model.save('data/model1.hdf5')
| cnn/compare_models/model1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Importing Packages
#Scikit-Learn
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
#Computational and Visualisation packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
#Custom plot display function
def display_plot(cv_scores, cv_scores_std):
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(alpha_space, cv_scores)
std_error = cv_scores_std / np.sqrt(10)
ax.fill_between(alpha_space, cv_scores + std_error, cv_scores - std_error, alpha=0.2)
ax.set_ylabel('CV Score +/- Std Error')
ax.set_xlabel('Alpha')
ax.axhline(np.max(cv_scores), linestyle='--', color='.5')
ax.set_xlim([alpha_space[0], alpha_space[-1]])
ax.set_xscale('log')
plt.show()
#Loading the requisite dataset
df = pd.read_csv('gapminder_dataset.csv')
df_columns = df.columns
# Array for feature and target variables
X = df.fertility.values
y = df.life.values
# +
# Dimensions before reshape
print("Dimensions of y before reshaping: {}".format(y.shape))
print("Dimensions of X before reshaping: {}".format(X.shape))
# Reshaping X and y
y = y.reshape(-1, 1)
X = X.reshape(-1, 1)
# Dimensions after reshape
print("Dimensions of y after reshaping: {}".format(y.shape))
print("Dimensions of X after reshaping: {}".format(X.shape))
# -
# Split of the main dataset into 70% training and 30% test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state=42)
# +
# Linear Regression
reg_all = LinearRegression()
# Fitting the regressor on the training dataset
reg_all.fit(X_train, y_train)
y_pred = reg_all.predict(X_test)
# Compute and print R^2 and RMSE
print("R^2: {}".format(reg_all.score(X_test, y_test)))
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print("Root Mean Squared Error: {}".format(rmse))
# -
#5 Fold Cross Validation Evaluation
reg = LinearRegression()
cv_scores_5fold = cross_val_score(reg, X, y, cv=5 )
print(cv_scores_5fold)
print("Average 5-Fold CV Score: {}".format(np.mean(cv_scores_5fold)))
# +
#Lasso Regularization
X = df.drop('fertility',1)
y = df.life
df_columns = df.drop('fertility',1).columns
# Instantiating a lasso regressor
lasso = Lasso(alpha=0.4, normalize=True)
# Fit the regressor to the data
lasso = lasso.fit(X, y)
lasso
lasso_coef = lasso.coef_
lasso_coef
plt.plot(range(len(df_columns)), lasso_coef)
plt.xticks(range(len(df_columns)), df_columns.values, rotation=60)
plt.margins(0.02)
plt.show()
# +
#Ridge Regularization
# +
# Setup the array of alphas and lists to store scores
alpha_space = np.logspace(-4, 0, 50)
ridge_scores = []
ridge_scores_std = []
# Initialize a ridge regressor
ridge = Ridge(normalize=True)
for alpha in alpha_space:
ridge.alpha = alpha
ridge_cv_scores = cross_val_score(ridge, X, y, cv=10)
ridge_scores.append(np.mean(ridge_cv_scores))
ridge_scores_std.append(np.std(ridge_cv_scores))
display_plot(ridge_scores, ridge_scores_std)
| Regression/Linear-Regression-Lasso&Ridge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Solution for JSON Structured Data
#
# ---
# #### Step 1:
#
# +
# Determine the local JSON file name
# Import the OS module
import os
# List the contents of the current working directory
print(os.listdir())
# The name of the file is 'network_data.json'
json_file = 'network_data.json'
print(json_file)
# -
# ---
# #### Step 2:
#
# Import the JSON module
import json
# ---
# #### Step 3:
#
# +
# Use the context manager to read the JSON file and convert the contents to a Python object
with open(json_file, mode='rt', encoding='utf-8') as file:
json_data = file.read()
python_data = json.loads(json_data)
# Display the type of the 'python_data' object
type(python_data)
# -
# ---
# #### Step 4:
#
# +
# Import the Pretty Print (**pprint**) function from the Pretty Print (**pprint**) module
from pprint import pprint
# Use Pretty Print to Display the 'python_data' object
pprint(python_data)
# -
# ---
# #### Step 5:
#
# +
# Create a dictionary object for the 'nxos2' device
nxos2 = {
'nxos2':{
'data': {
'role': 'distribution',
'site': 'atc56',
'type': 'network-device'
},
'groups': ['dna_3'],
'hostname': 'nxos2',
'platform': 'nxos',
'username': 'wwt',
'password': '<PASSWORD>1!',
'port': '22'
}
}
# Display the contents of the 'nxos2' dictionary
print(nxos2.items())
# -
# ---
# #### Step 6:
#
# +
# Add the 'nxos2' dictionary to the 'python_data' dictionary
python_data.update(nxos2)
# Display the 'python_data dictionary - option #1
pprint(python_data)
# Display the 'python_data dictionary - option #2
print(python_data.keys())
# Display the 'python_data dictionary - option #3
print(python_data.items())
# -
# ---
# #### Step 7
#
# +
# Assign the contents of the 'python_data' dictionary to the 'devices' key of a new 'python_data'
python_data = {'devices': python_data}
# Display the contents of the new 'python_data' dictionary
pprint(python_data)
# -
# ---
# #### Step 8
#
# +
# Use the context manager to write a new JSON file with the JSON-converted contents of **python_data**
with open('new_network_data.json', mode='wt', encoding='utf-8') as file:
new_json_data = json.dumps(python_data, indent=2)
file.write(new_json_data)
# List the contents of the current working directory
print(os.listdir())
# -
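# ---
# #### Optional check
#
# As a quick sanity check (an editor's sketch, not part of the original steps), read the new file back and confirm the top-level 'devices' key and its device entries are present.
# +
# Use the context manager to read the new JSON file back into a Python object
with open('new_network_data.json', mode='rt', encoding='utf-8') as file:
    round_trip = json.loads(file.read())
# Display the top-level keys and the device names
print(list(round_trip.keys()))
pprint(list(round_trip['devices'].keys()))
# -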
| lab/part_ii_json/json_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
from local.torch_basics import *
from local.test import *
from local.layers import *
from local.callback.hook import *
from local.notebook.showdoc import *
# +
# default_exp vision.models.unet
# -
# # Dynamic UNet
#
# > Unet model using PixelShuffle ICNR upsampling that can be built on top of any pretrained architecture
#export
def _get_sz_change_idxs(sizes):
"Get the indexes of the layers where the size of the activation changes."
feature_szs = [size[-1] for size in sizes]
sz_chg_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0])
if feature_szs[0] != feature_szs[1]: sz_chg_idxs = [0] + sz_chg_idxs
return sz_chg_idxs
#hide
test_eq(_get_sz_change_idxs([[3,64,64], [16,64,64], [32,32,32], [16,32,32], [32,32,32], [16,16]]), [1,4])
#export
class UnetBlock(Module):
"A quasi-UNet block, using `PixelShuffle_ICNR upsampling`."
@delegates(ConvLayer.__init__)
def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation,
self_attention=False, init=nn.init.kaiming_normal_, **kwargs):
self.hook = hook
self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls)
self.bn = BatchNorm(x_in_c)
ni = up_in_c//2 + x_in_c
nf = ni if final_div else ni//2
self.conv1 = ConvLayer(ni, nf, act_cls=act_cls, **kwargs)
self.conv2 = ConvLayer(nf, nf, act_cls=act_cls, xtra=SelfAttention(nf) if self_attention else None, **kwargs)
self.relu = act_cls()
apply_init(nn.Sequential(self.conv1, self.conv2), init)
def forward(self, up_in):
s = self.hook.stored
up_out = self.shuf(up_in)
ssh = s.shape[-2:]
if ssh != up_out.shape[-2:]:
up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
return self.conv2(self.conv1(cat_x))
#export
class DynamicUnet(SequentialEx):
"Create a U-Net from a given architecture."
def __init__(self, encoder, n_classes, img_size, blur=False, blur_final=True, self_attention=False,
y_range=None, last_cross=True, bottle=False, act_cls=defaults.activation,
init=nn.init.kaiming_normal_, norm_type=NormType.Batch, **kwargs):
imsize = img_size
sizes = model_sizes(encoder, size=imsize)
sz_chg_idxs = list(reversed(_get_sz_change_idxs(sizes)))
self.sfs = hook_outputs([encoder[i] for i in sz_chg_idxs])
x = dummy_eval(encoder, imsize).detach()
ni = sizes[-1][1]
middle_conv = nn.Sequential(ConvLayer(ni, ni*2, act_cls=act_cls, norm_type=norm_type, **kwargs),
ConvLayer(ni*2, ni, act_cls=act_cls, norm_type=norm_type, **kwargs)).eval()
x = middle_conv(x)
layers = [encoder, BatchNorm(ni), nn.ReLU(), middle_conv]
for i,idx in enumerate(sz_chg_idxs):
not_final = i!=len(sz_chg_idxs)-1
up_in_c, x_in_c = int(x.shape[1]), int(sizes[idx][1])
do_blur = blur and (not_final or blur_final)
sa = self_attention and (i==len(sz_chg_idxs)-3)
unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa,
act_cls=act_cls, init=init, norm_type=norm_type, **kwargs).eval()
layers.append(unet_block)
x = unet_block(x)
ni = x.shape[1]
if imsize != sizes[0][-2:]: layers.append(PixelShuffle_ICNR(ni, act_cls=act_cls))
x = PixelShuffle_ICNR(ni)(x)
if imsize != x.shape[-2:]: layers.append(Lambda(lambda x: F.interpolate(x, imsize, mode='nearest')))
if last_cross:
layers.append(MergeLayer(dense=True))
ni += in_channels(encoder)
layers.append(ResBlock(1, ni, ni//2 if bottle else ni, act_cls=act_cls, norm_type=norm_type, **kwargs))
layers += [ConvLayer(ni, n_classes, ks=1, act_cls=None, norm_type=norm_type, **kwargs)]
apply_init(nn.Sequential(layers[2], *layers[-2:]), init)
if y_range is not None: layers.append(SigmoidRange(*y_range))
super().__init__(*layers)
def __del__(self):
if hasattr(self, "sfs"): self.sfs.remove()
from local.vision.all import *
m = resnet34()
m = nn.Sequential(*list(m.children())[:-2])
tst = DynamicUnet(m, 5, (128,128), norm_type=None)
x = torch.randn(2, 3, 128, 128)
y = tst(x)
test_eq(y.shape, [2, 5, 128, 128])
# ## Export -
#hide
from local.notebook.export import *
notebook2script(all_fs=True)
| dev/15a_vision_models_unet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # ggplot style sheet
#
#
# This example demonstrates the "ggplot" style, which adjusts the style to
# emulate ggplot_ (a popular plotting package for R_).
#
# These settings were shamelessly stolen from [1]_ (with permission).
#
# .. [1] https://web.archive.org/web/20111215111010/http://www.huyng.com/archives/sane-color-scheme-for-matplotlib/691/
#
#
# +
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# Fixing random state for reproducibility
np.random.seed(19680801)
fig, axs = plt.subplots(ncols=2, nrows=2)
ax1, ax2, ax3, ax4 = axs.ravel()
# scatter plot (Note: `plt.scatter` doesn't use default colors)
x, y = np.random.normal(size=(2, 200))
ax1.plot(x, y, 'o')
# sinusoidal lines with colors from default color cycle
L = 2*np.pi
x = np.linspace(0, L)
ncolors = len(plt.rcParams['axes.prop_cycle'])
shift = np.linspace(0, L, ncolors, endpoint=False)
for s in shift:
ax2.plot(x, np.sin(x + s), '-')
ax2.margins(0)
# bar graphs
x = np.arange(5)
y1, y2 = np.random.randint(1, 25, size=(2, 5))
width = 0.25
ax3.bar(x, y1, width)
ax3.bar(x + width, y2, width,
color=list(plt.rcParams['axes.prop_cycle'])[2]['color'])
ax3.set_xticks(x + width)
ax3.set_xticklabels(['a', 'b', 'c', 'd', 'e'])
# circles with colors from default color cycle
for i, color in enumerate(plt.rcParams['axes.prop_cycle']):
xy = np.random.normal(size=2)
ax4.add_patch(plt.Circle(xy, radius=0.3, color=color['color']))
ax4.axis('equal')
ax4.margins(0)
plt.show()
| matplotlib/gallery_jupyter/style_sheets/ggplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: xpython
# language: python
# name: xpython
# ---
# + [markdown] deletable=false editable=false
# Copyright 2020 <NAME> and made available under [CC BY-SA](https://creativecommons.org/licenses/by-sa/4.0) for text and [Apache-2.0](http://www.apache.org/licenses/LICENSE-2.0) for code.
# + [markdown] deletable=false editable=false
# # Data Science and the Nature of Data
# + [markdown] deletable=false editable=false
# ## Types of variables
#
# Structured data begins with **measurements** of some type of thing in the real world, which we call a **variable**.
# Consider the example of height.
# I may measure 10 people and find that their heights in centimeters are:
#
# | Height |
# |--------|
# | 165 |
# | 188 |
# | 153 |
# | 164 |
# | 150 |
# | 190 |
# | 169 |
# | 163 |
# | 165 |
# | 190 |
#
# Each of these values (e.g. 165) is a measurement of the variable *height*.
# We call *height* a variable because its value isn't constant.
# If everyone in the world were the same height, we wouldn't call height a variable, and we also wouldn't bother measuring it, because we'd know everyone is the same.
#
# Variables have different **types** that can affect your analysis.
#
# ### Nominal
#
# A nominal variable consists of unordered categories, like *male* or *female* for biological sex.
# Notice that these categories are not numbers, and there is no order to the categories.
# We do not say that male comes before female or is smaller than female.
#
# ### Ordinal
#
# Ordinal variables consist of ordered categories.
# You can think of it as nominal data but with an ordering from first to last or smallest to largest.
# A common example of ordinal data are Likert questions like:
#
# ```
# (1) Strongly disagree
# (2) Disagree
# (3) Neither agree nor disagree
# (4) Agree
# (5) Strongly agree
# ```
#
# Even though these options are numbered 1 to 5, those numbers only indicate which comes before the others, not how "big" an option is.
# For example, we wouldn't say that the difference between *Agree* and *Disagree* is the same as the difference between *Neither agree nor disagree* and *Strongly agree*.
#
# ### Interval
#
# Interval variables are ordered *and* their measurement scales are evenly spaced.
# A classic example is temperature in Fahrenheit.
# In degrees Fahrenheit, the difference between 70 and 71 is the same as the difference between 90 and 91 - either case is one degree.
# The other most important characteristic of interval variables is also the most confusing one, which is that interval variables don't have a meaningful zero value.
# Degrees Fahrenheit is an example of this because there's nothing special about 0 degrees.
# 0 degrees doesn't mean there's no temperature or no heat energy, it's just an arbitrary point on the scale.
#
# ### Ratio
#
# Ratio variables are like interval variables but with meaningful zeros.
# Age and height are good examples because 0 age means you have no age, and 0 height means you have no height.
# The name *ratio* reflects that you can form a ratio with these variables, which means that you can say age 20 is twice as old as age 10.
# Notice you can't say that about degrees Fahrenheit: 100 degrees is not really twice as hot as 50 degrees, because 0 degrees Fahrenheit doesn't mean "no temperature."
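# + [markdown]
# *An aside (an editor's minimal sketch, not part of the guided exercise below):* these variable types map naturally onto pandas data types. Nominal data can be stored as an unordered categorical, ordinal data as an ordered categorical, and interval/ratio data as plain numeric columns.
# +
import pandas as pd

# Nominal: unordered categories
sex = pd.Categorical(['male', 'female', 'female'])

# Ordinal: ordered categories (a Likert scale)
answers = pd.Categorical(['Agree', 'Strongly agree', 'Disagree'],
                         categories=['Strongly disagree', 'Disagree',
                                     'Neither agree nor disagree',
                                     'Agree', 'Strongly agree'],
                         ordered=True)

# Interval/ratio: plain numbers (e.g. height in centimeters)
heights = pd.Series([165, 188, 153])

print(sex.categories)
print(answers.min(), '->', answers.max())
print(heights.mean())
# -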
# + [markdown] deletable=false editable=false
# ## Tabular data
#
# The most common type of structured data is **tabular data** which is what you find in spreadsheets.
# If you've ever used a spreadsheet, you know something about tabular data!
#
# Here's an example of tabular data, with *height* in centimeters, *age* in years, and *weight* in kilograms:
#
# | Height | Age | Weight |
# |--------|-----|--------|
# | 161 | 50 | 53 |
# | 161 | 17 | 53 |
# | 155 | 33 | 84 |
# | 180 | 51 | 84 |
# | 186 | 18 | 88 |
#
# In tabular data like this, each **row** is a person.
# More generically, we would say each row is an **observation** or **datapoint** (in statistics terminology) or an **item** (in machine learning terminology).
# In each row, we have measurements for each of our variables for that particular person.
# Since we have five rows of measurements, we know that there are five people in this dataset.
#
# We can also think about tabular data in terms of **columns**.
# Each column represents a variable, with the name of that variable in the **column header**.
# For example, *height* is at the top of the first column and is the name of the variable for that column.
# Importantly, the header is not an observation but rather a description of our data.
# This is why we don't count the header when we are counting the rows in our data.
#
# ### Delimited tabular data - CSV and TSV
#
# You are probably familiar with spreadsheet files, e.g. Microsoft Excel has files that end in `.xls` or `.xlsx`.
# However, in data science, it is more common to have tabular data files that are **delimited**.
# A delimited file is just a plain text file where column boundaries are represented by a specific character, usually a comma or a tab.
#
# Here's what the data above looks like in **comma separated value (CSV)** form:
#
# ```
# Height,Age,Weight
# 161,50,53
# 161,17,53
# 155,33,84
# 180,51,84
# 186,18,88
# ```
#
# and here's what the data looks like in **tab separated value (TSV)** form:
#
# ```
# Height Age Weight
# 161 50 53
# 161 17 53
# 155 33 84
# 180 51 84
# 186 18 88
# ```
#
# The choice of the delimiter (comma, tab, or something else) is really arbitrary, but it's always better to use a delimiter that doesn't appear in your data.
#
# ## Dataframes
#
# Data scientists often load tabular data into a **dataframe** that they can manipulate in a program.
# In other words, tabular data from a file is brought into the computational notebook in a variable that represents rows, columns, header, etc just like they are stored in the tabular data file.
# Because dataframes match tabular data in files, they are very intuitive to work with, which may explain their popularity.
#
# We're now at the practical portion of this notebook, so let's work with dataframes!
# + [markdown] deletable=false editable=false
# First, we need to import a dataframe library called `pandas`.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1t2xuFo0TUyILl-ljCZK1zECx8cu__41g/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# Once the code is in the Jupyter cell below, you must **execute** or **run** it by either pressing the ► button at the top of the window or by pressing Shift + Enter on your keyboard.
# + deletable=false editable=false
import pandas as pd
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="importAs" id="*]+[97*lfc0xBExPyl{#" x="73" y="63"><field name="libraryName">pandas</field><field name="libraryAlias" id="i!#]:2XI=^qLb$e.|iwo">pd</field></block></xml>
# + [markdown] deletable=false editable=false
# We can now do things with `pd`, like load datasets!
#
# Our file is called `height-age-weight.csv` and it is in the `datasets` folder.
# That means the **path** from this notebook (the one you're reading) to the data is `datasets/height-age-weight.csv`.
#
# To read this file into a dataframe, we will use `pd`.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/18P7zNt-9PYIswS_5aevZ5oNaoa4Aep71/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# Execute or run the cell by pressing the ► button.
# + deletable=false editable=false
pd.read_csv('datasets/height-age-weight.csv')
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="varDoMethod" id="Ki|piN(^ln~knW%tt:zm" x="8" y="188"><field name="VAR" id="i!#]:2XI=^qLb$e.|iwo">pd</field><field name="MEMBER">read_csv</field><data>pd:read_csv</data><value name="INPUT"><block type="text" id=":/)(_QEn?aUY9N7=qBa["><field name="TEXT">datasets/height-age-weight.csv</field></block></value></block></xml>
# + [markdown] deletable=false editable=false
# When you run the cell, it will display the dataframe directly below it.
# This is one of the nice things about Jupyter - **it will display the output of the last line of code in a cell**, even if the output is text, a table, or a plot.
# + [markdown] deletable=false editable=false
# Right now, we haven't actually stored the dataframe anywhere.
# We used `pd` to read the csv file, and then Jupyter output that so we could see it.
# But if we wanted to do anything with the dataframe, we'd have to read the file again.
#
# Instead of reading the file every time we want to access the data, we can **store it in a variable**.
# In other words, we will create a variable and set it to be the dataframe we created from the file.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1lyZ3vFWxckmjzhM-JO8-8LJWpsHaRs1_/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# As always, you need to hit the ► button or press Shift + Enter to run the code.
# + deletable=false editable=false
dataframe = pd.read_csv('datasets/height-age-weight.csv')
dataframe
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="hL-!hLQ^Kb^==E8[,~?e">dataframe</variable><variable id="i!#]:2XI=^qLb$e.|iwo">pd</variable></variables><block type="variables_set" id="FL{Ko)?[53f+HALXr,iQ" x="82" y="95"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field><value name="VALUE"><block type="varDoMethod" id="MMpyJ@2l1WE|gv?i2OrP"><field name="VAR" id="i!#]:2XI=^qLb$e.|iwo">pd</field><field name="MEMBER">read_csv</field><data>pd:read_csv</data><value name="INPUT"><block type="text" id="J|W%;]~T@%@Z=gvY_(@g"><field name="TEXT">datasets/height-age-weight.csv</field></block></value></block></value></block><block type="variables_get" id="ky?+zrP9.?Cs^zOi9k3-" x="80" y="175"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field></block></xml>
# + [markdown] deletable=false editable=false
# The output is the same as before - the only difference is that we've read the csv and stored the data in the `dataframe` variable, so we will use the `dataframe` variable whenever we want to work with the data.
# + [markdown] deletable=false editable=false
# There are many things we can do with dataframes.
# One thing we can do is get specific rows.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1KGvTX2sE-jagp75aEhi82sSa21LC7hwF/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# *Then ► or Shift + Enter*
# + deletable=false editable=false
dataframe[ : 1]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="hL-!hLQ^Kb^==E8[,~?e">dataframe</variable></variables><block type="lists_getSublist" id="si:PS_Y2vG]k8xp(Pfv:" x="59" y="280"><mutation at1="false" at2="true"></mutation><field name="WHERE1">FIRST</field><field name="WHERE2">FROM_START</field><value name="LIST"><block type="variables_get" id="8],lIJ/{(ICxxa?6-DDS"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field></block></value><value name="AT2"><block type="math_number" id="Db54~QCvA;Af:x^@NtZ?"><field name="NUM">1</field></block></value></block></xml>
# + [markdown] deletable=false editable=false
# As you can see, the output is only the first row of the dataframe.
# + [markdown] deletable=false editable=false
# Try it again in the cell below, but this time, change the `1` to a `2`
# + [markdown] deletable=false editable=false
# *Then ► or Shift + Enter*
# + deletable=false editable=false
dataframe[ : 2]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="t[n^Fcp7,s93E17ZZ9J6">dataframe</variable></variables><block type="lists_getSublist" id="J=Y2q,G9vz$#fGJl6Et=" x="8" y="518"><mutation at1="false" at2="true"></mutation><field name="WHERE1">FIRST</field><field name="WHERE2">FROM_START</field><value name="LIST"><block type="variables_get" id="`pG,vo{P(1~U+ZtKB-Zr"><field name="VAR" id="t[n^Fcp7,s93E17ZZ9J6">dataframe</field></block></value><value name="AT2"><block type="math_number" id="n+8Lq=-557|wZ@WBd}%l"><field name="NUM">2</field></block></value></block></xml>
# + [markdown] deletable=false editable=false
# Now the output is the first two rows of the dataframe.
# We could get arbitrary rows of the dataframe by starting at a different number and ending at a different number.
# Sometimes people call this a **slice**.
# + [markdown] deletable=false editable=false
# We can get a column of the dataframe by using the name of the variable for that column.
# Before we go any further, let's step back for a second to talk about **lists**.
#
# We can think of a dataframe in two ways:
#
# - A list of rows
# - A list of columns
#
# We just saw the list of rows way.
# So why are columns any different?
# The difference is that our columns have variable names, and we often want to refer to columns using those names.
# For example, we want to say something like "give me the Age column" instead of "give me column 2."
#
# Let's make a list from scratch to illustrate this.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1Su7EClOxhnYMVSdVvipvjGIP66Lez0Bq/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# Now execute the cell (scroll up if you need a reminder how).
# + deletable=false editable=false
['Height']
#<xml xmlns="https://developers.google.com/blockly/xml"><block type="lists_create_with" id="A!9qJISK`^DPM.ZM+0V1" x="120" y="210"><mutation items="1"></mutation><value name="ADD0"><block type="text" id="XMRG^JW~zEZawF/bENvh"><field name="TEXT">Height</field></block></value></block></xml>
# + [markdown] deletable=false editable=false
# This is a list with one thing inside it, `"Height"`.
# Lists can have multiple things inside them, making lists a container for other variables.
# + [markdown] deletable=false editable=false
# Let's use a list to get a column from the dataframe.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1EFo71gc2znZVkblOQitJpMdiXCwYEORh/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# And run it.
# + deletable=false editable=false
dataframe[['Height']]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="hL-!hLQ^Kb^==E8[,~?e">dataframe</variable></variables><block type="indexer" id="(Y770#M`2DWvNWkj?JXf" x="114" y="219"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field><value name="INDEX"><block type="lists_create_with" id="k8Zx3G8(ic?]j]H;K2XA"><mutation items="1"></mutation><value name="ADD0"><block type="text" id="z/jw.,-]Mv[`//q+Gj^S"><field name="TEXT">Height</field></block></value></block></value></block></xml>
# + [markdown] deletable=false editable=false
# We can get more than one column by adding another element to the list.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1mb3PrAdo2XMNFwSa76m3ooCA7EF6luI_/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# And run the cell (try Shift + Enter if you haven't tried it yet).
# + deletable=false editable=false
dataframe[['Height', 'Age']]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="hL-!hLQ^Kb^==E8[,~?e">dataframe</variable></variables><block type="indexer" id="w4iC+S3D7/C70r7+?.sm" x="116" y="214"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field><value name="INDEX"><block type="lists_create_with" id=";,?ePMm86[Emg/2#O3-`"><mutation items="2"></mutation><value name="ADD0"><block type="text" id="G*0SS%A%|@,nGuL1V]v."><field name="TEXT">Height</field></block></value><value name="ADD1"><block type="text" id="^(9;Hz5G/rmXj3Plb*08"><field name="TEXT">Age</field></block></value></block></value></block></xml>
# + [markdown] deletable=false editable=false
# To recap, dataframes are both lists of rows and lists of columns, and lists are themselves containers for other variables.
# + [markdown] deletable=false editable=false
# There are many, many things we can do with dataframes, but let's just talk about one more for now.
#
# We can select rows based on a value in a particular column:
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1bZ7TkxwBC7eVkt3SI26-QztwXcB3pIK9/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# Don't forget to run the cell!
# + deletable=false editable=false
dataframe[['Height']] > 161
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="hL-!hLQ^Kb^==E8[,~?e">dataframe</variable></variables><block type="logic_compare" id=";:=3#b-u]Es@#Pk6#Z-U" x="92" y="207"><field name="OP">GT</field><value name="A"><block type="indexer" id="AJ@Ge,N{W6f}Ht,$E+Gc"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field><value name="INDEX"><block type="lists_create_with" id="KWo5o#X2YkZzA!Kra)RL"><mutation items="1"></mutation><value name="ADD0"><block type="text" id="vl~F*?w_@4jY@-77)1i%"><field name="TEXT">Height</field></block></value></block></value></block></value><value name="B"><block type="math_number" id=":]B_nco|qZKgTxZ{{2N%"><field name="NUM">161</field></block></value></block></xml>
# + [markdown] deletable=false editable=false
# The resulting column is `True` or `False` depending on whether the value of `Height` was above 161 or not (notice a few were exactly equal to 161, so they weren't greater).
# + [markdown] deletable=false editable=false
# What we're about to do next is magical.
# + [markdown] html="<iframe class='metadata-html' src='https://drive.google.com/file/d/1t1uRk--zv7OodkBytsRkb1bGoroeyGAD/preview' width='624' height='351'></iframe>"
# **Follow the steps in the video below**
# + [markdown] deletable=false editable=false
# And run it!
# + deletable=false editable=false
dataframe[(dataframe['Height'] > 161)]
#<xml xmlns="https://developers.google.com/blockly/xml"><variables><variable id="hL-!hLQ^Kb^==E8[,~?e">dataframe</variable></variables><block type="indexer" id="1I9_x|53fOy}m^)DYpOu" x="26" y="143"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field><value name="INDEX"><block type="logic_compare" id="g{+I/!]tLL$|Xf?U?aeg"><field name="OP">GT</field><value name="A"><block type="indexer" id="h*mS_gv5(.;`UJE,mhci"><field name="VAR" id="hL-!hLQ^Kb^==E8[,~?e">dataframe</field><value name="INDEX"><block type="text" id="(}AUy92RNq+ETbWfVT|Q"><field name="TEXT">Height</field></block></value></block></value><value name="B"><block type="math_number" id="S7=aG{oQWAS]K)ax3o:Y"><field name="NUM">161</field></block></value></block></value></block></xml>
# + [markdown] deletable=false editable=false
# The dataframe only kept the rows for which `Height` was > 161, i.e. those for which this was `True`.
#
# Notice this time we didn't put `Height` in a list. It won't work if we do.
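# + [markdown]
# *Aside (added illustration, not part of the original exercise):* indexing with a boolean *Series* (no list around `Height`) keeps only the matching rows, while indexing with a boolean *DataFrame* (a list around `Height`) masks the non-matching values with `NaN` and keeps every row, which is why the list version doesn't work for filtering. A minimal pandas sketch:
# +
tall_rows = dataframe[dataframe['Height'] > 161]     # boolean Series: non-matching rows are dropped
masked = dataframe[dataframe[['Height']] > 161]      # boolean DataFrame: values are masked with NaN, rows are kept
print(len(tall_rows), len(masked))
# -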
# + [markdown] deletable=false editable=false
# <!-- -->
| E1/solutions/we-bl-na-solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# # Run Experiments
#
# You can use the Azure Machine Learning SDK to run code experiments that log metrics and generate outputs. This is at the core of most machine learning operations in Azure Machine Learning.
#
# ## Connect to your workspace
#
# All experiments and associated resources are managed within your Azure Machine Learning workspace. In most cases, the workspace configuration is stored in a JSON configuration file. This makes it easier to reconnect without needing to remember details like your Azure subscription ID. You can download the JSON configuration file from the blade for your workspace in the Azure portal, but if you're using a compute instance within your workspace, the configuration file has already been downloaded to the root folder.
#
# The following code uses the configuration file to connect to your workspace.
#
# > **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
# +
import azureml.core
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to use Azure ML {} to work with {}'.format(azureml.core.VERSION, ws.name))
# -
# ## Run an experiment
#
# One of the most fundamental tasks that data scientists need to perform is to create and run experiments that process and analyze data. In this exercise, you'll learn how to use an Azure ML *experiment* to run Python code and record values extracted from data. In this case, you'll use a simple dataset that contains details of patients that have been tested for diabetes. You'll run an experiment to explore the data, extracting statistics, visualizations, and data samples. Most of the code you'll use is fairly generic Python, such as you might run in any data exploration process. However, with the addition of a few lines, the code uses an Azure ML *experiment* to log details of the run.
# +
from azureml.core import Experiment
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Create an Azure ML experiment in your workspace
experiment = Experiment(workspace=ws, name="mslearn-diabetes")
# Start logging data from the experiment, obtaining a reference to the experiment run
run = experiment.start_logging()
print("Starting experiment:", experiment.name)
# load the data from a local file
data = pd.read_csv('data/diabetes.csv')
# Count the rows and log the result
row_count = (len(data))
run.log('observations', row_count)
print('Analyzing {} rows of data'.format(row_count))
# Plot and log the count of diabetic vs non-diabetic patients
diabetic_counts = data['Diabetic'].value_counts()
fig = plt.figure(figsize=(6,6))
ax = fig.gca()
diabetic_counts.plot.bar(ax = ax)
ax.set_title('Patients with Diabetes')
ax.set_xlabel('Diagnosis')
ax.set_ylabel('Patients')
plt.show()
run.log_image(name='label distribution', plot=fig)
# log distinct pregnancy counts
pregnancies = data.Pregnancies.unique()
run.log_list('pregnancy categories', pregnancies)
# Log summary statistics for numeric columns
med_columns = ['PlasmaGlucose', 'DiastolicBloodPressure', 'TricepsThickness', 'SerumInsulin', 'BMI']
summary_stats = data[med_columns].describe().to_dict()
for col in summary_stats:
    keys = list(summary_stats[col].keys())
    values = list(summary_stats[col].values())
    for index in range(len(keys)):
        run.log_row(col, stat=keys[index], value = values[index])
# Save a sample of the data and upload it to the experiment output
data.sample(100).to_csv('sample.csv', index=False, header=True)
run.upload_file(name='outputs/sample.csv', path_or_stream='./sample.csv')
# Complete the run
run.complete()
# -
# ## View run details
#
# In Jupyter Notebooks, you can use the **RunDetails** widget to see a visualization of the run details.
# +
from azureml.widgets import RunDetails
RunDetails(run).show()
# -
# ### View more details in Azure Machine Learning studio
#
# Note that the **RunDetails** widget includes a link to **view run details** in Azure Machine Learning studio. Click this to open a new browser tab with the run details (you can also open [Azure Machine Learning studio](https://ml.azure.com) and find the run on the **Experiments** page; a sketch of retrieving this link programmatically follows the list below). When viewing the run in Azure Machine Learning studio, note the following:
#
# - The **Details** tab contains the general properties of the experiment run.
# - The **Metrics** tab enables you to select logged metrics and view them as tables or charts.
# - The **Images** tab enables you to select and view any images or plots that were logged in the experiment (in this case, the *label distribution* plot).
# - The **Child Runs** tab lists any child runs (in this experiment there are none).
# - The **Outputs + Logs** tab shows the output or log files generated by the experiment.
# - The **Snapshot** tab contains all files in the folder where the experiment code was run (in this case, all files in the same folder as this notebook).
# - The **Explanations** tab is used to show model explanations generated by the experiment (in this case, there are none).
# - The **Fairness** tab is used to visualize predictive performance disparities that help you evaluate the fairness of machine learning models (in this case, there are none).
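# You can also retrieve the studio link for a run programmatically; the sketch below assumes the **get_portal_url** method of the run object in the Azure ML SDK.
# +
# Print a direct link to this run in Azure Machine Learning studio
print('See run details at:', run.get_portal_url())
# -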
# ### Retrieve experiment details using the SDK
#
# The **run** variable in the code you ran previously is an instance of a **Run** object, which is a reference to an individual run of an experiment in Azure Machine Learning. You can use this reference to get information about the run and its outputs.
# +
import json
# Get logged metrics
print("Metrics:")
metrics = run.get_metrics()
for metric_name in metrics:
    print(metric_name, ":", metrics[metric_name])
# Get output files
print("\nFiles:")
files = run.get_file_names()
for file in files:
    print(file)
# -
# You can download the files produced by the experiment, either individually by using the **download_file** method, or by retrieving multiple files with the **download_files** method. The following code downloads all of the files in the run's **outputs** folder.
# +
import os
download_folder = 'downloaded-files'
# Download files in the "outputs" folder
run.download_files(prefix='outputs', output_directory=download_folder)
# Verify the files have been downloaded
for root, directories, filenames in os.walk(download_folder):
    for filename in filenames:
        print(os.path.join(root, filename))
# -
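# If you only need a single file, the **download_file** method downloads one named file rather than the whole folder. The call below is a sketch added for illustration; the destination file name is arbitrary.
# +
# Download just the sample CSV produced by the run
run.download_file(name='outputs/sample.csv', output_file_path=os.path.join(download_folder, 'sample-only.csv'))
# -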
# If you need to troubleshoot the experiment run, you can use the **get_details** method to retrieve basic details about the run, or you can use the **get_details_with_logs** method to retrieve the run details as well as the contents of log files generated during the run.
run.get_details_with_logs()
# Note that the details include information about the compute target on which the experiment was run, as well as the date and time when it started and ended. Additionally, because the notebook containing the experiment code (this one) is in a cloned Git repository, details about the repo, branch, and status are recorded in the run history.
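# A minimal sketch of pulling a couple of these fields out of the dictionary returned by **get_details** is shown below; the key names used (*status*, *startTimeUtc*, *endTimeUtc*) are assumptions rather than something documented in this lab, so `.get()` is used defensively.
# +
# Inspect a few fields of the run details (illustrative sketch)
details = run.get_details()
print('Status:', details.get('status'))
print('Started:', details.get('startTimeUtc'), '| Ended:', details.get('endTimeUtc'))
# -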
#
# In this case, note that the **logFiles** entry in the details indicates that no log files were generated. That's typical for an inline experiment like this one, but things get more interesting when you run a script as an experiment, which is what we'll look at next.
# ## Run an experiment script
#
# In the previous example, you ran an experiment inline in this notebook. A more flexible solution is to create a separate script for the experiment, store it in a folder along with any other files it needs, and use Azure ML to run the experiment based on the script in the folder.
#
# First, let's create a folder for the experiment files and copy the data into it.
# +
import os, shutil
# Create a folder for the experiment files
folder_name = 'diabetes-experiment-files'
experiment_folder = './' + folder_name
os.makedirs(folder_name, exist_ok=True)
# Copy the data file into the experiment folder
shutil.copy('data/diabetes.csv', os.path.join(folder_name, "diabetes.csv"))
# -
# Next, you'll create a Python script containing the code for the experiment and save it in the experiment folder.
#
# > **Note**: Running the following cell *creates* the script file, but doesn't run it!
# +
# %%writefile $folder_name/diabetes_experiment.py
from azureml.core import Run
import pandas as pd
import os
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
data = pd.read_csv('diabetes.csv')
# Count the rows and log the result
row_count = (len(data))
run.log('observations', row_count)
print('Analyzing {} rows of data'.format(row_count))
# Count and log the label counts
diabetic_counts = data['Diabetic'].value_counts()
print(diabetic_counts)
for k, v in diabetic_counts.items():
    run.log('Label:' + str(k), v)
# Save a sample of the data in the outputs folder (which gets uploaded automatically)
os.makedirs('outputs', exist_ok=True)
data.sample(100).to_csv("outputs/sample.csv", index=False, header=True)
# Complete the run
run.complete()
# -
# This code is a simplified version of the inline code used before. However, note the following:
# - It uses the `Run.get_context()` method to retrieve the experiment run context when the script is run.
# - It loads the diabetes data from the folder where the script is located.
# - It creates a folder named **outputs** and writes the sample file to it; this folder is automatically uploaded to the experiment run.
#
# Now you're almost ready to run the experiment. To run the script, you must create a **ScriptRunConfig** that identifies the Python script file to be run in the experiment, and then run an experiment based on it.
#
# > **Note**: The ScriptRunConfig also determines the compute target and Python environment. In this case, the Python environment is defined to include several conda and pip packages, but the compute target is omitted, so the default local compute will be used (a sketch of pointing the same configuration at a remote compute target is shown after the next cell).
#
# The following cell configures and submits the script-based experiment.
# +
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.widgets import RunDetails
# Create a Python environment for the experiment (from a .yml file)
env = Environment.from_conda_specification("experiment_env", "environment.yml")
# Create a script config
script_config = ScriptRunConfig(source_directory=experiment_folder,
script='diabetes_experiment.py',
environment=env)
# submit the experiment
experiment = Experiment(workspace=ws, name='mslearn-diabetes')
run = experiment.submit(config=script_config)
RunDetails(run).show()
run.wait_for_completion()
# -
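# As an aside (not part of the original lab), the same **ScriptRunConfig** could be pointed at a remote compute target through its **compute_target** parameter. The cluster name below is hypothetical, and creating the configuration does not validate or submit anything.
# +
# Sketch only: target a (hypothetical) existing compute cluster instead of local compute
script_config_remote = ScriptRunConfig(source_directory=experiment_folder,
                                       script='diabetes_experiment.py',
                                       environment=env,
                                       compute_target='cpu-cluster')   # hypothetical cluster name
# -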
# As before, you can use the widget or the link to the experiment in [Azure Machine Learning studio](https://ml.azure.com) to view the outputs generated by the experiment, and you can also write code to retrieve the metrics and files it generated.
# Get logged metrics
metrics = run.get_metrics()
for key in metrics.keys():
    print(key, metrics.get(key))
print('\n')
for file in run.get_file_names():
    print(file)
# Note that this time the run generated some log files. You can view these in the widget, or use the **get_details_with_logs** method as before; this time the output will include the log data.
run.get_details_with_logs()
# Although you can view the log details in the output above, it's usually easier to download the log files and view them in a text editor.
# +
import os
log_folder = 'downloaded-logs'
# Download all files
run.get_all_logs(destination=log_folder)
# Verify the files have been downloaded
for root, directories, filenames in os.walk(log_folder):
    for filename in filenames:
        print(os.path.join(root, filename))
# -
# ## View experiment run history
#
# Now that you've run the same experiment multiple times, you can view the history in [Azure Machine Learning studio](https://ml.azure.com) and explore each logged run. Alternatively, you can use the SDK to retrieve an experiment by name from the workspace and iterate through its runs.
# +
from azureml.core import Experiment, Run
diabetes_experiment = ws.experiments['mslearn-diabetes']
for logged_run in diabetes_experiment.get_runs():
    print('Run ID:', logged_run.id)
    metrics = logged_run.get_metrics()
    for key in metrics.keys():
        print('-', key, metrics.get(key))
# -
# ## Use MLflow
#
# MLflow is an open-source platform for managing machine learning processes. It is commonly (but not exclusively) used in Databricks environments to coordinate experiments and track metrics. In Azure Machine Learning experiments, you can use MLflow to track metrics as an alternative to the native log functionality.
#
# To take advantage of this capability, you'll need the **azureml-mlflow** package, so let's make sure it's installed.
# !pip show azureml-mlflow
# ### Use MLflow with an inline experiment
#
# To use MLflow to track metrics for an inline experiment, you must set the MLflow *tracking URI* to the workspace where the experiment is being run. This enables you to use **mlflow** tracking methods to log data to the experiment run.
# +
from azureml.core import Experiment
import pandas as pd
import mlflow
# Set the MLflow tracking URI to the workspace
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
# Create an Azure ML experiment in your workspace
experiment = Experiment(workspace=ws, name='mslearn-diabetes-mlflow')
mlflow.set_experiment(experiment.name)
# start the MLflow experiment
with mlflow.start_run():
    print("Starting experiment:", experiment.name)
    # Load data
    data = pd.read_csv('data/diabetes.csv')
    # Count the rows and log the result
    row_count = (len(data))
    mlflow.log_metric('observations', row_count)
    print("Run complete")
# -
# Now let's look at the metrics that were logged during the run.
# +
# Get the latest run of the experiment
run = list(experiment.get_runs())[0]
# Get logged metrics
print("\nMetrics:")
metrics = run.get_metrics()
for key in metrics.keys():
    print(key, metrics.get(key))
# Get a link to the experiment in Azure ML studio
experiment_url = experiment.get_portal_url()
print('See details at', experiment_url)
# -
# After running the code above, you can use the link that is displayed to view the experiment in Azure Machine Learning studio. Then select the latest run of the experiment and view its **Metrics** tab to see the logged metric.
#
# ### Use MLflow in an experiment script
#
# You can also use MLflow to track metrics in an experiment script.
#
# Run the following two cells to create a folder and a script for an experiment that uses MLflow.
# +
import os, shutil
# Create a folder for the experiment files
folder_name = 'mlflow-experiment-files'
experiment_folder = './' + folder_name
os.makedirs(folder_name, exist_ok=True)
# Copy the data file into the experiment folder
shutil.copy('data/diabetes.csv', os.path.join(folder_name, "diabetes.csv"))
# +
# %%writefile $folder_name/mlflow_diabetes.py
from azureml.core import Run
import pandas as pd
import mlflow
# start the MLflow experiment
with mlflow.start_run():
    # Load data
    data = pd.read_csv('diabetes.csv')
    # Count the rows and log the result
    row_count = (len(data))
    print('observations:', row_count)
    mlflow.log_metric('observations', row_count)
# -
# When you use MLflow tracking in an Azure ML experiment script, the MLflow tracking URI is set automatically when you start the experiment run. However, the environment in which the script is to be run must include the required **mlflow** packages.
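# For reference, here is a hedged sketch of how an environment that includes the mlflow packages could be defined directly in code instead of from a .yml file; the environment name and package list below are illustrative, not taken from the lab files.
# +
# Define an experiment environment that includes mlflow via pip (sketch)
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
sketch_env = Environment('mlflow-sketch-env')
sketch_env.python.conda_dependencies = CondaDependencies.create(
    pip_packages=['azureml-defaults', 'mlflow', 'azureml-mlflow', 'pandas'])
# -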
# +
from azureml.core import Experiment, ScriptRunConfig, Environment
from azureml.widgets import RunDetails
# Create a Python environment for the experiment (from a .yml file)
env = Environment.from_conda_specification("experiment_env", "environment.yml")
# Create a script config
script_mlflow = ScriptRunConfig(source_directory=experiment_folder,
script='mlflow_diabetes.py',
environment=env)
# submit the experiment
experiment = Experiment(workspace=ws, name='mslearn-diabetes-mlflow')
run = experiment.submit(config=script_mlflow)
RunDetails(run).show()
run.wait_for_completion()
# -
# As usual, you can get the logged metrics when the experiment run has finished.
# Get logged metrics
metrics = run.get_metrics()
for key in metrics.keys():
    print(key, metrics.get(key))
# > **More Information**: To find out more about running experiments, see [this topic](https://docs.microsoft.com/azure/machine-learning/how-to-manage-runs) in the Azure ML documentation. For details of how to log metrics in a run, see [this topic](https://docs.microsoft.com/azure/machine-learning/how-to-track-experiments). For more information about integrating Azure ML experiments with MLflow, see [this topic](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow).
| 04 - Run Experiments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multiple Movements
#
# Let's see how our robot responds to moving multiple times without sensing!
#
# <img src='images/uncertain_motion.png' width=50% height=50% />
#
# First let's include our usual resource imports and display function.
# importing resources
import matplotlib.pyplot as plt
import numpy as np
# A helper function for visualizing a distribution.
def display_map(grid, bar_width=1):
    if(len(grid) > 0):
        x_labels = range(len(grid))
        plt.bar(x_labels, height=grid, width=bar_width, color='b')
        plt.xlabel('Grid Cell')
        plt.ylabel('Probability')
        plt.ylim(0, 1) # range of 0-1 for probability values
        plt.title('Probability of the robot being at each cell in the grid')
        plt.xticks(np.arange(min(x_labels), max(x_labels)+1, 1))
        plt.show()
    else:
        print('Grid is empty')
# ### Write code that moves 1000 times and then prints the resulting probability distribution.
#
# You are given the initial variables and a complete `move` function (that incorporates uncertainty), below.
# +
# given initial variables
p=[0, 1, 0, 0, 0]
# the color of each grid cell in the 1D world
world=['green', 'red', 'red', 'green', 'green']
# Z, the sensor reading ('red' or 'green')
Z = 'red'
pHit = 0.6
pMiss = 0.2
pExact = 0.8
pOvershoot = 0.1
pUndershoot = 0.1
# Complete the move function
def move(p, U):
    q=[]
    # iterate through all values in p
    for i in range(len(p)):
        # use the modulo operator to find the new location for a p value
        # this finds an index that is shifted by the correct amount
        index = (i-U) % len(p)
        nextIndex = (index+1) % len(p)
        prevIndex = (index-1) % len(p)
        s = pExact * p[index]
        s = s + pOvershoot * p[nextIndex]
        s = s + pUndershoot * p[prevIndex]
        # append the correct, modified value of p to q
        q.append(s)
    return q
# Here is code for moving twice
# p = move(p, 1)
# p = move(p, 1)
# print(p)
# display_map(p)
# +
## Write code for moving 1000 times
for k in range(1000):
    p = move(p, 1)
print(p)
display_map(p)
# -
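# After many motion updates with no sensing, the distribution loses information and approaches the uniform distribution (each of the 5 cells near 0.2), which is the state of maximum uncertainty. The quick check below is an added illustration, not part of the original exercise.
# +
# Verify that the distribution is numerically uniform after 1000 uncertain moves
print(np.allclose(p, 1.0/len(p)))
# -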
| 07_Object_Tracking_Localisation/4_2_Robot_Localization/8_1. Multiple Movements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Synthesis in programmatic mode
# + [markdown] deletable=true editable=true
# ## Simple synthesis in a chromosphere
# + [markdown] deletable=true editable=true
# For simple calculations, like synthesizing spectral lines in simple models, Hazel v2.0 can be used in programmatic
# mode. For instance, let us generate a spectral window in the near-infrared and synthesize the He I 10830 A line
# with some parameters.
# + deletable=true editable=true
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as pl
import hazel
print(hazel.__version__)
label = ['I', 'Q', 'U', 'V']
# + [markdown] deletable=true editable=true
# Let's first do a simple experiment in which we synthesize the 10830 A line for a set of parameters. We start by instantiating a `hazel` model. We set the verbosity to a high level as an example, but you should lower it when doing calculations to avoid crowding your terminal:
# + deletable=true editable=true
mod = hazel.Model(working_mode='synthesis', verbose=3)
# + [markdown] deletable=true editable=true
# The first thing to do with the model is to add a spectral region for our synthesis.
# + deletable=true editable=true
mod.add_spectral({'Name': 'spec1', 'Wavelength': [10826, 10833, 150], 'topology': 'ch1',
'LOS': [0.0,0.0,90.0], 'Boundary condition': [1.0,0.0,0.0,0.0]})
# + [markdown] deletable=true editable=true
# Note that we need to define the line-of-sight angles, the boundary condition, the wavelength range, the name, and the topology. In this case we specify that the topology is just a single chromosphere, which we label `ch1`. We now need to define this chromosphere:
# + deletable=true editable=true
mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
# + [markdown] deletable=true editable=true
# Now that we have defined all elements of the synthesis, we finish the setup by invoking the following, which will add all topologies to the spectrum and remove unused atmospheres (if any):
# + deletable=true editable=true
mod.setup()
# + [markdown] deletable=true editable=true
# It's time now to modify the parameters of the chromosphere and do some plots. You can access the wavelength axis and the synthetic spectrum through the `wavelength_axis` and `stokes` properties of `mod.spectrum[name]`, where `name` is the name given to the spectral region.
# + deletable=true editable=true
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
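# set_parameters arguments, in order (as labelled later in this notebook): Bx, By, Bz, tau, v, delta, beta, a
# the second argument to set_parameters is the filling factor of the atmosphere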
for j in range(10):
for i in range(4):
        mod.atmospheres['ch1'].set_parameters([50.0*j,0.0,100.0*j,0.2,0.0,8.0,1.0,0.0], 1.0)
        mod.synthesize()
        ax[i].plot(mod.spectrum['spec1'].wavelength_axis - 10830, mod.spectrum['spec1'].stokes[i,:])
for i in range(4):
    ax[i].set_xlabel('Wavelength - 10830[$\AA$]')
    ax[i].set_ylabel('{0}/Ic'.format(label[i]))
    ax[i].set_xlim([-2,2])
pl.tight_layout()
# + [markdown] deletable=true editable=true
# As another example, let's generate a profile for an off-limb observation. To this end, we simply set $\theta_\mathrm{obs}=90^\circ$ and set the intensity boundary condition to zero.
# + deletable=true editable=true
mod = hazel.Model(working_mode='synthesis', verbose=True)
mod.add_spectral({'Name': 'spec1', 'Wavelength': [10826, 10833, 150], 'topology': 'ch1',
'LOS': [90.0,0.0,90.0], 'Boundary condition': [0.0,0.0,0.0,0.0]})
mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
mod.setup()
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
for j in range(10):
for i in range(4):
        mod.atmospheres['ch1'].set_parameters([50.0*j,0.0,100.0*j,0.2,0.0,8.0,1.0,0.0], 1.0)
        mod.synthesize()
        ax[i].plot(mod.spectrum['spec1'].wavelength_axis - 10830, mod.spectrum['spec1'].stokes[i,:])
for i in range(4):
    ax[i].set_xlabel('Wavelength - 10830[$\AA$]')
    ax[i].set_ylabel('{0}/Ic'.format(label[i]))
    ax[i].set_xlim([-2,2])
pl.tight_layout()
# + [markdown] deletable=true editable=true
# ## Synthesizing with several atmospheres
# + [markdown] deletable=true editable=true
# Now that we know how to compute a single atmosphere, let's complicate things a little and add a photosphere below the chromosphere. We define the spectral region again and add both a photosphere and a chromosphere. For the photosphere, it's easier to pass a 1D model that will be automatically read and used. Otherwise, you would need to read the model yourself and change the parameters. The file contents follow:
# + deletable=true editable=true
# %cat photospheres/model_photosphere.1d
# + deletable=true editable=true
mod = hazel.Model(working_mode='synthesis', verbose=True)
mod.add_spectral({'Name': 'spec1', 'Wavelength': [10826, 10833, 150], 'topology': 'ph1->ch1',
'LOS': [0.0,0.0,90.0], 'Boundary condition': [1.0,0.0,0.0,0.0]})
mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
mod.add_photosphere({'Name': 'ph1', 'Spectral region': 'spec1', 'Spectral lines': [300],
'Wavelength': [10826, 10833], 'Reference atmospheric model': 'model_photosphere.1d'})
mod.setup()
# + [markdown] deletable=true editable=true
# And then do the plots again by changing the velocity of the chromospheric component.
# + deletable=true editable=true
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
for j in range(10):
for i in range(4):
        mod.atmospheres['ch1'].set_parameters([50.0,0.0,100.0,0.5,-20.0+4*j,8.0,1.0,0.0], 1.0)
        mod.synthesize()
        ax[i].plot(mod.spectrum['spec1'].wavelength_axis, mod.spectrum['spec1'].stokes[i,:])
for i in range(4):
    ax[i].set_xlabel('Wavelength [$\AA$]')
    ax[i].set_ylabel('{0}/Ic'.format(label[i]))
    ax[i].set_xlim([10826,10832])
# + [markdown] deletable=true editable=true
# ## Synthesizing with several chromospheres
# + [markdown] deletable=true editable=true
# Chromospheres (and atmospheres in general) in Hazel can be combined with filling factors (using `+`) or as stacked atmospheres (using `->`). We provide a few examples of that.
# + [markdown] deletable=true editable=true
# ### One atmosphere on top of the other
# + [markdown] deletable=true editable=true
# Let's stack two chromospheres together with a photosphere below.
# + deletable=true editable=true
mod = hazel.Model(working_mode='synthesis', verbose=True)
mod.add_spectral({'Name': 'spec1', 'Wavelength': [10826, 10833, 150], 'topology': 'ph1->ch1->ch2',
'LOS': [0.0,0.0,90.0], 'Boundary condition': [1.0,0.0,0.0,0.0]})
mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
mod.add_chromosphere({'Name': 'ch2', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
mod.add_photosphere({'Name': 'ph1', 'Spectral region': 'spec1', 'Spectral lines': [300],
'Wavelength': [10826, 10833], 'Reference atmospheric model': 'model_photosphere.1d'})
mod.setup()
# + [markdown] deletable=true editable=true
# Now we synthesize the emergent Stokes parameters by changing the velocity of the second component.
# + deletable=true editable=true
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
for j in range(9):
    # Bx, By, Bz, tau, v, delta, beta, a
    mod.atmospheres['ch1'].set_parameters([50.0,0.0,100.0,0.3,0.0,8.0,1.0,0.0], 1.0)
    mod.atmospheres['ch2'].set_parameters([50.0,0.0,100.0,0.3,2*j,8.0,1.0,0.0], 1.0)
    mod.synthesize()
    for i in range(4):
        ax[i].plot(mod.spectrum['spec1'].wavelength_axis, mod.spectrum['spec1'].stokes[i,:], color=pl.cm.autumn(25*j))
for i in range(4):
    ax[i].set_xlabel('Wavelength [$\AA$]')
    ax[i].set_ylabel('{0}/Ic'.format(label[i]))
    ax[i].set_xlim([10826,10832])
pl.tight_layout()
# + [markdown] deletable=true editable=true
# ### Atmospheres with filling factors
# + [markdown] deletable=true editable=true
# And now we combine the atmospheres with a filling factor.
# + deletable=true editable=true
mod = hazel.Model(working_mode='synthesis', verbose=True)
mod.add_spectral({'Name': 'spec1', 'Wavelength': [10826, 10833, 150], 'topology': 'ph1->ch1+ch2',
'LOS': [0.0,0.0,90.0], 'Boundary condition': [1.0,0.0,0.0,0.0]})
mod.add_chromosphere({'Name': 'ch1', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
mod.add_chromosphere({'Name': 'ch2', 'Spectral region': 'spec1', 'Height': 3.0, 'Line': '10830',
'Wavelength': [10826, 10833]})
mod.add_photosphere({'Name': 'ph1', 'Spectral region': 'spec1', 'Spectral lines': [300],
'Wavelength': [10826, 10833], 'Reference atmospheric model': 'model_photosphere.1d'})
mod.setup()
# + [markdown] deletable=true editable=true
# Note that filling factors are combined so that they add up to 1, so there is no need to normalize them by hand; the code does it automatically. We combine two atmospheres with different velocities and gradually change the filling factor from 0 to 1.
# + deletable=true editable=true
f, ax = pl.subplots(nrows=2, ncols=2, figsize=(10,10))
ax = ax.flatten()
for j in range(11):
    # Bx, By, Bz, tau, v, delta, beta, a
    mod.atmospheres['ch1'].set_parameters([50.0,0.0,100.0,1.0,10.0,8.0,1.0,0.0], j/10.0)
    mod.atmospheres['ch2'].set_parameters([50.0,0.0,100.0,1.0,-10.0,8.0,1.0,0.0], 1.0-j/10.0)
    mod.synthesize()
    for i in range(4):
        ax[i].plot(mod.spectrum['spec1'].wavelength_axis, mod.spectrum['spec1'].stokes[i,:], color=pl.cm.autumn(25*j))
for i in range(4):
    ax[i].set_xlabel('Wavelength [$\AA$]')
    ax[i].set_ylabel('{0}/Ic'.format(label[i]))
    ax[i].set_xlim([10826,10832])
pl.tight_layout()
# + deletable=true editable=true
| docs/notebooks/prog_synthesis.ipynb |