Dataset columns: markdown · code · output · license · path · repo_name
Convert unit of app size from GB into KB.
rating_df = app[["name", "size", "overall_rating", "current_rating",
                 "num_current_rating", "num_overall_rating"]].dropna()
rating_cleaned = {'1 star': 1, "1 and a half stars": 1.5, '2 stars': 2,
                  '2 and a half stars': 2.5, "3 stars": 3, "3 and a half stars": 3.5,
                  "4 stars": 4, '4 and a half stars': 4.5, "5 stars": 5}
rating_df.overall_rating = rating_df.overall_rating.replace(rating_cleaned)
rating_df['weighted_rating'] = np.divide(rating_df['num_current_rating'], rating_df['num_overall_rating']) * rating_df['current_rating'] \
    + (1 - np.divide(rating_df['num_current_rating'], rating_df['num_overall_rating'])) * rating_df['overall_rating']
_____no_output_____
MIT
notebooks/Correlation between app size and app quality.ipynb
jpzhangvincent/MobileAppRecommendSys
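The weighted rating above blends the current-version rating with the all-time rating in proportion to how many of the reviews are recent. A minimal sketch of the same formula as a standalone function (the function and argument names here are illustrative, not from the notebook):

```python
def weighted_rating(current, overall, n_current, n_overall):
    """Blend current-version and overall ratings by review counts."""
    w = n_current / n_overall          # fraction of ratings from the current version
    return w * current + (1 - w) * overall

# e.g. 30 of 100 ratings are for the current version
print(weighted_rating(current=4.5, overall=3.0, n_current=30, n_overall=100))  # 3.45
```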
Add the variable `weighted_rating`, a proxy for app quality, to the data set.
plt.scatter(rating_df['size'], rating_df['weighted_rating'])
plt.xlabel('Size of app')
plt.ylabel('Quality of app')
plt.title('Relationship between app size and quality')
plt.show()

rating_df_2 = rating_df[rating_df['size'] <= 500]
plt.scatter(rating_df_2['size'], rating_df_2['weighted_rating'])
plt.xlabel('Size of app')
plt.ylabel('Quality of app')
plt.title('Relationship between app size (less than 500) and quality')
plt.show()
_____no_output_____
MIT
notebooks/Correlation between app size and app quality.ipynb
jpzhangvincent/MobileAppRecommendSys
environment (on conda)

```bash
conda create -y -n holo python=3.7
conda activate holo
# installation instructions: https://xarray.pydata.org/en/stable/getting-started-guide/installing.html
conda install -y -c conda-forge xarray dask netCDF4 bottleneck
conda install -y -c conda-forge hvplot
conda install -y -c conda-forge selenium
conda install -y -c conda-forge firefox geckodriver
conda install -y -c conda-forge jupyter
```

make html & png

read the data
fs = glob.glob('out0*.nc')
fs.sort()
dd = []
for f in fs:
    ds = xr.open_dataset(f)
    U = ds['u'].values
    V = ds['v'].values
    Vmag = np.sqrt(U**2 + V**2) + 0.00000001
    angle = (np.pi/2.) - np.arctan2(U/Vmag, V/Vmag)
    ds['Vmag'] = (['t','x','y'], Vmag)
    ds['angle'] = (['t','x','y'], angle)
    ds = ds.drop(['u','v'])
    dd.append(ds)
dss = xr.concat(dd, dim="t")

with open('obst.dat', mode='r', encoding='utf-8') as response:
    l = next(response)
    l = next(response)

ll = l.replace('\n','').split(',')
i1, i2, j1, j2 = np.array(ll, dtype=int)
x1 = dss['x'].values[i1]
x2 = dss['x'].values[i2]
y1 = dss['y'].values[j1]
y2 = dss['y'].values[j2]
fPoly = hv.Polygons(np.array([[x1,y1],[x2,y1],[x2,y2],[x1,y2]])).options(fill_color='green')
_____no_output_____
MIT
makefig.ipynb
computational-sediment-hyd/2DH_Python
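The loop above converts the (u, v) velocity components into the magnitude/angle form that `hvplot.vectorfield` expects; restated as formulas (this only restates the code, with the small constant matching the `0.00000001` added to avoid division by zero):

```latex
|V| = \sqrt{u^2 + v^2} + \varepsilon, \qquad
\theta = \frac{\pi}{2} - \operatorname{atan2}\!\left(\frac{u}{|V|},\; \frac{v}{|V|}\right),
\qquad \varepsilon = 10^{-8}
```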
make html graph
fVec = dss.hvplot.vectorfield(x='x', y='y', groupby='t', angle='angle', mag='Vmag', hover=False)
fVor = dss['vortex'].hvplot(frame_height=220, frame_width=600, x='x', y='y', cmap='bwr', clim=(-10,10))
g = fVor * fVec * fPoly
g
_____no_output_____
MIT
makefig.ipynb
computational-sediment-hyd/2DH_Python
Thinning out time steps for GitHub
dssp = dss.isel(t=range(0, int(dss.dims['t']/2), 10))
fVec = dssp.hvplot.vectorfield(x='x', y='y', groupby='t', angle='angle', mag='Vmag', hover=False)
fVor = dssp['vortex'].hvplot(frame_height=220, frame_width=600, x='x', y='y', cmap='bwr', clim=(-10,10))
g = fVor * fVec * fPoly
_____no_output_____
MIT
makefig.ipynb
computational-sediment-hyd/2DH_Python
export html
d = hvplot.save(g, 'out.html')
del d
_____no_output_____
MIT
makefig.ipynb
computational-sediment-hyd/2DH_Python
export png
%%time
for i, t in enumerate(dssp['t'].values):
    gp = g[t].options(title=str(np.round(t,3)) + ' sec', toolbar=None)
    d = hvplot.save(gp, 'png' + str(i).zfill(8) + '.png')
    del d
_____no_output_____
MIT
makefig.ipynb
computational-sediment-hyd/2DH_Python
make gif
from PIL import Image

fs = glob.glob("*.png")
imgs = [Image.open(f) for f in fs]
# imgs = imgs[0:501:2]
# Save the collected frames as an animated GIF; `duration` sets the frame time and `loop` the repeat count.
d = imgs[0].save('out.gif', save_all=True, append_images=imgs[1:],
                 optimize=False, duration=0.5, loop=0)
del d
_____no_output_____
MIT
makefig.ipynb
computational-sediment-hyd/2DH_Python
1D convolution
b = np.random.random(4)
a = np.random.random(10)
np.convolve(a, b)

def convolve(a, b):
    if a.shape[0] < b.shape[0]:
        a, b = b, a
    return np.array([
        # important to remember the [::-1]
        np.matmul(a[i:i+b.shape[0]], b[::-1])  # equivalent to dot().sum()
        for i in range(a.shape[0] - b.shape[0] + 1)
    ])

plt.plot(convolve(a, b))
plt.plot(signal.convolve(a, b, mode="valid"))
plt.show()
# print(convolve(a, b), signal.convolve(a, b, mode="valid"))
_____no_output_____
MIT
misc/deep_learning_notes/Ch4_Recurrent_Networks/001_Optimization_Algorithms_and_Hyper-parameter_Search/Higher_Dimentional_Convolutions.ipynb
tmjnow/MoocX
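For reference, the "valid"-mode discrete convolution that both the hand-written `convolve` and `signal.convolve(..., mode="valid")` compute is, for a signal $a$ of length $N$ and kernel $b$ of length $M \le N$:

```latex
(a * b)[i] \;=\; \sum_{m=0}^{M-1} a[i+m]\, b[M-1-m], \qquad i = 0, \dots, N-M
```

The kernel index runs backwards, which is why the code reverses `b` with `b[::-1]` before taking the inner product.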
2D convolution
a = np.random.random((3, 6))
b = np.random.random((2, 2))

# 2D convolution
def convolve2d(a, b):
    #a_f = a.flatten().reshape((a.size, 1))
    #b_f = b.flatten().reshape((1, b.size))
    return np.array(
        [[(a[i:i+b.shape[0], j:j+b.shape[1]] * b[::-1, ::-1]).sum()
          for j in range(a.shape[1] - b.shape[1] + 1)]
         for i in range(a.shape[0] - b.shape[0] + 1)])

print(convolve2d(a, b) - signal.convolve2d(a, b, mode='valid'))

plt.figure(figsize=(12, 5))
plt.subplot(131)
plt.imshow(a, interpolation="none")
plt.subplot(132)
plt.imshow(convolve2d(a, b), interpolation="none")
plt.subplot(133)
plt.imshow(convolve2d(a, b) - signal.convolve2d(a, b, mode='valid'), interpolation="none")
plt.show()
[[ 0.00000000e+00  1.11022302e-16  0.00000000e+00  0.00000000e+00  0.00000000e+00]
 [ 0.00000000e+00  0.00000000e+00  0.00000000e+00  2.22044605e-16  2.22044605e-16]]
MIT
misc/deep_learning_notes/Ch4_Recurrent_Networks/001_Optimization_Algorithms_and_Hyper-parameter_Search/Higher_Dimentional_Convolutions.ipynb
tmjnow/MoocX
The small nonzero entries in the difference come from floating-point imprecision.

3D convolution (for video applications)
a = np.random.random((3, 6, 4))
b = np.random.random((2, 2, 3))

# 3D convolution
def convolve3d(a, b):
    #a_f = a.flatten().reshape((a.size, 1))
    #b_f = b.flatten().reshape((1, b.size))
    return np.array(
        [[[(a[i:i+b.shape[0], j:j+b.shape[1], k:k+b.shape[2]] * b[::-1, ::-1, ::-1]).sum()
           for k in range(a.shape[2] - b.shape[2] + 1)]
          for j in range(a.shape[1] - b.shape[1] + 1)]
         for i in range(a.shape[0] - b.shape[0] + 1)])
_____no_output_____
MIT
misc/deep_learning_notes/Ch4_Recurrent_Networks/001_Optimization_Algorithms_and_Hyper-parameter_Search/Higher_Dimentional_Convolutions.ipynb
tmjnow/MoocX
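The 3D version is never checked against a reference in the notebook; a quick sanity check against `scipy.signal.convolve` in "valid" mode (a small sketch, assuming `numpy` and `scipy.signal` are imported as in the cells above) would look like:

```python
from scipy import signal
import numpy as np

a = np.random.random((3, 6, 4))
b = np.random.random((2, 2, 3))

# signal.convolve handles n-dimensional arrays, so it serves as a reference
diff = convolve3d(a, b) - signal.convolve(a, b, mode="valid")
print(np.abs(diff).max())  # should be ~1e-16, i.e. floating-point noise
```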
... ***CURRENTLY UNDER DEVELOPMENT*** ...

Obtain synthetic waves and water level time series under a climate change scenario (future AWTs occurrence probability)

inputs required:
* Historical DWTs (for plotting)
* Historical wave families (for plotting)
* Synthetic DWTs climate change
* Historical intradaily hydrograph parameters
* TCs waves
* Fitted multivariate extreme model for the waves associated to each DWT

in this notebook:
* Generate synthetic time series of wave conditions
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# common
import os
import os.path as op

# pip
import numpy as np
import xarray as xr
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt

# DEV: override installed teslakit
import sys
sys.path.insert(0, op.join(os.path.abspath(''), '..', '..', '..', '..'))

# teslakit
from teslakit.database import Database
from teslakit.climate_emulator import Climate_Emulator
from teslakit.waves import AWL, Aggregate_WavesFamilies
from teslakit.plotting.outputs import Plot_FitSim_Histograms
from teslakit.plotting.extremes import Plot_FitSim_AnnualMax, Plot_FitSim_GevFit, Plot_Fit_QQ
from teslakit.plotting.waves import Plot_Waves_Histogram_FitSim
_____no_output_____
MIT
notebooks/ROI/03_ClimateChange/S5_SLR_ENSO/01_12_Climate_Emulator.ipynb
teslakit/teslak
Database and Site parameters
# --------------------------------------
# Teslakit database
p_data = r'/Users/anacrueda/Documents/Proyectos/TESLA/data'

# offshore
db = Database(p_data)
db.SetSite('ROI')

# climate change - S5
db_S5 = Database(p_data)
db_S5.SetSite('ROI_CC_S5')

# climate emulator simulation modified path
p_S5_CE_sims = op.join(db_S5.paths.site.EXTREMES.climate_emulator, 'Simulations')

# --------------------------------------
# Load data for climate emulator simulation climate change: ESTELA DWT and TCs (MU, TAU)

DWTs_sim = db_S5.Load_ESTELA_DWT_sim()        # DWTs climate change

TCs_params = db.Load_TCs_r2_sim_params()      # TCs parameters (copula generated)
TCs_RBFs = db.Load_TCs_sim_r2_rbf_output()    # TCs numerical_IH-RBFs_interpolation output

probs_TCs = db.Load_TCs_probs_synth()         # TCs synthetic probabilities
pchange_TCs = probs_TCs['category_change_cumsum'].values[:]

l_mutau_wt = db.Load_MU_TAU_hydrograms()      # MU - TAU intradaily hydrographs for each DWT
MU_WT = np.array([x.MU.values[:] for x in l_mutau_wt])   # MU and TAU numpy arrays
TAU_WT = np.array([x.TAU.values[:] for x in l_mutau_wt])

# solve first 10 DWTs simulations
DWTs_sim = DWTs_sim.isel(n_sim=slice(0, 10))
#DWTs_sim = DWTs_sim.isel(time=slice(0, 365*40+10), n_sim=slice(0, 1))

print(DWTs_sim)
<xarray.Dataset>
Dimensions:      (n_sim: 10, time: 365244)
Coordinates:
  * time         (time) object 2000-01-01 00:00:00 ... 3000-01-01 00:00:00
Dimensions without coordinates: n_sim
Data variables:
    evbmus_sims  (time, n_sim) float32 ...
Attributes:
    source:   teslakit_v0.9.1
MIT
notebooks/ROI/03_ClimateChange/S5_SLR_ENSO/01_12_Climate_Emulator.ipynb
teslakit/teslak
Climate Emulator - Simulation
# --------------------------------------
# Climate Emulator extremes model fitting

# Load Climate Emulator
CE = Climate_Emulator(db.paths.site.EXTREMES.climate_emulator)
CE.Load()

# set a new path for S5 simulations
CE.Set_Simulation_Folder(p_S5_CE_sims, copy_WAVES_noTCs=False)  # climate change waves (no TCs) not simulated, DWTs have changed

# optional: list variables to override distribution to empirical
#CE.sim_icdf_empirical_override = ['sea_Hs_31',
#                                  'swell_1_Hs_1', 'swell_1_Tp_1',
#                                  'swell_1_Hs_2', 'swell_1_Tp_2']

# set simulated waves min-max filter
CE.sim_waves_filter.update({
    'hs': (0, 8),
    'tp': (2, 25),
    'ws': (0, 0.06),
})

# --------------------------------------
# Climate Emulator simulation

# each DWT series will generate a different set of waves
for n in DWTs_sim.n_sim:
    print('- Sim: {0} -'.format(int(n)+1))

    # Select DWTs simulation
    DWTs = DWTs_sim.sel(n_sim=n)

    # Simulate waves
    n_ce = 1  # (one CE sim. for each DWT sim.)
    WVS_sim = CE.Simulate_Waves(DWTs, n_ce, filters={'hs': True, 'tp': True, 'ws': True})

    # Simulate TCs and update simulated waves
    TCs_sim, WVS_upd = CE.Simulate_TCs(DWTs, WVS_sim, TCs_params, TCs_RBFs, pchange_TCs, MU_WT, TAU_WT)

    # store simulation data
    CE.SaveSim(WVS_sim, TCs_sim, WVS_upd, int(n))
- Sim: 1 -
MIT
notebooks/ROI/03_ClimateChange/S5_SLR_ENSO/01_12_Climate_Emulator.ipynb
teslakit/teslak
Strings in Python

What is a string?

A "string" is a series of characters of arbitrary length. Strings are immutable - they cannot be changed once created. When you modify a string, you automatically make a copy and modify the copy.
s1 = 'Godzilla'
print(s1, s1.upper(), s1)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
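A quick illustration of immutability (a minimal sketch, not from the original notebook): methods that appear to modify a string actually return a new one.

```python
s = 'Godzilla'
try:
    s[0] = 'B'          # in-place modification is not allowed
except TypeError as e:
    print(e)            # 'str' object does not support item assignment

t = s.upper()           # returns a *new* string; s is unchanged
print(s, t)             # Godzilla GODZILLA
```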
String literals

A "literal" is essentially a string constant, already spelled out for you. Python may display a string using either single or double quotes on output; that's just a formatting choice.
"Godzilla"
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Single and double quotes

Generally, a string literal can be in single ('), double ("), or triple (''') quotes. Single and double quotes are equivalent - use whichever you prefer (but be consistent). If you need to have a single or double quote in your literal, surround your literal with the other type, or use the backslash to escape the quote.
"Godzilla's a kaiju." 'Godzilla\'s a kaiju.' 'We call him... "Godzilla".'
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Triple quotes (''')

Triple quotes are a special form of quoting used for documenting your Python files (docstrings). We won't discuss that type here.

Raw strings

Raw strings don't use any escape character interpretation. Use them when you have a complicated string that you don't want to clutter with lots of backslashes. Python puts them in for you.
print('This is a\ncomplicated string with newline escapes in it.')
print(r'This is a\ncomplicated string with newline escapes in it.')
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
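Although the notebook skips docstrings, a one-line illustration of the triple-quoted form (an illustrative sketch, not from the original material):

```python
def roar():
    """Return Godzilla's trademark roar.

    Triple-quoted strings may span multiple lines and are stored
    in the function's __doc__ attribute.
    """
    return 'SKREEONK!'

print(roar.__doc__)
```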
Strings and numbers
x = int('122', 3)
x + 1
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
String objects

String objects are just the string variables you create in Python.
kaiju = 'Godzilla'
print(kaiju)
kaiju
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Note the print() call shows no quotes, while the simple variable name did. That is a Python output convention. Just entering the name will call the repr() method, which displays the value of the argument as Python would see it when it reads it in, not as the user wants it.
repr(kaiju)
print(repr(kaiju))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
String operators

When you read text from a file, it's just that - text. No matter what the data represents, it's still text. To use it as a number, you have to explicitly convert it to a number.
one = 1
two = '2'
print(one, two, one + two)   # raises TypeError: you must convert '2' first

one = 1
two = int('2')
print(one, two, one + two)

num1 = 1.1
num2 = float('2.2')
print(num1, num2, num1 + num2)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
You can also do this with hexadecimal and octal numbers, or any other base, for that matter.
print(int('FF', 16))
print(int('0xff', 16))
print(int('777', 8))
print(int('0777', 8))
print(int('222', 7))
print(int('110111001', 2))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
If the conversion cannot be done, an exception is thrown.
print(int('0xGG', 16))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Concatenation
kaiju1 = 'Godzilla'
kaiju2 = 'Mothra'
kaiju1 + ' versus ' + kaiju2
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Repetition
'Run away! ' * 3
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
String keywords

in()

NOTE: This _particular_ statement is false regardless of how the statement is evaluated! :^)
'Godzilla' in 'Godzilla vs Gamera'
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
String functions

len()
len(kaiju)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
String methods

Remember - methods are functions attached to objects, accessed via the 'dot' notation.

Basic formatting and manipulation

capitalize()/lower()/upper()/swapcase()/title()
kaiju.capitalize()
kaiju.lower()
kaiju.upper()
kaiju.swapcase()
'godzilla, king of the monsters'.title()
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
center()/ljust()/rjust()
kaiju.center(20, '*')
kaiju.ljust(20, '*')
kaiju.rjust(20, '*')
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
expandtabs()
tabbed_kaiju = '\tGodzilla'
print('[' + tabbed_kaiju + ']')
print('[' + tabbed_kaiju.expandtabs(16) + ']')
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
join()
' vs '.join(['Godzilla', 'Hedorah'])
','.join(['Godzilla', 'Mothra', 'King Ghidorah'])
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
strip()/lstrip()/rstrip()
' Godzilla '.strip()
'xxxGodzillayyy'.strip('xy')
' Godzilla '.lstrip()
' Godzilla '.rstrip()
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
partition()/rpartition()
battle = 'Godzilla x Gigan'
battle.partition(' x ')

battle = 'Godzilla and Jet Jaguar vs. Gigan and Megalon'
battle.partition(' vs. ')

battle = 'Godzilla vs Megalon vs Jet Jaguar'
battle.partition('vs')

battle = 'Godzilla vs Megalon vs Jet Jaguar'
battle.rpartition('vs')
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
replace()
battle = 'Godzilla vs Mothra'
battle.replace('Mothra', 'Anguiras')

battle = 'Godzilla vs a monster and another monster'
battle.replace('monster', 'kaiju', 2)

battle = 'Godzilla vs a monster and another monster and yet another monster'
battle.replace('monster', 'kaiju', 2)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
split()/rsplit()
battle = 'Godzilla vs King Ghidorah vs Mothra'
battle.split(' vs ')

kaijus = 'Godzilla,Mothra,King Ghidorah'
kaijus.split(',')

kaijus = 'Godzilla Mothra King Ghidorah'
kaijus.split()

kaijus = 'Godzilla,Mothra,King Ghidorah,Megalon'
kaijus.rsplit(',', 2)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
splitlines()
kaijus_in_lines = 'Godzilla\nMothra\nKing Ghidorah\nEbirah'
print(kaijus_in_lines)
kaijus_in_lines.splitlines()
kaijus_in_lines.splitlines(True)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
zfill()
age_of_Godzilla = 60
age_string = str(age_of_Godzilla)
print(age_string, age_string.zfill(5))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
String information

isXXX()
print('Godzilla'.isalnum())
print('*Godzilla*'.isalnum())
print('Godzilla123'.isalnum())

print('Godzilla'.isalpha())
print('Godzilla123'.isalpha())

print('Godzilla'.isdigit())
print('60'.isdigit())

print('SpaceGodzilla'.isspace())
print(' '.isspace())

print('Godzilla'.islower())
print('godzilla'.islower())

print('Godzilla'.isupper())
print('GODZILLA'.isupper())

print('Godzilla vs Mothra'.istitle())
print('Godzilla X Mothra'.istitle())
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
count()
monsters = 'Godzilla and Space Godzilla and MechaGodzilla'
print('There are', monsters.count('Godzilla'), 'Godzillas.')
print('There are', monsters.count('Godzilla', len('Godzilla')), 'pseudo-Godzillas.')
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
startswith()/endswith()
king_kaiju = 'Godzilla'
print(king_kaiju.startswith('God'))
print(king_kaiju.endswith('lla'))
print(king_kaiju.startswith('G'))
print(king_kaiju.endswith('amera'))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
find()/index()/rfind()/rindex()
kaiju_string = 'Godzilla,Gamera,Gorgo,Space Godzilla'
print('The first Godz is at position', kaiju_string.find('Godz'))
print('The second Godz is at position', kaiju_string.find('Godz', len('Godz')))
kaiju_string.index('Minilla')    # raises ValueError: substring not found
kaiju_string.rindex('Godzilla')
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Advanced features

decode()/encode()/translate()

Used to convert strings to/from Unicode and other encoding systems. Rarely used in scientific code.

String formatting

Similar to formatting in C, FORTRAN, etc. There is a _lot_ more to this than I am showing here.
kaiju = 'Godzilla'
age = 60
print('%s is %d years old.' % (kaiju, age))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
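For completeness, the same line in the two newer formatting styles (a small sketch; the original notebook shows only %-formatting):

```python
kaiju, age = 'Godzilla', 60
print('{} is {} years old.'.format(kaiju, age))  # str.format (Python 2.6+)
print(f'{kaiju} is {age} years old.')            # f-strings (Python 3.6+)
```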
The _string_ module

The _string_ module is the Python equivalent of "junk DNA" in living organisms. It's been around since the beginning, but many of its functions have been superseded by evolution. But some ancient code still relies on it, so they leave the old parts in.... For modern code, the _string_ module does have some useful constants and functions.
import string

print(string.ascii_letters)
print(string.ascii_lowercase)
print(string.ascii_uppercase)
print(string.digits)
print(string.hexdigits)
print(string.octdigits)
# string.letters, string.lowercase, and string.uppercase existed in Python 2 only;
# in Python 3 use the ascii_* constants above.
print(string.printable)
print(string.punctuation)
print(string.whitespace)
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
The _string_ module also provides the _Formatter_ class, which can be useful for sophisticated text formatting.

Regular Expressions

What is a regular expression?

Regular expressions ('regexps') are essentially a mini-language for describing string operations. Everything shown above with string methods and operators can be done with regular expressions. Most of the time, the regular expression version is more concise. But not always more readable.... To use regular expressions, you have to import the 're' module.
import re
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
A very short, whirlwind tour of regular expressions

Scanning
kaiju_truth = 'Godzilla is the King of the Monsters. Ebirah is also a monster, but looks like a giant lobster.'
re.findall('Godz', kaiju_truth)
print(re.findall('(^.+) is the King', kaiju_truth))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
For simple searches like this, using in() is typically easier. Regexps are case-sensitive by default.
print(re.findall(r'\. (.+) is also', kaiju_truth))
print(re.findall(r'(.+) is also a (.+)', kaiju_truth)[0])
print(re.findall(r'\. (.+) is also a (.+),', kaiju_truth)[0])
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Changing
some_kaiju = 'Godzilla, Space Godzilla, Mechagodzilla'
print(re.sub('Godzilla', 'Gamera', some_kaiju))
print(re.sub('(?i)Godzilla', 'Gamera', some_kaiju))
_____no_output_____
MIT
Day_00/02_Strings_and_FileIO/00 Strings in Python.ipynb
saudijack/unfpyboot
Intel® Distribution for GDB*

In this notebook, we will cover using the Intel® Distribution for GDB* to debug oneAPI applications on the GPU.

Sections
- [Intel Distribution for GDB Overview](#Intel-Distribution-for-GDB-Overview)
- [How does the Intel Distribution for GDB debug GPUs?](#How-does-Intel-Distribution-for-GDB-debug-GPUs?)
- [GDB Commands](#GDB-Commands)
- [Debug an Application](#Debug-an-Application)
- [Multi-Device Debugging](#Multi-Device-Debugging)

Note: Unlike other modules in the oneAPI Essentials series, this notebook is designed for the DevCloud and cannot be run in a local environment. This is because when GDB pauses the GPU execution, display rendering is also interrupted.

Learning Objectives

The goal of this notebook is to show how the Intel® Distribution for GDB* can help you debug GPU kernels. At the end of the module, you will be able to:
- Run the Intel Distribution for GDB.
- Understand inferiors, threads, and SIMD lanes as shown in GDB.
- Use different methods to examine local variables for different threads and lanes.

Intel Distribution for GDB Overview

Intel® Distribution for GDB* (the *gdb-oneapi* executable) is part of the Intel® oneAPI Base Toolkit. It can be used to debug oneAPI applications written in several different languages targeting various accelerator devices.

Major Features
* Multi-target: The debugger can orchestrate multiple targets for different architectures. This feature allows you to debug the "host" portion and the "kernel" of a DPC++ program in the same GDB* session.
* Auto-attach: The debugger automatically creates an inferior that attaches itself to the Intel® Graphics Technology target to be able to receive events and control the GPU for debugging.
* Thread and SIMD lanes: The debugger displays SIMD lane information for the GPU threads on the command line interface. You can switch among active threads and lanes.
* Support for debugging a kernel offloaded to a CPU, GPU, or FPGA-emulation device.

How does the Intel Distribution for GDB debug GPUs?

Compilation and Execution for Debug

When debugging oneAPI applications with gdb-oneapi, debug information for the GPU needs to be generated and embedded in the application. The compilation and execution process looks like the following.

1. Source code is compiled. Host code is compiled normally, while kernel code is compiled with debug info into the SPIR-V intermediate representation format embedded in the host binary.
   * Use the -g (generate debug info) and -O0 (disable optimization) compiler options to debug source.
   * May use -O2 to debug optimized code at the assembly level.
   * Use the same optimization level when linking, if compiling and linking separately.
   * Ahead-of-time (AOT) compilation also works with GPU debug and can be utilized to avoid JIT compilation every time the application is run.
2. Launch the application with `gdb-oneapi`:
   * `gdb-oneapi <application>`
3. The application runtime compiles the SPIR-V and debug info into ELF and DWARF formats.
4. GPU kernel code is executed and debugged.

Inferiors for GPUs

GDB creates objects called *inferiors* to represent the state of each program execution. An inferior usually corresponds to a debuggee process. For oneAPI applications, GDB will create one inferior for the native host target and additional inferiors for each GPU or GPU tile. When a GPU application is debugged, the debugger, by default, automatically launches a `gdbserver-gt` process to listen to GPU debug events. The `gdbserver-gt` target is then added to the debugger as an inferior.

To see information about the inferiors while debugging,
use the `info inferiors` GDB command.

Debugging Threaded GPU SIMD Code

GPU kernel code is written for a single work-item. When executing, the code is implicitly threaded and widened to vectors of work-items. In the Intel Distribution for GDB, variable locations are expressed as functions of the SIMD lane. The lane field is added to the thread representation in the form of `<inferior>.<thread>:<lane>`.

Users can use the `info threads` command to see information about the various active threads. The `thread` command can be used to switch among active threads and SIMD lanes. The `thread apply <thread>:<lane> <command>` form can be used to apply the specified command to the specified lanes.

SIMD Lanes Support:
* Only enabled SIMD lanes are displayed.
* SIMD width is not fixed.
* User can switch between enabled SIMD lanes.
* After a stop, GDB switches to an enabled SIMD lane.

GDB Commands

The following table lists some common GDB commands. If a command has special functionality for GPU debugging, its description is shown in orange. You may also consult the [Intel Distribution for GDB Reference Sheet](https://software.intel.com/content/www/us/en/develop/download/gdb-reference-sheet.html).

| Command | Description |
| ---: | :--- |
| help <cmd> | Print help information. |
| run [arg1, ... argN] | Start the program, optionally with arguments. |
| break <file>:<line> | Define a breakpoint at a specified line. |
| info break | Show defined breakpoints. |
| delete <N> | Remove Nth breakpoint. |
| step / next | Single-step a source line, stepping into / over function calls. |
| info args/locals | Show the arguments/local variables of the current function. |
| print <expression> | Print value of expression. |
| x/<format> <address> | Examine the memory at <address>. |
| up, down | Go one level up/down the function call stack. |
| disassemble | Disassemble the current function. If inside a GPU kernel, GPU instructions will be shown. |
| backtrace | Show the function call stack. |
| info inferiors | Display information about the inferiors. GPU debugging will display additional inferior(s) (gdbserver-gt). |
| info threads <thread> | Display information about threads, including their active SIMD lanes. |
| thread <thread>:<lane> | Switch context to the SIMD lane of the specified thread. |
| thread apply <thread>:<lane> <cmd> | Apply <cmd> to the specified lane of the thread. |
| set scheduler-locking on/step/off | Lock the thread scheduler. Keep other threads stopped while the current thread is stepping (step) or resumed (on) to avoid interference. Default (off). |
| set nonstop on/off | Enable/disable nonstop mode. Set before program starts. (off): When a thread stops, all other threads stop. Default. (on): When a thread stops, other threads keep running. |
| print/t $emask | Inspect the execution mask to show active SIMD lanes. |

Debug an Application

The kernel we're going to debug is a simple array transform function where the kernel adds 100 to even elements of the array and sets the odd elements to be -1. Below is the kernel code; the entire source code is [here](src/array-transform.cpp).

``` cpp
54     h.parallel_for(data_range, [=](id<1> index) {
55         size_t id0 = GetDim(index, 0);
56         int element = in[index];  // breakpoint-here
57         int result = element + 50;
58         if (id0 % 2 == 0) {
59             result = result + 50;  // then-branch
60         } else {
61             result = -1;           // else-branch
62         }
63         out[index] = result;
64     });
```

Compile the Code

Execute the following cell to compile the code. Notice the compiler options used to disable optimization and enable debug information.
! dpcpp -O0 -g src/array-transform.cpp -o bin/array-transform
_____no_output_____
MIT
DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/gdb_oneapi.ipynb
krisrak/oneAPI-samples
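As a concrete illustration of the lane-addressing syntax from the table, once the binary is built an interactive session could use the following command sequence (all commands appear in this module's own debug scripts; `2.1:4` addresses lane 4 of thread 1 in inferior 2):

```
(gdb) break array-transform.cpp:59
(gdb) run gpu
(gdb) info threads 2.*
(gdb) thread 2.1:4
(gdb) print element
(gdb) thread apply 2.1:* print element
```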
Create a debug script

To debug on the GPU, we're going to write the GDB debug commands to a file and then submit the execution of the debugger to a node with GPUs.

In our first script, we'll take a look at how inferiors, threads, and SIMD lanes are represented. Our debug script will perform the following tasks:
1. Set a temporary breakpoint in the DPC++ kernel at line 59.
2. Run the application in the debugger.
3. Display information about the active inferiors once the breakpoint is encountered.
4. Display information about the active threads and SIMD lanes.
5. Display the execution mask showing which SIMD lanes are active.
6. Remove the breakpoint.
7. Continue running.

Execute the following cell to write the debug commands to file.
%%writefile lab/array-transform.gdb
# Set breakpoint in the kernel
echo ================= (1) tbreak 59 ===============\n
tbreak 59

# Run the application on the GPU
echo ================= (2) run gpu ===============\n
run gpu

echo ================= (3) info inferiors ============\n
info inferiors

echo ================= (4) info threads ============\n
info threads

# Show execution mask that shows active SIMD lanes.
echo ================= (5) print/t $emask ============\n
print/t $emask

echo ================= (6) c ==========================\n
c
_____no_output_____
MIT
DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/gdb_oneapi.ipynb
krisrak/oneAPI-samples
Start the Debugger

The [run_debug.sh](run_debug.sh) script runs the *gdb-oneapi* executable with our debug script on the compiled application. Execute the following cell to submit the debug job to a node with a GPU.
! chmod 755 q; chmod 755 run_debug.sh; if [ -x "$(command -v qsub)" ]; then ./q run_debug.sh; else ./run_debug.sh; fi
_____no_output_____
MIT
DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/gdb_oneapi.ipynb
krisrak/oneAPI-samples
Explanation of Output
1. You should see breakpoint 1 created at line 59.
2. The application is run with the *gpu* argument to execute on the GPU device. The program should stop at the kernel breakpoint.
3. With the context now automatically switched to the device, the *info inferiors* command will show the active GDB inferior(s). Here, you should see two: one corresponds to the host portion; the other, the active one, is gdbserver-gt, which is debugging the GPU kernel.
4. The *info threads* command allows you to examine the active threads and SIMD lanes. There should be 8 threads active. Notice that only even SIMD lanes are active; this is because only the even work-items encounter the breakpoint at line 59.
5. Printing the $emask execution mask also shows the even lanes being active.
6. Continue running the program.

Debug the Application Again

Now, we will debug the application again. This time, we'll switch threads, use the scheduler-locking feature, and print local variables. Run the following cell to write new GDB commands to array-transform.gdb.
%%writefile lab/array-transform.gdb
# Set breakpoints in the kernel
echo ================= (1) break 59 ===============\n
break 59

echo ================= (2) break 61 ===============\n
break 61

# Run the application on the GPU
echo ================= (3) run gpu ===============\n
run gpu

# Keep other threads stopped while the current thread is stepped
echo ================= (4) set scheduler-locking step ===============\n
set scheduler-locking step

echo ================= (5) next ===============\n
next

echo ================= (6) info threads 2.* ===============\n
info threads 2.*

echo ================= (7) print element ============\n
print element

# Switch thread
echo ================= (8) thread 2.1:4 =======================\n
thread 2.1:4

echo ================= (9) print element ============\n
print element

echo ================= (10) thread apply 2.1:* print element =======================\n
thread apply 2.1:* print element

# Inspect vector of a local variable, 8 elements, integer word
echo ================= (11) x/8dw &result =======================\n
x /8dw &result

echo ================= (12) d 1 =======================\n
d 1

echo ================= (13) d 2 =======================\n
d 2

echo ================= (14) c ==========================\n
c
_____no_output_____
MIT
DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/gdb_oneapi.ipynb
krisrak/oneAPI-samples
Start the Debugger Again to Examine Variables and Memory

Run the following cell to run the debugger a second time.
! chmod 755 q; chmod 755 run_debug.sh; if [ -x "$(command -v qsub)" ]; then ./q run_debug.sh; else ./run_debug.sh; fi
_____no_output_____
MIT
DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/11_Intel_Distribution_for_GDB/gdb_oneapi.ipynb
krisrak/oneAPI-samples
Cyclical Systems: An Example of the Crank-Nicolson Method

CH EN 2450 - Numerical Methods

**Prof. Tony Saad (www.tsaad.net)
Department of Chemical Engineering
University of Utah**
import numpy as np
from numpy import *
# %matplotlib notebook
# %matplotlib nbagg
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
# %matplotlib qt
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from scipy.integrate import odeint

def forward_euler(rhs, f0, tend, dt):
    ''' Computes the forward_euler method '''
    nsteps = int(tend/dt)
    f = np.zeros(nsteps)
    f[0] = f0
    time = np.linspace(0, tend, nsteps)
    for n in np.arange(nsteps-1):
        f[n+1] = f[n] + dt * rhs(f[n], time[n])
    return time, f

def forward_euler_system(rhsvec, f0vec, tend, dt):
    ''' Solves a system of ODEs using the Forward Euler method '''
    nsteps = int(tend/dt)
    neqs = len(f0vec)
    f = np.zeros((neqs, nsteps))
    f[:,0] = f0vec
    time = np.linspace(0, tend, nsteps)
    for n in np.arange(nsteps-1):
        t = time[n]
        f[:,n+1] = f[:,n] + dt * rhsvec(f[:,n], t)
    return time, f

def be_residual(fnp1, rhs, fn, dt, tnp1):
    ''' Nonlinear residual function for the backward Euler implicit time integrator '''
    return fnp1 - fn - dt * rhs(fnp1, tnp1)

def backward_euler(rhs, f0, tend, dt):
    ''' Computes the backward euler method
    :param rhs: an rhs function
    '''
    nsteps = int(tend/dt)
    f = np.zeros(nsteps)
    f[0] = f0
    time = np.linspace(0, tend, nsteps)
    for n in np.arange(nsteps-1):
        fn = f[n]
        tnp1 = time[n+1]
        fnew = fsolve(be_residual, fn, (rhs, fn, dt, tnp1))
        f[n+1] = fnew
    return time, f

def cn_residual(fnp1, rhs, fn, dt, tnp1, tn):
    ''' Nonlinear residual function for the Crank-Nicolson implicit time integrator '''
    return fnp1 - fn - 0.5 * dt * (rhs(fnp1, tnp1) + rhs(fn, tn))

def crank_nicolson(rhs, f0, tend, dt):
    nsteps = int(tend/dt)
    f = np.zeros(nsteps)
    f[0] = f0
    time = np.linspace(0, tend, nsteps)
    for n in np.arange(nsteps-1):
        fn = f[n]
        tnp1 = time[n+1]
        tn = time[n]
        fnew = fsolve(cn_residual, fn, (rhs, fn, dt, tnp1, tn))
        f[n+1] = fnew
    return time, f
_____no_output_____
MIT
topics/initial-value-problems/Cyclical Example.ipynb
jomorodi/NumericalMethods
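The residual functions above correspond to the implicit update equations. For reference, backward Euler and Crank-Nicolson solve, at each step,

```latex
\begin{align}
\text{Backward Euler:}\quad & y_{n+1} = y_n + \Delta t\, f(y_{n+1}, t_{n+1}),\\
\text{Crank-Nicolson:}\quad & y_{n+1} = y_n + \frac{\Delta t}{2}\left[ f(y_{n+1}, t_{n+1}) + f(y_n, t_n) \right],
\end{align}
```

and `fsolve` finds the root of the corresponding residual in the unknown $y_{n+1}$ at each step.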
Sharp Transient

Solve the ODE:
\begin{equation}
\frac{\text{d}y}{\text{d}t} = -1000 y + 3000 - 2000 e^{-t};\quad y(0) = 0
\end{equation}
The analytical solution is
\begin{equation}
y(t) = 3 - 0.998 e^{-1000t} - 2.002 e^{-t}
\end{equation}

We first plot the analytical solution.
y = lambda t : 3 - 0.998*exp(-1000*t) - 2.002*exp(-t)
t = np.linspace(0, 1, 500)
plt.plot(t, y(t))
plt.grid()
_____no_output_____
MIT
topics/initial-value-problems/Cyclical Example.ipynb
jomorodi/NumericalMethods
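This problem is stiff: the $e^{-1000t}$ term decays on a millisecond scale while the $e^{-t}$ term evolves over seconds. For forward Euler, the step size is limited by the fast mode, $\Delta t < 2/|\lambda| = 2/1000$. A quick check of that bound (a small illustrative sketch):

```python
lam = -1000.0                # stiff eigenvalue of the RHS
dt_max = 2.0 / abs(lam)      # forward Euler stability limit for y' = lam*y
print(dt_max)                # 0.002 -- the dt = 0.001 used below is just inside it
```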
Now let's solve this numerically. We first define the RHS for this function
def rhs_sharp_transient(f, t):
    return 3000 - 1000 * f - 2000 * np.exp(-t)
_____no_output_____
MIT
topics/initial-value-problems/Cyclical Example.ipynb
jomorodi/NumericalMethods
Let's solve this using forward Euler, backward Euler, and Crank-Nicolson.
y0 = 0
tend = 0.03
dt = 0.001
t, yfe = forward_euler(rhs_sharp_transient, y0, tend, dt)
t, ybe = backward_euler(rhs_sharp_transient, y0, tend, dt)
t, ycn = crank_nicolson(rhs_sharp_transient, y0, tend, dt)

plt.plot(t, y(t), label='Exact')
# plt.plot(t, yfe, 'r.-', markevery=1, markersize=10, label='Forward Euler')
plt.plot(t, ybe, 'k*-', markevery=2, markersize=10, label='Backward Euler')
plt.plot(t, ycn, 'o-', markevery=2, markersize=2, label='Crank-Nicolson')
plt.grid()
plt.legend()
_____no_output_____
MIT
topics/initial-value-problems/Cyclical Example.ipynb
jomorodi/NumericalMethods
Oscillatory Systems

Solve the ODE:
\begin{equation}
\frac{\text{d}y}{\text{d}t} = r \omega \sin(\omega t)
\end{equation}
The analytical solution is
\begin{equation}
y(t) = r - r \cos(\omega t)
\end{equation}

First plot the analytical solution.
r = 0.5
ω = 0.02
y = lambda t : r - r * cos(ω*t)
t = np.linspace(0, 100*pi)
plt.clf()
plt.plot(t, y(t))
plt.grid()
_____no_output_____
MIT
topics/initial-value-problems/Cyclical Example.ipynb
jomorodi/NumericalMethods
Let's solve this numerically
def rhs_oscillatory(f, t):
    r = 0.5
    ω = 0.02
    return r * ω * sin(ω*t)

y0 = 0
tend = 100*pi
dt = 10
t, yfe = forward_euler(rhs_oscillatory, y0, tend, dt)
t, ybe = backward_euler(rhs_oscillatory, y0, tend, dt)
t, ycn = crank_nicolson(rhs_oscillatory, y0, tend, dt)

plt.plot(t, y(t), label='Exact')
plt.plot(t, yfe, 'r.-', markevery=1, markersize=10, label='Forward Euler')
plt.plot(t, ybe, 'k*-', markevery=2, markersize=10, label='Backward Euler')
plt.plot(t, ycn, 'o-', markevery=2, markersize=2, label='Crank-Nicolson')
plt.grid()
plt.legend()
plt.savefig('cyclical-system-example.pdf')

import urllib
import requests
from IPython.core.display import HTML

def css_styling():
    styles = requests.get("https://raw.githubusercontent.com/saadtony/NumericalMethods/master/styles/custom.css")
    return HTML(styles.text)

css_styling()
_____no_output_____
MIT
topics/initial-value-problems/Cyclical Example.ipynb
jomorodi/NumericalMethods
Unsupervised neural computation - Practical

Dependencies:
- Python (>= 2.6 or >= 3.3)
- NumPy (>= 1.6.1)
- SciPy (>= 0.12)
- SciKit Learn (>= 0.18.1)

Just as there are different ways in which we ourselves learn from our own surrounding environments, so it is with neural networks. In a broad sense, we may categorize the learning processes through which neural networks function as follows: learning with a teacher and learning without a teacher. These different forms of learning as performed on neural networks parallel those of human learning.

Learning with a teacher is also referred to as supervised learning. In conceptual terms, we may think of the teacher as having knowledge of the environment, with that knowledge being represented by a set of input-output examples.

Unsupervised learning does not require target vectors for the outputs. Without input-output training pairs as external teachers, unsupervised learning is self-organized to produce consistent output vectors by modifying weights. That is to say, there are no labelled examples of the function to be learned by the network. For a specific task-independent measure, once the network has become tuned to the statistical regularities of the input data, the network develops the ability to discover internal structure for encoding features of the input or compress the input data, and thereby to create new classes automatically.

Radial Basis Functions and Radial Basis Function Networks - Semi-supervised Learning combining supervised and unsupervised learning

In machine learning, the radial basis function kernel, or RBF kernel, is a popular kernel function (typically Gaussian) used in various kernelized learning algorithms.
# Class implementing the basic RBF parametrization
# based on code from https://github.com/jeffheaton/aifh
import numpy as np

class RbfFunction(object):
    def __init__(self, dimensions, params, index):
        self.dimensions = dimensions
        self.params = params
        self.index = index

    @property
    def width(self):
        return self.params[self.index]

    @width.setter
    def width(self, value):
        self.params[self.index] = value

    def set_center(self, index, value):
        self.params[self.index + index + 1] = value

    def get_center(self, index):
        return self.params[self.index + index + 1]
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
RBFs can take various shapes: quadratic, multi-quadratic, inverse multi-quadratic, mexican hat. Yet the most used is the Gaussian.
# Class implementing a Gaussian RBF
class RbfGaussian(RbfFunction):
    def evaluate(self, x):
        value = 0
        width = self.width
        for i in range(self.dimensions):
            center = self.get_center(i)
            value += ((x[i] - center) ** 2) / (2.0 * width * width)
        return np.exp(-value)
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
An RBF network is an advanced machine learning algorithm that uses a series of RBF functions to perform regression. It can also perform classification by means of one-of-n encoding. The long-term memory of an RBF network is made up of the widths and centers of the RBF functions, as well as the input and output weightings.
# Class implementing a Gaussian RBF Network
class RbfNetwork(object):
    def __init__(self, input_count, rbf_count, output_count):
        """
        Create an RBF network with the specified shape.
        @param input_count: The input count.
        @param rbf_count: The RBF function count.
        @param output_count: The output count.
        """
        self.input_count = input_count
        self.output_count = output_count

        # calculate input and output weight counts
        # add 1 to output to account for an extra bias node
        input_weight_count = input_count * rbf_count
        output_weight_count = (rbf_count + 1) * output_count
        rbf_params = (input_count + 1) * rbf_count
        self.long_term_memory = np.zeros((input_weight_count + output_weight_count + rbf_params), dtype=float)

        self.index_input_weights = 0
        self.index_output_weights = input_weight_count + rbf_params

        self.rbf = {}

        # default the Rbf's to gaussian
        for i in range(0, rbf_count):
            rbf_index = input_weight_count + ((input_count + 1) * i)
            self.rbf[i] = RbfGaussian(input_count, self.long_term_memory, rbf_index)

    def compute_regression(self, input):
        """
        Compute the output for the network.
        @param input: The input pattern.
        @return: The output pattern.
        """
        # first, compute the output values of each of the RBFs
        # Add in one additional RBF output for bias (always set to one).
        rbf_output = [0] * (len(self.rbf) + 1)
        # bias
        rbf_output[len(rbf_output) - 1] = 1.0

        for rbfIndex in range(0, len(self.rbf)):
            # weight the input
            weighted_input = [0] * len(input)
            for inputIndex in range(0, len(input)):
                memory_index = self.index_input_weights + (rbfIndex * self.input_count) + inputIndex
                weighted_input[inputIndex] = input[inputIndex] * self.long_term_memory[memory_index]

            # calculate the rbf
            rbf_output[rbfIndex] = self.rbf[rbfIndex].evaluate(weighted_input)

        # Second, calculate the output, which is the weighted result of the RBF's.
        result = [0] * self.output_count

        for outputIndex in range(0, len(result)):
            sum_value = 0
            for rbfIndex in range(0, len(rbf_output)):
                # add 1 to rbf length for bias
                memory_index = self.index_output_weights + (outputIndex * (len(self.rbf) + 1)) + rbfIndex
                sum_value += rbf_output[rbfIndex] * self.long_term_memory[memory_index]
            result[outputIndex] = sum_value

        # finally, return the result.
        return result

    def reset(self):
        """
        Reset the network to a random state.
        """
        for i in range(0, len(self.long_term_memory)):
            self.long_term_memory[i] = np.random.uniform(0, 1)

    def compute_classification(self, input):
        """
        Compute the output and return the index of the output with the largest value.
        This is the class that the network recognized.
        @param input: The input pattern.
        @return:
        """
        output = self.compute_regression(input)
        return output.index(max(output))

    def copy_memory(self, source):
        """
        Copy the specified vector into the long term memory of the network.
        @param source: The source vector.
        """
        for i in range(0, len(source)):
            self.long_term_memory[i] = source[i]
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
The Iris dataset is a traditional benchmark for classification problems in ML. The data set consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimetres. Based on the combination of these four features, Fisher developed a linear discriminant model to distinguish the species from each other.

The Iris flower data set, or Fisher's Iris data set, is a multivariate data set introduced by Ronald Fisher in his 1936 paper "The use of multiple measurements in taxonomic problems" as an example of linear discriminant analysis. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. Based on Fisher's linear discriminant model, this data set became a typical test case for many statistical classification techniques in machine learning, such as support vector machines.

In the following we will use simulated annealing to fit an RBF network to the Iris data set, to classify the iris species correctly. Simulated annealing is a probabilistic technique for approximating the global optimum of a given function. Specifically, it is a metaheuristic to approximate global optimization in a large search space.
# Find the dataset
import os
import sys
from normalize import Normalize
from error import ErrorCalculation
from train import TrainAnneal
import numpy as np

irisFile = os.path.abspath("./data/iris.csv")

# Read the Iris data set
print('Reading CSV file: ' + irisFile)
norm = Normalize()
iris_work = norm.load_csv(irisFile)

# Extract the original iris species so we can display during the final validation
ideal_species = [row[4] for row in iris_work]

# Setup the first four fields to "range normalize" between -1 and 1.
for i in range(0, 4):
    norm.make_col_numeric(iris_work, i)
    norm.norm_col_range(iris_work, i, 0, 1)

# Discover all of the classes for column #4, the iris species.
classes = norm.build_class_map(iris_work, 4)
inv_classes = {v: k for k, v in classes.items()}

# Normalize iris species using one-of-n.
# We could have used equilateral as well. For an example of equilateral, see the example_nm_iris example.
norm.norm_col_one_of_n(iris_work, 4, classes, 0, 1)

# Prepare training data. Separate into input and ideal.
training = np.array(iris_work)
training_input = training[:, 0:4]
training_ideal = training[:, 4:7]

# Define the score of the training process of the network
def score_funct(x):
    """
    The score function for Iris anneal.
    @param x:
    @return:
    """
    global best_score
    global input_data
    global output_data
    # Update the network's long term memory to the vector we need to score.
    network.copy_memory(x)
    # Loop over the training set and calculate the output for each.
    actual_output = []
    for input_data in training_input:
        output_data = network.compute_regression(input_data)
        actual_output.append(output_data)
    # Calculate the error with MSE.
    result = ErrorCalculation.mse(np.array(actual_output), training_ideal)
    return result

# Create an RBF network. There are four inputs and three outputs.
# There are also four RBF functions used internally.
# You can experiment with different numbers of internal RBF functions.
# However, the input and output must match the data set.
inputs = 4
rbfs = 4
outputs = 3
network = RbfNetwork(inputs, rbfs, outputs)
network.reset()

# Create a copy of the long-term memory. This becomes the initial state.
x0 = list(network.long_term_memory)

# Perform the annealing.
# Train a machine learning algorithm using simulated annealing. Simulated annealing is a Monte Carlo algorithm
# that is based on annealing in metallurgy, a technique involving heating and controlled cooling of a
# material to increase the size of its crystals and reduce their defects; both are attributes of the material
# that depend on its thermodynamic free energy.
train = TrainAnneal()
train.display_iteration = True
train.train(x0, score_funct)

# Display the final validation. We show all of the iris data as well as the predicted species.
for i in range(0, len(training_input)):
    input_data = training_input[i]
    # Compute the output from the RBF network
    output_data = network.compute_regression(input_data)
    ideal_data = training_ideal[i]
    # Decode the three output neurons into a class number.
    class_id = norm.denorm_one_of_n(output_data)
    print(str(input_data) + " -> " + inv_classes[class_id] + ", Ideal: " + ideal_species[i])
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
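The `TrainAnneal` class comes from the course's helper modules and is not shown here. For intuition, a minimal, generic simulated-annealing loop over a parameter vector looks roughly like the following (an illustrative sketch, not the course implementation; the cooling schedule and perturbation scale are arbitrary choices):

```python
import numpy as np

def anneal(x0, score, n_iter=1000, t_start=1.0, t_end=1e-3, step=0.1):
    """Minimize score(x) by simulated annealing with geometric cooling."""
    x = np.array(x0, dtype=float)
    fx = score(x)
    best = (x.copy(), fx)
    for k in range(n_iter):
        t = t_start * (t_end / t_start) ** (k / (n_iter - 1))  # temperature
        cand = x + np.random.normal(scale=step, size=x.shape)   # random move
        fc = score(cand)
        # accept improvements always; accept uphill moves with prob exp(-df/t)
        if fc < fx or np.random.rand() < np.exp(-(fc - fx) / t):
            x, fx = cand, fc
        if fx < best[1]:
            best = (x.copy(), fx)
    return best

# e.g. minimize a simple quadratic
x_best, f_best = anneal([2.0, -3.0], lambda v: float(np.sum(v**2)))
print(x_best, f_best)
```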
It is often used when the search space is discrete (e.g., all tours that visit a given set of cities). For problems where finding an approximate global optimum is more important than finding a precise local optimum in a fixed amount of time, simulated annealing may be preferable to alternatives such as gradient descent.

Assignments

Given the RBFN API, please follow the next steps to train an RBF network to classify the Iris dataset.
# Perform the simulated annealing.
# Display the final validation. We show all of the iris data as well as the predicted species.
# Compute the output from the RBF network.
# Decode the three output neurons into a class number and print it.
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Vector Quantization

Vector quantization (VQ) is a form of competitive learning. Such an algorithm is able to discover structure in the input data. Generally speaking, vector quantization is a form of lossy data compression, lossy in the sense that some information contained in the input data is lost as a result of the compression.
![title](img/vq_alg.png)

An input data point belongs to a certain class if its position (in the 2D space) is closest to the class prototype, fulfilling the Voronoi partitioning (i.e., partitioning of a plane into regions based on distance to points in a specific subset of the plane).
![title](img/vq.png)

In a typical scenario, such behavior can be implemented with a neural network that consists of two layers: an input layer and a competitive layer with lateral inhibition. The input layer receives the available data. The competitive layer consists of neurons that compete with each other.
![title](img/vq_net.png)

The classic image processing example, Lena, an 8-bit grayscale bit-depth, 512 x 512 sized image, is used here to illustrate how `k`-means is used for vector quantization.
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt

from sklearn import cluster
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version

try:
    face = sp.face(gray=True)
except AttributeError:
    # Newer versions of scipy have face in misc
    from scipy import misc
    face = misc.face(gray=True)

n_clusters = 5
np.random.seed(0)

X = face.reshape((-1, 1))  # We need an (n_sample, n_feature) array
k_means = cluster.KMeans(n_clusters=n_clusters, n_init=4)
k_means.fit(X)
values = k_means.cluster_centers_.squeeze()
labels = k_means.labels_

# create an array from labels and values
face_compressed = np.choose(labels, values)
face_compressed.shape = face.shape

vmin = face.min()
vmax = face.max()
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Plot the results of the clustering: the original image, the quantized image, and the histogram.
# original face
plt.figure(1, figsize=(3, 2.2))
plt.imshow(face, cmap=plt.cm.gray, vmin=vmin, vmax=256)

# compressed face
plt.figure(2, figsize=(3, 2.2))
plt.imshow(face_compressed, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)

# equal bins face
regular_values = np.linspace(0, 256, n_clusters + 1)
regular_labels = np.searchsorted(regular_values, face) - 1
regular_values = .5 * (regular_values[1:] + regular_values[:-1])  # mean
regular_face = np.choose(regular_labels.ravel(), regular_values, mode="clip")
regular_face.shape = face.shape
plt.figure(3, figsize=(3, 2.2))
plt.imshow(regular_face, cmap=plt.cm.gray, vmin=vmin, vmax=vmax)

# histogram
plt.figure(4, figsize=(3, 2.2))
plt.clf()
plt.axes([.01, .01, .98, .98])
plt.hist(X, bins=256, color='.5', edgecolor='.5')
plt.yticks(())
plt.xticks(regular_values)
values = np.sort(values)
for center_1, center_2 in zip(values[:-1], values[1:]):
    plt.axvline(.5 * (center_1 + center_2), color='b')
for center_1, center_2 in zip(regular_values[:-1], regular_values[1:]):
    plt.axvline(.5 * (center_1 + center_2), color='b', linestyle='--')

plt.show()
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
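The k-means cells above do the quantization in batch form. The online, competitive-learning version of VQ shown in the algorithm figure updates only the winning prototype for each presented sample; a minimal sketch (the learning rate, epoch count, and cluster count are illustrative choices):

```python
import numpy as np

def vq_online(data, n_protos=3, lr=0.05, n_epochs=20, seed=0):
    """Online vector quantization: move the winning prototype toward each sample."""
    rng = np.random.default_rng(seed)
    protos = data[rng.choice(len(data), n_protos, replace=False)].astype(float)
    for _ in range(n_epochs):
        for x in data[rng.permutation(len(data))]:
            winner = np.argmin(np.linalg.norm(protos - x, axis=1))  # competition
            protos[winner] += lr * (x - protos[winner])             # winner update
    return protos

# e.g. three 2D clusters
pts = np.vstack([np.random.randn(100, 2) + c for c in [(0, 0), (5, 5), (0, 5)]])
print(vq_online(pts))
```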
Assignments

In this problem you should group 2D input points (x, y) into clusters and determine the center of each cluster. The number of required clusters is provided as an integer on the first line. Following that, the system provides an unknown number of 2D input data points (x, y), one per line. Continue reading until your program obtains no more data. You can safely assume you will read fewer than 1000 points. After reading, you should run the vector quantization algorithm to find the center(s) of the input data, and finally report each center position as an x, y coordinate. Present one center position per output line. The order of the center points in the output does not matter.

3 cluster VQ
![title](img/vq_3clust.png)
# load the datasets for training and testing
import numpy as np
import csv

with open('./data/vq_3clust_in.txt') as inputfile:
    train_data = list(csv.reader(inputfile))
with open('./data/vq_3clust_out.txt') as inputfile:
    test_data = list(csv.reader(inputfile))

# add network code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
6 cluster VQ
![title](img/vq_3clust.png)
# load the datasets for training and testing for the 6 cluster example
import numpy as np
import csv

with open('./data/vq_6clust_in.txt') as inputfile:
    train_data = list(csv.reader(inputfile))
with open('./data/vq_6clust_out.txt') as inputfile:
    test_data = list(csv.reader(inputfile))

# add network code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Self-Organizing Maps

In neurobiology, during neural growth, synapses are strengthened or weakened, in a process usually modelled as a competition for resources. In such a learning process, there is a competition between the neurons to fire. More precisely, neurons compete with each other (in accordance with a learning rule) for the "opportunity" to respond to features contained in the input data.
![title](img/som.png)

In its simplest form, such behaviour describes a "winner-takes-all" strategy. In such a strategy, the neuron with the greatest total input "wins" the competition and turns on; all the other neurons in the network then switch off. The aim of such learning mechanisms is to cluster the data.
![title](img/som_tr.png)

Kohonen's self-organizing map (SOM) is one of the most popular unsupervised neural network models. Developed as an associative memory model, it is an unsupervised learning algorithm with a simple structure and computational form, and is motivated by the retina-cortex mapping. The SOM can provide a topology-preserving mapping from input to output spaces, such that "nearby" sensory stimuli are represented in "nearby" regions.
![title](img/som_alg.png)
# Class implementing a basic SOM
import sys
import numpy as np
import scipy as sp
import scipy.spatial

class SelfOrganizingMap:
    """
    The weights of the output neurons base on the input from the input neurons.
    """
    def __init__(self, input_count, output_count):
        """
        The constructor.
        :param input_count: Number of input neurons
        :param output_count: Number of output neurons
        :return:
        """
        self.input_count = input_count
        self.output_count = output_count
        self.weights = np.zeros([self.output_count, self.input_count])
        self.distance = sp.spatial.distance.euclidean

    def calculate_error(self, data):
        bmu = BestMatchingUnit(self)
        bmu.reset()

        # Determine the BMU for each training element.
        for input in data:
            bmu.calculate_bmu(input)

        # update the error
        return bmu.worst_distance / 100.0

    def classify(self, input):
        if len(input) > self.input_count:
            raise Exception("Can't classify SOM with input size of {} "
                            "with input data of count {}".format(self.input_count, len(input)))

        min_dist = float('inf')   # was sys.maxfloat, which does not exist
        result = -1

        for i in range(self.output_count):
            dist = self.distance(input, self.weights[i])  # scipy's euclidean is called directly
            if dist < min_dist:
                min_dist = dist
                result = i

        return result

    def reset(self):
        self.weights = (np.random.rand(self.weights.shape[0], self.weights.shape[1]) * 2.0) - 1
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
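A quick smoke test of the class above; the values here are arbitrary:
# Smoke test of SelfOrganizingMap (arbitrary sizes and input).
import numpy as np

som = SelfOrganizingMap(input_count=2, output_count=4)
som.reset()                                  # random weights in [-1, 1]
print(som.classify(np.array([0.5, -0.2])))   # index of the closest output neuron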
The "Best Matching Unit" or BMU is a very important concept in the training for a SOM. The BMU is the output neuron that has weight connections to the input neurons that most closely match the current input vector. This neuron (and its "neighborhood") are the neurons that will receive training.
# Class implementing the competition stage in SOM, finding the best matching unit.
class BestMatchingUnit:
    """
    This class also tracks the worst distance (of all BMU's). This gives some
    indication of how well the network is trained, and thus becomes the
    "error" of the entire network.
    """
    def __init__(self, som):
        """
        Construct a BestMatchingUnit class. The training class must be provided.
        :param som: The SOM to evaluate.
        """
        # The owner of this class.
        self.som = som
        # What is the worst BMU distance so far, this becomes the error for the
        # entire SOM.
        self.worst_distance = 0

    def reset(self):
        """
        Reset the worst distance before a new pass over the training data.
        (This method was missing, although both the SOM and the trainer call it.)
        """
        self.worst_distance = 0

    def calculate_bmu(self, input):
        """
        Calculate the best matching unit (BMU). This is the output neuron that
        has the lowest Euclidean distance to the input vector.
        :param input: The input vector.
        :return: The output neuron number that is the BMU.
        """
        result = 0
        if len(input) > self.som.input_count:
            raise Exception(
                "Can't train SOM with input size of {} with input data of count {}.".format(
                    self.som.input_count, len(input)))
        # Track the lowest distance so far.
        lowest_distance = float("inf")
        for i in range(self.som.output_count):
            distance = self.calculate_euclidean_distance(self.som.weights, input, i)
            # Track the lowest distance, this is the BMU.
            if distance < lowest_distance:
                lowest_distance = distance
                result = i
        # Track the worst distance, this is the error for the entire network.
        if lowest_distance > self.worst_distance:
            self.worst_distance = lowest_distance
        return result

    def calculate_euclidean_distance(self, matrix, input, output_neuron):
        """
        Calculate the Euclidean distance for the specified output neuron and the
        input vector. This is the square root of the sum of the squared
        differences between the weight and input vectors.
        :param matrix: The matrix to get the weights from.
        :param input: The input vector.
        :param output_neuron: The neuron we are calculating the distance for.
        :return: The Euclidean distance.
        """
        diff = input - matrix[output_neuron]
        return np.sqrt(sum(diff * diff))
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
In the next section we analyze competitive training, as used in a winner-take-all neural network such as the self-organizing map (SOM). This is an unsupervised training method: no ideal data is needed in the training set, and if ideal data is provided it will be ignored. Training is done by looping over all of the training elements and calculating a "best matching unit" (BMU). This BMU output neuron is then adjusted to better "learn" this pattern. Additionally, this training may be applied to other "nearby" output neurons; the degree to which nearby neurons are updated is defined by the neighborhood function. A neighborhood function is required to determine the degree to which neurons neighboring the winning neuron are updated by each training iteration. Because this is unsupervised training, calculating an error to measure progress is difficult. The error is defined to be the "worst", or longest, Euclidean distance of any of the BMUs. This value should be minimized as learning progresses.
# Class implementing the basic training algorithm for a SOM
class BasicTrainSOM:
    """
    Because only the BMU neuron and its close neighbors are updated, you can
    end up with some output neurons that learn nothing. By default these
    neurons are not forced to win patterns that are not represented well.
    This spreads out the workload among all output neurons. This feature is
    not used by default, but can be enabled by setting the "force_winner"
    property.
    """
    def __init__(self, network, learning_rate, training, neighborhood):
        # The neighborhood function to use to determine to what degree a neuron
        # should be "trained".
        self.neighborhood = neighborhood
        # The learning rate. To what degree should changes be applied.
        self.learning_rate = learning_rate
        # The network being trained.
        self.network = network
        # How many neurons in the input layer.
        self.input_neuron_count = network.input_count
        # How many neurons in the output layer.
        self.output_neuron_count = network.output_count
        # Utility class used to determine the BMU.
        self.bmu_util = BestMatchingUnit(network)
        # Correction matrix.
        self.correction_matrix = np.zeros([network.output_count, network.input_count])
        # True if a winner is to be forced, see class description or the
        # _force_winners method. By default, this is False.
        self.force_winner = False
        # When used with autodecay, these are the starting and ending learning rates.
        self.start_rate = 0
        self.end_rate = 0
        # When used with autodecay, these are the starting and ending radii.
        self.start_radius = 0
        self.end_radius = 0
        # The current autodecay learning rate and radius.
        self.auto_decay_rate = 0
        self.auto_decay_radius = 0
        # The current radius.
        self.radius = 0
        # Training data.
        self.training = training

    def _apply_correction(self):
        """
        Loop over the synapses to be trained and apply any corrections that
        were determined by this training iteration.
        """
        np.copyto(self.network.weights, self.correction_matrix)

    def auto_decay(self):
        """
        Should be called each iteration if autodecay is desired.
        """
        if self.radius > self.end_radius:
            self.radius += self.auto_decay_radius
        if self.learning_rate > self.end_rate:
            self.learning_rate += self.auto_decay_rate
        self.neighborhood.radius = self.radius

    def copy_input_pattern(self, matrix, output_neuron, input):
        """
        Copy the specified input pattern to the weight matrix. This causes an
        output neuron to learn this pattern "exactly". This is useful when a
        winner is to be forced.
        :param matrix: The matrix that is the target of the copy.
        :param output_neuron: The output neuron to set.
        :param input: The input pattern to copy.
        """
        matrix[output_neuron, :] = input

    def decay(self, decay_rate, decay_radius):
        """
        Decay the learning rate and radius by the specified amount.
        :param decay_rate: The percent to decay the learning rate by.
        :param decay_radius: The percent to decay the radius by.
        """
        self.radius *= (1.0 - decay_radius)
        self.learning_rate *= (1.0 - decay_rate)
        self.neighborhood.radius = self.radius

    def _determine_new_weight(self, weight, input, current_neuron, bmu):
        """
        Determine the weight adjustment for a single neuron during a training
        iteration.
        :param weight: The starting weight.
        :param input: The input to this neuron.
        :param current_neuron: The neuron whose weight is being updated.
        :param bmu: The neuron that "won", the best matching unit.
        :return: The new weight value.
        """
        return weight + (self.neighborhood.fn(current_neuron, bmu)
                         * self.learning_rate * (input - weight))

    def _force_winners(self, matrix, won, least_represented):
        """
        Force any neurons that did not win to off-load patterns from
        overworked neurons.
        :param matrix: The weight matrix to modify.
        :param won: An array that specifies how many times each output neuron has "won".
        :param least_represented: The training pattern that is the least
        represented by this neural network.
        :return: True if a winner was forced.
        """
        max_activation = float("-inf")
        max_activation_neuron = -1
        # was self.least_represented, which does not exist (bug)
        output = self.compute(self.network, least_represented)
        # Loop over all of the output neurons. Consider any neurons that were
        # not the BMU (winner) for any pattern. Track which of these
        # non-winning neurons had the highest activation.
        for output_neuron in range(len(won)):
            # Only consider neurons that did not "win".
            if won[output_neuron] == 0:
                if (max_activation_neuron == -1) \
                        or (output[output_neuron] > max_activation):
                    max_activation = output[output_neuron]
                    max_activation_neuron = output_neuron
        # If a neuron was found that did not activate for any patterns, then
        # force it to "win" the least represented pattern.
        if max_activation_neuron != -1:
            self.copy_input_pattern(matrix, max_activation_neuron, least_represented)
            return True
        else:
            return False

    def iteration(self):
        """
        Perform one training iteration.
        """
        # Reset the BMU and begin this iteration.
        self.bmu_util.reset()
        won = [0] * self.output_neuron_count
        least_represented_activation = float("inf")
        least_represented = None
        # Reset the correction matrix for this iteration
        # (was self.correctionMatrix.clear(), which does not exist on arrays).
        self.correction_matrix.fill(0)
        # Determine the BMU for each training element.
        for input in self.training:
            bmu = self.bmu_util.calculate_bmu(input)
            won[bmu] += 1
            # If we are to force a winner each time, then track how many
            # times each output neuron becomes the BMU (winner).
            if self.force_winner:
                # Get the "output" from the network for this pattern. This
                # gets the activation level of the BMU.
                output = self.compute(self.network, input)
                # Track which training entry produces the least BMU. This
                # pattern is the least represented by the network.
                if output[bmu] < least_represented_activation:
                    least_represented_activation = output[bmu]
                    least_represented = input  # was input.getInput(), a Java leftover
            self._train(bmu, self.network.weights, input)
        if self.force_winner:
            # Force any non-winning neurons to share the burden somewhat.
            if not self._force_winners(self.network.weights, won, least_represented):
                self._apply_correction()
        else:
            self._apply_correction()

    def set_auto_decay(self, planned_iterations, start_rate, end_rate,
                       start_radius, end_radius):
        """
        Setup autodecay. This will decrease the radius and learning rate from
        the start values to the end values.
        :param planned_iterations: The number of iterations that are planned.
        This allows the decay rate to be determined.
        :param start_rate: The starting learning rate.
        :param end_rate: The ending learning rate.
        :param start_radius: The starting radius.
        :param end_radius: The ending radius.
        """
        self.start_rate = start_rate
        self.end_rate = end_rate
        self.start_radius = start_radius
        self.end_radius = end_radius
        self.auto_decay_radius = (end_radius - start_radius) / planned_iterations
        self.auto_decay_rate = (end_rate - start_rate) / planned_iterations
        self.set_params(self.start_rate, self.start_radius)

    def set_params(self, rate, radius):
        """
        Set the learning rate and radius.
        :param rate: The new learning rate.
        :param radius: The new radius.
        """
        self.radius = radius
        self.learning_rate = rate
        self.neighborhood.radius = radius

    def get_status(self):
        """
        :return: A string display of the status.
        """
        result = "Rate="
        result += str(self.learning_rate)
        result += ", Radius="
        result += str(self.radius)
        return result

    def _train(self, bmu, matrix, input):
        """
        Train for the specified synapse and BMU.
        :param bmu: The best matching unit for this input.
        :param matrix: The synapse to train.
        :param input: The input to train for.
        """
        # Adjust the weight for the BMU and its neighborhood.
        for output_neuron in range(self.output_neuron_count):
            self._train_pattern(matrix, input, output_neuron, bmu)

    def _train_pattern(self, matrix, input, current, bmu):
        """
        Train for the specified pattern.
        :param matrix: The synapse to train.
        :param input: The input pattern to train for.
        :param current: The current output neuron being trained.
        :param bmu: The best matching unit, or winning output neuron.
        """
        for input_neuron in range(self.input_neuron_count):
            current_weight = matrix[current][input_neuron]
            input_value = input[input_neuron]
            new_weight = self._determine_new_weight(current_weight, input_value,
                                                    current, bmu)
            self.correction_matrix[current][input_neuron] = new_weight

    def train_single_pattern(self, pattern):
        """
        Train the specified pattern. Find a winning neuron and adjust all
        neurons according to the neighborhood function.
        :param pattern: The pattern to train.
        """
        bmu = self.bmu_util.calculate_bmu(pattern)
        self._train(bmu, self.network.weights, pattern)
        self._apply_correction()

    def compute(self, som, input):
        """
        Calculate the output of the SOM for each output neuron. Typically,
        you will use the classify method instead of calling this method.
        :param som: The SOM to compute the output of.
        :param input: The input pattern.
        :return: The output activation of each output neuron.
        """
        result = np.zeros(som.output_count)
        for i in range(som.output_count):
            # The activation is the dot product of the neuron's weight vector
            # with the input pattern (the original matrix construction was broken).
            result[i] = np.dot(som.weights[i], input)
        return result
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
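To see how the pieces fit together before the graphical demo, here is a minimal, assumed training loop. It relies on the NeighborhoodRBF class from the accompanying neighborhood module (the same one the color demo below imports); everything else uses only the classes defined above, and all the numeric values are arbitrary.
# Minimal training loop (a sketch; NeighborhoodRBF comes from the
# course's neighborhood module, as in the color demo below).
import numpy as np
from neighborhood import *

som = SelfOrganizingMap(input_count=3, output_count=25)   # a 5x5 map of RGB units
som.reset()
data = np.random.uniform(-1, 1, size=(100, 3))            # random training samples
gaussian = NeighborhoodRBF(NeighborhoodRBF.TYPE_GAUSSIAN, [5, 5])
train = BasicTrainSOM(som, 0.01, data, gaussian)
train.set_auto_decay(200, 0.8, 0.003, 3, 1)               # decay rate/radius over 200 steps

for i in range(200):
    idx = np.random.randint(len(data))
    train.train_single_pattern(data[idx])                 # train on one random sample
    train.auto_decay()                                    # shrink rate and radius
print("{} error: {}".format(train.get_status(), som.calculate_error(data)))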
A common example used to help teach the principles behind SOMs is the mapping of colours from their three-dimensional components - red, green and blue - into two dimensions. The colours are presented to the network as 3D vectors - one dimension for each of the colour components (RGB encoding) - and the network learns to represent them in the 2D space we can see. Notice that in addition to clustering the colours into distinct regions, regions of similar properties are usually found adjacent to each other.
import os import sys from Tkinter import * import numpy as np from neighborhood import * TILES_WIDTH = 50 TILES_HEIGHT = 50 TILE_SCREEN_SIZE = 10 class DisplayColors: def __init__(self,root,samples): # Build the grid display canvas_width = TILES_WIDTH * TILE_SCREEN_SIZE canvas_height = TILES_HEIGHT * TILE_SCREEN_SIZE self.samples = samples self.root = root self.c = Canvas(self.root,width=canvas_width, height=canvas_height) self.c.pack() self.grid_rects = [[None for j in range(TILES_WIDTH)] for i in range(TILES_HEIGHT)] for row in range(TILES_HEIGHT): for col in range(TILES_WIDTH): x = col * TILE_SCREEN_SIZE y = row * TILE_SCREEN_SIZE r = self.c.create_rectangle(x, y, x+TILE_SCREEN_SIZE,y+TILE_SCREEN_SIZE, fill="white") self.grid_rects[row][col] = r self.som = SelfOrganizingMap(3,TILES_WIDTH * TILES_HEIGHT) self.som.reset() self.gaussian = NeighborhoodRBF(NeighborhoodRBF.TYPE_GAUSSIAN,[TILES_WIDTH,TILES_HEIGHT]) self.train = BasicTrainSOM(self.som, 0.01, None, self.gaussian) self.train.force_winner = False self.train.set_auto_decay(1000, 0.8, 0.003, 30, 5) self.iteration = 1 def RGBToHTMLColor(self, rgb_tuple): hexcolor = '#%02x%02x%02x' % rgb_tuple return hexcolor def convert_color(self, d): result = 128*d result+= 128 result = min(result, 255) result = max(result, 0) return result def update(self, som): for row in range(TILES_HEIGHT): for col in range(TILES_WIDTH): index = (row*TILES_WIDTH)+col color = ( self.convert_color(som.weights[index][0]), self.convert_color(som.weights[index][1]), self.convert_color(som.weights[index][2])) r = self.grid_rects[row][col] self.c.itemconfig(r, fill=self.RGBToHTMLColor(color)) self.c.itemconfig(r, outline=self.RGBToHTMLColor(color)) def update_clock(self): idx = np.random.randint(len(samples)) c = self.samples[idx] self.train.train_single_pattern(c) self.train.auto_decay() self.update(self.som) print("Iteration {}, {}".format(self.iteration,self.train.get_status())) self.iteration+=1 if self.iteration<=1000: self.root.after(1, self.update_clock) samples = np.zeros([15,3]) for i in range(15): samples[i][0] = np.random.uniform(-1,1) samples[i][1] = np.random.uniform(-1,1) samples[i][2] = np.random.uniform(-1,1) root = Tk() display = DisplayColors(root, samples) display.update_clock() root.mainloop()
Iteration 1, Rate=0.799203, Radius=29 Iteration 2, Rate=0.798406, Radius=28 Iteration 3, Rate=0.797609, Radius=27 Iteration 4, Rate=0.796812, Radius=26 Iteration 5, Rate=0.796015, Radius=25 Iteration 6, Rate=0.795218, Radius=24 Iteration 7, Rate=0.794421, Radius=23 Iteration 8, Rate=0.793624, Radius=22 Iteration 9, Rate=0.792827, Radius=21 Iteration 10, Rate=0.79203, Radius=20 Iteration 11, Rate=0.791233, Radius=19 Iteration 12, Rate=0.790436, Radius=18 Iteration 13, Rate=0.789639, Radius=17 Iteration 14, Rate=0.788842, Radius=16 Iteration 15, Rate=0.788045, Radius=15 Iteration 16, Rate=0.787248, Radius=14 Iteration 17, Rate=0.786451, Radius=13 Iteration 18, Rate=0.785654, Radius=12 Iteration 19, Rate=0.784857, Radius=11 Iteration 20, Rate=0.78406, Radius=10 Iteration 21, Rate=0.783263, Radius=9 Iteration 22, Rate=0.782466, Radius=8 Iteration 23, Rate=0.781669, Radius=7 Iteration 24, Rate=0.780872, Radius=6 Iteration 25, Rate=0.780075, Radius=5 Iteration 26, Rate=0.779278, Radius=5 Iteration 27, Rate=0.778481, Radius=5 Iteration 28, Rate=0.777684, Radius=5 Iteration 29, Rate=0.776887, Radius=5 Iteration 30, Rate=0.77609, Radius=5 Iteration 31, Rate=0.775293, Radius=5 Iteration 32, Rate=0.774496, Radius=5 Iteration 33, Rate=0.773699, Radius=5 Iteration 34, Rate=0.772902, Radius=5 Iteration 35, Rate=0.772105, Radius=5 Iteration 36, Rate=0.771308, Radius=5 Iteration 37, Rate=0.770511, Radius=5 Iteration 38, Rate=0.769714, Radius=5 Iteration 39, Rate=0.768917, Radius=5 Iteration 40, Rate=0.76812, Radius=5 Iteration 41, Rate=0.767323, Radius=5 Iteration 42, Rate=0.766526, Radius=5 Iteration 43, Rate=0.765729, Radius=5 Iteration 44, Rate=0.764932, Radius=5 Iteration 45, Rate=0.764135, Radius=5 Iteration 46, Rate=0.763338, Radius=5 Iteration 47, Rate=0.762541, Radius=5 Iteration 48, Rate=0.761744, Radius=5 Iteration 49, Rate=0.760947, Radius=5 Iteration 50, Rate=0.76015, Radius=5 Iteration 51, Rate=0.759353, Radius=5 Iteration 52, Rate=0.758556, Radius=5 Iteration 53, Rate=0.757759, Radius=5 Iteration 54, Rate=0.756962, Radius=5 Iteration 55, Rate=0.756165, Radius=5 Iteration 56, Rate=0.755368, Radius=5 Iteration 57, Rate=0.754571, Radius=5 Iteration 58, Rate=0.753774, Radius=5 Iteration 59, Rate=0.752977, Radius=5 Iteration 60, Rate=0.75218, Radius=5 Iteration 61, Rate=0.751383, Radius=5 Iteration 62, Rate=0.750586, Radius=5 Iteration 63, Rate=0.749789, Radius=5 Iteration 64, Rate=0.748992, Radius=5 Iteration 65, Rate=0.748195, Radius=5 Iteration 66, Rate=0.747398, Radius=5 Iteration 67, Rate=0.746601, Radius=5 Iteration 68, Rate=0.745804, Radius=5 Iteration 69, Rate=0.745007, Radius=5 Iteration 70, Rate=0.74421, Radius=5 Iteration 71, Rate=0.743413, Radius=5 Iteration 72, Rate=0.742616, Radius=5 Iteration 73, Rate=0.741819, Radius=5 Iteration 74, Rate=0.741022, Radius=5 Iteration 75, Rate=0.740225, Radius=5 Iteration 76, Rate=0.739428, Radius=5 Iteration 77, Rate=0.738631, Radius=5 Iteration 78, Rate=0.737834, Radius=5 Iteration 79, Rate=0.737037, Radius=5 Iteration 80, Rate=0.73624, Radius=5 Iteration 81, Rate=0.735443, Radius=5 Iteration 82, Rate=0.734646, Radius=5 Iteration 83, Rate=0.733849, Radius=5 Iteration 84, Rate=0.733052, Radius=5 Iteration 85, Rate=0.732255, Radius=5 Iteration 86, Rate=0.731458, Radius=5 Iteration 87, Rate=0.730661, Radius=5 Iteration 88, Rate=0.729864, Radius=5 Iteration 89, Rate=0.729067, Radius=5 Iteration 90, Rate=0.72827, Radius=5 Iteration 91, Rate=0.727473, Radius=5 Iteration 92, Rate=0.726676, Radius=5 Iteration 93, Rate=0.725879, Radius=5 Iteration 94, 
Rate=0.725082, Radius=5 Iteration 95, Rate=0.724285, Radius=5 Iteration 96, Rate=0.723488, Radius=5 Iteration 97, Rate=0.722691, Radius=5 Iteration 98, Rate=0.721894, Radius=5 Iteration 99, Rate=0.721097, Radius=5 Iteration 100, Rate=0.7203, Radius=5 Iteration 101, Rate=0.719503, Radius=5 Iteration 102, Rate=0.718706, Radius=5 Iteration 103, Rate=0.717909, Radius=5 Iteration 104, Rate=0.717112, Radius=5 Iteration 105, Rate=0.716315, Radius=5 Iteration 106, Rate=0.715518, Radius=5 Iteration 107, Rate=0.714721, Radius=5 Iteration 108, Rate=0.713924, Radius=5 Iteration 109, Rate=0.713127, Radius=5 Iteration 110, Rate=0.71233, Radius=5 Iteration 111, Rate=0.711533, Radius=5 Iteration 112, Rate=0.710736, Radius=5 Iteration 113, Rate=0.709939, Radius=5 Iteration 114, Rate=0.709142, Radius=5 Iteration 115, Rate=0.708345, Radius=5 Iteration 116, Rate=0.707548, Radius=5 Iteration 117, Rate=0.706751, Radius=5 Iteration 118, Rate=0.705954, Radius=5 Iteration 119, Rate=0.705157, Radius=5 Iteration 120, Rate=0.70436, Radius=5 Iteration 121, Rate=0.703563, Radius=5 Iteration 122, Rate=0.702766, Radius=5 Iteration 123, Rate=0.701969, Radius=5 Iteration 124, Rate=0.701172, Radius=5 Iteration 125, Rate=0.700375, Radius=5 Iteration 126, Rate=0.699578, Radius=5 Iteration 127, Rate=0.698781, Radius=5 Iteration 128, Rate=0.697984, Radius=5 Iteration 129, Rate=0.697187, Radius=5 Iteration 130, Rate=0.69639, Radius=5 Iteration 131, Rate=0.695593, Radius=5 Iteration 132, Rate=0.694796, Radius=5 Iteration 133, Rate=0.693999, Radius=5 Iteration 134, Rate=0.693202, Radius=5 Iteration 135, Rate=0.692405, Radius=5 Iteration 136, Rate=0.691608, Radius=5 Iteration 137, Rate=0.690811, Radius=5 Iteration 138, Rate=0.690014, Radius=5 Iteration 139, Rate=0.689217, Radius=5 Iteration 140, Rate=0.68842, Radius=5 Iteration 141, Rate=0.687623, Radius=5 Iteration 142, Rate=0.686826, Radius=5 Iteration 143, Rate=0.686029, Radius=5 Iteration 144, Rate=0.685232, Radius=5 Iteration 145, Rate=0.684435, Radius=5 Iteration 146, Rate=0.683638, Radius=5 Iteration 147, Rate=0.682841, Radius=5 Iteration 148, Rate=0.682044, Radius=5 Iteration 149, Rate=0.681247, Radius=5 Iteration 150, Rate=0.68045, Radius=5 Iteration 151, Rate=0.679653, Radius=5 Iteration 152, Rate=0.678856, Radius=5 Iteration 153, Rate=0.678059, Radius=5 Iteration 154, Rate=0.677262, Radius=5 Iteration 155, Rate=0.676465, Radius=5 Iteration 156, Rate=0.675668, Radius=5 Iteration 157, Rate=0.674871, Radius=5 Iteration 158, Rate=0.674074, Radius=5 Iteration 159, Rate=0.673277, Radius=5 Iteration 160, Rate=0.67248, Radius=5 Iteration 161, Rate=0.671683, Radius=5 Iteration 162, Rate=0.670886, Radius=5 Iteration 163, Rate=0.670089, Radius=5 Iteration 164, Rate=0.669292, Radius=5 Iteration 165, Rate=0.668495, Radius=5 Iteration 166, Rate=0.667698, Radius=5 Iteration 167, Rate=0.666901, Radius=5 Iteration 168, Rate=0.666104, Radius=5 Iteration 169, Rate=0.665307, Radius=5 Iteration 170, Rate=0.66451, Radius=5 Iteration 171, Rate=0.663713, Radius=5 Iteration 172, Rate=0.662916, Radius=5 Iteration 173, Rate=0.662119, Radius=5 Iteration 174, Rate=0.661322, Radius=5 Iteration 175, Rate=0.660525, Radius=5 Iteration 176, Rate=0.659728, Radius=5 Iteration 177, Rate=0.658931, Radius=5 Iteration 178, Rate=0.658134, Radius=5 Iteration 179, Rate=0.657337, Radius=5 Iteration 180, Rate=0.65654, Radius=5 Iteration 181, Rate=0.655743, Radius=5 Iteration 182, Rate=0.654946, Radius=5 Iteration 183, Rate=0.654149, Radius=5 Iteration 184, Rate=0.653352, Radius=5 Iteration 185, Rate=0.652555, 
Radius=5 Iteration 186, Rate=0.651758, Radius=5 Iteration 187, Rate=0.650961, Radius=5 Iteration 188, Rate=0.650164, Radius=5 Iteration 189, Rate=0.649367, Radius=5 Iteration 190, Rate=0.64857, Radius=5 Iteration 191, Rate=0.647773, Radius=5 Iteration 192, Rate=0.646976, Radius=5 Iteration 193, Rate=0.646179, Radius=5 Iteration 194, Rate=0.645382, Radius=5 Iteration 195, Rate=0.644585, Radius=5 Iteration 196, Rate=0.643788, Radius=5 Iteration 197, Rate=0.642991, Radius=5 Iteration 198, Rate=0.642194, Radius=5 Iteration 199, Rate=0.641397, Radius=5 Iteration 200, Rate=0.6406, Radius=5 Iteration 201, Rate=0.639803, Radius=5 Iteration 202, Rate=0.639006, Radius=5 Iteration 203, Rate=0.638209, Radius=5 Iteration 204, Rate=0.637412, Radius=5 Iteration 205, Rate=0.636615, Radius=5 Iteration 206, Rate=0.635818, Radius=5 Iteration 207, Rate=0.635021, Radius=5 Iteration 208, Rate=0.634224, Radius=5 Iteration 209, Rate=0.633427, Radius=5 Iteration 210, Rate=0.63263, Radius=5 Iteration 211, Rate=0.631833, Radius=5 Iteration 212, Rate=0.631036, Radius=5 Iteration 213, Rate=0.630239, Radius=5 Iteration 214, Rate=0.629442, Radius=5 Iteration 215, Rate=0.628645, Radius=5 Iteration 216, Rate=0.627848, Radius=5 Iteration 217, Rate=0.627051, Radius=5 Iteration 218, Rate=0.626254, Radius=5 Iteration 219, Rate=0.625457, Radius=5 Iteration 220, Rate=0.62466, Radius=5 Iteration 221, Rate=0.623863, Radius=5 Iteration 222, Rate=0.623066, Radius=5 Iteration 223, Rate=0.622269, Radius=5 Iteration 224, Rate=0.621472, Radius=5 Iteration 225, Rate=0.620675, Radius=5 Iteration 226, Rate=0.619878, Radius=5 Iteration 227, Rate=0.619081, Radius=5 Iteration 228, Rate=0.618284, Radius=5 Iteration 229, Rate=0.617487, Radius=5 Iteration 230, Rate=0.61669, Radius=5 Iteration 231, Rate=0.615893, Radius=5 Iteration 232, Rate=0.615096, Radius=5 Iteration 233, Rate=0.614299, Radius=5 Iteration 234, Rate=0.613502, Radius=5 Iteration 235, Rate=0.612705, Radius=5 Iteration 236, Rate=0.611908, Radius=5 Iteration 237, Rate=0.611111, Radius=5 Iteration 238, Rate=0.610314, Radius=5 Iteration 239, Rate=0.609517, Radius=5 Iteration 240, Rate=0.60872, Radius=5 Iteration 241, Rate=0.607923, Radius=5 Iteration 242, Rate=0.607126, Radius=5 Iteration 243, Rate=0.606329, Radius=5 Iteration 244, Rate=0.605532, Radius=5 Iteration 245, Rate=0.604735, Radius=5 Iteration 246, Rate=0.603938, Radius=5 Iteration 247, Rate=0.603141, Radius=5 Iteration 248, Rate=0.602344, Radius=5 Iteration 249, Rate=0.601547, Radius=5 Iteration 250, Rate=0.60075, Radius=5 Iteration 251, Rate=0.599953, Radius=5 Iteration 252, Rate=0.599156, Radius=5 Iteration 253, Rate=0.598359, Radius=5 Iteration 254, Rate=0.597562, Radius=5 Iteration 255, Rate=0.596765, Radius=5 Iteration 256, Rate=0.595968, Radius=5 Iteration 257, Rate=0.595171, Radius=5 Iteration 258, Rate=0.594374, Radius=5 Iteration 259, Rate=0.593577, Radius=5 Iteration 260, Rate=0.59278, Radius=5 Iteration 261, Rate=0.591983, Radius=5 Iteration 262, Rate=0.591186, Radius=5 Iteration 263, Rate=0.590389, Radius=5 Iteration 264, Rate=0.589592, Radius=5 Iteration 265, Rate=0.588795, Radius=5 Iteration 266, Rate=0.587998, Radius=5 Iteration 267, Rate=0.587201, Radius=5 Iteration 268, Rate=0.586404, Radius=5 Iteration 269, Rate=0.585607, Radius=5 Iteration 270, Rate=0.58481, Radius=5 Iteration 271, Rate=0.584013, Radius=5 Iteration 272, Rate=0.583216, Radius=5 Iteration 273, Rate=0.582419, Radius=5 Iteration 274, Rate=0.581622, Radius=5 Iteration 275, Rate=0.580825, Radius=5 Iteration 276, Rate=0.580028, Radius=5 
Iteration 277, Rate=0.579231, Radius=5 Iteration 278, Rate=0.578434, Radius=5 Iteration 279, Rate=0.577637, Radius=5 Iteration 280, Rate=0.57684, Radius=5 Iteration 281, Rate=0.576043, Radius=5 Iteration 282, Rate=0.575246, Radius=5 Iteration 283, Rate=0.574449, Radius=5 Iteration 284, Rate=0.573652, Radius=5 Iteration 285, Rate=0.572855, Radius=5 Iteration 286, Rate=0.572058, Radius=5 Iteration 287, Rate=0.571261, Radius=5 Iteration 288, Rate=0.570464, Radius=5 Iteration 289, Rate=0.569667, Radius=5 Iteration 290, Rate=0.56887, Radius=5 Iteration 291, Rate=0.568073, Radius=5 Iteration 292, Rate=0.567276, Radius=5 Iteration 293, Rate=0.566479, Radius=5 Iteration 294, Rate=0.565682, Radius=5 Iteration 295, Rate=0.564885, Radius=5 Iteration 296, Rate=0.564088, Radius=5 Iteration 297, Rate=0.563291, Radius=5 Iteration 298, Rate=0.562494, Radius=5 Iteration 299, Rate=0.561697, Radius=5 Iteration 300, Rate=0.5609, Radius=5 Iteration 301, Rate=0.560103, Radius=5 Iteration 302, Rate=0.559306, Radius=5 Iteration 303, Rate=0.558509, Radius=5 Iteration 304, Rate=0.557712, Radius=5 Iteration 305, Rate=0.556915, Radius=5 Iteration 306, Rate=0.556118, Radius=5 Iteration 307, Rate=0.555321, Radius=5 Iteration 308, Rate=0.554524, Radius=5 Iteration 309, Rate=0.553727, Radius=5 Iteration 310, Rate=0.55293, Radius=5 Iteration 311, Rate=0.552133, Radius=5 Iteration 312, Rate=0.551336, Radius=5 Iteration 313, Rate=0.550539, Radius=5 Iteration 314, Rate=0.549742, Radius=5 Iteration 315, Rate=0.548945, Radius=5 Iteration 316, Rate=0.548148, Radius=5 Iteration 317, Rate=0.547351, Radius=5 Iteration 318, Rate=0.546554, Radius=5 Iteration 319, Rate=0.545757, Radius=5 Iteration 320, Rate=0.54496, Radius=5 Iteration 321, Rate=0.544163, Radius=5 Iteration 322, Rate=0.543366, Radius=5 Iteration 323, Rate=0.542569, Radius=5 Iteration 324, Rate=0.541772, Radius=5 Iteration 325, Rate=0.540975, Radius=5 Iteration 326, Rate=0.540178, Radius=5 Iteration 327, Rate=0.539381, Radius=5 Iteration 328, Rate=0.538584, Radius=5 Iteration 329, Rate=0.537787, Radius=5 Iteration 330, Rate=0.53699, Radius=5 Iteration 331, Rate=0.536193, Radius=5 Iteration 332, Rate=0.535396, Radius=5 Iteration 333, Rate=0.534599, Radius=5 Iteration 334, Rate=0.533802, Radius=5 Iteration 335, Rate=0.533005, Radius=5 Iteration 336, Rate=0.532208, Radius=5 Iteration 337, Rate=0.531411, Radius=5 Iteration 338, Rate=0.530614, Radius=5 Iteration 339, Rate=0.529817, Radius=5 Iteration 340, Rate=0.52902, Radius=5 Iteration 341, Rate=0.528223, Radius=5 Iteration 342, Rate=0.527426, Radius=5 Iteration 343, Rate=0.526629, Radius=5 Iteration 344, Rate=0.525832, Radius=5 Iteration 345, Rate=0.525035, Radius=5 Iteration 346, Rate=0.524238, Radius=5 Iteration 347, Rate=0.523441, Radius=5 Iteration 348, Rate=0.522644, Radius=5 Iteration 349, Rate=0.521847, Radius=5 Iteration 350, Rate=0.52105, Radius=5 Iteration 351, Rate=0.520253, Radius=5 Iteration 352, Rate=0.519456, Radius=5 Iteration 353, Rate=0.518659, Radius=5 Iteration 354, Rate=0.517862, Radius=5 Iteration 355, Rate=0.517065, Radius=5 Iteration 356, Rate=0.516268, Radius=5 Iteration 357, Rate=0.515471, Radius=5 Iteration 358, Rate=0.514674, Radius=5 Iteration 359, Rate=0.513877, Radius=5 Iteration 360, Rate=0.51308, Radius=5 Iteration 361, Rate=0.512283, Radius=5 Iteration 362, Rate=0.511486, Radius=5 Iteration 363, Rate=0.510689, Radius=5 Iteration 364, Rate=0.509892, Radius=5 Iteration 365, Rate=0.509095, Radius=5 Iteration 366, Rate=0.508298, Radius=5 Iteration 367, Rate=0.507501, Radius=5 Iteration 368, 
Rate=0.506704, Radius=5 Iteration 369, Rate=0.505907, Radius=5 Iteration 370, Rate=0.50511, Radius=5 Iteration 371, Rate=0.504313, Radius=5 Iteration 372, Rate=0.503516, Radius=5 Iteration 373, Rate=0.502719, Radius=5 Iteration 374, Rate=0.501922, Radius=5 Iteration 375, Rate=0.501125, Radius=5 Iteration 376, Rate=0.500328, Radius=5 Iteration 377, Rate=0.499531, Radius=5 Iteration 378, Rate=0.498734, Radius=5 Iteration 379, Rate=0.497937, Radius=5 Iteration 380, Rate=0.49714, Radius=5 Iteration 381, Rate=0.496343, Radius=5 Iteration 382, Rate=0.495546, Radius=5 Iteration 383, Rate=0.494749, Radius=5 Iteration 384, Rate=0.493952, Radius=5 Iteration 385, Rate=0.493155, Radius=5 Iteration 386, Rate=0.492358, Radius=5 Iteration 387, Rate=0.491561, Radius=5 Iteration 388, Rate=0.490764, Radius=5 Iteration 389, Rate=0.489967, Radius=5 Iteration 390, Rate=0.48917, Radius=5 Iteration 391, Rate=0.488373, Radius=5 Iteration 392, Rate=0.487576, Radius=5 Iteration 393, Rate=0.486779, Radius=5 Iteration 394, Rate=0.485982, Radius=5 Iteration 395, Rate=0.485185, Radius=5 Iteration 396, Rate=0.484388, Radius=5 Iteration 397, Rate=0.483591, Radius=5 Iteration 398, Rate=0.482794, Radius=5 Iteration 399, Rate=0.481997, Radius=5 Iteration 400, Rate=0.4812, Radius=5 Iteration 401, Rate=0.480403, Radius=5 Iteration 402, Rate=0.479606, Radius=5 Iteration 403, Rate=0.478809, Radius=5 Iteration 404, Rate=0.478012, Radius=5 Iteration 405, Rate=0.477215, Radius=5 Iteration 406, Rate=0.476418, Radius=5 Iteration 407, Rate=0.475621, Radius=5 Iteration 408, Rate=0.474824, Radius=5 Iteration 409, Rate=0.474027, Radius=5 Iteration 410, Rate=0.47323, Radius=5 Iteration 411, Rate=0.472433, Radius=5 Iteration 412, Rate=0.471636, Radius=5 Iteration 413, Rate=0.470839, Radius=5 Iteration 414, Rate=0.470042, Radius=5 Iteration 415, Rate=0.469245, Radius=5 Iteration 416, Rate=0.468448, Radius=5 Iteration 417, Rate=0.467651, Radius=5 Iteration 418, Rate=0.466854, Radius=5 Iteration 419, Rate=0.466057, Radius=5 Iteration 420, Rate=0.46526, Radius=5 Iteration 421, Rate=0.464463, Radius=5 Iteration 422, Rate=0.463666, Radius=5 Iteration 423, Rate=0.462869, Radius=5 Iteration 424, Rate=0.462072, Radius=5 Iteration 425, Rate=0.461275, Radius=5 Iteration 426, Rate=0.460478, Radius=5 Iteration 427, Rate=0.459681, Radius=5 Iteration 428, Rate=0.458884, Radius=5 Iteration 429, Rate=0.458087, Radius=5 Iteration 430, Rate=0.45729, Radius=5 Iteration 431, Rate=0.456493, Radius=5 Iteration 432, Rate=0.455696, Radius=5 Iteration 433, Rate=0.454899, Radius=5 Iteration 434, Rate=0.454102, Radius=5 Iteration 435, Rate=0.453305, Radius=5 Iteration 436, Rate=0.452508, Radius=5 Iteration 437, Rate=0.451711, Radius=5 Iteration 438, Rate=0.450914, Radius=5 Iteration 439, Rate=0.450117, Radius=5 Iteration 440, Rate=0.44932, Radius=5 Iteration 441, Rate=0.448523, Radius=5 Iteration 442, Rate=0.447726, Radius=5 Iteration 443, Rate=0.446929, Radius=5 Iteration 444, Rate=0.446132, Radius=5 Iteration 445, Rate=0.445335, Radius=5 Iteration 446, Rate=0.444538, Radius=5 Iteration 447, Rate=0.443741, Radius=5 Iteration 448, Rate=0.442944, Radius=5 Iteration 449, Rate=0.442147, Radius=5 Iteration 450, Rate=0.44135, Radius=5 Iteration 451, Rate=0.440553, Radius=5 Iteration 452, Rate=0.439756, Radius=5 Iteration 453, Rate=0.438959, Radius=5 Iteration 454, Rate=0.438162, Radius=5 Iteration 455, Rate=0.437365, Radius=5 Iteration 456, Rate=0.436568, Radius=5 Iteration 457, Rate=0.435771, Radius=5 Iteration 458, Rate=0.434974, Radius=5 Iteration 459, Rate=0.434177, 
Radius=5 Iteration 460, Rate=0.43338, Radius=5 Iteration 461, Rate=0.432583, Radius=5 Iteration 462, Rate=0.431786, Radius=5 Iteration 463, Rate=0.430989, Radius=5 Iteration 464, Rate=0.430192, Radius=5 Iteration 465, Rate=0.429395, Radius=5 Iteration 466, Rate=0.428598, Radius=5 Iteration 467, Rate=0.427801, Radius=5 Iteration 468, Rate=0.427004, Radius=5 Iteration 469, Rate=0.426207, Radius=5 Iteration 470, Rate=0.42541, Radius=5 Iteration 471, Rate=0.424613, Radius=5 Iteration 472, Rate=0.423816, Radius=5 Iteration 473, Rate=0.423019, Radius=5 Iteration 474, Rate=0.422222, Radius=5 Iteration 475, Rate=0.421425, Radius=5 Iteration 476, Rate=0.420628, Radius=5 Iteration 477, Rate=0.419831, Radius=5 Iteration 478, Rate=0.419034, Radius=5 Iteration 479, Rate=0.418237, Radius=5 Iteration 480, Rate=0.41744, Radius=5 Iteration 481, Rate=0.416643, Radius=5 Iteration 482, Rate=0.415846, Radius=5 Iteration 483, Rate=0.415049, Radius=5 Iteration 484, Rate=0.414252, Radius=5 Iteration 485, Rate=0.413455, Radius=5 Iteration 486, Rate=0.412658, Radius=5 Iteration 487, Rate=0.411861, Radius=5 Iteration 488, Rate=0.411064, Radius=5 Iteration 489, Rate=0.410267, Radius=5 Iteration 490, Rate=0.40947, Radius=5 Iteration 491, Rate=0.408673, Radius=5 Iteration 492, Rate=0.407876, Radius=5 Iteration 493, Rate=0.407079, Radius=5 Iteration 494, Rate=0.406282, Radius=5 Iteration 495, Rate=0.405485, Radius=5 Iteration 496, Rate=0.404688, Radius=5 Iteration 497, Rate=0.403891, Radius=5 Iteration 498, Rate=0.403094, Radius=5 Iteration 499, Rate=0.402297, Radius=5 Iteration 500, Rate=0.4015, Radius=5 Iteration 501, Rate=0.400703, Radius=5 Iteration 502, Rate=0.399906, Radius=5 Iteration 503, Rate=0.399109, Radius=5 Iteration 504, Rate=0.398312, Radius=5 Iteration 505, Rate=0.397515, Radius=5 Iteration 506, Rate=0.396718, Radius=5 Iteration 507, Rate=0.395921, Radius=5 Iteration 508, Rate=0.395124, Radius=5 Iteration 509, Rate=0.394327, Radius=5 Iteration 510, Rate=0.39353, Radius=5 Iteration 511, Rate=0.392733, Radius=5 Iteration 512, Rate=0.391936, Radius=5 Iteration 513, Rate=0.391139, Radius=5 Iteration 514, Rate=0.390342, Radius=5 Iteration 515, Rate=0.389545, Radius=5 Iteration 516, Rate=0.388748, Radius=5 Iteration 517, Rate=0.387951, Radius=5 Iteration 518, Rate=0.387154, Radius=5 Iteration 519, Rate=0.386357, Radius=5 Iteration 520, Rate=0.38556, Radius=5 Iteration 521, Rate=0.384763, Radius=5 Iteration 522, Rate=0.383966, Radius=5 Iteration 523, Rate=0.383169, Radius=5 Iteration 524, Rate=0.382372, Radius=5 Iteration 525, Rate=0.381575, Radius=5 Iteration 526, Rate=0.380778, Radius=5 Iteration 527, Rate=0.379981, Radius=5 Iteration 528, Rate=0.379184, Radius=5 Iteration 529, Rate=0.378387, Radius=5 Iteration 530, Rate=0.37759, Radius=5 Iteration 531, Rate=0.376793, Radius=5 Iteration 532, Rate=0.375996, Radius=5 Iteration 533, Rate=0.375199, Radius=5 Iteration 534, Rate=0.374402, Radius=5 Iteration 535, Rate=0.373605, Radius=5 Iteration 536, Rate=0.372808, Radius=5 Iteration 537, Rate=0.372011, Radius=5 Iteration 538, Rate=0.371214, Radius=5 Iteration 539, Rate=0.370417, Radius=5 Iteration 540, Rate=0.36962, Radius=5 Iteration 541, Rate=0.368823, Radius=5 Iteration 542, Rate=0.368026, Radius=5 Iteration 543, Rate=0.367229, Radius=5 Iteration 544, Rate=0.366432, Radius=5 Iteration 545, Rate=0.365635, Radius=5 Iteration 546, Rate=0.364838, Radius=5 Iteration 547, Rate=0.364041, Radius=5 Iteration 548, Rate=0.363244, Radius=5 Iteration 549, Rate=0.362447, Radius=5 Iteration 550, Rate=0.36165, Radius=5 
Iteration 551, Rate=0.360853, Radius=5 Iteration 552, Rate=0.360056, Radius=5 Iteration 553, Rate=0.359259, Radius=5 Iteration 554, Rate=0.358462, Radius=5 Iteration 555, Rate=0.357665, Radius=5 Iteration 556, Rate=0.356868, Radius=5 Iteration 557, Rate=0.356071, Radius=5 Iteration 558, Rate=0.355274, Radius=5 Iteration 559, Rate=0.354477, Radius=5 Iteration 560, Rate=0.35368, Radius=5 Iteration 561, Rate=0.352883, Radius=5 Iteration 562, Rate=0.352086, Radius=5 Iteration 563, Rate=0.351289, Radius=5 Iteration 564, Rate=0.350492, Radius=5 Iteration 565, Rate=0.349695, Radius=5 Iteration 566, Rate=0.348898, Radius=5 Iteration 567, Rate=0.348101, Radius=5 Iteration 568, Rate=0.347304, Radius=5 Iteration 569, Rate=0.346507, Radius=5 Iteration 570, Rate=0.34571, Radius=5 Iteration 571, Rate=0.344913, Radius=5 Iteration 572, Rate=0.344116, Radius=5 Iteration 573, Rate=0.343319, Radius=5 Iteration 574, Rate=0.342522, Radius=5 Iteration 575, Rate=0.341725, Radius=5 Iteration 576, Rate=0.340928, Radius=5 Iteration 577, Rate=0.340131, Radius=5 Iteration 578, Rate=0.339334, Radius=5 Iteration 579, Rate=0.338537, Radius=5 Iteration 580, Rate=0.33774, Radius=5 Iteration 581, Rate=0.336943, Radius=5 Iteration 582, Rate=0.336146, Radius=5 Iteration 583, Rate=0.335349, Radius=5 Iteration 584, Rate=0.334552, Radius=5 Iteration 585, Rate=0.333755, Radius=5 Iteration 586, Rate=0.332958, Radius=5 Iteration 587, Rate=0.332161, Radius=5 Iteration 588, Rate=0.331364, Radius=5 Iteration 589, Rate=0.330567, Radius=5 Iteration 590, Rate=0.32977, Radius=5 Iteration 591, Rate=0.328973, Radius=5 Iteration 592, Rate=0.328176, Radius=5 Iteration 593, Rate=0.327379, Radius=5 Iteration 594, Rate=0.326582, Radius=5 Iteration 595, Rate=0.325785, Radius=5 Iteration 596, Rate=0.324988, Radius=5 Iteration 597, Rate=0.324191, Radius=5 Iteration 598, Rate=0.323394, Radius=5 Iteration 599, Rate=0.322597, Radius=5 Iteration 600, Rate=0.3218, Radius=5 Iteration 601, Rate=0.321003, Radius=5 Iteration 602, Rate=0.320206, Radius=5 Iteration 603, Rate=0.319409, Radius=5 Iteration 604, Rate=0.318612, Radius=5 Iteration 605, Rate=0.317815, Radius=5 Iteration 606, Rate=0.317018, Radius=5 Iteration 607, Rate=0.316221, Radius=5 Iteration 608, Rate=0.315424, Radius=5 Iteration 609, Rate=0.314627, Radius=5 Iteration 610, Rate=0.31383, Radius=5 Iteration 611, Rate=0.313033, Radius=5 Iteration 612, Rate=0.312236, Radius=5 Iteration 613, Rate=0.311439, Radius=5 Iteration 614, Rate=0.310642, Radius=5 Iteration 615, Rate=0.309845, Radius=5 Iteration 616, Rate=0.309048, Radius=5 Iteration 617, Rate=0.308251, Radius=5 Iteration 618, Rate=0.307454, Radius=5 Iteration 619, Rate=0.306657, Radius=5 Iteration 620, Rate=0.30586, Radius=5 Iteration 621, Rate=0.305063, Radius=5 Iteration 622, Rate=0.304266, Radius=5 Iteration 623, Rate=0.303469, Radius=5 Iteration 624, Rate=0.302672, Radius=5 Iteration 625, Rate=0.301875, Radius=5 Iteration 626, Rate=0.301078, Radius=5 Iteration 627, Rate=0.300281, Radius=5 Iteration 628, Rate=0.299484, Radius=5 Iteration 629, Rate=0.298687, Radius=5 Iteration 630, Rate=0.29789, Radius=5 Iteration 631, Rate=0.297093, Radius=5 Iteration 632, Rate=0.296296, Radius=5 Iteration 633, Rate=0.295499, Radius=5 Iteration 634, Rate=0.294702, Radius=5 Iteration 635, Rate=0.293905, Radius=5 Iteration 636, Rate=0.293108, Radius=5 Iteration 637, Rate=0.292311, Radius=5 Iteration 638, Rate=0.291514, Radius=5 Iteration 639, Rate=0.290717, Radius=5 Iteration 640, Rate=0.28992, Radius=5 Iteration 641, Rate=0.289123, Radius=5 Iteration 642, 
Rate=0.288326, Radius=5 Iteration 643, Rate=0.287529, Radius=5 Iteration 644, Rate=0.286732, Radius=5 Iteration 645, Rate=0.285935, Radius=5 Iteration 646, Rate=0.285138, Radius=5 Iteration 647, Rate=0.284341, Radius=5 Iteration 648, Rate=0.283544, Radius=5 Iteration 649, Rate=0.282747, Radius=5 Iteration 650, Rate=0.28195, Radius=5 Iteration 651, Rate=0.281153, Radius=5 Iteration 652, Rate=0.280356, Radius=5 Iteration 653, Rate=0.279559, Radius=5 Iteration 654, Rate=0.278762, Radius=5 Iteration 655, Rate=0.277965, Radius=5 Iteration 656, Rate=0.277168, Radius=5 Iteration 657, Rate=0.276371, Radius=5 Iteration 658, Rate=0.275574, Radius=5 Iteration 659, Rate=0.274777, Radius=5 Iteration 660, Rate=0.27398, Radius=5 Iteration 661, Rate=0.273183, Radius=5 Iteration 662, Rate=0.272386, Radius=5 Iteration 663, Rate=0.271589, Radius=5 Iteration 664, Rate=0.270792, Radius=5 Iteration 665, Rate=0.269995, Radius=5 Iteration 666, Rate=0.269198, Radius=5 Iteration 667, Rate=0.268401, Radius=5 Iteration 668, Rate=0.267604, Radius=5 Iteration 669, Rate=0.266807, Radius=5 Iteration 670, Rate=0.26601, Radius=5 Iteration 671, Rate=0.265213, Radius=5 Iteration 672, Rate=0.264416, Radius=5 Iteration 673, Rate=0.263619, Radius=5 Iteration 674, Rate=0.262822, Radius=5 Iteration 675, Rate=0.262025, Radius=5 Iteration 676, Rate=0.261228, Radius=5 Iteration 677, Rate=0.260431, Radius=5 Iteration 678, Rate=0.259634, Radius=5 Iteration 679, Rate=0.258837, Radius=5 Iteration 680, Rate=0.25804, Radius=5 Iteration 681, Rate=0.257243, Radius=5
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Assignments In this assignment you should compute a solution path for the Traveling Salesman Problem (finding a short path that visits each city exactly once and returns home) for an unknown number of cities given as input (you can safely assume <= 1000 cities). Each city consists of an ID (an integer number), and the X and Y position of that city (two integer numbers). The provided input format for each line to read in is CITY-ID,X,Y\n. Your program shall implement a Self-Organizing Map to accomplish this task. When your SOM has finished learning, print the path as one city-id per line, followed by '\n'. Example for three cities with IDs 1,2,3 which are visited in the order 3,1,2: 3\n 1\n 2\nRemember that the number of cities in the output corresponds exactly to the number of cities in the input. It does not matter which of the cities is the first on your path. You can safely assume that your program does not need to find the shortest possible path (remember, this problem is NP-hard!), but your result needs to be within 15% of the shortest path we found (which again might not be optimal). A travelling salesman across Europe :)![title](img/som_ts_eu.png)
# load the datasets for training and testing for TS in Europe import numpy as np import csv with open('./data/som_ts_in.txt') as inputfile: train_data = list(csv.reader(inputfile)) with open('./data/som_ts_out.txt') as inputfile: test_data = list(csv.reader(inputfile)) # add network code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
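One common way to attack the TSP with a SOM is a one-dimensional elastic ring of neurons laid over the cities; reading the cities off the converged ring gives the tour. The sketch below is an assumed, self-contained illustration of that idea, not the reference solution; the ring size, learning rate and decay constants are all guesses.
# Illustrative elastic-ring SOM for the TSP (a sketch, not the reference solution).
import numpy as np

def som_tsp(cities, epochs=5000, lr=0.8):
    cities = np.asarray(cities, dtype=float)
    # Normalize coordinates to [0, 1] so one learning rate fits all inputs.
    cities = (cities - cities.min(0)) / (cities.max(0) - cities.min(0))
    n = len(cities) * 2                       # ring of ~2 neurons per city
    ring = np.random.rand(n, 2)               # neuron positions in the plane
    radius = n / 2.0
    for _ in range(epochs):
        city = cities[np.random.randint(len(cities))]
        winner = np.argmin(np.linalg.norm(ring - city, axis=1))
        d = np.abs(np.arange(n) - winner)      # distance to the winner along the ring ...
        d = np.minimum(d, n - d)               # ... accounting for the circular wrap
        g = np.exp(-d**2 / (2 * max(radius, 1) ** 2))   # Gaussian neighborhood
        ring += lr * g[:, np.newaxis] * (city - ring)   # pull neurons toward the city
        lr *= 0.9997                           # decay the learning rate ...
        radius *= 0.9997                       # ... and the neighborhood radius
    # Read the tour off the ring: order the cities by their closest neuron.
    order = [np.argmin(np.linalg.norm(ring - c, axis=1)) for c in cities]
    return np.argsort(order)

# Possible usage with the loaded data (CITY-ID,X,Y per row):
# ids = [int(r[0]) for r in train_data]
# xy = [(float(r[1]), float(r[2])) for r in train_data]
# for i in som_tsp(xy):
#     print(ids[i])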
And for a more complex example, consider a more restricted dataset.![title](img/som_ts_random.png)
# load the datasets for training and testing for TS import numpy as np import csv with open('./data/som_ts_in_aux.txt') as inputfile: train_data = list(csv.reader(inputfile)) with open('./data/som_ts_out_aux.txt') as inputfile: test_data = list(csv.reader(inputfile)) # add network code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Hopfield Networks Donald Hebb hypothesized in 1949 how neurons are connected with each other in the brain: “When an axon of cell A is near enough to excite a cell B and repeatedly or persistently takes part in firing it, some growth process or metabolic change takes place in one or both cells such that A’s efficiency, as one of the cells firing B, is increased.”, and postulated a new learning mechanism, Hebbian learning. In other words, a neural network stores and retrieves associations, which are learned as synaptic connections. In Hebbian learning, both presynaptic and postsynaptic neurons are involved. Human memory thus works in an associative or content-addressable way.The model is a recurrent neural network with fully interconnected neurons. The number of feedback loops is equal to the number of neurons. Basically, the output of each neuron is fed back, via a unit-time delay element, to each of the other neurons in the network. ![title](img/hopfield.png)Such a structure allows the network to recognise any of the learned patterns by exposure to only partial or even corrupted information about that pattern, i.e., it eventually settles down and returns the closest pattern or the best guess.
# Class implementing a Hopfield Network
import numpy as np
from energetic import EnergeticNetwork


class HopfieldNetwork(EnergeticNetwork):
    def __init__(self, neuron_count):
        EnergeticNetwork.__init__(self, neuron_count)
        self.input_count = neuron_count
        self.output_count = neuron_count
        self.activation_function = lambda d: 1 if (d > 0) else 0

    def compute(self, input):
        """
        Note: for Hopfield networks, you will usually want to call the "run"
        method to compute the output. This method copies the input data to the
        current state. A single iteration is then run, and the new current
        state is returned.
        :param input: The input pattern.
        :return: The new current state.
        """
        self.current_state[:] = input  # copy the input, as documented (was missing)
        result = self.current_state[:]
        self.run()
        for i in range(self.neuron_count):  # was range(self.current_state), a TypeError
            result[i] = self.activation_function(self.current_state[i])
        self.current_state[:] = result
        return result

    def run(self):
        """
        Perform one Hopfield iteration.
        """
        for to_neuron in range(self.neuron_count):
            sum = 0
            for from_neuron in range(self.neuron_count):
                sum += self.current_state[from_neuron] \
                    * self.get_weight(from_neuron, to_neuron)
            self.current_state[to_neuron] = self.activation_function(sum)

    def run_until_stable(self, max_cycle):
        """
        Run the network until it becomes stable and does not change from more runs.
        :param max_cycle: The maximum number of cycles to run before giving up.
        :return: The number of cycles that were run.
        """
        done = False
        current_state_str = str(self.current_state)
        cycle = 0
        while not done:
            self.run()
            cycle += 1
            last_state_str = str(self.current_state)
            # Stop when the state stops changing, or when the cycle budget is
            # spent (the original condition kept running on a stable state).
            if last_state_str == current_state_str or cycle > max_cycle:
                done = True
            current_state_str = last_state_str
        return cycle

    def energy(self):
        t = 0  # all neuron thresholds are zero
        # Calculate first term
        a = 0
        for i in range(self.input_count):
            for j in range(self.output_count):
                a += self.get_weight(i, j) * self.current_state[i] * self.current_state[j]
        a *= -0.5
        # Calculate second term
        b = 0
        for i in range(self.input_count):
            b += self.current_state[i] * t
        return a + b
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
In the next section we implement the Hopfield Network training algorithm![title](img/hopfield_alg.png)
class TrainHopfieldHebbian:
    def __init__(self, network):
        self.network = network
        self.sum_matrix = np.zeros([network.input_count, network.input_count])
        self.pattern_count = 0  # was initialized to 1, which skewed the average in learn()

    def add_pattern(self, pattern):
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                if i == j:
                    self.sum_matrix[i][j] = 0
                else:
                    self.sum_matrix[i][j] += pattern[i] * pattern[j]
        self.pattern_count += 1

    def learn(self):
        if self.pattern_count == 0:
            raise Exception("Please add a pattern before learning. Nothing to learn.")
        for i in range(self.network.input_count):
            for j in range(self.network.input_count):
                self.network.set_weight(i, j, self.sum_matrix[i][j] / self.pattern_count)
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
In the following sample problem we will implement a Hopfield network to correct distorted patterns (here: 2D images). The algorithm reads a collection of binary images (5 patterns), each image being 10x10 "pixels" in size. A pixel may either be a space ' ' or a circle 'O'. We will train a Hopfield network (size 10x10 neurons) with these images as attractors. After training, the algorithm will read another small number of images with "distortions", i.e. with incorrect pixel patterns compared to the previously trained images. For each such "distorted" image the algorithm shall output the closest training example.
# The neural network will learn these patterns. PATTERN = [[ "O O O O O ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O"], [ "OO OO OO", "OO OO OO", " OO OO ", " OO OO ", "OO OO OO", "OO OO OO", " OO OO ", " OO OO ", "OO OO OO", "OO OO OO" ], [ "OOOOO ", "OOOOO ", "OOOOO ", "OOOOO ", "OOOOO ", " OOOOO", " OOOOO", " OOOOO", " OOOOO", " OOOOO" ], [ "O O O O", " O O O ", " O O O ", "O O O O", " O O O ", " O O O ", "O O O O", " O O O ", " O O O ", "O O O O" ], [ "OOOOOOOOOO", "O O", "O OOOOOO O", "O O O O", "O O OO O O", "O O OO O O", "O O O O", "O OOOOOO O", "O O", "OOOOOOOOOO" ]] # The neural network will be tested on these patterns, to see which of the last set they are the closest to. PATTERN2 = [[ " ", " ", " ", " ", " ", " O O O O O", "O O O O O ", " O O O O O", "O O O O O ", " O O O O O"], ["OOO O O", " O OOO OO", " O O OO O", " OOO O ", "OO O OOO", " O OOO O", "O OO O O", " O OOO ", "OO OOO O ", " O O OOO"], ["OOOOO ", "O O OOO ", "O O OOO ", "O O OOO ", "OOOOO ", " OOOOO", " OOO O O", " OOO O O", " OOO O O", " OOOOO"], ["O OOOO O", "OO OOOO ", "OOO OOOO ", "OOOO OOOO", " OOOO OOO", " OOOO OO", "O OOOO O", "OO OOOO ", "OOO OOOO ", "OOOO OOOO"], ["OOOOOOOOOO", "O O", "O O", "O O", "O OO O", "O OO O", "O O", "O O", "O O", "OOOOOOOOOO"]]
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Convert the image representation into a bipolar {-1/1} representation and display according to the original patterns
# Size of the network HEIGHT = 10 WIDTH = 10 def convert_pattern(data, index): result_index = 0 result = np.zeros([WIDTH*HEIGHT]) for row in range(HEIGHT): for col in range(WIDTH): ch = data[index][row][col] result[result_index] = 1 if ch != ' ' else -1 result_index += 1 return result def display(pattern1, pattern2): index1 = 0 index2 = 0 for row in range(HEIGHT): line = "" for col in range(WIDTH): if pattern1[index1]>0: line += "O" else: line += " " index1 += 1 line += " -> " for col in range(WIDTH): if pattern2[index2] >0 : line += "O" else: line += " " index2 += 1 print(line) def display_data(pattern1): index1 = 0 index2 = 0 for row in range(HEIGHT): line = "" for col in range(WIDTH): if pattern1[index1]>0: line += "O" else: line += " " index1 += 1 print(line) # Evaluate the network for the provided patterns, using a number of N steps of convergence N = 10 def evaluate(hopfield, pattern): for i in range(len(pattern)): print 'Convergence for pattern %d \n' % i pattern1 = convert_pattern(pattern, i) print 'input\n' display_data(pattern1) hopfield.current_state = pattern1 cycles = hopfield.run_until_stable(N) pattern2 = hopfield.current_state print 'attractor\n' display_data(pattern2) print("----------------------") # Create the network and train it on the first set of patterns and evaluate for both datasets (i.e. one correct and one distorted) hopfield = HopfieldNetwork(WIDTH*HEIGHT) train = TrainHopfieldHebbian(hopfield) for i in range(len(PATTERN)): train.add_pattern(convert_pattern(PATTERN, i)) train.learn() print("Evaluate distorted patterns\n") evaluate(hopfield, PATTERN2)
Evaluate distorted patterns Convergence for pattern 0 input O O O O O O O O O O O O O O O O O O O O O O O O O attractor O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O ---------------------- Convergence for pattern 1 input OOO O O O OOO OO O O OO O OOO O OO O OOO O OOO O O OO O O O OOO OO OOO O O O OOO attractor OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO OO ---------------------- Convergence for pattern 2 input OOOOO O O OOO O O OOO O O OOO OOOOO OOOOO OOO O O OOO O O OOO O O OOOOO attractor OOOOO OOOOO OOOOO OOOOO OOOOO OOOOO OOOOO OOOOO OOOOO OOOOO ---------------------- Convergence for pattern 3 input O OOOO O OO OOOO OOO OOOO OOOO OOOO OOOO OOO OOOO OO O OOOO O OO OOOO OOO OOOO OOOO OOOO attractor O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O O ---------------------- Convergence for pattern 4 input OOOOOOOOOO O O O O O O O OO O O OO O O O O O O O OOOOOOOOOO attractor OOOOOOOOOO O O O OOOOOO O O O O O O O OO O O O O OO O O O O O O O OOOOOO O O O OOOOOOOOOO ----------------------
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
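As a small follow-up to the demo, the energy() method defined on HopfieldNetwork can be used to watch the relaxation toward an attractor; the energy should generally decrease as the state settles. A quick, assumed usage with the objects from the demo above:
# Watch the energy while the network relaxes toward an attractor
# (uses hopfield, convert_pattern and PATTERN2 from the demo above).
hopfield.current_state = convert_pattern(PATTERN2, 0)
for step in range(5):
    print("step {} energy: {}".format(step, hopfield.energy()))
    hopfield.run()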
In the application of the Hopfield network as a content-addressable memory, we know a priori the fixed points (attractors) of the network in that they correspond to the patterns to be stored. However, the synaptic weights of the network that produce the desired fixed points are unknown, and the problem is how to determine them. The primary function of a content-addressable memory is to retrieve a pattern (item) stored in memory in response to the presentation of an incomplete or noisy version of that pattern.![title](img/hopfield_energy.png) Assignments For this assignment you should develop a Hopfield Network capable of learning a phonebook; more precisely, a simple autoassociative memory to recover names and phone numbers and/or match them. Assume this is the phonebook extract the network needs to learn:
TINA -> 6843726
ANTJE -> 8034673
LISA -> 7260915
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Code a Hopfield Network for phonebook learning and restoring using its Content-Addressable-Memory behavior. Simulate the network for distorted numbers. The data is represented as (each entry is Name -> Number):

Input | Output
TINA -> ? 86'GV | TINA -> 6843726
ANTJE -> ?Z!ES-= | ANTJE -> 8034673
LISA -> JKXMG | LISA -> 7260915
# add code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
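A possible starting point for the phonebook task, reusing the HopfieldNetwork and TrainHopfieldHebbian classes from above. The fixed-width bipolar character encoding below is an assumption (any per-character binary code works), and it relies on the set_weight/get_weight accessors of the EnergeticNetwork base class.
# Illustrative encoding for the phonebook task (assumed, not prescribed):
# each character of "NAME NUMBER" becomes a fixed-width bipolar bit code.
import numpy as np

ALPHABET = " ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
BITS = 6  # 2**6 = 64 >= len(ALPHABET)

def encode(text, width=14):
    text = text.ljust(width)[:width]
    out = []
    for ch in text:
        code = ALPHABET.index(ch) if ch in ALPHABET else 0
        out += [1 if (code >> b) & 1 else -1 for b in range(BITS)]
    return np.array(out)

def decode(state, width=14):
    chars = []
    for i in range(width):
        bits = state[i * BITS:(i + 1) * BITS]
        code = sum((1 << b) for b, v in enumerate(bits) if v > 0)
        chars.append(ALPHABET[code] if code < len(ALPHABET) else "?")
    return "".join(chars).rstrip()

entries = ["TINA 6843726", "ANTJE 8034673", "LISA 7260915"]
hopfield = HopfieldNetwork(14 * BITS)
train = TrainHopfieldHebbian(hopfield)
for e in entries:
    train.add_pattern(encode(e))
train.learn()

# Recall from a distorted cue: unknown characters become spaces.
hopfield.current_state = encode("TINA")   # name only, number missing
hopfield.run_until_stable(20)
print(decode(hopfield.current_state))     # ideally -> "TINA 6843726"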
Simulate the network for distorted names. The data is represented as (each entry is Number -> Name):

Input | Output
6843726 -> ; 01, | 6843726 -> TINA
8034673 -> &;A$T | 8034673 -> ANTJE
7260915 -> N";SE | 7260915 -> LISA
# add code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
Simulate the network for distorted names and numbers. The data is represented as (each entry is Name -> Number):

Input | Output
TINE -> 1F&KV]: | TINA -> 6843726
ANNJE -> %VZAQ$> | ANTJE -> 8034673
RITA -> [)@)EK& | DIVA -> 6060737
# add code here
_____no_output_____
MIT
code/UnsupervisedNeuralComputation.ipynb
caxenie/basecamp-winterschool-2017
2021-05-10 Daily Practice- [x] Practice - [ ] SQL - [x] Algorithms - [ ] Solve + Design- [ ] Learn- [ ] Write- [ ] Build --- Practice- [x] https://leetcode.com/problems/reverse-integer/- [x] https://leetcode.com/problems/longest-common-prefix/- [x] https://leetcode.com/problems/maximum-subarray/- [x] https://leetcode.com/problems/same-tree/- [x] https://leetcode.com/problems/combination-sum/- [x] https://leetcode.com/problems/longest-substring-without-repeating-characters/ Problem solving process[CSDojo problem solving tips](https://www.youtube.com/watch?v=GBuHSRDGZBY)1. Brute-force solution2. Think of a simpler version of the problem3. Think with simpler examples: look for patterns4. Use some visualization5. Test solution on a other examples ProblemGiven two arrays of the same length, find the pair(s) of values with sums closest to the target.
arr1 = [-1, 3, 8, 2, 9, 5]
arr2 = [4, 1, 2, 10, 5, 20]
tgt = 24

# Brute-force iterative approach - O(n^2)
# Iterate through every pair of elements to find the closest
def find_closest_sum(arr1, arr2, tgt):
    # Start at infinity: a sum can be farther from tgt than tgt itself,
    # so initializing with tgt (as before) could miss or keep stale pairs.
    closest = float("inf")
    closest_sums = []
    for i, v1 in enumerate(arr1):
        for j, v2 in enumerate(arr2):
            diff = abs(tgt - (v1 + v2))  # compare absolute distances to the target
            if diff < closest:           # strictly better: restart the list
                closest = diff
                closest_sums = [(v1, v2)]
            elif diff == closest:        # tie: keep both pairs
                closest_sums.append((v1, v2))
    return closest, closest_sums

find_closest_sum(arr1, arr2, tgt)

# Simpler version of the problem - target sum pair exists
arr3 = [-1, 3, 8, 2, 9, 4]
arr4 = [4, 1, 2, 10, 5, 20]
tgt2 = 24

# Use a set to check for differences
def find_closest_sum(arr1, arr2, tgt):
    set1 = set(arr1)  # Create set from first array
    pairs = []
    for j, v2 in enumerate(arr2):  # Iterate through second array
        # Check if target minus element is in set
        if (tgt - v2) in set1:
            pairs.append((tgt - v2, v2))
    return pairs

find_closest_sum(arr3, arr4, tgt2)
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Once the simpler version of the problem (where a pair exists that adds up to the target) is solved, expand that solution to cover the remaining cases (arrays without a pair that adds up to the target). In this problem, if the target sum is not found, add or subtract 1 from the target and try again, repeating until a pair is found; a sketch of that retry idea follows the sorted two-pointer outline below.

> Think with simpler examples: try noticing a pattern
# Sorting the arrays first; start at the top of the first array
def find_closest_sum(arr1, arr2, tgt):
    arr1s, arr2s = sorted(arr1), sorted(arr2)
    i, j = len(arr1s) - 1, 0  # First pair is (arr1s[-1], arr2s[0])
    best, best_pair = None, None
    while i >= 0 and j < len(arr2s):
        s = arr1s[i] + arr2s[j]
        # Keep track of the closest pair and return it after iteration
        if best is None or abs(tgt - s) < best:
            best, best_pair = abs(tgt - s), (arr1s[i], arr2s[j])
        if s == tgt:    # Sum equals target: solution is found
            break
        elif s < tgt:   # Sum less than target: increment second array's index
            j += 1
        else:           # Sum more than target: decrement first array's index
            i -= 1
    return best_pair

find_closest_sum(arr1, arr2, tgt)
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
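As mentioned above, another way to extend the exact-match set lookup is to widen the search around the target until some pair hits. This is a rough sketch of that retry loop (my addition, not from the original log), with a local exact-pair helper so it stands alone:

```python
# Sketch of the "widen the target" idea: look for an exact pair at
# tgt, then tgt +/- 1, tgt +/- 2, ... until something matches.
def find_closest_by_retry(arr1, arr2, tgt):
    set1 = set(arr1)

    def exact(t):  # exact-pair lookup via the set, as in the cell above
        return [(t - v2, v2) for v2 in arr2 if (t - v2) in set1]

    offset = 0
    while True:  # terminates: some achievable sum is eventually reached
        for t in (tgt - offset, tgt + offset):
            pairs = exact(t)
            if pairs:
                return pairs
        offset += 1

find_closest_by_retry(arr1, arr2, tgt)
```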
Reverse integer

On [LeetCode](https://leetcode.com/problems/reverse-integer/)

Given a signed 32-bit integer `x`, return `x` with its digits reversed. If reversing `x` causes the value to go outside the signed 32-bit integer range `[-2^31, 2^31 - 1]`, then return `0`.

Assume the environment does not allow you to store 64-bit integers (signed or unsigned).
# Get components of integer: 201 -> 102
# 201 % 10 -> 1   (modulo 10 returns the rightmost digit)
# 201 // 10 -> 20 (floor division removes that digit)
# 20 is fed back into the function; repeat the steps above:
# 20 % 10 -> 0, 20 // 10 -> 2
# Base case: 2 % 10 == 2 -> return that number
# Reconstruct from right to left: 2 * 10**0, then shift left
#
# 123 -> 321
# 123 % 10 = 3, 123 // 10 = 12
# 12 % 10 = 2, 12 // 10 = 1
# 1 % 10 = 1  (base case, return 1)
# 1 + 2 * 10**1 = 21
# 21 + 3 * 10**2 = 321

import math

def reverse(x):
    # Handle the negative case by working with abs(x)
    neg = 1
    if x < 0:
        neg = -1
        x *= neg
    # Base case: mod 10 of a single digit is the digit itself
    if x % 10 == x:
        return x
    # "Pop" the rightmost digit off of x
    right = x % 10
    x_new = x // 10
    # Number of digits in x_new, used as the exponent below
    factor = int(math.log(x_new, 10)) + 1
    # Feed the remainder back in and reconstruct right to left
    rev = (reverse(x_new) + right * 10**factor) * neg
    # Clamp to the signed 32-bit range [-2**31, 2**31 - 1]
    if rev > 2**31 - 1 or rev < -(2**31):
        return 0
    return rev

reverse(123)

reverse(-123)

int(math.log(21, 10))

int(math.log(211, 10))
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
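A possible follow-up (my addition, not part of the original log): `math.log` works on floats and can misjudge digit counts for some inputs, so an iterative digit-pop version sidesteps the logarithm entirely. A sketch under the same 32-bit clamping rule:

```python
# Iterative sketch: pop digits with % and //, push with * 10,
# avoiding float log for the digit count.
def reverse_iter(x: int) -> int:
    neg = -1 if x < 0 else 1
    x *= neg
    rev = 0
    while x > 0:
        rev = rev * 10 + x % 10  # append the rightmost digit
        x //= 10                 # drop it from x
    rev *= neg
    return 0 if rev > 2**31 - 1 or rev < -(2**31) else rev

print(reverse_iter(123), reverse_iter(-123))  # 321 -321
```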
Longest common prefix

On [LeetCode](https://leetcode.com/problems/longest-common-prefix/)

Write a function to find the longest common prefix string amongst an array of strings. If there is no common prefix, return an empty string `""`.

- Implement a trie
- Insert words into the trie
- DFS for the first node that has multiple children (or marks the end of a word)
class TrieNode:
    """Node of a trie."""
    def __init__(self, char: str):
        self.char = char      # Character held by this node
        self.is_end = False   # End of word
        self.children = {}    # Children: key is char, value is node


class Trie:
    """A trie object."""
    def __init__(self):
        """Instantiate the tree with a blank root node."""
        self.root = TrieNode("")

    def insert(self, word: str) -> None:
        """Inserts a word into the trie; each char is a node."""
        prev_node = self.root  # Start at root
        for char in word:  # Iterate through chars in word
            # Check if char is already a child of prev_node
            if char in prev_node.children:
                # If it already exists, iterate to next char
                prev_node = prev_node.children[char]
            else:
                # If not, instantiate node with char; add as child to prev_node
                new_node = TrieNode(char)
                prev_node.children[char] = new_node
                prev_node = new_node
        prev_node.is_end = True  # Mark end of word, in case word itself is prefix

    def longest_common_prefix(self, root: TrieNode):
        """Traverses the tree to find the longest common prefix of inserted words."""
        # Base case: node has multiple children (or none) or marks end of word
        # (using != 1 also covers an empty trie without crashing)
        if len(root.children) != 1 or root.is_end is True:
            return root.char
        # Recursive case: concat this node's char with the recursive call
        child = root.children[list(root.children)[0]]  # Get the only child node
        return root.char + self.longest_common_prefix(child)

from typing import List

def longestCommonPrefix(strs: List[str]) -> str:
    trie = Trie()  # Instantiate a trie
    # Loop through words, inserting them into the trie
    for word in strs:
        trie.insert(word)
    # Walk the trie to find the prefix
    return trie.longest_common_prefix(trie.root)

longestCommonPrefix(["flower", "flow", "flight"])
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Max Subarray

On [LeetCode](https://leetcode.com/problems/maximum-subarray/)

Given an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.

Example 1:

    Input: nums = [-2,1,-3,4,-1,2,1,-5,4]
    Output: 6
    Explanation: [4,-1,2,1] has the largest sum = 6.

Example 2:

    Input: nums = [1]
    Output: 1

Example 3:

    Input: nums = [5,4,-1,7,8]
    Output: 23
def max_subarray(nums):
    # vars to hold the running subarray and max sum so far
    max_sum = None
    sub = []
    for num in nums:  # iterate through nums
        # If the current value alone beats every extension of the previous
        # subarray (i.e. the running sum is <= 0), restart the subarray
        if num >= sum(sub) + num:
            sub = [num]
        else:
            sub.append(num)  # Otherwise, extend the running subarray
        if max_sum is None:  # Handle the first (possibly negative) item
            max_sum = sum(sub)
        if sum(sub) > max_sum:  # If the running sum is greater, set new max
            max_sum = sum(sub)
    return max_sum

nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(max_subarray(nums))
print(max_subarray([1]))
print(max_subarray([5, 4, -1, 7, 8]))
6 1 23
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
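The version above recomputes `sum(sub)` on every step, which makes it O(n^2) overall. A possible O(n) refinement (my sketch, not from the original log) is Kadane's algorithm, which tracks the running sum directly instead of storing the subarray:

```python
# Kadane's algorithm sketch: O(n) time, O(1) extra space.
def max_subarray_kadane(nums):
    best = cur = nums[0]
    for num in nums[1:]:
        cur = max(num, cur + num)  # extend the current run or restart at num
        best = max(best, cur)      # remember the best run seen so far
    return best

print(max_subarray_kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6
```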
Same Tree

On [LeetCode](https://leetcode.com/problems/same-tree/)

Given the roots of two binary trees p and q, write a function to check if they are the same or not.

Two binary trees are considered the same if they are structurally identical, and the nodes have the same value.
class Solution:
    def preorderTraversal(self, node) -> list:
        # Base case: node is None (record it so structure is captured)
        if node is None:
            return [None]
        # Recursive case: [this node.val] + traversal(left) + traversal(right)
        return [node.val] + self.preorderTraversal(node.left) + self.preorderTraversal(node.right)

    def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        """If the traversals (with None markers) are equal, the trees are the same."""
        return self.preorderTraversal(p) == self.preorderTraversal(q)
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Combination Sum (Again)
# Attempt 1: bug - appends a live reference to `path`, which is later
# mutated by pop(), so valid_paths ends up holding emptied lists.
class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        valid_paths = []
        self.pathSearch(candidates, 0, target, [], valid_paths)
        return valid_paths

    def pathSearch(self, candidates, start, target, path, valid_paths):
        # Base case: target / remainder less than 0
        if target < 0:
            return
        # Base case: target == 0 -> path is valid
        if target == 0:
            valid_paths.append(path)  # should copy: list(path)
            return
        # Recursive case: iterate through candidates starting with start
        for i, cand in enumerate(candidates):
            path.append(cand)  # Add current search node to path
            self.pathSearch(candidates, i, target - cand, path, valid_paths)
            path.pop()  # Remove search node from path

# Attempt 2: same reference bug, with state moved onto the instance.
class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        self.valid_paths = []
        self.path = []
        self.pathSearch(candidates, 0, target)
        return self.valid_paths

    def pathSearch(self, candidates, start, target):
        if target < 0:
            return
        if target == 0:
            self.valid_paths.append(self.path)  # still appends a live reference
            return
        for i, cand in enumerate(candidates):
            self.path.append(cand)
            self.pathSearch(candidates, i, target - cand)
            self.path.pop()

# Attempt 3: copies the path correctly, but mixes the slice offset
# (candidates[start:]) with the loop index i, so earlier candidates
# can be revisited and duplicate orderings like [2, 3, 2] slip through.
class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        paths = []
        self.pathSearch(candidates, 0, target, [], paths)
        return paths

    def pathSearch(self, candidates, start, target, path, paths):
        if target < 0:
            return
        if target == 0:
            paths.append(list(path))
            return
        for i, cand in enumerate(candidates):
            path.append(cand)
            self.pathSearch(candidates[start:], i, target - cand, path, paths)
            path.pop()

# Attempt 4: working version - copy the path on success and recurse on
# candidates[i:] so each branch reuses itself and later candidates only.
class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        paths = []
        self.pathSearch(candidates, target, [], paths)
        return paths

    def pathSearch(self, candidates, target, path, paths):
        if target < 0:
            return
        if target == 0:
            paths.append(list(path))
            return
        for i, cand in enumerate(candidates):
            path.append(cand)
            self.pathSearch(candidates[i:], target - cand, path, paths)
            path.pop()

candidates = [2, 3, 6, 7]
sol = Solution()
sol.combinationSum(candidates, 7)

candidates = [2, 3, 5]
sol = Solution()
sol.combinationSum(candidates, 8)

# Note: [2, 3, 3] is the same combination as [3, 3, 2]
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Data Structures Review

LinkedList

Singly linked list with recursive methods.
class LinkedListNode:
    def __init__(self, data=None, next=None):
        self.data = data
        self.next = next

    def append(self, data) -> None:
        if self.next is None:  # Base case: no next node
            self.next = LinkedListNode(data)
        else:
            self.next.append(data)


class LinkedList:
    def __init__(self, head=None):
        self.head = head

    def append(self, data) -> None:
        if self.head:
            self.head.append(data)
        else:
            self.head = LinkedListNode(data)


a = LinkedListNode(1)
my_ll = LinkedList(a)
my_ll.append(2)
my_ll.append(3)

print(my_ll.head.data)
print(my_ll.head.next.data)
print(my_ll.head.next.next.data)
1 2 3
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Queue

FIFO!

- Enqueue: constant time - `O(1)`
- Dequeue: constant time - `O(1)`
- Peek: constant time - `O(1)`
- Space complexity = `O(n)`
class Queue:
    def __init__(self):
        self.front = None
        self.back = None

    def is_empty(self) -> bool:
        if self.front is None:
            return True
        else:
            return False

    def enqueue(self, data):
        new_node = LinkedListNode(data)
        if self.is_empty():
            self.front = new_node
        else:
            self.back.next = new_node
        self.back = new_node  # Send new node to back of queue

    def dequeue(self):
        """Remove node from front of list and return its value."""
        if not self.is_empty():  # Check if queue is empty
            dq = self.front  # Save current front of queue
            self.front = dq.next  # Set next node as new front
        else:
            return None  # Return None if queue is empty
        # Check if queue is empty after dequeue
        if self.is_empty():
            self.back = None  # Also clear out back
        return dq.data  # Return old front's data

    def peek(self):
        if not self.is_empty():
            return self.front.data
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Stack

LIFO!

- Push: constant time - `O(1)`
- Pop: constant time - `O(1)`
- Peek: constant time - `O(1)`
- Space complexity = `O(n)`
class Stack:
    def __init__(self):
        self.top = None

    def push(self, data):
        """Adds element to top of stack."""
        new_node = LinkedListNode(data)
        new_node.next = self.top
        self.top = new_node

    def pop(self):
        """Removes element from top of stack and returns its value."""
        if self.top:
            popped = self.top
            self.top = popped.next
            return popped.data
        else:
            return None

    def peek(self):
        """Return value of the stack's top element without removing it."""
        peeked = None
        if self.top:
            peeked = self.top.data
        return peeked
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
Binary Search Tree

First, I'm going to implement a BST from scratch, run DFS and BFS on it, then look for a good LeetCode problem to apply it to.
import math

# Perfect binary tree math
# Given 127 nodes, what is the height?
print(math.log(127 + 1, 2))
# Given a height of 8, how many nodes does it have?
print(2 ** 8 - 1)


class BSTNode:
    def __init__(self, val: int):
        self.val = val
        self.left = None
        self.right = None

    def __str__(self):
        # __str__ must return the string, not print it
        return f"<({self.left})-({self.val})-({self.right})>"

    def insert(self, val) -> None:
        if val < self.val:
            if self.left is None:
                self.left = BSTNode(val)
            else:
                self.left.insert(val)
        if val > self.val:
            if self.right is None:
                self.right = BSTNode(val)
            else:
                self.right.insert(val)

    def search(self, tgt: int):
        if self.val == tgt:
            return self
        elif tgt < self.val:
            if self.left is None:
                return False
            else:
                return self.left.search(tgt)
        else:
            if self.right is None:
                return False
            else:
                return self.right.search(tgt)

    def min(self):
        # Find minimum by going all the way left
        if self.left is None:  # Base case: no more left to go
            return self
        else:  # Recursive case: call left node's min method
            return self.left.min()


class BST:
    def __init__(self, root_val: int):
        self.root = BSTNode(root_val)

    def insert(self, val: int) -> None:
        self.root.insert(val)

    def search(self, val: int) -> BSTNode:
        return self.root.search(val)

    def min(self, node: BSTNode):
        return node.min()

    def delete(self, val: int) -> None:
        pass  # not yet implemented; see the sketch below
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
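The `delete` method above is still a stub. A possible node-based implementation (my sketch, using the standard successor-swap approach and the `min()` method already defined on `BSTNode`) could look like this:

```python
# Sketch of BST delete: returns the new subtree root so parents can relink.
def delete_node(node, val):
    if node is None:
        return None
    if val < node.val:
        node.left = delete_node(node.left, val)
    elif val > node.val:
        node.right = delete_node(node.right, val)
    else:
        # Zero or one child: promote the other side
        if node.left is None:
            return node.right
        if node.right is None:
            return node.left
        # Two children: replace with the in-order successor (right subtree's min)
        succ = node.right.min()
        node.val = succ.val
        node.right = delete_node(node.right, succ.val)
    return node
```

`BST.delete` could then just reassign the root: `self.root = delete_node(self.root, val)`.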
Traversals

- Breadth-first
- Depth-first (sketches of all three orders follow the BFS cell below)
  - Inorder: node visited in order (l -> n -> r)
  - Preorder: node visited before children (n -> l -> r)
  - Postorder: node visited after children (l -> r -> n)
from collections import deque

def breadth_first_traversal(root):
    if root is None:
        return []
    results = []
    q = deque()
    q.append(root)
    while len(q) > 0:
        node = q.popleft()
        results.append(node.val)
        # Put children into the queue
        if node.left:
            q.append(node.left)
        if node.right:
            q.append(node.right)
    return results
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
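Only the breadth-first traversal is coded above; the three depth-first orders listed earlier could be sketched like this (recursive versions, my addition):

```python
# Recursive depth-first traversals over BSTNode-style nodes.
def inorder(node, out=None):    # l -> n -> r
    out = [] if out is None else out
    if node:
        inorder(node.left, out)
        out.append(node.val)
        inorder(node.right, out)
    return out

def preorder(node, out=None):   # n -> l -> r
    out = [] if out is None else out
    if node:
        out.append(node.val)
        preorder(node.left, out)
        preorder(node.right, out)
    return out

def postorder(node, out=None):  # l -> r -> n
    out = [] if out is None else out
    if node:
        postorder(node.left, out)
        postorder(node.right, out)
        out.append(node.val)
    return out
```

On a BST, `inorder` returns the values in sorted order, which is a handy sanity check.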
Longest substring without repeating characters

On [LeetCode](https://leetcode.com/problems/longest-substring-without-repeating-characters/)

I believe I have a good method for solving this one now: using a queue as a sliding window. I can iterate through the string, adding each character to the queue. If the character is already somewhere in the queue, dequeue everything up to and including its first occurrence before adding it. Keep track of the max length of the queue and return it at the end.
from collections import deque

class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        longest = 0  # Track max queue length (renamed to avoid shadowing max())
        q = deque()  # Use queue as a sliding window
        for char in s:  # Iterate through string
            # If char is already in the queue, dequeue everything up to
            # and including its first occurrence
            if len(q) > 0 and char in q:
                ix = q.index(char)  # Find index of char; dequeue that many elements
                for _ in range(ix + 1):
                    q.popleft()
            q.append(char)  # Add char to queue
            # Compare length of queue to the running max
            if len(q) > longest:
                longest = len(q)
        return longest

s = "abcabcbb"
sol = Solution()
sol.lengthOfLongestSubstring(s)

d = deque(s)
for i in range(d.index("b") + 1):
    d.popleft()
d
_____no_output_____
MIT
ds/practice/daily_practice/21-05/21-05-10-130-mon.ipynb
tobias-fyi/vela
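`deque.index` is a linear scan, so the window update above costs O(k) per character. A possible single-pass O(n) variant (my sketch, not from the original log) remembers each character's last index instead of storing the window:

```python
# Hash-map sliding window sketch: O(n) time, one pass.
def length_of_longest_substring(s: str) -> int:
    last_seen = {}  # char -> most recent index
    start = 0       # left edge of the current window
    longest = 0
    for i, char in enumerate(s):
        if char in last_seen and last_seen[char] >= start:
            start = last_seen[char] + 1  # jump the window past the repeat
        last_seen[char] = i
        longest = max(longest, i - start + 1)
    return longest

print(length_of_longest_substring("abcabcbb"))  # 3
```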
Convolutions

To perform linear convolutions on images, use `image.convolve()`. The only argument to convolve is an `ee.Kernel`, which is specified by a shape and the weights in the kernel. Each pixel of the image output by `convolve()` is the linear combination of the kernel values and the input image pixels covered by the kernel. The kernels are applied to each band individually. For example, you might want to use a low-pass (smoothing) kernel to remove high-frequency information. The following illustrates a 15x15 low-pass kernel applied to a Landsat 8 image:

Install Earth Engine API and geemap

Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.

The following script checks whether the geemap package has been installed. If not, it installs geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.

**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend, enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
# Installs geemap package
import subprocess

try:
    import geemap
except ImportError:
    print('geemap package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])

# Checks whether this notebook is running on Google Colab
try:
    import google.colab
    import geemap.foliumap as emap
except:
    import geemap as emap

# Authenticates and initializes Earth Engine
import ee

try:
    ee.Initialize()
except Exception as e:
    ee.Authenticate()
    ee.Initialize()
_____no_output_____
MIT
tutorials/Image/06_convolutions.ipynb
ppoon23/geemap
Create an interactive map

The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function.
Map = emap.Map(center=[40, -100], zoom=4)
Map.add_basemap('ROADMAP')  # Add Google Map
Map
_____no_output_____
MIT
tutorials/Image/06_convolutions.ipynb
ppoon23/geemap
Add Earth Engine Python script
# Load and display an image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
Map.setCenter(-121.9785, 37.8694, 11)
Map.addLayer(image, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'input image')

# Define a boxcar or low-pass kernel.
# In the JavaScript API this takes a dictionary:
# boxcar = ee.Kernel.square({
#     'radius': 7, 'units': 'pixels', 'normalize': True
# })
# In Python, pass the arguments positionally:
boxcar = ee.Kernel.square(7, 'pixels', True)

# Smooth the image by convolving with the boxcar kernel.
smooth = image.convolve(boxcar)
Map.addLayer(smooth, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'smoothed')

Map.addLayerControl()
Map
_____no_output_____
MIT
tutorials/Image/06_convolutions.ipynb
ppoon23/geemap
The output of convolution with the low-pass filter should look something like Figure 1. Observe that the arguments to the kernel determine its size and coefficients. Specifically, with the `units` parameter set to pixels, the `radius` parameter specifies the number of pixels from the center that the kernel will cover. If `normalize` is set to true, the kernel coefficients will sum to one. If the `magnitude` parameter is set, the kernel coefficients will be multiplied by the magnitude (if `normalize` is also true, the coefficients will sum to `magnitude`). If there is a negative value in any of the kernel coefficients, setting `normalize` to true will make the coefficients sum to zero. A short Gaussian example after the next snippet illustrates these parameters.

Use other kernels to achieve the desired image processing effect. This example uses a Laplacian kernel for isotropic edge detection:
Map = emap.Map(center=[40, -100], zoom=4)

# Define a Laplacian, or edge-detection kernel.
laplacian = ee.Kernel.laplacian8(1, False)

# Apply the edge-detection kernel.
edgy = image.convolve(laplacian)
Map.addLayer(edgy, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'edges')
Map.setCenter(-121.9785, 37.8694, 11)

Map.addLayerControl()
Map
_____no_output_____
MIT
tutorials/Image/06_convolutions.ipynb
ppoon23/geemap
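To make the `radius`/`normalize`/`magnitude` parameters described above concrete, here is a small sketch (not part of the original tutorial) that builds a Gaussian low-pass kernel and inspects its weights; the radius and sigma values are arbitrary choices for illustration.

```python
# Sketch: a Gaussian low-pass kernel with explicit parameters.
# radius=3 pixels, sigma=2 (assumed values), normalized so the weights sum to 1.
gaussian = ee.Kernel.gaussian(3, 2, 'pixels', True)
print(gaussian.getInfo())

# Convolving with it smooths like the boxcar, but with distance-weighted averaging.
gauss_smooth = image.convolve(gaussian)
```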
Note the format specifier in the visualization parameters. Earth Engine sends display tiles to the Code Editor in JPEG format for efficiency; however, edge tiles are sent in PNG format to handle transparency of pixels outside the image boundary. When a visual discontinuity results, setting the format to PNG results in a consistent display. The result of convolving with the Laplacian edge detection kernel should look something like Figure 2.

There are also anisotropic edge detection kernels (e.g. Sobel, Prewitt, Roberts), the direction of which can be changed with `kernel.rotate()`; a small rotation example follows the fixed-kernel snippet below. Other low-pass kernels include a Gaussian kernel and kernels of various shapes with uniform weights. To create kernels with arbitrarily defined weights and shape, use `ee.Kernel.fixed()`. For example, this code creates a 9x9 kernel of 1's with a zero in the middle:
# Create a list of weights for a 9x9 kernel.
row = [1, 1, 1, 1, 1, 1, 1, 1, 1]  # renamed from `list` to avoid shadowing the builtin
# The center of the kernel is zero.
center_row = [1, 1, 1, 1, 0, 1, 1, 1, 1]
# Assemble a list of lists: the 9x9 kernel weights as a 2-D matrix.
rows = [row, row, row, row, center_row, row, row, row, row]
# Create the kernel from the weights.
kernel = ee.Kernel.fixed(9, 9, rows, -4, -4, False)
print(kernel.getInfo())
_____no_output_____
MIT
tutorials/Image/06_convolutions.ipynb
ppoon23/geemap
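As a quick illustration of the `kernel.rotate()` note above (my sketch, not from the original tutorial), an anisotropic Sobel kernel can be rotated in 90-degree increments to change its detection direction:

```python
# Sketch: rotate a Sobel edge kernel by one 90-degree increment.
sobel = ee.Kernel.sobel()        # default-direction Sobel kernel
sobel_rotated = sobel.rotate(1)  # one 90-degree rotation
print(sobel_rotated.getInfo())
```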