Source: https://blog.breezymind.com/2018/03/02/sklearn-feature_extraction-text-2/

```
import pandas as pd
import numpy as np

pd.options.mode.chained_assignment = None
np.random.seed(0)

from konlpy.tag import Mecab
mecab = Mecab()

from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity

# tokenizer: to extract index terms from a sentence, keep only words of selected POS tags
# (here proper/common nouns), then apply normalization/stemming
def tokenizer(raw, pos=["NNG","NNP"],
              stopword=['수','퀄리티','도시','분','전문','스타','년','원','월','화','수','목','금','시','앤','일','그램','문']):
    return [
        word for word, tag in mecab.pos(raw)
        if len(word) > 1 and tag in pos and word not in stopword
    ]

# test sentences
df = pd.read_csv("word2vec_wrangling.csv")
df.head()

import re

def preprocessing(text):
    # remove newline characters
    text = re.sub('\\\\n', ' ', text)
    # remove special characters
    # special characters and emoticons can sometimes carry meaning, but they are removed here.
    # text = re.sub('[?.,;:|\)*~`’!^\-_+<>@\#$%&-=#}※]', '', text)
    # keep only Korean, English, and digits; remove everything else.
    # text = re.sub('[^가-힣ㄱ-ㅎㅏ-ㅣa-zA-Z0-9]', ' ', text)
    # keep only Korean and English characters; remove everything else.
    text = re.sub('[^가-힣ㄱ-ㅎㅏ-ㅣa-zA-Z]', ' ', text)
    return text

%time rawdata = df['Content_txt'].apply(preprocessing)

vectorize = CountVectorizer(
    tokenizer=tokenizer,
    min_df=10
    # for a cleaner example, ignore words that only appear about once
    # min_df = 0.01 : ignore words that appear in less than 1% of the documents
    # min_df = 10   : ignore words that appear in fewer than 10 documents
    # max_df = 0.80 : ignore words that appear in more than 80% of the documents
    # max_df = 10   : ignore words that appear in more than 10 documents
)

# returns a Document-Term Matrix (DTM) of feature (candidate keyword) counts per sentence
X = vectorize.fit_transform(rawdata)
print(
    'fit_transform, (sentence {}, feature {})'.format(X.shape[0], X.shape[1])
)
# fit_transform, (sentence 5, feature 7)

print(type(X))
# <class 'scipy.sparse.csr.csr_matrix'>
print(X.toarray())
# [[0, 1, 2, 0, 0, 0, 1],
#  [0, 1, 1, 0, 0, 0, 2],
#  [1, 0, 0, 2, 1, 1, 0],
#  [1, 0, 0, 1, 0, 0, 0],
#  [0, 0, 0, 3, 1, 1, 0]]

# array of features extracted from the sentences
features = vectorize.get_feature_names()

# continuing from Park Jo-eun's tutorial
vectorizer = vectorize
feature_vector = X
feature_vector.shape

vocab = vectorizer.get_feature_names()
print(len(vocab))
vocab[:10]

# per-review word counts; 0 means the word does not appear in that review.
pd.DataFrame(feature_vector[:10].toarray(), columns=vocab).head()

# summing the word vectors above gives each word's total count over the whole corpus.
# inspect the vectorized features
# check how many words are in the bag of words
dist = np.sum(feature_vector, axis=0)
df_freq = pd.DataFrame(dist, columns=vocab)
df_freq

# transpose rows and columns, then sort by frequency
df_too_much = df_freq.T.sort_values(by=0, ascending=False).head(50)
df_too_much
df_too_much.T

too_much_list = list(df_too_much.T.columns.values)
print(too_much_list)

# df_freq_T = df_freq.T.reset_index()
# df_freq_T.columns = ["course", "freq"]
# # to deduplicate course titles by their leading tokens, replace the instructor's name in the title with an empty string
# df_freq_T["course_find"] = df_freq_T["course"].str.replace("홍정모의", "")
# df_freq_T["course_find"] = df_freq_T["course_find"].apply(lambda x : " ".join(x.split()[:4]))
# df_freq_T.sort_values(["course_find", "freq"], ascending=False).head(10)
```

TF-IDF

```
from sklearn.feature_extraction.text import TfidfTransformer

transformer = TfidfTransformer(smooth_idf=False)
transformer

%%time
feature_tfidf = transformer.fit_transform(feature_vector)
feature_tfidf.shape

feature_tfidf

# for each row, the one-hot vector over the full vocabulary is weighted by TF-IDF
tfidf_freq = pd.DataFrame(feature_tfidf.toarray(), columns=vocab)
tfidf_freq.head()

df_tfidf = pd.DataFrame(tfidf_freq.sum())

df_tfidf_top = df_tfidf.sort_values(by=0, ascending=False)
df_tfidf_top.head(50)

df_tfidf_bottom = df_tfidf.sort_values(by=0, ascending=True)
df_tfidf_bottom.head(10)

# to inspect words that are elided in the middle of the display
# pick out the least useful words
top_list = []
for t in df_tfidf_top.index[:50]:
    top_list.append(t)
print(top_list)

['요가','필라테스','다이어트','']

list_too_much1 = ['운동', '요가', '필라테스', '댄스', '다이어트', '헬스', '발레', '번지', '플라잉', '살사', '니스', '복싱', '수업', '여자', '패들', '아쿠아', '일상', '시간', '보드', '서핑', '수영', '피트', '강사', '바디', '자이로', '오늘', '피닝', '남자', '레슨', '취미', '점핑', '여행', '회원', '건강', '맛집', '라틴', '키즈', '교육', '가능', '크로스', '소통', '바이크', '감사', '기구', '부산', '토닉', '교정', '스포츠', '그룹', '후프', '진행', '시작', '검도', '볼링', '등산', '수련', '클라이밍', '클럽', '개인', '학원', '사진', '서프', '데일리', '트레이닝', '센터', '스튜디오', '상담', '수영장', '에어로빅', '무용', '드럼', '동요', '피티', '트램', '탕가', '조깅', '공연', '파운드', '야사', '폴린', '광주', '수중', '피지', '동호회', '사람', '킥복싱', '휘트', '대구', '카페', '지도자', '선수', '핫요가', '플라이', '타요', '기부', '하루', '행복', '일산', '사이클', '체형']

list_too_much_tf_idf = ['운동', '요가', '필라테스', '댄스', '번지', '다이어트', '복싱', '발레', '니스', '수업', '플라잉', '헬스', '살사', '여자', '피트', '시간', '강사', '일상', '점핑', '취미', '바디', '교육', '오늘', '레슨', '남자', '키즈', '회원', '보드', '맛집', '가능', '교정', '라틴', '여행', '건강', '피닝', '진행', '서핑', '그룹', '플라이', '잠실', '개인', '부산', '스튜디오', '시작', '기구', '자이로', '학원', '상담', '센터', '감사', '피지', '소통', '지도자', '과정', '수련', '무용', '대구', '자격증', '클럽', '수영', '홍대', '볼링', '일산', '트레이닝', '동요', '동호회', '토닉', '사진', '영상', '크로스', '오픈', '사람', '데일리', '방송', '광주', '선생', '자세', '마음', '체형', '문의', '행복', '사랑', '카페', '근력', '동작', '탕가', '클래스', '드럼', '친구', '카톡', '무료', '전화', '스포츠', '공연', '야사', '할인', '하루', '에어로빅', '등록', '준비']

list_too_much_200 = list_too_much1 + list_too_much_tf_idf
bulyongeo_list = list(set(list_too_much_200))
print(bulyongeo_list)
```
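The `min_df`/`max_df` comments above describe how `CountVectorizer` prunes rare and overly common terms. The following is a minimal hedged illustration of that behaviour on a made-up toy corpus; the documents, thresholds, and variable names are illustrative and not taken from the notebook's data.

```
from sklearn.feature_extraction.text import CountVectorizer

# tiny made-up corpus just to show the effect of min_df / max_df
docs = [
    "yoga pilates diet",
    "yoga dance diet",
    "yoga swimming",
    "yoga boxing diet",
]

# min_df=2 drops terms appearing in fewer than 2 documents (pilates, dance, swimming, boxing);
# max_df=0.9 drops terms appearing in more than 90% of documents (yoga, present in all 4)
cv = CountVectorizer(min_df=2, max_df=0.9)
X = cv.fit_transform(docs)

print(sorted(cv.vocabulary_))  # ['diet'] is the only surviving term
print(X.toarray())             # [[1], [1], [0], [1]]
```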
```
import numpy as np
import pandas as pd
import wisps
import wisps.simulations as wispsim
import matplotlib.pyplot as plt
from astropy.io import fits, ascii
from astropy.table import Table
%matplotlib inline

bigf = wisps.get_big_file()
bigf = bigf[bigf.snr1 >= 3]

# 3dhst data
from astropy.io import ascii
hst3d = ascii.read('/users/caganze/3dhst_master.phot.v4.1/3dhst_master.phot.v4.1.cat').to_pandas()
# hst3d

def magnitude(flux):
    return 25.0 - 2.5*np.log10(flux)

def mag_err(combined):
    # combined is a pandas table with flux and flux_error
    if np.isnan(combined['flux']):
        return np.nan
    else:
        return abs(0.434*2.5*combined['flux_error']/combined['flux'])

for k in ['F125', 'F606', 'F140', 'F160', 'F814']:
    hst3d[k] = magnitude(hst3d['f_{}W'.format(k)])

pnts = pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl')
maglimits = pd.DataFrame([x.mag_limits for x in pnts])
maglimits['imexp'] = [x.imag_exptime for x in pnts]
#exposure_times=[np.nanmean(x.exposure_time) for x in pnts]
maglimits['pointing'] = [x.name for x in pnts]
maglimits['survey'] = [x.survey for x in pnts]

maglimits_wisp = maglimits[maglimits.survey == 'wisps']
maglimits_hst3d = maglimits[maglimits.survey != 'wisps']

wisp_ph = fits.open('/users/caganze/WISPPhotometricCatalog_v1.2.fits')
wt = Table(wisp_ph[1].data)

bigf_3d = bigf[bigf.survey == 'HST3D']
bigf_w = bigf[bigf.survey != 'HST3D']

mag_grid = np.linspace(15, 30, 1000)

fig, ax = plt.subplots(ncols=2, figsize=(8, 4))
h = ax[0].hist(bigf_3d.F140.values, range=[19, 30], bins=32, histtype='step', density=False)
h = ax[0].hist(hst3d.F140.values, range=[15, 30], bins=32, histtype='step', density=False)
h = ax[1].hist(bigf_3d.F160.values, range=[15, 30], bins=32, histtype='step', density=False)
h = ax[1].hist(hst3d.F160.values, range=[15, 30], bins=32, histtype='step', density=False)
plt.tight_layout()

fig, (ax, ax1) = plt.subplots(ncols=2, nrows=2, figsize=(8, 4), sharex='col')
h = ax[0].hist(bigf_3d.F140.values, range=[19, 30], bins=32, histtype='step', density=True, label='with spectra')
h = ax[0].hist(hst3d.F140.values, range=[19, 30], bins=32, histtype='step', density=True, label='all')
kde0 = wisps.kernel_density(bigf_3d.F140[bigf_3d.F140.between(15, 30)], bw_method=0.1).pdf(mag_grid)
kde1 = wisps.kernel_density(hst3d.F140[hst3d.F140.between(15, 30)], bw_method=0.1).pdf(mag_grid)
ax[0].plot(mag_grid, kde0)
ax[0].plot(mag_grid, kde1)
ax1[0].plot(mag_grid, kde1/kde0)

h = ax[1].hist(bigf_3d.F160.values, range=[19, 30], bins=32, histtype='step', density=True, label='with spectra')
h = ax[1].hist(hst3d.F160.values, range=[19, 30], bins=32, histtype='step', density=True, label='all')
kde2 = wisps.kernel_density(bigf_3d.F160[bigf_3d.F160.between(15, 30)], bw_method=0.1).pdf(mag_grid)
kde3 = wisps.kernel_density(hst3d.F160[hst3d.F160.between(15, 30)], bw_method=0.1).pdf(mag_grid)
ax[1].plot(mag_grid, kde2)
ax[1].plot(mag_grid, kde3)
ax1[1].plot(mag_grid, kde3/kde2)

for x in maglimits_hst3d.F140.dropna().values:
    ax[0].axvline(x, alpha=0.1)
    ax[1].axvline(x, alpha=0.1)
for x in maglimits_hst3d.F160.dropna().values:
    ax1[0].axvline(x, alpha=0.1)
    ax1[1].axvline(x, alpha=0.1)

plt.tight_layout()
ax1[0].set_xlabel('F140W', fontsize=18)
ax1[1].set_xlabel('F160W', fontsize=18)
ax[0].legend()
for a in ax1:
    a.set_yscale('log')

#maglimits_hst3d.F140.dropna().values

fig, ax = plt.subplots(ncols=3, figsize=(12, 4))
h = ax[0].hist(bigf_w.F110.values, range=[19, 30], bins=32, histtype='step')
h = ax[0].hist(np.array(wt['MAG_AUTO_F110W']), range=[19, 30], bins=32, histtype='step')
h = ax[1].hist(bigf_w.F140.values, range=[19, 27], bins=32, histtype='step')
h = ax[1].hist(np.array(wt['MAG_AUTO_F140W']), range=[19, 30], bins=32, histtype='step')
h = ax[2].hist(bigf_w.F160.values, range=[19, 30], bins=32, histtype='step')
h = ax[2].hist(np.array(wt['MAG_AUTO_F160W']), range=[19, 30], bins=32, histtype='step')
plt.tight_layout()

fig, (ax, ax1) = plt.subplots(ncols=3, nrows=2, figsize=(12, 6), sharex='col')
h = ax[0].hist(bigf_w.F110.values, range=[19, 30], bins=32, histtype='step', density=True)
h = ax[0].hist(np.array(wt['MAG_AUTO_F110W']), range=[19, 30], bins=32, histtype='step', density=True)
wkde0 = wisps.kernel_density(bigf_w.F110[bigf_w.F110.between(15, 30)], bw_method=0.1).pdf(mag_grid)
wkde1 = wisps.kernel_density(np.array(wt['MAG_AUTO_F110W'])[np.logical_and(np.array(wt['MAG_AUTO_F110W']) > 15, np.array(wt['MAG_AUTO_F110W']) < 30)], bw_method=0.1).pdf(mag_grid)
ax[0].plot(mag_grid, wkde0)
ax[0].plot(mag_grid, wkde1)
ax1[0].plot(mag_grid, wkde1/wkde0)

h = ax[1].hist(bigf_w.F140.values, range=[19, 30], bins=32, histtype='step', density=True)
h = ax[1].hist(np.array(wt['MAG_AUTO_F140W']), range=[19, 30], bins=32, histtype='step', density=True)
wkde2 = wisps.kernel_density(bigf_w.F140[bigf_w.F140.between(15, 30)], bw_method=0.1).pdf(mag_grid)
wkde3 = wisps.kernel_density(np.array(wt['MAG_AUTO_F140W'])[np.logical_and(np.array(wt['MAG_AUTO_F140W']) > 15, np.array(wt['MAG_AUTO_F140W']) < 30)], bw_method=0.1).pdf(mag_grid)
ax[1].plot(mag_grid, wkde2)
ax[1].plot(mag_grid, wkde3)
ax1[1].plot(mag_grid, wkde3/wkde2)

h = ax[-1].hist(bigf_w.F160.values, range=[19, 30], bins=32, histtype='step', density=True)
h = ax[-1].hist(np.array(wt['MAG_AUTO_F160W']), range=[19, 30], bins=32, histtype='step', density=True)
wkde4 = wisps.kernel_density(bigf_w.F160[bigf_w.F160.between(15, 30)], bw_method=0.1).pdf(mag_grid)
wkde5 = wisps.kernel_density(np.array(wt['MAG_AUTO_F160W'])[np.logical_and(np.array(wt['MAG_AUTO_F160W']) > 15, np.array(wt['MAG_AUTO_F160W']) < 30)], bw_method=0.1).pdf(mag_grid)
ax[-1].plot(mag_grid, wkde4)
ax[-1].plot(mag_grid, wkde5)
ax1[-1].plot(mag_grid, wkde5/wkde4)

for x in maglimits_wisp.F110.dropna().values:
    ax[0].axvline(x, alpha=0.1)
    ax1[0].axvline(x, alpha=0.1)
for x in maglimits_wisp.F140.dropna().values:
    ax[1].axvline(x, alpha=0.1)
    ax1[1].axvline(x, alpha=0.1)
for x in maglimits_wisp.F160.dropna().values:
    ax[-1].axvline(x, alpha=0.1)
    ax1[-1].axvline(x, alpha=0.1)

plt.tight_layout()
ax1[0].set_xlabel('F110W', fontsize=18)
ax1[1].set_xlabel('F140W', fontsize=18)
ax1[-1].set_xlabel('F160W', fontsize=18)
for a in ax1:
    a.set_yscale('log')
```
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import json
import io
```

# Import data from JSON files to dataframes

##### 1. Load JSON files and convert to three dataframes

```
business_json_file = 'business.json'
user_json_file = 'user.json'
review_json_file = 'review.json'

business = []
user = []
review = []

for line in open(business_json_file, 'r'):
    business.append(json.loads(line))
for line in open(user_json_file, 'r'):
    user.append(json.loads(line))
for line in open(review_json_file, 'r'):
    review.append(json.loads(line))

business_df = pd.DataFrame.from_records(business)
user_df = pd.DataFrame.from_records(user)
review_df = pd.DataFrame.from_records(review)

business_df.head(3)
user_df.head(3)
review_df.head(3)
```

# Exploratory Data Analysis

### 1. Business analytics

```
business_df_state = business_df.groupby('state').count()
business_df_state
business_df_state.sort_values('business_id', ascending=False)[:10]

business_df_category = business_df.groupby('categories').count()
business_df_category.sort_values('business_id', ascending=False)[:10]

business_df_stars = business_df.groupby('stars').count()
business_df_stars
```

#### (1) Visualize the stars

```
# bar chart of the number of businesses per star rating
business_df_stars['business_id'].plot(kind='bar')
plt.show()
```

### 2. User analytics

```
user_df['yelp_age'] = user_df['yelping_since'].apply(lambda x: x[:4])
user_df.groupby('yelp_age').count().sort_values('user_id', ascending=False)[0:10]
```

### 3. Review analytics

# Get some test data (review)

```
business_test = business_df[:5000]
review_test = review_df[:5000]
user_test = user_df[:5000]
```

# Sentiment Analysis (review)

#### 1. Import packages

```
from nltk.tokenize import word_tokenize, RegexpTokenizer
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
```

#### 2. Use NLTK to extract the reviews' sentiment

```
review_test[:3]

sid = SentimentIntensityAnalyzer()

def sentiment_extractor(x):
    ss = sid.polarity_scores(x)
    sentiment = {}
    for k in ss:
        sentiment[k] = ss[k]
    return sentiment

review_test['sentiment'] = review_test['text'].apply(sentiment_extractor)
new_review_test = review_test.loc[:, ['business_id', 'user_id', 'cool', 'funny', 'useful', 'stars', 'sentiment']]

# separate the sentiment scores into their own columns
new_review_test['negative'] = new_review_test['sentiment'].apply(lambda x: x['neg'])
new_review_test['neutral'] = new_review_test['sentiment'].apply(lambda x: x['neu'])
new_review_test['positive'] = new_review_test['sentiment'].apply(lambda x: x['pos'])
new_review_test['compound'] = new_review_test['sentiment'].apply(lambda x: x['compound'])

new_review_test[:5000].to_csv('review_sentiment.csv', sep='\t', index=False)
```
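For reference, a minimal standalone check of what `sentiment_extractor` wraps: VADER's `polarity_scores` returns the `neg`/`neu`/`pos` proportions and a normalized `compound` score in [-1, 1]. The sample sentence below is made up, and the snippet assumes the `vader_lexicon` NLTK resource can be downloaded.

```
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer

nltk.download('vader_lexicon')  # one-time download of the VADER lexicon

sid = SentimentIntensityAnalyzer()
scores = sid.polarity_scores("The food was great but the service was painfully slow.")
print(scores)  # e.g. {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...}
```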
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All). Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:

```
NAME = ""
COLLABORATORS = ""
```

---

<!--NOTEBOOK_HEADER-->
*This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks); content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).*

<!--NAVIGATION-->
< [Command Reference](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/A.00-Appendix-A.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Cleaning pdb files](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/C.00-Appendix-C.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/B.00-Appendix-B.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>

# Residue Parameter Files

Parameter files describing the chemical and structural properties of each residue are found in the PyRosetta package in the `database/chemical/residue_type_sets` directory. The full-atom residue parameters are stored in the `/fa_standard/residue_types` directory. As an example, the parameter file for threonine is shown below.

```
from IPython.display import Image
Image('./Media/res-param-1.png', width='700')

from IPython.display import Image
Image('./Media/res-param-2.png', width='700')
```

The centroid residue parameters can be found in the `/centroid/residue_types` directory. The centroid parameter file for threonine is shown below.

```
from IPython.display import Image
Image('./Media/centroid-res-param.png', width='700')
```

<!--NAVIGATION-->
< [Command Reference](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/A.00-Appendix-A.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Cleaning pdb files](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/C.00-Appendix-C.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/B.00-Appendix-B.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
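If you want to inspect a parameter file directly rather than via the screenshots above, a hedged sketch follows: it only reads a text file from the database directory described in this appendix. The local database path and the exact file name (`THR.params`) are assumptions for illustration and may differ in your installation.

```
from pathlib import Path

# Path to the PyRosetta database directory; adjust to your installation
# (this location and the exact file layout are assumptions for illustration).
database = Path("pyrosetta/database")
residue_types = database / "chemical" / "residue_type_sets" / "fa_standard" / "residue_types"

# Search recursively so the exact subfolder holding THR.params does not matter.
matches = list(residue_types.rglob("THR.params"))

if matches:
    # First lines list atom names, Rosetta atom types, partial charges, bonds, and chi angles.
    print(matches[0].read_text()[:500])
else:
    print("THR.params not found; check the database path")
```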
# Development of Deep Learning Guided Genetic Algorithm for Material Design Optimization

Kuanlin Chen, PhD student in the Schulman lab<br> Advisor: Rebecca Schulman, PhD<br> Johns Hopkins University

**Keywords: Machine Learning, Deep Learning, Computer Vision, Numeric Simulation, Multi-Objective Optimization**

***

#### Summary:

We developed a genetic algorithm to efficiently search through a large parameter space for designing our digit automata. The algorithm starts with an initial population of automata designs generated from a random seed. Each design within the population is then simulated to find all possible geometric outputs under the sixteen actuation combinations, and each output is scored with a deep learning model. During scoring, to fully utilize each image, every image is rotated by twenty different angles, and the rotation with the highest digit score is selected to represent the final class and score of that image. We thus obtain a 2D array documenting which digits are formed and the score for each digit.

We next developed a custom loss function to evaluate the performance of each design. We define the loss function as follows:

$Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 0}^{i = 9}[1.001 - (score\_for\_digit\_i)]$

The loss function captures both the **diversity** and the **similarity to real digits** of the digits formed. Designs whose outputs resemble a larger number of high-quality digits are more likely to be preserved. During the selection stage, we eliminate 80% of the designs in the population, keeping only the 20% of designs with the lowest loss. These designs are sent into a mutation function to repopulate a new generation.

For the mutation function, we use the **single-parent mutation method**, in which the genetic information of each descendant comes from a single design that survived the previous selection. During mutation, each design has a fifty percent chance of randomly updating the strip segment lengths while preserving the actuator pattern; otherwise the actuator pattern is mutated, with half of the pattern entries randomly replaced. Each surviving design generates four descendants, so the population returns to its original size after every round of selection and mutation. Finally, the algorithm iterates this cycle of population generation, selection, and mutation until the generation limit is reached, and outputs the optimized designs.

For the even-digit and odd-digit automaton searches, we slightly tweaked the loss function and the mutation function to obtain fabricable results. We first added a rule to the mutation function to keep new designs within a reasonable number of patterning steps, avoiding designs that are overly complex and cannot be patterned. We wrote a custom fabrication-step calculation function, `fab_steps_strip_requires`, which sums the number of unique actuator types within each layer, and we eliminate mutations that require more than six fabrication steps. Because this limits the complexity of the outputs that can be formed, we searched separately for an even-digit automaton and an odd-digit automaton, changing the loss function for each search, and derived the final optimized outputs.
Odd-digit search:

$Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 1, 3, 5, 7, 9}[1.001 - (score\_for\_digit\_i)]$

Even-digit search:

$Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 0, 2, 4, 6, 8}[1.001 - (score\_for\_digit\_i)]$

```
# Package Importing
import csv, math, os, time, copy, matplotlib, datetime, keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from scipy import io as spio
from scipy.ndimage import gaussian_filter
from scipy.stats import bernoulli
from math import log10, floor
from skimage import transform, exposure

print(keras.__version__)  # 2.4.3
print(tf.__version__)     # 2.2.0
```

#### Set Up Material Simulation Environment

```
%run ./Numeric_Simulation_of_Material_Behavior.ipynb

# Sample Design
segment_lengths_ex_four_types = [938, 954, 1022, 843, 931, 722, 702, 655, 1066, 947]
segment_identities_ex_four_types = [[2,3,2,3,2,3,4,0,1,4],[4,4,3,1,3,4,4,1,3,2]]
sampleStrip = ActuatorStrip(segment_lengths_ex_four_types, segment_identities_ex_four_types, four_t_rocs, four_t_ctls)

cnn_digit_model = load_model("Deep_Learning_Classifier_v3.h5")
```

### Multi-Objective Optimization Loss Function

$Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 0}^{i = 9}[1.001 - (score\_for\_digit\_i)]$

This loss function captures both the **diversity** and the **similarity to real digits** of the digits formed. Designs whose outputs resemble a larger number of high-quality digits are more likely to be preserved.

```
def loss_on_dig_sim_var_v2(strip, rotation = 20):
    """
    - loss on digit similarity and variety
    - default 20 rotations
    - The final design score list is stored in a np array, documenting the max score for each digit.
    - Reconsider np.log, as log would greatly lock onto one design for a perfect 1 or 7, since log(1-1) = -inf.

    The loss is calculated as follows:
        digits_for_score = np.log(1 - digit_recs) * 5000 * digits_form
        loss = np.sum(digits_for_score)
    We take the log of each score's distance from one and scale by the number of digits the design can form.
    Designs that can form more digits with higher similarity get a lower loss.
    """
    strip.analysis(rotation = rotation);
    output = strip.selected
    dig_score = np.zeros(shape=(10, ))
    if output.keys():
        for i in output.keys():
            dig_score[i] = output[i]["score"]
    # use 1.001 - dig_score because -inf often appears when a score of 1 is in dig_score
    loss = np.sum(np.log(1.001 - dig_score) * 5000 * np.count_nonzero(dig_score))
    return loss

loss_on_dig_sim_var_v2(sampleStrip, rotation = 4)
# -2080099.4708855439; if this returns 0, something went wrong, restart the kernel
```

### Updated Mutation Function controlling fabrication complexity

```
def fab_steps_strip_requires(identities):
    top_steps = np.size(np.unique(identities[0]))
    bottom_steps = np.size(np.unique(identities[1]))
    return top_steps + bottom_steps

def sp_mutation_maxfab_v1(parent_strip, max_segments, min_segment_length,\
                          max_segment_length, max_fab_steps, rocs, ctls, num_actuator_types):
    """
    - single parent mutation
    - v1 seems to be unable to produce new identities
    - v2 now has a 40% new-identities chance, and can retain 80% of the original genetic info
    """
    # 50% chance of a length change
    if np.random.random() < .5:
        # print("length change")
        new_length = np.random.random(size = (max_segments, )) * (max_segment_length - min_segment_length) + min_segment_length
        offspring_strip = ActuatorStrip(new_length, parent_strip.identities, rocs, ctls)
    # 50% chance of an actuator change, mutating 50% of the identities
    else:
        # print("idt change")
        satisfy_max_fab = False
        while satisfy_max_fab == False:
            identities_mutation_rate = .5
            sz = int(np.floor(len(parent_strip.identities[0]) * identities_mutation_rate))
            new_identities = copy.deepcopy(parent_strip.identities)
            for i in range(2):
                gene_num = np.random.choice(len(parent_strip.identities[0]), size=(sz,), replace = False)
                new_genes = np.random.choice(num_actuator_types + 1, size = (sz,))
                for j in range(sz):
                    new_identities[i][gene_num[j]] = new_genes[j]
            if (fab_steps_strip_requires(new_identities) <= max_fab_steps):
                satisfy_max_fab = True
        offspring_strip = ActuatorStrip(parent_strip.segment_lengths, new_identities, rocs, ctls)
    return offspring_strip
```

### Mutation Function

We use the **single-parent mutation method**, in which the genetic information of each descendant comes from a single design that survived the previous selection. During mutation, each design has a fifty percent chance of randomly updating the strip segment lengths while preserving the actuator pattern; otherwise the actuator pattern is mutated, with half of the pattern entries randomly replaced. Each surviving design generates four descendants, so the population returns to its original size after every round of selection and mutation.

```
def sp_mutation_v2(parent_strip, max_segments, min_segment_length,\
                   rocs, ctls, num_actuator_types):
    """
    - single parent mutation
    - v1 seems to be unable to produce new identities
    - v2 now has a 40% new-identities chance, and can retain 80% of the original genetic info
    """
    # 50% chance of a length change
    if np.random.random() < .5:
        # print("length change")
        new_length = np.random.random(size = (max_segments, )) * min_segment_length + min_segment_length
        offspring_strip = ActuatorStrip(new_length, parent_strip.identities, rocs, ctls)
    # 50% chance of an actuator change, mutating 50% of the identities
    else:
        # print("idt change")
        identities_mutation_rate = .5
        sz = int(np.floor(len(parent_strip.identities[0]) * identities_mutation_rate))
        new_identities = copy.deepcopy(parent_strip.identities)
        for i in range(2):
            gene_num = np.random.choice(len(parent_strip.identities[0]), size=(sz,), replace = False)
            new_genes = np.random.choice(num_actuator_types + 1, size = (sz,))
            for j in range(sz):
                new_identities[i][gene_num[j]] = new_genes[j]
        offspring_strip = ActuatorStrip(parent_strip.segment_lengths, new_identities, rocs, ctls)
    return offspring_strip
```

### Combined Genetic Algorithm

```
def strip_optimizer_gen_alg_v3(rocs, ctls, loss_function, mutation_function, save_filename,\
                               max_segments = 10, min_segment_length = 600,\
                               max_segment_length = 2000, max_fab_steps = 6,\
                               population_size = 20, generation_limit = 2500):
    """
    Genetic Algorithm for Strip Optimizer
    1. Create 100 (or 20 for debugging) different random initial designs.
    2. Score them based on the loss function and get a sum of the population score.
    3. Kill the 80% of the population whose scores are in the worst 80%.
    4. Mutate the survivors and rescore them.
    5. Repeat until the generation limit is reached.
    """

    def plot_best_fives(population_loss, strip_population, SAVE = False):
        """
        plots the 5 best strip designs for overall visualization
        """
        best5s = np.argsort(population_loss)[:5]
        for best_num in np.flip(best5s):
            best_strip = strip_population[best_num]
            print("Actuator segments are " + str(best_strip.identities))
            print("Segment lengths are " + str(best_strip.segment_lengths))
            print("Loss is " + str(population_loss[best_num]))
            # best_strip.plot_input_and_all(rotation = 20, save = False)
            best_strip.plot_input_and_selected(rotation = 20, save = SAVE)

    def save_best_tens(filename, mode, population_loss, strip_population, gen):
        """
        save the 10 best strip designs of each generation.
        """
        with open(filename, mode) as f:
            f.write("Generation {}\n".format(gen))
            best10s = np.argsort(population_loss)[:10]
            for best_num in np.flip(best10s):
                best_strip = strip_population[best_num]
                formed = list(best_strip.selected.keys())
                f.write("Segments: {}\nIdentities: {}\nFormed: {}\n\n".format(str(best_strip.segment_lengths), str(best_strip.identities), str(formed)))

    itr = 0;
    num_actuator_types = len(rocs[0][0]) - 1

    # generate initial population
    population_lengths = np.random.random(size = (population_size, max_segments)) * (max_segment_length - min_segment_length) + min_segment_length
    population_identities = np.random.randint(0, high = (num_actuator_types + 1), size = (population_size, 2, max_segments))
    for i in range(population_size):
        found_fabbable = False
        #print("Generating Step " + str(i))
        while found_fabbable == False:
            population_identities[i] = np.random.randint(0, high = (num_actuator_types + 1), size = (2, max_segments))
            if fab_steps_strip_requires(population_identities[i]) <= max_fab_steps:
                found_fabbable = True

    strip_population = [ActuatorStrip(population_lengths[num], population_identities[num], four_t_rocs, four_t_ctls) for num in range(population_size)]
    [strip.generate_curves() for strip in strip_population]
    population_loss = [loss_function(strip) for strip in strip_population]
    [strip.save_imgs() for strip in strip_population]

    # plot 5 best individuals for visualization
    plot_best_fives(population_loss, strip_population, SAVE = False)
    # save 10 best individuals
    save_best_tens(save_filename, "w", population_loss, strip_population, itr)

    while itr < generation_limit:
        itr += 1
        # evolution and mutation
        print("Evolution {}".format(itr))
        # kill 80% of the population
        survivors = np.argsort(population_loss)[:int(np.floor(population_size/5))]
        print("Survivors: " + str(survivors))
        print("Survivor loss: " + str([population_loss[i] for i in survivors]))
        # mutation: create 4 offspring for each survivor
        new_population = [mutation_function(strip_population[survivor_id], max_segments, min_segment_length,\
                                            max_segment_length, max_fab_steps, rocs, ctls, num_actuator_types) for survivor_id in survivors for _ in range(4)]
        [strip.generate_curves() for strip in new_population]
        # add survivors to the new population
        [new_population.append(strip_population[survivor_id]) for survivor_id in survivors];
        new_loss = [loss_function(strip) for strip in new_population]
        [strip.save_imgs() for strip in new_population]
        # plot 5 best individuals for visualization
        if itr == generation_limit:
            plot_best_fives(new_loss, new_population, SAVE = True)
        else:
            plot_best_fives(new_loss, new_population, SAVE = False)
        save_best_tens(save_filename, "a+", new_loss, new_population, itr)
        print("delta population loss: %.4f"%(np.sum(new_loss)-np.sum(population_loss)))
        population_loss = new_loss; strip_population = new_population
```
# Visualizing Logistic Regression

```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('data/', one_hot=True)
trainimg = mnist.train.images
trainlabel = mnist.train.labels
testimg = mnist.test.images
testlabel = mnist.test.labels
```

# Define the graph

```
# Parameters of Logistic Regression
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 5

# Create Graph for Logistic Regression
x = tf.placeholder("float", [None, 784], name="INPUT_x")
y = tf.placeholder("float", [None, 10], name="OUTPUT_y")
W = tf.Variable(tf.zeros([784, 10]), name="WEIGHT_W")
b = tf.Variable(tf.zeros([10]), name="BIAS_b")

# Activation, Cost, and Optimizing functions
pred = tf.nn.softmax(tf.matmul(x, W) + b)  # Softmax
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accr = tf.reduce_mean(tf.cast(corr, "float"))
init = tf.initialize_all_variables()
```

# Launch the graph

```
sess = tf.Session()
sess.run(init)
```

# Summary writer

```
summary_path = '/tmp/tf_logs/logistic_regression_mnist'
summary_writer = tf.summary.FileWriter(summary_path, graph=sess.graph)
print ("Summary writer ready")
```

# Run

```
print ("Summary writer ready")
for epoch in range(training_epochs):
    sum_cost = 0.
    num_batch = int(mnist.train.num_examples/batch_size)
    # Loop over all batches
    for i in range(num_batch):
        randidx = np.random.randint(trainimg.shape[0], size=batch_size)
        batch_xs = trainimg[randidx, :]
        batch_ys = trainlabel[randidx, :]
        # Fit training using batch data
        feeds = {x: batch_xs, y: batch_ys}
        sess.run(optm, feed_dict=feeds)
        # Compute average loss
        sum_cost += sess.run(cost, feed_dict=feeds)
    avg_cost = sum_cost / num_batch
    # Display logs per epoch step
    if epoch % display_step == 0:
        train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys})
        print ("Epoch: %03d/%03d cost: %.9f train_acc: %.3f" % (epoch, training_epochs, avg_cost, train_acc))
print ("Optimization Finished!")

# Test model
test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel})
print (("Test Accuracy: %.3f") % (test_acc))

float(epoch)
```

### Run the command line

##### tensorboard --logdir=/tmp/tf_logs/logistic_regression_mnist

### Open http://localhost:6006/ in your web browser

<img src="images/tsboard/logistic_regression_mnist.png">
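The notebook targets the TensorFlow 1.x graph/session API (`tf.placeholder`, `tf.Session`, the MNIST tutorials module), which is not available in TensorFlow 2.x. For comparison, a hedged sketch of the same softmax-regression model against the TF 2.x Keras API; it assumes TensorFlow 2.x is installed, and the hyperparameters simply mirror the ones above.

```
import tensorflow as tf

# Load MNIST via the Keras dataset helper instead of the removed tutorials module
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0

# A single dense layer with softmax is multinomial logistic regression
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="softmax", input_shape=(784,))
])
model.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)

# TensorBoard callback plays the role of the FileWriter above
tb = tf.keras.callbacks.TensorBoard(log_dir="/tmp/tf_logs/logistic_regression_mnist")

model.fit(x_train, y_train, batch_size=100, epochs=20, callbacks=[tb], verbose=2)
print(model.evaluate(x_test, y_test, verbose=0))  # [test loss, test accuracy]
```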
### Closed-loop control of a deformable mirror (DM) #### using SVD pseudo-inversion of DM influence matrix #### and low-pass filtering of the eigenvalues for improved convergence stability Hardware used: * Thorlabs WFS-150 Shack-Hartmann sensor * Mirao52e deformable mirror This code uses Thorlabs 64-bit WFS driver installed via [Thorlabs WFS Software](https://www.thorlabs.com/software_pages/ViewSoftwarePage.cfm?Code=WFS). Tested using Anaconda 64-bit Python 2.7 distribution, Windows 10. Author: [Nikita Vladimirov](mailto:nikita.vladimirov@mdc-berlin.de) ``` import ctypes as ct import matplotlib.pyplot as plt import numpy as np %matplotlib inline import sys sys.path.append('./lib') from Mirao52_utils import * #define home dir of the code: homeDir = 'C:/Users/Nikita/Documents/GitHub/AO-toolkit/' #load the WFS DLL: WFS = ct.windll.WFS_64 #Load the Mirao52e DLL: DM = ct.windll.LoadLibrary('./lib/x64/mirao52e.dll') byref = ct.byref #Set the data types compatible with C DLL count = ct.c_int32() deviceID = ct.c_int32() instrumentListIndex = ct.c_int32() inUse = ct.c_int32() instrumentName = ct.create_string_buffer("", 256) instrumentSN = ct.create_string_buffer("", 256) resourceName = ct.create_string_buffer("", 256) IDQuery = ct.c_bool() resetDevice = ct.c_bool() instrumentHandle = ct.c_ulong() pupilCenterXMm = ct.c_double() pupilCenterYMm = ct.c_double() pupilDiameterXMm = ct.c_double() pupilDiameterYMm = ct.c_double() exposureTimeAct = ct.c_double() masterGainAct = ct.c_double() dynamicNoiseCut = ct.c_int32() calculateDiameters = ct.c_int32() cancelWavefrontTilt = ct.c_int32() errorMessage = ct.create_string_buffer("", 512) errorCode = ct.c_int32() pixelFormat = ct.c_int32() pixelFormat.value = 0 #currently 8 bit only camResolIndex = ct.c_int32() spotsX = ct.c_int32() spotsY = ct.c_int32() wavefrontType = ct.c_int32() limitToPupil = ct.c_int32() #Set the WFS parameter values MAX_SPOTS_X = 50 MAX_SPOTS_Y = 40 arrayWavefront = np.zeros((MAX_SPOTS_Y,MAX_SPOTS_X),dtype = np.float32) instrumentListIndex.value = 0 #0,1,2,, if multiple instruments connected #Configure WFS camera camResolIndex.value = 1 # camResolIndex values: # For WFS instruments: # Index Resolution # 0 1280x1024 # 1 1024x1024 # 2 768x768 # 3 512x512 # 4 320x320 # For WFS10 instruments: # Index Resolution # 0 640x480 # 1 480x480 # 2 360x360 # 3 260x260 # 4 180x180 # For WFS20 instruments: # Index Resolution # 0 1440x1080 # 1 1080x1080 # 2 768x768 # 3 512x512 # 4 360x360 # 5 720x540, bin2 # 6 540x540, bin2 # 7 384x384, bin2 # 8 256x256, bin2 # 9 180x180, bin2 #Set pupil pupilCenterXMm.value = 0 #mm pupilCenterYMm.value = 0 #mm pupilDiameterXMm.value = 4.5 #mm pupilDiameterYMm.value = 4.5 #mm #Set spot calculation params dynamicNoiseCut.value = 1 calculateDiameters.value = 0 cancelWavefrontTilt.value = 1 wavefrontType.value = 0 # This parameter defines the type of wavefront to calculate. # Valid settings for wavefrontType: # 0 Measured Wavefront # 1 Reconstructed Wavefront based on Zernike coefficients # 2 Difference between measured and reconstructed Wavefront # Note: Function WFS_CalcReconstrDeviations needs to be called prior to this function in case of Wavefront type 1 and 2. limitToPupil.value = 1 # This parameter defines if the Wavefront should be calculated based on all detected spots or only within the defined pupil. 
# Valid settings: # 0 Calculate Wavefront for all spots # 1 Limit Wavefront to pupil interior #Check how many WFS devices are connected WFS.WFS_GetInstrumentListLen(None,byref(count)) print('WFS sensors connected: ' + str(count.value)) #Select a device and get its info devStatus = WFS.WFS_GetInstrumentListInfo(None,instrumentListIndex, byref(deviceID), byref(inUse), instrumentName, instrumentSN, resourceName) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_GetInstrumentListInfo():' + str(errorMessage.value)) else: print('WFS deviceID: ' + str(deviceID.value)) print('in use? ' + str(inUse.value)) print('instrumentName: ' + str(instrumentName.value)) print('instrumentSN: ' + str(instrumentSN.value)) print('resourceName: ' + str(resourceName.value)) if not inUse.value: devStatus = WFS.WFS_init(resourceName, IDQuery, resetDevice, byref(instrumentHandle)) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_init():' + str(errorMessage.value)) else: print('WFS has been initialized. Instrument handle: ' +str(instrumentHandle.value)) else: print('WFS already in use') #Configure WFS camera devStatus = WFS.WFS_ConfigureCam(instrumentHandle, pixelFormat, camResolIndex, byref(spotsX), byref(spotsY)) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_ConfigureCam():' + str(errorMessage.value)) else: print('WFS camera configured') print('SpotsX: ' + str(spotsX.value)) print('SpotsY: ' + str(spotsY.value)) #Set pupil devStatus = WFS.WFS_SetPupil(instrumentHandle, pupilCenterXMm, pupilCenterYMm, pupilDiameterXMm, pupilDiameterYMm) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_SetPupil():' + str(errorMessage.value)) else: print('WFS pupil set') nSensorPoints = spotsX.value * spotsY.value def WFS_getAveragedWavefront(N = 10, lamb = 0.633): ''' Measure the average wavefront from WF sensor and return it as 2D array Parameters: N - number of measurements (default 10) lamb - wavelengh of the light to convert (default 0.633 micron, HeNe laser) Returns: meanWavefront - wavefront 2D-array, units: lambdas PV - peak to value, units: lambdas RMS - root-mean-square, units: lambdas ''' meanWavefront = np.zeros((spotsY.value,spotsX.value)) for t in range(N): #Take a spotfield image, with auto-exposure devStatus = WFS.WFS_TakeSpotfieldImageAutoExpos(instrumentHandle, byref(exposureTimeAct), byref(masterGainAct)) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_TakeSpotfieldImageAutoExpos():' + str(errorMessage.value)) #calculate spot centroids devStatus = WFS.WFS_CalcSpotsCentrDiaIntens(instrumentHandle, dynamicNoiseCut, calculateDiameters) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_CalcSpotsCentrDiaIntens():' + str(errorMessage.value)) #calculate spot to reference deviations devStatus = WFS.WFS_CalcSpotToReferenceDeviations(instrumentHandle, cancelWavefrontTilt) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_CalcSpotToReferenceDeviations():' + str(errorMessage.value)) #calculate the wavefront devStatus = WFS.WFS_CalcWavefront(instrumentHandle, 
wavefrontType, limitToPupil, arrayWavefront.ctypes.data) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_CalcWavefront():' + str(errorMessage.value)) #save the data as numpy array meanWavefront[:,:] += arrayWavefront[:spotsY.value,:spotsX.value].copy() #Calculate stats, units: lambda meanWavefront = meanWavefront/N/lamb meanWavefront = meanWavefront - np.nanmean(meanWavefront) #normalize by the mean PV = np.nanmax(meanWavefront) - np.nanmin(meanWavefront) RMS = np.sqrt(np.nanmean(meanWavefront**2,axis=(0,1))) return (meanWavefront, PV, RMS) #open the DM session dm_nActuators = 52 dm_trigger = ct.c_int32() dm_trigger.value = 0 dm_status = ct.c_int32() assert DM.mro_open(byref(dm_status)), errors[dm_status.value] def safe_voltage(cmd): "returns 1 if command data in the safe zone, between -1 and 1 Volt for individual actuators, and < 25 Volt sum of absolute values, \ returns 0 if unsafe" if cmd.min() >= -1.0 and cmd.max() <= 1.0 and np.sum(np.abs(cmd)) < 25.0: return 1 else: return 0 %%time #Measure the poke matrix of DM flat_path = homeDir + 'python/flat/FLAT_MIRAO_0274-01.mro' dm_command_flat = read_Mirao_commandFile(flat_path,DM) dm_command = np.zeros(dm_nActuators, dtype=np.float64) Varray = np.array([-0.25, 0.25]) #Volts, this is applied to every actuator sequentially PokeMatrix = np.zeros((len(Varray),nSensorPoints, dm_nActuators)) for iVolts in range(len(Varray)): for iActuator in range(dm_nActuators): dm_command[:] = 0 dm_command[iActuator] = Varray[iVolts] #cmd = dm_command + dm_command_flat cmd = dm_command #apply poke to DM actuator if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure the wavefront WF,_,_ = WFS_getAveragedWavefront(N=40) WF = WF - np.nanmean(WF) #WF is measured to arbitrary constant, so normalize by the mean WF = WF.reshape(nSensorPoints) # 2D -> 1D PokeMatrix[iVolts,:,iActuator] = np.nan_to_num(WF)/Varray[iVolts] #rescale to unit voltage vector and remember #reset DM to flat shape assert DM.mro_applySmoothCommand(dm_command_flat.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] #Check PokeMatrix iActuator = 29 plt.imshow(PokeMatrix[0,:,iActuator].reshape((29,29)),aspect = 'equal') plt.title('Single column of poke matrix \n (reshaped to sensor dimensions)') plt.colorbar() PokeMatrix_ave = PokeMatrix.mean(axis=0) # Plot mean poke matrices with +V and -V pokes plt.imshow(PokeMatrix_ave.T,aspect = 'auto') plt.xlabel('Sensor points') plt.ylabel('Actuator') plt.title('Mean poke matrix') plt.colorbar() #do the SVD pseudo-inverse now U, s, V = np.linalg.svd(PokeMatrix_ave, full_matrices=False) #plot the eigenvalues plt.plot(s) plt.grid('on') plt.xlabel('index') plt.ylabel('eigenvalue') print(s) #show the DM orthogonal modes nPrimaryModes = 52 dm_modes = np.zeros((nPrimaryModes,spotsX.value,spotsY.value)) for ind in range(nPrimaryModes): dm_modes[ind,:,:] = U[:,ind].reshape((spotsX.value,spotsY.value)) #plot first n x m modes n = 4 m = 13 zmin = -0.1 zmax = 0.1 fig, ax = plt.subplots(m,n, figsize = (3*n,3*m)) for i in range(n): for j in range(m): im = ax[j,i].imshow(dm_modes[i+j*n],vmin = zmin, vmax = zmax, aspect='equal') ax[j,i].text(0,-1, 'mode = ' + str(i+j*n),fontsize=10) fig.colorbar(im, ax = ax[j,i]) #Inverse PokeMatrix: # low-pass the inverse s_inv = 1.0/s s_thresh = s[0]/40 s_inv[s < s_thresh] = 0 print('Cut-off modes:' + 
str(np.where(s_inv == 0)[0])) S_inv = np.diag(s_inv) PokeMatrix_inv = np.dot(V.T,np.dot(S_inv, U.T)) #Set arbitrary DM target profile for test: DM_diam = 15.0 #mm x = np.linspace(-DM_diam/2,DM_diam/2,spotsX.value) y = np.linspace(-DM_diam/2,DM_diam/2,spotsY.value) X, Y = np.meshgrid(x, y) def test_gauss(x,y,sigma = 2): return np.exp(-x**2/(2*sigma**2) - y**2/(2*sigma**2)) def test_defocus(x,y,radius = 2000): #units: mm z = np.sqrt(radius**2 - x**2 - y**2) return z def test_flat(x,y): return np.ones(x.shape) DM_target_profile = test_flat(X,Y) #crop the corners to None values cropMask = np.ones(DM_target_profile.shape) crop_radius = DM_diam/2 for i in range(len(x)): for j in range(len(y)): if x[i]**2 + y[j]**2 >= crop_radius**2: cropMask[i,j] = None DM_target_profile = DM_target_profile * cropMask #DM_target_profile = DM_target_profile - np.nanmin(DM_target_profile) #Find the command for target profile, by using pseudo-inverse DM_target_command = np.dot(PokeMatrix_inv, np.nan_to_num(DM_target_profile).flatten()) DM_cmd = DM_target_command fig, ax = plt.subplots(1,3, figsize = (15,3.5)) # target shape im = ax[0].imshow(DM_target_profile, interpolation='nearest') PV = np.nanmax(DM_target_profile) - np.nanmin(DM_target_profile) ax[0].text(0,-1, 'Target profile: PV='"{:2.1f}".format(PV),fontsize=12) fig.colorbar(im, ax = ax[0]) # DM command map, 2D im = ax[1].imshow(DM_voltage_to_map(DM_cmd), interpolation='nearest') ax[1].text(0,-0.75, 'DM actuator voltages',fontsize=12) fig.colorbar(im, ax = ax[1]) # DM predicted profile: apply the command to the forward matrix A to see the pedicted shape WF_predicted = np.dot(PokeMatrix_ave,DM_cmd).reshape((spotsX.value,spotsY.value)) im = ax[2].imshow(WF_predicted, interpolation='nearest') PV = WF_predicted.max() - WF_predicted.min() RMS = np.sqrt(np.mean(WF_predicted**2)) ax[2].text(0,-1, 'Predicted WF: PV='+"{:2.1f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=12) fig.colorbar(im, ax = ax[2]) # Check if command is withing DM allowed limits: V1abs_max = 1.0 #Volt Vall_abs_max = 25.0 #Volt if np.abs(DM_cmd).max() >= V1abs_max: print('Voltage too high for individual actuators') if np.sum(np.abs(DM_cmd)) >= Vall_abs_max: print('Total absolute voltage is too high for all actuators') #Open-loop flattening of the WF fig, ax = plt.subplots(1,2, figsize = (10,3.5)) # measure the 'flat' WF without DM control. 
# The WF is not very flat because of imperfect optics and alignment # reset DM to flat shape assert DM.mro_applySmoothCommand(dm_command_flat.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] # measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) # plot im = ax[0].imshow(WF, interpolation='nearest') ax[0].text(0,-1, 'Measured WF (flat DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=12) fig.colorbar(im, ax = ax[0]) #Try to make WF flatter by applying open-loop DM control cmd = DM_target_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) #plot im = ax[1].imshow(WF, interpolation='nearest') ax[1].text(0,-1, 'Measured WF (open-loop DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=12) fig.colorbar(im, ax = ax[1]) #reset DM back to flat shape assert DM.mro_applySmoothCommand(dm_command_flat.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] ``` So the result of **open-loop** control is not very satisfactory - the command for flattening the WF actually makes the WF somewhat less flat than before. An iterative closed-loop control (with **low-pass filtering** of the eigenvalues) should do better - see below. ``` %%time #Closed-loop control #set DM to open-loop shape assert DM.mro_applySmoothCommand(DM_target_command.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] #measure initial WF WF, _, _ = WFS_getAveragedWavefront(N=20) DM_target_profile = DM_target_profile - np.nanmean(DM_target_profile) #normalize target profile DM_CL_command = DM_target_command #set initial command n_iterations = 100 gain = -0.2 PV_RMS_timelapse = np.zeros((n_iterations,2)) WF_timelapse = np.zeros((n_iterations,WF.shape[0],WF.shape[1])) for it in range(n_iterations): #Calculate the diff between measured and target WFs: WF_diff = WF - DM_target_profile WF_diff = WF_diff - np.nanmean(WF_diff) #normalize #Find the control command using pseudo-inverse DM_command_increment = np.dot(PokeMatrix_inv, np.nan_to_num(WF_diff).flatten()) #Try to make WF flatter by applying the updated command DM_CL_command = DM_CL_command + DM_command_increment*gain cmd = DM_CL_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Voltage is outside of safe range. Reducing gain.' gain /= 2.0 #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) WF_timelapse[it,:,:] = WF PV_RMS_timelapse[it,:] = (PV,RMS) plt.plot(PV_RMS_timelapse[:,0]/5,label='PV/5') plt.plot(PV_RMS_timelapse[:,1],label='RMS') plt.legend() plt.xlabel('iteration') plt.grid('on') plt.title('Timelapse of errors, PV and RMS, by iterations') ``` Note that the RMS error stays low once it has converged to a small value, so errors do not accumulate as they would in the basic (unfiltered-eigenvalue) algorithm.
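To see why the eigenvalue cut-off stabilizes the loop, here is a hardware-free sketch of the same update law applied to a synthetic poke matrix. This is an illustration under stated assumptions, not part of the original experiment: the names `A_poke` and `filtered_pinv`, the matrix sizes, and the noise level are all invented for the example.

```
# Hardware-free sketch of the closed-loop update used above, on a synthetic poke matrix.
import numpy as np

rng = np.random.RandomState(0)
n_meas, n_act = 200, 52
A_poke = rng.randn(n_meas, n_act)        # synthetic sensor response per unit actuator voltage
A_poke[:, -5:] *= 1e-3                   # a few nearly unobservable actuator directions

def filtered_pinv(A, rel_thresh=1.0/40):
    """Truncated-SVD pseudo-inverse: drop singular values below s_max * rel_thresh."""
    U, s, Vt = np.linalg.svd(A, full_matrices=False)
    s_inv = np.where(s >= s[0] * rel_thresh, 1.0 / s, 0.0)
    return np.dot(Vt.T, np.dot(np.diag(s_inv), U.T))

A_inv = filtered_pinv(A_poke)            # analogous to PokeMatrix_inv above

target = np.zeros(n_meas)                # flat target wavefront
cmd = 0.1 * rng.randn(n_act)             # start from a non-flat DM shape
gain = -0.2                              # remove ~20% of the estimated excess command per step
noise = 0.02

for it in range(50):
    wf = np.dot(A_poke, cmd) + noise * rng.randn(n_meas)   # "measured" wavefront
    cmd = cmd + gain * np.dot(A_inv, wf - target)          # same form as the DM_CL_command update
    if it % 10 == 0:
        print("iter %02d  RMS %.4f" % (it, np.sqrt(np.mean((wf - target) ** 2))))
```

Without the threshold in `filtered_pinv`, the weakly observed directions are multiplied by very large `1/s` factors, so measurement noise gets amplified into the command at every iteration; that is the error accumulation the filtered version avoids.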
``` plt.imshow(WF_timelapse[-1,:,:]) plt.colorbar() #Plot target, open-loop and closed-loop WFs fig, ax = plt.subplots(1,3, figsize = (15,3.5)) # target WF im = ax[0].imshow(DM_target_profile, interpolation='nearest') PV = np.nanmax(DM_target_profile) - np.nanmin(DM_target_profile) ax[0].text(0,-1, 'Target WF - flat \n PV=' + "{:2.1f}".format(PV) + ', RMS=' + "{:1.2f}".format(0),fontsize=14) fig.colorbar(im, ax = ax[0]) # open-loop WF cmd = DM_target_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) #plot im = ax[1].imshow(WF, interpolation='nearest') ax[1].text(0,-1, 'Measured WF (open loop DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=14) fig.colorbar(im, ax = ax[1]) # closed-loop WF cmd = DM_CL_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) #plot im = ax[2].imshow(WF, interpolation='nearest') ax[2].text(0,-1, 'Measured WF (closed loop DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=14) fig.colorbar(im, ax = ax[2]) #close the WFS session devStatus = WFS.WFS_close(instrumentHandle) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_close():' + str(errorMessage.value)) else: print('WFS closed') #close the DM session assert DM.mro_close(byref(dm_status)), errors[dm_status.value] ```
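One small robustness note on the deformable-mirror session above: if any cell raises while the loop is running, the WFS and DM handles stay open. A sketch of how the same teardown calls could be guarded (function and variable names as in the notebook, error handling simplified):

```
# Sketch: release the hardware even if an experiment cell fails part-way through.
try:
    pass  # measurement / control cells go here
finally:
    WFS.WFS_close(instrumentHandle)
    DM.mro_close(byref(dm_status))
```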
# Statistical Relational Learning with `pslpython` As we've seen, there are several ways to work with graph-based data, including: SPARQL queries, graph algorithm traversals, ML embedding, etc. Each of these methods makes trade-offs in terms of: * computational costs as the graph size scales * robustness when there is uncertainty or conflicting information in the graph * formalism (i.e., *analytic solutions*) vs. empirical approaches (i.e., data-driven, machine learning) One way to visualize some of these trade-offs is in the following diagram: <img src="https://github.com/DerwenAI/kglab/blob/main/docs/tradeoffs.png?raw=true" width="400"/> Note in the top/right corner of the diagram that a relatively formal category of graph-based approaches is called [*statistical relational learning*](https://www.cs.umd.edu/srl-book/). The gist is that so much of the *network analysis* that we want to perform can be described mathematically as [*markov networks*](https://en.wikipedia.org/wiki/Markov_random_field), in terms of probabilistic models. Sometimes these can be quite computationally expensive; for example, hedge funds on Wall Street tend to burn lots of cloud computing on Markov models. They are *robust* in terms of being able to work well even with lots of missing or conflicting data, and the *formalism* implies that we can infer mathematical guarantees from the analysis. That's quite the opposite of deep learning models, which are great at predicting sequences of things, but terrible at providing guarantees. Clearly, there's been much emphasis in industry recently that equates "artificial intelligence" with "deep learning", although we are also recognizing [*diminishing returns*](https://derwen.ai/s/zf43#33) for methods that rely purely on ever-larger data rates and ever-larger ML models. One path forward will be to combine machine learning with use of *structured knowledge* (i.e., KGs) such that we can avoid "boiling the oceans" with purely data-driven approaches when in so many use cases we can leverage domain expertise. In this section we'll consider one form of statistical relational learning called [*probabilistic soft logic*](https://psl.linqs.org/) (PSL) which is essentially a kind of "fuzzy logic" for graphs that has interesting computational qualities. Whereas many kinds of formal graph analysis (e.g., "traveling salesman problem") are provably hard and quite expensive in practice, PSL can be solved with a *convex optimization* (e.g., like so many machine learning algorithms). Consider this: we can describe "rules" about nodes and relations in a KG, then assign probabilities to specific instances of those rules that are found within our graph. If the probabilities are all *zero* then the system is consistent. As some of the assigned probabilities are increased, some of the rules become inconsistent. How high (i.e., optimal) a set of probabilities can we assign while still keeping the system consistent? Alternatively, if we apply a set of rules, then how "far away" (probabilistically speaking) is a graph from being logically consistent? This comes in quite handy when we want to combine *semantic technologies* and *machine learning*, or rather when we have explicit rules plus lots of empirical data. Data quality is a persistent problem, so we can leverage PSL to identify which parts of the graph seem the least "logically consistent", and therefore need some review and curation.
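Before walking through the example, it may help to see the arithmetic behind "how far away is the graph from being consistent". The toy snippet below is only an illustration of the hinge-loss relaxation that PSL is built on; it does not use the `pslpython` API, and the truth values are made up.

```
# Toy illustration of the hinge-loss idea behind PSL (not the pslpython API).
# Soft truth values live in [0, 1]; a weighted rule "body -> head" contributes
#     weight * max(0, truth(body) - truth(head)) ** 2        (for a "^2" rule)
# and inference minimizes the sum of these convex penalties.

def luk_and(a, b):
    # Lukasiewicz conjunction, used to combine the atoms in a rule body
    return max(0.0, a + b - 1.0)

def distance_to_satisfaction(body, head):
    return max(0.0, body - head)

# Hypothetical ground rule: Neighbors(A, L) & Neighbors(B, L) -> Knows(A, B), weight 20
neighbors_a_l = 1.0      # observed: A lives at L
neighbors_b_l = 1.0      # observed: B lives at L
knows_a_b = 0.3          # current soft truth value being inferred

body = luk_and(neighbors_a_l, neighbors_b_l)
penalty = 20.0 * distance_to_satisfaction(body, knows_a_b) ** 2
print(penalty)           # 20 * (1.0 - 0.3)**2 = 9.8; raising Knows(A, B) lowers the penalty
```

Summing such penalties over every ground rule gives a convex objective, which is why PSL inference scales so much better than exact logical satisfiability checks.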
## RDF representation of the "simple acquaintances" example One of the examples given for PSL is called [*simple acquaintances*](https://github.com/linqs/psl-examples/tree/master/simple-acquaintances), which uses a graph of some friends, where they live, what interests they share, and then infers who probably knows whom. Some people explicitly do or do not know each other, while other "knows" relations can be inferred based on whether two people have lived in the same place or share a common interest. The objective is to build a PSL model for [*link prediction*](https://en.wikipedia.org/wiki/Link_prediction), to evaluate the annotations in the friend graph. In this case, we'll assume that the "knows" relations have been added from a questionable source (e.g., some third-party dataset) so we'll measure a subset of these relations and determine their likelihood. NB: this is really useful for cleaning up annotations in a large graph! First, let's load a KG which is an RDF representation of this example, based on a simple extension of the [`foaf`](http://www.foaf-project.org/) vocabulary: ``` import kglab namespaces = { "acq": "http://example.org/stuff/", "foaf": "http://xmlns.com/foaf/0.1/", "rdfs": "http://www.w3.org/2000/01/rdf-schema#", } kg = kglab.KnowledgeGraph( name = "LINQS simple acquaintance example for PSL", base_uri = "http://example.org/stuff/", language = "en", namespaces = namespaces, ) kg.load_ttl("acq.ttl") ``` Take a look at the `acq.ttl` file to see the people and their relations. Here's a quick visualization of the graph: ``` VIS_STYLE = { "foaf": { "color": "orange", "size": 5, }, "acq":{ "color": "blue", "size": 30, }, } g = kg.vis_pyvis(notebook=True, style=VIS_STYLE) g.barnes_hut() g.show("tmp.html") ``` Also, let's serialize this in TTL/Turtle format, then view the resulting `foo.ttl` file to see the relations organized in a way that's probably more readable: ``` kg.save_ttl("foo.ttl") ``` ## Loading a PSL model Next, we'll use the [`pslpython`](https://pypi.org/project/pslpython/) library implemented in Python (atop Java core software) to define three *predicates* (i.e., relations, similar to those in RDF) which are: `Neighbors`, `Likes`, `Knows` ``` from pslpython.model import Model from pslpython.partition import Partition from pslpython.predicate import Predicate from pslpython.rule import Rule model = Model("simple acquaintances") # add predicates predicate = Predicate("Neighbors", closed=True, size=2) model.add_predicate(predicate) predicate = Predicate("Likes", closed=True, size=2) model.add_predicate(predicate) predicate = Predicate("Knows", closed=False, size=2) model.add_predicate(predicate) ``` Next, we'll add a set of probabilistic [*rules*](https://psl.linqs.org/wiki/2.2.1/Rule-Specification.html), all with different weights applied: 1. "Two people who live in the same place are **more** likely to know each other" 2. "Two people who don't live in the same place are **less** likely to know each other" 3. "Two people who share a common interest are **more** likely to know each other" 4. "Two people who both know a third person are **more** likely to know each other" 5.
"Otherwise, any pair of people are **less** likely to know each other" ``` model.add_rule(Rule("20: Neighbors(P1, L) & Neighbors(P2, L) & (P1 != P2) -> Knows(P1, P2) ^2")) model.add_rule(Rule("5: Neighbors(P1, L1) & Neighbors(P2, L2) & (P1 != P2) & (L1 != L2) -> !Knows(P1, P2) ^2")) model.add_rule(Rule("10: Likes(P1, L) & Likes(P2, L) & (P1 != P2) -> Knows(P1, P2) ^2")) model.add_rule(Rule("5: Knows(P1, P2) & Knows(P2, P3) & (P1 != P3) -> Knows(P1, P3) ^2")) model.add_rule(Rule("5: !Knows(P1, P2) ^2")) ``` Finally we'll add a *commutative* rule such that "If Person 1 knows Person 2, then Person 2 also knows Person 1." ``` model.add_rule(Rule("Knows(P1, P2) = Knows(P2, P1) .")) ``` To initialize the model, we'll clear any pre-existing data from each of the predicates: ``` for predicate in model.get_predicates().values(): predicate.clear_data() ``` And we'll define a simple helper function, to format a unique URL within our `acq` vocabulary (a simple extension of `foaf`) based on the purely numeric identifiers used within PSL: ``` def get_person_id (url): return url.replace("http://example.org/stuff/person_", "") ``` Let's query our KG to populate data into the `Neighbors` predicate in the PSL model, based on `foaf:based_near` that represents living near the same locations: ``` predicate = model.get_predicate("Neighbors") query = """ SELECT DISTINCT ?p1 ?p2 WHERE { ?p1 foaf:based_near ?l . ?p2 foaf:based_near ?l . } """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if p1 != p2: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2]) ``` Note: these data points are *observations*, i.e., empirical support for the probabilistic model. Then let's query our KG to populate data into the `Likes` predicate in the PSL model, based on shared interests in `foaf:topic_interest` topics: ``` predicate = model.get_predicate("Likes") query = """ SELECT DISTINCT ?p1 ?p2 WHERE { ?p1 foaf:topic_interest ?t . ?p2 foaf:topic_interest ?t . } """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if p1 != p2: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2]) ``` Just for kicks, let's take a look at the internal representation of a PSL predicate, which is a `pandas` DataFrame: ``` predicate = model.get_predicate("Likes") predicate.__dict__ ``` Now we'll load data from the `dat/psl/knows_targets.txt` CSV file, which is a list of `foaf:knows` relations in our graph that we want to analyze. Each of these has an assumed value of `1.0` (true) or `0.0` (false). 
Our PSL analysis will assign probabilities for each so that we can compare which annotations appear to be suspect and require further review: ``` import csv import pandas as pd import rdflib as rdf targets = [] rows_list = [] predicate = model.get_predicate("Knows") with open("dat/psl/knows_targets.txt", "r") as f: reader = csv.reader(f, delimiter="\t") for i, row in enumerate(reader): p1, p2 = row targets.append((p1, p2)) p1_url = rdf.URIRef("http://example.org/stuff/person_" + p1) p2_url = rdf.URIRef("http://example.org/stuff/person_" + p2) if (p1_url, kg.get_ns("foaf").knows, p2_url) in kg._g: truth = 1.0 predicate.add_data_row(Partition.TRUTH, [p1, p2], truth_value=truth) predicate.add_data_row(Partition.TARGETS, [p1, p2]) rows_list.append({ 0: p1, 1: p2, "truth": truth}) elif (p1_url, kg.get_ns("acq").wantsIntro, p2_url) in kg._g: truth = 0.0 predicate.add_data_row(Partition.TRUTH, [p1, p2], truth_value=truth) predicate.add_data_row(Partition.TARGETS, [p1, p2]) rows_list.append({ 0: p1, 1: p2, "truth": truth}) else: print("UNKNOWN", p1, p2) df_dat = pd.DataFrame(rows_list) ``` These data points are considered to be *ground atoms*, each with a *truth* value set initially. These are also our *targets* for which nodes in the graph to analyze based on the rules. Next, we'll add `foaf:knows` observations which are in the graph, although not among our set of targets. This provides more evidence for the probabilistic inference. Note that since RDF does not allow for representing probabilities on relations, we're using the `acq:wantsIntro` to represent a `foaf:knows` with a `0.0` probability: ``` predicate = model.get_predicate("Knows") query = """ SELECT ?p1 ?p2 WHERE { ?p1 foaf:knows ?p2 . } ORDER BY ?p1 ?p2 """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if (p1, p2) not in targets: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2], truth_value=1.0) query = """ SELECT ?p1 ?p2 WHERE { ?p1 acq:wantsIntro ?p2 . } ORDER BY ?p1 ?p2 """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if (p1, p2) not in targets: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2], truth_value=0.0) ``` Now we're ready to optimize the PSL model – this may take a few minutes to run: ``` PSL_OPTIONS = { "log4j.threshold": "INFO" } results = model.infer(additional_cli_optons=[], psl_config=PSL_OPTIONS) ``` Let's examine the results. We'll get a `pandas` DataFrame describing the targets in the `Knows` predicate: ``` predicate = model.get_predicates()["KNOWS"] df = results[predicate] df.head() ``` Now we can compare the "truth" values from our targets, with their probabilities from the inference provided by the PSL model: ``` dat_val = {} for index, row in df_dat.iterrows(): p1 = row[0] p2 = row[1] key = (int(p1), int(p2)) dat_val[key] = row["truth"] for index, row in df.iterrows(): p1 = row[0] p2 = row[1] key = (int(p1), int(p2)) df.at[index, "diff"] = row["truth"] - dat_val[key] df ``` In other words, which of these "knows" relations in the graph appears to be suspect, based on our rules plus the other evidence in the graph? Let's visualize a histogram of how the inferred probabilities are distributed: ``` df["diff"].hist(); ``` In most cases there is little or no difference (`0.0 <= d <= 0.2`) in the probabilities for the target relations. However, some appear to be off by a substantial (`-0.8`) amount, which indicates problems in that part of our graph data. 
The following rows show where these `foaf:knows` annotations in the graph differ significantly from their truth values predicted by PSL: ``` for index, row in df.iterrows(): p1 = row[0] p2 = row[1] diff = row["diff"] if diff < -0.2: print("?? acq:person_{} foaf:knows acq:person_{}".format(int(row[0]), int(row[1]))) ``` Speaking of [*human-in-the-loop*](https://derwen.ai/d/human-in-the-loop) practices for AI, using PSL along with a KG seems like a great way to leverage machine learning, so that people can focus on the parts of the graph that have the most uncertainty. That, in turn, probably provides the best ROI for the time and cost invested in curation. --- ## Exercises **Exercise 1:** Build a PSL model that tests the "noodle vs. pancake" rules used in an earlier example with our recipe KG. Which recipes should be annotated differently? **Exercise 2:** Try representing one of the other [PSL examples](https://github.com/linqs/psl-examples/) using RDF and `kglab`.
``` import os import sys import time import numpy as np import pandas as pd from scipy import misc import matplotlib.pyplot as plt from scipy import sparse from scipy.sparse import csgraph from scipy import linalg from pysheds.grid import Grid from scipy import ndimage from matplotlib import colors import seaborn as sns import matplotlib import matplotlib.cm as cm from matplotlib.collections import LineCollection import matplotlib.gridspec as gridspec %matplotlib inline sns.set_palette('husl', 2) grid = Grid.from_raster('../data/n30w100_dir', data_name='dir') grid.read_raster('../data/n30w100_con', data_name='dem') dirmap = (64, 128, 1, 2, 4, 8, 16, 32) # Specify pour point x, y = -97.294167, 32.73750 # Delineate the catchment grid.catchment(data='dir', x=x, y=y, dirmap=dirmap, out_name='catch', recursionlimit=15000, xytype='label') # Clip the bounding box to the catchment grid.clip_to('catch', precision=5) grid.accumulation(data='catch', dirmap=dirmap, pad_inplace=False, out_name='acc') profiles, connections = grid.extract_profiles('catch', grid.acc > 25) geom = grid.extract_river_network('catch', grid.acc > 25) ``` # Lo Diffusion + advection ``` dx = 60. * (np.asarray([profile.size for profile in profiles]) + 1) n = max(max(connections.keys()), max(connections.values())) + 1 s = np.ones(n) I = np.eye(n) u = 1.0*np.ones(n) D = (u * dx / 10).mean()*np.ones(n) dt = 1 I[0, 0] = 0 U = np.zeros((n, n)) W = np.zeros((n, n)) for i, j in connections.items(): # Confirmed as i U[j, i] = u[i] * dt / dx[i] W[i, j] = u[i] * dt / dx[i] V = np.diag(W @ s) X = np.zeros((n, n)) for i, j in connections.items(): X[i, j] = 2 / (dx[i] + dx[j]) X[j, i] = 2 / (dx[i] + dx[j]) Y = np.diag(X @ s) Z = np.diag(D * dt / dx) S = (U - V) - Z @ (Y - X) + I np.imag(np.linalg.eig(S)[0]).max(), np.imag(np.linalg.eig(S)[0]).min(), np.real(np.linalg.eig(S)[0]).max(), np.real(np.linalg.eig(S)[0]).min() C = np.eye(n) ns = 11 W_c = linalg.solve_discrete_lyapunov(S, C) st = np.argsort(np.diag(W_c))[::-1][:ns] W_o = linalg.solve_discrete_lyapunov(S.T, C) np.random.seed(0) x0 = np.ones(S.shape[0]) from numba import njit sr = np.asarray([ 1, 128, 136, 52, 64, 92, 194, 9, 143, 161, 191, 13, 71, 15, 125, 77, 141, 198, 58, 150, 102, 12, 88, 164, 204, 2, 70, 87, 159, 177, 197, 22]) st = np.asarray([ 5, 8, 14, 42, 26, 76, 51, 89, 45, 55, 63, 83, 28, 1, 65, 85, 104, 93, 103, 121, 115, 13, 11, 140, 9, 194, 107, 6, 71, 64, 31, 40]) @njit(fastmath=True) def compute_multi_x0s(X0, S, C, T): Ac = np.eye(S.shape[0]) Wo_r = np.zeros(S.shape) R_r = np.zeros(X0.shape) X0_hat = np.zeros(X0.shape) m = X0.shape[0] for _ in range(T): AC = C @ Ac Wo_r += (AC.T @ AC) for i in range(m): y = AC @ X0[i] R_r[i] += (AC.T @ y) Ac = S @ Ac Wo_r_inv = np.linalg.pinv(Wo_r) for i in range(m): X0_hat[i] = (Wo_r_inv @ R_r[i]) return X0_hat C = np.eye(S.shape[0])[sr[:11]] X0 = np.vstack([x0, x0, x0, x0]) %time x0_hats = compute_multi_x0s(X0, S, C, 4000) Adj = np.zeros((n, n)) for i, j in connections.items(): # Confirmed as i Adj[j, i] = 1 Adj[i, j] = 1 Lap = sparse.csgraph.laplacian(Adj) L, U = np.linalg.eigh(Lap) ghat = np.exp(-100*L) np.random.seed(0) trials = 200 T = np.random.choice(len(U), size=trials, replace=False) ghat = [np.exp(np.minimum((100*np.random.randn() - 100), -1)*L) for _ in range(trials)] X0 = np.vstack([(U @ np.diag(ghat[i]) @ U[T[i]]).ravel() for i in range(trials)]) X0 = X0 / np.linalg.norm(X0, axis=1).reshape(-1, 1) start = time.time() x0_hats = compute_multi_x0s(X0, S, C, 4000) end = time.time() print(end - start, ' s') ``` # Rank-based ``` 
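# The loop below sweeps the rank-based sensor ordering `sr`: for each sensor count i it keeps
# the first i sensors, builds the observation matrix Ci from those rows of the identity, and
# reconstructs all trial initial states in X0 over a 3600-step horizon. Results are keyed by
# sensor count in X0_hats_r so that reconstruction error vs. number of sensors can be plotted later.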
C = np.eye(len(S)) X0_hats_r = {} first_start = time.time() for i in range(1, len(sr)): sensors = sr[:i].tolist() Ci = C[sensors] start = time.time() x0_hats_i = compute_multi_x0s(X0, S, Ci, 3600) end = time.time() print(f'{i}: ', end - start, ' s') X0_hats_r[i] = x0_hats_i print('Total: ', end - first_start, ' s') ``` # Trace-based ``` C = np.eye(len(S)) X0_hats_t = {} first_start = time.time() for i in range(1, len(st)): sensors = st[:i].tolist() Ci = C[sensors] start = time.time() x0_hats_i = compute_multi_x0s(X0, S, Ci, 3600) end = time.time() print(f'{i}: ', end - start, ' s') X0_hats_t[i] = x0_hats_i print('Total: ', end - first_start, ' s') ``` # Random placement ``` np.random.seed(0) sss = [np.random.choice(np.arange(1, len(Adj)), size=len(sr), replace=False) for _ in range(10)] C = np.eye(len(S)) X0_hats_U = [] for it, ss in enumerate(sss[:]): first_start = time.time() X0_hats_ui = {} print(it) for i in range(1, len(ss)): sensors = ss[:i].tolist() Ci = C[sensors] start = time.time() x0_hats_i = compute_multi_x0s(X0, S, Ci, 3600) end = time.time() print(f'{i}: ', end - start, ' s') X0_hats_ui[i] = x0_hats_i print('Total: ', end - first_start, ' s') X0_hats_U.append(X0_hats_ui) mse_by_num_sensors_r = np.asarray([((X0_hats_r[i] - X0)**2).sum() / trials for i in range(1, 32)]) mse_by_num_sensors_t = np.asarray([((X0_hats_t[i] - X0)**2).sum() / trials for i in range(1, 32)]) mse_by_num_sensors_U = [np.asarray([((X0_hats_u[i] - X0)**2).sum() / trials for i in range(1, 32)]) for X0_hats_u in X0_hats_U] sns.set() fig, ax = plt.subplots(1, 2, figsize=(10, 4)) ax[0].plot(np.arange(1, 32), mse_by_num_sensors_U[0], label='randomized', c='0.5', alpha=0.35) for mse_by_num_sensors_u in mse_by_num_sensors_U[1:]: ax[0].plot(np.arange(1, 32), mse_by_num_sensors_u, c='0.5', alpha=0.35) ax[0].plot(np.arange(1, 32), mse_by_num_sensors_r, label='rank-based', c='red') ax[0].plot(np.arange(1, 32), mse_by_num_sensors_t, label='trace-based', c='blue') ax[0].set_xlabel('Number of sensors') ax[0].set_ylabel('Mean squared error') ax[0].set_title('Reconstruction error vs. number of sensors') ax[0].legend() ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_U[0]), label='randomized', c='0.5', alpha=0.35) for mse_by_num_sensors_u in mse_by_num_sensors_U[1:]: ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_u), c='0.5', alpha=0.35) ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_r), label='rank-based', c='red') ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_t), label='trace-based', c='blue') ax[1].set_xlabel('Number of sensors') ax[1].set_ylabel('Log of mean squared error') ax[1].set_title('Log reconstruction error vs. number of sensors') ax[1].legend() plt.tight_layout() std_by_num_sensors_r = np.asarray([((X0_hats_r[i] - X0)).std() for i in range(1, 32)]) std_by_num_sensors_t = np.asarray([((X0_hats_t[i] - X0)).std() for i in range(1, 32)]) std_by_num_sensors_u0 = np.asarray([((X0_hats_U[0][i] - X0)).std() for i in range(1, 32)]) import pickle with open('X0_hat_r.p', 'wb') as outfile: pickle.dump(X0_hats_r, outfile) with open('X0_hat_t.p', 'wb') as outfile: pickle.dump(X0_hats_t, outfile) for i in range(len(X0_hats_U)): with open(f'X0_hat_u{i}.p', 'wb') as outfile: pickle.dump(X0_hats_U[i], outfile) ```
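As a sanity check on the reconstruction machinery used throughout this notebook, the same Gramian-based least-squares estimate can be verified on a small synthetic linear system. This is a sketch with invented names (`A`, `C_obs`, `x0_true`); it mirrors what `compute_multi_x0s` does for a single, noiseless initial state.

```
# Minimal check of the finite-horizon observability-Gramian reconstruction used above.
import numpy as np

rng = np.random.RandomState(1)
n, T = 6, 200
A = rng.randn(n, n)
A = 0.9 * A / np.max(np.abs(np.linalg.eigvals(A)))   # rescale to spectral radius 0.9
C_obs = np.eye(n)[:2]                                # observe only the first two states
x0_true = rng.randn(n)

Wo = np.zeros((n, n))        # finite-horizon observability Gramian: sum_t (C A^t)^T (C A^t)
r = np.zeros(n)              # accumulated (C A^t)^T y_t terms
Ak = np.eye(n)
for _ in range(T):
    CA = C_obs @ Ak
    y_t = CA @ x0_true       # noiseless output at time t
    Wo += CA.T @ CA
    r += CA.T @ y_t
    Ak = A @ Ak

x0_hat = np.linalg.pinv(Wo) @ r          # same normal-equation solve as compute_multi_x0s
print(np.linalg.norm(x0_hat - x0_true))  # ~0 when the pair (A, C_obs) is observable
```

When the chosen sensors leave some modes unobservable, `Wo` becomes rank-deficient and the pseudo-inverse returns the minimum-norm estimate, which is exactly why the sensor-placement orderings compared above matter for reconstruction error.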
# Final Project Submission * Student name: `Reno Vieira Neto` * Student pace: `self paced` * Scheduled project review date/time: `Fri Oct 15, 2021 3pm – 3:45pm (PDT)` * Instructor name: `James Irving` * Blog post URL: https://renoneto.github.io/using_streamlit #### This project originated the [following app](https://movie-recommender-reno.herokuapp.com/). I'd recommend playing with the app and then coming back here to understand how the model behind it works. # Table of Contents <a class="anchor" id="toc"></a> - **[Business Case and Goals](#bc)** - **[The Dataset](#td)** - **[Dataset Exploration and Cleaning](#dec)** - **[No. of Movies by Genre](#mg)** - **[No. of Ratings per Year](#ry)** - **[No. of Users rating movies per Year](#urm)** - **[Recommender System](#rs)** - **[Create Popularity Model](#pop)** - **[Collaborative-Based Filtering](#colab)** - **[Hyperparameter Tuning](#grid)** - **[Try different models](#dif)** - **[Model Evaluation](#eval)** - **[Create function to take user input and give recommendations (+ hint of content-based attribute)](#func)** - **[Conclusion](#conclusion)** - **[Export files to create app](#lit)** - **[Improvements](#improvements)** # Business Case and Goal <a class="anchor" id="bc"></a> In this project, I'm creating a movie recommender using the [MovieLens dataset](https://grouplens.org/datasets/movielens/) to build a model that provides top 5 movie recommendations to a user, based on their ratings of other movies. I'm going to be addressing the cold start problem as well by being able to deal with users with no movie ratings. # The Dataset <a class="anchor" id="td"></a> The MovieLens dataset is a "classic" recommendation system dataset used in numerous academic papers and machine learning proofs-of-concept. [You can find more about it here](https://grouplens.org/datasets/movielens/) # Dataset Exploration and Cleaning <a class="anchor" id="dec"></a> ## Import necessary packages ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import re import time from surprise import Reader, Dataset, dump from surprise.model_selection import cross_validate, GridSearchCV from surprise.prediction_algorithms import KNNBasic, KNNBaseline, SVD, SVDpp from surprise.accuracy import rmse from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') %matplotlib inline # Import datasets df_movies = pd.read_csv('./app/data/movies.csv') df_ratings = pd.read_csv('./app/data/ratings.csv') # Show first rows display(df_movies.head()) display(df_ratings.head()) ``` #### Notes - Breakdown genres into different columns (one-hot encoding) - `title` seems to have the release year of the movie. It might be interesting to have title and year in different columns. ``` # Check for nulls and data types display(df_movies.info()) display(df_ratings.info()) ``` #### Notes - No nulls - Might need to convert timestamps to `datetime` - There are 9742 movies in the dataset - 100836 ratings ### `df_movies` First, I'm going to start exploring the movies dataset to understand what I'm dealing with. 
``` # Create column with array of genres and calculate the Number of Genres per movie df_movies['genres_array'] = df_movies['genres'].str.split('|') # Flattened genres stacked_genres = df_movies['genres_array'].apply(pd.Series).stack(level=0).reset_index() stacked_genres.columns = ['index', 'level_1', 'genre'] # Combine original dataframe with flattened genres using the index df_movies_new = pd.merge(df_movies, stacked_genres, how='left', left_index=True, right_on=['index']) df_movies_new = df_movies_new[['movieId', 'title', 'genre']] # One-hot Encoding of Genre column one_hot = pd.get_dummies(df_movies_new['genre']) # Get list of genres (it's going to be useful soon) list_of_genres = list(one_hot.columns) # Combine the new dataframe with the one-hot encoded dataframe df_movies_new = pd.merge(df_movies_new, one_hot, left_index=True, right_index=True) df_movies_new = df_movies_new.drop('genre', axis=1) # Use groupby to have one row per movie df_movies_new = df_movies_new.groupby(['movieId', 'title']).sum()[list_of_genres].reset_index() # Split year and title df_movies_new['release_year'] = df_movies_new.apply(lambda x: x['title'].strip()[-5:][:-1], axis=1) df_movies_new['release_year'] = df_movies_new.apply(lambda x: x['release_year'] if len(re.findall("[0-9]{4}", x['release_year'])) == 1 else np.nan, axis=1) df_movies_new['title'] = df_movies_new.apply(lambda x: x['title'][:-6].strip() if x['release_year'] != np.nan else x['title'], axis=1) ``` ### No. of Movies by genre <a class="anchor" id="mg"></a> **[Go back to Table of Contents](#toc)** ``` # Create empty dictionary to store the no of movies by genre no_of_movies_by_genre = {} for genre in list_of_genres: no_of_movies = df_movies_new[genre].sum() no_of_movies_by_genre[genre] = no_of_movies # Transform that into a dataframe to_plot = pd.DataFrame.from_dict(no_of_movies_by_genre, orient='index').reset_index() to_plot.columns = ['genre', 'no_of_movies'] to_plot = to_plot.sort_values('no_of_movies', ascending=False).reset_index(drop=True) # Plot plt.figure(figsize=(10,8)) sns.barplot(x="no_of_movies", y="genre", data=to_plot) plt.title('No of Movies by Genre', size=14) plt.xlabel('No. of Movies', size=13) plt.ylabel(None) plt.show() ``` #### Note - We are dealing with an unbalanced dataset from the perspective of the genres. There are way more Drama and Comedy movies than other genres. The consequence of that to the model is that certain genres will have a smaller set of options to choose from. ### `df_ratings` ### No. of Ratings per Year <a class="anchor" id="ry"></a> I wonder how many ratings were created per year. **[Go back to Table of Contents](#toc)** ``` # Convert timestamp column to datetime df_ratings['datetime'] = pd.to_datetime(df_ratings['timestamp'], unit='s') df_ratings['year'] = df_ratings['datetime'].dt.year # Create plot with No. of ratings per year to_plot = df_ratings.groupby('year').count()['rating'].reset_index() plt.figure(figsize=(17,5)) sns.barplot(x='year', y='rating', data=to_plot, color='blue', alpha=0.5) plt.title('No of Ratings per Year') plt.show() ``` **Note** - I don't see any trends. It's great to see that the last 4 years of the dataset had almost the same number of ratings. ### No. of Users rating movies per Year <a class="anchor" id="urm"></a> **[Go back to Table of Contents](#toc)** ``` # Create Plot with No. 
of Unique Users giving ratings to_plot = df_ratings.groupby('year').nunique()['userId'].reset_index() plt.figure(figsize=(17,5)) sns.barplot(x='year', y='userId', data=to_plot, color='blue', alpha=0.5) plt.title('No. of Users rating movies per Year') plt.show() ``` **Note** - Not many users rating movies. Around 40 per year. # Recommender System <a class="anchor" id="rs"></a> ## Create Popularity Model <a class="anchor" id="pop"></a> The first model is going to be very simple. It's a popularity model. Basically I'm going to rank movies by popularity. However, I need to find a way to scale the ratings because a movie with 100 ratings with an average of 4.5 and another with 2 with an average of 4.75 are completely different. I'd argue that the first movie actually has a higher rating score than the second one since more users have rated it with a high score. To address that problem I'm using the IMDB's Weighted Rating Method I found [online](https://math.stackexchange.com/questions/169032/understanding-the-imdb-weighted-rating-function-for-usage-on-my-own-website) that does a good job at weighting the ratings. #### Calculation ![](https://image.ibb.co/jYWZp9/wr.png) where, * v is the number of votes for the movie; * m is the minimum votes required to be listed in the chart; * R is the average rating of the movie; And * C is the mean vote across the whole report #### C: Calculate mean vote across the whole dataset ``` # Calculate Mean and Count the No. of Ratings to a given movie mean_ratings_df = df_ratings.groupby('movieId').agg(avg_rating=('rating', 'mean'), count_rating=('rating', 'count')).reset_index() # Calculate the Overall Average Rating mean_ratings_df['overall_avg_rating'] = mean_ratings_df['avg_rating'].mean() mean_ratings_df.head() ``` #### m: Define the minimum number of ratings required to be listed To define the minimum number of votes I'm going to look at the distribution of No. of Ratings by Movies. ``` # Plot plt.figure(figsize=(15,5)) sns.boxplot(x=mean_ratings_df['count_rating']) plt.title('Boxplot of No. of Ratings given to movies') plt.show() ``` Not super helpful. I'm going to print different quantiles ``` # Calculate different quatiles n_of_users = df_ratings['userId'].nunique() n_of_movies = len(mean_ratings_df) quantiles_list = [] for n in range(10, 100, 5): q = mean_ratings_df['count_rating'].quantile(n/100) n_of_selected_movies = len(mean_ratings_df[mean_ratings_df['count_rating'] >= q]) quantiles_list.append([n, q, n_of_selected_movies]) pd.DataFrame(quantiles_list, columns=['quantile', 'quantile_value', 'number_of_movies']) ``` Before deciding the Minimum No. of Ratings, I'm going to look at the number of movies users have rated. ``` df_ratings.groupby('userId').count()['movieId'].describe() ``` The Median number of movies a user has rated is 70 movies and the 75th quantile is 168 movies. Therefore, I'm comfortable moving forward with having the Minimum Number of Ratings (or `m`) of 47 ratings since that represents 491 Movies which is more than most users have rated. > **Disclamer**: I have tried a minimum of 27/17 ratings as well, however, the model resulted in weird recommendations. So I'm picking 47 after iteratively trying 17 and 27. 
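As a quick sanity check of the 100-ratings-versus-2-ratings example given earlier, here is the weighted rating computed by hand with m = 47 and an assumed overall mean rating C of about 3.3 (the real C comes from the data, so these numbers are only illustrative):

```
def imdb_weighted_rating(v, R, m=47, C=3.3):
    """IMDB-style weighted rating: (v / (v + m)) * R + (m / (m + v)) * C."""
    return (v / (v + m)) * R + (m / (m + v)) * C

# A movie with many ratings vs. one with very few but a slightly higher average
print(imdb_weighted_rating(v=100, R=4.5))   # ~4.12
print(imdb_weighted_rating(v=2, R=4.75))    # ~3.36
```

The movie with only two ratings is pulled sharply towards the global mean, which is exactly the behaviour the popularity model needs.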
#### m = 47 #### Create function to apply to the dataset ``` def weighted_rating(df): """ Calculates the IMDB's Weighted Rating using the following formula: (v / (v+m) * R) + (m / (m+v) * C) where: - v is the number of votes for the movie; - m is the minimum votes required to be listed in the chart; - R is the average rating of the movie; And - C is the mean vote across the whole report """ v = df['count_rating'] m = df['minimum_no_of_ratings'] R = df['avg_rating'] C = df['overall_avg_rating'] return (v / (v+m) * R) + (m / (m+v) * C) # Create Copy popularity_df = mean_ratings_df.copy() # Calculate the 95th quantile and the weighted rating popularity_df['minimum_no_of_ratings'] = popularity_df['count_rating'].quantile(0.95) popularity_df['weighted_rating'] = popularity_df.apply(weighted_rating, axis=1) ``` I'm going to look at the top 10 movies with the highest ratings. ``` # Grab the top 10 ids top_ten_ids = popularity_df.sort_values('weighted_rating', ascending=False)['movieId'][:10].values # Print them for idx, movie_id in enumerate(top_ten_ids): print((idx + 1), df_movies[df_movies['movieId'] == movie_id]['title'].item()) ``` Not too bad, I agree with these being the top 10. _However, that's very personal._ **[Go back to Table of Contents](#toc)** ## Collaborative-Based Filtering <a class="anchor" id="colab"></a> Collaborative Filtering is based on the idea that users similar to a me can be utilized to predict how much I will like a particular product or service that those same users have used/experienced but I have not. The strategy is to use different models and compare their performances. The metric to optimize for is RMSE. However, most likely, the best model will be the Singular Value Decomposition (SVD) or SVD++ based on what I have seen in different places. Nonetheless, I think it's worth trying different models rather than simply trying only these two models. Moreover, I'm also considering the fit time, otherwise, I might end up with a model that would not be _deployable_. ``` # Create a new dataframe to train the model. df_ratings_clean = df_ratings[['userId', 'movieId', 'rating']] ``` #### Reduce dataset to decrease runtime The dataset is too big and it's going to take too long to train the models if I use the whole dataset (_I've learned that the hard way_). Therefore, I'm picking only 50% of it to identify the best hyperparameters for the SVD model and I'm running GridSearchCV only for 50% of that. Once I identify the best hyperparameters, I'll then train the model using the whole dataset. 
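Before tuning, it helps to remember what the SVD model is actually fitting: a predicted rating is the global mean plus a user bias, an item bias, and a dot product of latent factor vectors. The sketch below spells that rule out with made-up numbers; it illustrates the idea rather than reproducing surprise's internals.

```
import numpy as np

# r_hat = mu + b_u + b_i + p_u . q_i  (all values below are invented)
mu = 3.5                            # global mean rating
b_u, b_i = 0.2, -0.1                # user and item biases
p_u = np.array([0.3, -0.5, 0.1])    # user latent factors (n_factors = 3 here)
q_i = np.array([0.4,  0.2, -0.3])   # item latent factors

r_hat = mu + b_u + b_i + p_u @ q_i
print(round(r_hat, 2))              # 3.59
```

Seen this way, the grid below is choosing how many latent factors to learn (`n_factors`), how many epochs of SGD to run (`n_epochs`), the learning rate (`lr_all`), and how strongly the biases and factors are regularized (`reg_all`).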
``` # Randomly pick 50,000 datapoints fmor the dataset sample_df = df_ratings_clean.sample(n=50000, random_state=111) # Split the sample data in two so I can test the best hyperparameters later on train_df, test_df = train_test_split(sample_df, train_size=.50, random_state=111) # Create reader and dataset objects reader = Reader() traindata = Dataset.load_from_df(train_df, reader) testdata = Dataset.load_from_df(test_df, reader) ``` ### GridSearchCV - Hyperparameter Tunning of SVD <a class="anchor" id="grid"></a> **[Go back to Table of Contents](#toc)** ``` # Perform a gridsearch with SVD param_grid = {'n_factors':[10, 15, 20] , 'n_epochs': [10, 20] , 'lr_all': [0.008, 0.012] , 'reg_all': [0.06, 0.1] , 'random_state': [111]} gs_model = GridSearchCV(SVD, param_grid=param_grid, n_jobs = -1, joblib_verbose=False) %time gs_model.fit(traindata) print('The best parameters are:') gs_model.best_params['rmse'] ``` ### GridSearchCV Metrics Analysis Let's analyze the metrics of each run and pick the best parameters given the RMSE and Fit Time. Sometimes simply choosing the best parameters is not the best option since the only goal of the Grid is to minimize RMSE. We should also consider the Fit Time if we are planning on having this model as a service running online. ``` # Convert results from the GridSearchCV to dataframes df_params = pd.DataFrame(gs_model.cv_results['params']) df_rmse = pd.DataFrame(gs_model.cv_results['mean_test_rmse'], columns=['mean_test_rmse']) df_time = pd.DataFrame(gs_model.cv_results['mean_fit_time'], columns=['mean_fit_time']) df_results = pd.concat([df_params, df_rmse, df_time], axis=1) ``` Create a function to print metrics so we can see the impact of hyperparameters in RMSE and Fit Time. ``` def compare_metrics_chart(df, column_a, column_b): """ Function to plot the comparison of two metrics in a GridSearchCV run. Args: df(pd.Dataframe): Pandas Dataframe with GridSearchCV metrics. column_a(str): First metric column_b(str): Second Metric """ # Create Figure fig = plt.figure(figsize=(10,5)) # Create first axis ax = fig.add_subplot(111) # Plot Column A sns.lineplot(data=df[column_a], color="g", ax=ax) # Set Y Label ax.set_ylabel(column_a, color='g', size=10) # Create axis 2 ax2 = plt.twinx() # Plot Column B sns.lineplot(data=df[column_b], color="b", ax=ax2) # Set Y Label ax2.set_ylabel(column_b, color='b', size=10) # Change the format of the title column_a_title = column_a.replace('_', ' ').title() column_b_title = column_b.replace('_', ' ').title() plt.title(column_a_title + ' vs. ' + column_b_title) plt.show(); ``` #### Number of Factors ``` compare_metrics_chart(df_results, 'n_factors', 'mean_test_rmse') compare_metrics_chart(df_results, 'n_factors', 'mean_fit_time') ``` The lowest values for RMSE is reached regardless of the Number of Factors. It's arguable that we should have more factors to decrease RMSE since that's the expectation. However, it comes at a cost: fit time increase. Since the data is showing we can achieve low RMSE with only `10` factors then I'm going to choose that. #### Number of Epochs ``` compare_metrics_chart(df_results, 'n_epochs', 'mean_test_rmse') compare_metrics_chart(df_results, 'n_epochs', 'mean_fit_time') ``` The Number of Epochs reduces RMSE, but it's possible to see an increase of 50%-80% in Fit Time, which is more than the positive impact in RMSE. Therefore, I'll go with `20` epochs. 
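The charts give the trend per hyperparameter; a plain sorted table of the same grid results is a useful cross-check (a small sketch over the `df_results` frame assembled above):

```
# Top configurations by cross-validated RMSE, with fit time shown alongside
cols = ['n_factors', 'n_epochs', 'lr_all', 'reg_all', 'mean_test_rmse', 'mean_fit_time']
df_results[cols].sort_values('mean_test_rmse').head(10)
```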
#### Regularization Term ``` compare_metrics_chart(df_results, 'reg_all', 'mean_test_rmse') compare_metrics_chart(df_results, 'reg_all', 'mean_fit_time') ``` Low regularization term achieves better results with no impact in fit time. #### Learning Rate ``` compare_metrics_chart(df_results, 'lr_all', 'mean_test_rmse') compare_metrics_chart(df_results, 'lr_all', 'mean_fit_time') ``` Having high Learning Rate has a positive impact in RMSE with now impact in Fit Time. #### Final hyperparameters: - `n_factors`: 15 - `n_epochs`: 20 - `lr_all`: 0.012 - `reg_all`: 0.06 **[Go back to Table of Contents](#toc)** ### Try different models <a class="anchor" id="dif"></a> #### Create a function to easily test different models ``` def full_model_training_evaluation(model, model_name, traindata, testdata): """ Train and test different models and collect fit time and train/test RMSE. Args: model(surprise.prediction_algorithms): Model instances from the surprise package. model_name(str): Model name created by the User. A way to identify the model. traindata(surprise.dataset.DatasetAutoFolds): Train dataset testdata(surprise.dataset.DatasetAutoFolds): Test dataset Returns: results(dict): A dictionary with the model name, fit time and RMSE's (train/test). """ # Stor results in dictionary results = {} results['model_name'] = model_name print('Training', model_name, 'model') # Fit on train data start_time = time.time() model.fit(traindata.build_full_trainset()) end_time = time.time() total_time = round(end_time - start_time, 2) results['fit_time_in_seconds'] = total_time # Get RMSE on train data predictions_train = model.test(traindata.build_full_trainset().build_testset()) rmse_train = rmse(predictions_train, verbose=False).round(2) results['rmse_train'] = rmse_train # Get RMSE on test data predictions_test = model.test(testdata.build_full_trainset().build_testset()) rmse_test = rmse(predictions_test, verbose=False).round(2) results['rmse_test'] = rmse_test return results ``` Instantiate different models ``` # Create SVD model with the best hyperparameters svd = SVD(n_factors=15, n_epochs=20, lr_all=0.012, reg_all=0.06, random_state=111) # SVD++: Use the same hyperparameters svd_pp = SVDpp(n_factors=15, n_epochs=20, lr_all=0.012, reg_all=0.06, random_state=111) # Different instances of KNN Basic models with different hyperparameters knn_basic_person_baseline = KNNBasic(sim_options={'name':'pearson_baseline', 'user_based':True}, verbose=False) knn_basic_person = KNNBasic(sim_options={'name':'pearson', 'user_based':True}, verbose=False) knn_basic_cosine = KNNBasic(sim_options={'name':'cosine', 'user_based':True}, verbose=False) # Different instances of KNN Baseline models with different hyperparameters knn_base_person_baseline = KNNBaseline(sim_options={'name':'pearson_baseline', 'user_based':True}, verbose=False) knn_base_person = KNNBaseline(sim_options={'name':'pearson', 'user_based':True}, verbose=False) knn_base_cosine = KNNBaseline(sim_options={'name':'cosine', 'user_based':True}, verbose=False) # Put all models in a dictionary models = {'SVD': svd, 'SVD++': svd_pp, 'KNNBasic Cosine': knn_basic_cosine, 'KNNBasic Person': knn_basic_person, 'KNNBasic Person Baseline': knn_basic_person_baseline, 'KNNBaseline Cosine': knn_base_cosine, 'KNNBaseline Person': knn_base_person, 'KNNBaseline Person Baseline': knn_base_person_baseline} # Loop through different models and evaluate them model_results = [] for model_name, model_instance in models.items(): results = full_model_training_evaluation(model_instance, 
model_name, traindata, testdata) model_results.append(results) ``` **[Go back to Table of Contents](#toc)** ### Model Evaluation <a class="anchor" id="eval"></a> ``` pd.DataFrame(model_results) ``` #### Notes: - **Fit Time**: `SVD++` is by far the worst model. All KNN models have somewhat the same Fit Time, which is 4 times faster than `SVD`. However, they are all very fast relative to the `SVD++` model. - **RMSE Train**: The KNN Models using `person_baseline` is overfitting the train set. When comparing both Singular Value Decomposition models, the `SVD++` is performing better than the `SVD`. - **RMSE Test**: Both Singular Value Decomposition models had the same performance numbers and performed better than all KNN models. ### Conclusion I'll move forward with the `SVD` model given the fit time and RMSE scores. **[Go back to Table of Contents](#toc)** ## Create function to take user input and give recommendations (+ hint of content-based attribute) <a class="anchor" id="func"></a> Finally, I'm going to create a function that takes a genre and ratings from a user who has no ratings in the dataset. In the process, I'm going to focus my recommendations based on the chosen genre (content-based part of the recommendation). ``` # Create list of genres list_of_genres = stacked_genres['genre'].sort_values().unique()[1:] # Combine mean ratings and movies details ratings_movies_df = pd.merge(mean_ratings_df, df_movies, on='movieId') ``` #### Filter the dataset by removing movies with not enough ratings ``` def filtered_dataset(genre): """ Function to filter the dataset given the genre and remove outliers. Args: genre(str): The genre the user has chosen to come with recommendations. Returns: genre_df(pd.DataFrame): Filtered Dataframe with only the chosen genre. """ # Keep only the selected genre genre_df = ratings_movies_df[ratings_movies_df['genres'].str.contains(genre)] # Calculate the 95th quantile and the weighted rating minimum_no_of_ratings = genre_df['count_rating'].quantile(0.95) genre_df['minimum_no_of_ratings'] = minimum_no_of_ratings genre_df['weighted_rating'] = genre_df.apply(weighted_rating, axis=1) # Remove movies with not enough ratings genre_df = genre_df[genre_df['count_rating'] >= minimum_no_of_ratings] # Sorted it by weighted rating so we have the highest ratings on the top genre_df = genre_df.sort_values('weighted_rating', ascending=False) genre_df = genre_df.reset_index(drop=True) # Keep certain relevant columns genre_df = genre_df[['movieId', 'title', 'genres', 'count_rating', 'minimum_no_of_ratings', 'weighted_rating']] return genre_df ``` #### Create first a function to let the user rate five movies ``` def rate_movie(n_of_movies=5, default_user_id=9999999): """ Function to request a new user to review some movies. Args: n_of_movies(int): Number of ratings the new will have to give. default_user_id(int): Random user id that will be given to the user to be able to reference to it later. 
Returns: new_ratings_df(pd.DataFrame): Pandas Dataframe with the new ratings favorite_genre(str): The User's favorite genre """ # Print a list of the available genres print('List of Available Genres: ', ", ".join(list_of_genres)) # Gather input from user on which genre will be analyzed favorite_genre = input('Choose one genre from the following (case-sensitive): ') # Filter the dataset df_movies_popularity = filtered_dataset(favorite_genre) # Keep only movies that contain the chosen genre favorite_genre_movies = df_movies_popularity[df_movies_popularity['genres'].str.contains(favorite_genre)] # Keep the highest rated movies favorite_genre_movies = favorite_genre_movies.iloc[:20].sample(frac=1, random_state=111) favorite_genre_movies = favorite_genre_movies.iloc[:n_of_movies] print('') # Created to store ratings from user ratings_list = [] # Loop through dataframe with movies to be rated for row in favorite_genre_movies.iterrows(): # Extract Title and ID movie_title = row[1]['title'] movie_id = row[1]['movieId'] print('Movie to rate: ', movie_title) # Gather rating from user rating = input('How do you rate this movie on a scale of 1-5, press n if you have not seen :\n') # Deal with users not typing a number and create a new variable with the integer try: rating_int = int(rating) except: rating_int = 1 # While the Rating is not valid, keep asking the user while (rating != 'n') and not (1 <= rating_int <=5): rating = input('Please rate the movie between 1-5 or n if you have not seen : \n') else: # If the rating is different from 'n' then we need to add the rating to the list if rating != 'n': ratings_list.append({'userId': default_user_id, 'movieId': movie_id, 'rating': rating_int}) print('') # Convert to DataFrame new_ratings_df = pd.DataFrame(ratings_list) return new_ratings_df, favorite_genre, df_movies_popularity ``` #### Create a function to give the recommendations ``` def give_n_recommendations(model, default_user_id=9999999, n_recommendations=5): """ Function to request a new user to review movies and give recommendations based on that. Args: model(surprise.prediction_algorithms): Model instances from the surprise package. default_user_id(int): Random user id that will be given to the user to be able to reference to it later. n_recommendations(int): Number of recommendations that will be given to the user. 
""" # Extract ratings from the user new_ratings_df, favorite_genre, df_movies_popularity = rate_movie(default_user_id=default_user_id) watched_movies_id = new_ratings_df['movieId'] ## add the new ratings to the original ratings DataFrame updated_df = pd.concat([new_ratings_df, df_ratings_clean]) new_data = Dataset.load_from_df(updated_df, reader) new_dataset = new_data.build_full_trainset() # Fit new dataset model.fit(new_dataset) # make predictions for the user results = [] for movie_id in df_movies_popularity['movieId'].unique(): predicted_score = model.predict(default_user_id, movie_id)[3] results.append((movie_id, predicted_score)) # order the predictions from highest to lowest rated ranked_movies = pd.DataFrame(results, columns=['movieId', 'predicted_score']) ranked_movies = ranked_movies[~ranked_movies['movieId'].isin(watched_movies_id)] ranked_movies = ranked_movies.sort_values('predicted_score', ascending=False).reset_index(drop=True) ranked_movies = pd.merge(ranked_movies, df_movies, on='movieId') # ranked_movies = ranked_movies[ranked_movies['genres'].str.contains(favorite_genre)] print('The recommendations are the following:') if len(ranked_movies) < n_recommendations: n_recommendations = len(ranked_movies) for row in range(n_recommendations): movie_id = ranked_movies.iloc[row]['movieId'] recommended_title = df_movies[df_movies['movieId'] == movie_id]['title'].item() print(f'No. {row+1} is {recommended_title}') ``` #### Let's test it out! I'm going to try different genres to see how the model behaves. #### `Action` ``` give_n_recommendations(svd) ``` #### `Documentary` ``` give_n_recommendations(svd) ``` #### `Crime` ``` give_n_recommendations(svd) ``` #### `Romance` ``` give_n_recommendations(svd) ``` # Conclusion <a class="anchor" id="conclusion"></a> I'm happy with the results. However, I think the function is a bit limited. I'd like to have the recommender in an app. To do that, I'm going to use Streamlit. **[Go back to Table of Contents](#toc)** # Export files to create app <a class="anchor" id="lit"></a> I'm going to export some files so I can use them in Streamlit ``` # Export it to use it on streamlit ratings_movies_df.to_csv('./app/data/movies_by_rating.csv', index=0) df_ratings_clean.to_csv('./app/data/user_movie_ratings.csv', index=0) dump.dump('./app/data/svd.pkl', algo=svd) ``` # [Check out the App!](https://movie-recommender-reno.herokuapp.com/) # Improvements <a class="anchor" id="improvements"></a> - Use Normalized Discounted Cumulative Gain (NDCG) to evaluate models. - Develop a Content-Based layer using `tags` and `genres` or even `title`/`year`. - Sometimes I rate Star Wars with 1 star and the recommender outputs more Start Wars movies. **[Go back to Table of Contents](#toc)**
# eICU Collaborative Research Database # Notebook 5: Prediction This notebook explores how a decision trees can be trained to predict in-hospital mortality of patients. ## Load libraries and connect to the database ``` # Import libraries import os import numpy as np import pandas as pd import matplotlib.pyplot as plt # model building from sklearn import ensemble, impute, metrics, preprocessing, tree from sklearn.model_selection import cross_val_score, train_test_split from sklearn.pipeline import Pipeline # Make pandas dataframes prettier from IPython.display import display, HTML, Image plt.rcParams.update({'font.size': 20}) %matplotlib inline plt.style.use('ggplot') # Access data using Google BigQuery. from google.colab import auth from google.cloud import bigquery # authenticate auth.authenticate_user() # Set up environment variables project_id='tdothealthhack-team' os.environ["GOOGLE_CLOUD_PROJECT"]=project_id ``` To make our lives easier, we'll also install and import a set of helper functions from the `datathon2` package. We will be using the following functions from the package: - `plot_model_pred_2d`: to visualize our data, helping to display a class split assigned by a tree vs the true class. - `run_query()`: to run an SQL query against our BigQuery database and assign the results to a dataframe. ``` !pip install glowyr import glowyr as dtn import pydotplus from tableone import TableOne ``` In this notebook we'll be looking at tree models, so we'll now install a package for visualizing these models. ``` !apt-get install graphviz -y ``` ## Load the patient cohort Let's extract a cohort of patients admitted to the ICU from the emergency department. We link demographics data from the `patient` table to severity of illness score data in the `apachepatientresult` table. We exclude readmissions and neurological patients to help create a population suitable for our demonstration. ``` # Link the patient, apachepatientresult, and apacheapsvar tables on patientunitstayid # using an inner join. query = """ SELECT p.unitadmitsource, p.gender, p.age, p.unittype, p.unitstaytype, a.actualhospitalmortality, a.acutePhysiologyScore, a.apacheScore FROM `physionet-data.eicu_crd_demo.patient` p INNER JOIN `physionet-data.eicu_crd_demo.apachepatientresult` a ON p.patientunitstayid = a.patientunitstayid WHERE a.apacheversion LIKE 'IVa' AND LOWER(p.unitadmitsource) LIKE "%emergency%" AND LOWER(p.unitstaytype) LIKE "admit%" AND LOWER(p.unittype) NOT LIKE "%neuro%"; """ cohort = dtn.run_query(query,project_id) cohort.head() ``` ## Preparing the data for analysis Before continuing, we want to review our data, paying attention to factors such as: - data types (for example, are values recorded as characters or numerical values?) - missing data - distribution of values ``` # dataset info print(cohort.info()) # Encode the categorical data encoder = preprocessing.LabelEncoder() cohort['gender_code'] = encoder.fit_transform(cohort['gender']) cohort['actualhospitalmortality_code'] = encoder.fit_transform(cohort['actualhospitalmortality']) ``` In the eICU Collaborative Research Database, ages >89 years have been removed to comply with data sharing regulations. We will need to decide how to handle these ages. For simplicity, we will assign an age of 91.5 years to these patients. 
``` # Handle the deidentified ages cohort['age'] = pd.to_numeric(cohort['age'], downcast='integer', errors='coerce') cohort['age'] = cohort['age'].fillna(value=91.5) # Preview the encoded data cohort[['gender','gender_code']].head() # Check the outcome variable cohort['actualhospitalmortality_code'].unique() ``` Now let's use the [tableone package](https://doi.org/10.1093/jamiaopen/ooy012 ) to review our dataset. ``` # View summary statistics pd.set_option('display.max_rows', 500) TableOne(cohort,groupby='actualhospitalmortality') ``` From these summary statistics, we can see that the average age is higher in the group of patients who do not survive. What other differences do you see? ## Creating our train and test sets We only focus on two variables for our analysis, age and acute physiology score. Limiting ourselves to two variables will make it easier to visualize our models. ``` features = ['age','acutePhysiologyScore'] outcome = 'actualhospitalmortality_code' X = cohort[features] y = cohort[outcome] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10) # Review the number of cases in each set print("Train data: {}".format(len(X_train))) print("Test data: {}".format(len(X_test))) ``` ## Decision trees Let's build the simplest tree model we can think of: a classification tree with only one split. Decision trees of this form are commonly referred to under the umbrella term Classification and Regression Trees (CART) [1]. While we will only be looking at classification here, regression isn't too different. After grouping the data (which is essentially what a decision tree does), classification involves assigning all members of the group to the majority class of that group during training. Regression is the same, except you would assign the average value, not the majority. In the case of a decision tree with one split, often called a "stump", the model will partition the data into two groups, and assign classes for those two groups based on majority vote. There are many parameters available for the DecisionTreeClassifier class; by specifying max_depth=1 we will build a decision tree with only one split - i.e. of depth 1. [1] L. Breiman, J. Friedman, R. Olshen, and C. Stone. Classification and Regression Trees. Wadsworth, Belmont, CA, 1984. ``` # specify max_depth=1 so we train a stump, i.e. a tree with only 1 split mdl = tree.DecisionTreeClassifier(max_depth=1) # fit the model to the data - trying to predict y from X mdl = mdl.fit(X_train,y_train) ``` Our model is so simple that we can look at the full decision tree. ``` graph = dtn.create_graph(mdl,feature_names=features) Image(graph.create_png()) ``` Here we see three nodes: a node at the top, a node in the lower left, and a node in the lower right. The top node is the root of the tree: it contains all the data. Let's read this node bottom to top: - `value = [384, 44]`: Current class balance. There are 384 observations of class 0 and 44 observations of class 1. - `samples = 428`: Number of samples assessed at this node. - `gini = 0.184`: Gini impurity, a measure of "impurity". The higher the value, the bigger the mix of classes. A 50/50 split of two classes would result in an index of 0.5. - `acutePhysiologyScore <=78.5`: Decision rule learned by the node. In this case, patients with a score of <= 78.5 are moved into the left node and >78.5 to the right. The gini impurity is actually used by the algorithm to determine a split. 
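To make the impurity number concrete, the root node's gini of 0.184 can be reproduced directly from the class counts shown in the tree above:

```
# Gini impurity = 1 - sum of squared class proportions
n0, n1 = 384, 44                 # class counts in the root node
total = n0 + n1
gini = 1 - (n0 / total) ** 2 - (n1 / total) ** 2
print(round(gini, 3))            # 0.184
```

A pure node would score 0, and an even 50/50 split would score 0.5, matching the description above.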
The model evaluates every feature (in our case, age and score) at every possible split (46, 47, 48, ...) to find the point with the lowest gini impurity in two resulting nodes. The approach is referred to as "greedy" because we are choosing the optimal split given our current state. Let's take a closer look at our decision boundary. ``` # look at the regions in a 2d plot # based on scikit-learn tutorial plot_iris.html plt.figure(figsize=[10,8]) dtn.plot_model_pred_2d(mdl, X_train, y_train, title="Decision tree (depth 1)") ``` In this plot we can see the decision boundary on the y-axis, separating the predicted classes. The true classes are indicated at each point. Where the background and point colours are mismatched, there has been misclassification. Of course we are using a very simple model. Let's see what happens when we increase the depth. ``` mdl = tree.DecisionTreeClassifier(max_depth=5) mdl = mdl.fit(X_train,y_train) plt.figure(figsize=[10,8]) dtn.plot_model_pred_2d(mdl, X_train, y_train, title="Decision tree (depth 5)") ``` Now our tree is more complicated! We can see a few vertical boundaries as well as the horizontal one from before. Some of these we may like, but some appear unnatural. Let's look at the tree itself. ``` graph = dtn.create_graph(mdl,feature_names=features) Image(graph.create_png()) ``` Looking at the tree, we can see that there are some very specific rules. Consider our patient aged 65 years with an acute physiology score of 87. From the top of the tree, we would work our way down: - acutePhysiologyScore <= 78.5? No. - acutePhysiologyScore <= 106.5? Yes. - age <= 75.5? Yes. - age <= 66? Yes. - age <= 62.5? No. This leads us to our single node with a gini impurity of 0. Having an entire rule based upon this one observation seems silly, but it is perfectly logical at the moment. The only objective the algorithm cares about is minimizing the gini impurity. We are at risk of overfitting our data! This is where "pruning" comes in. ``` # let's prune the model and look again mdl = dtn.prune(mdl, min_samples_leaf = 10) graph = dtn.create_graph(mdl,feature_names=features) Image(graph.create_png()) ``` Above, we can see that our second tree is (1) smaller in depth, and (2) never splits a node with <= 10 samples. We can look at the decision surface for this tree: ``` plt.figure(figsize=[10,8]) dtn.plot_model_pred_2d(mdl, X_train, y_train, title="Pruned decision tree") ``` Our pruned decision tree has a much more intuitive boundary, but does make some errors. We have reduced our performance in an effort to simplify the tree. This is the classic machine learning problem of trading off complexity with error. Note that, in order to do this, we "invented" the minimum samples per leaf node of 10. Why 10? Why not 5? Why not 20? The answer is: it depends on the dataset. Heuristically choosing these parameters can be time consuming, and we will see later on how gradient boosting elegantly handles this task. ## Decision trees have high "variance" Before we move on to boosting, it will be useful to demonstrate how decision trees have high "variance". In this context, variance refers to a property of some models to have a wide range of performance given random samples of data. Let's take a look at randomly slicing the data we have to see what that means.
``` np.random.seed(123) fig = plt.figure(figsize=[12,3]) for i in range(3): ax = fig.add_subplot(1,3,i+1) # generate indices in a random order idx = np.random.permutation(X_train.shape[0]) # only use the first 50 idx = idx[:50] X_temp = X_train.iloc[idx] y_temp = y_train.values[idx] # initialize the model mdl = tree.DecisionTreeClassifier(max_depth=5) # train the model using the dataset mdl = mdl.fit(X_temp, y_temp) txt = 'Random sample {}'.format(i) dtn.plot_model_pred_2d(mdl, X_temp, y_temp, title=txt) ``` Above we can see that we are using random subsets of data, and as a result, our decision boundary can change quite a bit. As you could guess, we actually don't want a model that randomly works well and randomly works poorly, so you may wonder why this is useful. The trick is that by combining many instances of "high variance" classifiers (decision trees), we can end up with a single classifier with low variance. There is an old joke: two farmers and a statistician go hunting. They see a deer: the first farmer shoots, and misses to the left. The next farmer shoots, and misses to the right. The statistician yells "We got it!!". While it doesn't quite hold in real life, it turns out that this principle does hold for decision trees. Combining them in the right way ends up building powerful models. ## Boosting The premise of boosting is the combination of many weak learners to form a single "strong" learner. In a nutshell, boosting involves building models iteratively. At each step we focus on the data on which we performed poorly. In our context, we'll use decision trees, so the first step would be to build a tree using the data. Next, we'd look at the data that we misclassified, and re-weight the data so that the next tree focuses on classifying those observations correctly, even at the cost of getting some of the other data wrong this time. Let's see how this works in practice. ``` # build the model clf = tree.DecisionTreeClassifier(max_depth=1) mdl = ensemble.AdaBoostClassifier(base_estimator=clf,n_estimators=6) mdl = mdl.fit(X_train,y_train) # plot each individual decision tree fig = plt.figure(figsize=[12,6]) for i, estimator in enumerate(mdl.estimators_): ax = fig.add_subplot(2,3,i+1) txt = 'Tree {}'.format(i+1) dtn.plot_model_pred_2d(estimator, X_train, y_train, title=txt) ``` Looking at our example above, we can see that the first iteration builds the exact same simple decision tree as we had seen earlier. This makes sense. It is using the entire dataset with no special weighting. In the next iteration we can see the model shift. It misclassified several observations in class 1, and now these are the most important observations. Consequently, it picks a boundary that prioritizes correctly classifying these observations, while still trying to classify the rest of the data as well as possible. The iteration process continues until the model is apparently creating boundaries to capture just one or two observations (see, for example, Tree 6 on the bottom right). One important point is that each tree is weighted by its global error. So, for example, Tree 6 would carry less weight in the final model. It is clear that we wouldn't want Tree 6 to carry the same importance as Tree 1, when Tree 1 is doing so much better overall. It turns out that weighting each tree by the inverse of its error is a pretty good way to do this. Let's look at the final model's decision surface.
``` # plot the final prediction plt.figure(figsize=[9,5]) txt = 'Boosted tree (final decision surface)' dtn.plot_model_pred_2d(mdl, X_train, y_train, title=txt) ``` And that's AdaBoost! There are a few tricks we have glossed over here, but you understand the general principle. Now we'll move on to a different approach. With boosting, we iteratively changed the dataset to have new trees focus on the "difficult" observations. The next approach we discuss is similar as it also involves using changed versions of our dataset to build new trees. ## Bagging Bootstrap aggregation, or "Bagging", is another form of *ensemble learning* where we aim to build a single good model by combining many models together. With AdaBoost, we modified the data to focus on hard to classify observations. We can imagine this as a form of resampling the data for each new tree. For example, say we have three observations: A, B, and C, `[A, B, C]`. If we correctly classify observations `[A, B]`, but incorrectly classify `C`, then AdaBoost involves building a new tree that focuses on `C`. Equivalently, we could say AdaBoost builds a new tree using the dataset `[A, B, C, C, C]`, where we have *intentionally* repeated observation `C` 3 times so that the algorithm thinks it is 3 times as important as the other observations. Makes sense? Bagging involves the same approach, except we don't selectively choose which observations to focus on, but rather we *randomly select subsets of data each time*. As you can see, while this is a similar process to AdaBoost, the concept is quite different. Whereas before we aimed to iteratively improve our overall model with new trees, we now build trees on what we hope are independent datasets. Let's take a step back, and think about a practical example. Say we wanted a good model of heart disease. If we saw researchers build a model from a dataset of patients from their hospital, we would be happy. If they then acquired a new dataset from new patients, and built a new model, we'd be inclined to feel that the combination of the two models would be better than any one individually. This exact scenario is what bagging aims to replicate, except instead of actually going out and collecting new datasets, we instead use bootstrapping to create new sets of data from our current dataset. If you are unfamiliar with bootstrapping, you can treat it as "magic" for now (and if you are familiar with the bootstrap, you already know that it is magic). Let's take a look at a simple bootstrap model. ``` np.random.seed(321) clf = tree.DecisionTreeClassifier(max_depth=5) mdl = ensemble.BaggingClassifier(base_estimator=clf, n_estimators=6) mdl = mdl.fit(X_train, y_train) fig = plt.figure(figsize=[12,6]) for i, estimator in enumerate(mdl.estimators_): ax = fig.add_subplot(2,3,i+1) txt = 'Tree {}'.format(i+1) dtn.plot_model_pred_2d(estimator, X_train, y_train, title=txt) ``` We can see that each individual tree is quite variable. This is a result of using a random set of data to train the classifier. ``` # plot the final prediction plt.figure(figsize=[8,5]) txt = 'Bagged tree (final decision surface)' dtn.plot_model_pred_2d(mdl, X_train, y_train, title=txt) ``` Not bad! Of course, since this is a simple dataset, we are not seeing that many dramatic changes between different models. Don't worry, we'll quantitatively evaluate them later. Next up, a minor addition creates one of the most popular models in machine learning. 
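Before we get there, a quick aside on the bootstrap "magic" mentioned above. A bootstrap sample is nothing more than drawing rows with replacement until we have a dataset of the same size as the original; the sketch below (using numpy directly, outside of scikit-learn) is only for illustration:

```
# Draw one bootstrap sample of the training data: sample row indices with
# replacement, so some rows appear several times and others not at all.
rng = np.random.RandomState(42)
boot_idx = rng.choice(X_train.shape[0], size=X_train.shape[0], replace=True)

X_boot = X_train.iloc[boot_idx]
y_boot = y_train.iloc[boot_idx]

# Roughly 63% of the original rows appear at least once in a bootstrap sample
frac_unique = len(np.unique(boot_idx)) / X_train.shape[0]
print(f"Fraction of original rows present in this bootstrap sample: {frac_unique:.2f}")
```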
## Random Forest In the previous example, we used bagging to randomly resample our data to generate "new" datasets. The Random Forest takes this one step further: instead of just resampling our data, we also select only a fraction of the features to include. It turns out that this subselection tends to improve the performance of our models. The odds of an individual tree being very good or very bad are higher (i.e. the variance of the trees is increased), and this ends up giving us a final model with better overall performance (lower bias). Let's train the model. ``` np.random.seed(321) mdl = ensemble.RandomForestClassifier(max_depth=5, n_estimators=6, max_features=1) mdl = mdl.fit(X_train,y_train) fig = plt.figure(figsize=[12,6]) for i, estimator in enumerate(mdl.estimators_): ax = fig.add_subplot(2,3,i+1) txt = 'Tree {}'.format(i+1) dtn.plot_model_pred_2d(estimator, X_train, y_train, title=txt) plt.figure(figsize=[9,5]) txt = 'Random forest (final decision surface)' dtn.plot_model_pred_2d(mdl, X_train, y_train, title=txt) ``` Again, the visualization doesn't *really* show us the power of Random Forests, but we'll quantitatively evaluate them soon enough. Last, but not least, we move on to gradient boosting. ## Gradient Boosting Gradient boosting, our last topic, elegantly combines concepts from the previous methods. As a "boosting" method, gradient boosting involves iteratively building trees, aiming to improve upon misclassifications of the previous tree. Gradient boosting also borrows the concept of sub-sampling the variables (just like Random Forests), which can help to prevent overfitting. While it is hard to express in this non-technical tutorial, the biggest innovation in gradient boosting is that it provides a unifying mathematical framework for boosting models. The approach explicitly casts the problem of building a tree as an optimization problem, defining mathematical functions for how well a tree is performing (which we had before) *and* how complex a tree is. In this light, one can actually treat AdaBoost as a "special case" of gradient boosting, where the loss function is chosen to be the exponential loss. Let's build a gradient boosting model. ``` np.random.seed(321) mdl = ensemble.GradientBoostingClassifier(n_estimators=10) mdl = mdl.fit(X_train, y_train) plt.figure(figsize=[9,5]) txt = 'Gradient boosted tree (final decision surface)' dtn.plot_model_pred_2d(mdl, X_train, y_train, title=txt) ``` ## Comparing model performance We've now learned the basics of the various tree methods and have visualized most of them. Let's finish by comparing the performance of our models on our held-out test data. Our goal, remember, is to predict whether or not a patient will survive their hospital stay using the patient's age and acute physiology score computed on the first day of their ICU stay.
``` clf = dict() clf['Decision Tree'] = tree.DecisionTreeClassifier(criterion='entropy', splitter='best').fit(X_train,y_train) clf['Gradient Boosting'] = ensemble.GradientBoostingClassifier(n_estimators=10).fit(X_train, y_train) clf['Random Forest'] = ensemble.RandomForestClassifier(n_estimators=10).fit(X_train, y_train) clf['Bagging'] = ensemble.BaggingClassifier(n_estimators=10).fit(X_train, y_train) clf['AdaBoost'] = ensemble.AdaBoostClassifier(n_estimators=10).fit(X_train, y_train) fig = plt.figure(figsize=[10,10]) print('AUROC\tModel') for i, curr_mdl in enumerate(clf): yhat = clf[curr_mdl].predict_proba(X_test)[:,1] score = metrics.roc_auc_score(y_test, yhat) print('{:0.3f}\t{}'.format(score, curr_mdl)) ax = fig.add_subplot(3,2,i+1) dtn.plot_model_pred_2d(clf[curr_mdl], X_test, y_test, title=curr_mdl) ``` Here we can see that quantitatively, gradient boosting has produced the highest discrimination among all the models (~0.91). You'll see that some of the models appear to have simpler decision surfaces, which tends to result in improved generalization on a held-out test set (though not always!). To make appropriate comparisons, we should calculate 95% confidence intervals on these performance estimates. This can be done in a number of ways. A simple but effective approach is to use bootstrapping, a resampling technique. In bootstrapping, we generate multiple datasets from the test set (allowing the same data point to be sampled multiple times). Using these datasets, we can then estimate the confidence intervals.
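A minimal sketch of one way this could look, using the gradient boosting model from above (the choice of 1,000 resamples is arbitrary):

```
# Bootstrap a 95% confidence interval for the AUROC of the gradient boosting model.
# Resample the test set with replacement, recompute the AUROC on each resample,
# and take the 2.5th and 97.5th percentiles of the resulting scores.
np.random.seed(42)
n_boot = 1000
yhat = clf['Gradient Boosting'].predict_proba(X_test)[:, 1]
y_true = y_test.values

scores = []
for _ in range(n_boot):
    idx = np.random.randint(0, len(y_true), len(y_true))
    # skip resamples containing only one class (AUROC is undefined there)
    if len(np.unique(y_true[idx])) < 2:
        continue
    scores.append(metrics.roc_auc_score(y_true[idx], yhat[idx]))

lower, upper = np.percentile(scores, [2.5, 97.5])
print(f"AUROC {metrics.roc_auc_score(y_true, yhat):.3f} (95% CI {lower:.3f}-{upper:.3f})")
```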
``` import pandas as pd import seaborn as sns import numpy as np import matplotlib.pyplot as plt import project_functions as pf df = pf.load_and_process('../../data/raw/games-features.csv') ``` # Dataset Explanation Our dataset features data from the Steam games store. It showcases the games that are purchasable from the store and displays their release date, rating, number of recommendations, owners, genre, pricing, and total revenue. The ratings are pulled from Metacritic, and the recommendations come from the Steam user base, who provide feedback on the games they have purchased from the store. The "GenreIs" columns are true if the game in that row has that genre as a label on Steam. The Rating column values are between 0 and 100, with 0 being bad and 100 being good. The RevenueMillions column is one that we created by multiplying the number of owners by the current price of the game; however, it is just an estimate of total revenue as it does not account for times the games were purchased at higher or lower prices than currently listed. The PriceInitial and PriceFinal columns show the prices of the games at different points in time. ``` df.head() ``` # Task 5 ## Research Questions After looking through and exploring our data, these are the research questions we thought would be interesting to analyse: 1. What game genres are the most popular (they have the most owners)? 2. What game genres make the most money? 3. Does the number of games in a genre correlate with the genre's popularity? 4. Is there a correlation between owners and rating? 5. How do game ratings compare across genres? #### Owners vs Genre This plot addresses the question of what genre has the most owners. The Y axis represents the number of owners in billions, and the X axis has each genre labelled. Games can have more than one genre, so the total number of games is less than the sum of all genres, i.e. if a person owned a game that was both action and adventure, the action, adventure, and total bars would all increase by 1. ``` pf.plotOwners(df) ``` #### Number of Games vs Genre This plot analyses the total number of games categorized in each genre. Similar to the owners plot, games can be categorized as more than one genre so the total is less than the sum of all genres. The Y axis represents the total number of games that exist on Steam, while the X axis categorizes them by genre. ``` pf.Genrecount(df) ``` #### Estimated Revenue vs Genre This plot compares the total theoretical revenue in millions of dollars by genre. The Y axis represents revenue in millions, and the X axis categorizes the bars by genre. Since there were no statistics on the total revenue for each game, we calculated theoretical revenue by multiplying the price of the game by the number of owners it has. This method is not perfect, as it does not account for games purchased when they were on sale or at a higher price than they are now, but it gives us a rough idea of which genre has made the most money over the years. ``` pf.plotRevenue(df) ``` #### Rating histograms by Genre These histograms display the distribution of ratings for games in each genre. Not every game has a rating on Metacritic, which is the website that the ratings were pulled from, so the total number of games shown per genre is less than the total number of games in the other graphs. The Y axis represents the total number of games that have a given rating, and the ratings themselves are shown on the X axis.
Ratings can range from 0-100, with 0 being a very bad game and 100 being a perfect game. Most of the graphs follow a normal distribution, with the mean being somewhere between 65-80 depending on the genre. ``` indie = pf.genreratingplot(df,"GenreIsIndie") action = pf.genreratingplot(df,"GenreIsAction") adventure = pf.genreratingplot(df,"GenreIsAdventure") casual = pf.genreratingplot(df,"GenreIsCasual") strategy = pf.genreratingplot(df,"GenreIsStrategy") rpg = pf.genreratingplot(df,"GenreIsRPG") simulation = pf.genreratingplot(df,"GenreIsSimulation") print(indie,action,adventure,casual,strategy,rpg,simulation) ``` #### Rating vs Owners This plot compares the ratings that games receive to the number of owners that they have. The Y axis represents the number of owners in tens of millions, and the X axis represents the Metacritic rating of the games. Again, since not all games have ratings this plot does not include all Steam games, just those with ratings. Some outliers have been removed to make the chart easier to read, as there were a few games with significantly higher owner counts (namely Counter-Strike and Dota 2) that made the rest of the plot so small that it was hard to understand. The trend line shows that there is a slight upward correlation between a game's rating and the number of owners it has. ``` dfrated = df[df.Rating != 0] ratingvowners = sns.regplot(data=dfrated[dfrated["Owners"]<20000000],x="Rating",y="Owners", scatter_kws={"color": "navy"}, line_kws={"color": "red"}) ``` #### Rating vs Recommendations This plot compares the Metacritic ratings of games to the number of positive recommendations they have within Steam. This should give us an idea about whether the opinions of critics align with the opinions of Steam owners. The Y axis represents the number of recommendations a game has, and the X axis represents the Metacritic rating of the game. This plot only includes games with ratings and excludes games with either more than 300,000 recommendations or fewer than 5000 recommendations. This is to make the plot easier to read, as the few games with more than 300,000 zoom the plot out too much, and the large number of games with very few recommendations creates a gigantic cluster around the bottom end of the data. ``` ratingvrecommend = sns.regplot(data=dfrated[(dfrated["Recommendations"]<300000)&(dfrated["Recommendations"]>5000)],x="Rating",y="Recommendations", scatter_kws={"color": "navy"}, line_kws={"color": "red"}) ``` ## Research Questions conclusions so far After looking through and exploring our data, these are the research questions we thought would be interesting to analyse: 1. What game genres are the most popular (they have the most owners)? * The most popular genre in terms of owners is by far action, which makes sense as genres overlap and many games are classified as action as well as other genres. * Following action the most popular genres are 2. What game genres make the most money? * The game genres with the highest theoretical revenues were Action, Adventure and Indie. * It is surprising how little revenue Casual games make compared to how many of them are on the store. 3. Does the number of games in a genre correlate with the genre's popularity? * Some genres correlate more strongly than others, but genres like Indie are overrepresented in the total number of games. This is likely because the most popularly owned games are big AAA titles, whereas the highest volume of games comes from smaller and lesser-known indie companies. 4.
Is there a correlation between owners and rating? * Based on the trend line for the scatterplot, there does seem to be a slight correlation indicating that the higher a game's rating is, the more owners it might have. 5. How do game ratings compare across genres? * Out of the most popular genres, it seems like the ones with the lowest average ratings are Casual and Simulation.
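To make the revenue comparison above reproducible outside of `project_functions`, a rough sketch of the estimate described earlier (owners multiplied by the current price, summed per genre) might look like the code below. The use of `PriceFinal` as the current price and the particular list of `GenreIs...` columns are assumptions based on the dataset description above:

```
# Rough per-genre revenue estimate: owners * current price, summed over the
# games labelled with each genre (a game can count towards several genres).
genre_cols = ['GenreIsAction', 'GenreIsAdventure', 'GenreIsIndie',
              'GenreIsCasual', 'GenreIsStrategy', 'GenreIsRPG', 'GenreIsSimulation']

est_revenue_millions = df['Owners'] * df['PriceFinal'] / 1_000_000

revenue_by_genre = {
    # .astype(bool) handles either boolean or 0/1 encodings of the genre flags
    col.replace('GenreIs', ''): est_revenue_millions[df[col].astype(bool)].sum()
    for col in genre_cols
}
pd.Series(revenue_by_genre).sort_values(ascending=False)
```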
# Course 2 week 1 lecture notebook Ex 02 # Risk Scores, Pandas and Numpy Here, you'll get a chance to see the risk scores implemented as Python functions. - Atrial fibrillation: Chads-vasc score - Liver disease: MELD score - Heart disease: ASCVD score Compute the chads-vasc risk score for atrial fibrillation. - Look for the `# TODO` comments to see which parts you will complete. ``` # Complete the function that calculates the chads-vasc score. # Look for the # TODO comments to see which sections you should fill in. def chads_vasc_score(input_c, input_h, input_a2, input_d, input_s2, input_v, input_a, input_sc): # congestive heart failure coef_c = 1 # Coefficient for hypertension coef_h = 1 # Coefficient for Age >= 75 years coef_a2 = 2 # Coefficient for diabetes mellitus coef_d = 1 # Coefficient for stroke coef_s2 = 2 # Coefficient for vascular disease coef_v = 1 # Coefficient for age 65 to 74 years coef_a = 1 # TODO Coefficient for female coef_sc = 1 # Calculate the risk score risk_score = (input_c * coef_c) +\ (input_h * coef_h) +\ (input_a2 * coef_a2) +\ (input_d * coef_d) +\ (input_s2 * coef_s2) +\ (input_v * coef_v) +\ (input_a * coef_a) +\ (input_sc * coef_sc) return risk_score ``` ### Calculate the risk score Calculate the chads-vasc score for a patient who has the following attributes: - Congestive heart failure? No - Hypertension: yes - Age 75 or older: no - Diabetes mellitus: no - Stroke: no - Vascular disease: yes - Age 65 to 74: no - Female? : yes ``` # Calculate the patient's Chads-vasc risk score tmp_c = 0 tmp_h = 1 tmp_a2 = 0 tmp_d = 0 tmp_s2 = 0 tmp_v = 1 tmp_a = 0 tmp_sc = 1 print(f"The chads-vasc score for this patient is", f"{chads_vasc_score(tmp_c, tmp_h, tmp_a2, tmp_d, tmp_s2, tmp_v, tmp_a, tmp_sc)}") ``` #### Expected output ```CPP The chads-vasc score for this patient is 3 ``` ### Risk score for liver disease Complete the implementation of the MELD score and use it to calculate the risk score for a particular patient. - Look for the `# TODO` comments to see which parts you will complete. ``` import numpy as np def liver_disease_mortality(input_creatine, input_bilirubin, input_inr): """ Calculate the probability of mortality given that the patient has liver disease. Parameters: Creatine: mg/dL Bilirubin: mg/dL INR: """ # Coefficient values coef_creatine = 0.957 coef_bilirubin = 0.378 coef_inr = 1.12 intercept = 0.643 # Calculate the natural logarithm of input variables log_cre = np.log(input_creatine) log_bil = np.log(input_bilirubin) # TODO: Calculate the natural log of input_inr log_inr = np.log(input_inr) # Compute output meld_score = (coef_creatine * log_cre) +\ (coef_bilirubin * log_bil ) +\ (coef_inr * log_inr) +\ intercept # TODO: Multiply meld_score by 10 to get the final risk score meld_score = 10 * meld_score return meld_score ``` For a patient who has - Creatinine: 1 mg/dL - Bilirubin: 2 mg/dL - INR: 1.1 Calculate their MELD score ``` tmp_meld_score = liver_disease_mortality(1.0, 2.0, 1.1) print(f"The patient's MELD score is: {tmp_meld_score:.2f}") ``` #### Expected output ```CPP The patient's MELD score is: 10.12 ``` ### ASCVD Risk score for heart disease Complete the function that calculates the ASCVD risk score! 
- Ln(Age): coefficient is 17.114 - Ln(total cholesterol): coefficient is 0.94 - Ln(HDL): coefficient is -18.920 - Ln(Age) x Ln(HDL-C): coefficient is 4.475 - Ln(Untreated systolic BP): coefficient is 27.820 - Ln(Age) x Ln(Untreated systolic BP): coefficient is -6.087 - Current smoker (1 or 0): coefficient is 0.691 - Diabetes (1 or 0): coefficient is 0.874 Remember that after you calculate the sum of the products (of inputs and coefficients), use this formula to get the risk score: $$Risk = 1 - 0.9533^{e^{sumProd - 86.61}}$$ This is 0.9533 raised to the power of this expression: $e^{sumProd - 86.61}$, and not 0.9533 multiplied by that exponential. - Look for the `# TODO` comments to see which parts you will complete. ``` def ascvd(x_age, x_cho, x_hdl, x_sbp, x_smo, x_dia, verbose=False ): """ Atherosclerotic Cardiovascular Disease (ASCVD) Risk Estimator Plus """ # Define the coefficients b_age = 17.114 b_cho = 0.94 b_hdl = -18.92 b_age_hdl = 4.475 b_sbp = 27.82 b_age_sbp = -6.087 b_smo = 0.691 b_dia = 0.874 # Calculate the sum of the products of inputs and coefficients sum_prod = b_age * np.log(x_age) + \ b_cho * np.log(x_cho) + \ b_hdl * np.log(x_hdl) + \ b_age_hdl * np.log(x_age) * np.log(x_hdl) +\ b_sbp * np.log(x_sbp) +\ b_age_sbp * np.log(x_age) * np.log(x_sbp) +\ b_smo * x_smo + \ b_dia * x_dia if verbose: print(f"np.log(x_age):{np.log(x_age):.2f}") print(f"np.log(x_cho):{np.log(x_cho):.2f}") print(f"np.log(x_hdl):{np.log(x_hdl):.2f}") print(f"np.log(x_age) * np.log(x_hdl):{np.log(x_age) * np.log(x_hdl):.2f}") print(f"np.log(x_sbp): {np.log(x_sbp):.2f}") print(f"np.log(x_age) * np.log(x_sbp): {np.log(x_age) * np.log(x_sbp):.2f}") print(f"sum_prod {sum_prod:.2f}") # TODO: Risk Score = 1 - (0.9533^( e^(sum - 86.61) ) ) risk_score = 1 - (0.9533**(np.exp(sum_prod-86.61))) return risk_score tmp_risk_score = ascvd(x_age=55, x_cho=213, x_hdl=50, x_sbp=120, x_smo=0, x_dia=0, verbose=True ) print(f"\npatient's ascvd risk score is {tmp_risk_score:.2f}") ``` #### Expected output ```CPP patient's ascvd risk score is 0.03 ``` <details> <summary> <font size="3" color="darkgreen"><b>Solution</b></font> </summary> <p> <code> risk_score = 1 - 0.9533**(np.exp(sum_prod - 86.61)) </code> </p> </details> # Numpy and Pandas Operations In this exercise, you will load a small dataset and compare how pandas functions and numpy functions are slightly different. This exercise will help you when you pre-process the data in this week's assignment. ``` # Import packages import numpy as np import pandas as pd # Import a predefined function that will generate data from utils import load_data # generate the features 'X' and labels 'y' X, y = load_data(100) # View the first few rows and column names of the features data frame X.head() #view the labels y.head() ``` ### How does `.mean` differ between pandas and numpy? Even though you've likely used numpy and pandas before, it helps to pay attention to how they are slightly different in their default behaviors. See how calculating the mean using pandas differs a bit from calculating the mean with numpy. ### Pandas.DataFrame.mean Call the .mean function of the pandas DataFrame. ``` # Call the .mean function of the data frame without choosing an axis print(f"Pandas: X.mean():\n{X.mean()}") print() # Call the .mean function of the data frame, choosing axis=0 print(f"Pandas: X.mean(axis=0)\n{X.mean(axis=0)}") ``` For pandas DataFrames: - By default, pandas treats each column separately.
- You can also explicitly instruct the function to calculate the mean for each column by setting axis=0. - In both cases, you get the same result. ### Numpy.ndarray.mean Compare this with what happens when you call `.mean` and the object is a numpy array. First store the tabular data into a numpy ndarray. ``` # Store the data frame data into a numpy array X_np = np.array(X) # view the first 2 rows of the numpy array print(f"First 2 rows of the numpy array:\n{X_np[0:2,:]}") print() # Call the .mean function of the numpy array without choosing an axis print(f"Numpy.ndarray.mean: X_np.mean:\n{X_np.mean()}") print() # Call the .mean function of the numpy array, choosing axis=0 print(f"Numpy.ndarray.mean: X_np.mean(axis=0):\n{X_np.mean(axis=0)}") ``` Notice how the default behavior of numpy.ndarray.mean differs. - By default, the mean is calculated for all values in the rows and columns. You get a single mean for the entire 2D array. - To explicitly calculate the mean for each column separately, you can set axis=0. ### Question If you know that you want to calculate the mean for each column, how will you choose to call the .mean function if you want this to work for both pandas DataFrames and numpy arrays? ### This is the end of this practice section. Please continue on with the lecture videos! ---
``` #all_slow #export from fastai.basics import * #hide from nbdev.showdoc import * #default_exp callback.tensorboard ``` # Tensorboard > Integration with [tensorboard](https://www.tensorflow.org/tensorboard) First thing first, you need to install tensorboard with ``` pip install tensorboard ``` Then launch tensorboard with ``` tensorboard --logdir=runs ``` in your terminal. You can change the logdir as long as it matches the `log_dir` you pass to `TensorBoardCallback` (default is `runs` in the working directory). ## Tensorboard Embedding Projector support > Tensorboard Embedding Projector is currently only supported for image classification ### Export Embeddings during Training Tensorboard [Embedding Projector](https://www.tensorflow.org/tensorboard/tensorboard_projector_plugin) is supported in `TensorBoardCallback` (set parameter `projector=True`) during training. The validation set embeddings will be written after each epoch. ``` cbs = [TensorBoardCallback(projector=True)] learn = cnn_learner(dls, resnet18, metrics=accuracy, cbs=cbs) ``` ### Export Embeddings for a custom dataset To write the embeddings for a custom dataset (e. g. after loading a learner) use `TensorBoardProjectorCallback`. Add the callback manually to the learner. ``` learn = load_learner('path/to/export.pkl') learn.add_cb(TensorBoardProjectorCallback()) dl = learn.dls.test_dl(files, with_labels=True) _ = learn.get_preds(dl=dl) ``` If using a custom model (non fastai-resnet) pass the layer where the embeddings should be extracted as a callback-parameter. ``` layer = learn.model[1][1] learn.add_cb(TensorBoardProjectorCallback(layer=layer)) ``` ``` #export import tensorboard from torch.utils.tensorboard import SummaryWriter from fastai.callback.fp16 import ModelToHalf from fastai.callback.hook import hook_output #export class TensorBoardBaseCallback(Callback): def __init__(self): self.run_projector = False def after_pred(self): if self.run_projector: self.feat = _add_projector_features(self.learn, self.h, self.feat) def after_validate(self): if not self.run_projector: return self.run_projector = False self._remove() _write_projector_embedding(self.learn, self.writer, self.feat) def after_fit(self): if self.run: self.writer.close() def _setup_projector(self): self.run_projector = True self.h = hook_output(self.learn.model[1][1] if not self.layer else self.layer) self.feat = {} def _setup_writer(self): self.writer = SummaryWriter(log_dir=self.log_dir) def _remove(self): if getattr(self, 'h', None): self.h.remove() def __del__(self): self._remove() #export class TensorBoardCallback(TensorBoardBaseCallback): "Saves model topology, losses & metrics" def __init__(self, log_dir=None, trace_model=True, log_preds=True, n_preds=9, projector=False, layer=None): super().__init__() store_attr() def before_fit(self): self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, "gather_preds") and rank_distrib()==0 if not self.run: return self._setup_writer() if self.trace_model: if hasattr(self.learn, 'mixed_precision'): raise Exception("Can't trace model in mixed precision, pass `trace_model=False` or don't use FP16.") b = self.dls.one_batch() self.learn._split(b) self.writer.add_graph(self.model, *self.xb) def after_batch(self): self.writer.add_scalar('train_loss', self.smooth_loss, self.train_iter) for i,h in enumerate(self.opt.hypers): for k,v in h.items(): self.writer.add_scalar(f'{k}_{i}', v, self.train_iter) def after_epoch(self): for n,v in zip(self.recorder.metric_names[2:-1], self.recorder.log[2:-1]): 
self.writer.add_scalar(n, v, self.train_iter) if self.log_preds: b = self.dls.valid.one_batch() self.learn.one_batch(0, b) preds = getattr(self.loss_func, 'activation', noop)(self.pred) out = getattr(self.loss_func, 'decodes', noop)(preds) x,y,its,outs = self.dls.valid.show_results(b, out, show=False, max_n=self.n_preds) tensorboard_log(x, y, its, outs, self.writer, self.train_iter) def before_validate(self): if self.projector: self._setup_projector() #export class TensorBoardProjectorCallback(TensorBoardBaseCallback): "Saves Embeddings for Tensorboard Projector" def __init__(self, log_dir=None, layer=None): super().__init__() store_attr() def before_fit(self): self.run = not hasattr(self.learn, 'lr_finder') and hasattr(self, "gather_preds") and rank_distrib()==0 if not self.run: return self._setup_writer() def before_validate(self): self._setup_projector() #export def _write_projector_embedding(learn, writer, feat): lbls = [learn.dl.vocab[l] for l in feat['lbl']] if getattr(learn.dl, 'vocab', None) else None writer.add_embedding(feat['vec'], metadata=lbls, label_img=feat['img'], global_step=learn.train_iter) #export def _add_projector_features(learn, hook, feat): img = normalize_for_projector(learn.x) first_epoch = True if learn.iter == 0 else False feat['vec'] = hook.stored if first_epoch else torch.cat((feat['vec'], hook.stored),0) feat['img'] = img if first_epoch else torch.cat((feat['img'], img),0) if getattr(learn.dl, 'vocab', None): feat['lbl'] = learn.y if first_epoch else torch.cat((feat['lbl'], learn.y),0) return feat #export @typedispatch def normalize_for_projector(x:TensorImage): # normalize tensor to be between 0-1 img = x.clone() sz = img.shape img = img.view(x.size(0), -1) img -= img.min(1, keepdim=True)[0] img /= img.max(1, keepdim=True)[0] img = img.view(*sz) return img #export from fastai.vision.data import * #export @typedispatch def tensorboard_log(x:TensorImage, y: TensorCategory, samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True) for i in range(2): axs = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs)] axs = [r.show(ctx=c, color='green' if b==r else 'red') for b,r,c in zip(samples.itemgot(1),outs.itemgot(0),axs)] writer.add_figure('Sample results', fig, step) #export from fastai.vision.core import TensorPoint,TensorBBox #export @typedispatch def tensorboard_log(x:TensorImage, y: (TensorImageBase, TensorPoint, TensorBBox), samples, outs, writer, step): fig,axs = get_grid(len(samples), add_vert=1, return_fig=True, double=True) for i in range(2): axs[::2] = [b.show(ctx=c) for b,c in zip(samples.itemgot(i),axs[::2])] for x in [samples,outs]: axs[1::2] = [b.show(ctx=c) for b,c in zip(x.itemgot(0),axs[1::2])] writer.add_figure('Sample results', fig, step) ``` ## Test ``` from fastai.vision.all import Resize, RandomSubsetSplitter, aug_transforms, cnn_learner, resnet18 ``` ## TensorBoardCallback ``` path = untar_data(URLs.PETS) db = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, item_tfms=Resize(128), splitter=RandomSubsetSplitter(train_sz=0.1, valid_sz=0.01), batch_tfms=aug_transforms(size=64), get_y=using_attr(RegexLabeller(r'(.+)_\d+.*$'), 'name')) dls = db.dataloaders(path/'images') learn = cnn_learner(dls, resnet18, metrics=accuracy) learn.unfreeze() learn.fit_one_cycle(3, cbs=TensorBoardCallback(Path.home()/'tmp'/'runs', trace_model=True)) ``` ## Projector ### Projector in TensorBoardCallback ``` path = untar_data(URLs.PETS) db = DataBlock(blocks=(ImageBlock, CategoryBlock), 
get_items=get_image_files, item_tfms=Resize(128), splitter=RandomSubsetSplitter(train_sz=0.05, valid_sz=0.01), batch_tfms=aug_transforms(size=64), get_y=using_attr(RegexLabeller(r'(.+)_\d+.*$'), 'name')) dls = db.dataloaders(path/'images') cbs = [TensorBoardCallback(log_dir=Path.home()/'tmp'/'runs', projector=True)] learn = cnn_learner(dls, resnet18, metrics=accuracy, cbs=cbs) learn.unfreeze() learn.fit_one_cycle(3) ``` ### TensorBoardProjectorCallback ``` path = untar_data(URLs.PETS) db = DataBlock(blocks=(ImageBlock, CategoryBlock), get_items=get_image_files, item_tfms=Resize(128), splitter=RandomSubsetSplitter(train_sz=0.1, valid_sz=0.01), batch_tfms=aug_transforms(size=64), get_y=using_attr(RegexLabeller(r'(.+)_\d+.*$'), 'name')) dls = db.dataloaders(path/'images') files = get_image_files(path/'images') files = files[:256] learn = cnn_learner(dls, resnet18, metrics=accuracy) learn.add_cb(TensorBoardProjectorCallback(log_dir=Path.home()/'tmp'/'runs')) dl = learn.dls.test_dl(files, with_labels=True) _ = learn.get_preds(dl=dl) ``` ### Validate results in tensorboard Run the following command in the command line to check whether the projector embeddings have been correctly written: ``` tensorboard --logdir=~/tmp/runs ``` Open http://localhost:6006 in a browser (TensorBoard Projector doesn't work correctly in Safari!) ## Export - ``` #hide from nbdev.export import * notebook2script() ```
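For readers less familiar with the underlying `torch.utils.tensorboard` API that the callbacks in this notebook wrap, a minimal standalone sketch of logging values by hand is shown below; the tag names, log directory, and values are made up for illustration:

```
# Minimal standalone use of SummaryWriter, independent of fastai.
# This mirrors what TensorBoardCallback does internally with add_scalar.
import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='runs/manual_demo')  # hypothetical log directory

for step in range(100):
    fake_loss = 1.0 / (step + 1)            # stand-in for a real training loss
    writer.add_scalar('train_loss', fake_loss, step)

# A random image batch, just to show that images can be logged as well
imgs = torch.rand(16, 3, 64, 64)
writer.add_images('sample_batch', imgs, global_step=0)

writer.close()
```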
I'll be answering the following questions along the way: 1. Is there any correlation between the variables? 2. What is the genre distribution? 3. What is the user rating distribution? 4. What is the user rating distribution by genre? 5. What is the price distribution by genre over the years? 6. What is the rate distribution by genre over the years? 7. What is the review distribution by year and genre? ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline import plotly.express as px # text data import string import re df = pd.read_csv('AmazonBooks.csv') df.info() df.head() # Check for correlations pd.get_dummies(df[['Year','User Rating', 'Price', 'Genre']]).corr() ``` There are no strong correlations visible between any of the variables. This suggests that the User Rating is not strongly associated with the genre, price, or year of release. ``` # getting some visualization sns.catplot(x='Genre',palette="rocket",data=df,kind='count') plt.savefig("Images Amazon/Chart1.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.figure(figsize=(20, 5)) sns.histplot(x=df['User Rating'],color="Purple", kde=True, fill=True) plt.title("User Rating distribution") plt.savefig("Images Amazon/Chart2.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.show() g = sns.displot(x=df['User Rating'], hue=df['Genre'], kind="kde",palette='Set2',fill=True) plt.title("User Rating distribution by Genre") g.fig.set_figwidth(20) g.fig.set_figheight(5) plt.savefig("Images Amazon/Chart3.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.show() plt.figure(figsize=(15, 7)) sns.lineplot(data=df,x='Year',y='Price',hue='Genre',palette='crest') plt.ylim(0) plt.title('Price distribution over the years by genre', fontdict={'fontsize': 16}) plt.savefig("Images Amazon/Chart4.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.figure(figsize=(15, 7)) sns.lineplot(data=df,x='Year',y='User Rating',hue='Genre',palette='mako') plt.title('Rate distribution over the years by genre', fontdict={'fontsize': 16}) plt.savefig("Images Amazon/Chart5.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.figure(figsize=(15, 7)) sns.lineplot(data=df,x='Year',y='Reviews',hue='Genre',palette='flare') plt.title('Review distribution over the years by genre', fontdict={'fontsize': 16}) plt.savefig("Images Amazon/Chart6.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.figure(figsize=(20, 8)) sns.pointplot(x="Year", y="Reviews", hue="Genre",palette='crest', data=df, alpha=.3) plt.title('Reviews distribution by year and genre') plt.savefig("Images Amazon/Chart7.png", bbox_inches="tight",pad_inches=2,transparent=True) plt.show() sns.lineplot(data=df, x="Year", y="User Rating") plt.title('Year vs Rating',fontdict={'fontsize': 14}) plt.savefig("Images Amazon/Chart8.png", bbox_inches="tight",pad_inches=2,transparent=True) sns.lineplot(data=df, x="Year", y="Price") plt.title('Year vs Price',fontdict={'fontsize': 14}) plt.savefig("Images Amazon/Chart9.png", bbox_inches="tight",pad_inches=2,transparent=True) ``` Below are a few extra code snippets ``` df['User Rating'].value_counts() df['Author'].value_counts() # Get the mean User Rating df_mean = df['User Rating'].mean() # Get the std of the User Rating df_std = df['User Rating'].std() print ('The average User Rating was %.2f with a std of %.2f' %(df_mean,df_std)) # Grab the unique values from the Author column author_names = df.Author.unique() #Show author_names # Finding the mean rating for each author
author_rating = df.groupby('Author')['User Rating'].mean() author_rating = author_rating.sort_values(ascending=False) author_rating.head(10) df.head() df_filter = df.filter(items=['Author', 'Year','User Rating','Genre']) df_filter ```
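As a small extension of the correlation check near the top of this notebook, the same matrix can be easier to scan as a heatmap; a minimal sketch reusing the already-imported seaborn is shown below:

```
# Visualize the correlation matrix from earlier as a heatmap for easier reading.
corr = pd.get_dummies(df[['Year', 'User Rating', 'Price', 'Genre']]).corr()

plt.figure(figsize=(8, 6))
sns.heatmap(corr, annot=True, fmt='.2f', cmap='vlag', center=0)
plt.title('Correlation matrix of numeric and encoded features')
plt.show()
```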
# Home Credit Default Risk Competition Consider this collection of notebooks as a case study intended for beginners in Machine Learning. We have tried to expand, with our own comments, upon the code available in some of the notebooks on Kaggle. # Data The data, as provided by [Home Credit](http://www.homecredit.net/about-us.aspx), is divided into seven interlinked tables. The data description is as follows; the interlinkages between the data sources are depicted in the diagram below. * application_train/application_test: the main training and testing data with information about each loan application at Home Credit. Every loan has its own row and is identified by the feature `SK_ID_CURR`. The training application data comes with the `TARGET` column, indicating 0 (the loan was repaid) or 1 (the loan was not repaid). * bureau: data concerning clients' previous credits from other financial institutions. Each previous credit has its own row in bureau, but one loan in the application data can have multiple previous credits. * bureau_balance: monthly data about the previous credits in bureau. Each row is one month of a previous credit, and a single previous credit can have multiple rows, one for each month of the credit length. * previous_application: previous applications for loans at Home Credit of clients who have loans in the application data. Each current loan in the application data can have multiple previous loans. Each previous application has one row and is identified by the feature `SK_ID_PREV`. * POS_CASH_BALANCE: monthly data about previous point of sale or cash loans clients have had with Home Credit. Each row is one month of a previous point of sale or cash loan, and a single previous loan can have many rows. * credit_card_balance: monthly data about previous credit cards clients have had with Home Credit. Each row is one month of a credit card balance, and a single credit card can have many rows. * installments_payment: payment history for previous loans at Home Credit. There is one row for every made payment and one row for every missed payment. This diagram shows how all of the data is related: ![image](https://storage.googleapis.com/kaggle-media/competitions/home-credit/home_credit.png) The definitions of all the columns are provided in `HomeCredit_columns_description.csv`. __Some references__ * [Credit Education](https://myscore.cibil.com/CreditView/creditEducation.page?enterprise=CIBIL&_ga=2.245893574.372615569.1603669858-164953316.1602941832&_gac=1.254345978.1602941832.CjwKCAjwrKr8BRB_EiwA7eFaplQtBsmINtLxLHOCalWYdx-uO20kyaj0AvRVD8WKNO4cj5mP7MoBTRoC6TEQAvD_BwE) * [Credit Appraisal Methodology and Standards](https://www.paisadukan.com/credit-assessment-methodology) ``` ############# ```
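Since the code cell above is only a placeholder, here is a hedged sketch of how the one-to-many link between `application_train` and `bureau` described above could be used in practice. The `.csv` file names are assumed to mirror the table names, and the aggregation is only an illustration, not the case study's actual feature engineering.

```
# Hedged sketch: attach a simple bureau-derived feature to the main table.
# File names are assumed to follow the table names described above.
import pandas as pd

app_train = pd.read_csv('application_train.csv')
bureau = pd.read_csv('bureau.csv')

# bureau holds many rows per client, so reduce it to one row per SK_ID_CURR
# before joining it back onto the application-level data.
previous_credit_count = (
    bureau.groupby('SK_ID_CURR')
          .size()
          .rename('previous_credit_count')
          .reset_index()
)

app_train = app_train.merge(previous_credit_count, on='SK_ID_CURR', how='left')
app_train['previous_credit_count'] = app_train['previous_credit_count'].fillna(0)
```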
github_jupyter
#############
0.099733
0.990348
# LSTM Example with Scalecast ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from scalecast.Forecaster import Forecaster sns.set(rc={'figure.figsize':(15,8)}) ``` ## Data preprocessing ``` data = pd.read_csv('AirPassengers.csv',parse_dates=['Month']) data.head() data.shape data['Month'].min() data['Month'].max() ``` ## EDA ``` f = Forecaster(y=data['#Passengers'],current_dates=data['Month']) f.plot() f f.plot_acf(lags=26) plt.show() f.plot_pacf(lags=26) plt.show() f.seasonal_decompose().plot() plt.show() stat, pval, _, _, _, _ = f.adf_test(full_res=True) print(stat) print(pval) ``` ## LSTM Model ``` f.set_test_length(12) f.generate_future_dates(12) f.set_estimator('lstm') ``` ### Attempt 1 ``` f.manual_forecast(call_me='lstm_default') f.plot_test_set(ci=True) ``` ### Attempt 2 ``` f.manual_forecast(call_me='lstm_24lags',lags=24) f.plot_test_set(ci=True) ``` ### Attempt 3 ``` f.manual_forecast(call_me='lstm_24lags_5epochs',lags=24,epochs=5,validation_split=.2,shuffle=True) f.plot_test_set(ci=True) ``` ### Attempt 4 ``` from tensorflow.keras.callbacks import EarlyStopping f.manual_forecast(call_me='lstm_24lags_earlystop_3layers', lags=24, epochs=25, validation_split=.2, shuffle=True, callbacks=EarlyStopping(monitor='val_loss', patience=5), lstm_layer_sizes=(16,16,16), dropout=(0,0,0)) f.plot_test_set(ci=True) ``` ### Attempt 5 ``` f.manual_forecast(call_me='lstm_best', lags=36, batch_size=32, epochs=15, validation_split=.2, shuffle=True, activation='tanh', optimizer='Adam', learning_rate=0.001, lstm_layer_sizes=(72,)*4, dropout=(0,)*4, plot_loss=True) f.plot_test_set(order_by='LevelTestSetMAPE',models='top_2',ci=True) ``` ## MLR Modeling ``` f.set_estimator('mlr') f.add_ar_terms(24) f.add_seasonal_regressors('month','quarter',dummy=True) f.add_seasonal_regressors('year') f.add_time_trend() f.diff() f.manual_forecast() f.plot_test_set(order_by='LevelTestSetMAPE',models='top_2') f.plot_test_set(models='mlr',ci=True) f.plot(order_by='LevelTestSetMAPE',models='top_2') ``` ## Benchmarking ``` f.export('model_summaries',determine_best_by='LevelTestSetMAPE')[ ['ModelNickname','LevelTestSetMAPE','LevelTestSetRMSE','LevelTestSetR2','best_model'] ] ``` ## Export Results ``` f.export_forecasts_with_cis('mlr') f.export_test_set_preds_with_cis('mlr') ``` ### Export Feature Info ``` f.save_feature_importance() f.export_feature_importance('mlr') ```
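The `lags` argument passed to `manual_forecast` above controls how many previous observations feed each prediction. As a purely illustrative, hedged sketch (this is not scalecast's internal implementation), the following shows how such a lagged supervised matrix can be built from a series:

```
# Illustrative only: building a lag-based supervised matrix from a series.
import numpy as np
import pandas as pd

def make_lag_matrix(series, n_lags):
    """Return X of shape (n_samples, n_lags) and y so that row t predicts series[t]."""
    s = pd.Series(series).reset_index(drop=True)
    frame = pd.DataFrame({f'lag_{k}': s.shift(k) for k in range(1, n_lags + 1)})
    frame['y'] = s
    frame = frame.dropna()
    return frame.drop(columns='y').to_numpy(), frame['y'].to_numpy()

X, y = make_lag_matrix(np.arange(20, dtype=float), n_lags=3)
print(X.shape, y.shape)  # (17, 3) (17,)
```

Each row of `X` holds the `n_lags` values immediately preceding the corresponding entry of `y`.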
github_jupyter
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from scalecast.Forecaster import Forecaster sns.set(rc={'figure.figsize':(15,8)}) data = pd.read_csv('AirPassengers.csv',parse_dates=['Month']) data.head() data.shape data['Month'].min() data['Month'].max() f = Forecaster(y=data['#Passengers'],current_dates=data['Month']) f.plot() f f.plot_acf(lags=26) plt.show() f.plot_pacf(lags=26) plt.show() f.seasonal_decompose().plot() plt.show() stat, pval, _, _, _, _ = f.adf_test(full_res=True) print(stat) print(pval) f.set_test_length(12) f.generate_future_dates(12) f.set_estimator('lstm') f.manual_forecast(call_me='lstm_default') f.plot_test_set(ci=True) f.manual_forecast(call_me='lstm_24lags',lags=24) f.plot_test_set(ci=True) f.manual_forecast(call_me='lstm_24lags_5epochs',lags=24,epochs=5,validation_split=.2,shuffle=True) f.plot_test_set(ci=True) from tensorflow.keras.callbacks import EarlyStopping f.manual_forecast(call_me='lstm_24lags_earlystop_3layers', lags=24, epochs=25, validation_split=.2, shuffle=True, callbacks=EarlyStopping(monitor='val_loss', patience=5), lstm_layer_sizes=(16,16,16), dropout=(0,0,0)) f.plot_test_set(ci=True) f.manual_forecast(call_me='lstm_best', lags=36, batch_size=32, epochs=15, validation_split=.2, shuffle=True, activation='tanh', optimizer='Adam', learning_rate=0.001, lstm_layer_sizes=(72,)*4, dropout=(0,)*4, plot_loss=True) f.plot_test_set(order_by='LevelTestSetMAPE',models='top_2',ci=True) f.set_estimator('mlr') f.add_ar_terms(24) f.add_seasonal_regressors('month','quarter',dummy=True) f.add_seasonal_regressors('year') f.add_time_trend() f.diff() f.manual_forecast() f.plot_test_set(order_by='LevelTestSetMAPE',models='top_2') f.plot_test_set(models='mlr',ci=True) f.plot(order_by='LevelTestSetMAPE',models='top_2') f.export('model_summaries',determine_best_by='LevelTestSetMAPE')[ ['ModelNickname','LevelTestSetMAPE','LevelTestSetRMSE','LevelTestSetR2','best_model'] ] f.export_forecasts_with_cis('mlr') f.export_test_set_preds_with_cis('mlr') f.save_feature_importance() f.export_feature_importance('mlr')
0.400632
0.835249
# Time Series with Pandas Project Exercise For this exercise, answer the questions below given the dataset: https://fred.stlouisfed.org/series/UMTMVS This dataset is the Value of Manufacturers' Shipments for All Manufacturing Industries. **Import any necessary libraries.** ``` # CODE HERE import numpy as np import pandas as pd %matplotlib inline ``` **Read in the data UMTMVS.csv file from the Data folder** ``` # CODE HERE df = pd.read_csv('../Data/UMTMVS.csv') ``` **Check the head of the data** ``` # CODE HERE df.head() ``` **Set the DATE column as the index.** ``` # CODE HERE df = df.set_index('DATE') df.head() ``` **Check the data type of the index.** ``` # CODE HERE df.index ``` **Convert the index to be a datetime index. Note, there are many, many correct ways to do this!** ``` # CODE HERE df.index = pd.to_datetime(df.index) df.index ``` **Plot out the data, choose a reasonable figure size** ``` # CODE HERE df.plot(figsize=(14,8)) ``` **What was the percent increase in value from Jan 2009 to Jan 2019?** ``` #CODE HERE 100 * (df.loc['2019-01-01'] - df.loc['2009-01-01']) / df.loc['2009-01-01'] ``` **What was the percent decrease from Jan 2008 to Jan 2009?** ``` #CODE HERE 100 * (df.loc['2009-01-01'] - df.loc['2008-01-01']) / df.loc['2008-01-01'] ``` **What is the month with the least value after 2005?** [HINT](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.idxmin.html) ``` #CODE HERE df.loc['2005-01-01':].idxmin() ``` **What 6 months have the highest value?** ``` # CODE HERE df.sort_values(by='UMTMVS',ascending=False).head(5) ``` **How many millions of dollars in value was lost in 2008? (Another way of posing this question is what was the value difference between Jan 2008 and Jan 2009)** ``` # CODE HERE df.loc['2008-01-01'] - df.loc['2009-01-01'] ``` **Create a bar plot showing the average value in millions of dollars per year** ``` # CODE HERE df.resample('Y').mean().plot.bar(figsize=(15,8)) ``` **What year had the biggest increase in mean value from the previous year's mean value? (Lots of ways to get this answer!)** [HINT for a useful method](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.idxmax.html) ``` # CODE HERE yearly_data = df.resample('Y').mean() yearly_data_shift = yearly_data.shift(1) yearly_data.head() change = yearly_data - yearly_data_shift change['UMTMVS'].idxmax() ``` **Plot out the yearly rolling mean on top of the original data. Recall that this is monthly data and there are 12 months in a year!** ``` # CODE HERE df['Yearly Mean'] = df['UMTMVS'].rolling(window=12).mean() df[['UMTMVS','Yearly Mean']].plot(figsize=(12,5)).autoscale(axis='x',tight=True); ``` **BONUS QUESTION (HARD).** **Some month in 2008 the value peaked for that year. How many months did it take to surpass that 2008 peak? (Since it crashed immediately after this peak) There are many ways to get this answer. NOTE: I get 70 months as my answer, you may get 69 or 68, depending on whether or not you count the start and end months. Refer to the video solutions for full explanation on this.** ``` #CODE HERE df = pd.read_csv('../Data/UMTMVS.csv',index_col='DATE',parse_dates=True) df.head() df2008 = df.loc['2008-01-01':'2009-01-01'] df2008.idxmax() df2008.max() df_post_peak = df.loc['2008-06-01':] df_post_peak[df_post_peak>=510081].dropna() len(df.loc['2008-06-01':'2014-03-01']) ```
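For the question about the year with the biggest increase in mean value, a hedged alternative to the `shift()`-based approach above is to difference the resampled yearly means directly; under the same assumptions about the data file, it should give the same answer.

```
# Hedged alternative: year with the largest jump in mean value via diff().
import pandas as pd

umtmvs = pd.read_csv('../Data/UMTMVS.csv', index_col='DATE', parse_dates=True)

yearly_mean = umtmvs['UMTMVS'].resample('Y').mean()
print(yearly_mean.diff().idxmax())
```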
github_jupyter
# CODE HERE import numpy as np import pandas as pd %matplotlib inline # CODE HERE df = pd.read_csv('../Data/UMTMVS.csv') # CODE HERE df.head() # CODE HERE df = df.set_index('DATE') df.head() # CODE HERE df.index # CODE HERE df.index = pd.to_datetime(df.index) df.index # CODE HERE df.plot(figsize=(14,8)) #CODE HERE 100 * (df.loc['2019-01-01'] - df.loc['2009-01-01']) / df.loc['2009-01-01'] #CODE HERE 100 * (df.loc['2009-01-01'] - df.loc['2008-01-01']) / df.loc['2008-01-01'] #CODE HERE df.loc['2005-01-01':].idxmin() # CODE HERE df.sort_values(by='UMTMVS',ascending=False).head(5) # CODE HERE df.loc['2008-01-01'] - df.loc['2009-01-01'] # CODE HERE df.resample('Y').mean().plot.bar(figsize=(15,8)) # CODE HERE yearly_data = df.resample('Y').mean() yearly_data_shift = yearly_data.shift(1) yearly_data.head() change = yearly_data - yearly_data_shift change['UMTMVS'].idxmax() # CODE HERE df['Yearly Mean'] = df['UMTMVS'].rolling(window=12).mean() df[['UMTMVS','Yearly Mean']].plot(figsize=(12,5)).autoscale(axis='x',tight=True); #CODE HERE df = pd.read_csv('../Data/UMTMVS.csv',index_col='DATE',parse_dates=True) df.head() df2008 = df.loc['2008-01-01':'2009-01-01'] df2008.idxmax() df2008.max() df_post_peak = df.loc['2008-06-01':] df_post_peak[df_post_peak>=510081].dropna() len(df.loc['2008-06-01':'2014-03-01'])
0.172416
0.979056
<a href="https://colab.research.google.com/github/Victoooooor/SimpleJobs/blob/main/movenet.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` #@title !pip install -q imageio !pip install -q opencv-python !pip install -q git+https://github.com/tensorflow/docs #@title import tensorflow as tf import tensorflow_hub as hub from tensorflow_docs.vis import embed import numpy as np import cv2 import os # Import matplotlib libraries from matplotlib import pyplot as plt from matplotlib.collections import LineCollection import matplotlib.patches as patches import imageio from IPython.display import HTML, display from google.colab import files import sys import time import shutil from google.colab.patches import cv2_imshow import copy from base64 import b64encode #@title KEYPOINT_DICT = { 'nose': 0, 'left_eye': 1, 'right_eye': 2, 'left_ear': 3, 'right_ear': 4, 'left_shoulder': 5, 'right_shoulder': 6, 'left_elbow': 7, 'right_elbow': 8, 'left_wrist': 9, 'right_wrist': 10, 'left_hip': 11, 'right_hip': 12, 'left_knee': 13, 'right_knee': 14, 'left_ankle': 15, 'right_ankle': 16 } # Maps bones to a matplotlib color name. KEYPOINT_EDGE_INDS_TO_COLOR = { (0, 1): 'm', (0, 2): 'c', (1, 3): 'm', (2, 4): 'c', (0, 5): 'm', (0, 6): 'c', (5, 7): 'm', (7, 9): 'm', (6, 8): 'c', (8, 10): 'c', (5, 6): 'y', (5, 11): 'm', (6, 12): 'c', (11, 12): 'y', (11, 13): 'm', (13, 15): 'm', (12, 14): 'c', (14, 16): 'c' } def _keypoints_and_edges_for_display(keypoints_with_scores, height, width, keypoint_threshold=0.11): """Returns high confidence keypoints and edges for visualization. Args: keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing the keypoint coordinates and scores returned from the MoveNet model. height: height of the image in pixels. width: width of the image in pixels. keypoint_threshold: minimum confidence score for a keypoint to be visualized. Returns: A (keypoints_xy, edges_xy, edge_colors) containing: * the coordinates of all keypoints of all detected entities; * the coordinates of all skeleton edges of all detected entities; * the colors in which the edges should be plotted. 
""" keypoints_all = [] keypoint_edges_all = [] edge_colors = [] num_instances, _, _, _ = keypoints_with_scores.shape for idx in range(num_instances): kpts_x = keypoints_with_scores[0, idx, :, 1] kpts_y = keypoints_with_scores[0, idx, :, 0] kpts_scores = keypoints_with_scores[0, idx, :, 2] kpts_absolute_xy = np.stack( [width * np.array(kpts_x), height * np.array(kpts_y)], axis=-1) kpts_above_thresh_absolute = kpts_absolute_xy[ kpts_scores > keypoint_threshold, :] keypoints_all.append(kpts_above_thresh_absolute) for edge_pair, color in KEYPOINT_EDGE_INDS_TO_COLOR.items(): if (kpts_scores[edge_pair[0]] > keypoint_threshold and kpts_scores[edge_pair[1]] > keypoint_threshold): x_start = kpts_absolute_xy[edge_pair[0], 0] y_start = kpts_absolute_xy[edge_pair[0], 1] x_end = kpts_absolute_xy[edge_pair[1], 0] y_end = kpts_absolute_xy[edge_pair[1], 1] line_seg = np.array([[x_start, y_start], [x_end, y_end]]) keypoint_edges_all.append(line_seg) edge_colors.append(color) if keypoints_all: keypoints_xy = np.concatenate(keypoints_all, axis=0) else: keypoints_xy = np.zeros((0, 17, 2)) if keypoint_edges_all: edges_xy = np.stack(keypoint_edges_all, axis=0) else: edges_xy = np.zeros((0, 2, 2)) return keypoints_xy, edges_xy, edge_colors def draw_prediction_on_image( image, keypoints_with_scores, crop_region=None, close_figure=False, output_image_height=None): """Draws the keypoint predictions on image. Args: image: A numpy array with shape [height, width, channel] representing the pixel values of the input image. keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing the keypoint coordinates and scores returned from the MoveNet model. crop_region: A dictionary that defines the coordinates of the bounding box of the crop region in normalized coordinates (see the init_crop_region function below for more detail). If provided, this function will also draw the bounding box on the image. output_image_height: An integer indicating the height of the output image. Note that the image aspect ratio will be the same as the input image. Returns: A numpy array with shape [out_height, out_width, channel] representing the image overlaid with keypoint predictions. 
""" height, width, channel = image.shape aspect_ratio = float(width) / height fig, ax = plt.subplots(figsize=(12 * aspect_ratio, 12)) # To remove the huge white borders fig.tight_layout(pad=0) ax.margins(0) ax.set_yticklabels([]) ax.set_xticklabels([]) plt.axis('off') im = ax.imshow(image) line_segments = LineCollection([], linewidths=(4), linestyle='solid') ax.add_collection(line_segments) # Turn off tick labels scat = ax.scatter([], [], s=60, color='#FF1493', zorder=3) (keypoint_locs, keypoint_edges, edge_colors) = _keypoints_and_edges_for_display( keypoints_with_scores, height, width) line_segments.set_segments(keypoint_edges) line_segments.set_color(edge_colors) if keypoint_edges.shape[0]: line_segments.set_segments(keypoint_edges) line_segments.set_color(edge_colors) if keypoint_locs.shape[0]: scat.set_offsets(keypoint_locs) if crop_region is not None: xmin = max(crop_region['x_min'] * width, 0.0) ymin = max(crop_region['y_min'] * height, 0.0) rec_width = min(crop_region['x_max'], 0.99) * width - xmin rec_height = min(crop_region['y_max'], 0.99) * height - ymin rect = patches.Rectangle( (xmin,ymin),rec_width,rec_height, linewidth=1,edgecolor='b',facecolor='none') ax.add_patch(rect) fig.canvas.draw() image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) image_from_plot = image_from_plot.reshape( fig.canvas.get_width_height()[::-1] + (3,)) plt.close(fig) if output_image_height is not None: output_image_width = int(output_image_height / height * width) image_from_plot = cv2.resize( image_from_plot, dsize=(output_image_width, output_image_height), interpolation=cv2.INTER_CUBIC) return image_from_plot def to_gif(images, fps): """Converts image sequence (4D numpy array) to gif.""" imageio.mimsave('./animation.gif', images, fps=fps) return embed.embed_file('./animation.gif') def progress(value, max=100): return HTML(""" <progress value='{value}' max='{max}', style='width: 100%' > {value} </progress> """.format(value=value, max=max)) def show_video(video_path, video_width = 600): video_file = open(video_path, "r+b").read() video_url = f"data:video/mp4;base64,{b64encode(video_file).decode()}" return HTML(f"""<video width={video_width} controls><source src="{video_url}"></video>""") # Load the input image. 
def get_pose(image, thresh = 0.2): detection_threshold = thresh image = tf.expand_dims(image, axis=0) image_origin = copy.copy(image) image = tf.cast(tf.image.resize_with_pad( image, 256, 256), dtype=tf.int32) _, image_height, image_width, channel = image_origin.shape # print(image_height, image_width) if channel != 3: sys.exit('Image isn\'t in RGB format.') output = movenet(image) people = output['output_0'].numpy()[:, :, :51].reshape((6, 17, 3)) if image_width > image_height: # print('scaling') dif = people - 0.5 people[:,:,0] = 0.5 + image_width/image_height * dif[:,:,0] elif image_width < image_height: # print('scaling') dif = people - 0.5 people[:,:,1] = 0.5 + image_height/image_width * dif[:,:,1] # Save landmarks if all landmarks were detected ppl = [] for i in range(6): # print(output['output_0'][0, i, -1]) if output['output_0'][0, i, -1] > detection_threshold: ppl.append(people[i]) should_keep_image = len(ppl) > 0 if not should_keep_image: print('No pose was confidentlly detected.') #draw all merged_img = np.squeeze(image_origin.numpy(), axis=0) for pp in ppl: merged_img = draw_prediction_on_image( merged_img, np.array([[pp]]), output_image_height=image_height) return merged_img, ppl def get_vid(filename, fhandle, desti = 'processed.mp4', interval = 5): video_file = desti video = cv2.VideoCapture(filename) if not video.isOpened(): sys.exit('video does not exist') fps = int(video.get(cv2.CAP_PROP_FPS)) frame_num = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) fourcc = cv2.VideoWriter_fourcc(*'mp4v') video_writer = cv2.VideoWriter(video_file,fourcc,fps,(frame_width,frame_height)) print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)) frame_counter = 0 while True: ret, frame = video.read() if ret == True: tfframe= tf.convert_to_tensor(frame) new_frame, data = get_pose(tfframe) video_writer.write(new_frame) if frame_counter % interval == 0: data=np.delete(data,2,2) data[:,:,[0,1]] = data[:,:,[1,0]] np.savetxt(fhandle, data.flatten(), fmt='%.18e', newline=',') fhandle.write(b"\n") frame_counter += 1 if ret == False: break video.release() video_writer.release() cv2.destroyAllWindows() return video_file #@title model = hub.load("https://tfhub.dev/google/movenet/multipose/lightning/1") movenet = model.signatures['serving_default'] #params interval = 5 #meaning save to csv every 5 frames uploaded = files.upload() filename = next(iter(uploaded)) #@title text_name = 'pose.csv' try: os.remove(text_name) except: None with open(text_name, "ab") as csv: # numpy.savetxt(csv, a) gen = get_vid(filename, csv, interval = interval) csv.close() audiofile = '_sound.mp3' withsound = 'output.mp4' !ffmpeg -i {filename} -f mp3 -ab 192000 -vn {audiofile} !ffmpeg -i {gen} -i {audiofile} -map 0:0 -map 1:0 -c:v copy -c:a copy {withsound} !zip -r file.zip {text_name} {withsound} files.download('file.zip') try: os.remove(text_name) os.remove(filename) os.remove(audiofile) os.remove(gen) os.remove(withsound) except: None ```
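`get_vid` above writes one comma-separated line of flattened keypoints to `pose.csv` for every sampled frame. Below is a hedged sketch for parsing that file back into per-frame arrays; it assumes the trailing-comma format produced by `np.savetxt` with `newline=','` in the code above.

```
# Hedged sketch: read pose.csv back into (n_people, 17, 2) arrays per frame.
# Each saved line ends with a trailing comma, so empty fields are skipped.
import numpy as np

frames = []
with open('pose.csv') as f:
    for line in f:
        values = [float(v) for v in line.strip().split(',') if v]
        if not values:
            continue
        # after the column swap in get_vid, each keypoint is stored as (x, y)
        frames.append(np.array(values).reshape(-1, 17, 2))

print(len(frames), 'sampled frames parsed')
```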
github_jupyter
#@title !pip install -q imageio !pip install -q opencv-python !pip install -q git+https://github.com/tensorflow/docs #@title import tensorflow as tf import tensorflow_hub as hub from tensorflow_docs.vis import embed import numpy as np import cv2 import os # Import matplotlib libraries from matplotlib import pyplot as plt from matplotlib.collections import LineCollection import matplotlib.patches as patches import imageio from IPython.display import HTML, display from google.colab import files import sys import time import shutil from google.colab.patches import cv2_imshow import copy from base64 import b64encode #@title KEYPOINT_DICT = { 'nose': 0, 'left_eye': 1, 'right_eye': 2, 'left_ear': 3, 'right_ear': 4, 'left_shoulder': 5, 'right_shoulder': 6, 'left_elbow': 7, 'right_elbow': 8, 'left_wrist': 9, 'right_wrist': 10, 'left_hip': 11, 'right_hip': 12, 'left_knee': 13, 'right_knee': 14, 'left_ankle': 15, 'right_ankle': 16 } # Maps bones to a matplotlib color name. KEYPOINT_EDGE_INDS_TO_COLOR = { (0, 1): 'm', (0, 2): 'c', (1, 3): 'm', (2, 4): 'c', (0, 5): 'm', (0, 6): 'c', (5, 7): 'm', (7, 9): 'm', (6, 8): 'c', (8, 10): 'c', (5, 6): 'y', (5, 11): 'm', (6, 12): 'c', (11, 12): 'y', (11, 13): 'm', (13, 15): 'm', (12, 14): 'c', (14, 16): 'c' } def _keypoints_and_edges_for_display(keypoints_with_scores, height, width, keypoint_threshold=0.11): """Returns high confidence keypoints and edges for visualization. Args: keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing the keypoint coordinates and scores returned from the MoveNet model. height: height of the image in pixels. width: width of the image in pixels. keypoint_threshold: minimum confidence score for a keypoint to be visualized. Returns: A (keypoints_xy, edges_xy, edge_colors) containing: * the coordinates of all keypoints of all detected entities; * the coordinates of all skeleton edges of all detected entities; * the colors in which the edges should be plotted. """ keypoints_all = [] keypoint_edges_all = [] edge_colors = [] num_instances, _, _, _ = keypoints_with_scores.shape for idx in range(num_instances): kpts_x = keypoints_with_scores[0, idx, :, 1] kpts_y = keypoints_with_scores[0, idx, :, 0] kpts_scores = keypoints_with_scores[0, idx, :, 2] kpts_absolute_xy = np.stack( [width * np.array(kpts_x), height * np.array(kpts_y)], axis=-1) kpts_above_thresh_absolute = kpts_absolute_xy[ kpts_scores > keypoint_threshold, :] keypoints_all.append(kpts_above_thresh_absolute) for edge_pair, color in KEYPOINT_EDGE_INDS_TO_COLOR.items(): if (kpts_scores[edge_pair[0]] > keypoint_threshold and kpts_scores[edge_pair[1]] > keypoint_threshold): x_start = kpts_absolute_xy[edge_pair[0], 0] y_start = kpts_absolute_xy[edge_pair[0], 1] x_end = kpts_absolute_xy[edge_pair[1], 0] y_end = kpts_absolute_xy[edge_pair[1], 1] line_seg = np.array([[x_start, y_start], [x_end, y_end]]) keypoint_edges_all.append(line_seg) edge_colors.append(color) if keypoints_all: keypoints_xy = np.concatenate(keypoints_all, axis=0) else: keypoints_xy = np.zeros((0, 17, 2)) if keypoint_edges_all: edges_xy = np.stack(keypoint_edges_all, axis=0) else: edges_xy = np.zeros((0, 2, 2)) return keypoints_xy, edges_xy, edge_colors def draw_prediction_on_image( image, keypoints_with_scores, crop_region=None, close_figure=False, output_image_height=None): """Draws the keypoint predictions on image. Args: image: A numpy array with shape [height, width, channel] representing the pixel values of the input image. 
keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing the keypoint coordinates and scores returned from the MoveNet model. crop_region: A dictionary that defines the coordinates of the bounding box of the crop region in normalized coordinates (see the init_crop_region function below for more detail). If provided, this function will also draw the bounding box on the image. output_image_height: An integer indicating the height of the output image. Note that the image aspect ratio will be the same as the input image. Returns: A numpy array with shape [out_height, out_width, channel] representing the image overlaid with keypoint predictions. """ height, width, channel = image.shape aspect_ratio = float(width) / height fig, ax = plt.subplots(figsize=(12 * aspect_ratio, 12)) # To remove the huge white borders fig.tight_layout(pad=0) ax.margins(0) ax.set_yticklabels([]) ax.set_xticklabels([]) plt.axis('off') im = ax.imshow(image) line_segments = LineCollection([], linewidths=(4), linestyle='solid') ax.add_collection(line_segments) # Turn off tick labels scat = ax.scatter([], [], s=60, color='#FF1493', zorder=3) (keypoint_locs, keypoint_edges, edge_colors) = _keypoints_and_edges_for_display( keypoints_with_scores, height, width) line_segments.set_segments(keypoint_edges) line_segments.set_color(edge_colors) if keypoint_edges.shape[0]: line_segments.set_segments(keypoint_edges) line_segments.set_color(edge_colors) if keypoint_locs.shape[0]: scat.set_offsets(keypoint_locs) if crop_region is not None: xmin = max(crop_region['x_min'] * width, 0.0) ymin = max(crop_region['y_min'] * height, 0.0) rec_width = min(crop_region['x_max'], 0.99) * width - xmin rec_height = min(crop_region['y_max'], 0.99) * height - ymin rect = patches.Rectangle( (xmin,ymin),rec_width,rec_height, linewidth=1,edgecolor='b',facecolor='none') ax.add_patch(rect) fig.canvas.draw() image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) image_from_plot = image_from_plot.reshape( fig.canvas.get_width_height()[::-1] + (3,)) plt.close(fig) if output_image_height is not None: output_image_width = int(output_image_height / height * width) image_from_plot = cv2.resize( image_from_plot, dsize=(output_image_width, output_image_height), interpolation=cv2.INTER_CUBIC) return image_from_plot def to_gif(images, fps): """Converts image sequence (4D numpy array) to gif.""" imageio.mimsave('./animation.gif', images, fps=fps) return embed.embed_file('./animation.gif') def progress(value, max=100): return HTML(""" <progress value='{value}' max='{max}', style='width: 100%' > {value} </progress> """.format(value=value, max=max)) def show_video(video_path, video_width = 600): video_file = open(video_path, "r+b").read() video_url = f"data:video/mp4;base64,{b64encode(video_file).decode()}" return HTML(f"""<video width={video_width} controls><source src="{video_url}"></video>""") # Load the input image. 
def get_pose(image, thresh = 0.2): detection_threshold = thresh image = tf.expand_dims(image, axis=0) image_origin = copy.copy(image) image = tf.cast(tf.image.resize_with_pad( image, 256, 256), dtype=tf.int32) _, image_height, image_width, channel = image_origin.shape # print(image_height, image_width) if channel != 3: sys.exit('Image isn\'t in RGB format.') output = movenet(image) people = output['output_0'].numpy()[:, :, :51].reshape((6, 17, 3)) if image_width > image_height: # print('scaling') dif = people - 0.5 people[:,:,0] = 0.5 + image_width/image_height * dif[:,:,0] elif image_width < image_height: # print('scaling') dif = people - 0.5 people[:,:,1] = 0.5 + image_height/image_width * dif[:,:,1] # Save landmarks if all landmarks were detected ppl = [] for i in range(6): # print(output['output_0'][0, i, -1]) if output['output_0'][0, i, -1] > detection_threshold: ppl.append(people[i]) should_keep_image = len(ppl) > 0 if not should_keep_image: print('No pose was confidentlly detected.') #draw all merged_img = np.squeeze(image_origin.numpy(), axis=0) for pp in ppl: merged_img = draw_prediction_on_image( merged_img, np.array([[pp]]), output_image_height=image_height) return merged_img, ppl def get_vid(filename, fhandle, desti = 'processed.mp4', interval = 5): video_file = desti video = cv2.VideoCapture(filename) if not video.isOpened(): sys.exit('video does not exist') fps = int(video.get(cv2.CAP_PROP_FPS)) frame_num = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) fourcc = cv2.VideoWriter_fourcc(*'mp4v') video_writer = cv2.VideoWriter(video_file,fourcc,fps,(frame_width,frame_height)) print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps)) frame_counter = 0 while True: ret, frame = video.read() if ret == True: tfframe= tf.convert_to_tensor(frame) new_frame, data = get_pose(tfframe) video_writer.write(new_frame) if frame_counter % interval == 0: data=np.delete(data,2,2) data[:,:,[0,1]] = data[:,:,[1,0]] np.savetxt(fhandle, data.flatten(), fmt='%.18e', newline=',') fhandle.write(b"\n") frame_counter += 1 if ret == False: break video.release() video_writer.release() cv2.destroyAllWindows() return video_file #@title model = hub.load("https://tfhub.dev/google/movenet/multipose/lightning/1") movenet = model.signatures['serving_default'] #params interval = 5 #meaning save to csv every 5 frames uploaded = files.upload() filename = next(iter(uploaded)) #@title text_name = 'pose.csv' try: os.remove(text_name) except: None with open(text_name, "ab") as csv: # numpy.savetxt(csv, a) gen = get_vid(filename, csv, interval = interval) csv.close() audiofile = '_sound.mp3' withsound = 'output.mp4' !ffmpeg -i {filename} -f mp3 -ab 192000 -vn {audiofile} !ffmpeg -i {gen} -i {audiofile} -map 0:0 -map 1:0 -c:v copy -c:a copy {withsound} !zip -r file.zip {text_name} {withsound} files.download('file.zip') try: os.remove(text_name) os.remove(filename) os.remove(audiofile) os.remove(gen) os.remove(withsound) except: None
0.760651
0.820001
``` import numpy as np import myUtil as mu import matplotlib.pyplot as plt #Generate 25 laws filters K=np.array([[1,4,6,4,1],[-1,-2,0,2,1],[-1,0,2,0,-1],[-1,2,0,-2,1],[1,-4,6,-4,1]]) N=len(K) laws_filters=np.zeros((N*N,N,N)) for i in range(N): for j in range(N): laws_filters[i*N+j]=np.matmul(K[i][:,np.newaxis],K[j][np.newaxis,:]) texture_images=np.zeros((12,128,128)) path_base="./HW4_Images/" for i in range(12): texture_images[i,:,:]=mu.readRaw(path_base+"texture"+str(i+1)+".raw",128,128) plt.subplot(4,3,i+1) plt.imshow(texture_images[i,:,:],cmap='gray') plt.show() def getSampleEnergy(img,filters): I=img-np.mean(img) [height,width]=img.shape [N,h,w]=filters.shape Energy=np.zeros(25) for n in range(N): G=mu.do_filter(img,filters[n,:,:]) for i in range(height): for j in range(width): Energy[n]+=G[i,j]**2 Energy[n]/=(height*width) return Energy def getSampleEnergy_without_extension(img,filters): I=img-np.mean(img) [height,width]=img.shape [N,h,w]=filters.shape Energy=np.zeros(25) for n in range(N): G=np.zeros((height-h+1,width-w+1)) for i in range(h//2,height-h//2): for j in range(w//2,width-w//2): G[i-h//2,j-w//2]=np.sum(img[i-h//2:i+h//2+1,j-w//2:j+w//2+1]*filters[n,:,:]) Energy[n]=np.sum(G**2)/((height-h+1)*(width-w+1)) return Energy #Get the Energy of every texture Image #This step is very time consuming due to the HW is only allowed to use basic tools Energy=np.zeros((12,25)) for i in range(12): print("computing i =",i) Energy[i]=getSampleEnergy(texture_images[i,:,:],laws_filters) Energy_without=np.zeros((12,25)) for i in range(12): print("computing i =",i) Energy_without[i]=getSampleEnergy_without_extension(texture_images[i,:,:],laws_filters) Energy_25D_s=(Energy_without-np.mean(Energy_without,axis=0))/np.std(Energy_without,axis=0) Energy_25D=(Energy-np.mean(Energy,axis=0))/np.std(Energy,axis=0) #apply pca from sklearn.decomposition import PCA pca=PCA(n_components=3) pca.fit(Energy_25D) Energy_3D=pca.transform(Energy_25D) print("Energy_25D.shape =",Energy_25D.shape) print("Energy_3D.shape =",Energy_3D.shape) #apply kmeans from sklearn.cluster import KMeans kmeans_25D=KMeans(n_clusters=4,random_state=0).fit(Energy_25D) print(kmeans_25D.labels_) kmeans_3D=KMeans(n_clusters=4,random_state=0).fit(Energy_3D) print(kmeans_3D.labels_) from mpl_toolkits.mplot3d import Axes3D print(Energy_3D) fig=plt.figure() ax=Axes3D(fig) ax.scatter(Energy_3D[:,0],Energy_3D[:,1],Energy_3D[:,2]) plt.show() ```
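The comment in the notebook above notes that the nested loops in `getSampleEnergy` are very slow. As a hedged alternative (useful only where library filters are allowed, which the homework above apparently restricts), `scipy.signal.correlate2d` computes comparable filter responses in a few lines; boundary handling may differ from `myUtil.do_filter`, and this version filters the zero-mean image that the original computes as `I` but does not use.

```
# Hedged, vectorized alternative to getSampleEnergy (border values may differ
# depending on how myUtil.do_filter pads the image).
import numpy as np
from scipy.signal import correlate2d

def laws_energy(img, filters):
    zero_mean = img - np.mean(img)
    energies = np.empty(len(filters))
    for n, kernel in enumerate(filters):
        response = correlate2d(zero_mean, kernel, mode='same', boundary='symm')
        energies[n] = np.mean(response ** 2)
    return energies
```

Usage would mirror the loop above, e.g. `laws_energy(texture_images[0], laws_filters)`.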
github_jupyter
import numpy as np import myUtil as mu import matplotlib.pyplot as plt #Generate 25 laws filters K=np.array([[1,4,6,4,1],[-1,-2,0,2,1],[-1,0,2,0,-1],[-1,2,0,-2,1],[1,-4,6,-4,1]]) N=len(K) laws_filters=np.zeros((N*N,N,N)) for i in range(N): for j in range(N): laws_filters[i*N+j]=np.matmul(K[i][:,np.newaxis],K[j][np.newaxis,:]) texture_images=np.zeros((12,128,128)) path_base="./HW4_Images/" for i in range(12): texture_images[i,:,:]=mu.readRaw(path_base+"texture"+str(i+1)+".raw",128,128) plt.subplot(4,3,i+1) plt.imshow(texture_images[i,:,:],cmap='gray') plt.show() def getSampleEnergy(img,filters): I=img-np.mean(img) [height,width]=img.shape [N,h,w]=filters.shape Energy=np.zeros(25) for n in range(N): G=mu.do_filter(img,filters[n,:,:]) for i in range(height): for j in range(width): Energy[n]+=G[i,j]**2 Energy[n]/=(height*width) return Energy def getSampleEnergy_without_extension(img,filters): I=img-np.mean(img) [height,width]=img.shape [N,h,w]=filters.shape Energy=np.zeros(25) for n in range(N): G=np.zeros((height-h+1,width-w+1)) for i in range(h//2,height-h//2): for j in range(w//2,width-w//2): G[i-h//2,j-w//2]=np.sum(img[i-h//2:i+h//2+1,j-w//2:j+w//2+1]*filters[n,:,:]) Energy[n]=np.sum(G**2)/((height-h+1)*(width-w+1)) return Energy #Get the Energy of every texture Image #This step is very time consuming due to the HW is only allowed to use basic tools Energy=np.zeros((12,25)) for i in range(12): print("computing i =",i) Energy[i]=getSampleEnergy(texture_images[i,:,:],laws_filters) Energy_without=np.zeros((12,25)) for i in range(12): print("computing i =",i) Energy_without[i]=getSampleEnergy_without_extension(texture_images[i,:,:],laws_filters) Energy_25D_s=(Energy_without-np.mean(Energy_without,axis=0))/np.std(Energy_without,axis=0) Energy_25D=(Energy-np.mean(Energy,axis=0))/np.std(Energy,axis=0) #apply pca from sklearn.decomposition import PCA pca=PCA(n_components=3) pca.fit(Energy_25D) Energy_3D=pca.transform(Energy_25D) print("Energy_25D.shape =",Energy_25D.shape) print("Energy_3D.shape =",Energy_3D.shape) #apply kmeans from sklearn.cluster import KMeans kmeans_25D=KMeans(n_clusters=4,random_state=0).fit(Energy_25D) print(kmeans_25D.labels_) kmeans_3D=KMeans(n_clusters=4,random_state=0).fit(Energy_3D) print(kmeans_3D.labels_) from mpl_toolkits.mplot3d import Axes3D print(Energy_3D) fig=plt.figure() ax=Axes3D(fig) ax.scatter(Energy_3D[:,0],Energy_3D[:,1],Energy_3D[:,2]) plt.show()
0.211498
0.505615
``` # Initialize Otter import otter grader = otter.Notebook("lab07.ipynb") ``` # Lab 7: Crime and Penalty Welcome to Lab 7! ``` # Run this cell to set up the notebook, but please don't change it. # These lines import the Numpy and Datascience modules. import numpy as np from datascience import * # These lines do some fancy plotting magic. import matplotlib %matplotlib inline import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') import warnings warnings.simplefilter('ignore', FutureWarning) ``` ## 1. A/B Testing A/B testing is a form of hypothesis testing that allows you to make comparisons between two distributions. You'll almost never be explicitly asked to perform an A/B test. Make sure you can identify situations where the test is appropriate and know how to correctly implement each step. **Question 1.1:** The following statements are the unordered steps of an A/B hypothesis test: 1. Choose a test statistic (typically the difference in means between two categories) 2. Shuffle the labels of the original sample, find your simulated test statistic, and repeat many times 3. Find the value of the observed test statistic 4. Calculate the p-value based off your observed and simulated test statistics 5. Define a null and alternate model 6. Use the p-value and p-value cutoff to draw a conclusion about the null hypothesis Make an array called `ab_test_order` that contains the correct order of an A/B test, where the first item of the array is the first step of an A/B test and the last item of the array is the last step of an A/B test <!-- BEGIN QUESTION name: q1_1 --> ``` ab_test_order = ... grader.check("q1_1") ``` <!-- BEGIN QUESTION --> **Question 1.2:** If the null hypothesis of an A/B test is correct, should the order of labels affect the differences in means between each group? Why do we shuffle labels in an A/B test? <!-- BEGIN QUESTION name: q1_2 manual: true --> _Type your answer here, replacing this text._ <!-- END QUESTION --> ## 2: Murder Rates Punishment for crime has many [philosophical justifications](http://plato.stanford.edu/entries/punishment/#ThePun). An important one is that fear of punishment may *deter* people from committing crimes. In the United States, some jurisdictions execute people who are convicted of particularly serious crimes, such as murder. This punishment is called the *death penalty* or *capital punishment*. The death penalty is controversial, and deterrence has been one focal point of the debate. There are other reasons to support or oppose the death penalty, but in this project we'll focus on deterrence. The key question about deterrence is: > Through our exploration, does instituting a death penalty for murder actually reduce the number of murders? You might have a strong intuition in one direction, but the evidence turns out to be surprisingly complex. Different sides have variously argued that the death penalty has no deterrent effect and that each execution prevents 8 murders, all using statistical arguments! We'll try to come to our own conclusion. #### The data The main data source for this lab comes from a [paper](http://cjlf.org/deathpenalty/DezRubShepDeterFinal.pdf) by three researchers, Dezhbakhsh, Rubin, and Shepherd. The dataset contains rates of various violent crimes for every year 1960-2003 (44 years) in every US state. The researchers compiled the data from the FBI's Uniform Crime Reports. 
Since crimes are committed by people, not states, we need to account for the number of people in each state when we're looking at state-level data. Murder rates are calculated as follows: $$\text{murder rate for state X in year Y} = \frac{\text{number of murders in state X in year Y}}{\text{population in state X in year Y}}*100000$$ (Murder is rare, so we multiply by 100,000 just to avoid dealing with tiny numbers.) ``` murder_rates = Table.read_table('crime_rates.csv').select('State', 'Year', 'Population', 'Murder Rate') murder_rates.set_format("Population", NumberFormatter) ``` Murder rates vary over time, and different states exhibit different trends. The rates in some states change dramatically from year to year, while others are quite stable. Let's plot a couple, just to see the variety. **Question 2.1.** Draw a line plot with years on the horizontal axis and murder rates on the vertical axis. Include two lines: one for Alaska murder rates and one for Minnesota murder rates. Create this plot using a single call, `ak_mn.plot('Year')`. *Hint*: To create two lines, you will need create the table `ak_mn` with two columns of murder rates, in addition to a column of years. This table will have the following structure: | Year | Murder rate in Alaska | Murder rate in Minnesota | |------|-----------------------|--------------------------| | 1960 | 10.2 | 1.2 | | 1961 | 11.5 | 1 | | 1962 | 4.5 | 0.9 | <center>... (41 rows omitted)</center> <!-- BEGIN QUESTION name: q2_1 --> ``` # The next lines are provided for you. They create a table # containing only the Alaska information and one containing # only the Minnesota information. ak = murder_rates.where('State', 'Alaska').drop('State', 'Population').relabeled(1, 'Murder rate in Alaska') mn = murder_rates.where('State', 'Minnesota').drop('State', 'Population').relabeled(1, 'Murder rate in Minnesota') # Fill in this line to make a table like the one pictured above. ak_mn = ... ak_mn grader.check("q2_1") ``` **Question 2.2:** Using the table `ak_mn`, draw a line plot that compares the murder rate in Alaska and the murder rate in Minnesota over time. <!-- BEGIN QUESTION name: q2_2 --> ``` # Draw your line plot here ... ``` Now what about the murder rates of other states? Say, for example, California and New York? Run the cell below to plot the murder rates of different pairs of states. ``` # Compare the murder rates of any two states by filling in the blanks below from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets def state(state1, state2): state1_table = murder_rates.where('State', state1).drop('State', 'Population').relabeled(1, 'Murder rate in {}'.format(state1)) state2_table = murder_rates.where('State', state2).drop('State', 'Population').relabeled(1, 'Murder rate in {}'.format(state2)) s1_s2 = state1_table.join('Year', state2_table) s1_s2.plot('Year') plt.show() states_array = murder_rates.group('State').column('State') _ = interact(state, state1=widgets.Dropdown(options=list(states_array),value='California'), state2=widgets.Dropdown(options=list(states_array),value='New York') ) ``` ## 3. The Death Penalty Some US states have the death penalty, and others don't, and laws have changed over time. In addition to changes in murder rates, we will also consider whether the death penalty was in force in each state and each year. Using this information, we would like to investigate how the presence of the death penalty affects the murder rate of a state. 
<!-- BEGIN QUESTION --> **Question 3.1.** We want to know whether the death penalty *causes* a change in the murder rate. Why is it not sufficient to compare murder rates in places and times when the death penalty was in force with places and times when it wasn't? <!-- BEGIN QUESTION name: q3_1 manual: true --> _Type your answer here, replacing this text._ <!-- END QUESTION --> ### A Natural Experiment In order to attempt to investigate the causal relationship between the death penalty and murder rates, we're going to take advantage of a *natural experiment*. A natural experiment happens when something other than experimental design applies a treatment to one group and not to another (control) group, and we have some hope that the treatment and control groups don't have any other systematic differences. Our natural experiment is this: in 1972, a Supreme Court decision called *Furman v. Georgia* banned the death penalty throughout the US. Suddenly, many states went from having the death penalty to not having the death penalty. As a first step, let's see how murder rates changed before and after the court decision. We'll define the test as follows: > **Population:** All the states that had the death penalty before the 1972 abolition. (There is no control group for the states that already lacked the death penalty in 1972, so we must omit them.) This includes all US states **except** Alaska, Hawaii, Maine, Michigan, Wisconsin, and Minnesota. > **Treatment group:** The states in that population, in 1973 (the year after 1972). > **Control group:** The states in that population, in 1971 (the year before 1972). > **Null hypothesis:** Murder rates in 1971 and 1973 come from the same distribution. > **Alternative hypothesis:** Murder rates were higher in 1973 than they were in 1971. Our alternative hypothesis is related to our suspicion that murder rates increase when the death penalty is eliminated. <!-- BEGIN QUESTION --> **Question 3.2:** Should we use an A/B test to test these hypotheses? If yes, what is our "A" group and what is our "B" group? <!-- BEGIN QUESTION name: q3_2 manual: true --> _Type your answer here, replacing this text._ <!-- END QUESTION --> The `death_penalty` table below describes whether each state allowed the death penalty in 1971. ``` non_death_penalty_states = make_array('Alaska', 'Hawaii', 'Maine', 'Michigan', 'Wisconsin', 'Minnesota') def had_death_penalty_in_1971(state): """Returns True if the argument is the name of a state that had the death penalty in 1971.""" # The implementation of this function uses a bit of syntax # we haven't seen before. Just trust that it behaves as its # documentation claims. return state not in non_death_penalty_states states = murder_rates.group('State').select('State') death_penalty = states.with_column('Death Penalty', states.apply(had_death_penalty_in_1971, 0)) death_penalty ``` **Question 3.3:** Use the `death_penalty` and `murder_rates` tables to find murder rates in 1971 for states with the death penalty before the abolition. Create a new table `preban_rates` that contains the same information as `murder_rates`, along with a column `Death Penalty` that contains booleans (`True` or `False`) describing if states had the death penalty in 1971. <!-- BEGIN QUESTION name: q3_3 --> ``` # States that had death penalty in 1971 preban_rates = ... preban_rates grader.check("q3_3") ``` **Question 3.4:** Create a table `postban_rates` that contains the same information as `preban_rates`, but for 1973 instead of 1971. 
`postban_rates` should only contain the states found in `preban_rates`. <!-- BEGIN QUESTION name: q3_4 --> ``` postban_rates = ... postban_rates = postban_rates.sort("State") postban_rates grader.check("q3_4") ``` **Question 3.5:** Use `preban_rates_copy` and `postban_rates` to create a table `change_in_death_rates` that contains each state's population, murder rate, and whether or not that state had the death penalty for both 1971 and 1973. *Hint:* `tbl_1.append(tbl_2)` with create a new table that includes rows from both `tbl_1` and `tbl_2`. Both tables must have the exactly the same columns, in the same order. <!-- BEGIN QUESTION name: q3_5 --> ``` preban_rates_copy = preban_rates.copy() change_in_death_rates = ... change_in_death_rates ``` Run the cell below to view the distribution of death rates during the pre-ban and post-ban time periods. ``` change_in_death_rates.hist('Murder Rate', group = 'Death Penalty') ``` **Question 3.6:** Create a table `rate_means` that contains the average murder rates for the states that had the death penalty and the states that didn't have the death penalty. It should have two columns: one indicating if the penalty was in place, and one that contains the average murder rate for each group. <!-- BEGIN QUESTION name: q3_6 --> ``` rate_means = ... rate_means grader.check("q3_6") ``` <!-- BEGIN QUESTION --> **Question 3.7:** We want to figure out if there is a difference between the distribution of death rates in 1971 and 1973. Specifically, we want to test if murder rates were higher in 1973 than they were in 1971. What should the test statistic be? How does it help us differentiate whether the data supports the null and alternative? If you are in lab, confirm your answer with a lab TA/LA before moving on. <!-- BEGIN QUESTION name: q3_7 manual: true --> _Type your answer here, replacing this text._ <!-- END QUESTION --> **Question 3.8:** Set `observed_difference` to the observed test statistic using the `rate_means` table <!-- BEGIN QUESTION name: q3_8 --> ``` observed_difference = ... observed_difference grader.check("q3_8") ``` **Question 3.9:** Given a table like `change_in_death_rates`, a value column `label`, and a group column `group_label`, write a function that calculates the appropriate test statistic. <!-- BEGIN QUESTION name: q3_9 --> ``` # SOLUTION def find_test_stat(table, labels_col, values_col): ... find_test_stat(change_in_death_rates, "Death Penalty", "Murder Rate") grader.check("q3_9") ``` When we run a simulation for A/B testing, we resample by shuffling the labels of the original sample. If the null hypothesis is true and the murder rate distributions are the same, we expect that the difference in mean death rates will be not change when "Death Penalty" labels are changed. **Question 3.10:** Write a function `simulate_and_test_statistic` to compute one trial of our A/B test. Your function should run a simulation and return a test statistic. Note: The test here is fairly lenient, if you have an issue with the following questions, make sure to take a look at your answer to 3.9. Specifically, make sure that you are taking the directionality of our alternative hypothesis into account. <!-- BEGIN QUESTION name: q3_10 --> ``` # SOLUTION def simulate_and_test_statistic(table, labels_col, values_col): ... 
simulate_and_test_statistic(change_in_death_rates, "Death Penalty", "Murder Rate") grader.check("q3_10") ``` **Question 3.11:** Simulate 5000 trials of our A/B test and store the test statistics in an array called `differences` <!-- BEGIN QUESTION name: q3_11 --> ``` # This cell might take a couple seconds to run differences = make_array() ... differences grader.check("q3_11") ``` Run the cell below to view a histogram of your simulated test statistics plotted with your observed test statistic ``` Table().with_column('Difference Between Group Means', differences).hist() plt.scatter(observed_difference, 0, color='red', s=30, zorder=2); ``` **Question 3.12:** Find the p-value for your test and assign it to `empirical_P` <!-- BEGIN QUESTION name: q3_12 --> ``` empirical_P = ... empirical_P grader.check("q3_12") ``` <!-- BEGIN QUESTION --> **Question 3.13:** Using a 5% P-value cutoff, draw a conclusion about the null and alternative hypotheses. Describe your findings using simple, non-technical language. What does your analysis tell you about murder rates after the death penalty was suspended? What can you claim about causation from your statistical analysis? <!-- BEGIN QUESTION name: q3_13 manual: true --> _Type your answer here, replacing this text._ <!-- END QUESTION --> **You're done! Congratulations.** Run the cells below to check your work and submit to okpy. --- To double-check your work, the cell below will rerun all of the autograder tests. ``` grader.check_all() ``` ## Submission Make sure you have run all cells in your notebook in order before running the cell below, so that all images/graphs appear in the output. The cell below will generate a zip file for you to submit. **Please save before exporting!** ``` # Save your notebook first, then run this cell to export your submission. grader.export() ```
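Since the solution cells above are intentionally left as `...`, here is a generic, hedged illustration (plain NumPy arrays and toy numbers, not the graded lab solution) of the shuffle-the-labels procedure listed in Question 1.1:

```
# Generic illustration of an A/B permutation test with shuffled labels.
import numpy as np

def ab_test(values, labels, repetitions=5000):
    """Simulate differences in group means under the null by shuffling labels."""
    values = np.asarray(values, dtype=float)
    labels = np.asarray(labels, dtype=bool)
    diffs = np.empty(repetitions)
    for i in range(repetitions):
        shuffled = np.random.permutation(labels)
        diffs[i] = values[shuffled].mean() - values[~shuffled].mean()
    return diffs

# toy numbers only -- not the lab's murder-rate data
rates = np.array([8.6, 9.0, 10.2, 7.1, 6.8, 7.5])
is_1973 = np.array([True, True, True, False, False, False])
observed = rates[is_1973].mean() - rates[~is_1973].mean()
simulated = ab_test(rates, is_1973)
p_value = np.mean(simulated >= observed)  # alternative: rates were higher in 1973
print(p_value)
```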
github_jupyter
# Initialize Otter import otter grader = otter.Notebook("lab07.ipynb") # Run this cell to set up the notebook, but please don't change it. # These lines import the Numpy and Datascience modules. import numpy as np from datascience import * # These lines do some fancy plotting magic. import matplotlib %matplotlib inline import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') import warnings warnings.simplefilter('ignore', FutureWarning) ab_test_order = ... grader.check("q1_1") murder_rates = Table.read_table('crime_rates.csv').select('State', 'Year', 'Population', 'Murder Rate') murder_rates.set_format("Population", NumberFormatter) # The next lines are provided for you. They create a table # containing only the Alaska information and one containing # only the Minnesota information. ak = murder_rates.where('State', 'Alaska').drop('State', 'Population').relabeled(1, 'Murder rate in Alaska') mn = murder_rates.where('State', 'Minnesota').drop('State', 'Population').relabeled(1, 'Murder rate in Minnesota') # Fill in this line to make a table like the one pictured above. ak_mn = ... ak_mn grader.check("q2_1") # Draw your line plot here ... # Compare the murder rates of any two states by filling in the blanks below from ipywidgets import interact, interactive, fixed, interact_manual import ipywidgets as widgets def state(state1, state2): state1_table = murder_rates.where('State', state1).drop('State', 'Population').relabeled(1, 'Murder rate in {}'.format(state1)) state2_table = murder_rates.where('State', state2).drop('State', 'Population').relabeled(1, 'Murder rate in {}'.format(state2)) s1_s2 = state1_table.join('Year', state2_table) s1_s2.plot('Year') plt.show() states_array = murder_rates.group('State').column('State') _ = interact(state, state1=widgets.Dropdown(options=list(states_array),value='California'), state2=widgets.Dropdown(options=list(states_array),value='New York') ) non_death_penalty_states = make_array('Alaska', 'Hawaii', 'Maine', 'Michigan', 'Wisconsin', 'Minnesota') def had_death_penalty_in_1971(state): """Returns True if the argument is the name of a state that had the death penalty in 1971.""" # The implementation of this function uses a bit of syntax # we haven't seen before. Just trust that it behaves as its # documentation claims. return state not in non_death_penalty_states states = murder_rates.group('State').select('State') death_penalty = states.with_column('Death Penalty', states.apply(had_death_penalty_in_1971, 0)) death_penalty # States that had death penalty in 1971 preban_rates = ... preban_rates grader.check("q3_3") postban_rates = ... postban_rates = postban_rates.sort("State") postban_rates grader.check("q3_4") preban_rates_copy = preban_rates.copy() change_in_death_rates = ... change_in_death_rates change_in_death_rates.hist('Murder Rate', group = 'Death Penalty') rate_means = ... rate_means grader.check("q3_6") observed_difference = ... observed_difference grader.check("q3_8") # SOLUTION def find_test_stat(table, labels_col, values_col): ... find_test_stat(change_in_death_rates, "Death Penalty", "Murder Rate") grader.check("q3_9") # SOLUTION def simulate_and_test_statistic(table, labels_col, values_col): ... simulate_and_test_statistic(change_in_death_rates, "Death Penalty", "Murder Rate") grader.check("q3_10") # This cell might take a couple seconds to run differences = make_array() ... 
differences grader.check("q3_11") Table().with_column('Difference Between Group Means', differences).hist() plt.scatter(observed_difference, 0, color='red', s=30, zorder=2); empirical_P = ... empirical_P grader.check("q3_12") grader.check_all() # Save your notebook first, then run this cell to export your submission. grader.export()
0.534612
0.973695
# Getting started with Captum Insights: a simple model on CIFAR10 dataset Demonstrates how to use Captum Insights embedded in a notebook to debug a CIFAR model and test samples. This is a slight modification of the CIFAR_TorchVision_Interpret notebook. More details about the model can be found here: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py **Note:** Before running this tutorial, please install the torchvision, and IPython packages. ``` import os import torch import torch.nn as nn import torchvision import torchvision.transforms as transforms from captum.insights import AttributionVisualizer, Batch from captum.insights.features import ImageFeature ``` Define functions for classification classes and pretrained model. ``` def get_classes(): classes = [ "Plane", "Car", "Bird", "Cat", "Deer", "Dog", "Frog", "Horse", "Ship", "Truck", ] return classes def get_pretrained_model(): class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool1 = nn.MaxPool2d(2, 2) self.pool2 = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) self.relu1 = nn.ReLU() self.relu2 = nn.ReLU() self.relu3 = nn.ReLU() self.relu4 = nn.ReLU() def forward(self, x): x = self.pool1(self.relu1(self.conv1(x))) x = self.pool2(self.relu2(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = self.relu3(self.fc1(x)) x = self.relu4(self.fc2(x)) x = self.fc3(x) return x net = Net() net.load_state_dict(torch.load("models/cifar_torchvision.pt")) return net def baseline_func(input): return input * 0 def formatted_data_iter(): dataset = torchvision.datasets.CIFAR10( root="data/test", train=False, download=True, transform=transforms.ToTensor() ) dataloader = iter( torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=False, num_workers=2) ) while True: images, labels = next(dataloader) yield Batch(inputs=images, labels=labels) ``` Run the visualizer and render inside notebook for interactive debugging. ``` normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) model = get_pretrained_model() visualizer = AttributionVisualizer( models=[model], score_func=lambda o: torch.nn.functional.softmax(o, 1), classes=get_classes(), features=[ ImageFeature( "Photo", baseline_transforms=[baseline_func], input_transforms=[normalize], ) ], dataset=formatted_data_iter(), ) visualizer.render() # show a screenshot if using notebook non-interactively from IPython.display import Image Image(filename='img/captum_insights.png') ```
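For non-interactive use, the same model can also be passed to a single attribution method directly. Below is a hedged sketch using Integrated Gradients on a handful of CIFAR-10 test images; it reuses `get_pretrained_model` and the same normalization as above, and the zero baseline mirrors `baseline_func`.

```
# Hedged sketch: Integrated Gradients on a few CIFAR-10 test images,
# without the interactive AttributionVisualizer.
import torch
import torchvision
import torchvision.transforms as transforms
from captum.attr import IntegratedGradients

model = get_pretrained_model()
model.eval()
normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))

dataset = torchvision.datasets.CIFAR10(
    root="data/test", train=False, download=True, transform=transforms.ToTensor()
)
samples = [dataset[i] for i in range(4)]
images = torch.stack([normalize(img) for img, _ in samples])
labels = torch.tensor([label for _, label in samples])

ig = IntegratedGradients(model)
attributions = ig.attribute(images, target=labels, baselines=images * 0)
print(attributions.shape)  # matches the input batch shape, e.g. (4, 3, 32, 32)
```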
``` import pickle import numpy as np import random from tqdm import tqdm import os import os.path from clear_texts import * import tensorflow as tf def loggin(log_str): print(log_str) print(tf.VERSION) #functions for generating traning sequenses, decodinn, encoding, text generation text = ''.join(get_textes()) def get_encode_and_decode_dicts(text): later_counter = Counter() later_counter.update(text) alphabit = set(later_counter.keys()) charter_to_inx = { ch:inx for inx,ch in enumerate(alphabit)} inx_to_charter = { inx:ch for ch,inx in charter_to_inx.items()} return charter_to_inx, inx_to_charter, alphabit charter_to_inx, inx_to_charter, alphabit = get_encode_and_decode_dicts(text) alphabit_size = len(alphabit) def encode_seq(seq): return np.array([ charter_to_inx[ch] for ch in seq ]) def decode_seq(seq): return "".join([ inx_to_charter[inx] for inx in seq ]) def sequenses_generator(text, batch_len, seq_length): while True: X = [] while len(X) < batch_len: shift = np.random.randint(len(text)-seq_length) seq_in = text[shift:seq_length+shift] X.append(encode_seq(seq_in)) yield np.array(X) - from os import listdir from os.path import isfile, join def rnn_placeholders(state): """Convert RNN state tensors to placeholders with the zero state as default.""" if isinstance(state, tf.contrib.rnn.LSTMStateTuple): c, h = state c = tf.placeholder_with_default(c, c.shape, c.op.name) h = tf.placeholder_with_default(h, h.shape, h.op.name) return tf.contrib.rnn.LSTMStateTuple(c, h) elif isinstance(state, tf.Tensor): h = state h = tf.placeholder_with_default(h, h.shape, h.op.name) return h else: structure = [rnn_placeholders(x) for x in state] return tuple(structure) class Model(object): def __init__(self, sess, seq_length, alphabit_size, verbas = True, state_sizes=[128,128]): self.sess = sess self.state_sizes = state_sizes if verbas: loggin('Create NN') #data paceholder self.train_input = tf.placeholder(tf.int32, [None, seq_length]) one_hot_input = tf.one_hot(self.train_input, alphabit_size) if verbas: loggin('rnn_cell input shape %s' % str(one_hot_input.get_shape())) #define weights and rnn cells #add LSTM cells def lstm_cell(state_size): return tf.contrib.rnn.BasicLSTMCell(state_size) cells = [lstm_cell(_) for _ in state_sizes ] self.rnn_cell = tf.contrib.rnn.MultiRNNCell(cells) #add output layer waights if verbas: loggin('rnn_cell output shape %s' % str(state_sizes[-1])) self.output_w = tf.get_variable(shape=(state_sizes[-1], alphabit_size), initializer=tf.contrib.layers.xavier_initializer(), name = "out_w") self.output_b = tf.get_variable(shape=(alphabit_size),initializer=tf.constant_initializer(0.0), name = "out_b") if verbas: loggin('w shape %s, b shape %s' % (str(self.output_w.shape), str(self.output_b.shape)) ) #1)deffine learning graph #forwarg pass with tf.variable_scope("rnn_layer"): lstm_output, lstm_states = tf.nn.dynamic_rnn(self.rnn_cell, one_hot_input, dtype = tf.float32) #we need only last 1/4 start_position = seq_length//4 count = seq_length-start_position-1 trancated_lstm_output = tf.slice(lstm_output, begin = [0,start_position,0], size = [-1,count,-1]) if verbas: loggin('trancated_lstm_output shape %s'% str( trancated_lstm_output.get_shape() ) ) rnn_output = tf.reshape(trancated_lstm_output, [-1,state_sizes[-1]]) if verbas: loggin('rnn_output shape %s'% str( rnn_output.get_shape() ) ) output_layer = tf.nn.xw_plus_b(rnn_output,self.output_w,self.output_b) if verbas: loggin('output_layer shape %s'% str( output_layer.get_shape() ) ) target = tf.reshape(tf.slice(one_hot_input, begin = 
[0,start_position+1,0], size = [-1,count,-1]),[-1,alphabit_size]) if verbas: loggin('target shape %s'% str( target.get_shape() ) ) self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=target, logits=output_layer)) if verbas: loggin('self.loss shape %s'% str( self.loss.get_shape() ) ) self.learning_rate = tf.placeholder(tf.float32) self.optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate).minimize(self.loss) #2)deffine acc func pred_classes = tf.argmax(output_layer, axis=1) y_classes = tf.argmax(target ,axis = 1) self.accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_classes, y_classes), tf.float32)) tf.summary.scalar('loss', self.loss) tf.summary.scalar('accuracy', self.accuracy) self.merged_summary = tf.summary.merge_all() #3)predict next charter graph self.input = tf.placeholder(tf.int32) self.input_state = rnn_placeholders(self.rnn_cell.zero_state(1,tf.float32)) rnn_input = tf.reshape(tf.one_hot(self.input, alphabit_size), [1,1,alphabit_size]) with tf.variable_scope("rnn_layer"): tf.get_variable_scope().reuse_variables() predict_rnn_output, self.output_state = tf.nn.dynamic_rnn(self.rnn_cell, rnn_input, dtype = tf.float32, initial_state = self.input_state ) predict_rnn_output = tf.reshape(predict_rnn_output, [-1,state_sizes[-1]]) predict_output_layer = tf.nn.xw_plus_b(predict_rnn_output,self.output_w,self.output_b) self.predict_output = tf.nn.softmax(predict_output_layer) def train_on_batch(self, batch, learning_rate = 1e-3): feed = {self.train_input: batch,self.learning_rate:learning_rate} return self.sess.run([ self.loss, self.optimizer ], feed)[0] def get_loss(self, batch): feed = {self.train_input: batch} return self.sess.run(self.loss, feed) def get_accuracy(self, batch): feed = {self.train_input: batch} return self.sess.run(self.accuracy, feed) def get_summary(self, batch): feed = {self.train_input: batch} return self.sess.run(self.merged_summary, feed) def get_accuracy_for_seq(self,seq): encoded = encode_seq(seq) predicted = [] self.reset_state() for _ in encoded: predicted.append( self.step(_) ) eq_count = sum( [ int(_[1] == _[0].argmax()) for _ in zip(predicted[:-1], list(encoded)[1:]) ] ) return eq_count/len(predicted) def step(self ,ch_inx): feed = {self.input: ch_inx, self.input_state: self.curent_state} out, self.curent_state = self.sess.run([self.predict_output, self.output_state], feed) return out def reset_state(self): self.curent_state = self.sess.run(self.rnn_cell.zero_state(1,tf.float32)) def __deffault_fname__(self): fname = "statesize-%s-cellcount-%s-.ckpt" % ( str(self.state_sizes[-1]),str(len(self.state_sizes)) ) return fname def save(self, fname = None, loss = None): savepath = "model_waights" if not fname: fname = self.__deffault_fname__() if loss: fname = "loss-%s-"%(str(loss))+fname fname = join(savepath, fname) saver = tf.train.Saver() save_path = saver.save(self.sess, fname) print("Model saved in file: %s" % save_path) def load(self, fname = None): savepath = "model_waights" if not fname: fname = self.__deffault_fname__() fname = join(savepath, fname) saver = tf.train.Saver() saver.restore(self.sess, fname) def load_best(self): savepath = "model_waights" files = [f for f in listdir(savepath) if isfile(join(savepath, f))] all_checkpoint_parameters = [] for fname in files: checkpoint_parameters = {"fname":fname} parts_of_name = fname.split('-') while len(parts_of_name)>1: key = parts_of_name[0] value = parts_of_name[1] checkpoint_parameters[key] = value parts_of_name = parts_of_name[2:] 
all_checkpoint_parameters.append(checkpoint_parameters) all_checkpoint_parameters = [par for par in all_checkpoint_parameters if "statesize" in par and "cellcount" in par and "loss" in par ] all_checkpoint_parameters = [par for par in all_checkpoint_parameters if int(par["statesize"])==self.state_sizes[-1] and int(par["cellcount"])==len(self.state_sizes) ] if not len(all_checkpoint_parameters): loggin('No checkpoints for this model') return all_checkpoint_parameters = sorted(all_checkpoint_parameters, key=lambda x: float(x['loss'])) fname = all_checkpoint_parameters[0]['fname'].split('.ckpt')[0]+'.ckpt' loggin('loaded from %s' % fname) self.load(fname) def train(model, eps_count = 10, batch_len = 256, batchs_in_ep = 256, learning_rate = 1e-3): print('gen:+',decode_seq(generate(model, 100)),'+') texts = get_textes() random.shuffle(texts) train_text = ''.join(texts[50:]) test_text = ''.join(texts[:50]) seed = np.random.randint(100000) np.random.seed(123) x_for_loss_check = sequenses_generator(test_text, 512, seq_length).__next__() np.random.seed(seed) data_gen = sequenses_generator(train_text, batch_len, seq_length) min_loss = model.get_loss(x_for_loss_check) for ep in range(eps_count): for batches_processed in tqdm(range(batchs_in_ep)): train_x = data_gen.__next__() train_loss = model.train_on_batch(train_x, learning_rate = learning_rate) loss = model.get_loss(x_for_loss_check) acc = model.get_accuracy_for_seq(test_text) if min_loss > loss: min_loss = loss model.save(loss = loss) print('ep %s acc %s, last loss %s, train_loss: %s' % ( ep,str(acc), str(loss), str(train_loss) ) ) print('gen:+',decode_seq(generate(model, 100)),'+') seq_length = 200 eps_count = 20 batch_len = 256 batchs_in_ep = 64 state_sizes = [512,512,512] learning_rate = 1e-3 new_model = True with tf.Graph().as_default(): with tf.Session() as sess: model = Model(sess, seq_length = seq_length, verbas = True, alphabit_size = alphabit_size, state_sizes = state_sizes) sess.run(tf.global_variables_initializer()) if not new_model: model.load_best() train(model, eps_count = eps_count, batch_len = batch_len, batchs_in_ep = batchs_in_ep, learning_rate = learning_rate) seq_length = 100 state_sizes = [512,512,512] with tf.Graph().as_default(): with tf.Session() as sess: model = Model(sess, seq_length = seq_length, verbas = True, alphabit_size = alphabit_size, state_sizes = state_sizes) sess.run(tf.global_variables_initializer()) model.load_best() print('generate random poetry:') print('gen:+',decode_seq(generate(model, 10000, sampling = False, pattern = "о любви.")),'+') ```
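Both `train()` and the final cell call a `generate()` helper that is never defined in the cells shown here. A minimal sketch consistent with the `Model` API above (`reset_state`, `step`) and the `encode_seq`/`decode_seq` helpers — an assumption about the missing cell, not the author's original code — could look like:

```python
def generate(model, length, sampling=True, pattern=" "):
    # Warm the RNN state up on the seed pattern, then emit `length` character indices.
    model.reset_state()
    out = None
    for inx in encode_seq(pattern):
        out = model.step(inx)
    generated = []
    for _ in range(length):
        probs = np.asarray(out).ravel()
        probs = probs / probs.sum()          # guard against rounding drift
        if sampling:
            inx = np.random.choice(len(probs), p=probs)
        else:
            inx = int(np.argmax(probs))
        generated.append(inx)
        out = model.step(inx)
    return generated
```

The returned list of indices can be passed straight to `decode_seq()`, as in the `print('gen:+', ...)` calls above.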
# Measurement of an Acoustic Impulse Response *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the masters module Selected Topics in Audio Signal Processing, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).* ## Introduction The propagation of sound from one position (e.g. transmitter) to another (e.g. receiver) conforms reasonable well to the properties of a linear time-invariant (LTI) system. Consequently, the impulse response $h[k]$ characterizes the propagation of sound between theses two positions. Impulse responses have various applications in acoustics. For instance as [head-related impulse responses](https://en.wikipedia.org/wiki/Head-related_transfer_function) (HRIRs) or room impulse responses (RIRs) for the characterization of room acoustics. The following example demonstrates how an acoustic transfer function can be estimated by spectral divison using the soundcard of a computer. The module [`sounddevice`](http://python-sounddevice.readthedocs.org/) provides access to the soundcard via [`portaudio`](http://www.portaudio.com/). ``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import scipy.signal as sig import sounddevice as sd ``` ## Generation of the Measurement Signal We generate a linear sweep as measurement signal ``` fs = 44100 # sampling rate T = 5 # length of the measurement signal in sec Tr = 2 # length of the expected system response in sec t = np.linspace(0, T, T*fs) x = sig.chirp(t, 20, T, 20000, 'linear', phi=90) x = 0.9 * x / np.max(np.abs(x)) ``` ## Playback of Measurement Signal and Recording of Room Response The measurement signal $x[k]$ is played through the output of the soundcard and the response $y[k]$ is captured synchronously by the input of the soundcard. The length of the played/captured signal has to be of equal length when using the soundcard. The measurement signal $x[k]$ is zero-padded so that the captured signal $y[k]$ includes the complete system response. Please be careful with the volume of your speakers. Start with a very low volume and increase until a reasonable microphone level is reached. This holds especially for the low and high frequencies which can damage your speakers at high levels. After both the loudspeaker and microphone levels are set, repeat the recording with fixed settings. ``` x = np.concatenate((x, np.zeros(Tr*fs))) y = sd.playrec(x, fs, channels=1) sd.wait() y = np.squeeze(y) print('Playback level: ', 20*np.log10(max(x)), ' dB') print('Input level: ', 20*np.log10(max(y)), ' dB') ``` ## Computation of the Acoustic Impulse Response The impulse response is computed by spectral division $h[k] = \text{IDFT}_N \left\{ \frac{ \text{DFT}_N \{ y[k] \}}{\text{DFT}_N \{ x[k] \}} \right\} $ ``` H = np.fft.rfft(y) / np.fft.rfft(x) h = np.fft.irfft(H) h = h[0:Tr*fs] ``` and plotted for illustration ``` plt.figure(figsize=(10, 5)) t = 1/fs * np.arange(len(h)) plt.plot(t, h) plt.axis([0.0, 1.0, -1.1*np.max(np.abs(h)), 1.1*np.max(np.abs(h))]) plt.xlabel(r'$t$ in s') plt.ylabel(r'$\hat{h}[k]$'); ``` **Copyright** This notebook is provided as [Open Educational Resources](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. 
The text/images/data are licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Selected Topics in Audio Signal Processing - Supplementary Material, 2017*.
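Addendum (not part of the original notebook): once `H` has been computed, its magnitude can be plotted as a quick sanity check of the measured transfer function. This sketch assumes `H`, `x`, and `fs` from the cells above.

```python
# Magnitude response of the measured system in dB
f = np.fft.rfftfreq(len(x), d=1/fs)
plt.figure(figsize=(10, 5))
plt.semilogx(f[1:], 20 * np.log10(np.abs(H[1:]) + 1e-12))  # skip the DC bin on the log axis
plt.xlabel(r'$f$ in Hz')
plt.ylabel(r'$|\hat{H}(f)|$ in dB');
```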
# Loading Image Data So far we've been working with fairly artificial datasets that you wouldn't typically be using in real projects. Instead, you'll likely be dealing with full-sized images like you'd get from smart phone cameras. In this notebook, we'll look at how to load images and use them to train neural networks. We'll be using a [dataset of cat and dog photos](https://www.kaggle.com/c/dogs-vs-cats) available from Kaggle. Here are a couple example images: <img src='assets/dog_cat.png'> We'll use this dataset to train a neural network that can differentiate between cats and dogs. These days it doesn't seem like a big accomplishment, but five years ago it was a serious challenge for computer vision systems. ``` %matplotlib inline %config InlineBackend.figure_format = 'retina' import matplotlib.pyplot as plt import torch from torchvision import datasets, transforms import helper ``` The easiest way to load image data is with `datasets.ImageFolder` from `torchvision` ([documentation](http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder)). In general you'll use `ImageFolder` like so: ```python dataset = datasets.ImageFolder('path/to/data', transform=transform) ``` where `'path/to/data'` is the file path to the data directory and `transform` is a list of processing steps built with the [`transforms`](http://pytorch.org/docs/master/torchvision/transforms.html) module from `torchvision`. ImageFolder expects the files and directories to be constructed like so: ``` root/dog/xxx.png root/dog/xxy.png root/dog/xxz.png root/cat/123.png root/cat/nsdf3.png root/cat/asd932_.png ``` where each class has it's own directory (`cat` and `dog`) for the images. The images are then labeled with the class taken from the directory name. So here, the image `123.png` would be loaded with the class label `cat`. You can download the dataset already structured like this [from here](https://s3.amazonaws.com/content.udacity-data.com/nd089/Cat_Dog_data.zip). I've also split it into a training set and test set. ### Transforms When you load in the data with `ImageFolder`, you'll need to define some transforms. For example, the images are different sizes but we'll need them to all be the same size for training. You can either resize them with `transforms.Resize()` or crop with `transforms.CenterCrop()`, `transforms.RandomResizedCrop()`, etc. We'll also need to convert the images to PyTorch tensors with `transforms.ToTensor()`. Typically you'll combine these transforms into a pipeline with `transforms.Compose()`, which accepts a list of transforms and runs them in sequence. It looks something like this to scale, then crop, then convert to a tensor: ```python transform = transforms.Compose([transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor()]) ``` There are plenty of transforms available, I'll cover more in a bit and you can read through the [documentation](http://pytorch.org/docs/master/torchvision/transforms.html). ### Data Loaders With the `ImageFolder` loaded, you have to pass it to a [`DataLoader`](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader). The `DataLoader` takes a dataset (such as you would get from `ImageFolder`) and returns batches of images and the corresponding labels. You can set various parameters like the batch size and if the data is shuffled after each epoch. 
```python dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True) ``` Here `dataloader` is a [generator](https://jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/). To get data out of it, you need to loop through it or convert it to an iterator and call `next()`. ```python # Looping through it, get a batch on each loop for images, labels in dataloader: pass # Get one batch images, labels = next(iter(dataloader)) ``` >**Exercise:** Load images from the `Cat_Dog_data/train` folder, define a few transforms, then build the dataloader. ``` data_dir = '/Users/mohamedabdelbary/Documents/Dev/deep-learning-v2-pytorch/dogs-vs-cats/' transform = transforms.Compose([ transforms.Resize(255), transforms.CenterCrop(224), transforms.ToTensor() ]) dataset = datasets.ImageFolder(data_dir, transform=transform) dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True) # Run this to test your data loader images, labels = next(iter(dataloader)) helper.imshow(images[0], normalize=False) ``` If you loaded the data correctly, you should see something like this (your image will be different): <img src='assets/cat_cropped.png' width=244> ## Data Augmentation A common strategy for training neural networks is to introduce randomness in the input data itself. For example, you can randomly rotate, mirror, scale, and/or crop your images during training. This will help your network generalize as it's seeing the same images but in different locations, with different sizes, in different orientations, etc. To randomly rotate, scale and crop, then flip your images you would define your transforms like this: ```python train_transforms = transforms.Compose([transforms.RandomRotation(30), transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]) ``` You'll also typically want to normalize images with `transforms.Normalize`. You pass in a list of means and list of standard deviations, then the color channels are normalized like so ```input[channel] = (input[channel] - mean[channel]) / std[channel]``` Subtracting `mean` centers the data around zero and dividing by `std` squishes the values to be between -1 and 1. Normalizing helps keep the network work weights near zero which in turn makes backpropagation more stable. Without normalization, networks will tend to fail to learn. You can find a list of all [the available transforms here](http://pytorch.org/docs/0.3.0/torchvision/transforms.html). When you're testing however, you'll want to use images that aren't altered (except you'll need to normalize the same way). So, for validation/test images, you'll typically just resize and crop. >**Exercise:** Define transforms for training data and testing data below. Leave off normalization for now. 
```
data_dir = '/Users/mohamedabdelbary/Documents/Dev/deep-learning-v2-pytorch/dogs-vs-cats'

# TODO: Define transforms for the training data and testing data
# Random PIL-image transforms come first and ToTensor() comes last;
# normalization is left off for now, as instructed above.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor()])

test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor()])

# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)

trainloader = torch.utils.data.DataLoader(train_data, batch_size=32)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32)

# change this to the trainloader or testloader
data_iter = iter(testloader)

images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    helper.imshow(images[ii], ax=ax, normalize=False)
```

Your transformed images should look something like this.

<center>Training examples:</center>
<img src='assets/train_examples.png' width=500px>

<center>Testing examples:</center>
<img src='assets/test_examples.png' width=500px>

At this point you should be able to load data for training and testing. Now, you should try building a network that can classify cats vs dogs. This is quite a bit more complicated than before with the MNIST and Fashion-MNIST datasets. To be honest, you probably won't get it to work with a fully-connected network, no matter how deep. These images have three color channels and are at a higher resolution (so far you've seen 28x28 images, which are tiny).

In the next part, I'll show you how to use a pre-trained network to build a model that can actually solve this problem.

```
# Optional TODO: Attempt to build a network to classify cats vs dogs from this dataset
```
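For the optional TODO above, a minimal convolutional baseline might look like the following sketch — purely illustrative and not the course solution (the next part uses a pre-trained network instead); it assumes the `trainloader` defined earlier in this notebook.

```python
from torch import nn, optim

# Small CNN for 224x224 RGB input, 2 classes (cat/dog). Purely illustrative.
model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),   # 224 -> 112
    nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),  # 112 -> 56
    nn.Conv2d(32, 64, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),  # 56 -> 28
    nn.Flatten(),
    nn.Linear(64 * 28 * 28, 256), nn.ReLU(), nn.Dropout(0.2),
    nn.Linear(256, 2), nn.LogSoftmax(dim=1),
)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

# One illustrative pass over a single batch; wrap in an epoch loop for real training.
images, labels = next(iter(trainloader))
optimizer.zero_grad()
loss = criterion(model(images), labels)
loss.backward()
optimizer.step()
print(loss.item())
```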
``` import course;course.header() ``` # The csv module There are several ways to interact with files that contain data in a "comma separated value" format. We cover the [basic csv module](https://docs.python.org/3/library/csv.html), as it is sometimes really helpful to retain only a fraction of the information of a csv to avoid memory overflow. ``` import csv with open("../data/amino_acid_properties.csv") as aap: aap_reader = csv.DictReader(aap, delimiter=",") for line_dict in aap_reader: print(line_dict) break ``` Print not always very readable - use pretty print! :) ``` import pprint pprint.pprint(line_dict) ``` The hydropathy index is the energy released or required ot transfer the amino acid from water to a hydrophobic environment. - Arg: +4.5 kcal/mol - Ile: -4.5 kcal/mol We can also use the csv module to write csvs, or tab separated value files if we change the delimiter to "\t" ``` with open("../data/test.csv", "w") as output: aap_writer = csv.DictWriter(output, fieldnames=["Name", "3-letter code"]) aap_writer.writeheader() aap_writer.writerow({"Name": "Alanine", "3-letter code": "Ala", "1-letter code": "A"}) !cat ../data/test.csv ``` ## Fix it! ``` # fix it with open("c", "w") as output: aap_writer = csv.DictWriter(output, fieldnames=["Name", "3-letter code"], extrasaction='ignore') aap_writer.writeheader() aap_writer.writerow({"Name": "Alanine", "3-letter code": "Ala", "1-letter code": "A"}) ``` # Collections - high performance containers ... sorta ## [collections.Counter](https://docs.python.org/3.7/library/collections.html#counter-objects) A counter tool is provided to support convenient and rapid tallies. For example ``` from collections import Counter s = """ MQRLMMLLATSGACLGLLAVAAVAAAGANPAQRDTHSLLPTHRRQKRDWIWNQMHIDEEK NTSLPHHVGKIKSSVSRKNAKYLLKGEYVGKVFRVDAETGDVFAIERLDRENISEYHLTA VIVDKDTGENLETPSSFTIKVHDVNDNWPVFTHRLFNASVPESSAVGTSVISVTAVDADD PTVGDHASVMYQILKGKEYFAIDNSGRIITITKSLDREKQARYEIVVEARDAQGLRGDSG TATVLVTLQDINDNFPFFTQTKYTFVVPEDTRVGTSVGSLFVEDPDEPQNRMTKYSILRG DYQDAFTIETNPAHNEGIIKPMKPLDYEYIQQYSFIVEATDPTIDL RYMSPPAGNRAQVI """ Counter(s) # Counter objects can be added together Counter("AABB") + Counter("BBCC") # Works with any type of object that are comparable Counter([(1, 1), (1, 2), (2, 1), (1, 1)]) ``` ## [collections.deque](https://docs.python.org/3.7/library/collections.html#deque-objects) Deque \[deck\] or double-ended queue can be used for many tasks, e.g. building a sliding window ``` from collections import deque s = """MQRLMMLLATSGACLGLLAVAAVAAAGANPAQRDTHSLLPTHRRQKRDWIWNQMHIDEEKNTSLPHHVGKIKSSVSRKNAKYLLKGEYVGKVFRVDAETGDVFAIERLDRENISEYHLTA""" window = deque([], maxlen=5) for pos, aa in enumerate(s): window.append(aa) print(window) if pos > 7: break Counter(window) ``` ## [collections.defaultdicts](https://docs.python.org/3.7/library/collections.html#defaultdict-objects) Defaultdicts are like dicts yet they treat missing values not with an error, thus testing if key exists is not neccessary and makes life easier :) Ofcourse, one needs to define the default value that is taken if a key is not existent. I use it a lot for counting ```python counter["error"] += 1 ``` or collecting elements in lists ```python sorter["typeA"].append({"name": "John"}) ``` No more, let's check if I have the key and if not I need to initialize. 
``` from collections import defaultdict ddict_int = defaultdict(int) # ^---- default factory ddict_list = defaultdict(list) ddict_int[10] += 10 ddict_int ddict_int[0] def default_factory_with_prefilled_dictionary(): return {"__name": "our custom dict", "errors": 0} ddict_custom = defaultdict(default_factory_with_prefilled_dictionary) ``` Does that work? ``` ddict_custom[10] += 10 ddict_custom["what_ever_key"] ddict_custom[10]['errors'] += 10 ddict_custom ```
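To tie the csv and collections halves of this notebook together, here is a small sketch that groups the rows of `amino_acid_properties.csv` without pre-initialising any keys; the `1-letter code` column name is an assumption based on the writer example above, not something verified against the file.

```python
import csv
from collections import Counter, defaultdict

rows_by_letter = defaultdict(list)   # missing keys default to an empty list
counts = Counter()                   # tallies without any key checks

with open("../data/amino_acid_properties.csv") as aap:
    for row in csv.DictReader(aap, delimiter=","):
        rows_by_letter[row["1-letter code"]].append(row)  # assumed column name
        counts["rows"] += 1

print(counts)
print(list(rows_by_letter.keys())[:5])
```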
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); </script> # The Spinning Effective One-Body Hamiltonian ## Author: Tyler Knowles ### Formatting improvements courtesy Brandon Clark ## This module documents the reduced spinning effective one-body Hamiltonian as numerically implemented in LALSuite's SEOBNRv3 gravitational waveform approximant. **Notebook Status:** <font color='red'><b> In progress </b></font> **Validation Notes:** This module is under active development -- do ***not*** use the resulting code for scientific applications. In the future, this module will be validated against the LALSuite [SEOBNRv3/SEOBNRv3_opt code]( https://git.ligo.org/lscsoft/lalsuite.) that was reviewed and approved for LIGO parameter estimation by the LIGO Scientific Collaboration. ## Introduction ### The Physical System of Interest Consider two black holes with masses $m_{1}$, $m_{2}$ and spins ${\bf S}_{1}$, ${\bf S}_{2}$ in a binary system. The spinning effective one-body ("SEOB") Hamiltonian $H_{\rm real}$ (defined in [this cell](#hreal)) describes the dynamics of this system; we will define $H_{\rm real}$ as in [Barausse and Buonanno (2010)](https://arxiv.org/abs/0912.3517) Section VE. There, $H_{\rm real}$ is canonically transformed and mapped to an effective Hamiltonian $H_{\rm eff}$ (defined in [this cell](#heff)) describing the motion of a test particle of mass $\mu$ (defined in [this cell](#mu)) and spin ${\bf S}^{*}$ (defined in [this cell](#sstar)) moving in a defomred Kerr metric of mass $M$ (defined in [this cell](#m)) and spin ${\bf S}_{\rm Kerr}$ (defined in [this cell](#skerr)). Here we seek to break up $H_{\rm real}$ and document the terms in such a way that the resulting Python code can be used to numerically evaluate $H_{\rm real}$. We write $H_{\rm real}$ in terms of Cartesian quasi-isotropic coordinates $x$, $y$, and $z$ (see [Barausse and Buonanno (2010)](https://arxiv.org/abs/0912.3517) Section III). The spatial coordinates $r$, $\theta$, and $\phi$ referenced throughout are [Boyer-Lindquist coordinates](https://en.wikipedia.org/wiki/Boyer%E2%80%93Lindquist_coordinates) (see [Barausse and Buonanno (2010)](https://arxiv.org/abs/0912.3517) Section IV). Please note that throughout this notebook we adpot the following conventions: 1. $c = 1$ where $c$ is the speed of light in a vacuum, 1. spacial tensor indicies are denoted by lowercase Latin letters, and 1. repeated indices indicate Einstein summation notation. ### Citations Throughout this module, we will refer to * [Barausse and Buonanno (2010)](https://arxiv.org/abs/0912.3517) as BB2010, * [Barausse and Buonanno (2011)](https://arxiv.org/abs/1107.2904) as BB2011, * [Pan, Buonanno, Buchman, et. al. 2010](https://arxiv.org/abs/0912.3466v2) as P2010, * [Taracchini, Pan, Buonanno, et al (2012)](https://arxiv.org/abs/1202.0790) as T2012, and * [Damour, Jaranowski, and Schaefer (2000)](https://arxiv.org/abs/gr-qc/0005034) as D2000. <a id='toc'></a> # Table of Contents $$\label{toc}$$ This notebook is organized as follows 1. [Step 1](#hreal): The Real Hamiltonian $H_{\rm real}$ 1. [Step 2](#heff): The Effective Hamiltonian $H_{\rm eff}$ 1. [Step 3](#heff_terms): Terms of $H_{\rm eff}$ 1. [Step 3.a](#hs): Leading Order Spin Effects $H_{\rm S}$ 1. [Step 3.b](#hns): The Nonspinning Hamiltonian $H_{\rm NS}$ 1. 
[Step 3.c](#hd): The Quadrupole Deformation $H_{\rm D}$ 1. [Step 4](#hso): The Spin-Orbit Term $H_{\rm SO}$ 1. [Step 4.a](#hsoterm1): $H_{\rm SO}$ Term 1 1. [Step 4.b](#hsoterm2coeff): $H_{\rm SO}$ Term 2 Coefficient 1. [Step 4.c](#hsoterm2): $H_{\rm SO}$ Term 2 1. [Step 4.c.i](#hsoterm2a): $H_{\rm SO}$ Term 2a 1. [Step 4.c.ii](#hsoterm2b): $H_{\rm SO}$ Term 2b 1. [Step 4.c.iii](#hsoterm2c): $H_{\rm SO}$ Term 2c 1. [Step 5](#hss): The Spin-Spin Term $H_{\rm SS}$ 1. [Step 5.a](#hssterm1): $H_{\rm SS}$ Term 1 1. [Step 5.b](#hssterm2coeff): $H_{\rm SS}$ Term 2 coefficient 1. [Step 5.c](#hssterm2): $H_{\rm SS}$ Term 2 1. [Step 5.d](#hssterm3coeff): $H_{\rm SS}$ Term 3 coefficient 1. [Step 5.e](#hssterm3): $H_{\rm SS}$ Term 3 1. [Step 6](#hnsterms): The $H_{\rm NS}$ Terms 1. [Step 6.a](#betapsum): $\beta p$ Sum 1. [Step 6.b](#alpha): $\alpha$ 1. [Step 6.c](#hnsradicand): $H_{\rm NS}$ Radicand 1. [Step 6.c.i](#gammappsum): $\gamma p$ Sum 1. [Step 6.c.ii](#q4): ${\cal Q}_{4}$ 1. [Step 7](#hdterms): The $H_{\rm D}$ Terms 1. [Step 7.a](#hdcoeff): $H_{\rm D}$ Coefficient 1. [Step 7.b](#hdsum): $H_{\rm D}$ Sum 1. [Step 7.b.i](#hdsumterm1): $H_{\rm D}$ Sum Term 1 1. [Step 7.b.ii](#hdsumterm2): $H_{\rm D}$ Sum Term 2 1. [Step 8](#dotproducts): Common Dot Products 1. [Step 8.a](#sdotskerr): ${\bf S} \cdot {\bf S}_{\rm Kerr}$ 1. [Step 8.b](#sdotxi): ${\bf S} \cdot \boldsymbol{\xi}$ 1. [Step 8.c](#sdotv): ${\bf S} \cdot {\bf v}$ 1. [Step 8.d](#sdotn): ${\bf S} \cdot {\bf n}$ 1. [Step 8.e](#sdotskerrhat): ${\bf S} \cdot \hat{\bf S}_{\rm Kerr}$ 1. [Step 8.f](#sstardotn): ${\bf S}^{*} \cdot {\bf n}$ 1. [Step 9](#hreal_spin_combos): $H_{\rm real}$ Spin Combination ${\bf S}^{*}$ 1. [Step 9a](#sstar): ${\bf S}^{*}$ 1. [Step 9b](#deltasigmastar): $\Delta_{\sigma^{*}}$ 1. [Step 9c](#sigmastarcoeff): $\sigma^{*}$ Coefficient 1. [Step 9c i](#sigmastarcoeffterm1): $\sigma^{*}$ Coefficient Term 1 1. [Step 9c ii](#sigmastarcoeffterm2): $\sigma^{*}$ Coefficient Term 2 1. [Step 9c iii](#sigmastarcoeffterm3): $\sigma^{*}$ Coefficient Term 3 1. [Step 9d](#sigmacoeff): $\sigma$ Coefficient 1. [Step 9d i](#sigmacoeffterm1): $\sigma$ Coefficient Term 1 1. [Step 9d ii](#sigmacoeffterm2): $\sigma$ Coefficient Term 2 1. [Step 9d iii](#sigmacoeffterm3): $\sigma$ Coefficient Term 3 1. [Step 10](#metpotderivs): Derivatives of the Metric Potential 1. [Step 10.a](#omegar): $\omega_{r}$ 1. [Step 10.b](#nur): $\nu_{r}$ 1. [Step 10.c](#mur): $\mu_{r}$ 1. [Step 10.d](#omegacostheta): $\omega_{\cos\theta}$ 1. [Step 10.e](#nucostheta): $\nu_{\cos\theta}$ 1. [Step 10.f](#mucostheta): $\mu_{\cos\theta}$ 1. [Step 10.g](#lambdatprm): $\Lambda_{t}^{\prime}$ 1. [Step 10.h](#omegatildeprm): $\tilde{\omega}_{\rm fd}^{\prime}$ 1. [Step 11](#metpots): The Deformed and Rescaled Metric Potentials 1. [Step 11.a](#omega): $\omega$ 1. [Step 11.b](#exp2nu): $e^{2 \nu}$ 1. [Step 11.c](#btilde): $\tilde{B}$ 1. [Step 11.d](#brtilde): $\tilde{B}_{r}$ 1. [Step 11.e](#exp2mu): $e^{2 \tilde{\mu}}$ 1. [Step 11.f](#jtilde): $\tilde{J}$ 1. [Step 11.g](#q): $Q$ 1. [Step 11.g.i](#drsipn2): $\frac{ \Delta_{r} }{ \Sigma } \left( \hat{\bf p} \cdot {\bf n} \right)^{2}$ 1. [Step 11.g.ii](#qcoeff1): Q Coefficient 1 1. [Step 11.g.iii](#qcoeff2): Q Coefficient 2 1. [Step 12](#tort): Tortoise terms 1. [Step 12.a](#pphi): $p_{\phi}$ 1. [Step 12.b](#pdotvr): $\hat{\bf p} \cdot {\bf v} r$ 1. [Step 12.c](#pdotn): $\hat{\bf p} \cdot {\bf n}$ 1. [Step 12.d](#pdotxir): $\hat{\bf p} \cdot \boldsymbol{\xi} r$ 1. [Step 12.e](#hatp): $\hat{\bf p}$ 1. [Step 12.f](#prt): prT 1. 
[Step 12.g](#csi2): csi2 1. [Step 12.h](#csi1): csi1 1. [Step 12.i](#csi): csi 1. [Step 13](#metric): Metric Terms 1. [Step 13.a](#lambdat): $\Lambda_{t}$ 1. [Step 13.b](#deltar): $\Delta_{r}$ 1. [Step 13.c](#deltat): $\Delta_{t}$ 1. [Step 13.d](#deltatprm): $\Delta_{t}^{\prime}$ 1. [Step 13.e](#deltau): $\Delta_{u}$ 1. [Step 13.e.i](#deltaubar): $\bar{\Delta}_{u}$ 1. [Step 13.e.ii](#deltaucalib): $\Delta_{u}$ Calibration Term 1. [Step 13.e.iii](#calib_coeffs): Calibration Coefficients 1. [Step 13.e.iv](#k): $K$ 1. [Step 13.f](#omegatilde): $\tilde{\omega}_{\rm fd}$ 1. [Step 13.g](#dinv): $D^{-1}$ 1. [Step 14](#coord): Terms Dependent on Coordinates 1. [Step 14.a](#usigma): $\Sigma$ 1. [Step 14.b](#w2): $\varpi^{2}$ 1. [Step 14.d](#sin2theta): $\sin^{2}\theta$ 1. [Step 14.e](#costheta): $\cos\theta$ 1. [Step 15](#vectors): Important Vectors 1. [Step 15.a](#v): ${\bf v}$ 1. [Step 15.b](#xi): $\boldsymbol{\xi}$ 1. [Step 15.c](#e3): ${\bf e}_{3}$ 1. [Step 15.d](#n): ${\bf n}$ 1. [Step 16](#spin_combos): Spin Combinations $\boldsymbol{\sigma}$, $\boldsymbol{\sigma}^{*}$, and ${\bf S}_{\rm Kerr}$ 1. [Step 16.a](#a): $a$ 1. [Step 16.b](#skerrhat): $\hat{\bf S}_{\rm Kerr}$ 1. [Step 16.c](#skerrmag): $\left\lvert {\bf S}_{\rm Kerr} \right\rvert$ 1. [Step 16.d](#skerr): ${\bf S}_{\rm Kerr}$ 1. [Step 16.e](#sigma): $\boldsymbol{\sigma}$ 1. [Step 16.f](#sigmastar): $\boldsymbol{\sigma}^{*}$ 1. [Step 17](#fundquant): Fundamental Quantities 1. [Step 17.a](#u): $u$ 1. [Step 17.b](#r): $r$ 1. [Step 17.c](#eta): $\eta$ 1. [Step 17.d](#mu): $\mu$ 1. [Step 17.e](#m): $M$ 1. [Step 18](#validation): Validation 1. [Step 19](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file <a id='hreal'></a> # Step 1: The real Hamiltonian $H_{\textrm{real}}$ \[Back to [top](#toc)\] $$\label{hreal}$$ The SEOB Hamiltonian $H_{\rm real}$ is given by [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.69): \begin{equation*} H_{\rm real} = M \sqrt{ 1 + 2 \eta \left( \frac{ H_{\rm eff} }{ \mu } - 1 \right) }. \end{equation*} Here $H_{\rm eff}$ (defined in [this cell](#heff)) is an *effective* Hamiltonian (see [this cell](#intro)) and $M$ (defined in [this cell](#m)), $\mu$ (defined in [this cell](#mu)), and $\eta$ (defined in [this cell](#eta)) are constants determined by $m_{1}$ and $m_{2}$. ``` %%writefile SEOBNR/Hamiltonian-Hreal_on_top.txt Hreal = sp.sqrt(1 + 2*eta*(Heff - 1)) ``` <a id='heff'></a> # Step 2: The Effective Hamiltonian $H_{\rm eff}$ \[Back to [top](#toc)\] $$\label{heff}$$ The effective Hamiltonian $H_{\rm eff}$ is given by [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.70): \begin{equation*} H_{\rm eff} = H_{\rm S} + \underbrace{ \beta^{i} p_{i} + \alpha \sqrt{ \mu^{2} + \gamma^{ij} p_{i} p_{j} + {\cal Q}_{4} } }_{ H_{\rm NS} } - \underbrace{ \frac{ \mu }{ 2 M r^{3} } \left( \delta^{ij} - 3 n^{i} n^{j} \right) S^{*}_{i} S^{*}_{j} }_{ H_{\rm D} }. \end{equation*} Here $H_{\rm S}$ (considered further in [this cell](#hs)) denotes leading order effects of spin-spin and spin-orbit coupling, $H_{\rm NS}$ (considered further in [this cell](#hns)) is the Hamiltonian for a nonspinning test particle, and $H_{\rm D}$ (considered further in [this cell](#hd)) describes quadrupole deformation of the coupling of the particle's spin with itself to leading order. <font color='red'> FIXME: find documentation for the calibration terms. 
</font> ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Heff = Hs + Hns - Hd + dSS*eta*u*u*u*u*(S1x*S1x + S1y*S1y + S1z*S1z + S2x*S2x + S2y*S2y + S2z*S2z) ``` <a id='heff_terms'></a> # Step 3: Terms of $H_{\rm eff}$ \[Back to [top](#toc)\] $$\label{heff_terms}$$ In this step, we break down each of the terms $H_{\rm S}$ (defined in [this cell](#hs)), $H_{\rm NS}$ (defined in [this cell](#hns)), and $H_{\rm D}$ (defined in [this cell](#hd)) in $H_{\rm eff}$ (defined in [this cell](#heff)). <a id='hs'></a> ## Step 3.a: Leading Order Spin Effects $H_{\rm S}$ \[Back to [top](#toc)\] $$\label{hs}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.17), \begin{equation*} H_{\rm S} = H_{\rm SO} + H_{\rm SS} \end{equation*} where $H_{\rm SO}$ (defined in [this cell](#hso)) includes spin-orbit terms and $H_{\rm SS}$ (defined in [this cell](#hss)) includes spin-spin terms. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hs = Hso + Hss ``` <a id='hns'></a> ## Step 3.b: The Nonspinning Hamiltonian $H_{\rm NS}$ \[Back to [top](#toc)\] $$\label{hns}$$ We defined $H_{\rm NS}$ in [this cell](#heff) as \begin{equation*} H_{\rm NS} = \underbrace{ \beta^{i} p_{i} }_{ \beta\ p\ \rm sum } + \alpha \sqrt{ \smash[b]{ \underbrace{ \mu^{2} + \gamma^{ij} p_{i} p_{j} + {\cal Q}_{4} }_{ H_{\rm NS}\ \rm radicand } } }. \end{equation*} We compute $\beta\ p$ sum in [this cell](#betapsum), $\alpha$ in [this cell](#alpha), and $H_{\rm NS}$ radicand in [this cell](#hnsradicand). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hns = betapsum + alpha*sp.sqrt(Hnsradicand) ``` <a id='hd'></a> ## Step 3.c: The Quadrupole Deformation $H_{\rm D}$ \[Back to [top](#toc)\] $$\label{hd}$$ We defined $H_{\rm D}$ in [this cell](#heff) as: \begin{equation*} H_{\rm D} = \underbrace{ \frac{ \mu }{ 2 M r^{3} } }_{H_{\rm D}\ {\rm coefficient}} \underbrace{ \left( \delta^{ij} - 3 n^{i} n^{j} \right) S^{*}_{i} S^{*}_{j} }_{H_{\rm D}\ {\rm sum}} \end{equation*} We compute $H_{\rm D}$ coefficient in [this cell](#hdcoeff) and $H_{\rm D}$ sum in [this cell](#hdsum). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hd = Hdcoeff*Hdsum ``` <a id='hso'></a> # Step 4: The Spin-Orbit Term $H_{\rm SO}$ \[Back to [top](#toc)\] $$\label{hso}$$ We will write [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.18) as: \begin{align*} H_{\rm SO} = H_{\rm SO}\ {\rm Term\ 1} + H_{\rm SO}\ {\rm Term\ 2\ coefficient} * H_{\rm SO}\ {\rm Term\ 2}. \end{align*} We define and consider $H_{\rm SO}$ Term 1 in [this cell](#hsoterm1), $H_{\rm SO}$ Term 2 coefficient in [this cell](#hsoterm2coeff), and $H_{\rm SO}$ Term 2 in [this cell](#hsoterm2). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hso = HsoTerm1 + HsoTerm2coeff*HsoTerm2 ``` <a id='hsoterm1'></a> ## Step 4.a: $H_{\rm SO}$ Term 1 \[Back to [top](#toc)\] $$\label{hsoterm1}$$ Combining our notation $H_{\rm SO}$ (defined in [this cell](#hso)) with [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.18), we have \begin{equation*} H_{\rm SO}\ {\rm Term\ 1} = \frac{ e^{2 \nu - \tilde{\mu} } \left( e^{\tilde{\mu} + \nu} - \tilde{B} \right) \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right) \left( {\bf S} \cdot \hat{\bf S}_{\rm Kerr} \right) }{ \tilde{B}^{2} \sqrt{Q} \xi^{2} }. 
\end{equation*} We will write \begin{equation*} H_{\rm SO}\ {\rm Term\ 1} = \frac{ e^{2 \nu} \left( e^{\tilde{\mu}} e^{\nu} - \tilde{B} \right) \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right) \left( {\bf S} \cdot \hat{\bf S}_{\rm Kerr} \right) }{ e^{ \tilde{\mu} } \tilde{B}^{2} \sqrt{Q} \xi^{2} }. \end{equation*} We define $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $e^{\nu}$ in [this cell](#exp2nu), $\tilde{B}$ in [this cell](#btilde), $\hat{\bf p} \cdot \boldsymbol{\xi} r$ in [this cell](#pdotxir), ${\bf S} \cdot \hat{\bf S}_{\rm Kerr}$ in [this cell](#sdotskerrhat), $Q$ in [this cell](#q), and $\boldsymbol{\xi}^{2}$ in [this cell](#sin2theta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HsoTerm1 = exp2nu*(expmu*expnu - Btilde)*pdotxir*SdotSkerrhat/(expmu*Btilde*Btilde*sp.sqrt(Q)*xisq) ``` <a id='hsoterm2coeff'></a> ## Step 4.b: $H_{\rm SO}$ Term 2 Coefficient \[Back to [top](#toc)\] $$\label{hsoterm2coeff}$$ Combining our notation $H_{\rm SO}$ (defined in [this cell](#hso)) with [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.18), we have \begin{equation*} H_{\rm SO}\ {\rm Term\ 2\ coefficient} = \frac{ e^{\nu - 2 \tilde{\mu}} }{ \tilde{B}^{2} \left( \sqrt{Q} + 1 \right) \sqrt{Q} \xi^{2} } \end{equation*} which we write in the form \begin{equation*} H_{\rm SO}\ {\rm Term\ 2\ coefficient} = \frac{ e^{\nu} }{ e^{2 \tilde{\mu}} \tilde{B}^{2} \left( Q + \sqrt{Q} \right) \xi^{2} }. \end{equation*} We define and consider $e^{\nu}$ in [this cell](#exp2nu), $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $\tilde{B}$ in [this cell](#btilde), $Q$ in [this cell](#q), and $\xi^{2}$ in [this cell](#sin2theta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HsoTerm2coeff = expnu/(exp2mu*Btilde*Btilde*(Q + sp.sqrt(Q))*xisq) ``` <a id='hsoterm2'></a> ## Step 4.c: $H_{\rm SO}$ Term 2 \[Back to [top](#toc)\] $$\label{hsoterm2}$$ Combining our notation $H_{\rm SO}$ (defined in [this cell](#hso)) with [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.18), we have \begin{align*} H_{\rm SO}\ {\rm Term\ 2} &= \underbrace{ \left( {\bf S} \cdot \boldsymbol{\xi} \right) \tilde{J} \left[ \mu_r \left( \hat{\bf p} \cdot {\bf v} r \right) \left( \sqrt{Q} + 1 \right) - \mu_{\cos \theta} \left( \hat{\bf p} \cdot {\bf n} \right) \xi^{2} -\sqrt{Q} \left( \nu_r \left( \hat{\bf p} \cdot {\bf v} r \right) + \left( \mu_{\cos \theta} - \nu_{\cos \theta} \right) \left( \hat{\bf p} \cdot {\bf n} \right) \xi^{2} \right) \right] \tilde{B}^{2} }_{H_{\rm SO}\ {\rm Term\ 2a}} \\ &\ \ \ \ \ + \underbrace{ e^{\tilde{\mu} + \nu} \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right) \left( 2 \sqrt{Q} + 1 \right) \left[ \tilde{J} \nu_r \left( {\bf S} \cdot {\bf v} \right) - \nu_{\cos \theta} \left( {\bf S} \cdot {\bf n} \right) \xi^{2} \right] \tilde{B} }_{H_{\rm SO}\ {\rm Term\ 2b}} - \underbrace{ \tilde{J} \tilde{B}_{r} e^{\tilde{\mu} + \nu} \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right) \left( \sqrt{Q} + 1 \right) \left( {\bf S} \cdot {\bf v} \right) }_{H_{\rm SO}\ {\rm Term\ 2c}} \end{align*} We compute $H_{\rm SO}$ Term 2a in [this cell](#hsoterm2a), $H_{\rm SO}$ Term 2b in [this cell](#hsoterm2b), and $H_{\rm SO}$ Term 2c in [this cell](#hsoterm2c). 
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HsoTerm2 = HsoTerm2a + HsoTerm2b - HsoTerm2c ``` <a id='hsoterm2a'></a> ### Step 4.c.i: $H_{\rm SO}$ Term 2a \[Back to [top](#toc)\] $$\label{hsoterm2a}$$ We defined $H_{\rm SO}$ Term 2a in [this cell](#hsoterm2) as \begin{equation*} H_{\rm SO}\ {\rm Term\ 2a} = \left( {\bf S} \cdot \boldsymbol{\xi} \right) \tilde{J} \left[ \mu_r \left( \hat{\bf p} \cdot {\bf v} r \right) \left( \sqrt{Q} + 1 \right) - \mu_{\cos \theta} \left( \hat{\bf p} \cdot {\bf n} \right) \xi^{2} -\sqrt{Q} \left( \nu_r \left( \hat{\bf p} \cdot {\bf v} r \right) + \left( \mu_{\cos \theta} - \nu_{\cos \theta} \right) \left( \hat{\bf p} \cdot {\bf n} \right) \xi^{2} \right) \right] \tilde{B}^{2}. \end{equation*} We define ${\bf S} \cdot \boldsymbol{\xi}$ in [this cell](#sdotxi), $\tilde{J}$ in [this cell](#jtilde), $\mu_{r}$ in [this cell](#mur), $\hat{\bf p} \cdot {\bf v} r$ in [this cell](#pdotvr), $Q$ in [this cell](#q), $\mu_{\cos \theta}$ in [this cell](#mucostheta), $\hat{\bf p} \cdot {\bf n}$ in [this cell](#pdotn), $\xi^{2}$ in [this cell](#sin2theta), $\nu_{r}$ in [this cell](#nur), $\nu_{\cos\theta}$ in [this cell](#nucostheta), and $\tilde{B}$ in [this cell](#btilde). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HsoTerm2a = Sdotxi*Jtilde*(mur*pdotvr*(sp.sqrt(Q) + 1) - mucostheta*pdotn*xisq - sp.sqrt(Q)*(nur*pdotvr + (mucostheta - nucostheta)*pdotn*xisq))*Btilde*Btilde ``` <a id='hsoterm2b'></a> ### Step 4.c.ii: $H_{\rm SO}$ Term 2b \[Back to [top](#toc)\] $$\label{hsoterm2b}$$ We defined $H_{\rm SO}$ Term 2b in [this cell](#hsoterm2) as \begin{equation*} H_{\rm SO}\ {\rm Term\ 2b} = e^{\tilde{\mu} + \nu} \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right) \left( 2 \sqrt{Q} + 1 \right) \left[ \tilde{J} \nu_r \left( {\bf S} \cdot {\bf v} \right) - \nu_{\cos \theta} \left( {\bf S} \cdot {\bf n} \right) \xi^{2} \right] \tilde{B}. \end{equation*} We define $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $e^{\nu}$ in [this cell](#exp2nu), $\hat{\bf p} \cdot \xi r$ in [this cell](#pdotxir), $Q$ in [this cell](#q), $\tilde{J}$ in [this cell](#jtilde), $\nu_{r}$ in [this cell](#nur), ${\bf S} \cdot {\bf v}$ in [this cell](#sdotv), $\nu_{\cos\theta}$ in [this cell](#nucostheta), ${\bf S} \cdot {\bf n}$ in [this cell](#sdotn), $\xi^{2}$ in [this cell](#sin2theta), and $\tilde{B}$ in [this cell](#btilde). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HsoTerm2b = expmu*expnu*pdotxir*(2*sp.sqrt(Q) + 1)*(Jtilde*nur*Sdotv - nucostheta*Sdotn*xisq)*Btilde ``` <a id='hsoterm2c'></a> ### Step 4.c.iii: $H_{\rm SO}$ Term 2c \[Back to [top](#toc)\] $$\label{hsoterm2c}$$ We defined $H_{\rm SO}$ Term 2c in [this cell](#hsoterm2) as \begin{equation*} H_{\rm SO}\ {\rm Term\ 2c} = \tilde{J} \tilde{B}_{r} e^{\tilde{\mu} + \nu} \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right) \left( \sqrt{Q} + 1 \right) \left( {\bf S} \cdot {\bf v} \right) \end{equation*} We define $\tilde{J}$ in [this cell](#jtilde), $\tilde{B}_{r}$ in [this cell](#brtilde), $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $e^{\nu}$ in [this cell](#exp2nu), $\hat{\bf p} \cdot \xi r$ in [this cell](#pdotxir), $Q$ in [this cell](#q), and ${\bf S} \cdot {\bf v}$ in [this cell](#sdotv).
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HsoTerm2c = Jtilde*Brtilde*expmu*expnu*pdotxir*(sp.sqrt(Q) + 1)*Sdotv ``` <a id='hss'></a> # Step 5: The Spin-Spin Term $H_{\rm SS}$ \[Back to [top](#toc)\] $$\label{hss}$$ We will write [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.19) as \begin{equation*} H_{\rm SS} = H_{\rm SS}\ {\rm Term\ 1} + H_{\rm SS}\ {\rm Term\ 2\ coefficient} * H_{\rm SS}\ {\rm Term\ 2} + H_{\rm SS}\ {\rm Term\ 3\ coefficient} * H_{\rm SS}\ {\rm Term\ 3}. \end{equation*} We define $H_{\rm SS}$ Term 1 in [this cell](#hssterm1), $H_{\rm SS}$ Term 2 coefficient in [this cell](#hssterm2coeff), $H_{\rm SS}$ Term 2 in [this cell](#hssterm2), $H_{\rm SS}$ Term 3 coefficient in [this cell](#hssterm3coeff), and $H_{\rm SS}$ Term 3 in [this cell](#hssterm3). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hss = HssTerm1 + HssTerm2coeff*HssTerm2 + HssTerm3coeff*HssTerm3 ``` <a id='hssterm1'></a> ## Step 5.a: $H_{\rm SS}$ Term 1 \[Back to [top](#toc)\] $$\label{hssterm1}$$ Combining [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.19) with our definition of $H_{\rm SS}$ Term 1 in [this cell](#hss), we have \begin{equation*} H_{\rm SS}\ {\rm Term\ 1} = \omega \left( {\bf S} \cdot \hat{\bf S}_{\rm Kerr} \right). \end{equation*} We define $\omega$ in [this cell](#omega) and ${\bf S} \cdot \hat{\bf S}_{\rm Kerr}$ in [this cell](#sdotskerrhat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HssTerm1 = omega*SdotSkerrhat ``` <a id='hssterm2coeff'></a> ## Step 5.b: $H_{\rm SS}$ Term 2 Coefficient \[Back to [top](#toc)\] $$\label{hssterm2coeff}$$ Combining [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.19) with our definition of $H_{\rm SS}$ Term 2 coefficient in [this cell](#hss), we have \begin{equation*} H_{\rm SS}\ {\rm Term\ 2\ coefficient} = \frac{ e^{-3 \tilde{\mu} -\nu} \tilde{J} \omega_{r} }{ 2 \tilde{B} \left( \sqrt{Q} + 1 \right) \sqrt{Q} \xi^{2} } \end{equation*} which we write as \begin{equation*} H_{\rm SS}\ {\rm Term\ 2\ coefficient} = \frac{ \tilde{J} \omega_{r} }{ 2 e^{2 \tilde{\mu}} e^{\tilde{\mu}} e^{\nu} \tilde{B} \left( Q + \sqrt{Q} \right) \xi^{2} }. \end{equation*} We define $\tilde{J}$ in [this cell](#jtilde), $\omega_{r}$ in [this cell](#omegar), $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $e^{\nu}$ in [this cell](#exp2nu), $\tilde{B}$ in [this cell](#btilde), $Q$ in [this cell](#q), and $\xi^{2}$ in [this cell](#sin2theta).
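As an optional sanity check on the rewriting just performed (a standalone SymPy sketch: the symbols below are generic positive stand-ins for the quantities defined later in this notebook, and nothing in this cell is appended to SEOBNR/Hamiltonian-Hreal_on_top.txt), we can confirm that the two forms of the $H_{\rm SS}$ Term 2 coefficient agree:
```
import sympy as sp

mutilde, nu, Jtilde, omegar, Btilde, Q, xisq = sp.symbols('mutilde nu Jtilde omegar Btilde Q xisq', positive=True)

# Form quoted from BB2010 Equation (4.19)
bb2010_form    = sp.exp(-3*mutilde - nu)*Jtilde*omegar/(2*Btilde*(sp.sqrt(Q) + 1)*sp.sqrt(Q)*xisq)
# Rewritten form used in the code cell below
rewritten_form = Jtilde*omegar/(2*sp.exp(2*mutilde)*sp.exp(mutilde)*sp.exp(nu)*Btilde*(Q + sp.sqrt(Q))*xisq)

print(sp.simplify(bb2010_form - rewritten_form))  # should print 0
```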
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HssTerm2coeff = Jtilde*omegar/(2*exp2mu*expmu*expnu*Btilde*(Q + sp.sqrt(Q))*xisq) ``` <a id='hssterm2'></a> ## Step 5.c: $H_{\rm SS}$ Term 2 \[Back to [top](#toc)\] $$\label{hssterm2}$$ Combining [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.19) with our definition of $H_{\rm SS}$ Term 2 in [this cell](#hss), we have \begin{equation*} H_{\rm SS}\ {\rm Term\ 2} = -e^{\tilde{\mu} + \nu} \left( {\bf \hat{p}} \cdot {\bf v} r \right) \left( {\bf \hat{p}} \cdot {\bf \xi} r \right) \left( {\bf S} \cdot {\bf \xi} \right) \tilde{B} + e^{2 \left( \tilde{\mu} + \nu \right)} \left( {\bf \hat{p}} \cdot {\bf \xi} r \right)^2 \left( {\bf S} \cdot {\bf v} \right) + e^{2 \tilde{\mu}} \left( 1 + \sqrt{Q} \right) \sqrt{Q} \left( {\bf S} \cdot {\bf v} \right)\xi^2 \tilde{B}^{2} + \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left[ \left( {\bf \hat{p}} \cdot {\bf v} r \right) \left( {\bf S} \cdot {\bf n}\right) - \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left( {\bf S} \cdot {\bf v} \right)\right] \xi^{2} \tilde{B}^{2} \end{equation*} which we write as \begin{align*} H_{\rm SS}\ {\rm Term\ 2} &= e^{\tilde{\mu}} \left( {\bf \hat{p}} \cdot {\bf \xi} r \right) \left[ e^{\tilde{\mu}} e^{2 \nu} \left( {\bf \hat{p}} \cdot {\bf \xi} r \right) \left( {\bf S} \cdot {\bf v} \right) - e^{\nu} \left( {\bf \hat{p}} \cdot {\bf v} r \right) \left( {\bf S} \cdot {\bf \xi} \right) \tilde{B} \right] \\ &\ \ \ \ \ + \xi^2 \tilde{B}^{2} \left\{ e^{2 \tilde{\mu}} \left( \sqrt{Q} + Q \right) \left( {\bf S} \cdot {\bf v} \right) + \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left[ \left( {\bf \hat{p}} \cdot {\bf v} r \right) \left( {\bf S} \cdot {\bf n}\right) - \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left( {\bf S} \cdot {\bf v} \right)\right] \right\} \end{align*} We define $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $\hat{\bf p} \cdot \boldsymbol{\xi} r$ in [this cell](#pdotxir), $e^{\nu}$ in [this cell](#exp2nu), ${\bf S} \cdot {\bf v}$ in [this cell](#sdotv), $\hat{\bf p} \cdot {\bf v} r$ in [this cell](#pdotvr), ${\bf S} \cdot \boldsymbol{\xi}$ in [this cell](#sdotxi), $\tilde{B}$ in [this cell](#btilde), $Q$ in [this cell](#q), $\tilde{J}$ in [this cell](#jtilde), $\hat{\bf p} \cdot {\bf n}$ in [this cell](#pdotn), ${\bf S} \cdot {\bf n}$ in [this cell](#sdotn), and $\xi^{2}$ in [this cell](#sin2theta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HssTerm2 = expmu*pdotxir*(expmu*exp2nu*pdotxir*Sdotv - expnu*pdotvr*Sdotxi*Btilde) + xisq*Btilde*Btilde*(exp2mu*(sp.sqrt(Q) + Q)*Sdotv + Jtilde*pdotn*(pdotvr*Sdotn - Jtilde*pdotn*Sdotv)) ``` <a id='hssterm3coeff'></a> ## Step 5.d: $H_{\rm SS}$ Term 3 Coefficient \[Back to [top](#toc)\] $$\label{hssterm3coeff}$$ Combining [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.19) with our definition of $H_{\rm SS}$ Term 3 coefficient in [this cell](#hss), we have \begin{equation*} H_{\rm SS}\ {\rm Term\ 3\ coefficient} = \frac{ e^{-3 \tilde{\mu} - \nu} \omega_{\cos\theta} }{ 2 \tilde{B} \left( \sqrt{Q} + 1 \right) \sqrt{Q} } \end{equation*} which we write as \begin{equation*} H_{\rm SS}\ {\rm Term\ 3\ coefficient} = \frac{ \omega_{\cos\theta} }{ 2 e^{2 \tilde{\mu}} e^{\tilde{\mu}} e^{\nu} \tilde{B} \left( Q + \sqrt{Q} \right) }. \end{equation*} We define $\omega_{\cos\theta}$ in [this cell](#omegacostheta), $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $e^{\nu}$ in [this cell](#exp2nu), and $\tilde{B}$ in [this cell](#btilde), $Q$ in [this cell](#q). 
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HssTerm3coeff = omegacostheta/(2*exp2mu*expmu*expnu*Btilde*(Q + sp.sqrt(Q))) ``` <a id='hssterm3'></a> ## Step 5.e: $H_{\rm SS}$ Term 3 \[Back to [top](#toc)\] $$\label{hssterm3}$$ Combining [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.19) with our definition of $H_{\rm SS}$ Term 3 in [this cell](#hss), we have \begin{align*} H_{\rm SS}\ {\rm Term\ 3} &= -e^{2 \left( \tilde{\mu} + \nu \right)} \left( \hat{\bf p} \cdot {\bf \xi} r \right)^{2} \left( {\bf S} \cdot {\bf n} \right) + e^{\tilde{\mu} +\nu} \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left( {\bf \hat{p}} \cdot {\bf \xi} r \right) \left( {\bf S} \cdot {\bf \xi} \right) \tilde{B} \\ &\ \ \ \ \ + \left[ \left( {\bf S} \cdot {\bf n} \right) \left( {\bf \hat{p}} \cdot {\bf v} r \right)^{2} - \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left( {\bf S} \cdot {\bf v} \right) \left( {\bf \hat{p}} \cdot {\bf v} r\right) - e^{2 \tilde{\mu}} \left( 1 + \sqrt{Q} \right) \sqrt{Q} \left( {\bf S} \cdot {\bf n} \right) \xi^{2} \right] \tilde{B}^{2} \end{align*} which we write as \begin{align*} H_{\rm SS}\ {\rm Term\ 3} &= e^{\tilde{\mu}} e^{\nu} \left( \hat{\bf p} \cdot {\bf \xi} r \right) \left[ \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left( {\bf S} \cdot {\bf \xi} \right) \tilde{B} - e^{\tilde{\mu}} e^{\nu} \left( \hat{\bf p} \cdot {\bf \xi} r \right) \left( {\bf S} \cdot {\bf n} \right) \right] \\ &\ \ \ \ \ + \left\{ \left( {\bf \hat{p}} \cdot {\bf v} r \right) \left[ \left( {\bf S} \cdot {\bf n} \right) \left( {\bf \hat{p}} \cdot {\bf v} r \right) - \tilde{J} \left( {\bf \hat{p}} \cdot {\bf n} \right) \left( {\bf S} \cdot {\bf v} \right) \right] - e^{2 \tilde{\mu}} \left( \sqrt{Q} + Q \right) \left( {\bf S} \cdot {\bf n} \right) \xi^{2} \right\} \tilde{B}^{2} \end{align*} We define $e^{\tilde{\mu}}$ in [this cell](#exp2mu), $e^{\nu}$ in [this cell](#exp2nu), $\hat{\bf p} \cdot \boldsymbol{\xi} r$ in [this cell](#pdotxir), $\tilde{J}$ in [this cell](#jtilde), $\hat{\bf p} \cdot {\bf n}$ in [this cell](#pdotn), ${\bf S} \cdot \boldsymbol{\xi}$ in [this cell](#sdotxi), $\tilde{B}$ in [this cell](#btilde), ${\bf S} \cdot {\bf n}$ in [this cell](#sdotn), $\hat{\bf p} \cdot {\bf v} r$ in [this cell](#pdotvr), ${\bf S} \cdot {\bf v}$ in [this cell](#sdotv), $Q$ in [this cell](#q), and $\xi^{2}$ in [this cell](#sin2theta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HssTerm3 = expmu*expnu*pdotxir*(Jtilde*pdotn*Sdotxi*Btilde - expmu*expnu*pdotxir*Sdotn) + (pdotvr*(Sdotn*pdotvr - Jtilde*pdotn*Sdotv) - exp2mu*(sp.sqrt(Q) + Q)*Sdotn*xisq)*Btilde*Btilde ``` <a id='hnsterms'></a> # Step 6: $H_{\rm NS}$ Terms \[Back to [top](#toc)\] $$\label{hnsterms}$$ We collect here the terms in $H_{\rm NS}$ (defined in [this cell](#hns)). <a id='betapsum'></a> ## Step 6.a: $\beta p$ sum \[Back to [top](#toc)\] $$\label{betapsum}$$ We defined the term $\beta p$ sum in [this cell](#hns) as \begin{equation*} \beta p\ {\rm sum} = \beta^{i} p_{i}. \end{equation*} From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.45), we have \begin{equation*} \beta^{i} = \frac{ g^{ti} }{ g^{tt} }, \end{equation*} but from [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.36) we see that $g^{tr} = g^{t \theta} = 0$. Thus only $\beta^{\phi}$ is nonzero. 
Combining [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.45), (5.36e), and (5.36a), we find \begin{equation*} \beta^{\phi} = \frac{ -\frac{ \tilde{\omega}_{\rm fd} }{ \Delta_{t} \Sigma } }{ -\frac{ \Lambda_{t} }{ \Delta_{t} \Sigma } } = \frac{ \tilde{\omega}_{\rm fd} }{ \Lambda_{t} } \end{equation*} Therefore \begin{equation*} \beta^{i} p_{i} = \frac{ \tilde{\omega}_{\rm fd} }{ \Lambda_{t} } p_{\phi}. \end{equation*} We define $\tilde{\omega}_{\rm fd}$ in [this cell](#omegatilde), $\Lambda_{t}$ in [this cell](#lambdat), and $p_{\phi}$ in [this cell](#pphi). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt betapsum = omegatilde*pphi/Lambdat ``` <a id='alpha'></a> ## Step 6.b: $\alpha$ \[Back to [top](#toc)\] $$\label{alpha}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.44), we have \begin{equation*} \alpha = \frac{ 1 }{ \sqrt{ -g^{tt}} }, \end{equation*} and from [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.36a) we have \begin{equation*} g^{tt} = -\frac{ \Lambda_{t} }{ \Delta_{t} \Sigma }. \end{equation*} Therefore \begin{equation*} \alpha = \sqrt{ \frac{ \Delta_{t} \Sigma }{ \Lambda_{t} } }. \end{equation*} We define $\Delta_{t}$ in [this cell](#deltat), $\Sigma$ in [this cell](#usigma), and $\Lambda_{t}$ in [this cell](#lambdat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt alpha = sp.sqrt(Deltat*Sigma/Lambdat) ``` <a id='hnsradicand'></a> ## Step 6.c: $H_{\rm NS}$ radicand \[Back to [top](#toc)\] $$\label{hnsradicand}$$ Recall that we defined $H_{\rm NS}$ radicand in [this cell](#hns) as \begin{equation*} H_{\rm NS}\ {\rm radicand} = \mu^{2} + \underbrace{\gamma^{ij} p_{i} p_{j}}_{\gamma p\ \rm sum} + {\cal Q}_{4} \end{equation*} We define $\mu$ in [this cell](#mu), $\gamma p$ sum in [this cell](#gammappsum), and ${\cal Q}_{4}$ in [this cell](#q4). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hnsradicand = 1 + gammappsum + Q4 ``` <a id='gammappsum'></a> ### Step 6.c.i: $\gamma^{ij} p_{i} p_{j}$ \[Back to [top](#toc)\] $$\label{gammappsum}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.46), we have \begin{equation*} \gamma^{ij} = g^{ij} - \frac{ g^{ti} g^{tj} }{ g^{tt} }. \end{equation*} Combining this result with [BB2010](https://arxiv.org/abs/0912.3517) Equations 5.36, we have \begin{equation*} \gamma^{r\theta} = \gamma^{r\phi} = \gamma^{\theta r} = \gamma^{\theta\phi} = \gamma^{\phi r} = \gamma^{\phi\theta} = 0 \end{equation*} and \begin{align*} \gamma^{rr} &= g^{rr} = \frac{ \Delta_{r} }{ \Sigma } \\ \gamma^{\theta\theta} &= g^{\theta\theta} = \frac{ 1 }{ \Sigma } \\ \gamma^{\phi\phi} &= \frac{ \Sigma }{ \Lambda_{t} \sin^{2} \theta }. \end{align*} Therefore \begin{align*} \gamma^{ij} p_{i} p_{j} &= \gamma^{rr} p_{r} p_{r} + \gamma^{\theta\theta} p_{\theta} p_{\theta} + \gamma^{\phi\phi} p_{\phi} p_{\phi} \\ &= \frac{ \Delta_{r} }{ \Sigma } p_{r}^{2} + \frac{ 1 }{ \Sigma } p_{\theta}^{2} + \frac{ \Sigma }{ \Lambda_{t} \sin^{2} \theta } p_{\phi}^{2}. \end{align*} Converting Boyer-Lindquist coordinates to tortoise coordinates (the transformation for which is found in the Appendix of [P2010](https://arxiv.org/abs/0912.3466v2)), we have \begin{align*} p_{r} &= \hat{\bf p} \cdot {\bf n} \\ p_{\theta} &= \hat{\bf p} \cdot {\bf v} \frac{ r }{ \sin \theta } \\ p_{\phi} &= \hat{\bf p} \cdot \boldsymbol{\xi} r. 
\end{align*} Therefore \begin{equation*} \gamma^{ij} p_{i} p_{j} = \frac{ \Delta_{r} }{ \Sigma } \left( \hat{\bf p} \cdot {\bf n} \right)^{2} + \Sigma^{-1} \left( \hat{\bf p} \cdot {\bf v} \frac{ r }{ \sin \theta } \right)^{2} + \frac{ \Sigma }{ \Lambda_{t} \sin^{2} \theta } \left( \hat{\bf p} \cdot \boldsymbol{\xi} r \right)^{2}. \end{equation*} We define $\Delta_{r}$ in [this cell](#deltar), $\Sigma$ in [this cell](#usigma), $\hat{\bf p} \cdot {\bf n}$ in [this cell](#pdotn), $\hat{\bf p} \cdot {\bf v} r$ in [this cell](#pdotvr), $\sin^{2} \theta$ in [this cell](#sin2theta), $\Lambda_{t}$ in [this cell](#lambdat), and $\hat{\bf p} \cdot \boldsymbol{\xi} r$ in [this cell](#pdotxir). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt gammappsum = Deltar/Sigma*pdotn*pdotn + 1/Sigma*pdotvr*pdotvr/sin2theta + Sigma/Lambdat/sin2theta*pdotxir*pdotxir ``` <a id='q4'></a> ### Step 6.c.ii: ${\cal Q}_{4}$ \[Back to [top](#toc)\] $$\label{q4}$$ From [T2012](https://arxiv.org/abs/1202.0790) Equation (15), \begin{equation*} {\cal Q}_{4} \propto \frac{ p_{r^{*}}^{4} }{ r^{2} } \left( r^{2} + \chi_{\rm Kerr}^{2} \right)^{4}. \end{equation*} We denote $p_{r^{*}}$ by prT. Converting from tortoise coordinates to physical coordinates (the transformation for which is found in the Appendix of [P2010](https://arxiv.org/abs/0912.3466v2)), we find \begin{equation*} {\cal Q}_{4} = \frac{ prT^{4} }{ r^{2} } z_{3} \end{equation*} where $z_{3}$ is found in [D2000](https://arxiv.org/abs/gr-qc/0005034) Equation (4.34): \begin{equation*} z_{3} = 2 \left( 4 - 3 \nu \right) \nu. \end{equation*} In the notation of [BB2010](https://arxiv.org/abs/0912.3517), $\nu = \eta$ (see discussion after [T2012](https://arxiv.org/abs/1202.0790) Equation (2)). Thus \begin{equation*} {\cal Q}_{4} = 2 prT^{4} u^{2} \left( 4 - 3 \eta \right) \eta. \end{equation*} We define prT in [this cell](#prt), $u$ in [this cell](#u), and $\eta$ in [this cell](#eta) below. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Q4 = 2*prT*prT*prT*prT*u*u*(4 - 3*eta)*eta ``` <a id='hdterms'></a> # Step 7: The $H_{\rm D}$ Terms \[Back to [top](#toc)\] $$\label{hdterms}$$ Recall we defined $H_{\rm D}$ in [this cell](#hd) as \begin{equation*} H_{\rm D} = H_{\rm D}\ {\rm coefficient} * H_{\rm D}\ {\rm sum}. \end{equation*} In this step we break down each of $H_{\rm D}$ coefficient (defined in [this cell](#hdcoeff)) and $H_{\rm D}$ sum (defined in [this cell](#hdsum)). <a id='hdcoeff'></a> ## Step 7.a: $H_{\rm D}$ Coefficient \[Back to [top](#toc)\] $$\label{hdcoeff}$$ From our definition of $H_{\rm D}$ in [this cell](#hd), we have \begin{equation*} H_{\rm D}\ {\rm coefficient} = \frac{ \mu }{ 2 M r^{3} }, \end{equation*} and recalling the definition of [$\eta$](#eta) we'll write \begin{equation*} H_{\rm D}\ {\rm coefficient} = \frac{ \eta }{ 2 r^{3} }. \end{equation*} We define $\eta$ in [this cell](#eta) and $r$ in [this cell](#r). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hdcoeff = sp.Rational(1,2)/(r*r*r) ``` <a id='hdsum'></a> ## Step 7.b: $H_{\rm D}$ Sum \[Back to [top](#toc)\] $$\label{hdsum}$$ From our definition of $H_{\rm D}$ in [this cell](#hd), we have \begin{align*} H_{\rm D}\ {\rm sum} &= \left( \delta^{ij} - 3 n^{i} n^{j} \right) S^{*}_{i} S^{*}_{j} \\ &= \underbrace{\delta^{ij} S^{*}_{i} S^{*}_{j}}_{\rm Term\ 1} - \underbrace{3 n^{i} n^{j} S^{*}_{i} S^{*}_{j}}_{\rm Term\ 2}.
\end{align*} We compute $H_{\rm D}$ Term 1 in [this cell](#hdsumterm1) and $H_{\rm D}$ Term 2 in [this cell](#hdsumterm2). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Hdsum = HdsumTerm1 - HdsumTerm2 ``` <a id='hdsumterm1'></a> ### Step 7.b.i: $H_{\rm D}$ Sum Term 1 \[Back to [top](#toc)\] $$\label{hdsumterm1}$$ From our definition of $H_{\rm D}$ sum Term 1 in [this cell](#hdsum), we have \begin{equation*} H_{\rm D}\ {\rm sum\ Term\ 1} = \delta^{ij} S^{*}_{i} S^{*}_{j} \end{equation*} where $\delta^{ij}$ is the Kronecker delta: \begin{equation*} \delta_{ij} = \left\{ \begin{array}{cc} 0, & i \not= j \\ 1, & i = j. \end{array} \right. \end{equation*} Thus we have \begin{equation*} H_{\rm D}\ {\rm sum\ Term\ 1} = S^{*}_{1} S^{*}_{1} + S^{*}_{2} S^{*}_{2} + S^{*}_{3} S^{*}_{3} \end{equation*} We define ${\bf S}^{*}$ in [this cell](#hreal_spin_combos). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HdsumTerm1 = Sstar1*Sstar1 + Sstar2*Sstar2 + Sstar3*Sstar3 ``` <a id='hdsumterm2'></a> ### Step 7.b.ii: $H_{\rm D}$ Sum Term 2 \[Back to [top](#toc)\] $$\label{hdsumterm2}$$ From our definition of $H_{\rm D}$ sum Term 2 in [this cell](#hdsum), we have \begin{align*} H_{\rm D}\ {\rm sum\ Term\ 2} &= 3 n^{i} n^{j} S^{*}_{i} S^{*}_{j} \\ &= 3 \left( {\bf S}^{*} \cdot {\bf n} \right)^{2} \\ \end{align*} We define ${\bf S}^{*} \cdot {\bf n}$ in [this cell](#sstardotn). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt HdsumTerm2 = 3*Sstardotn*Sstardotn ``` <a id='dotproducts'></a> # Step 8: Common Dot Products \[Back to [top](#toc)\] $$\label{dotproducts}$$ What follows are definitions of many common dot products. <a id='sdotskerr'></a> ## Step 8.a: ${\bf S} \cdot {\bf S}_{\rm Kerr}$ \[Back to [top](#toc)\] $$\label{sdotskerr}$$ We have \begin{equation*} {\bf S} \cdot {\bf S}_{\rm Kerr} = S^{1} S^{1}_{\rm Kerr} + S^{2} S^{2}_{\rm Kerr} + S^{3} S^{3}_{\rm Kerr}. \end{equation*} We define ${\bf S}_{\rm Kerr}$ in [this cell](#skerr). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt SdotSkerr = S1*Skerr1 + S2*Skerr2 + S3*Skerr3 ``` <a id='sdotxi'></a> ## Step 8.b: ${\bf S} \cdot \boldsymbol{\xi}$ \[Back to [top](#toc)\] $$\label{sdotxi}$$ We have \begin{equation*} {\bf S} \cdot \boldsymbol{\xi} = S^{1} \xi^{1} + S^{2} \xi^{2} + S^{3} \xi^{3} \end{equation*} We define $\xi$ in [this cell](#xi). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Sdotxi = S1*xi1 + S2*xi2 + S3*xi3 ``` <a id='sdotv'></a> ## Step 8.c: ${\bf S} \cdot {\bf v}$ \[Back to [top](#toc)\] $$\label{sdotv}$$ We have \begin{equation*} {\bf S} \cdot {\bf v} = S^{1} v^{1} + S^{2} v^{2} + S^{3} v^{3}. \end{equation*} We define ${\bf v}$ in [this cell](#v). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Sdotv = S1*v1 + S2*v2 + S3*v3 ``` <a id='sdotn'></a> ## Step 8.d: ${\bf S} \cdot {\bf n}$ \[Back to [top](#toc)\] $$\label{sdotn}$$ We have \begin{equation*} {\bf S} \cdot {\bf n} = S^{1} n^{1} + S^{2} n^{2} + S^{3} n^{3}. \end{equation*} We define ${\bf n}$ in [this cell](#n). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Sdotn = S1*n1 + S2*n2 + S3*n3 ``` <a id='sdotskerrhat'></a> ## Step 8.e: ${\bf S} \cdot \hat{\bf S}_{\rm Kerr}$ \[Back to [top](#toc)\] $$\label{sdotskerrhat}$$ We have \begin{equation*} {\bf S} \cdot \hat{\bf S}_{\rm Kerr} = S^{1} \hat{S}_{\rm Kerr}^{1} + S^{2} \hat{S}_{\rm Kerr}^{2} + S^{3} \hat{S}_{\rm Kerr}^{3}. \end{equation*} We define $\hat{\bf S}_{\rm Kerr}$ in [this cell](#skerrhat). 
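As an aside (a sketch only; the notebook deliberately spells out each dot product so that the generated file mirrors the LALSuite expressions line by line), the same component sums could be produced programmatically with SymPy matrices acting on stand-in component symbols:
```
import sympy as sp

S = sp.Matrix(sp.symbols('S1 S2 S3'))
n = sp.Matrix(sp.symbols('n1 n2 n3'))

# Reproduces the component-by-component sum written out in Step 8.d above
print(S.dot(n))  # S1*n1 + S2*n2 + S3*n3
```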
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt SdotSkerrhat = S1*Skerrhat1 + S2*Skerrhat2 + S3*Skerrhat3 ``` <a id='sstardotn'></a> ## Step 8.f: ${\bf S}^{*} \cdot {\bf n}$ \[Back to [top](#toc)\] $$\label{sstardotn}$$ We have \begin{equation*} {\bf S}^{*} \cdot {\bf n} = {\bf S}^{*}_{1} n_{1} + {\bf S}^{*}_{2} n_{2} + {\bf S}^{*}_{3} n_{3}. \end{equation*} We define ${\bf S}^{*}$ in [this cell](#sstar) and ${\bf n}$ in [this cell](#n). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Sstardotn = Sstar1*n1 + Sstar2*n2 + Sstar3*n3 ``` <a id='hreal_spin_combos'></a> # Step 9: $H_{\rm real}$ Spin Combination ${\bf S}^{*}$ \[Back to [top](#toc)\] $$\label{hreal_spin_combos}$$ We collect here terms defining and containing ${\bf S}^{*}$. <a id='sstar'></a> ## Step 9.a: ${\bf S}^{*}$ \[Back to [top](#toc)\] $$\label{sstar}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.63): \begin{equation*} {\bf S}^{*} = \boldsymbol{\sigma}^{*} + \frac{ 1 }{ c^{2} } \boldsymbol{\Delta}_{\sigma^{*}}. \end{equation*} We define $\boldsymbol{\sigma}^{*}$ in [this cell](#sigmastar) and $\boldsymbol{\Delta}_{\sigma^{*}}$ in [this cell](#deltasigmastar). Please note: after normalization, ${\bf S} = {\bf S}^{*}$. See [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.26). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt S1 = Sstar1 S2 = Sstar2 S3 = Sstar3 Sstar1 = sigmastar1 + Deltasigmastar1 Sstar2 = sigmastar2 + Deltasigmastar2 Sstar3 = sigmastar3 + Deltasigmastar3 ``` <a id='deltasigmastar'></a> ## Step 9.b: $\boldsymbol{\Delta}_{\sigma^{*}}$ \[Back to [top](#toc)\] $$\label{deltasigmastar}$$ We can write $\boldsymbol{\Delta}_{\sigma^{*}}$ as \begin{equation*} \boldsymbol{\Delta}_{\sigma^{*}} = \boldsymbol{\sigma}^{*} \left( \boldsymbol{\sigma}^{*}\ {\rm coefficient} \right) + \boldsymbol{\sigma} \left( \boldsymbol{\sigma}\ {\rm coefficient} \right) \end{equation*} For further dissection, see $\boldsymbol{\sigma}^{*}$ in [this cell](#sigmastar), $\boldsymbol{\sigma}^{*}$ coefficient in [this cell](#sigmastarcoeff), $\boldsymbol{\sigma}$ in [this cell](#sigma), and $\boldsymbol{\sigma}$ coefficient in [this cell](#sigmacoeff). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltasigmastar1 = sigmastar1*sigmastarcoeff + sigma1*sigmacoeff Deltasigmastar2 = sigmastar2*sigmastarcoeff + sigma2*sigmacoeff Deltasigmastar3 = sigmastar3*sigmastarcoeff + sigma3*sigmacoeff ``` <a id='sigmastarcoeff'></a> ## Step 9.c: $\boldsymbol{\sigma}^{*}$ coefficient \[Back to [top](#toc)\] $$\label{sigmastarcoeff}$$ We will break down $\boldsymbol{\sigma}^{*}\ {\rm coefficient}$ into three terms: \begin{equation*} \boldsymbol{\sigma}^{*}\ {\rm coefficient} = \boldsymbol{\sigma}^{*}\ {\rm coefficient\ Term\ 1} + \boldsymbol{\sigma}^{*}\ {\rm coefficient\ Term\ 2} + \boldsymbol{\sigma}^{*}\ {\rm coefficient\ Term\ 3} \end{equation*} We compute $\boldsymbol{\sigma}^{*}$ coefficient Term 1 in [this cell](#sigmastarcoeffterm1), $\boldsymbol{\sigma}^{*}$ coefficient Term 2 in [this cell](#sigmastarcoeffterm2), and $\boldsymbol{\sigma}^{*}$ coefficient Term 3 in [this cell](#sigmastarcoeffterm3). 
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmastarcoeff = sigmastarcoeffTerm1 + sigmastarcoeffTerm2 + sigmastarcoeffTerm3 ``` <a id='sigmastarcoeffterm1'></a> ### Step 9.c.i: $\boldsymbol{\sigma}^{*}$ Coefficient Term 1 \[Back to [top](#toc)\] $$\label{sigmastarcoeffterm1}$$ We build this term from [BB2011](https://arxiv.org/abs/1107.2904) Equation (51) with $b_{0} = 0$ (see discussion preceding [T2012](https://arxiv.org/abs/1202.0790) Equation (4)), where what is listed below is the coefficient on $\boldsymbol{\sigma}^{*}$: \begin{align*} \boldsymbol{\sigma}^{*}\ {\rm coefficient\ Term\ 1} &= \frac{7}{6} \eta \frac{M}{r} + \frac{1}{3} \eta \left( Q - 1 \right) - \frac{5}{2} \eta \frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \\ &= \frac{ \eta }{ 12 } \left( 14 \frac{ M }{ r } + 4 \left( Q - 1 \right) - 30 \frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right) \end{align*} We group together and compute $Q-1$ in [this cell](#q) and $\frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2}$ in [this cell](#drsipn2); we define $r$ in [this cell](#r), $\eta$ in [this cell](#eta), and $M$ in [this cell](#m) below. Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmastarcoeffTerm1 = eta/12*(14/r + 4*Qminus1 - 30*DrSipn2) ``` <a id='sigmastarcoeffterm2'></a> ### Step 9.c.ii: $\boldsymbol{\sigma}^{*}$ Coefficient Term 2 \[Back to [top](#toc)\] $$\label{sigmastarcoeffterm2}$$ We build this term from [BB2011](https://arxiv.org/abs/1107.2904) Equation (52) with all $b_{i} = 0$, $i \in \left\{0, 1, 2, 3\right\}$ (see discussion preceding [T2012](https://arxiv.org/abs/1202.0790) Equation (4)), and just the coefficient on $\boldsymbol{\sigma}^{*}$. In the LALSuite code this is the variable 'sMultiplier1': \begin{align*} \boldsymbol{\sigma}^{*}\ {\rm coefficient\ Term\ 2} &= \frac{1}{36} \left( 353 \eta - 27 \eta^2 \right) \left( \frac{M}{r} \right)^{2} + \frac{5}{3} \left( 3 \eta^2 \right) \frac{ \Delta_{r}^{2} }{ \Sigma^{2} } \left( {\bf n} \cdot \hat{\bf p} \right)^{4} \\ &\ \ \ \ \ + \frac{1}{72} \left( -23 \eta -3 \eta^{2} \right) \left( Q - 1 \right)^{2} + \frac{1}{36} \left( -103 \eta + 60 \eta^{2} \right) \frac{M}{r} \left( Q - 1 \right) \\ &\ \ \ \ \ + \frac{1}{12} \left( 16 \eta - 21 \eta^{2} \right) \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \left( Q - 1 \right) + \frac{1}{12} \left( 47 \eta - 54 \eta^{2} \right) \frac{M}{r} \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \\ &= \frac{ \eta }{ 72 r^{2} } \left[ \left( 706 - 54 \eta \right) M^{2} + 360 \eta r^{2} \frac{ \Delta_{r}^{2} }{ \Sigma^{2} } \left( {\bf n} \cdot \hat{\bf p} \right)^{4} + r^{2} \left( -23 - 3 \eta \right) \left( Q - 1 \right)^{2} + \left( -206 + 120 \eta \right) M r \left( Q - 1 \right) \right. \\ &\ \ \ \ \ + \left. \left( 96 - 126 \eta \right) r^{2} \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \left( Q - 1 \right) + \left( 282 - 324 \eta \right) M r \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right] \\ &= \frac{ \eta }{ 72 r^{2} } \left[ 706 + r \left( -206 M \left( Q - 1 \right) + 282 M \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} + r \left( Q -1 \right) \left( 96 \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} - 23 \left( Q - 1 \right) \right) \right) \right.
\\ &\ \ \ \ \ + \left. \eta \left( -54 M^{2} + r \left( 120 M \left( Q -1 \right) - 324 M \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right.\right.\right. \\ &\ \ \ \ \ + \left.\left.\left. r \left( 360 \frac{ \Delta_{r}^{2} }{ \Sigma^{2} } \left( {\bf n} \cdot \hat{\bf p} \right)^{4} + \left( Q - 1 \right) \left( -126 \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} - 3 \left( Q - 1 \right) \right) \right)\right) \right) \right] \end{align*} We define $r$ in [this cell](#r), $\eta$ in [this cell](#eta), and $M$ in [this cell](#m); we group together and define $Q - 1$ in [this cell](#q), and $\frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2}$ in [this cell](#drsipn2). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmastarcoeffTerm2 = eta/(72*r*r)*(706 + r*(-206*Qminus1 + 282*DrSipn2 + r*Qminus1*(96*DrSipn2 - 23*Qminus1)) + eta*(-54 + r*(120*Qminus1 - 324*DrSipn2 + r*(360*DrSipn2*DrSipn2 + Qminus1*(-126*DrSipn2 - 3*Qminus1))))) ``` <a id='sigmastarcoeffterm3'></a> ### Step 9.c.iii: $\boldsymbol{\sigma}^{*}$ Coefficient Term 3 \[Back to [top](#toc)\] $$\label{sigmastarcoeffterm3}$$ <font color='red'>From an unidentified source,</font> \begin{equation*} \boldsymbol{\sigma}^{*}\ {\rm coefficient\ Term\ 3} \equiv 0. \end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmastarcoeffTerm3 = 0 ``` <a id='sigmacoeff'></a> ## Step 9.d: $\boldsymbol{\sigma}$ coefficient \[Back to [top](#toc)\] $$\label{sigmacoeff}$$ We will break down $\boldsymbol{\sigma}\ {\rm coefficient}$ into three terms: \begin{equation*} \boldsymbol{\sigma}\ {\rm coefficient} = \boldsymbol{\sigma}\ {\rm coefficient\ Term\ 1} + \boldsymbol{\sigma}\ {\rm coefficient\ Term\ 2} + \boldsymbol{\sigma}\ {\rm coefficient\ Term\ 3} \end{equation*} We compute $\boldsymbol{\sigma}$ coefficient Term 1 in [this cell](#sigmacoeffterm1), $\boldsymbol{\sigma}$ coefficient Term 2 in [this cell](#sigmacoeffterm2), and $\boldsymbol{\sigma}$ coefficient Term 3 in [this cell](#sigmacoeffterm3). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmacoeff = sigmacoeffTerm1 + sigmacoeffTerm2 + sigmacoeffTerm3 ``` <a id='sigmacoeffterm1'></a> ### Step 9.d.i: $\boldsymbol{\sigma}$ Coefficient Term 1 \[Back to [top](#toc)\] $$\label{sigmacoeffterm1}$$ We build this term from [BB2011](https://arxiv.org/abs/1107.2904) Equation (51) with $a_{0} = 0$ (see discussion preceding [T2012](https://arxiv.org/abs/1202.0790) Equation (4)), where what is listed below is the coefficient on $\boldsymbol{\sigma}$: \begin{align*} \boldsymbol{\sigma}\ {\rm coefficient\ Term\ 1} &= -\frac{2}{3} \eta \frac{ M }{ r } + \frac{1}{4} \eta \left( Q - 1 \right) - 3 \eta \frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \\ &= \frac{ \eta }{ 12 } \left( -8 \frac{ M }{ r } + 3 \left( Q - 1 \right) - 36 \smash[b]{\underbrace{ \frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} }_{\rm DrSipn2}} \vphantom{\underbrace{a}_{b}} \right) \end{align*} We define $\eta$ in [this cell](#eta), $M$ in [this cell](#m), $Q-1$ in [this cell](#q), and $\frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2}$ in [this cell](#drsipn2). Note that for agreement with LALSuite, we omit the factor of $M$ in our code.
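Before writing this to the Hamiltonian file, a quick standalone check (a sketch; Moverr, Qminus1, and DrSipn2 are generic stand-ins for $M/r$, $Q-1$, and $\frac{ \Delta_r }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2}$, and nothing in this cell is appended to the file) confirms that the $\eta/12$ factoring above reproduces the individual BB2011 coefficients:
```
import sympy as sp

eta, Moverr, Qminus1, DrSipn2 = sp.symbols('eta Moverr Qminus1 DrSipn2')

unfactored = -sp.Rational(2,3)*eta*Moverr + sp.Rational(1,4)*eta*Qminus1 - 3*eta*DrSipn2
factored   = eta/12*(-8*Moverr + 3*Qminus1 - 36*DrSipn2)

print(sp.expand(unfactored - factored))  # should print 0
```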
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmacoeffTerm1 = eta/12*(-8/r + 3*Qminus1 - 36*DrSipn2) ``` <a id='sigmacoeffterm2'></a> ### Step 9.d.ii: $\boldsymbol{\sigma}$ Coefficient Term 2 \[Back to [top](#toc)\] $$\label{sigmacoeffterm2}$$ We build this term from [BB2011](https://arxiv.org/abs/1107.2904) Equation (52) with all $a_{i} = 0$, $i \in \left\{0, 1, 2, 3\right\}$ (see discussion preceding [T2012](https://arxiv.org/abs/1202.0790) Equation (4)), and just the coefficient on $\boldsymbol{\sigma}$: \begin{align*} \boldsymbol{\sigma}\ {\rm coefficient\ Term\ 2} &= \frac{1}{9} \left( -56 \eta -21 \eta^{2} \right) \left( \frac{ M }{ r } \right)^{2} + \frac{5}{24} \left( 27 \eta^{2} \right) \frac{ \Delta_r^{2} }{ \Sigma^{2} } \left( {\bf n} \cdot \hat{\bf p} \right)^{4} \\ &\ \ \ \ \ + \frac{1}{144} \left(-45 \eta \right) \left( Q - 1 \right)^{2} + \frac{1}{36} \left( -109 \eta + 51 \eta^{2} \right) \frac{ M }{ r } \left( Q - 1 \right) \\ &\ \ \ \ \ + \frac{1}{24} \left( 6 \eta - 39\eta^{2} \right) \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \left( Q - 1 \right) + \frac{1}{24} \left( -16 \eta - 147 \eta^{2} \right) \frac{ M }{ r } \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \\ &= \frac{ \eta }{ 144 r^{2} } \left[ -896 M^{2} + r \left( -436 M \left( Q - 1 \right) - 96 M \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right.\right. \\ &\ \ \ \ \ \left.\left. + r \left( -45 \left( Q - 1 \right)^{2} + 36 \left( Q - 1 \right) \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right) \right) + \eta \left( -336 M^{2} + r \left( 204 M \left( Q -1 \right) - 882 M \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right.\right.\right. \\ &\ \ \ \ \ \left.\left.\left. + r \left( 810 \frac{ \Delta_{r}^{2} }{ \Sigma^{2} } \left( {\bf n} \cdot \hat{\bf p} \right)^{4} - 234 \left( Q - 1 \right) \frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2} \right) \right) \right) \right] \end{align*} We define $\eta$ in [this cell](#eta), $M$ in [this cell](#m), $Q - 1$ in [this cell](#q), and $\frac{ \Delta_{r} }{ \Sigma } \left( {\bf n} \cdot \hat{\bf p} \right)^{2}$ in [this cell](#drsipn2). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmacoeffTerm2 = eta/(144*r*r)*(-896 + r*(-436*Qminus1 - 96*DrSipn2 + r*(-45*Qminus1*Qminus1 + 36*Qminus1*DrSipn2)) + eta*(-336 + r*(204*Qminus1 - 882*DrSipn2 + r*(810*DrSipn2*DrSipn2 - 234*Qminus1*DrSipn2)))) ``` <a id='sigmacoeffterm3'></a> ### Step 9.d.iii: $\boldsymbol{\sigma}$ Coefficient Term 3 \[Back to [top](#toc)\] $$\label{sigmacoeffterm3}$$ <font color='red'>From an unidentified source,</font> \begin{equation*} \boldsymbol{\sigma}\ {\rm coefficient\ Term\ 3} = \eta dSO u^{3}. \end{equation*} We define $\eta$ in [this cell](#eta) and $u$ in [this cell](#u). <font color='red'>FIXME: we need a reference for dSO.</font> ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmacoeffTerm3 = eta*dSO*u*u*u ``` <a id='metpotderivs'></a> # Step 10: Derivatives of the Metric Potential \[Back to [top](#toc)\] $$\label{metpotderivs}$$ We collect here terms dependent on derivatives of the metric potential (see [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.47)).
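Since each primed quantity below is entered by hand, it can be useful to spot-check it against a direct SymPy derivative. The following standalone sketch (not appended to the Hamiltonian file) does this for $\Lambda_{t}^{\prime}$ of Step 10.g, treating $\Delta_{t}$ as an unspecified function of $r$ and $\sin^{2}\theta$ as independent of $r$:
```
import sympy as sp

r, a, sin2theta = sp.symbols('r a sin2theta', positive=True)
Deltat = sp.Function('Deltat')(r)

w2      = r**2 + a**2                       # varpi^2 = r^2 + a^2
Lambdat = w2*w2 - a*a*Deltat*sin2theta      # BB2010 Eq. (5.39), as in Step 13.a

Lambdatprm_byhand = 4*(a*a + r*r)*r - a*a*sp.diff(Deltat, r)*sin2theta  # Step 10.g
print(sp.simplify(sp.diff(Lambdat, r) - Lambdatprm_byhand))  # should print 0
```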
<a id='omegar'></a> ## Step 10.a: $\omega_{r}$ \[Back to [top](#toc)\] $$\label{omegar}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47b) we have \begin{equation*} \omega_{r} = \frac{ \Lambda_{t} \tilde{\omega}_{\rm fd}^{\prime} - \Lambda_{t}^{\prime} \tilde{\omega}_{\rm fd} }{ \Lambda_{t}^{2} }. \end{equation*} We define $\Lambda_{t}$ in [this cell](#lambdat), $\tilde{\omega}_{\rm fd}^{\prime}$ in [this cell](#omegatildeprm), $\Lambda_{t}^{\prime}$ in [this cell](#lambdatprm), and $\tilde{\omega}_{\rm fd}$ in [this cell](#omegatilde). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt omegar = (Lambdat*omegatildeprm - Lambdatprm*omegatilde)/(Lambdat*Lambdat) ``` <a id='nur'></a> ## Step 10.b: $\nu_{r}$ \[Back to [top](#toc)\] $$\label{nur}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47c) we have \begin{equation*} \nu_{r} = \frac{ r }{ \Sigma } + \frac{ \varpi^{2} \left( \varpi^{2} \Delta^{\prime}_{t} - 4 r \Delta_{t} \right) }{ 2 \Lambda_{t} \Delta_{t} }. \end{equation*} We define $r$ in [this cell](#r), $\Sigma$ in [this cell](#usigma), $\varpi^{2}$ in [this cell](#w2), $\Delta_{t}^{\prime}$ in [this cell](#deltatprm), $\Delta_{t}$ in [this cell](#deltat), and $\Lambda_{t}$ in [this cell](#lambdat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt nur = r/Sigma + w2*(w2*Deltatprm - 4*r*Deltat)/(2*Lambdat*Deltat) ``` <a id='mur'></a> ## Step 10.c: $\mu_{r}$ \[Back to [top](#toc)\] $$\label{mur}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47d) we have \begin{equation*} \mu_{r} = \frac{ r }{ \Sigma } - \frac{ 1 }{ \sqrt{ \Delta_{r} } }. \end{equation*} We define $r$ in [this cell](#r), $\Sigma$ in [this cell](#usigma), and $\Delta_{r}$ in [this cell](#deltar). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt mur = r/Sigma - 1/sp.sqrt(Deltar) ``` <a id='omegacostheta'></a> ## Step 10.d: $\omega_{\cos\theta}$ \[Back to [top](#toc)\] $$\label{omegacostheta}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47f), we have \begin{equation*} \omega_{\cos\theta} = -\frac{ 2 a^{2} \cos\theta \Delta_{t} \tilde{\omega}_{\rm fd} }{ \Lambda_{t}^{2} }. \end{equation*} We define $a$ in [this cell](#a), $\cos\theta$ in [this cell](#costheta), $\Delta_{t}$ in [this cell](#deltat), $\tilde{\omega}_{\rm fd}$ in [this cell](#omegatilde), and $\Lambda_{t}$ in [this cell](#lambdat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt omegacostheta = -2*a*a*costheta*Deltat*omegatilde/(Lambdat*Lambdat) ``` <a id='nucostheta'></a> ## Step 10.e: $\nu_{\cos\theta}$ \[Back to [top](#toc)\] $$\label{nucostheta}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47g) we have \begin{equation*} \nu_{\cos\theta} = \frac{ a^{2} \varpi^{2} \cos\theta \left( \varpi^{2} - \Delta_{t} \right) }{ \Lambda_{t} \Sigma }. \end{equation*} We define $a$ in [this cell](#a), $\varpi^{2}$ in [this cell](#w2), $\cos\theta$ in [this cell](#costheta), $\Delta_{t}$ in [this cell](#deltat), $\Lambda_{t}$ in [this cell](#lambdat), and $\Sigma$ in [this cell](#usigma). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt nucostheta = a*a*w2*costheta*(w2 - Deltat)/(Lambdat*Sigma) ``` <a id='mucostheta'></a> ## Step 10.f: $\mu_{\cos \theta}$ \[Back to [top](#toc)\] $$\label{mucostheta}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47h) we have \begin{equation*} \mu_{\cos \theta} = \frac{ a^{2} \cos \theta }{ \Sigma }. 
\end{equation*} We define $a$ in [this cell](#a), $\cos \theta$ in [this cell](#costheta), and $\Sigma$ in [this cell](#usigma) below. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt mucostheta = a*a*costheta/Sigma ``` <a id='lambdatprm'></a> ## Step 10.g: $\Lambda_{t}^{\prime}$ \[Back to [top](#toc)\] $$\label{lambdatprm}$$ From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.47), we know that the prime notation indicates a derivative with respect to $r$. Using the definition of $\Lambda_{t}$ in [this cell](#lambdat), we have \begin{equation*} \Lambda_{t}^{\prime} = 4 \left( a^{2} + r^{2} \right) r - a^{2} \Delta_{t}^{\prime} \sin^{2} \theta. \end{equation*} We define $a$ in [this cell](#a), $r$ in [this cell](#r), $\Delta_{t}^{\prime}$ in [this cell](#deltatprm), and $\sin^{2}\theta$ in [this cell](#sin2theta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Lambdatprm = 4*(a*a + r*r)*r - a*a*Deltatprm*sin2theta ``` <a id='omegatildeprm'></a> ## Step 10.h: $\tilde{\omega}_{\rm fd}^{\prime}$ \[Back to [top](#toc)\] $$\label{omegatildeprm}$$ From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47), we know that the prime notation indicates a derivative with respect to $r$. Using the definition of $\tilde{\omega}_{\rm fd}$ in [this cell](#omegatilde), we have \begin{equation*} \tilde{\omega}_{\rm fd}^{\prime} = 2 a M. \end{equation*} We define $a$ in [this cell](#a) and $M$ in [this cell](#m). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt omegatildeprm = 2*a ``` <a id='metpots'></a> # Step 11: The Deformed and Rescaled Metric Potentials \[Back to [top](#toc)\] $$\label{metpots}$$ We collect here terms of the deformed and rescaled metric potentials. See [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.30)--(5.34) and (5.48)--(5.52). <a id='omega'></a> ## Step 11.a: $\omega$ \[Back to [top](#toc)\] $$\label{omega}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.31) we have \begin{equation*} \omega = \frac{ \tilde{\omega}_{\rm fd} }{ \Lambda_{t} }. \end{equation*} We define $\tilde{\omega}_{\rm fd}$ in [this cell](#omegatilde) and $\Lambda_{t}$ in [this cell](#lambdat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt omega = omegatilde/Lambdat ``` <a id='exp2nu'></a> ## Step 11.b: $e^{2\nu}$ and $e^{\nu}$ \[Back to [top](#toc)\] $$\label{exp2nu}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.32), we have \begin{equation*} e^{2 \nu} = \frac{ \Delta_{t} \Sigma }{ \Lambda_t }. \end{equation*} It follows that \begin{equation*} e^{\nu} = \sqrt{ \frac{ \Delta_{t} \Sigma }{ \Lambda_t } }. \end{equation*} We define $\Delta_{t}$ in [this cell](#deltat), $\Sigma$ in [this cell](#usigma), and $\Lambda_{t}$ in [this cell](#lambdat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt expnu = sp.sqrt(exp2nu) exp2nu = Deltat*Sigma/Lambdat ``` <a id='btilde'></a> ## Step 11.c: $\tilde{B}$ \[Back to [top](#toc)\] $$\label{btilde}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.48), we have \begin{equation*} \tilde{B} = \sqrt{ \Delta_{t} }. \end{equation*} We define $\Delta_{t}$ in [this cell](#deltat).
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Btilde = sp.sqrt(Deltat) ``` <a id='brtilde'></a> ## Step 11.d: $\tilde{B}_{r}$ \[Back to [top](#toc)\] $$\label{brtilde}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.49), we have \begin{equation*} \tilde{B}_{r} = \frac{ \sqrt{ \Delta_{r} } \Delta_{t}^{\prime} - 2 \Delta_{t} }{ 2 \sqrt{ \Delta_{r} \Delta_{t} } }. \end{equation*} We define $\Delta_{r}$ in [this cell](#deltar), $\Delta_{t}^{\prime}$ in [this cell](#deltatprm), and $\Delta_{t}$ in [this cell](#deltat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Brtilde = (sp.sqrt(Deltar)*Deltatprm - 2*Deltat)/(2*sp.sqrt(Deltar*Deltat)) ``` <a id='exp2mu'></a> ## Step 11.e: $e^{2\tilde{\mu}}$ and $e^{\tilde{\mu}}$ \[Back to [top](#toc)\] $$\label{exp2mu}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.50), we have \begin{equation*} e^{2 \tilde{\mu}} = \Sigma. \end{equation*} It follows that \begin{equation*} e^{\tilde{\mu}} = \sqrt{ \Sigma }. \end{equation*} We define $\Sigma$ in [this cell](#usigma). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt expmu = sp.sqrt(exp2mu) exp2mu = Sigma ``` <a id='jtilde'></a> ## Step 11.f: $\tilde{J}$ \[Back to [top](#toc)\] $$\label{jtilde}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.51) we have \begin{equation*} \tilde{J} = \sqrt{ \Delta_{r} }. \end{equation*} We define $\Delta_{r}$ in [this cell](#deltar) below. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Jtilde = sp.sqrt(Deltar) ``` <a id='q'></a> ## Step 11.g: $Q$ \[Back to [top](#toc)\] $$\label{q}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.52), \begin{equation*} Q = 1 + \underbrace{ \frac{ \Delta_{r} }{ \Sigma } \left( \hat{\bf p} \cdot {\bf n} \right)^{2} }_{\rm DrSipn2} + \underbrace{ \frac{ \Sigma }{ \Lambda_t \sin^{2} \theta } }_{\rm Q\ coefficient\ 1} \left( \smash[b]{ \underbrace{ \hat{\bf p} \cdot \boldsymbol{\xi} r }_{\rm pdotxir} } \right)^{2} + \underbrace{ \frac{ 1 }{ \Sigma \sin^{2} \theta } }_{\rm Q\ coefficient\ 2} \left( \smash[b]{ \underbrace{ \hat{\bf p} \cdot {\bf v} r }_{\rm pdotvr} } \right)^{2}; \end{equation*} We group together and compute $\frac{ \Delta_{r} }{ \Sigma } \left( \hat{\bf p} \cdot {\bf n} \right)^{2}$ in [this cell](#drsipn2), $\frac{ \Sigma }{ \Lambda_t \sin^{2} \theta }$ in [this cell](#qcoeff1), $\hat{\bf p} \cdot \boldsymbol{\xi} r$ in [this cell](#pdotxir), $\frac{ 1 }{ \Sigma \sin^{2} \theta }$ in [this cell](#qcoeff2), and $\hat{\bf p} \cdot {\bf v} r$ in [this cell](#pdotvr). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Qminus1 = Q - 1 Q = 1 + DrSipn2 + Qcoeff1*pdotxir*pdotxir + Qcoeff2*pdotvr*pdotvr ``` <a id='drsipn2'></a> ### Step 11.g.i: $\frac{ \Delta_{r} }{ \Sigma } \left( \hat{\bf p} \cdot {\bf n} \right)^{2}$ \[Back to [top](#toc)\] $$\label{drsipn2}$$ We define $\Delta_{r}$ in [this cell](#deltar), $\Sigma$ in [this cell](#usigma), and $\hat{\bf p} \cdot {\bf n}$ in [this cell](#pdotn). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt DrSipn2 = Deltar*pdotn*pdotn/Sigma ``` <a id='qcoeff1'></a> ### Step 11.g.ii: Q Coefficient 1 \[Back to [top](#toc)\] $$\label{qcoeff1}$$ We defined $Q$ coefficient 1 in [this cell](#q) as \begin{equation*} Q\ {\rm coefficient\ 1} = \frac{ \Sigma }{ \Lambda_t \sin^{2} \theta } \end{equation*} We define $\Sigma$ in [this cell](#usigma), $\Lambda_{t}$ in [this cell](#lambdat), and $\sin^{2} \theta$ in [this cell](#sin2theta).
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Qcoeff1 = Sigma/(Lambdat*sin2theta) ``` <a id='qcoeff2'></a> ### Step 11.g.iii: Q Coefficient 2 \[Back to [top](#toc)\] $$\label{qcoeff2}$$ We defined $Q$ coefficient 2 in [this cell](#q) as \begin{equation*} Q\ {\rm coefficient\ 2} = \frac{ 1 }{ \Sigma \sin^{2} \theta } \end{equation*} We define $\Sigma$ in [this cell](#usigma) and $\sin^{2} \theta$ in [this cell](#sin2theta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Qcoeff2 = 1/(Sigma*sin2theta) ``` <a id='tort'></a> # Step 12: Tortoise Terms \[Back to [top](#toc)\] $$\label{tort}$$ We collect here terms related to the conversion from Boyer-Lindquist coordinates to tortoise coordinates. Details of the conversion are given in the appendix of [P2010](https://arxiv.org/abs/0912.3466v2). <a id='pphi'></a> ## Step 12.a: $p_{\phi}$ \[Back to [top](#toc)\] $$\label{pphi}$$ From the discussion preceding [BB2010](https://arxiv.org/abs/0912.3517) Equation (3.41), the phi component of the tortoise momentum $p_{\phi}$ is given by \begin{equation*} p_{\phi} = \hat{\bf p} \cdot \boldsymbol{\xi} r. \end{equation*} We define $\hat{\bf p} \cdot \boldsymbol{\xi} r$ in [this cell](#pdotxir). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt pphi = pdotxir ``` <a id='pdotvr'></a> ## Step 12.b: $\hat{\bf p} \cdot {\bf v} r$ \[Back to [top](#toc)\] $$\label{pdotvr}$$ We have \begin{equation*} \hat{\bf p} \cdot {\bf v} r = \left( \hat{p}_{1} v_{1} + \hat{p}_{2} v_{2} + \hat{p}_{3} v_{3} \right) r \end{equation*} We define $\hat{\bf p}$ in [this cell](#hatp), ${\bf v}$ in [this cell](#v), and $r$ in [this cell](#r). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt pdotvr = (phat1*v1 + phat2*v2 + phat3*v3)*r ``` <a id='pdotn'></a> ## Step 12.c: $\hat{\bf p} \cdot {\bf n}$ \[Back to [top](#toc)\] $$\label{pdotn}$$ We have \begin{equation*} \hat{\bf p} \cdot {\bf n} = \hat{p}_{1} n_{1} + \hat{p}_{2} n_{2} + \hat{p}_{3} n_{3} \end{equation*} We define $\hat{\bf p}$ in [this cell](#hatp) and ${\bf n}$ in [this cell](#n). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt pdotn = phat1*n1 + phat2*n2 + phat3*n3 ``` <a id='pdotxir'></a> ## Step 12.d: $\hat{\bf p} \cdot \boldsymbol{\xi} r$ \[Back to [top](#toc)\] $$\label{pdotxir}$$ We have \begin{equation*} \hat{\bf p} \cdot \boldsymbol{\xi} r = \left( \hat{p}_{1} \xi_{1} + \hat{p}_{2} \xi_{2} + \hat{p}_{3} \xi_{3} \right) r \end{equation*} We define $\hat{\bf p}$ in [this cell](#hatp), $\boldsymbol{\xi}$ in [this cell](#xi), and $r$ in [this cell](#r). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt pdotxir = (phat1*xi1 + phat2*xi2 + phat3*xi3)*r ``` <a id='hatp'></a> ## Step 12.e: $\hat{\bf p}$ \[Back to [top](#toc)\] $$\label{hatp}$$ From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (3.41), we have $\hat{\bf p} = {\bf p}/m$ where $m$ is the mass of a nonspinning test particle and ${\bf p}$ is the *conjugate* momentum. Following Lines 319--321 of LALSimIMRSpinEOBHamiltonianPrec.c, we convert the Boyer-Lindquist momentum ${\bf p}$ to the tortoise momentum (see the appendix of [P2010](https://arxiv.org/abs/0912.3466v2)) via \begin{align*} \hat{\bf p} = {\bf p} + {\rm prT} \left( 1 - \frac{1}{\rm csi1} \right) {\bf n} \end{align*} We define prT in [this cell](#prt), csi1 in [this cell](#csi1), and ${\bf n}$ in [this cell](#n).
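As a standalone illustration (a sketch; csi1 and csi2 are defined in Steps 12.g and 12.h below, and nothing here is appended to the Hamiltonian file), the tortoise flag $\tau$ simply switches these conversion factors on or off:
```
import sympy as sp

csi, tortoise = sp.symbols('csi tortoise')

csi2 = 1 + (sp.Rational(1,2) - sp.Rational(1,2)*sp.sign(sp.Rational(3,2) - tortoise))*(csi - 1)
csi1 = 1 + (1 - sp.Abs(1 - tortoise))*(csi - 1)

for tau in (0, 1, 2):
    print(tau, csi1.subs(tortoise, tau), csi2.subs(tortoise, tau))
# expected: tau=0 -> 1, 1;  tau=1 -> csi, 1;  tau=2 -> 1, csi
```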
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt phat1 = p1 + prT*(1 - 1/csi1)*n1 phat2 = p2 + prT*(1 - 1/csi1)*n2 phat3 = p3 + prT*(1 - 1/csi1)*n3 ``` <a id='prt'></a> ## Step 12.f: prT \[Back to [top](#toc)\] $$\label{prt}$$ The first component of the momentum vector, after conversion to tortoise coordinates (see the Appendix of [P2010](https://arxiv.org/abs/0912.3466v2)), is \begin{align*} {\rm prT} = {\rm csi2}\left( p_{1} n_{1} + p_{2} n_{2} + p_{3} n_{3} \right) \end{align*} We define csi2 in [this cell](#csi2) and ${\bf n}$ in [this cell](#n). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt prT = csi2*(p1*n1 + p2*n2 + p3*n3) ``` <a id='csi2'></a> ## Step 12.g: csi2 \[Back to [top](#toc)\] $$\label{csi2}$$ From the transformation to tortoise coordinates in the Appendix of [P2010](https://arxiv.org/abs/0912.3466v2), \begin{equation*} {\rm csi2} = 1 + \left( \frac{1}{2} - \frac{1}{2}{\rm sign}\left( \frac{3}{2} - \tau \right) \right) \left( {\rm csi} - 1 \right) \end{equation*} We define csi in [this cell](#csi); $\tau$ is a tortoise coordinate ($\tau \in \left\{ 0, 1, 2 \right\}$). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt csi2 = 1 + (sp.Rational(1,2) - sp.Rational(1,2)*sp.sign(sp.Rational(3,2) - tortoise))*(csi - 1) ``` <a id='csi1'></a> ## Step 12.h: csi1 \[Back to [top](#toc)\] $$\label{csi1}$$ From the transformation to tortoise coordinates in the Appendix of [P2010](https://arxiv.org/abs/0912.3466v2), \begin{equation*} {\rm csi1} = 1 + \left( 1 - \left\lvert 1 - \tau \right\rvert \right) \left( {\rm csi} - 1 \right) \end{equation*} We define csi in [this cell](#csi); $\tau$ is a tortoise coordinate ($\tau \in \left\{ 0, 1, 2 \right\}$). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt csi1 = 1 + (1 - sp.Abs(1-tortoise))*(csi - 1) ``` <a id='csi'></a> ## Step 12.i: csi \[Back to [top](#toc)\] $$\label{csi}$$ From the transformation to tortoise coordinates in the Appendix of [P2010](https://arxiv.org/abs/0912.3466v2), \begin{equation*} {\rm csi} = \frac{ \sqrt{ \Delta_{t} \Delta_{r} } }{ \varpi^{2} }. \end{equation*} We define $\Delta_{t}$ in [this cell](#deltat), $\Delta_{r}$ in [this cell](#deltar), and $\varpi^{2}$ in [this cell](#w2). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt csi = sp.sqrt(Deltar*Deltat)/w2 ``` <a id='metric'></a> # Step 13: Metric Terms \[Back to [top](#toc)\] $$\label{metric}$$ We collect here terms used to define the deformed Kerr metric. See [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.38)--(5.40) and (5.71)--(5.75). <a id='lambdat'></a> ## Step 13.a: $\Lambda_{t}$ \[Back to [top](#toc)\] $$\label{lambdat}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.39), \begin{equation*} \Lambda_{t} = \varpi^{4} - a^{2} \Delta_{t} \sin^{2} \theta. \end{equation*} We define $\varpi^{2}$ in [this cell](#w2), $a$ in [this cell](#a), $\Delta_{t}$ in [this cell](#deltat), and $\sin^{2}\theta$ in [this cell](#sin2theta). <font color='red'>Note that in the LALSuite code (see LALSimIMRSpinEOBHamiltonianPrec.c line 289) we take the absolute value.</font> ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Lambdat = w2*w2 - a*a*Deltat*sin2theta ``` <a id='deltar'></a> ## Step 13.b: $\Delta_{r}$ \[Back to [top](#toc)\] $$\label{deltar}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.38), \begin{equation*} \Delta_{r} = \Delta_{t} D^{-1}. \end{equation*} We define $\Delta_{t}$ in [this cell](#deltat) and $D^{-1}$ in [this cell](#dinv).
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltar = Deltat*Dinv ``` <a id='deltat'></a> ## Step 13.c: $\Delta_{t}$ \[Back to [top](#toc)\] $$\label{deltat}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.71), we have \begin{equation*} \Delta_{t} = r^{2} \Delta_{u}. \end{equation*} We define $\Delta_{u}$ in [this cell](#deltau) and $r$ in [this cell](#r). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltat = r*r*Deltau ``` <a id='deltatprm'></a> ## Step 13.d: $\Delta_{t}^{\prime}$ \[Back to [top](#toc)\] $$\label{deltatprm}$$ From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47), we know that the prime notation indicates a derivative with respect to $r$. Using the definition of [$\Delta_{t}$](#deltat), we have \begin{equation*} \Delta_{t}^{\prime} = 2 r \Delta_{u} + r^{2} \Delta_{u}^{\prime}. \end{equation*} We define $\Delta_{u}$ in [this cell](#deltau) and $r$ in [this cell](#r). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltatprm = 2*r*Deltau + r*r*Deltauprm ``` <a id='deltau'></a> ## Step 13.e: $\Delta_{u}$ \[Back to [top](#toc)\] $$\label{deltau}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.73), we have \begin{equation*} \Delta_u = \bar{\Delta}_{u} \left[ \smash[b]{\underbrace{ 1 + \eta \Delta_{0} + \eta \log \left( 1 + {\rm logarg} \right) }_{\Delta_{u}\ {\rm calibration\ term}}} \vphantom{\underbrace{1}_{n}} \right] \end{equation*} We compute $\bar{\Delta}_{u}$ in [this cell](#deltaubar) and the $\Delta_{u}$ calibration term and logarg in [this cell](#deltaucalib). <font color='red'>Note that we take the absolute value of $\Delta_{u}$ in the LALSuite code (see LALSimIMRSpinEOBHamiltonianPrec.c line 279). Also note that there are two additional terms in the LALSuite code whose origin is unclear. The LIGO doc "Prescriptions for SEOBNRv2" ("Changes to SEOBNRv1") cites PRD 89 (2014) 061502, which cites BB2011, which does not include any information on these additional terms (as far as I can tell).</font> From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47), we know that primes denote derivatives with respect to $r$. We have \begin{equation*} \Delta_u^{\prime} = \bar{\Delta}^{\prime}_{u} \left( \Delta_{u}\ {\rm calibration\ term} \right) + \bar{\Delta}_{u} \left( \Delta_{u}\ {\rm calibration\ term} \right)^{\prime} \end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltauprm = Deltaubarprm*Deltaucalib + Deltaubar*Deltaucalibprm Deltau = Deltaubar*Deltaucalib ``` <a id='deltaubar'></a> ### Step 13.e.i: $\bar{\Delta}_{u}$ \[Back to [top](#toc)\] $$\label{deltaubar}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.75), we have \begin{equation*} \bar{\Delta}_u = \frac{ a^{2} u^{2} }{ M^{2} } + \frac{ 1 }{ \eta K - 1 } \left( 2 u + \frac{ 1 }{ \eta K - 1 } \right). \end{equation*} We define $a$ in [this cell](#a), $u$ in [this cell](#u), $M$ in [this cell](#m), $\eta$ in [this cell](#eta), and $K$ in [this cell](#k). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47), we know that primes denote derivatives with respect to $r$. We have \begin{equation*} \bar{\Delta}^{\prime}_u = \frac{ -2 a^{2} u^{3} }{ M^{2} } - \frac{ 2 u^{2} }{ \eta K - 1 }.
\end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltaubarprm = -2*a*a*u*u*u - 2*u*u/(etaKminus1) Deltaubar = a*a*u*u + (2*u + 1/etaKminus1)/etaKminus1 ``` <a id='deltaucalib'></a> ### Step 13.e.ii: $\Delta_{u}$ Calibration Term \[Back to [top](#toc)\] $$\label{deltaucalib}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.73), we have \begin{align*} \Delta_u\ {\rm calibration\ term} &= 1 + \eta \Delta_{0} + \eta \log \left( 1 + \Delta_{1} u + \Delta_{2} u^{2} + \Delta_{3} u^{3} + \Delta_{4} u^{4} \right) \\ &= 1 + \eta \left[ \Delta_{0} + \log \left( 1 + \Delta_{1} u + \Delta_{2} u^{2} + \Delta_{3} u^{3} + \Delta_{4} u^{4} \right) \right]. \end{align*} In [Taracchini, Buonanno, et al. (2014)](https://arxiv.org/pdf/1311.2544.pdf) Equation (2), an additional term is added but is only defined in the LIGO document [Changes to SEOBNRv1](https://dcc.ligo.org/DocDB/0114/T1400476/003/ChangesToSEOBNRv1.pdf) (see the subsection "New 4PN term in the radial potential $\Delta_{u}$" under the "Hamiltonian" section). We then have \begin{equation*} \Delta_u\ {\rm calibration\ term} = 1 + \eta \left[ \Delta_{0} + \log \left( 1 + \Delta_{1} u + \Delta_{2} u^{2} + \Delta_{3} u^{3} + \Delta_{4} u^{4} + \Delta_{5} u^{5} \right) \right]. \end{equation*} In the LALSuite code itself (see LALSimIMRSpinEOBHamiltonianPrec.c line 274 on Git commit a70b43d), there's one more term, for which documentation is elusive. That brings us to \begin{equation*} \Delta_u\ {\rm calibration\ term} = 1 + \eta \left[ \Delta_{0} + \log \left( 1 + \underbrace{ \Delta_{1} u + \Delta_{2} u^{2} + \Delta_{3} u^{3} + \Delta_{4} u^{4} + \Delta_{5} u^{5} + \Delta_{5\ell} u^{5} \ln\left(u\right) }_{ \rm logarg } \right) \right]. \end{equation*} Note our notation for logarg. We define $u$ in [this cell](#u), $\eta$ in [this cell](#eta), and the calibration coefficients $\Delta_{i}$, $i \in \left\{0, 1, 2, 3, 4\right\}$, in [this cell](#calib_coeffs). <font color='red'>We need documentation for the final term. As coded in LALSuite, there exists an absolute value inside the logarithm. Why? Is it really safe and/or necessary?</font> From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.47), we know that primes denote derivatives with respect to $r$. We have \begin{equation*} \left( \Delta_u\ {\rm calibration\ term} \right)^{\prime} = \frac{ -\eta u^{2} \left( \Delta_{1} + 2 \Delta_{2} u + 3 \Delta_{3} u^{2} + 4 \Delta_{4} u^{3} + 5 \Delta_{5} u^{4} + 5 \Delta_{5\ell} u^{4} \ln\left( u \right) + \Delta_{5\ell} u^{5} u^{-1} \right) }{ 1 + {\rm logarg} }.
\end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Deltaucalibprm = -eta*u*u*(Delta1 + u*(2*Delta2 + u*(3*Delta3 + u*(4*Delta4 + u*(5*(Delta5 + Delta5l*sp.log(u)))))))/(1 + logarg) Deltaucalib = 1 + eta*(Delta0 + sp.log(1 + logarg)) logarg = u*(Delta1 + u*(Delta2 + u*(Delta3 + u*(Delta4 + u*(Delta5 + Delta5l*sp.log(u)))))) ``` <a id='calib_coeffs'></a> ### Step 13.e.iii: Calibration Coefficients $\Delta_{i}$, $i \in \left\{0, 1, 2, 3, 4\right\}$ \[Back to [top](#toc)\] $$\label{calib_coeffs}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equations (5.77)--(5.81), we have \begin{align*} \Delta_{0} &= K \left( \eta K - 2 \right) \\ \Delta_{1} &= -2 \left( \eta K - 1 \right) \left( K + \Delta_{0} \right) \\ \Delta_{2} &= \frac{1}{2} \Delta_{1} \left( -4 \eta K + \Delta_{1} + 4 \right) - \frac{ a^{2} }{ M^{2} } \left( \eta K - 1 \right)^{2} \Delta_{0}\\ \Delta_{3} &= \frac{1}{3} \left[ -\Delta_{1}^{3} + 3 \left( \eta K - 1 \right) \Delta_{1}^{2} + 3 \Delta_{2} \Delta_{1} - 6 \left( \eta K - 1 \right) \left( -\eta K + \Delta_{2} + 1 \right) - 3 \frac{ a^{2} }{ M^{2} } \left( \eta K - 1 \right)^{2} \Delta_{1} \right] \\ &= -\frac{1}{3}\Delta_{1}^{3} + \left( \eta K - 1 \right) \Delta_{1}^{2} + \Delta_{2} \Delta_{1} - 2 \left( \eta K - 1 \right) \left( \Delta_{2}- \left( \eta K - 1 \right) \right) - \frac{ a^{2} }{ M^{2} } \left( \eta K - 1 \right)^{2} \Delta_{1} \\ \Delta_{4} &= \frac{1}{12} \left\{ 6 \frac{ a^{2} }{ M^{2} } \left( \Delta_{1}^{2} - 2 \Delta_{2} \right) \left( \eta K - 1 \right)^{2} + 3 \Delta_{1}^{4} - 8 \left( \eta K - 1 \right) \Delta_{1}^{3} - 12 \Delta_{2} \Delta_{1}^{2} + 12 \left[ 2 \left( \eta K - 1 \right) \Delta_{2} + \Delta_{3} \right] \Delta_{1} \right.\\ &\left.\ \ \ \ \ + 12 \left( \frac{94}{3} - \frac{41}{32} \pi^{2} \right) \left( \eta K - 1 \right)^{2} + 6 \left[ \Delta_{2}^{2} - 4 \Delta_{3} \left( \eta K - 1 \right) \right] \right\} \\ \Delta_{5} &= \left( \eta K - 1 \right)^{2} \left\{ \left( -\frac{4237}{60} + \frac{128}{5}\gamma + \frac{2275}{512} \pi^{2} - \frac{1}{3} a^{2} \left\{ \Delta_{1}^{3} - 3 \Delta_{1} \Delta_{2} + 3 \Delta_{3} \right\} \right.\right. \\ &\ \ \ \ \ - \frac{ \Delta_{1}^{5} - 5 \Delta_{1}^{3} \Delta_{2} + 5 \Delta_{1} \Delta_{2}^{2} + 5 \Delta_{1}^{2} \Delta_{3} - 5 \Delta_{2} \Delta_{3} - 5 \Delta_{1} \Delta_{4} }{ 5 \left( \eta K - 1 \right)^{2} } \\ &\left.\left.\ \ \ \ \ + \frac{ \Delta_{1}^{4} - 4 \Delta_{1}^{2} \Delta_{2} + 2 \Delta_{2}^{2} + 4 \Delta_{1} \Delta_{3} - 4 \Delta_{4} }{ 2\left( \eta K - 1 \right) } + \frac{256}{5} \log(2) \right) + \frac{64}{5} \log(u) \right\} \\ \Delta_{5\ell} &= \frac{64}{5} \left( \eta K - 1 \right)^{2}. \end{align*} We define $K$ in [this cell](#k), $\eta$ in [this cell](#eta), $a$ in [this cell](#a), and $M$ in [this cell](#m). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. Note that the constant $\gamma$ is the Euler-Mascheroni, and the value is taken from the [LALSuite documentation](https://lscsoft.docs.ligo.org/lalsuite/lal/group___l_a_l_constants__h.html). In the Python code we donote $\gamma$ by EMgamma. 
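Because each $\Delta_{i}$ depends on the lower-order coefficients, a quick standalone numerical sketch can help build intuition for the recursion before the sympy cell below. This aside is illustrative only: it is plain Python, it is not written to the expression file, it omits the factor of $M$ as in the coded expressions, and the inputs $\eta = 0.25$, $a = 0.1$, $K = 0.5$ are assumed sample values, not LALSuite calibration values.

```python
import math

# Illustrative inputs only (assumed values, not the LALSuite calibration):
eta, a, K = 0.25, 0.1, 0.5
etaKminus1 = eta*K - 1

# BB2010 Eqs. (5.77)--(5.81), transcribed directly, with M omitted as in the cell below.
Delta0 = K*(eta*K - 2)
Delta1 = -2*etaKminus1*(K + Delta0)
Delta2 = 0.5*Delta1*(Delta1 - 4*etaKminus1) - a*a*etaKminus1**2*Delta0
Delta3 = (-Delta1**3/3 + etaKminus1*Delta1**2 + Delta2*Delta1
          - 2*etaKminus1*(Delta2 - etaKminus1) - a*a*etaKminus1**2*Delta1)
Delta4 = (1/12)*(6*a*a*(Delta1**2 - 2*Delta2)*etaKminus1**2 + 3*Delta1**4
                 - 8*etaKminus1*Delta1**3 - 12*Delta2*Delta1**2
                 + 12*(2*etaKminus1*Delta2 + Delta3)*Delta1
                 + 12*(94/3 - (41/32)*math.pi**2)*etaKminus1**2
                 + 6*(Delta2**2 - 4*Delta3*etaKminus1))

for name, val in [("Delta0", Delta0), ("Delta1", Delta1), ("Delta2", Delta2),
                  ("Delta3", Delta3), ("Delta4", Delta4)]:
    print(f"{name} = {val:+.6f}")
```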
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Delta5l = etaKminus1*etaKminus1*sp.Rational(64,5) Delta5 = etaKminus1*etaKminus1*((sp.Rational(-4237,60) + sp.Rational(128,5)*EMgamma + sp.Rational(2275,512)*sp.pi*sp.pi - sp.Rational(1,3)*a*a*(Delta1*Delta1*Delta1 - 3*Delta1*Delta2 + 3*Delta3) - (Delta1*Delta1*Delta1*Delta1*Delta1 - 5*Delta1*Delta1*Delta1*Delta2 + 5*Delta1*Delta2*Delta2 + 5*Delta1*Delta1*Delta3 - 5*Delta2*Delta3 - 5*Delta1*Delta4)/(5*etaKminus1*etaKminus1) + (Delta1*Delta1*Delta1*Delta1 - 4*Delta1*Delta1*Delta2 + 2*Delta2*Delta2 + 4*Delta1*Delta3 - 4*Delta4)/(2*etaKminus1) + sp.Rational(256,5)*sp.log(2))) Delta4 = sp.Rational(1,12)*(6*a*a*(Delta1*Delta1 - 2*Delta2)*etaKminus1*etaKminus1 + 3*Delta1*Delta1*Delta1*Delta1 - 8*etaKminus1*Delta1*Delta1*Delta1 -12*Delta2*Delta1*Delta1 + 12*(2*etaKminus1*Delta2 + Delta3)*Delta1 + 12*(sp.Rational(94,3) - sp.Rational(41,32)*sp.pi*sp.pi)*etaKminus1*etaKminus1 + 6*(Delta2*Delta2 - 4*Delta3*etaKminus1)) Delta3 = -sp.Rational(1,3)*Delta1*Delta1*Delta1 + etaKminus1*Delta1*Delta1 + Delta2*Delta1 -2*etaKminus1*(Delta2 - etaKminus1) - a*a*etaKminus1*etaKminus1*Delta1 Delta2 = sp.Rational(1,2)*Delta1*(Delta1 - 4*etaKminus1) - a*a*etaKminus1*etaKminus1*Delta0 Delta1 = -2*etaKminus1*(K + Delta0) Delta0 = K*(eta*K - 2) ``` <a id='k'></a> ### Step 13.e.iv: $K$ \[Back to [top](#toc)\] $$\label{k}$$ Following the discussion in the two paragrahs after [BB2010](https://arxiv.org/abs/0912.3517) Equation (6.11), we have \begin{equation*} K = 1.4467 \left( 1 - 4 \eta \right)^{2} + 4 \left( 1 - 2 \eta \right) \eta. \end{equation*} <font color='red'>There exists a difference between $K$ as defined in BB2010 and what is coded in LALSuite. Has this value been updated?</font> The term $\eta K - 1$ is sufficiently common that we also define it: \begin{equation*} {\rm etaKminus1} = \eta K - 1. \end{equation*} We define $\eta$ in [this cell](#eta) below. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt etaKminus1 = eta*K - 1 K = c20 + c21*eta + c22*(eta*eta) + c23*(eta*eta)*eta c20 = 1.712 c21 = -1.803949138004582 c22 = -39.77229225266885 c23 = 103.16588921239249 ``` <a id='omegatilde'></a> ## Step 13.f: $\tilde{\omega}_{\rm fd}$ \[Back to [top](#toc)\] $$\label{omegatilde}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.40), we have \begin{equation*} \tilde{\omega}_{\rm fd} = 2 a M r + \omega_{1}^{\rm fd} \eta \frac{ a M^{3} }{ r } + \omega_{2}^{\rm fd} \eta \frac{ M a^{3} }{ r }. \end{equation*} From discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (6.7), we set $\omega_{1}^{\rm fd} = \omega_{2}^{\rm fd} = 0$. Thus \begin{equation*} \tilde{\omega}_{\rm fd} = 2 a M r. \end{equation*} We define $a$ in [this cell](#a), $M$ in [this cell](#m), and $r$ in [this cell](#r) below. Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt omegatilde = 2*a*r ``` <a id='dinv'></a> ## Step 13.g: $D^{-1}$ \[Back to [top](#toc)\] $$\label{dinv}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.83), \begin{equation*} D^{-1} = 1 + \log \left[ 1 + 6 \eta u^{2} + 2 \left( 26 - 3 \eta \right) \eta u^{3} \right]. \end{equation*} We define $\eta$ in [this cell](#eta) and $u$ in [this cell](#u). 
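Returning briefly to the red note in Step 13.e.iv above: the mismatch between the $K$ fit quoted from BB2010 and the cubic polynomial coded in that cell is easy to see numerically. The following standalone Python aside (not written to the expression file) evaluates both; the choice $\eta = 0.25$ (equal masses) is an assumption made purely for illustration.

```python
# Illustrative comparison of the two K prescriptions discussed in Step 13.e.iv.
eta = 0.25  # assumed equal-mass value, for illustration only

# Fit quoted from BB2010 (discussion after Eq. (6.11)):
K_BB2010 = 1.4467*(1 - 4*eta)**2 + 4*(1 - 2*eta)*eta

# Cubic-in-eta polynomial with the c20..c23 coefficients used in the expression file:
c20, c21, c22, c23 = 1.712, -1.803949138004582, -39.77229225266885, 103.16588921239249
K_coded = c20 + c21*eta + c22*eta**2 + c23*eta**3

print(f"K (BB2010 fit)       = {K_BB2010:.6f}")
print(f"K (coded polynomial) = {K_coded:.6f}")  # the two values differ, as the red note warns
```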
``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Dinv = 1 + sp.log(1 + 6*eta*u*u + 2*(26 - 3*eta)*eta*u*u*u) ``` <a id='coord'></a> # Step 14: Terms Dependent on Coordinates \[Back to [top](#toc)\] $$\label{coord}$$ We collect here terms directly dependent on the coordinates. See [BB2010](https://arxiv.org/abs/0912.3517) Equations (4.5) and (4.6). <a id='usigma'></a> ## Step 14.a: $\Sigma$ \[Back to [top](#toc)\] $$\label{usigma}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.5), we have \begin{equation*} \Sigma = r^{2} + a^{2} \cos^{2} \theta. \end{equation*} We define $r$ in [this cell](#r), $a$ in [this cell](#a), and $\cos \theta$ in [this cell](#costheta). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Sigma = r*r + a*a*costheta*costheta ``` <a id='w2'></a> ## Step 14.b: $\varpi^{2}$ \[Back to [top](#toc)\] $$\label{w2}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.7), \begin{equation*} \varpi^{2} = a^{2} + r^{2}. \end{equation*} We define $a$ in [this cell](#a) and $r$ in [this cell](#r). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt w2 = a*a + r*r ``` <a id='sin2theta'></a> ## Step 14.d: $\sin^{2} \theta$ \[Back to [top](#toc)\] $$\label{sin2theta}$$ Using a common trigonometric identity, \begin{equation*} \sin^{2} \theta = 1 - \cos^{2} \theta. \end{equation*} We define $\cos \theta$ in [this cell](#costheta). Note that by construction (from discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.52)) \begin{equation*} \xi^{2} = \sin^{2} \theta. \end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt xisq = sin2theta sin2theta = 1 - costheta*costheta ``` <a id='costheta'></a> ## Step 14.e: $\cos \theta$ \[Back to [top](#toc)\] $$\label{costheta}$$ From the discussion in [BB2010](https://arxiv.org/abs/0912.3517) after equation (5.52) (noting that ${\bf e}_{3} = \hat{\bf S}_{\rm Kerr}$), \begin{equation*} \cos \theta = {\bf e}_{3} \cdot {\bf n} = {\bf e}_{3}^{1} n^{1} + {\bf e}_{3}^{2} n^{2} + {\bf e}_{3}^{3} n^{3}. \end{equation*} We define ${\bf e}_{3}$ in [this cell](#e3) and ${\bf n}$ in [this cell](#n). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt costheta = e31*n1 + e32*n2 + e33*n3 ``` <a id='vectors'></a> # Step 15: Important Vectors \[Back to [top](#toc)\] $$\label{vectors}$$ We collect the vectors common for computing $H_{\rm real}$ (defined in [this cell](#hreal)) below. <a id='v'></a> ## Step 15.a: ${\bf v}$ \[Back to [top](#toc)\] $$\label{v}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (3.39), we have \begin{equation*} {\bf v} = {\bf n} \times \boldsymbol{\xi}. \end{equation*} We define ${\bf n}$ in [this cell](#n) and $\boldsymbol{\xi}$ in [this cell](#xi). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt v1 = n2*xi3 - n3*xi2 v2 = n3*xi1 - n1*xi3 v3 = n1*xi2 - n2*xi1 ``` <a id='xi'></a> ## Step 15.b: $\boldsymbol{\xi}$ \[Back to [top](#toc)\] $$\label{xi}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (3.38), we have \begin{equation*} \boldsymbol{\xi} = {\bf e}_{3} \times {\bf n}. \end{equation*} We define ${\bf e}_{3}$ in [this cell](#e3) and ${\bf n}$ in [this cell](#n). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt xi1 = e32*n3 - e33*n2 xi2 = e31*n3 + e33*n1 xi3 = e31*n2 - e32*n1 ``` <a id='e3'></a> ## Step 15.c: ${\bf e}_{3}$ \[Back to [top](#toc)\] $$\label{e3}$$ From the discussion in [BB2010](https://arxiv.org/abs/0912.3517) after equation (5.52), \begin{equation*} {\bf e}_{3} = \hat{\bf S}_{\rm Kerr}.
\end{equation*} We define $\hat{\bf S}_{\rm Kerr}$ in [this cell](#skerrhat). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt e31 = Skerrhat1 e32 = Skerrhat2 e33 = Skerrhat3 ``` <a id='n'></a> ## Step 15.d: ${\bf n}$ \[Back to [top](#toc)\] $$\label{n}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (3.37), we have \begin{equation*} {\bf n} = \frac{\bf x }{ r } \end{equation*} where ${\bf x} = (x, y, z)$. We define $r$ in [this cell](#r). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt n1 = x/r n2 = y/r n3 = z/r ``` <a id='spin_combos'></a> # Step 16: Spin Combinations $\boldsymbol{\sigma}$, $\boldsymbol{\sigma}^{*}$, and ${\bf S}_{\rm Kerr}$ \[Back to [top](#toc)\] $$\label{spin_combos}$$ We collect here various combinations of the spins. <a id='a'></a> ## Step 16.a: $a$ \[Back to [top](#toc)\] $$\label{a}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.9), we have \begin{equation*} a = \frac{ \left\lvert {\bf S}_{\rm Kerr} \right\rvert }{ M }. \end{equation*} We define $\left\lvert{\bf S}_{\rm Kerr}\right\rvert$ in [this cell](#skerrmag) and $M$ in [this cell](#m). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt a = Skerrmag ``` <a id='skerrhat'></a> ## Step 16.b: $\hat{\bf S}_{\rm Kerr}$ \[Back to [top](#toc)\] $$\label{skerrhat}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (4.24), we have \begin{equation*} \hat{\bf S}_{\rm Kerr} = \frac{ {\bf S}_{\rm Kerr} }{ \left\lvert {\bf S}_{\rm Kerr} \right\rvert }. \end{equation*} We define ${\bf S}_{\rm Kerr}$ in [this cell](#skerr). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Skerrhat1 = Skerr1/Skerrmag Skerrhat2 = Skerr2/Skerrmag Skerrhat3 = Skerr3/Skerrmag ``` <a id='skerrmag'></a> ## Step 16.c: $\left\lvert {\bf S}_{\rm Kerr} \right\rvert$ \[Back to [top](#toc)\] $$\label{skerrmag}$$ We have \begin{equation*} \left\lvert {\bf S}_{\rm Kerr} \right\rvert = \sqrt{ {\bf S}_{\rm Kerr}^{1} {\bf S}_{\rm Kerr}^{1} + {\bf S}_{\rm Kerr}^{2} {\bf S}_{\rm Kerr}^{2} + {\bf S}_{\rm Kerr}^{3} {\bf S}_{\rm Kerr}^{3} }. \end{equation*} We define ${\bf S}_{\rm Kerr}$ in [this cell](#skerr). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Skerrmag = sp.sqrt(Skerr1*Skerr1 + Skerr2*Skerr2 + Skerr3*Skerr3) ``` <a id='skerr'></a> ## Step 16.d: ${\bf S}_{\rm Kerr}$ \[Back to [top](#toc)\] $$\label{skerr}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.64): \begin{equation*} {\bf S}_{\rm Kerr} = \boldsymbol{\sigma} + \frac{ 1 }{ c^{2} } \boldsymbol{\Delta}_{\sigma}. \end{equation*} In [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.67), $\boldsymbol{\Delta}_{\sigma} = 0$. Thus \begin{equation*} {\bf S}_{\rm Kerr} = \boldsymbol{\sigma}. \end{equation*} We define $\boldsymbol{\sigma}$ in [this cell](#sigma). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt Skerr1 = sigma1 Skerr2 = sigma2 Skerr3 = sigma3 ``` <a id='sigma'></a> ## Step 16.e: $\boldsymbol{\sigma}$ \[Back to [top](#toc)\] $$\label{sigma}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.2): \begin{equation*} \boldsymbol{\sigma} = {\bf S}_{1} + {\bf S}_{2}. 
\end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigma1 = S1x + S2x sigma2 = S1y + S2y sigma3 = S1z + S2z ``` <a id='sigmastar'></a> ## Step 16.f: $\boldsymbol{\sigma}^{*}$ \[Back to [top](#toc)\] $$\label{sigmastar}$$ From [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.3): \begin{equation*} \boldsymbol{\sigma}^{*} = \frac{ m_{2} }{ m_{1} } {\bf S}_{1} + \frac{ m_{1} }{ m_{2} }{\bf S}_{2}. \end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt sigmastar1 = m2/m1*S1x + m1/m2*S2x sigmastar2 = m2/m1*S1y + m1/m2*S2y sigmastar3 = m2/m1*S1z + m1/m2*S2z ``` <a id='fundquant'></a> # Step 17: Fundamental Quantities \[Back to [top](#toc)\] $$\label{fundquant}$$ We collect here fundamental quantities from which we build $H_{\rm real}$ (defined in [this cell](#Hreal)). <a id='u'></a> ## Step 17.a: $u$ \[Back to [top](#toc)\] $$\label{u}$$ From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.40), \begin{equation*} u = \frac{ M }{ r }. \end{equation*} We define $M$ in [this cell](#m) and $r$ in [this cell](#r). Note that for agreement with LALSuite, we omit the factor of $M$ in our code. ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt u = 1/r ``` <a id='r'></a> ## Step 17.b: $r$ \[Back to [top](#toc)\] $$\label{r}$$ From the discussion after [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.52), \begin{equation*} r = \sqrt{ x^{2} + y^{2} + z^{2} }. \end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt r = sp.sqrt(x*x + y*y + z*z) ``` <a id='eta'></a> ## Step 17.c: $\eta$ \[Back to [top](#toc)\] $$\label{eta}$$ From the discussion preceding [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.1), \begin{equation*} \eta = \frac{ \mu }{ M }. \end{equation*} We define $\mu$ in [this cell](#mu). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt eta = mu/M ``` <a id='mu'></a> ## Step 17.d: $\mu$ \[Back to [top](#toc)\] $$\label{mu}$$ From the discussion preceding [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.1), \begin{equation*} \mu = \frac{ m_{1} m_{2} }{ M }. \end{equation*} We define $M$ in [this cell](#m). ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt mu = m1*m2/M ``` <a id='m'></a> ## Step 17.e: $M$ \[Back to [top](#toc)\] $$\label{m}$$ From the discussion preceding [BB2010](https://arxiv.org/abs/0912.3517) Equation (5.1), \begin{equation*} M = m_{1} + m_{2}. \end{equation*} ``` %%writefile -a SEOBNR/Hamiltonian-Hreal_on_top.txt M = m1 + m2 ``` <a id='validation'></a> # Step 18: Validation \[Back to [top](#toc)\] $$\label{validation}$$ The following code cell reverses the order of the expressions output to SEOBNR/Hamiltonian_on_top.txt and creates a Python function to validate the value of $H_{\rm real}$ against the SEOBNRv3 Hamiltonian value computed in LALSuite git commit bba40f21e9 for command-line input parameters -M 23 -m 10 -f 20 -X 0.01 -Y 0.02 -Z -0.03 -x 0.04 -y -0.05 -z 0.06. ``` import numpy as np import difflib, sys, os # The subterms in the Hamiltonian expression are sometimes written on more than # one line for readability in this Jupyter notebook. We first create a file of # one-line expressions, Hamiltonian-Hreal_one_line_expressions.txt. 
with open('SEOBNR/Hamiltonian-Hreal_one_line_expressions.txt', 'w') as output: count = 0 # Read output of this notebook for line in list(open("SEOBNR/Hamiltonian-Hreal_on_top.txt")): # Read the first line if count == 0: prevline=line #Check if prevline is a complete expression elif "=" in prevline and "=" in line: output.write("%s\n" % prevline.strip('\n')) prevline=line # Check if line needs to be adjoined to prevline elif "=" in prevline and not "=" in line: prevline = prevline.strip('\n') prevline = (prevline+line).replace(" ","") # Be sure to print the last line. if count == len(list(open("SEOBNR/Hamiltonian-Hreal_on_top.txt")))-1: if not "=" in line: print("ERROR. Algorithm not robust if there is no equals sign on the final line. Sorry.") sys.exit(1) else: output.write("%s" % line) count = count + 1 # Now reverse the expressions and write them in a function # This formulation is used to check that we get a reasonable H_real value with open('SEOBNR/Hreal_on_bottom.py', 'w') as output: output.write("import numpy as np\ndef compute_Hreal(m1=23., m2=10., EMgamma=0.577215664901532860606512090082402431, tortoise=1, dSO=-7.966696593617955e+01, dSS=1.261873764525631e+01, x=2.129681018601393e+01, y=0.000000000000000e+00, z=0.000000000000000e+00, p1=0.000000000000000e+00, p2=2.335391115580442e-01, p3=-4.235164736271502e-22, S1x=4.857667584940312e-03, S1y=9.715161660389764e-03, S1z=-1.457311842632286e-02, S2x=3.673094582185491e-03, S2y=-4.591302628615413e-03, S2z=5.509696538546906e-03):\n") for line in reversed(list(open("SEOBNR/Hamiltonian-Hreal_one_line_expressions.txt"))): output.write("\t%s\n" % line.rstrip().replace("sp.sqrt", "np.sqrt").replace("sp.Rational", "np.divide").replace("sp.abs", "np.abs").replace("sp.log", "np.log").replace("sp.sign", "np.sign").replace("sp.pi", "np.pi")) output.write("\treturn Hreal") # Now reverse the expressions in a standalone text file # This formulation is used as a harsher validation check that all expressions agree with a trusted list with open('SEOBNR/Hamiltonian_expressions.txt-VALIDATION', 'w') as output: for line in reversed(list(open("SEOBNR/Hamiltonian-Hreal_one_line_expressions.txt"))): output.write("\t%s\n" % line.rstrip().replace("sp.sqrt", "np.sqrt").replace("sp.Rational", "np.divide").replace("sp.abs", "np.abs").replace("sp.log", "np.log").replace("sp.sign", "np.sign").replace("sp.pi", "np.pi")) print("Printing difference between notebook output and a trusted list of expressions...") # Open the files to compare # TYLERK: ask if we want a separate directory for validation valdir = "SEOBNR/" outdir = "SEOBNR/" file = "Hamiltonian_expressions.txt" outfile = "Hamiltonian_expressions.txt-VALIDATION" print("Checking file " + outfile) with open(os.path.join(valdir+file)) as file1, open(os.path.join(outdir+outfile)) as file2: # Read the lines of each file file1_lines = file1.readlines() file2_lines = file2.readlines() num_diffs = 0 for line in difflib.unified_diff(file1_lines, file2_lines, fromfile=os.path.join(valdir+file), tofile=os.path.join(outdir+outfile)): sys.stdout.writelines(line) num_diffs = num_diffs + 1 if num_diffs == 0: print("No difference. TEST PASSED!") else: print("ERROR: Disagreement found with the trusted file. See differences above.") # Import the new Hamiltonian function and the trusted Hamiltonian function # TYLERK: maybe remove this later? import SEOBNR.Hreal_trusted_computation as Hreal_trusted import SEOBNR.Hreal_on_bottom as Hreal_new # Compute the trusted and new Hamiltonian values; compare; exit if they disagree! 
Hreal = Hreal_trusted.compute_Hreal() Hreal_temp = Hreal_new.compute_Hreal() if(np.abs(Hreal-Hreal_temp)>1e-14): print("ERROR. You have broken the Hamiltonian computation!") sys.exit(1) ``` <a id='latex_pdf_output'></a> # Step 19: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] $$\label{latex_pdf_output}$$ The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-SEOBNR_Documentation.pdf](Tutorial-SEOBNR_Documentation.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ``` !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-SEOBNR_Documentation.ipynb !pdflatex -interaction=batchmode Tutorial-SEOBNR_Documentation.tex !pdflatex -interaction=batchmode Tutorial-SEOBNR_Documentation.tex !pdflatex -interaction=batchmode Tutorial-SEOBNR_Documentation.tex !rm -f Tut*.out Tut*.aux Tut*.log ```
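As an optional follow-up to the validation in Step 18, the auto-generated module can also be exercised directly. The sketch below assumes the cells above have already run, so that SEOBNR/Hreal_on_bottom.py exists; the perturbed momentum value is an arbitrary illustrative choice, not a LALSuite reference point.

```python
# Optional sanity exercise of the auto-generated Hamiltonian function.
# Assumes SEOBNR/Hreal_on_bottom.py was produced by the cells above.
import importlib
import SEOBNR.Hreal_on_bottom as Hreal_generated

importlib.reload(Hreal_generated)  # pick up the freshly written file

# The default keyword values reproduce the LALSuite comparison point of Step 18.
print("Hreal at the validation point:", Hreal_generated.compute_Hreal())

# The keyword arguments in the generated signature can be overridden; the value
# below is simply a nearby momentum chosen to show that Hreal responds smoothly.
print("Hreal at a nearby point      :", Hreal_generated.compute_Hreal(p2=0.24))
```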
# Background Scan Operation How to run update operations on a namespace in the background. This notebook requires an Aerospike database running locally and that the Java kernel has been installed. Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) for additional details and the docker container. ## Ensure database is running This notebook requires that the Aerospike database is running. ``` import io.github.spencerpark.ijava.IJava; import io.github.spencerpark.jupyter.kernel.magic.common.Shell; IJava.getKernelInstance().getMagics().registerMagics(Shell.class); %sh asd ``` ## Download Aerospike client from POM ``` %%loadFromPOM <dependencies> <dependency> <groupId>com.aerospike</groupId> <artifactId>aerospike-client</artifactId> <version>5.0.0</version> </dependency> </dependencies> ``` ## Initialize the client and populate test data Initialize the client and connect to the cluster. The configuration is for an Aerospike database running on port 3000 of localhost, which is the default. Modify config if your environment is different (Aerospike database running on a different host or different port). ``` import com.aerospike.client.AerospikeClient; import com.aerospike.client.policy.WritePolicy; import com.aerospike.client.Bin; import com.aerospike.client.Key; AerospikeClient client = new AerospikeClient("localhost", 3000); System.out.println("Initialized the client and connected to the cluster."); String Namespace = "test"; String Set = "background-scan-update"; // add records with keys "id-1" to "id-10" and // bins bin1 (integer values 1-10) and bin2 (integer values 1001-1010). WritePolicy wpolicy = new WritePolicy(); wpolicy.sendKey = true; for (int i = 1; i <= 10; i++) { Key key = new Key(Namespace, Set, "id-"+i); Bin bin1 = new Bin(new String("bin1"), i); Bin bin2 = new Bin(new String("bin2"), 1000+i); client.put(wpolicy, key, bin1, bin2); } System.out.format("Test data populated.\n"); ``` # Scan and Update Namespace Multi-record updates and deletes can be done in two ways: 1. List of bin updates: A list of update operations can be specified, each operating on a bin. The list can be specified in two ways: 1. As part of the Statement object where the query (index) filter is also specified. <pre> Statement::setOperations(Operation[] ops) AerospikeClient::execute(WritePolicy policy, Statement statement) </pre> 2. As an operation list parameter in the execute call. <pre> AerospikeClient::execute(WritePolicy policy, Statement statement, Operation[] ops) </pre> 2. User Defined Function (UDF): Record-oriented UDFs implement arbitrary logic in a Lua function that is registered with the server and invoked through an API call. <pre> ExecuteTask execute(WritePolicy policy, Statement statement, String packageName, String functionName, Value...
functionArgs) </pre> ``` import com.aerospike.client.AerospikeClient; import com.aerospike.client.policy.WritePolicy; import com.aerospike.client.Bin; import com.aerospike.client.Key; import com.aerospike.client.query.Statement; import com.aerospike.client.Operation; import com.aerospike.client.task.ExecuteTask; int POLL_INTERVAL_MS = 3000; int WAIT_TILL_COMPLETE_MS = 100000; enum ExecOption{STMT, EXEC_ARG, UDF}; WritePolicy wpolicy = new WritePolicy(); wpolicy.sendKey = true; wpolicy.maxRetries = 0; // specify TTL here wpolicy.expiration = -2; void scanAndUpdate(String seedNodeIp, String namespace, Operation[] ops, ExecOption option) { System.out.format("Performing scan-and-update on namespace %s with option %s.\n", namespace, option); AerospikeClient client = new AerospikeClient(seedNodeIp, 3000); Statement stmt = new Statement(); ExecuteTask task; stmt.setNamespace(namespace); switch(option) { case STMT: stmt.setOperations(ops); task = client.execute(wpolicy, stmt); task.waitTillComplete(POLL_INTERVAL_MS, WAIT_TILL_COMPLETE_MS); break; case EXEC_ARG: task = client.execute(wpolicy, stmt, ops); task.waitTillComplete(POLL_INTERVAL_MS, WAIT_TILL_COMPLETE_MS); break; case UDF: // plug in UDFModule, UDFFunction (assumes you have registered the module separately) //task = client.execute(wpolicy, stmt, "UDFModule", "UDFFunction"); //task.waitTillComplete(POLL_INTERVAL_MS, WAIT_TILL_COMPLETE_MS); break; default: System.out.format("Error: wrong ExecOption\n"); } client.close(); System.out.format("Scan-and-update done.\n");; } String seedNode = "127.0.0.1"; String namespace = "test"; Operation ops[] = { Operation.touch() }; scanAndUpdate(seedNode, namespace, ops, ExecOption.STMT); scanAndUpdate(seedNode, namespace, ops, ExecOption.EXEC_ARG); scanAndUpdate(seedNode, namespace, ops, ExecOption.UDF); ``` ## Clean up Finally close the client connection. ``` client.truncate(null, Namespace, null, null); client.close(); System.out.println("Removed tutorial data and closed server connection."); ``` ## Next steps Visit [Aerospike notebooks repo](https://github.com/aerospike-examples/interactive-notebooks) to run additional Aerospike notebooks. To run a different notebook, download the notebook from the repo to your local machine, and then click on File->Open, and select Upload.
``` import numpy as np from matplotlib import pyplot as plt import copy #This corresponds to pic in book arr = [[[-1,1],[-1,1],[1,-1],[-1,1]], [[-1,-1],[-1,-1],[-1,1],[1,-1]], [[-1,1],[-1,1],[1,1],[-1,1]], [[-1,1],[-1,1],[1,1],[-1,1]]] arr = np.array(arr) def initialise_state(N): #N is the grid dimension (in the above example, N=4) ''' Author: Siddharth Bachoti ~Function Description~ ''' grid = np.ones((N,N,2),dtype=int) return np.array(grid) def plot_vector(p1,p2): ''' Author: Siddharth Chaini ''' p1 = np.array(p1) p2 = np.array(p2) dp = p2-p1 plt.quiver(p1[0], p1[1], dp[0], dp[1],angles='xy', scale_units='xy', scale=1, headwidth = 5, headlength = 7) def get_coord_list(arr): ''' Author: Siddharth Chaini ''' coord_list=[] num = len(arr) for i in range(num): temp_coord = [] for j in range(num): current_elems = arr[i][j] xpt = (num-1)-i ypt = j temp_coord.append((xpt,ypt)) coord_list.append(temp_coord) return coord_list def visualise_2d_model(arr): ''' Author: Siddharth Chaini ''' num = len(arr) plt.axes().set_aspect('equal') coord_list = get_coord_list(arr) for i in range(num): for j in range(num): current_up_state = arr[i][j][0] current_right_state = arr[i][j][1] x_current = coord_list[i][j][1] y_current = coord_list[i][j][0] lower_neighbour_up_state = arr[(i+1)%num][j][0] x_up = coord_list[(i+1)%num][j][1] y_up = coord_list[(i+1)%num][j][0] left_neighbour_right_state = arr[i][j-1][1] x_left = coord_list[i][j-1][1] y_left = coord_list[i][j-1][0] current_down_state = -(lower_neighbour_up_state) current_left_state = -(left_neighbour_right_state) plt.plot(x_current,y_current,'ob') if current_up_state == 1: plot_vector([x_current,y_current],[x_current,y_current+1]) elif current_up_state == -1: plot_vector([x_current,y_current+1],[x_current,y_current]) if current_right_state == 1: plot_vector([x_current,y_current],[x_current+1,y_current]) elif current_right_state == -1: plot_vector([x_current+1,y_current],[x_current,y_current]) if current_down_state == 1: plot_vector([x_current,y_current],[x_current,y_current-1]) elif current_down_state == -1: plot_vector([x_current,y_current-1],[x_current,y_current]) if current_left_state == 1: plot_vector([x_current,y_current],[x_current-1,y_current]) elif current_left_state == -1: plot_vector([x_current-1,y_current],[x_current,y_current]) plt.xlim(-1,num+1) plt.ylim(-1,num+1) plt.axis('off') plt.show() plt.close() visualise_2d_model(arr) def check_config(arr): ''' Author: Tanmay Bhore ''' flag=True N=len(arr) for i in range(len(arr)): for j in range(len(arr)): current_up_state = arr[i][j][0] current_right_state = arr[i][j][1] lower_neighbour_up_state = arr[(i+1)%N][j][0] left_neighbour_right_state = arr[i][j-1][1] current_left_state = -(left_neighbour_right_state) current_down_state = -(lower_neighbour_up_state) if (current_up_state + current_right_state + current_left_state + current_down_state) != 0: flag=False break return flag def long_loop(arr2, verbose=True): ''' Author: Team ℏ ''' arr = copy.deepcopy(arr2) N=len(arr) iters=0 n1 = np.random.randint(low=0, high=N) n2 = np.random.randint(low=0, high=N) inital_pt =(n1,n2) prev_choice=None while True: iters+=1 if n1==inital_pt[0] and n2==inital_pt[1] and iters!=1: if verbose: print(f"Completed in {iters} iterations.") # assert(check_config(arr)) break current_up_state = arr[n1][n2][0] current_right_state = arr[n1][n2][1] lower_neighbour_up_state = arr[(n1+1)%N][n2][0] left_neighbour_right_state = arr[n1][n2-1][1] current_down_state = -(lower_neighbour_up_state) current_left_state = 
-(left_neighbour_right_state) current_states_dict = {"up":current_up_state,"right":current_right_state,"down":current_down_state,"left":current_left_state} outgoing_state_dict={} incoming_state_dict={} for key in current_states_dict.keys(): if current_states_dict[key]==1: #current state is outgoing outgoing_state_dict[key]=current_states_dict[key] else: incoming_state_dict[key]=current_states_dict[key] if prev_choice =="right": forbidden_choice="left" elif prev_choice =="up": forbidden_choice="down" elif prev_choice =="left": forbidden_choice="right" elif prev_choice =="down": forbidden_choice="up" else: forbidden_choice=None while True: out_choice = np.random.choice(list(outgoing_state_dict.keys())) if out_choice !=forbidden_choice: break prev_choice=out_choice if out_choice == "up": arr[n1][n2][0]= - (arr[n1][n2][0]) n1=(n1-1)%N n2=n2 continue if out_choice == "right": arr[n1][n2][1]= - (arr[n1][n2][1]) n1=n1 n2=(n2+1)%N continue if out_choice == "down": arr[(n1+1)%N][n2][0]= - (arr[(n1+1)%N][n2][0]) n1=(n1+1)%N n2=n2 continue if out_choice == "left": arr[n1][(n2-1)%N][1]= - (arr[n1][(n2-1)%N][1]) n1=n1 n2=(n2-1)%N continue return arr name = ' '.join(map(str, arr.flatten())).replace(' ','') name ``` ## DEBUGGING ``` for i in range(8): newarr = long_loop(initialise_state(20)) check_config(newarr) get_coord_list(arr) num = len(arr) plt.axes().set_aspect('equal') coord_list = get_coord_list(arr) for i in range(num): for j in range(num): print("********************************") current_up_state = arr[i][j][0] current_right_state = arr[i][j][1] x_current = coord_list[i][j][1] y_current = coord_list[i][j][0] print(f"Current Coords = ({x_current},{y_current})") left_neighbour_right_state = arr[i][j-1][1] x_left = coord_list[i][j-1][1] y_left = coord_list[i][j-1][0] lower_neighbour_up_state = arr[(i+1)%num][j][0] x_up = coord_list[(i+1)%num][j][1] y_up = coord_list[(i+1)%num][j][0] current_left_state = -(left_neighbour_right_state) current_down_state = -(lower_neighbour_up_state) print(f"Current States : up = {current_up_state}, right = {current_right_state}") print(f"Current States : down = {current_down_state}, left = {current_left_state}") print(f"Left Neighbour Coords = ({x_left},{y_left})") print(f"Left Neighbour States : right = {left_neighbour_right_state}") print(f"Lower Neighbour Coords = ({x_up},{y_up})") print(f"Lower Neighbour Right State = {left_neighbour_right_state}") print(f"Lower Neighbour States : up = {lower_neighbour_up_state}") plt.plot(x_current,y_current,'ob') if current_up_state == 1: plot_vector([x_current,y_current],[x_current,y_current+1]) elif current_up_state == -1: plot_vector([x_current,y_current+1],[x_current,y_current]) if current_right_state == 1: plot_vector([x_current,y_current],[x_current+1,y_current]) elif current_right_state == -1: plot_vector([x_current+1,y_current],[x_current,y_current]) if current_down_state == 1: plot_vector([x_current,y_current],[x_current,y_current-1]) elif current_down_state == -1: plot_vector([x_current,y_current-1],[x_current,y_current]) if current_left_state == 1: plot_vector([x_current,y_current],[x_current-1,y_current]) elif current_left_state == -1: plot_vector([x_current-1,y_current],[x_current,y_current]) plt.xlim(-1,num+1) plt.ylim(-1,num+1) plt.axis('off') plt.show() plt.close() #This corresponds to pic in book arr = [[[-1,1],[-1,1],[1,-1],[-1,1]], [[-1,-1],[-1,-1],[-1,1],[1,-1]], [[-1,1],[-1,1],[1,1],[-1,1]], [[-1,1],[-1,1],[1,1],[-1,1]]] arr = np.array(arr) arr2 = np.array( [ [[1,1],[1,1]], [[1,1],[1,1]] ] ) 
visualise_2d_model(arr2) arr3 = np.array( [ [[1,1],[1,1],[1,1]], [[1,1],[1,1],[1,1]], [[1,1],[1,1],[1,1]] ] ) visualise_2d_model(arr3) arr4 = np.array( [ [[-1,-1],[-1,-1],[-1,-1]], [[-1,-1],[-1,-1],[-1,-1]], [[-1,-1],[-1,-1],[-1,-1]] ] ) visualise_2d_model(arr4) ```
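To tie the pieces above together, here is a short end-to-end sketch that uses only the functions defined in this notebook. The lattice size, random seed, and number of loop updates are arbitrary choices made for illustration.

```python
# End-to-end check: start from the all-(up=1, right=1) reference state, apply a
# few long-loop updates, see whether the ice rule (two arrows in, two out at
# every vertex) is preserved, and visualise the resulting small lattice.
np.random.seed(42)            # illustrative seed, for reproducibility only
state = initialise_state(4)   # 4x4 lattice of vertices

print("Ice rule before updates:", check_config(state))
for _ in range(5):
    state = long_loop(state, verbose=False)
print("Ice rule after 5 updates:", check_config(state))

visualise_2d_model(state)
```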
github_jupyter
# Temporal-Difference Methods In this notebook, you will write your own implementations of many Temporal-Difference (TD) methods. While we have provided some starter code, you are welcome to erase these hints and write your code from scratch. --- ### Part 0: Explore CliffWalkingEnv We begin by importing the necessary packages. ``` import sys import gym import numpy as np import random import math from collections import defaultdict, deque import matplotlib.pyplot as plt %matplotlib inline import check_test from plot_utils import plot_values ``` Use the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment. ``` env = gym.make('CliffWalking-v0') ``` The agent moves through a $4\times 12$ gridworld, with states numbered as follows: ``` [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23], [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]] ``` At the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`. The agent has 4 potential actions: ``` UP = 0 RIGHT = 1 DOWN = 2 LEFT = 3 ``` Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$, and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below. ``` print(env.action_space) print(env.observation_space) ``` In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. Please take the time now to make sure that you understand _why_ this is the optimal state-value function. _**Note**: You can safely ignore the values of the cliff "states" as these are not true states from which the agent can make decisions. For the cliff "states", the state-value function is not well-defined._ ``` # define the optimal state-value function V_opt = np.zeros((4,12)) V_opt[0][0:13] = -np.arange(3, 15)[::-1] V_opt[1][0:13] = -np.arange(3, 15)[::-1] + 1 V_opt[2][0:13] = -np.arange(3, 15)[::-1] + 2 V_opt[3][0] = -13 plot_values(V_opt) ``` ### Part 1: TD Control: Sarsa In this section, you will write your own implementation of the Sarsa control algorithm. Your algorithm has four arguments: - `env`: This is an instance of an OpenAI Gym environment. - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction. - `alpha`: This is the step-size parameter for the update step. - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). The algorithm returns as output: - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`. Please complete the function in the code cell below. 
(_Feel free to define additional functions to help you to organize your code._)

```
def update_Q_sarsa(alpha, gamma, Q, state, action, reward, next_state=None, next_action=None):
    """Returns updated Q-value for the most recent experience."""
    current = Q[state][action]  # estimate in Q-table (for current state, action pair)
    # get value of state, action pair at next time step
    Qsa_next = Q[next_state][next_action] if next_state is not None else 0
    target = reward + (gamma * Qsa_next)                # construct TD target
    new_value = current + (alpha * (target - current))  # get updated value
    return new_value

def epsilon_greedy(Q, state, nA, eps):
    """Selects epsilon-greedy action for supplied state.

    Params
    ======
        Q (dictionary): action-value function
        state (int): current state
        nA (int): number of actions in the environment
        eps (float): epsilon
    """
    if random.random() > eps:  # select the greedy action with probability 1 - epsilon
        return np.argmax(Q[state])
    else:                      # otherwise, select an action uniformly at random
        return random.choice(np.arange(nA))

def sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):
    nA = env.action_space.n                  # number of actions
    Q = defaultdict(lambda: np.zeros(nA))    # initialize empty dictionary of arrays
    # monitor performance
    tmp_scores = deque(maxlen=plot_every)    # deque for keeping track of scores
    avg_scores = deque(maxlen=num_episodes)  # average scores over every plot_every episodes
    for i_episode in range(1, num_episodes+1):
        # monitor progress
        if i_episode % 100 == 0:
            print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        score = 0                                   # initialize score
        state = env.reset()                         # start episode
        eps = 1.0 / i_episode                       # set value of epsilon
        action = epsilon_greedy(Q, state, nA, eps)  # epsilon-greedy action selection
        while True:
            next_state, reward, done, info = env.step(action)  # take action A, observe R, S'
            score += reward                                     # add reward to agent's score
            if not done:
                next_action = epsilon_greedy(Q, next_state, nA, eps)  # epsilon-greedy action
                Q[state][action] = update_Q_sarsa(alpha, gamma, Q,
                                                  state, action, reward, next_state, next_action)
                state = next_state    # S <- S'
                action = next_action  # A <- A'
            if done:
                Q[state][action] = update_Q_sarsa(alpha, gamma, Q,
                                                  state, action, reward)
                tmp_scores.append(score)  # append score
                break
        if (i_episode % plot_every == 0):
            avg_scores.append(np.mean(tmp_scores))
    # plot performance
    plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
    plt.xlabel('Episode Number')
    plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
    plt.show()
    # print best 100-episode performance
    print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
    return Q
```

Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
``` # obtain the estimated optimal policy and corresponding action-value function Q_sarsa = sarsa(env, 5000, .01) # print the estimated optimal policy policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12) check_test.run_check('td_control_check', policy_sarsa) print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):") print(policy_sarsa) # plot the estimated optimal state-value function V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)]) plot_values(V_sarsa) ``` ### Part 2: TD Control: Q-learning In this section, you will write your own implementation of the Q-learning control algorithm. Your algorithm has four arguments: - `env`: This is an instance of an OpenAI Gym environment. - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction. - `alpha`: This is the step-size parameter for the update step. - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). The algorithm returns as output: - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`. Please complete the function in the code cell below. (_Feel free to define additional functions to help you to organize your code._) ``` def update_Q_sarsamax(alpha, gamma, Q, state, action, reward, next_state=None): """Returns updated Q-value for the most recent experience.""" current = Q[state][action] # estimate in Q-table (for current state, action pair) Qsa_next = np.max(Q[next_state]) if next_state is not None else 0 # value of next state target = reward + (gamma * Qsa_next) # construct TD target new_value = current + (alpha * (target - current)) # get updated value return new_value def q_learning(env, num_episodes, alpha, gamma=1.0, plot_every=100): """Q-Learning - TD Control Params ====== num_episodes (int): number of episodes to run the algorithm alpha (float): learning rate gamma (float): discount factor plot_every (int): number of episodes to use when calculating average score """ nA = env.action_space.n # number of actions Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays # monitor performance tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes for i_episode in range(1, num_episodes+1): # monitor progress if i_episode % 100 == 0: print("\rEpisode {}/{}".format(i_episode, num_episodes), end="") sys.stdout.flush() score = 0 # initialize score state = env.reset() # start episode eps = 1.0 / i_episode # set value of epsilon while True: action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection next_state, reward, done, info = env.step(action) # take action A, observe R, S' score += reward # add reward to agent's score Q[state][action] = update_Q_sarsamax(alpha, gamma, Q, \ state, action, reward, next_state) state = next_state # S <- S' if done: tmp_scores.append(score) # append score break if (i_episode % plot_every == 0): avg_scores.append(np.mean(tmp_scores)) # plot performance plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores)) plt.xlabel('Episode Number') plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every) plt.show() # print best 100-episode performance print(('Best Average Reward over %d Episodes: ' % plot_every), 
np.max(avg_scores)) return Q ``` Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default. ``` # obtain the estimated optimal policy and corresponding action-value function Q_sarsamax = q_learning(env, 5000, .01) # print the estimated optimal policy policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12)) check_test.run_check('td_control_check', policy_sarsamax) print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):") print(policy_sarsamax) # plot the estimated optimal state-value function plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)]) ``` ### Part 3: TD Control: Expected Sarsa In this section, you will write your own implementation of the Expected Sarsa control algorithm. Your algorithm has four arguments: - `env`: This is an instance of an OpenAI Gym environment. - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction. - `alpha`: This is the step-size parameter for the update step. - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`). The algorithm returns as output: - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`. Please complete the function in the code cell below. 
(_Feel free to define additional functions to help you to organize your code._) ``` def update_Q_expsarsa(alpha, gamma, nA, eps, Q, state, action, reward, next_state=None): """Returns updated Q-value for the most recent experience.""" current = Q[state][action] # estimate in Q-table (for current state, action pair) policy_s = np.ones(nA) * eps / nA # current policy (for next state S') policy_s[np.argmax(Q[next_state])] = 1 - eps + (eps / nA) # greedy action Qsa_next = np.dot(Q[next_state], policy_s) # get value of state at next time step target = reward + (gamma * Qsa_next) # construct target new_value = current + (alpha * (target - current)) # get updated value return new_value def expected_sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100): """Expected SARSA - TD Control Params ====== num_episodes (int): number of episodes to run the algorithm alpha (float): step-size parameters for the update step gamma (float): discount factor plot_every (int): number of episodes to use when calculating average score """ nA = env.action_space.n # number of actions Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays # monitor performance tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes for i_episode in range(1, num_episodes+1): # monitor progress if i_episode % 100 == 0: print("\rEpisode {}/{}".format(i_episode, num_episodes), end="") sys.stdout.flush() score = 0 # initialize score state = env.reset() # start episode eps = 0.005 # set value of epsilon while True: action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection next_state, reward, done, info = env.step(action) # take action A, observe R, S' score += reward # add reward to agent's score # update Q Q[state][action] = update_Q_expsarsa(alpha, gamma, nA, eps, Q, \ state, action, reward, next_state) state = next_state # S <- S' if done: tmp_scores.append(score) # append score break if (i_episode % plot_every == 0): avg_scores.append(np.mean(tmp_scores)) # plot performance plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores)) plt.xlabel('Episode Number') plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every) plt.show() # print best 100-episode performance print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores)) return Q ``` Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function. If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default. ``` # obtain the estimated optimal policy and corresponding action-value function Q_expsarsa = expected_sarsa(env, 5000, 1) # print the estimated optimal policy policy_expsarsa = np.array([np.argmax(Q_expsarsa[key]) if key in Q_expsarsa else -1 for key in np.arange(48)]).reshape(4,12) check_test.run_check('td_control_check', policy_expsarsa) print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):") print(policy_expsarsa) # plot the estimated optimal state-value function plot_values([np.max(Q_expsarsa[key]) if key in Q_expsarsa else 0 for key in np.arange(48)]) ```
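To make the contrast between the three control methods concrete, here is a small hand-worked sketch; the Q-table entries and the experience tuple below are invented purely for illustration and are not part of the notebook. All three methods share the update `Q[s][a] <- Q[s][a] + alpha * (target - Q[s][a])` and differ only in how the target estimates the value of the next state.

```
import numpy as np

alpha, gamma, eps, nA = 0.1, 1.0, 0.1, 4
Q = {0: np.array([1.0, 2.0, 0.5, 0.0]),     # hypothetical Q[s][a] for the current state s = 0
     1: np.array([0.0, 3.0, 1.0, 2.0])}     # and for the next state s' = 1
s, a, r, s_next, a_next = 0, 1, -1.0, 1, 3  # one invented (S, A, R, S', A') experience

sarsa_target = r + gamma * Q[s_next][a_next]      # value of the action actually taken
qlearning_target = r + gamma * np.max(Q[s_next])  # value of the greedy action
policy_s = np.ones(nA) * eps / nA                 # epsilon-greedy probabilities for S'
policy_s[np.argmax(Q[s_next])] += 1 - eps
expected_sarsa_target = r + gamma * np.dot(Q[s_next], policy_s)  # expectation under that policy

for name, target in [("Sarsa", sarsa_target),
                     ("Q-learning", qlearning_target),
                     ("Expected Sarsa", expected_sarsa_target)]:
    print(name, Q[s][a] + alpha * (target - Q[s][a]))
```

Sarsa bootstraps on the action the behaviour policy actually took, Q-learning on the greedy action, and Expected Sarsa on the expectation over the epsilon-greedy policy; the last choice removes the sampling noise from the target, which is consistent with the much larger step size ($\alpha = 1$) used in the cell above.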
# Spatial discretisation So far, we've seen time derivatives and ordinary differential equations of the form $$ \dot{u} = f(t, u). $$ Most problems one encounters in the real world have spatial as well as time derivatives. Our first example is the [*Poisson equation*](https://en.wikipedia.org/wiki/Poisson%27s_equation): $$ \begin{align} -\frac{\text{d}^2 u}{\text{d} x^2} &= f(x) \quad x \in \Omega = (-1, 1)\\ u(-1) &= a\\ \frac{\text{d} u}{\text{d} x}(1) &= b\\ \end{align}. $$ This is termed a *boundary value problem* (BVP), as opposed to the ODEs which were *initial value problems*, since we do not specify an initial condition, but rather a condition on the boundary of the domain. This equation appears in a remarkably large number of places. As one example, it models the equilibrium temperature profile of a thermally conducting material maintained at constant temperature $a$ on the left and cooled at constant rate $b$ on the right. To solve this problem numerically we must make a number of choices: - how to represent the solution $u$; - how to compute its derivatives; - how to enforce the boundary conditions; - how and where to evaluate $f$; - in what sense we would like our solution to satisfy the equation. [Iserles' book](http://www.damtp.cam.ac.uk/user/ai/Arieh_Iserles/Textbook.html) contains a quite mathematical treatment of finite differences. I also like [Randy LeVeque's *Finite difference methods for ordinary and partial differential equations*](http://staff.washington.edu/rjl/fdmbook/). ## Finite difference framework We will focus on _finite difference_ methods here, which make the following choices in answer to the questions above: - The solution $u(x)$ is represented by _pointwise_ values $u_i = u(x_i)$ at some discrete set of points $-1 = x_0 < x_1 < \dots < x_N = 1$. Importantly, the framework _does not_ specify the value of $u$ outside of these points; - derivatives of $u$ at points $x_i$ are approximated using differencing formulae that utilise a finite number of neighbouring points (independent of $N$); - boundary conditions are either enforced pointwise (e.g. the $u(-1)$ case above), or (when constraining derivatives) with one-sided differencing formulae; - $f$ is evaluated pointwise at each $x_i$; - we require that our finite difference method satisfies the equation pointwise at each $x_i$ in the interior of the domain. ### Differencing formulae Our starting point is the definition of a derivative: $$ \frac{d u(x)}{d x} = \lim_{\epsilon \to 0} \frac{u(x + \epsilon) - u(x)}{\epsilon}. $$ If we wish to approximate this in our finite difference framework, where we only have point values, we can do so using neighbouring values. Writing $x_{i+1} - x_i = h$ for convenience, we can write $$ \frac{d u(x_i)}{d x} \approx \frac{u(x_{i+1}) - u(x_i)}{h} = \frac{u_{i+1} - u_i}{h} =: D_+ u_i. $$ This is a _one-sided_ approximation: we only use $u_i$ and $u_{i+1}$. Another one-sided approximation would be to offset in the other direction $$ D_{-} u_i := \frac{u_i - u_{i-1}}{h}. $$ Finally, we can also use a _centred_ approximation by averaging the two one-sided approximations: $$ D_0 u_i := \frac{u_{i+1} - u_{i-1}}{2h} = \frac{1}{2} (D_+ + D_-) u_i. 
$$ Let's have a picture ``` %matplotlib notebook import numpy from matplotlib import pyplot import matplotlib.lines as mlines pyplot.style.use('ggplot') n = 200 h = 2/(n-1) x = numpy.linspace(1,2.5,n) pyplot.plot(x, numpy.sin(x)); def newline(p1, p2, **kwargs): ax = pyplot.gca() xmin, xmax = ax.get_xbound() if(p2[0] == p1[0]): xmin = xmax = p1[0] ymin, ymax = ax.get_ybound() else: ymax = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmax-p1[0]) ymin = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmin-p1[0]) l = mlines.Line2D([xmin,xmax], [ymin,ymax], **kwargs) ax.add_line(l) return l h = 0.25 xi = 1.6 ximinus = xi - h xiplus = xi + h pyplot.plot([ximinus, xi, xiplus], numpy.sin([ximinus, xi, xiplus]), marker="o", linestyle="none") newline((xi, numpy.sin(xi)), (xiplus, numpy.sin(xiplus)), linestyle="dashed", label="$D_+ u(x)$") newline((xi, numpy.sin(xi)), (ximinus, numpy.sin(ximinus)), linestyle="dotted", label="$D_- u(x)$") newline((ximinus, numpy.sin(ximinus)), (xiplus, numpy.sin(xiplus)), linestyle="-.", label="$D_0 u(x)$") newline((xi, numpy.sin(xi)), (xiplus, numpy.sin(xi) + h*numpy.cos(xi)), color="black", label="$u'(x)$") pyplot.legend(); ``` Let's look at the full approximate derivative too. ``` n = 10 h = 2/(n-1) x = numpy.linspace(-1,1,n) u = numpy.sin(x) pyplot.figure() pyplot.plot(x, numpy.cos(x), label="$u'$"); pyplot.plot(x[1:], (u[1:] - u[:-1])/h, label="$D_-$", marker="o", linestyle="none") pyplot.legend(); ``` ## Accuracy Certainly from the picture of the slope above, it appears that the centered difference formula is more accurate than the one-sided approximations. Can we formalise this at all? To do so, we turn to the favourite tool of the budding numericist, the *taylor expansion*. ### Recap, Taylor expansions For a sufficiently smooth function $u$, given a point $x$, we can represent the function at a new point $x + h$ by its Taylor expansion $$ u(x + h) = u(x) + u'(x) h + \frac{1}{2} u''(x) h^2 + \frac{1}{6} u'''(x) h^3 + \dots = \sum_{n=0}^\infty \frac{1}{n!} h^n u^{(n)}(x) $$ where the notation $u'$ is shorthand for $\frac{\text{d} u}{\text{d} x}$ and $u^{(n)}(x) = \frac{\text{d}^n u}{\text{d} x^n}$. Let's look at how this works for a sample function. ``` from functools import reduce from operator import mul def u(x, n=0): factor = (-1)**(n // 2) if n % 2 == 0: u_ = numpy.sin else: u_ = numpy.cos return factor * u_(x) pyplot.figure() x = numpy.linspace(0.5, 1.75, 500) pyplot.plot(x, u(x)); x0 = 0.6 h = 0.8 def fac(n): return reduce(mul, range(1, n+1), 1) def taylor(u, x0, h, n): return u(x0) + sum(h**i/fac(i) * u(x0, i) for i in range(1, n)) xs = numpy.linspace(x0, x0+h, 20) for n in range(1, 5): pyplot.plot(xs, [taylor(u, x0, x - x0, n) for x in xs], marker="o", label=r"$\tilde{u} + \mathcal{O}(h^%d)$" % n) pyplot.legend(); ``` If we chop off the series at some finite $n$ we write $$ u(x + h) = u(x) + u'(x) h + \frac{1}{2} u''(x) h^2 + \mathcal{O}(h^3) $$ with $h$ sufficiently small. To determine the order of a method, we substitute the Taylor expansion into the differencing expression and calculate. As an example, let us consider the one-sided differencing operator $D_+$. To simplify notation, we will choose $x = 0$, and we have $$ \begin{align} u'(0) &\approx \frac{u(h) - u(0)}{h} \quad \text{ definition of } D_+\\ &= h^{-1}(\underbrace{u(0) + u'(0) h + \frac{1}{2} u''(0) h^2 + \mathcal{O}(h^3)}_{u(h)} - u(0)) \\ &= u'(0) + \frac{1}{2} u''(0) h + \mathcal{O}(h^2)\\ \end{align}. 
$$ The leading-order error term in the right hand side is $\mathcal{O}(h)$, and so we say that this is a _first-order_ method. Derivation that the operator $D_-$ is also first-order proceeds identically. #### Questions 1. Show that the centered difference operator $D_0$ computes a second-order accurate derivative. ## Stability We will postpone mathematical discussion of stability for a while, and give an intuition for some potential problems. Let us first check that our implementation of differencing operators provides us with the expected (mathematical) convergence orders for a smooth function. ``` def dplus(x, u): return x[:-1], (u[1:] - u[:-1])/(x[1:] - x[:-1]) def dminus(x, u): return x[1:], (u[1:] - u[:-1])/(x[1:] - x[:-1]) def center(x, u): return x[1:-1], (u[2:] - u[:-2])/(x[2:] - x[:-2]) grids = 2**numpy.arange(3, 10) def error(f, df, op): for n in grids: x = numpy.linspace(-1, 1, n) x, y = op(x, f(x)) yield numpy.linalg.norm(y - df(x), numpy.inf) pyplot.figure() for op in [dplus, dminus, center]: pyplot.loglog(1/grids, list(error(numpy.sin, numpy.cos, op)), marker="o", linestyle="none", label=op.__name__) pyplot.xlabel("Resolution ($h$)") pyplot.ylabel("$l_\infty$ error in derivative") pyplot.loglog(1/grids, 1/grids, label="$h$") pyplot.loglog(1/grids, 1/grids**2, label="$h^2$") pyplot.legend(); ``` So both the one-sided differences are first-order accurate, whereas the centered difference is second-order accurate, as expected. One thing to be wary of, however, is using some of these approximations for functions that are "rough" on the grid scale. We can make this question more precise by asking whether there are functions whose derivatives are non-zero, but for which our numerical approximations compute $u'(x_i) = 0$. Let's try and contrive an example: ``` x = numpy.linspace(-1, 1, 9) xf = numpy.linspace(-1, 1, 100) def f(x): return numpy.cos(1/2 + 4*numpy.pi*x) def df(x): return -4*numpy.pi*numpy.sin(1/2 + 4*numpy.pi*x) pyplot.figure() pyplot.plot(x, f(x), marker="o", label="coarse") pyplot.plot(xf, f(xf), "-", label="fine") pyplot.legend(); ``` What about the derivatives? ``` pyplot.figure() for op in [dplus, dminus, center]: x_, y = op(x, f(x)) pyplot.plot(x_, y, "o-", label=op.__name__) pyplot.plot(xf, df(xf), "-", label="Exact") pyplot.legend(); ``` The centered difference approximation produces a _zero_ derivative for this function. Hence if we have a solution $u(x)$, we can (at least to the numerical operator) construct a new solution $\tilde{u}(x) = u(x) + f(x)$. Suddenly, even if our actual equation has a unique solution, the numerical solution is no longer unique. This turns out to cause all kinds of terrible issues with numerical algorithms and must be avoided at all costs. ## Higher-order derivatives We can compute high-order derivatives by repeatedly applying differencing operators for lower-order derivatives. For example, the second derivative $$ \frac{\text{d}^2 u}{\text{d} x^2} \approx D^2 u_i = D_+ D_- u_i = \frac{1}{h^2}\left(u_{i+1} - 2 u_i + u_{i-1}\right) = D_- D_+ u_i. $$ ### Questions 1. Show that this is a _second-order_ accurate approximation of the second derivative 2. We could also use $D^2 u_i = D_0 D_0 u_i$, derive the stencil for this case. 3. 
Finally, show that if we define a "half-step" centered difference operator
$$
\hat{D}_0 u = \frac{1}{h}\left[u\left(x + \frac{h}{2}\right) - u\left(x - \frac{h}{2}\right)\right]
$$
then we have
$$
D^2 = \hat{D}_0 \circ \hat{D}_0 = D_+ \circ D_-.
$$

## Boundary conditions

The final missing piece required before we can solve our first PDE is to figure out how we will treat boundary conditions. To do this, we will first recast the differencing operators as matrices. It is then somewhat easier to see what is going on.

We can think of the differencing operator acting on an entire vector
$$
U = \begin{bmatrix} u_0\\ u_1\\ \vdots\\ u_N \end{bmatrix}
$$
at once. For example, we can write
$$
D_+ = \frac{1}{h}
\begin{bmatrix}
-1 & 1 & 0 & \dots & 0\\
0 & -1 & 1 & \dots & 0\\
\vdots & \ddots & \ddots & \vdots & \vdots\\
0 & \dots & 0 & -1 & 1\\
0 & \dots & 0 & 0 & -1\\
\end{bmatrix}
$$
and
$$
D^2 = \frac{1}{h^2}
\begin{bmatrix}
-2 & 1 & 0 & \dots & 0\\
1 & -2 & 1 & \dots & 0\\
\vdots & \ddots & \ddots & \vdots & \vdots\\
0 & \dots & 1 & -2 & 1\\
0 & \dots & 0 & 1 & -2\\
\end{bmatrix}.
$$

Recall that our problem was to find $u$ on $(-1, 1)$ satisfying
$$
\begin{align}
-\frac{\text{d}^2 u}{\text{d} x^2} &= f\\
u(-1) &= a\\
\frac{\text{d} u}{\text{d} x}(1) &= b.
\end{align}
$$
In matrix form, this becomes
$$
\underbrace{-D^2}_{A} \underbrace{\begin{bmatrix} u_0\\ u_1\\ \vdots\\ u_N \end{bmatrix}}_{U} = \underbrace{\begin{bmatrix} f_0\\f_1\\\vdots\\f_N\end{bmatrix}}_{F}.
$$
This works perfectly in the interior of the domain, but we need to figure out what to do at the boundaries. For example, we can't use the standard differencing operator for $D^2$ on $u_N$, because $u_{N+1}$ does not exist. Fortunately, our boundary conditions inform how to modify the matrix appropriately.

### Dirichlet conditions

These conditions, of the form
$$
u(-1) = a,
$$
specify the _value_ of the solution at a particular point (or set of points). This means that, rather than solving a small equation to determine the value at this point, we _already know_ it and can simply replace the relevant rows of the matrix. Let us suppose we have ordered our points such that $u(-1)$ corresponds to $u_0$. Then we have
$$
\begin{bmatrix}
1 & 0 & \dots & 0\\
 & & & \\
 & & A_{1:,:} & \\
 & & &\\
\end{bmatrix} U =
\begin{bmatrix}
a\\ \\ F_{1:} \\ \\
\end{bmatrix}.
$$
In general, if we have a boundary value $\alpha_i$ that constrains $u_i$, then we replace the $i$th row with the identity and the $i$th value in the right hand side with $\alpha_i$.

This modification destroys any symmetry that might have existed in the matrix $A$, since we have zeroed rows, but not the corresponding columns. If we write the linear system in block form, we can, however, see a way around this:
$$
\begin{bmatrix}
I & 0\\
A_{10} & A_{11}
\end{bmatrix}
\begin{bmatrix}
U_0\\
U_1
\end{bmatrix} =
\begin{bmatrix}
F_0\\
F_1
\end{bmatrix}.
$$
Since we know $U_0$ (those entries are just $F_0$), we can forward-substitute and move the lower-left block of the matrix onto the right hand side, to produce
$$
\begin{bmatrix}
I & 0\\
0 & A_{11}
\end{bmatrix}
\begin{bmatrix}
U_0\\
U_1
\end{bmatrix} =
\begin{bmatrix}
F_0\\
F_1 - A_{10}F_0
\end{bmatrix}.
$$
This is often a convenient form to work with. Alternatively, since the equations for $U_0$ are just the identity, we can write our solver to handle only
$$
A_{11} U_1 = F_1 - A_{10}F_0
$$
and insert the boundary values into a big vector whenever we need to visualise it.
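As a minimal numpy sketch of this block elimination (the grid size, forcing values, and boundary data below are my own choices, and I impose Dirichlet values at *both* ends to keep the example short):

```
import numpy as np

N = 8
h = 2.0 / N
A = (2 * np.eye(N + 1) - np.diag(np.ones(N), 1) - np.diag(np.ones(N), -1)) / h**2
F = np.ones(N + 1)                          # placeholder values of f(x_i)
bc = np.array([3.0, -1.0])                  # known boundary values u_0 and u_N

interior = slice(1, N)                      # indices of the unknown entries U_1
A10 = A[interior, ::N]                      # columns coupling the interior rows to u_0 and u_N
A11 = A[interior, interior]                 # the interior block A_11
U1 = np.linalg.solve(A11, F[interior] - A10 @ bc)  # solve A_11 U_1 = F_1 - A_10 F_0
U = np.concatenate([[bc[0]], U1, [bc[1]]])  # reassemble the full vector for plotting
```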
## Neumann conditions

We can now treat boundary conditions that constrain the value of the solution, but recall that the condition at $x=1$ instead constrains the gradient of the solution. We cannot handle this by setting values; we must instead form an equation for the boundary value. There are typically two ways to do this.

### One-sided difference

The first option is to come up with a one-sided differencing formula for the derivative directly. For example, recalling the one-sided difference, we might replace the boundary term
$$
\frac{\text{d} u}{\text{d} x}(1) = b
$$
by
$$
\frac{u_n - u_{n-1}}{h} = b.
$$
This is simple but has some potential drawbacks:

1. We need to make a different choice for the discretisation on the boundary to that in the interior.
2. This choice may not have the same order of accuracy.
3. It may destroy symmetry that previously existed in the problem.

### Ghost values

An alternative option is to introduce one (or possibly more than one) _ghost value_ outside of the domain, such that we can then just use our interior discretisation. We define the value of this ghost point to be the reflection (possibly weighted by the boundary value) of the interior point. That is, we introduce $u_{n+1} = u(x_{n+1})$ and set
$$
u_{n+1} = u_{n-1} + 2b(\underbrace{x_n - x_{n-1}}_{h}).
$$
Now we can use our interior discretisation
$$
\frac{-u_{n-1} + 2u_n - u_{n+1}}{h^2} = f(x_n)
$$
and, substituting in the definition of $u_{n+1}$, we obtain
$$
\begin{align}
\frac{-u_{n-1} + 2u_n - (u_{n-1} + 2bh)}{h^2} &= f(x_n)\\
\frac{2(u_n - u_{n-1})}{h^2} &= f(x_n) + \frac{2b}{h}\\
\frac{u_n - u_{n-1}}{h^2} &= \frac{f(x_n)}{2} + \frac{b}{h}.
\end{align}
$$
Let's compare these approaches.

```
def laplacian(N, rhsfunc):
    x = numpy.linspace(0, 1, N+1)
    h = 1/N
    rhs = rhsfunc(x)
    e = numpy.ones(N)
    # interior discretisation
    L = (2*numpy.eye(N+1) - numpy.diag(e, 1) - numpy.diag(e, -1)) / h**2
    return x, L, rhs, h

def apply_dirichlet(L, rhs, h, vals, indices):
    N, _ = L.shape
    diag = numpy.eye(1, N)
    bcmask = numpy.zeros(N, dtype=bool)
    bcmask[indices] = True
    # Dirichlet rows
    L[numpy.ix_(bcmask)] = numpy.vstack([numpy.roll(diag, i) for i in indices])
    rhs[numpy.ix_(bcmask)] = vals
    # Forward substitute
    rhs[numpy.ix_(~bcmask)] -= L[numpy.ix_(~bcmask, bcmask)] @ vals
    L[numpy.ix_(~bcmask, bcmask)] = 0
    return L, rhs, h

def apply_neumann_oneside(L, rhs, h, b, index):
    N, _ = L.shape
    assert index == N - 1
    L[index, :] = 0
    L[index, index] = 1/h
    L[index, index - 1] = -1/h
    rhs[index] = b
    return L, rhs, h

def apply_neumann_ghost(L, rhs, h, b, index):
    N, _ = L.shape
    L[index, index] /= 2
    rhs[index] = b/h + rhs[index]/2
    return L, rhs, h
```

We'll solve
$$
\begin{align}
-\frac{\text{d}^2 u}{\text{d} x^2} &= -e^x \quad \text{in } (0, 1)\\
u(0) &= e^{0}\\
\frac{\text{d} u}{\text{d} x}(1) &= e^{1},
\end{align}
$$
which has the convenient exact solution $u(x) = e^{x}$.
``` N = 10 rhsfunc = lambda x: -numpy.exp(x) exact = lambda x: numpy.exp(x) x, L, rhs, h = laplacian(N, rhsfunc) L, rhs, h = apply_dirichlet(L, rhs, h, [exact(0)], [0]) L, rhs, h = apply_neumann_oneside(L, rhs, h, exact(1), N) uoneside = numpy.linalg.solve(L, rhs) x, L, rhs, h = laplacian(N, rhsfunc) L, rhs, h = apply_dirichlet(L, rhs, h, [exact(0)], [0]) L, rhs, h = apply_neumann_ghost(L, rhs, h, exact(1), N) ughost = numpy.linalg.solve(L, rhs) pyplot.figure() pyplot.plot(x, uoneside, label="Computed Oneside") pyplot.plot(x, ughost, label="Computed Ghost") pyplot.plot(x, exact(x), label="Exact") pyplot.legend(); ``` ## Observations Perhaps unsurprisingly, the one-sided application of the Neumann conditions performs worse than the ghost version. Interestingly, they are effectively the *same* discretisation in the matrix, the only difference is that in the ghost version, we corrected the right hand side we're solving for by a small amount to take into account the issues in the one-sided discretisation. It looks like we have a lower-order scheme. Let's check by performing an MMS test. To do so, we have to introduce how to measure errors. Since our discrete solution $u_i$ is supposed to approximate $u(x_i)$ it is natural to consider the pointwise errors $u_i - u(x_i)$. Let us now consider how to measure the size of the error vector (or indeed any vector). $$ E = \begin{bmatrix} u_0 - u(x_0)\\ \vdots\\ u_n - u(x_n) \end{bmatrix} $$ So far we have been using the $\infty$-norm or $\max$-norm. $$ \|E\|_\infty := \max_{0 \le i \le n} |E_i| = \max_{0 \le i \le n} |u_i - u(x_i)| $$ which measures the largest pointwise error over the interval. Other common norms are the $1$-norm $$ \|E\|_1 = h \sum_{i=0}^n |E_i| $$ and the $2$-norm $$ \|E\|_2 = \left(h \sum_{i=0}^n |E_i|^2 \right)^{1/2}. $$ Notice the factor of $h$ appearing in these definitions. This is needed so the norm does not spuriously grow when we add more points. ### Aside These are special cases of $l_p$ norms $$ \|E\|_p = \left(h \sum_{i=0}^n |E_i|^p\right)^{1/p}. $$ ``` # Here we use the 2-norm def error(u, exact, h): return numpy.sqrt(h)*numpy.linalg.norm(u - exact) def mms_errors(neumann): errors = [] Ns = numpy.asarray(list(2**i for i in range(4, 11))) rhsfunc = lambda x: -numpy.exp(x) exact = lambda x: numpy.exp(x) for N in Ns: x, L, rhs, h = laplacian(N, rhsfunc) L, rhs, h = apply_dirichlet(L, rhs, h, [exact(0)], [0]) L, rhs, h = neumann(L, rhs, h, exact(1), N) u = numpy.linalg.solve(L, rhs) errors.append(error(u, exact(x), 1/N)) return 1/Ns, numpy.asarray(errors) _, oneside = mms_errors(apply_neumann_oneside) hs, ghost = mms_errors(apply_neumann_ghost) pyplot.figure() pyplot.loglog(hs, oneside, "o", label="Oneside"); pyplot.loglog(hs, ghost, "x", label="Ghost"); pyplot.loglog(hs, hs, label="$\mathcal{O}(h)$"); pyplot.loglog(hs, hs**2, label="$\mathcal{O}(h^2)$"); pyplot.legend(); ``` This confirms our suspicion that the one-sided differencing for the Neumann condition is only first-order accurate. An alternative approach to obtaining a second-order scheme (rather than the ghost method above) is to try and determine a second-order accurate one-sided difference approximation to the first derivative. We will state an example first, and then see where it comes from. 
A second-order accurate one-sided approximation to the first derivative is obtained with $$ \frac{\text{d} u}{\text{d} x} \approx \frac{1}{h}\left(\frac{3}{2} u_i - 2 u_{i-1} + \frac{1}{2} u_{i-2}\right) $$ ``` def apply_neumann_oneside_second(L, rhs, h, b, index): N, _ = L.shape assert index == N - 1 L[index, :] = 0 L[index, index] = 3/(2*h) L[index, index - 1] = -2/h L[index, index - 2] = 1/(2*h) rhs[index] = b return L, rhs, h _, oneside = mms_errors(apply_neumann_oneside) _, second = mms_errors(apply_neumann_oneside_second) hs, ghost = mms_errors(apply_neumann_ghost) pyplot.figure() pyplot.loglog(hs, oneside, "o", label="Oneside"); pyplot.loglog(hs, second, "s", label="Oneside second order"); pyplot.loglog(hs, ghost, "x", label="Ghost"); pyplot.loglog(hs, hs, label="$\mathcal{O}(h)$"); pyplot.loglog(hs, hs**2, label="$\mathcal{O}(h^2)$"); pyplot.legend(); ``` This converges at second order as, perhaps, expected. The absolute error is a little worse than the ghosted version. However, this approach is sometimes more convenient, especially on irregularly spaced meshes. ## Deriving high-order finite difference stencils Where did the approximation $$ \frac{\text{d} u}{\text{d} x} \approx \frac{1}{h}\left(\frac{3}{2} u_i - 2 u_{i-1} + \frac{1}{2} u_{i-2}\right) $$ come from? Given some points at which we're allowed to evaluate $u$, we can derive an appropriate formula from the Taylor series using the *method of undetermined coefficients*. This works in a very similar way to determining the truncation error for a given expansion. For the example above, we want to approximation $u'(x)$ and we are given $u_i = u(x)$, $u_{i-1} = u(x - h)$, and $u_{i-2} = u(x - 2h)$. We can write our differencing operator as a *linear combination* of the provided points $$ D_2 u(x) = a u(x) + bu(x-h) + c u(x - 2h) $$ where our goal is to determine $a$, $b$, and $c$ to minimise the truncation error (that is, give the best possible accuracy). Let's Taylor-expand on the right hand side $$ D_2 u(x) = a u(x) + b \overbrace{\left(u(x) - hu'(x) + \frac{h^2}{2} u''(x) - \frac{h^3}{6} u'''(x)\right)}^{u(x - h)} + c \overbrace{\left(u(x) - 2h u'(x) + \frac{4h^2}{2}u''(x) - \frac{8 h^3}{6} u'''(x)\right)}^{u(x-2h)} + \mathcal{O}(h^4) $$ gathering terms we have $$ D_2 u(x) = (a + b + c)u(x) - (b + 2c) h u'(x) + \frac{1}{2}(b + 4c)h^2 u''(x) - \frac{1}{6}(b + 8c) h^3 u'''(x) + \mathcal{O}(h^4). $$ To maximise the accuracy of agreement with $u'(x)$ we need $$ \begin{aligned} a + b + c &= 0 && \text{zeroing the $h^0 u(x)$ term}\\ b + 2c &= -\frac{1}{h} && \text{ensuring that we have a $u'(x)$ term}\\ b + 4c &= 0 && \text{zeroing the $h^2 u''(x)$ term}\\ b + 8c &= 0 && \text{zeroing the $h^3 u'''(x)$ term}. \end{aligned} $$ Since we have only three unknowns, we can only satisfy three equations. To maxmimise the accuracy, we'll choose to zero the $h^2$ and $h^0$ terms, and live with the $h^3$ term. We therefore need to solve the linear system $$ \begin{bmatrix} 1 & 1 & 1\\ 0 & 1 & 2\\ 0 & 1 & 4 \end{bmatrix} \begin{bmatrix} a \\ b \\ c \end{bmatrix} = \begin{bmatrix} 0 \\ -\frac{1}{h} \\ 0 \end{bmatrix}. $$ ``` import numpy A = numpy.asarray([[1, 1, 1], [0, 1, 2], [0, 1, 4]]) b = numpy.asarray([0, -1, 0]) numpy.linalg.solve(A, b) ``` So we have $$ \begin{bmatrix} a \\ b \\ c \end{bmatrix} = \frac{1}{2h} \begin{bmatrix} 3\\ -4\\ 1 \end{bmatrix} $$ and hence our optimal formula is $$ D_2 u(x) = \frac{1}{2h}(3 u_i - 4 u_{i-1} + u_{i-2}) $$ as advertised. 
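The same calculation can be automated by setting up the Taylor-matching conditions for an arbitrary set of stencil offsets and solving the resulting linear system. The helper below is a sketch of that idea; the name `fd_weights` and its interface are my own, not part of the notebook.

```
import numpy as np
from math import factorial

def fd_weights(offsets, m, h=1.0):
    """Weights c_k such that sum_k c_k * u(x + offsets[k]*h) approximates the
    m-th derivative of u at x, by matching the first len(offsets) Taylor terms."""
    s = np.asarray(offsets, dtype=float)
    n = len(s)
    V = np.vander(s, n, increasing=True).T  # V[j, k] = offsets[k]**j
    rhs = np.zeros(n)
    rhs[m] = factorial(m) / h**m            # keep the u^(m)(x) term, zero the others
    return np.linalg.solve(V, rhs)

print(fd_weights([0, -1, -2], m=1))  # [ 1.5 -2.   0.5]  -- the stencil derived above (h = 1)
print(fd_weights([-1, 0, 1], m=2))   # [ 1. -2.  1.]     -- the centred second derivative
```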
We can immediately determine the accuracy of this approximation since we know the first term we did not manage to match exactly is $$ - \frac{1}{6}(b + 8c) h^3 u'''(x) $$ substituting in the values for $b$ and $c$ we have $$ \begin{aligned} D_2 u(x) - u'(x) &= -\frac{1}{6}\left(\frac{-2}{h} + \frac{8}{2h}\right) h^3 u'''(x) + \mathcal{O}(h^4)\\ &= -\frac{1}{3} h^2 u'''(x) + \mathcal{O}(h^4) \end{aligned} $$ so this approximation is second order accurate. ``` def dtwo(x, u): h = x[2:] - x[1:-1] du = 1/(2*h) * (3 * u[2:] - 4*u[1:-1] + u[:-2]) return x[2:], du grids = 2**numpy.arange(3, 10) def error(f, df, op): for n in grids: x = numpy.linspace(-1, 1, n) x, y = op(x, f(x)) yield numpy.sqrt(1/n)*numpy.linalg.norm(y - df(x), None) pyplot.figure() pyplot.loglog(1/grids, list(error(numpy.sin, numpy.cos, dtwo)), marker="o", linestyle="none", label=dtwo.__name__) pyplot.xlabel("Resolution ($h$)") pyplot.ylabel("$l_2$ error in derivative") pyplot.loglog(1/grids, 1/grids**2, label="$h^2$") pyplot.loglog(1/grids, 1/grids**2, label="$h^2$") pyplot.legend(); numpy.linalg.norm? ``` ## Questions 1. Use this technique to derive a third-order accurate one-sided differencing operator for $\frac{\text{d}}{\text{d} x}$ using 4 points $u(x)$, $u(x - h)$, $u(x - 2h)$, $u(x - 3h)$. ## Differencing for advection For centered difference approximations, there is no asymmetry in the stencil. For one-sided approximations, however, there is. If we just consider a three-point region centered around a point $i$, we can write the three approximations to $\frac{\text{d}}{\text{d} x}$ we have seen as stencils: $$ \begin{aligned} D_+ &= \frac{1}{h}\begin{bmatrix}0 & -1 &1\end{bmatrix}\\ D_- &= \frac{1}{h}\begin{bmatrix}-1 & 1 & 0\end{bmatrix}\\ D_0 &= \frac{1}{2h}\begin{bmatrix}-1 & 0 & 1\end{bmatrix}\\ \end{aligned}. $$ Recall that we noticed that the centered difference approximation sometimes gave catastrophic results (all zero derivatives) for very rough functions. The question therefore might arise how to pick between $D_+$ and $D_-$. We will study this using the linear *advection equation* as a prototype. This equation models the transport of some material by a bulk motion. This (especially when talking about fluid flow) is called convection. As usual [wikipedia has lots of information](https://en.wikipedia.org/wiki/Advection). It looks remarkably benign, find $u(x, t)$ satisfying $$ \partial_t u + c \cdot \nabla u = f(t, x) $$ Where $c$ is the advecting velocity and $$ \nabla u = \partial_x u $$ in one dimension, and $$ \nabla u = \begin{bmatrix} \partial_x u\\ \partial_y u\end{bmatrix} $$ in two dimensions. This is a first-order PDE, for which we need to supply one boundary condition (to pin down the spatial derivative) and one initial condition (to start everything off). The boundary condition, it turns out, has to be at the *inflow* boundary. Let's try and solve this equation with an explicit Euler time integration scheme on the interval $[0, 5]$ and look at the effect of the different differencing operators. If we write this out we have (using superscripts for time points and subscripts for spatial points) $$ u^{n+1}_i = u^n_i + \Delta t (f^n_i - c D u^n_i). 
$$ We'll set the forcing function $f$ to be zero and pick boundary condition $$ u(t, 0) = 0 $$ and initial condition $$ u(0, x) = e^{-2(x - 2.5)^2} $$ ``` L = 30 nx = 50 x = numpy.linspace(0, L, nx + 1) h = L/nx u = numpy.exp(-2*(x - L/2)**2) uhat = numpy.zeros_like(x) uhat[nx//3:2*nx//3] = 1 def Aupwind(nx, h, c): A = numpy.zeros((nx+1, nx+1), dtype=float) for i in range(1, nx+1): A[i, i-1] = c/h A[i, i] = -c/h # Boundary condition (will be fixed later) A[0, 0] = 0 return A def Adownwind(nx, h, c): A = numpy.zeros((nx+1, nx+1), dtype=float) for i in range(1, nx): A[i, i] = c/h A[i, i+1] = -c/h # Boundary condition (will be fixed later) A[0, 0] = 0 A[nx, nx] = c/h return A t = 0 tfinal = 10 c = 1 dt = 2/(nx + 1) Id = numpy.eye(nx+1) downwind = Id + dt*Adownwind(nx, h, c) upwind = Id + dt*Aupwind(nx, h, c) u = uhat hist = [(t, u)] while t < tfinal: u = upwind @ u hist.append((t, u)) t += dt hist = numpy.asarray(hist) pyplot.figure() for t, hist_ in hist[::len(hist)//10]: pyplot.plot(x, hist_, label=f"$t = {t:3.1f}$") pyplot.legend(); ``` ## Observations 1. The "downwind" discretisation is unstable (blowing up near the boundary). The upwind version is stable, if we have a small enough time step. This makes physical sense, because the downwind discretisation is trying to obtain information "from the future". 2. The upwind discretisation is *dissipative*: the correct physical solution is just to transport the initial condition to the right, but we see that the peak spreads and flattens. 3. This also occurs with sharp fronts (e.g. transporting a hat function) 4. Some of this gets easier if we naturally have some diffusion (a second derivative) in the equation (as in the coursework).
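As a rough illustration of observation 4 (the parameters and helper arrays here are my own choices, not from the notebook), one could add a small explicit diffusion term $\nu D^2 u$ to the upwind update; the explicit Euler step then has to respect both the advective and the diffusive stability limits:

```
import numpy

L, nx, c, nu = 30, 50, 1.0, 0.05
h = L / nx
dt = 0.4 * min(h / c, h**2 / (2 * nu))   # heuristic time step respecting both limits
x = numpy.linspace(0, L, nx + 1)
u = numpy.zeros_like(x)
u[nx // 3:2 * nx // 3] = 1.0             # hat-function initial condition

t = 0.0
while t < 5.0:
    dudx = numpy.zeros_like(u)
    dudx[1:] = (u[1:] - u[:-1]) / h      # upwind (D_-) derivative, appropriate for c > 0
    d2u = numpy.zeros_like(u)
    d2u[1:-1] = (u[2:] - 2 * u[1:-1] + u[:-2]) / h**2
    u = u + dt * (-c * dudx + nu * d2u)  # explicit Euler advection-diffusion step
    u[0] = 0.0                           # inflow boundary condition
    t += dt
# u can then be plotted against x as in the cell above.
```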
%matplotlib notebook import numpy from matplotlib import pyplot import matplotlib.lines as mlines pyplot.style.use('ggplot') n = 200 h = 2/(n-1) x = numpy.linspace(1,2.5,n) pyplot.plot(x, numpy.sin(x)); def newline(p1, p2, **kwargs): ax = pyplot.gca() xmin, xmax = ax.get_xbound() if(p2[0] == p1[0]): xmin = xmax = p1[0] ymin, ymax = ax.get_ybound() else: ymax = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmax-p1[0]) ymin = p1[1]+(p2[1]-p1[1])/(p2[0]-p1[0])*(xmin-p1[0]) l = mlines.Line2D([xmin,xmax], [ymin,ymax], **kwargs) ax.add_line(l) return l h = 0.25 xi = 1.6 ximinus = xi - h xiplus = xi + h pyplot.plot([ximinus, xi, xiplus], numpy.sin([ximinus, xi, xiplus]), marker="o", linestyle="none") newline((xi, numpy.sin(xi)), (xiplus, numpy.sin(xiplus)), linestyle="dashed", label="$D_+ u(x)$") newline((xi, numpy.sin(xi)), (ximinus, numpy.sin(ximinus)), linestyle="dotted", label="$D_- u(x)$") newline((ximinus, numpy.sin(ximinus)), (xiplus, numpy.sin(xiplus)), linestyle="-.", label="$D_0 u(x)$") newline((xi, numpy.sin(xi)), (xiplus, numpy.sin(xi) + h*numpy.cos(xi)), color="black", label="$u'(x)$") pyplot.legend(); n = 10 h = 2/(n-1) x = numpy.linspace(-1,1,n) u = numpy.sin(x) pyplot.figure() pyplot.plot(x, numpy.cos(x), label="$u'$"); pyplot.plot(x[1:], (u[1:] - u[:-1])/h, label="$D_-$", marker="o", linestyle="none") pyplot.legend(); from functools import reduce from operator import mul def u(x, n=0): factor = (-1)**(n // 2) if n % 2 == 0: u_ = numpy.sin else: u_ = numpy.cos return factor * u_(x) pyplot.figure() x = numpy.linspace(0.5, 1.75, 500) pyplot.plot(x, u(x)); x0 = 0.6 h = 0.8 def fac(n): return reduce(mul, range(1, n+1), 1) def taylor(u, x0, h, n): return u(x0) + sum(h**i/fac(i) * u(x0, i) for i in range(1, n)) xs = numpy.linspace(x0, x0+h, 20) for n in range(1, 5): pyplot.plot(xs, [taylor(u, x0, x - x0, n) for x in xs], marker="o", label=r"$\tilde{u} + \mathcal{O}(h^%d)$" % n) pyplot.legend(); def dplus(x, u): return x[:-1], (u[1:] - u[:-1])/(x[1:] - x[:-1]) def dminus(x, u): return x[1:], (u[1:] - u[:-1])/(x[1:] - x[:-1]) def center(x, u): return x[1:-1], (u[2:] - u[:-2])/(x[2:] - x[:-2]) grids = 2**numpy.arange(3, 10) def error(f, df, op): for n in grids: x = numpy.linspace(-1, 1, n) x, y = op(x, f(x)) yield numpy.linalg.norm(y - df(x), numpy.inf) pyplot.figure() for op in [dplus, dminus, center]: pyplot.loglog(1/grids, list(error(numpy.sin, numpy.cos, op)), marker="o", linestyle="none", label=op.__name__) pyplot.xlabel("Resolution ($h$)") pyplot.ylabel("$l_\infty$ error in derivative") pyplot.loglog(1/grids, 1/grids, label="$h$") pyplot.loglog(1/grids, 1/grids**2, label="$h^2$") pyplot.legend(); x = numpy.linspace(-1, 1, 9) xf = numpy.linspace(-1, 1, 100) def f(x): return numpy.cos(1/2 + 4*numpy.pi*x) def df(x): return -4*numpy.pi*numpy.sin(1/2 + 4*numpy.pi*x) pyplot.figure() pyplot.plot(x, f(x), marker="o", label="coarse") pyplot.plot(xf, f(xf), "-", label="fine") pyplot.legend(); pyplot.figure() for op in [dplus, dminus, center]: x_, y = op(x, f(x)) pyplot.plot(x_, y, "o-", label=op.__name__) pyplot.plot(xf, df(xf), "-", label="Exact") pyplot.legend(); def laplacian(N, rhsfunc): x = numpy.linspace(0, 1, N+1) h = 1/N rhs = rhsfunc(x) e = numpy.ones(N) # interior discretisation L = (2*numpy.eye(N+1) - numpy.diag(e, 1) - numpy.diag(e, -1)) / h**2 return x, L, rhs, h def apply_dirichlet(L, rhs, h, vals, indices): N, _ = L.shape diag = numpy.eye(1, N) bcmask = numpy.zeros(N, dtype=bool) bcmask[indices] = True # Dirichlet rows L[numpy.ix_(bcmask)] = numpy.vstack([numpy.roll(diag, i) for i in 
indices]) rhs[numpy.ix_(bcmask)] = vals # Forward substitute rhs[numpy.ix_(~bcmask)] -= L[numpy.ix_(~bcmask, bcmask)] @ vals L[numpy.ix_(~bcmask, bcmask)] = 0 return L, rhs, h def apply_neumann_oneside(L, rhs, h, b, index): N, _ = L.shape assert index == N - 1 L[index, :] = 0 L[index, index] = 1/h L[index, index - 1] = -1/h rhs[index] = b return L, rhs, h def apply_neumann_ghost(L, rhs, h, b, index): N, _ = L.shape L[index, index] /= 2 rhs[index] = b/h + rhs[index]/2 return L, rhs, h N = 10 rhsfunc = lambda x: -numpy.exp(x) exact = lambda x: numpy.exp(x) x, L, rhs, h = laplacian(N, rhsfunc) L, rhs, h = apply_dirichlet(L, rhs, h, [exact(0)], [0]) L, rhs, h = apply_neumann_oneside(L, rhs, h, exact(1), N) uoneside = numpy.linalg.solve(L, rhs) x, L, rhs, h = laplacian(N, rhsfunc) L, rhs, h = apply_dirichlet(L, rhs, h, [exact(0)], [0]) L, rhs, h = apply_neumann_ghost(L, rhs, h, exact(1), N) ughost = numpy.linalg.solve(L, rhs) pyplot.figure() pyplot.plot(x, uoneside, label="Computed Oneside") pyplot.plot(x, ughost, label="Computed Ghost") pyplot.plot(x, exact(x), label="Exact") pyplot.legend(); # Here we use the 2-norm def error(u, exact, h): return numpy.sqrt(h)*numpy.linalg.norm(u - exact) def mms_errors(neumann): errors = [] Ns = numpy.asarray(list(2**i for i in range(4, 11))) rhsfunc = lambda x: -numpy.exp(x) exact = lambda x: numpy.exp(x) for N in Ns: x, L, rhs, h = laplacian(N, rhsfunc) L, rhs, h = apply_dirichlet(L, rhs, h, [exact(0)], [0]) L, rhs, h = neumann(L, rhs, h, exact(1), N) u = numpy.linalg.solve(L, rhs) errors.append(error(u, exact(x), 1/N)) return 1/Ns, numpy.asarray(errors) _, oneside = mms_errors(apply_neumann_oneside) hs, ghost = mms_errors(apply_neumann_ghost) pyplot.figure() pyplot.loglog(hs, oneside, "o", label="Oneside"); pyplot.loglog(hs, ghost, "x", label="Ghost"); pyplot.loglog(hs, hs, label="$\mathcal{O}(h)$"); pyplot.loglog(hs, hs**2, label="$\mathcal{O}(h^2)$"); pyplot.legend(); def apply_neumann_oneside_second(L, rhs, h, b, index): N, _ = L.shape assert index == N - 1 L[index, :] = 0 L[index, index] = 3/(2*h) L[index, index - 1] = -2/h L[index, index - 2] = 1/(2*h) rhs[index] = b return L, rhs, h _, oneside = mms_errors(apply_neumann_oneside) _, second = mms_errors(apply_neumann_oneside_second) hs, ghost = mms_errors(apply_neumann_ghost) pyplot.figure() pyplot.loglog(hs, oneside, "o", label="Oneside"); pyplot.loglog(hs, second, "s", label="Oneside second order"); pyplot.loglog(hs, ghost, "x", label="Ghost"); pyplot.loglog(hs, hs, label="$\mathcal{O}(h)$"); pyplot.loglog(hs, hs**2, label="$\mathcal{O}(h^2)$"); pyplot.legend(); import numpy A = numpy.asarray([[1, 1, 1], [0, 1, 2], [0, 1, 4]]) b = numpy.asarray([0, -1, 0]) numpy.linalg.solve(A, b) def dtwo(x, u): h = x[2:] - x[1:-1] du = 1/(2*h) * (3 * u[2:] - 4*u[1:-1] + u[:-2]) return x[2:], du grids = 2**numpy.arange(3, 10) def error(f, df, op): for n in grids: x = numpy.linspace(-1, 1, n) x, y = op(x, f(x)) yield numpy.sqrt(1/n)*numpy.linalg.norm(y - df(x), None) pyplot.figure() pyplot.loglog(1/grids, list(error(numpy.sin, numpy.cos, dtwo)), marker="o", linestyle="none", label=dtwo.__name__) pyplot.xlabel("Resolution ($h$)") pyplot.ylabel("$l_2$ error in derivative") pyplot.loglog(1/grids, 1/grids**2, label="$h^2$") pyplot.loglog(1/grids, 1/grids**2, label="$h^2$") pyplot.legend(); numpy.linalg.norm? 
L = 30 nx = 50 x = numpy.linspace(0, L, nx + 1) h = L/nx u = numpy.exp(-2*(x - L/2)**2) uhat = numpy.zeros_like(x) uhat[nx//3:2*nx//3] = 1 def Aupwind(nx, h, c): A = numpy.zeros((nx+1, nx+1), dtype=float) for i in range(1, nx+1): A[i, i-1] = c/h A[i, i] = -c/h # Boundary condition (will be fixed later) A[0, 0] = 0 return A def Adownwind(nx, h, c): A = numpy.zeros((nx+1, nx+1), dtype=float) for i in range(1, nx): A[i, i] = c/h A[i, i+1] = -c/h # Boundary condition (will be fixed later) A[0, 0] = 0 A[nx, nx] = c/h return A t = 0 tfinal = 10 c = 1 dt = 2/(nx + 1) Id = numpy.eye(nx+1) downwind = Id + dt*Adownwind(nx, h, c) upwind = Id + dt*Aupwind(nx, h, c) u = uhat hist = [(t, u)] while t < tfinal: u = upwind @ u hist.append((t, u)) t += dt hist = numpy.asarray(hist) pyplot.figure() for t, hist_ in hist[::len(hist)//10]: pyplot.plot(x, hist_, label=f"$t = {t:3.1f}$") pyplot.legend();
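For reference, here is the algebra that the `apply_neumann_ghost` modification above encodes (an editor's note, assuming the same $-u'' = f$ sign convention used by the `laplacian` helper). At the boundary node $x_N$ the interior stencil reads $(-u_{N-1} + 2u_N - u_{N+1})/h^2 = f_N$, where $u_{N+1}$ sits at a ghost point outside the domain. The centred approximation of the Neumann condition $u'(x_N) = b$, namely $(u_{N+1} - u_{N-1})/(2h) = b$, gives $u_{N+1} = u_{N-1} + 2hb$. Eliminating the ghost value and dividing the row by two yields
$$\frac{u_N - u_{N-1}}{h^2} = \frac{f_N}{2} + \frac{b}{h},$$
which is exactly the halved diagonal entry and the modified right-hand side $b/h + f_N/2$ applied in the code, and is why this treatment retains second-order accuracy.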
0.560974
0.986891
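The convergence studies above judge the order of accuracy by comparing error curves with reference slopes by eye. A small helper (an editor's sketch; `observed_order` is a name introduced here, not taken from the notebook) reports the empirical rate directly as the slope of a log-log fit. Applied to the `(h, error)` pairs returned by `mms_errors`, it should give roughly 1 for the first-order one-sided Neumann treatment and roughly 2 for the ghost-point treatment.

```
import numpy

def observed_order(hs, errors):
    """Slope of a least-squares fit of log(error) against log(h)."""
    slope, _intercept = numpy.polyfit(numpy.log(hs), numpy.log(errors), 1)
    return slope

# Self-contained check on manufactured data that decays like h**2
hs = 1.0 / 2**numpy.arange(3, 10)
errors = 3.0 * hs**2
print(observed_order(hs, errors))   # prints approximately 2.0
```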
``` import requests from bs4 import BeautifulSoup import pandas as pd ``` ### Get the URL of the website with election results - <i>Here we are looking at <b>UNOFFICIAL</b> results collected by this site </i> ``` url = "https://www.tibetsun.com/elections/sikyong-2016-final-round-results#election-results" req = requests.get(url) data = req.text soup = BeautifulSoup(data) ``` ### Read how BeautifulSoup (parser) works [here](https://www.crummy.com/software/BeautifulSoup/bs4/doc/#searching-the-tree) - Here I'm getting the data collection ready by creating these List variables - The first data row happens to be the overall total (votes) - In a few seconds you will see below that the rest of the vote count is provided by location ``` #Overall Results location_total_vote_list = [] ls_vote_count_list = [] pt_vote_count_list = [] election_results = soup.find("table", class_="election-results elections-page") location_total_vote_list.append("Total Overall") ### Read the Beautiful Soup documentation to find out how to target specific HTML tags ### Here I'm finding all the "TR" Table rows from within the Table data in HTML from the site ### In particular, I'm finding, for all "TR", the "nm" or Name of the candidates. ### And also the total "vt" or votes the candidate got for row in election_results.find_all('tr'): if("Lobsang" in row.find(class_="nm").string): ls_vote_count_list.append(row.find(class_="vt").string) elif ("Penpa" in row.find(class_="nm").string): pt_vote_count_list.append(row.find(class_="vt").string) ### TESTING #print(row.find(class_ ="nm").string) #print(row.find(class_="vt").string) def test_results(): print(len(location_total_vote_list)) print(len(ls_vote_count_list)) print(len(pt_vote_count_list)) ```
#### Here we will be collecting or scraping all the vote counts for each location - We are using Beautiful Soup to automate the bulk data collection ``` ### HERE finding all the Table Rows with vote total for each "Location" #Results by location - find all table (HTML) rows with "Location" names results_by_location = soup.find("div", class_="vote-results") ``` - We have the bulk data; let's iterate over each row or chunk of the data - Each chunk contains the names of the candidates and their vote totals - Unhide the comments to see what's happening behind the scenes ``` #Iterate through each "Location" data row in Table #Extract the total vote count for each candidate for row in results_by_location.find_all("table"): #print(row.contents[1].find('h3')) #print(row.contents[2].find("h3").contents[0].string) location_total_vote_list.append(row.contents[2].find("h3").contents[0].string) #print(" Lobsang:", row.contents[4].find(class_="vt").string) ls_vote_count_list.append(row.contents[4].find(class_ ="vt").string) #print(" Penpa:", row.contents[6].find(class_="vt").string) pt_vote_count_list.append(row.contents[6].find(class_="vt").string) #test_results() def test_location_vote_count(): for index in range(0, len(location_total_vote_list)): print(location_total_vote_list[index]) print(" LS: ", ls_vote_count_list[index]) print(" PT: ", pt_vote_count_list[index]) ```
### Convert the Lists into a pandas DataFrame - This allows us to do manipulation, cleaning and further calculations much faster and more easily ``` #Convert Lists to DataFrame (Panda) vote_total_table = pd.DataFrame( {'Location':location_total_vote_list, 'Lobsang Sangay': ls_vote_count_list, 'Penpa Tsering': pt_vote_count_list }) ``` - More cleaning to do: convert the "vote counts", which are "objects" (strings), into "integers" - Without this conversion we cannot do math calculations, or we will get errors (Type errors) ``` #Convert "vote count" which is String Object into "int" #This allows calculations to be done vote_total_table['Lobsang Sangay'] = vote_total_table['Lobsang Sangay'].str.replace(',','').astype('int') vote_total_table['Penpa Tsering'] = vote_total_table['Penpa Tsering'].str.replace(',','').astype('int') #Test show the first row with Overall Vote Count Data vote_total_table.head(1) ```
#### We are doing more cleaning here - We have votes from big European regions - But we need to calculate the overall Europe vote total ``` #Tabulate the EU data total #All Table rows containing "Europe" in Location tag eu = vote_total_table.query('Location.str.contains("Europe") and not(Location.str.contains("Overall"))', engine='python') print("Lobsang Sangay: ", eu['Lobsang Sangay'].sum()) print("Penpa Tsering: ", eu['Penpa Tsering'].sum()) ``` #### A little more cleaning to do - We have votes from local communities, mostly in Australia and some in Asia - But we need to calculate the overall "Australasia" vote total ``` #Tabulate the Austral_Asia data total #All Table rows containing "Austra" in Location tag vote_total_table.loc[vote_total_table.Location == "Australasia (overall)", 'Location'] = "Australasia (Overall)" asia = vote_total_table.query('Location.str.contains("Austra") and not(Location.str.contains("Overall"))', engine='python') print("Lobsang Sangay: ", asia['Lobsang Sangay'].sum()) print("Penpa Tsering: ", asia['Penpa Tsering'].sum()) asia asia_total = vote_total_table[vote_total_table['Location'].str.contains("Austra")] asia_total ``` - "North America (overall)" already exists - Modify and update "(overall)" to "(Overall)" to make it consistent ``` vote_total_table.loc[vote_total_table.Location == "North America (overall)", 'Location'] = "North America (Overall)" ``` - Add a "Europe (Overall)" row to the dataframe (vote_total_table) ``` ### Europe Overall, add it to Vote_Total_Table Count eu_total = vote_total_table[vote_total_table['Location'].str.contains("Europe")] eu_total["Lobsang Sangay"].sum() eu_total["Penpa Tsering"].sum() vote_total_table =vote_total_table.append(pd.Series(['Europe (Overall)', eu_total["Lobsang Sangay"].sum(), eu_total["Penpa Tsering"].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index=True) ```
#### Create a South Asia Total ``` ### South Asia ## Tabulate the South Asia Vote Count Total by filtering out: Europe, AustralaAsia and North America. #Add this Total to Vote_Count_Total_Table south_asia_total = vote_total_table.query('~Location.str.contains("Europe") and' +'~Location.str.contains("Australa") and ~Location.str.contains("America")' +'and ~Location.str.contains("Overall")', engine='python') #print(south_asia_total) vote_total_table = vote_total_table.append(pd.Series(['South Asia (Overall)', south_asia_total["Lobsang Sangay"].sum(), south_asia_total["Penpa Tsering"].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index=True) ``` #### Create India and Nepal (Subtotals) ``` ###Nepal & India Only ##Tabulate total vote count for Nepal and India (separately) nepal_total = vote_total_table.query('Location.str.contains("Nepal")', engine='python') vote_total_table = vote_total_table.append(pd.Series(['Nepal (Overall)', nepal_total['Lobsang Sangay'].sum(), nepal_total['Penpa Tsering'].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index=True) india_total = south_asia_total.query('~Location.str.contains("Nepal")', engine='python') vote_total_table = vote_total_table.append(pd.Series(['India (Overall)', india_total['Lobsang Sangay'].sum(), india_total['Penpa Tsering'].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index = True); nepal_total ```
#### Create a new column "Voter Population %" - This column calculates what portion of the total voting population this region or community represents ``` ### Calculate the Total Vote Count, in order to calculate the candidate's vote count (%) grand_total = vote_total_table['Lobsang Sangay'][0] + vote_total_table['Penpa Tsering'][0] vote_total_table['Voter Population %'] = round(100*((vote_total_table['Lobsang Sangay'] + vote_total_table['Penpa Tsering']) / grand_total), 2) ``` - Create an additional column called "Lobsang Sangay %" - This column is created by calculating the percentage of votes received by the winning candidate ``` ### Calculate the Winning Candidate's performance for each location or region vote_total_table['Lobsang Sangay %'] = round(100*(vote_total_table['Lobsang Sangay']/(vote_total_table['Lobsang Sangay'] + vote_total_table['Penpa Tsering'])), 1) ``` #### Sort the table by "Voter Population %" in descending order - So we will see which regions or communities represent the bulk of the voting population ``` vote_total_table = vote_total_table.sort_values(by=['Voter Population %'], ascending=False) vote_total_table.head(20) ``` - Here we are using the sorted table to identify where the winning candidate's performance was subpar ``` ### Laggards Region laggards = vote_total_table.query('~Location.str.contains("Overall")', engine='python').sort_values(by=['Lobsang Sangay %'], ascending=True) print(laggards[laggards["Lobsang Sangay %"] <= 57.3]["Voter Population %"].sum(), "%") #print(temp) laggards[laggards["Lobsang Sangay %"] <= 57.3] ## 37.5 % ``` - Here we see the regions or communities where the winning candidate performed the best ``` ###Best performer location strong_holds = vote_total_table.query('~Location.str.contains("Overall")', engine='python').sort_values(by=['Lobsang Sangay %'], ascending=False).head(30) print((strong_holds[strong_holds["Lobsang Sangay %"] >= 57.3]["Voter Population %"].sum(), "%")) strong_holds[strong_holds["Lobsang Sangay %"] >= 57.3] ``` #### Vote Count by region - See the top-performing regions - Notice the "overall total" in the second-to-last row ``` vote_total_table.query('Location.str.contains("Overall")', engine='python').sort_values(by=['Lobsang Sangay %'], ascending=False).head(20) vote_total_table.to_csv("2016_election_table.csv", index=False) ```
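The cells above only work against the live page. The same `find` / `find_all` / `class_` pattern can be exercised offline on a tiny hand-written table (an editor's sketch; the HTML snippet and the vote figures in it are placeholders, not the real results), which makes it easier to see what the selectors return before pointing them at a real site.

```
from bs4 import BeautifulSoup

# Tiny stand-in for one results table, reusing the "nm"/"vt" class names from above
html = """
<table class="election-results">
  <tr><td class="nm">Candidate One</td><td class="vt">1,234</td></tr>
  <tr><td class="nm">Candidate Two</td><td class="vt">987</td></tr>
</table>
"""
soup = BeautifulSoup(html, "html.parser")
for row in soup.find("table", class_="election-results").find_all("tr"):
    name = row.find(class_="nm").string    # text inside the "nm" cell
    votes = row.find(class_="vt").string   # text inside the "vt" cell
    print(name, votes)
```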
github_jupyter
import requests from bs4 import BeautifulSoup import pandas as pd url = "https://www.tibetsun.com/elections/sikyong-2016-final-round-results#election-results" req = requests.get(url) data = req.text soup = BeautifulSoup(data) #Overall Results location_total_vote_list = [] ls_vote_count_list = [] pt_vote_count_list = [] election_results = soup.find("table", class_="election-results elections-page") location_total_vote_list.append("Total Overall") ### Read the Beautiful Soup documentation to find out how to target specific HTML tags ### Here I'm find all the "TR" Table row from within the Table data in HTML from the site ### In Particular, I'm finding for all "TR", the "nm" or Name of candidates. ### And alos, total "vt" or votes the candidate got for row in election_results.find_all('tr'): if("Lobsang" in row.find(class_="nm").string): ls_vote_count_list.append(row.find(class_="vt").string) elif ("Penpa" in row.find(class_="nm").string): pt_vote_count_list.append(row.find(class_="vt").string) ### TESTING #print(row.find(class_ ="nm").string) #print(row.find(class_="vt").string) def test_results(): print(len(location_total_vote_list)) print(len(ls_vote_count_list)) print(len(pt_vote_count_list)) ### HERE finding all the Table Rows with vote total for each "Location" #Results by location - find all table (HTML) rows with "Location" names results_by_location = soup.find("div", class_="vote-results") #Iterate through each "Location" data row in Table #Extract the total vote count for each candidate for row in results_by_location.find_all("table"): #print(row.contents[1].find('h3')) #print(row.contents[2].find("h3").contents[0].string) location_total_vote_list.append(row.contents[2].find("h3").contents[0].string) #print(" Lobsang:", row.contents[4].find(class_="vt").string) ls_vote_count_list.append(row.contents[4].find(class_ ="vt").string) #print(" Penpa:", row.contents[6].find(class_="vt").string) pt_vote_count_list.append(row.contents[6].find(class_="vt").string) #test_results() def test_location_vote_count(): for index in range(0, len(location_total_vote_list)): print(location_total_vote_list[index]) print(" LS: ", ls_vote_count_list[index]) print(" PT: ", pt_vote_count_list[index]) #Convert Lists to DataFrame (Panda) vote_total_table = pd.DataFrame( {'Location':location_total_vote_list, 'Lobsang Sangay': ls_vote_count_list, 'Penpa Tsering': pt_vote_count_list }) #Convert "vote count" which is String Object into "int" #This allows calculations to be done vote_total_table['Lobsang Sangay'] = vote_total_table['Lobsang Sangay'].str.replace(',','').astype('int') vote_total_table['Penpa Tsering'] = vote_total_table['Penpa Tsering'].str.replace(',','').astype('int') #Test show the first row with Overall Vote Count Data vote_total_table.head(1) #Tabulate the EU data total #All Table rows containing "Europe" in Location tag eu = vote_total_table.query('Location.str.contains("Europe") and not(Location.str.contains("Overall"))', engine='python') print("Lobsang Sangay: ", eu['Lobsang Sangay'].sum()) print("Penpa Tsering: ", eu['Penpa Tsering'].sum()) #Tabulate the Austral_Asia data total #All Table rows containing "Austra" in Location tag vote_total_table.loc[vote_total_table.Location == "Australasia (overall)", 'Location'] = "Australasia (Overall)" asia = vote_total_table.query('Location.str.contains("Austra") and not(Location.str.contains("Overall"))', engine='python') print("Lobsang Sangay: ", asia['Lobsang Sangay'].sum()) print("Penpa Tsering: ", asia['Penpa Tsering'].sum()) asia asia_total = 
vote_total_table[vote_total_table['Location'].str.contains("Austra")] asia_total vote_total_table.loc[vote_total_table.Location == "North America (overall)", 'Location'] = "North America (Overall)" ### Europe Overall, add it to Vote_Total_Table Count eu_total = vote_total_table[vote_total_table['Location'].str.contains("Europe")] eu_total["Lobsang Sangay"].sum() eu_total["Penpa Tsering"].sum() vote_total_table =vote_total_table.append(pd.Series(['Europe (Overall)', eu_total["Lobsang Sangay"].sum(), eu_total["Penpa Tsering"].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index=True) ### South Asia ## Tabulate the South Asia Vote Count Total by filtering out: Europe, AustralaAsia and North America. #Add this Total to Vote_Count_Total_Table south_asia_total = vote_total_table.query('~Location.str.contains("Europe") and' +'~Location.str.contains("Australa") and ~Location.str.contains("America")' +'and ~Location.str.contains("Overall")', engine='python') #print(south_asia_total) vote_total_table = vote_total_table.append(pd.Series(['South Asia (Overall)', south_asia_total["Lobsang Sangay"].sum(), south_asia_total["Penpa Tsering"].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index=True) ###Nepal & India Only ##Tabulate total vote count for Nepal and India (separately) nepal_total = vote_total_table.query('Location.str.contains("Nepal")', engine='python') vote_total_table = vote_total_table.append(pd.Series(['Nepal (Overall)', nepal_total['Lobsang Sangay'].sum(), nepal_total['Penpa Tsering'].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index=True) india_total = south_asia_total.query('~Location.str.contains("Nepal")', engine='python') vote_total_table = vote_total_table.append(pd.Series(['India (Overall)', india_total['Lobsang Sangay'].sum(), india_total['Penpa Tsering'].sum()], index=['Location', 'Lobsang Sangay', 'Penpa Tsering']), ignore_index = True); nepal_total ### Calculate the Total Vote Count, in order to calculate the candidate's vote count (%) grand_total = vote_total_table['Lobsang Sangay'][0] + vote_total_table['Penpa Tsering'][0] vote_total_table['Voter Population %'] = round(100*((vote_total_table['Lobsang Sangay'] + vote_total_table['Penpa Tsering']) / grand_total), 2) ### Calculate the Winning Candidate's performance for each location or region vote_total_table['Lobsang Sangay %'] = round(100*(vote_total_table['Lobsang Sangay']/(vote_total_table['Lobsang Sangay'] + vote_total_table['Penpa Tsering'])), 1) vote_total_table = vote_total_table.sort_values(by=['Voter Population %'], ascending=False) vote_total_table.head(20) ### Laggards Region laggards = vote_total_table.query('~Location.str.contains("Overall")', engine='python').sort_values(by=['Lobsang Sangay %'], ascending=True) print(laggards[laggards["Lobsang Sangay %"] <= 57.3]["Voter Population %"].sum(), "%") #print(temp) laggards[laggards["Lobsang Sangay %"] <= 57.3] ## 37.5 % ###Best performer location strong_holds = vote_total_table.query('~Location.str.contains("Overall")', engine='python').sort_values(by=['Lobsang Sangay %'], ascending=False).head(30) print((strong_holds[strong_holds["Lobsang Sangay %"] >= 57.3]["Voter Population %"].sum(), "%")) strong_holds[strong_holds["Lobsang Sangay %"] >= 57.3] vote_total_table.query('Location.str.contains("Overall")', engine='python').sort_values(by=['Lobsang Sangay %'], ascending=False).head(20) vote_total_table.to_csv("2016_election_table.csv", index=False)
0.130923
0.734
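As the notes above stress, the comma-formatted vote strings have to be cast to integers before any arithmetic is possible. A self-contained miniature of that cleaning step and of the two percentage columns (an editor's sketch on placeholder numbers, not the scraped data) looks like this.

```
import pandas as pd

df = pd.DataFrame({
    "Location": ["Total Overall", "Region A", "Region B"],
    "Candidate 1": ["5,000", "3,000", "2,000"],
    "Candidate 2": ["2,500", "1,000", "1,500"],
})

# Strings such as "5,000" cannot be summed; strip the comma and cast to int first
for col in ["Candidate 1", "Candidate 2"]:
    df[col] = df[col].str.replace(",", "", regex=False).astype("int")

# Share of the overall vote represented by each row, and Candidate 1's score per row
grand_total = df["Candidate 1"][0] + df["Candidate 2"][0]
df["Voter Population %"] = round(100 * (df["Candidate 1"] + df["Candidate 2"]) / grand_total, 2)
df["Candidate 1 %"] = round(100 * df["Candidate 1"] / (df["Candidate 1"] + df["Candidate 2"]), 1)
print(df)
```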
<!--BOOK_INFORMATION--> <img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png"> *This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).* *The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!* <!--NAVIGATION--> < [Data Manipulation with Pandas](03.00-Introduction-to-Pandas.ipynb) | [Contents](Index.ipynb) | [Data Indexing and Selection](03.02-Data-Indexing-and-Selection.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.01-Introducing-Pandas-Objects.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a> ``` %qtconsole --style solarized-dark ``` # Introducing Pandas Objects At the very basic level, Pandas objects can be thought of as enhanced versions of NumPy structured arrays in which the rows and columns are identified with labels rather than simple integer indices. As we will see during the course of this chapter, Pandas provides a host of useful tools, methods, and functionality on top of the basic data structures, but nearly everything that follows will require an understanding of what these structures are. Thus, before we go any further, let's introduce these three fundamental Pandas data structures: the ``Series``, ``DataFrame``, and ``Index``. We will start our code sessions with the standard NumPy and Pandas imports: ``` import numpy as np import pandas as pd ``` ## The Pandas Series Object A Pandas ``Series`` is a one-dimensional array of indexed data. It can be created from a list or array as follows: ``` data = pd.Series([0.25, 0.5, 0.75, 1.0]) data ``` As we see in the output, the ``Series`` wraps both a sequence of values and a sequence of indices, which we can access with the ``values`` and ``index`` attributes. The ``values`` are simply a familiar NumPy array: ``` data.values ``` The ``index`` is an array-like object of type ``pd.Index``, which we'll discuss in more detail momentarily. ``` data.index ``` Like with a NumPy array, data can be accessed by the associated index via the familiar Python square-bracket notation: ``` data[1] data[1:3] ``` As we will see, though, the Pandas ``Series`` is much more general and flexible than the one-dimensional NumPy array that it emulates. ### ``Series`` as generalized NumPy array From what we've seen so far, it may look like the ``Series`` object is basically interchangeable with a one-dimensional NumPy array. The essential difference is the presence of the index: while the Numpy Array has an *implicitly defined* integer index used to access the values, the Pandas ``Series`` has an *explicitly defined* index associated with the values. This explicit index definition gives the ``Series`` object additional capabilities. For example, the index need not be an integer, but can consist of values of any desired type. 
For example, if we wish, we can use strings as an index: ``` data = pd.Series([0.25, 0.5, 0.75, 1.0], index=['a', 'b', 'c', 'd']) data ``` And the item access works as expected: ``` data['b'] ``` We can even use non-contiguous or non-sequential indices: ``` data = pd.Series([0.25, 0.5, 0.75, 1.0], index=[2, 5, 3, 7]) data data[5] ``` ### Series as specialized dictionary In this way, you can think of a Pandas ``Series`` a bit like a specialization of a Python dictionary. A dictionary is a structure that maps arbitrary keys to a set of arbitrary values, and a ``Series`` is a structure which maps typed keys to a set of typed values. This typing is important: just as the type-specific compiled code behind a NumPy array makes it more efficient than a Python list for certain operations, the type information of a Pandas ``Series`` makes it much more efficient than Python dictionaries for certain operations. The ``Series``-as-dictionary analogy can be made even more clear by constructing a ``Series`` object directly from a Python dictionary: ``` population_dict = {'California': 38332521, 'Texas': 26448193, 'New York': 19651127, 'Florida': 19552860, 'Illinois': 12882135} population = pd.Series(population_dict) population ``` By default, a ``Series`` will be created where the index is drawn from the sorted keys. From here, typical dictionary-style item access can be performed: ``` population['California'] ``` Unlike a dictionary, though, the ``Series`` also supports array-style operations such as slicing: ``` population['California':'Illinois'] ``` We'll discuss some of the quirks of Pandas indexing and slicing in [Data Indexing and Selection](03.02-Data-Indexing-and-Selection.ipynb). ### Constructing Series objects We've already seen a few ways of constructing a Pandas ``Series`` from scratch; all of them are some version of the following: ```python >>> pd.Series(data, index=index) ``` where ``index`` is an optional argument, and ``data`` can be one of many entities. For example, ``data`` can be a list or NumPy array, in which case ``index`` defaults to an integer sequence: ``` pd.Series([2, 4, 6]) ``` ``data`` can be a scalar, which is repeated to fill the specified index: ``` pd.Series(5, index=[100, 200, 300]) ``` ``data`` can be a dictionary, in which ``index`` defaults to the sorted dictionary keys: ``` pd.Series({2:'a', 1:'b', 3:'c'}) ``` In each case, the index can be explicitly set if a different result is preferred: ``` pd.Series({2:'a', 1:'b', 3:'c'}, index=[3, 2]) ``` Notice that in this case, the ``Series`` is populated only with the explicitly identified keys. ## The Pandas DataFrame Object The next fundamental structure in Pandas is the ``DataFrame``. Like the ``Series`` object discussed in the previous section, the ``DataFrame`` can be thought of either as a generalization of a NumPy array, or as a specialization of a Python dictionary. We'll now take a look at each of these perspectives. ### DataFrame as a generalized NumPy array If a ``Series`` is an analog of a one-dimensional array with flexible indices, a ``DataFrame`` is an analog of a two-dimensional array with both flexible row indices and flexible column names. Just as you might think of a two-dimensional array as an ordered sequence of aligned one-dimensional columns, you can think of a ``DataFrame`` as a sequence of aligned ``Series`` objects. Here, by "aligned" we mean that they share the same index. 
To demonstrate this, let's first construct a new ``Series`` listing the area of each of the five states discussed in the previous section: ``` area_dict = {'California': 423967, 'Texas': 695662, 'New York': 141297, 'Florida': 170312, 'Illinois': 149995} area = pd.Series(area_dict) area ``` Now that we have this along with the ``population`` Series from before, we can use a dictionary to construct a single two-dimensional object containing this information: ``` states = pd.DataFrame({'population': population, 'area': area}) states ``` Like the ``Series`` object, the ``DataFrame`` has an ``index`` attribute that gives access to the index labels: ``` states.index ``` Additionally, the ``DataFrame`` has a ``columns`` attribute, which is an ``Index`` object holding the column labels: ``` states.columns ``` Thus the ``DataFrame`` can be thought of as a generalization of a two-dimensional NumPy array, where both the rows and columns have a generalized index for accessing the data. ### DataFrame as specialized dictionary Similarly, we can also think of a ``DataFrame`` as a specialization of a dictionary. Where a dictionary maps a key to a value, a ``DataFrame`` maps a column name to a ``Series`` of column data. For example, asking for the ``'area'`` attribute returns the ``Series`` object containing the areas we saw earlier: ``` states['area'] ``` Notice the potential point of confusion here: in a two-dimesnional NumPy array, ``data[0]`` will return the first *row*. For a ``DataFrame``, ``data['col0']`` will return the first *column*. Because of this, it is probably better to think about ``DataFrame``s as generalized dictionaries rather than generalized arrays, though both ways of looking at the situation can be useful. We'll explore more flexible means of indexing ``DataFrame``s in [Data Indexing and Selection](03.02-Data-Indexing-and-Selection.ipynb). ### Constructing DataFrame objects A Pandas ``DataFrame`` can be constructed in a variety of ways. Here we'll give several examples. #### From a single Series object A ``DataFrame`` is a collection of ``Series`` objects, and a single-column ``DataFrame`` can be constructed from a single ``Series``: ``` pd.DataFrame(population, columns=['population']) ``` #### From a list of dicts Any list of dictionaries can be made into a ``DataFrame``. We'll use a simple list comprehension to create some data: ``` data = [{'a': i, 'b': 2 * i} for i in range(3)] pd.DataFrame(data) ``` Even if some keys in the dictionary are missing, Pandas will fill them in with ``NaN`` (i.e., "not a number") values: ``` pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}]) ``` #### From a dictionary of Series objects As we saw before, a ``DataFrame`` can be constructed from a dictionary of ``Series`` objects as well: ``` pd.DataFrame({'population': population, 'area': area}) ``` #### From a two-dimensional NumPy array Given a two-dimensional array of data, we can create a ``DataFrame`` with any specified column and index names. If omitted, an integer index will be used for each: ``` pd.DataFrame(np.random.rand(3, 2), columns=['foo', 'bar'], index=['a', 'b', 'c']) ``` #### From a NumPy structured array We covered structured arrays in [Structured Data: NumPy's Structured Arrays](02.09-Structured-Data-NumPy.ipynb). 
A Pandas ``DataFrame`` operates much like a structured array, and can be created directly from one: ``` A = np.zeros(3, dtype=[('A', 'i8'), ('B', 'f8')]) A pd.DataFrame(A) ``` ## The Pandas Index Object We have seen here that both the ``Series`` and ``DataFrame`` objects contain an explicit *index* that lets you reference and modify data. This ``Index`` object is an interesting structure in itself, and it can be thought of either as an *immutable array* or as an *ordered set* (technically a multi-set, as ``Index`` objects may contain repeated values). Those views have some interesting consequences in the operations available on ``Index`` objects. As a simple example, let's construct an ``Index`` from a list of integers: ``` ind = pd.Index([2, 3, 5, 7, 11]) ind ``` ### Index as immutable array The ``Index`` in many ways operates like an array. For example, we can use standard Python indexing notation to retrieve values or slices: ``` ind[1] ind[::2] ``` ``Index`` objects also have many of the attributes familiar from NumPy arrays: ``` print(ind.size, ind.shape, ind.ndim, ind.dtype) ``` One difference between ``Index`` objects and NumPy arrays is that indices are immutable–that is, they cannot be modified via the normal means: ``` ind[1] = 0 ``` This immutability makes it safer to share indices between multiple ``DataFrame``s and arrays, without the potential for side effects from inadvertent index modification. ### Index as ordered set Pandas objects are designed to facilitate operations such as joins across datasets, which depend on many aspects of set arithmetic. The ``Index`` object follows many of the conventions used by Python's built-in ``set`` data structure, so that unions, intersections, differences, and other combinations can be computed in a familiar way: ``` indA = pd.Index([1, 3, 5, 7, 9]) indB = pd.Index([2, 3, 5, 7, 11]) indA & indB # intersection indA | indB # union indA ^ indB # symmetric difference ``` These operations may also be accessed via object methods, for example ``indA.intersection(indB)``. <!--NAVIGATION--> < [Data Manipulation with Pandas](03.00-Introduction-to-Pandas.ipynb) | [Contents](Index.ipynb) | [Data Indexing and Selection](03.02-Data-Indexing-and-Selection.ipynb) > <a href="https://colab.research.google.com/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/03.01-Introducing-Pandas-Objects.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open and Execute in Google Colaboratory"></a>
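The row-versus-column remark in the excerpt above (``data[0]`` returns the first *row* of a NumPy array, while ``data['col0']`` returns a *column* of a ``DataFrame``) has no accompanying cell in this excerpt; a quick illustration (an editor's sketch) makes the contrast concrete.

```
import numpy as np
import pandas as pd

arr = np.arange(6).reshape(3, 2)                  # plain 2D array
df = pd.DataFrame(arr, columns=["col0", "col1"])  # same data with labelled columns

print(arr[0])      # first *row* of the NumPy array -> [0 1]
print(df["col0"])  # the "col0" *column* of the DataFrame -> 0, 2, 4
```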
github_jupyter
%qtconsole --style solarized-dark import numpy as np import pandas as pd data = pd.Series([0.25, 0.5, 0.75, 1.0]) data data.values data.index data[1] data[1:3] data = pd.Series([0.25, 0.5, 0.75, 1.0], index=['a', 'b', 'c', 'd']) data data['b'] data = pd.Series([0.25, 0.5, 0.75, 1.0], index=[2, 5, 3, 7]) data data[5] population_dict = {'California': 38332521, 'Texas': 26448193, 'New York': 19651127, 'Florida': 19552860, 'Illinois': 12882135} population = pd.Series(population_dict) population population['California'] population['California':'Illinois'] >>> pd.Series(data, index=index) pd.Series([2, 4, 6]) pd.Series(5, index=[100, 200, 300]) pd.Series({2:'a', 1:'b', 3:'c'}) pd.Series({2:'a', 1:'b', 3:'c'}, index=[3, 2]) area_dict = {'California': 423967, 'Texas': 695662, 'New York': 141297, 'Florida': 170312, 'Illinois': 149995} area = pd.Series(area_dict) area states = pd.DataFrame({'population': population, 'area': area}) states states.index states.columns states['area'] pd.DataFrame(population, columns=['population']) data = [{'a': i, 'b': 2 * i} for i in range(3)] pd.DataFrame(data) pd.DataFrame([{'a': 1, 'b': 2}, {'b': 3, 'c': 4}]) pd.DataFrame({'population': population, 'area': area}) pd.DataFrame(np.random.rand(3, 2), columns=['foo', 'bar'], index=['a', 'b', 'c']) A = np.zeros(3, dtype=[('A', 'i8'), ('B', 'f8')]) A pd.DataFrame(A) ind = pd.Index([2, 3, 5, 7, 11]) ind ind[1] ind[::2] print(ind.size, ind.shape, ind.ndim, ind.dtype) ind[1] = 0 indA = pd.Index([1, 3, 5, 7, 9]) indB = pd.Index([2, 3, 5, 7, 11]) indA & indB # intersection indA | indB # union indA ^ indB # symmetric difference
0.239172
0.993056
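The Pandas excerpt above mentions the method form ``indA.intersection(indB)`` and states that assigning to an ``Index`` fails, but neither is shown in a code cell; the short check below (an editor's sketch) confirms both behaviours.

```
import pandas as pd

indA = pd.Index([1, 3, 5, 7, 9])
indB = pd.Index([2, 3, 5, 7, 11])

print(indA.intersection(indB))  # method form of indA & indB
print(indA.union(indB))         # method form of indA | indB

try:
    indA[1] = 0                 # Index objects are immutable
except TypeError as err:
    print("TypeError:", err)
```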
# Le Bloc Note pour gérer vos dépots GitHub > Cet exercice a pour objectif de vous accompagner dans la création d'un compte [GitHub](https://github.com/) et pour sa gestion en ligne de commande depuis votre navigateur via un interpréteur interactif **jupyter** en mode **Notebook** fonctionnant, par exemple, sur le serveur http://jupyterhub.ecmorlaix.bzh:8092. > Mais tout d'abord **félicitations** d'être déjà parvenu à ouvrir ce notebook, bientôt l'instruction `git` n'aura *"presque"* plus de secret pour vous, et les verbes "tirer", "commettre", "fusionner" et "pousser", trouveront un nouveau sens ! > Ce document est un notebook jupyter, pour bien vous familiariser avec cet environnement regardez cette rapide [Introduction](Introduction-Le_BN_pour_explorer.ipynb). --- ## Mise en situation : GitHub en lui-même n’est rien de plus qu’un réseau social comme Facebook ou Flickr. Vous construisez un profil, vous y déposez des projets à partager et vous vous connectez avec d’autres utilisateurs en suivant leurs comptes. Même si la plupart des utilisateurs y déposent des projets de programmes ou de code, rien ne vous empêche d’y placer des textes ou tout type de fichier à présenter dans vos répertoires de projets. **GitHub** est basé sur **Git**, une création de [Linus Torwald](https://fr.wikipedia.org/wiki/Linus_Torvalds) qui est l'inventeur du [noyau Linux](https://fr.wikipedia.org/wiki/Noyau_Linux). Il en est arrivé à créer [Git](https://fr.wikipedia.org/wiki/Git) pour justement pouvoir gérer le développement du projet Linux... Git est un logiciel de contrôle de version, ce qui signifie qu’il gère les modifications d’un projet sans écraser n’importe quelle partie du projet. Pourquoi utiliser quelque chose comme Git ? Supposons que vous mettiez à jour avec un collègue des pages sur le même site web. Vous faites des modifications, vous les sauvegardez et les versez sur le site. À ce stade, tout va bien. Le problème survient quand votre collègue travaille sur la même page que vous en même temps. L’un de vous va voir son travail écrasé. Une application de contrôle de version comme Git empêche ça d’arriver. Vous et votre collègue pouvez chacun de votre côté verser vos révisions sur la même page, et Git sauvegardera deux copies. Plus tard, vous pourrez fusionner vos modifications sans perdre le travail dans le processus. Vous pouvez même revenir en arrière à tout moment, parce que Git conserve une « copie instantanée » de tous les changements produits. Créer un compte sur GitHub.com apporte les contrôles de versions à vos projets web, et leur confère des fonctionnalités de réseaux sociaux. Vous pouvez parcourir les projets d’autres utilisateurs de Github, et même y télécharger des copies pour vous-même afin de les modifier, apprendre ou les enrichir. D’autres utilisateurs peuvent faire la même chose avec vos projets publics, repérer vos erreurs et suggérer des corrections. De toute façon, aucune donnée ne se perd parce que Git enregistre un “instantané” de chaque modification. 
source : https://www.christopheducamp.com/2013/12/15/github-pour-nuls-partie-1/ et https://www.christopheducamp.com/2013/12/16/github-pour-nuls-partie-2/ # Besoin n°1 : *J'ai un dossier de travail sur mon espace du serveur jupyterhub du lycée dont je veux faire une copie public sur GitHub pour pouvoir ensuite maintenir à jour son développement soit depuis l'interface graphique de GitHub en ligne soit depuis l'environnement interactif **jupyter** en mode **Notebook**...* ## Créer un dépot GitHub : Créer un compte sur GitHub (Sign up) depuis un navigateur à l'adresse https://github.com/, ou identifier vous (Sign in) si vous avez déjà un compte. <img src="https://ericecmorlaix.github.io/img/GitHub00.png" alt="inscription GitHub" width=30%> A l'adresse https://github.com/new créer un nouveau répertoire de dépot nommé comme votre dossier de travail sur le serveur ou par exemple `pNomRepo` : <img src="https://ericecmorlaix.github.io/img/GitHub01.png" alt="nouveau repo GitHub" width=50%> Cocher la case "Initialize this repository with a README" puis cliquer sur le bouton "Create repository". Voilà, vous faites maintenant parti d'un autre réseau social mondial celui des développeur de code... > Remarquer que le fichier `Readme` à pour extension `.md` pour **Mardown** si vous ne connaissez pas ce langage de description rudimentaire rendez-vous sur le bloc-note [Markdown](MarkDown-Le_BN_pour_rapporter.ipynb). Il est possible de gérer un compte GitHub via son interface graphique depuis un navigateur ou sur un ordinateur personnel ou une tablette en y installant l'application GitHub Desktop adaptée. Pour vous intitier plus complètement dans ce sens https://guides.github.com/activities/hello-world/. Mais les ordinateurs du lycée ne sont pas personnel et sur le serveur http://jupyterhub.ecmorlaix.bzh:8092 il n'y a pas d'interface graphique. Il nous faut donc recourir *"aux supers pouvoirs"* de **la ligne de commande**... ## Gérer en ligne de commande > En apprenant à piloter vos dépôts GitHub en ligne de commande, vous comprendrez alors le processus de la [gestion de version](https://fr.wikipedia.org/wiki/Logiciel_de_gestion_de_versions) [décentralisée](https://fr.wikipedia.org/wiki/Gestion_de_versions#Gestion_de_versions_d.C3.A9centralis.C3.A9e) et par transposition vous serez naturellement et rapidement capable de le faire aussi depuis n'importe quelle interface graphique. L'inverse n'est pas vrai... > Les intructions suivantes sont à saisir dans un terminal. Il est cependant possible de les exécuter depuis les cellules de code de ce bloc-note mais il sera alors nécessaire d'ajouter un `!` devant certaines... *** > Si vous n'êtes pas encore familiarisé avec la ligne de commnande dans un environnement jupyter rendez-vous sur le bloc-note [Terminal](Terminal-Un_BN_pour_la_ligne_de_commande.ipynb). 
*** ## Mon pense-bête pour git : A mesure que vous explorez ce bloc-note compléter la liste descriptive suivante pour établir votre synthèse des principales instructions à retenir : <hr> <dl> <dt>git init</dt> <dd>...</dd> <hr> <dt>git ...</dt> <dd>...</dd> </dl> <hr> ## Configuration Générale : ### Configurer un nom d'utilisateur pour l'ensemble de vos dossiers ``` git config --global user.name "votrePseudoGitHub" ``` #### Vérifier la configuration : ``` git config --global user.name ``` ### Configurer une adresse mail pour l'ensemble de vos dossiers ``` git config --global user.email "prenom.nom@eleves.ecmorlaix.fr" ``` #### Vérifier la configuration : ``` git config --global user.email ``` ### Obtenir un listing de la configuration globale ``` git config --list ``` ## Lier un dossier du serveur à un dépôt GitHub et récupérer son contenu : ### Si vous n'en avez pas, créer un dossier sur le serveur du même nom que le dépot GitHub correspondant : ``` mkdir ~/pNomRepo ``` ### Ce placer dans ce dossier ``` cd ~/pNomRepo ``` ### Initialiser ce dossier en tant que dossier git : ``` git init ``` #### Vérifier la présence d'un dossier caché .git : ``` ls -a ``` ### Lier ce dossier à votre dépôt distant GitHub : ``` git remote add origin https://github.com/votrePseudoGitHub/pNomRepo.git ``` > La commande `git remote remove ...` permet de supprimer une liaison distante. #### Vérifier la liaison distante : ``` git remote -v ``` ### Récupérer les données du dépôt GitHub dans votre dossier sur le serveur = "Tirer" ``` git pull origin master ``` > `origin` est le nom de la liaison distante (remote) définit par défaut et `master` est le nom de la branche principale #### Vérifier la bonne récupération du fichier `Readme.md`... # Besoin n°2 : *J'ai un dépot sur mon compte GitHub dont je veux faire une copie de travail sur mon espace du serveur jupyterhub du lycée pour pouvoir ensuite maintenir à jour son développement soit depuis l'interface graphique de GitHub en ligne soit depuis l'environnement interactif **jupyter** en mode **Notebook**...* ## Cloner un dépôt GitHub : ### Copier dans un nouveau dossier nommé par exemple `monRepo` sur le serveur. ``` git clone https://github.com/votrePseudoGitHub/pNomRepo.git monRepo ``` > Si on ne précise pas le nom du dossier à la fin de l'instruction, alors c'est le nom du dépôt GitHub cloner qui est attribué par défaut au dossier auquel il sera lié. > Il faut ajouter `.git` à l'URL d'un dépot GitHub pour le cloner. 
#### Vérifier la liaison, la récupération du fichier `Readme.md` et l'état de votre copie de travail : ``` cd ~/monRepo git remote -v ls -a git status ``` # Besoin n°3 : *Maintenant que j'ai un dépot sur mon compte GitHub lié à une copie de travail sur mon espace du serveur jupyterhub du lycée, je souhaite maintenir à jour son développement soit depuis l'interface graphique de GitHub en ligne soit depuis l'environnement interactif **jupyter** en mode **Notebook**...* ## Mettre à jour le dépôt GitHub lié avec les évolutions de votre dossier sur le serveur : ### Créer un fichier sur le serveur : ``` echo "Bonjour, voici ma contibution !">~/monRepo/bonjour.txt less ~/monRepo/bonjour.txt # touche "Q" pour quitter git status ``` ### Modifier un fichier depuis le serveur : Ouvrir le fichier README.md dans l'éditeur de texte de l'environnement jupyter ou en ligne de commande avec [Nano](https://doc.ubuntu-fr.org/nano) (ou [Vim](https://doc.ubuntu-fr.org/vim), ou [Emacs](https://doc.ubuntu-fr.org/emacs)) ``` nano -w ~/monRepo/README.md ``` Effectuer des modifications telles que : <img src="https://ericecmorlaix.github.io/img/Nano.png" alt="nouveau repo GitHub" width=90%> > Pour Nano, utiliser les touches `Ctrl+X` pour Quitter, `O` pour valider les modifications puis appuyer sur la touche `Entrée` sans changer le nom du fichier `README.md`. ``` git status ``` ### Visualiser tous les changements réalisés à ce stade ``` git diff ``` > Les suppresions apparaissent en rouge précédées d'un signe **`-`**, et les ajouts apparaissent en vert prédédés d'un signe **`+`**. > Utiliser avec la commande `git diff nomDuFichier` pour ne voir que les modifications effectuées sur un fichier ### Ajouter les changements à mettre à jour à l'index ``` git add bonjour.txt README.md git status ``` >###### Remarque : > La commande `git add *` permet d'ajouter tous les fichiers modifiés en une fois. > > La commande `git add dossier/`permet d'ajouter un dossier et tout sont contenu. > > La commande `git reset HEAD -- nomDuFichier` permet d'enlever de l'index un fichier ajouté par erreur. > > La commande `git checkout nomDuFichier` permet d'annuler les modifications faites sur un fichier depuis l'état précédent. ### Valider et consigner vos modifications = "Commiter" ``` git commit -m "Ajout Bonjour" git status ``` > La commande `git commit -a -m "mon message"` permet de directement valider et consigner les modifications qui concernent tous les fichiers déjà suivis sans avoir à passer préalablement par une commande `add`. > > La commande `git commit` sans argument ouvre l'éditeur définit par défaut (Nano) pour permettre d'écrire le message de `commit`; pour sortir, faire `Ctrl+X` pour Quitter, `O` pour valider, puis `Entrée`. ### Afficher un historique des révisions successives effectués dans un dépot : ``` git log ``` > Chaque révision porte un identifiant unique sur 20 octets. > Essayer aussi les commandes `git log --graph`, `git log -p`, `git log --stat` et `git log --oneline --graph --decorate`. > Ce qu'il faut comprendre lorsque l'on fait un `commit` c'est que l'on génère un point d'étape du développement d'un projet sur lequel on peut revenir ensuite à tout moment grace à son identifiant... 
### Afficher les modifications contenues dans un commit : ``` git show # affiche les modifications du dernier commit en date git show identifiantDuCommit # affiche les modifications d'un commit en particulier ``` > Pour l'instant cette mise à jour n'existe que dans votre dossier sur le serveur jupyter, votre copie de travail, et pas encore sur GitHub. > >Avec Git on peut encore la modifier à ce niveau là si on a fait une erreur : - Annuler le dernier commit sans perdre les changements : `git reset HEAD^` - Annuler votre dernier commit et les changements effectués : `git reset --hard HEAD^` ### Mettre à jour votre dépôt GitHub avec votre copie de travail = "Pousser" : > Attention, l'instruction suivante ne fonctionnera pas dans un bloc-note même avec un `!` devant. Il faut impérativement l'exécuter dans un terminal... ``` git push origin master ``` Renseigner alors votre votre pseudo et votre mot de passse GitHub... #### Vérifier l'état de votre copie de travail : ``` git status ``` #### Enfin il ne reste plus qu'à vérifier la mise à jour du contenu de votre dépot GitHub en ligne... > La commande `git revert identifiantDuCommit` permet de créer un `commit` inverse pour l'annuler. ## Mettre à jour le dossier lié sur le serveur avec les évolutions de votre dépôt GitHub : ### Modifier un fichier depuis GitHub : Dans l’interface web de GitHub, éditer et modifier le fichier `README.md`, et commiter les changements. ### Tirer les changement dans votre copie de travail du serveur : ``` cd ~/monRepo git status git log git pull git status git log ``` ##### Modifier à nouveau le fichier `README.md` sur votre ordinateur, commitez les changements, puis poussez vos changements sur GitHub : ``` git status git push ``` ##### A ce stade, faire encore un changement à `README.md` dans l’interface de GitHub. Commiter. Mais ne tirer pas encore vers le serveur. ##### Créer un fichier `index.html` dans votre copie de travail sur le serveur contenant le code : ##### Ajouter, commiter, puis essayer de pousser, que se passe-t-il ? ##### Tirer, Valider le message proposé (`Ctrl+X`), puis lire le log avec ``` git log --graph # touche "Q" pour quitter ``` Que s’est-il passé ? Quels sont les nouveaux mots... ##### Pousser maintenant. ###### Analyser le même log avec l’interface web de GitHub, à l’adresse https://github.com/votrePseudoGitHub/pNomRepo/network. # Besoin n°4 Nous venons d'observer qu'un dépot distant GitHub et sa copie de travail sur le serveur jupyterhub ont pu évoluer parallèlement et indépendamment l'un de l'autre tout en subissant des modifications différentes chacun selon sa propre voie. Enfin, nous avons pu les fusionner pour les rendre identiques à nouveau. > **Remarque :** >Lorsque vous récupérez les nouveaux commits depuis GitHub avec `git pull`, cela revient en fait à appeler deux commandes différentes : - `git fetch`, qui s’occupe du téléchargement des nouveaux commits, - et `git merge`, qui fusionne les commits téléchargés issus de la branche de GitHub dans la branche du serveur jupyterhub. Ainsi, il pourrait être intéressant de disposer d'une version de développement pour essayer des choses sans affecter la version principale qui resterait alors pleinement fonctionnelle... ## Développer sur des branches différentes puis les fusionner avec la branche master : Les branches sont utilisées pour développer des fonctionnalités isolées des autres. La branche master est la branche par défaut quand vous créez un dépôt. 
Utilisez les autres branches pour le développement et fusionnez ensuite à la branche principale quand vous avez fini. <img src="https://ericecmorlaix.github.io/img/BrancheDev.png" alt="branche dev" width=50%> ### Voir toutes les branches ``` git branch ``` > Il y a une étoile devant la branche sur laquelle vous êtes actuellement. ### Créer une branche nommée par exemple `pnom-dev`: ``` git branch pnom-dev ``` ### Changer de branche courante : ``` git checkout pnom-dev ``` ##### Vérifier la création et le changement de branche courante avec un `git branch`. > Attention, si vous changez de branche sans avoir fait de `commit` alors vous perdrez toutes les modifications faites depuis le dernier `commit`à moins de faire un `git stash`au préalable... Lorsque vous reviendrez travailler sur cette branche il faudra faire un `git stash apply` pour récupérer vos plus récentes modifications réalisées depuis le dernier `commit`. ##### Faire des modifications à un fichier et un commit sur la branche `pnom-dev` puis vérifier avec un `git log --graph` ##### Revenir sur la branche master par un `git checkout master` ### Créer une nouvelle branche nommée `autre-dev` et basculer dessus directement ``` git checkout -b autre-dev ``` ### Voir la liste et l'état de toutes les branches : ``` git branch -av ``` > On observe que les branches `pnom-dev` et `autre-dev` ne sont pas disponibles sur le dépot GitHub. > une branche n'est pas disponible pour les autres tant que vous ne l'avez pas envoyée vers votre dépôt distant avec `git push origin autre-dev` ##### Faire des modifications à un fichier et un `commit` sur la branche `autre-dev` et vérifier avec `git branch -av` ##### Revenir sur la branche master par un `git checkout master` ##### Faire alors des modifications et un commit sur la branche `master` et vérifier avec `git branch -av` ##### Faire un push vers la branche principale distante et vérifier avec `git branch -av` ##### Observer l'état des logs avec un `git log --graph` ### Fusionner une branche avec la branche principale > Il faut revenir sur la branche `master` pour faire un `merge` avec cette branche : ``` git merge pnom-dev ``` ##### Faire un `git diff pnom-dev master` pour avoir un aperçu des changements après fusion. > A partir de là, il y a deux possibilités : - Soit la fusion automatique est possible, tout va bien, et vous pouvez poursuivre ; - Soit la fusion automatique a échoué car il y a des conflits, vous avez alors encore deux choix : - Soit vous faites machine arrière et vous renoncez à la fusion en faisant `git merge --abort` ; - Soit vous éditez les fichiers en conflits et vous les corrigez manuellement en choississant la bonne version que vous enregistrer, et puis vous les marquer comme fusionnés avec un `git add nomFichierEnConflit`, enfin, vous faites un nouveau `commit`. Alors, si tout va bien, vous avez effectué un pas de géant car vous venez de résoudre votre premier conflit ! ### Supprimer une branche devenue inutile après fusion : ``` git branch -d pnom-dev ``` ##### Essayer de faire `git branch -d autre-dev`, quel est le problème ? ### Supprimer une branche devenue inutile sans fusion : ``` git branch -D autre-dev ``` > Supprimer une branche présente sur le dépôt GitHub avec `git push origin --delete nom_de_la_branche_distante` #### Vérifier vos fusions, suppressions, et branches sur votre copie de travail ``` git status git branch git log graph ``` ##### Pousser maintenant. 
###### Analyser le même log avec l’interface web de GitHub, à l’adresse https://github.com/votrePseudoGitHub/pNomRepo/network. ### Résumé visuel du workflow Git + GitHub pour gérer votre dépot via le serveur jupyterhub : ... # Besoin n°5 : un serveur Web ## Ma page Web enfin hébergée par GitHub : A l’adresse https://github.com/votrePseudoGitHub/pNomRepo/settings, renommer le repository au format `votrePseudoGitHub.github.io` puis visiter l'adresse https://votrePseudoGitHub.github.io/index.html ## Transformer un dépot GitHub en serveur pour vos projets Web statiques : A la lecture des vidéos de Daniel Shiffman suivantes, déposer sur GitHub et publier vos précédentes pages web et codes d'activités réalisées sur P5.js : - Version de base détaillée : https://www.youtube.com/embed/bFVtrlyH-kc - Version résumée intégrant P5.js : https://www.youtube.com/embed/8HPYsDTk17A > Par la suite vous pourrez cloner ce dépot et le gérer à distance depuis le serveur jupyterhub, la commande pour pousser sera alors `git push origin gh-pages` ``` %%HTML <center> <h2>Version de base détaillée :</h2> <iframe width="560" height="315" src="https://www.youtube.com/embed/bFVtrlyH-kc" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <h2>Version résumée intégrant P5.js :</h2> <iframe width="560" height="315" src="https://www.youtube.com/embed/8HPYsDTk17A" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> </center> ``` # Besoin n°6 : Collaborer à plusieurs sur un dépot de projet commun : Source : http://defeo.lu/in202/tutorials/tutorial4/ Cette partie est à développer en binôme. Choisissez un collègue avec qui travailler et installez-vous sur un même ilot. Vous allez travailler en parallèle sur la construction d’un site web qui parle de Git. On va vous appeler collègue A, et collègue B. Les opérations à réaliser par A ou B seront précédées respectivement par **(- A -)** ou par **(- B -)**. Le reste des consignes concerne, bien sûr, les deux. Au sein d’une même section délimitée par des traits horizontaux, vous pouvez travailler en parallèle. Avant de passer à la section suivante, attendez que votre collègue ait terminé. ## 1 - Préparer un répertoire partagé **** - **(- A -)** Créez un nouveau répertoire GitHub en vous rendant à l’adresse https://github.com/new. Nommez `Projet` le répertoire, renseignez une description en quelques mots, cochez la case `Initialize this repository with a README`, et cliquez sur `Create repository`. - **(- A -)** Dans les réglages du projet, sélectionnez l’onglet `Collaborators` (url http://github.com/votrePseudoGitHub/Projet/settings/collaboration). Renseignez l’identifiant GitHub de **B** dans la case, et cliquez sur `Add collaborator`. Votre mot de passe vous sera demandé. Maintenant vous avez tous les deux les droits en écriture sur le répertoire. **** ## 2 - Obtenir une copie locale du répertoire - En ligne de commande avec Git, clonez le répertoire créé par A sur votre espace du serveur jupyterhub. On utilisera la commande `clone`, comme vu précédemment pour le besoin n°2. **** - **(- B -)** Créez un fichier index.html dans le répertoire, rempli avec le squelette classique : ``` <!DOCTYPE html> <html lang="fr"> <head> <meta charset="utf-8"> <title>Le titre de ma page</title> <meta name="author" content="Prenom NOM"> </head> <body> <h1></h1> </body> </html> ``` - **(- B -)** Créez un `commit` contenant ce fichier. 
Vérifiez avec les commandes `log` et `status` que le fichier est bien commité. Poussez vos changements avec la commande `push`. **** - Vérfiez dans l’interface GitHub que les modifications ont bien été reçues par le serveur. ## 3 - Recevoir les mises à jour **** - **(- A -)** Avec la commande `pull --ff-only`, tirez les modifications apportées par votre collègue. Vérifiez que le fichier `index.html` est bien reçu. - **(- A -)** Ouvrez et modifiez le fichier `index.html`. Obtenez une page qui ressemble à peu près à cela : <center> <h1> Le fantastique guide de Git </h1> <h2> par nom de A &amp; nom de B</h2> <h3>Sera bientôt disponible à partir de cette page web</h3> </center> - **(- A -)** Faites une faute d’orthographe dans le nom de votre collègue. Ajoutez un peu de contenu. - **(- A -)** Commitez et poussez. - **(- B -)** Créez un nouveau fichier `commandes.html` et y inclure votre liste descriptive des principales commandes de Git que vous avez connues jusqu’ici telle que : ```` <hr> <dl> <dt>git init</dt> <dd>...</dd> <hr> <dt>git ...</dt> <dd>...</dd> </dl> <hr> ```` - **(- B -)** Ne commitez pas. Vérifiez avec la commande `status` que vous avez bien des modifications en cours dans votre répertoire. La sortie de la commande `status` devrait normalement commencer par ```` Sur la branche master Votre branche est à jour avec 'origin/master'. ```` qui vous informe que votre copie locale est à jour. **** - **(- B -)** Utilisez la commande `fetch`, puis à nouveau la commande `status`. Si A a correctement poussé ses modifications, la commande status vous annoncera que votre copie locale n’est plus à jour, par le message : ```` Sur la branche master Votre branche est en retard sur 'origin/master' de 1 commit, ... ```` Si le message vous dit que vous êtes encore à jour, attendez que votre collègue ait poussé, puis répétez l’opération. - **(- B -)** Tirez les modifications de votre collègue avec 'pull --ff-only'. Vérifiez avec les commandes 'log' et 'status' que vous avez bien reçu les modifications. Si tout s’est bien passé, vous avez constaté que Git est arrivé à tirer les modifications distantes, même si vous aviez des modifications locales non encore commitées. En effet les modifications concernaient deux fichiers différents, Git a donc pu les intégrer sans difficultés. Git ne détruit jamais les données : si vos modifications avaient porté sur les mêmes fichiers, il aurait refusé de tirer. ## 4 - Même chose, rôles inversés - **(- B -)** Commitez maintenant vos modifications à 'commandes.html'. Vérifiez avec les commandes 'status' et 'log' que le 'commit' a bien réussi. - **(- B -)** Poussez les modifications avec la commande 'push'. - **(- A -)** Ajoutez une feuille de style à index.html. Ne committez pas. - **(- A -)** Utilisez la commande fetch, puis à nouveau la commande status. Si B a correctement poussé ses modifications, la commande status vous annoncera que votre copie locale n’est plus à jour, par le message On branch master Your branch is behind of 'origin/master' by 1 commit. - **(- A -)** Si le message vous dit que vous êtes encore up-to-date, attendez que votre collègue ait poussé, puis répétez l’opération. - **(- A -)** Tirez les modifications de votre collègue avec pull --ff-only. Vérifiez avec les commandes log et status que vous avez bien reçu les modifications. 
# Compléments : ## Créer des tags : A partir des premiers chiffres hexadécimaux de l'identifiant d'un `commit` (obtenu en faisant un `git log`), on peut créer un tag, c'est à dire publier une version de votre projet dans un état fonctionnel donné, une release, en faisant `git tag 1.0.0 1b2e1d63ff` ## Navigation dans GitHub : Des liens relatifs à ajouter dans le fichier `README.md` pour une navigation depuis un dépôt de GitHub : - `[Éditez ce fichier en cliquant ici](../../edit/master/README.md)` - `[Les réglages, c'est par là](../../settings)` - `[Obtenez une copie de ce dépot dans votre compte GitHub en cliquant ici](../../fork)` - Pour les transformer en liens absolus, il faut remplacer `../../` par `https://github.com/votrePseudoGitHub/pNomRepo/` ## Ressources : - RTFM : `~$ git help`, `~$ man git` ou `~$ info git` (touche `Q` pour sortir) ; - La bible en Français : https://git-scm.com/book/fr/v2 ; - La chaine GitHub Training & Guides sur Youtube : https://www.youtube.com/GitHubGuides ; - Sur la Chaine [The Coding Train](https://www.youtube.com/channel/UCvjgXvBlbQiydffZU7m1_aw) de [Daniel Shiffman](https://shiffman.net/) une playlist pour [Git & GitHub](https://www.youtube.com/playlist?list=PLRqwX-V7Uu6ZF9C0YMKuns9sLDzK6zoiV) ; - Un Tutoriel interactif : https://www.codecademy.com/learn/learn-git ; - Le tutoriel de Atlassian : https://www.atlassian.com/git/tutorials ; - Une explication interactive des principales commandes : http://onlywei.github.io/explain-git-with-d3/ ; - Un tutoriel en Français : https://openclassrooms.com/fr/courses/1233741-gerez-vos-codes-source-avec-git ; - Un petit guide en Français : http://rogerdudler.github.io/git-guide/index.fr.html ; - Un pense bête pas bète : https://gist.github.com/aquelito/8596717. - Un client multi-plateforme git cola : http://git-cola.github.io/ *** > **Félicitations !** Vous êtes parvenu au bout des activités de ce bloc note. > Vous êtes maintenant capable de gérer vos dépots GitHub depuis un navigateur via un environnement interactif jupyter notebook. > Pour explorer plus avant d'autres fonctionnalités de jupyter notebook repassez par le [Sommaire](index.ipynb). *** <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/"><img alt="Licence Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by-sa/4.0/88x31.png" /></a><br />Ce document est mis à disposition selon les termes de la <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Licence Creative Commons Attribution - Partage dans les Mêmes Conditions 4.0 International</a>. Pour toute question, suggestion ou commentaire : <a href="mailto:eric.madec@ecmorlaix.fr">eric.madec@ecmorlaix.fr</a>
``` import json from collections import Counter import operator import numpy as np pl_title = json.load(open('../MODEL_1_PL_NAME_NEW/PID_PROCESSED_TITLE_LIST_PROCESSED.json')) Train = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TRAIN.json')) len(Train) Train['967445'] word_tracks_list = {} for pl in Train: for word in pl_title[pl]: try: word_tracks_list[word] += Train[pl] except: word_tracks_list[word] = Train[pl] word_tracks_freq = {} for word in word_tracks_list: word_tracks_freq[word] = Counter(word_tracks_list[word]) ``` # Train ``` T0_Test_raw = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TEST_T0.json')) T0_Test = {} for pl in T0_Test_raw.keys()[:1000]: T0_Test[pl] = T0_Test_raw[pl][:] i = 0 R = {} for pl in T0_Test: if i % 10 == 0: print i i += 1 R[pl] = {} for word in pl_title[pl]: try: R[pl] = dict(Counter(R[pl])+Counter(word_tracks_freq[word])) except: continue len(R) #delete empty R_noempty = {} for pl in R: if R[pl] != {}: R_noempty[pl] = R[pl] len(R_noempty) R_sorted_raw = {} for pl in R_noempty: R_sorted_raw[pl] = [] current = sorted(R_noempty[pl].items(), key=operator.itemgetter(1), reverse = True) for track in current[:500]: R_sorted_raw[pl].append(track[0]) #delete recommanded pl not 500 R_sorted = {} for pl in R_sorted_raw: if len(R_sorted_raw[pl]) == 500: R_sorted[pl] = R_sorted_raw[pl] len(R_sorted) ``` # Evaluation ``` G_raw = json.load(open('../DATA_PROCESSING/PL_TRACKS_5_TEST.json')) G = {} for pl in R_sorted: G[pl] = G_raw[pl] len(G) def r_precision(G, R): limit_R = R[:len(G)] if len(G) != 0: return len(list(set(G).intersection(set(limit_R)))) * 1.0 / len(G) else: return 0 def ndcg(G, R): r = [1 if i in set(G) else 0 for i in R] r = np.asfarray(r) dcg = r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1))) #k = len(set(G).intersection(set(R))) k = len(G) if k > 0: idcg = 1 + np.sum(np.ones(k - 1) / np.log2(np.arange(2, k + 1))) return dcg * 1.0 / idcg else: return 0 def clicks(G, R): n = 1 for i in R: if i in set(G): return (n - 1) * 1.0 / 10 return 51 R_precision = {} NDCG = {} Clicks = {} i = 0 for pl in R_sorted: R_precision[pl] = r_precision(G[pl], R_sorted[pl]) NDCG[pl] = ndcg(G[pl], R_sorted[pl]) Clicks[pl] = clicks(G[pl], R_sorted[pl]) # 1000 data print sum(R_precision.values()) / len(R_precision) print sum(NDCG.values()) / len(NDCG) print sum(Clicks.values()) / len(Clicks) # 100 data print sum(R_precision.values()) / len(R_precision) print sum(NDCG.values()) / len(NDCG) print sum(Clicks.values()) / len(Clicks) ```
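As a quick sanity check of the three evaluation functions defined above, the toy example below (an illustrative sketch; `G_toy` and `R_toy` are made-up track IDs, not data from the challenge) exercises each metric on a hand-sized case:

```
# Toy ground truth (3 held-out tracks) and a ranked list of 10 recommendations
G_toy = ['t1', 't2', 't3']
R_toy = ['t1', 't4', 't2', 't5', 't3', 't6', 't7', 't8', 't9', 't0']

# R-precision: hits within the first len(G) recommendations -> 2 of 3 here
print(r_precision(G_toy, R_toy))   # 0.666...

# NDCG: rank-discounted gain normalised by the ideal ranking -> roughly 0.78 here
print(ndcg(G_toy, R_toy))

# Clicks: 0.0 here because the very first recommendation is a hit.
# Note that `n` is never incremented inside the loop of clicks() above,
# so it returns 0.0 for any list that contains at least one hit.
print(clicks(G_toy, R_toy))
```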
# Matrizes e vetores ## License All content can be freely used and adapted under the terms of the [Creative Commons Attribution 4.0 International License](http://creativecommons.org/licenses/by/4.0/). ![Creative Commons License](https://i.creativecommons.org/l/by/4.0/88x31.png) ## Representação de uma matriz Antes de podermos manipular matrizes e vetores no computador, precisamos de um jeito de armazená-los em variáveis. Para vetores, o candidato natural são **listas**. $$ \mathbf{v} = \begin{bmatrix}1 \\ 2 \\ 3\end{bmatrix} $$ O vetor acima poderia ser representado em código como: ``` v = [1, 2, 3] print(v) ``` Uma matriz pode ser vista como um conjunto de vetores, ou um vetor de vetores. Cada vetor seria equivalente a uma linha da matriz: $$ \mathbf{A} = \begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \\ \end{bmatrix} = \begin{bmatrix} [1 & 2 & 3] \\ [4 & 5 & 6] \\ [7 & 8 & 9] \\ \end{bmatrix} $$ Logo, um jeito de representar uma matriz em Python é através de uma **lista de listas**: ``` A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] print(A) ``` O Python permite quebrar a linha quando o comando está entre `[` ou `(`: ``` A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] print(A) ``` *Note que o print acima imprime a matriz toda em uma única linha*. A matriz `A` é uma lista como qualquer outra. A única diferença é que **os elementos da lista são outras listas**. O elemnto `[0]` da lista `A` será a lista correspondente a primeira linha: ``` print(A[0]) ``` Como `A[0]` é uma lista, podemos pegar os elementos dessa lista da mesma forma: ``` print(A[0][0]) print(A[1][2]) ``` ### Exemplo Queremos imprimir cada elemento da matriz `A` da seguinte forma: 1 2 3 4 5 6 7 8 9 Podemos utilizar o `for` para pegar cada linha da matriz. Para cada linha da matriz, queremos imprimir o elemento. Quando terminarmos de imprimir a linha, vamos pular uma linha. ``` for i in range(3): # Anda sobre as linhas for j in range(3): # Anda sobre as colunas print(A[i][j], '', end='') # end='' faz com que print não pule uma linha print() # Imprime nada e pula uma linha ``` ## Somando matrizes A soma de duas matrizes produz uma matriz que terá a soma dos elementos correspondentes: $$ \begin{bmatrix} a & b & c \\ d & e & f \\ g & h & i \\ \end{bmatrix} + \begin{bmatrix} j & l & m \\ n & o & p \\ q & r & s \\ \end{bmatrix} = \begin{bmatrix} a + j & b + l & c + m \\ d + n & e + o & f + p \\ g + q & h + r & i + s \\ \end{bmatrix} $$ De forma genérica, o $j$-ésimo elemento da $i$-ésima linha de uma matriz $\mathbf{A}$ é $A_{ij}$. Se temos duas matrizes $\mathbf{A}$ e $\mathbf{B}$, a soma pode ser escrita como: $$ C_{ij} = A_{ij} + B_{ij} $$ ## Tarefa Some as matrizes `A` e `B` definidas abaixo e guarde o resultado em uma matriz `C`. Imprima a matriz `C`. **Dicas**: * Você pode criar a matriz `C` antes ou durante a soma. Lembre-se que a matriz é uma lista e listas possuem o método `append`. 
``` A = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] B = [[3, 4, 5], [6, 7, 8], [9, 10, 11]] nlin_a = 3 nlin_b = 3 ncol_a = 3 ncol_b = 3 C = [] for i in range(3): linha = [] for j in range(3): soma = A[i][j] + B[i][j] linha.append(soma) C.append(linha) print(C) for i in range(3): for j in range(3): print(C[i][j], '', end = '') print() ``` ### Resultado esperado Seu código deve imprimir exatamente: 4 6 8 10 12 14 16 18 20 ## Multiplicando uma matriz por um vetor A multiplicação de uma matriz por um vetor é $$ \begin{bmatrix} a & b \\ c & d \\ \end{bmatrix} \begin{bmatrix} e \\ f \\ \end{bmatrix} = \begin{bmatrix} ae + bf \\ ce + df \\ \end{bmatrix} $$ Sendo o vetor $\mathbf{u} = \mathbf{A}\mathbf{v}$, cada elemento $i$ de $\mathbf{u}$ é $$ u_i = \sum\limits_{k=1}^{N} A_{ik}v_k $$ ## Tarefa Faça a multiplicação da matriz pelo vetor definidos abaixo. Guarde o resultado em uma lista. **Dicas** ``` A = [[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]] v = [12, 13, 14, 15] nlin = 3 ncol = 4 u = [] for i in range(3): mult = 0 for k in range(4): mult = mult + A[i][k]*v[k] u.append(mult) print(u) ``` ### Resultado esperado O seu código deve imprimir exatamente: [140, 302, 464] ## Multiplicação de matrizes A multiplicação de matrizes é feita de forma diferente da soma. É mais fácil mostrar do que explicar: $$ \begin{bmatrix} a & b \\ c & d \\ \end{bmatrix} \begin{bmatrix} e & f \\ g & h \\ \end{bmatrix} = \begin{bmatrix} ae + bg & af + bh \\ ce + dg & cf + dh \\ \end{bmatrix} $$ Sendo $\mathbf{C} = \mathbf{A}\mathbf{B}$, cada elemento $ij$ de $\mathbf{C}$ é $$ C_{ij} = \sum\limits_{k=1}^{N} A_{ik}B_{kj} $$ ## Tarefa Faça a multiplicação das duas matrizes definidas abaixo. Guarde o resultado em uma matriz (lista de listas). ``` A = [[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]] B = [[3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14]] nlin_a = 3 nlin_b = 4 ncol_a = 4 ncol_b = 3 C = [] for i in range(nlin_a): linha = [] for j in range(ncol_b): soma = 0 for k in range(nlin_b): soma = soma + A[i][k]*B[k][j] linha.append(soma) C.append(linha) for i in range(nlin_a): for j in range(ncol_b): print(C[i][j], ' ', end = '') print() ``` ### Resultado esperado O seu código deve imprimir exatamente: 90 100 110 180 202 224 270 304 338 ## Tarefa Bônus Calcule o produto $\mathbf{A}^T\mathbf{A}$ para a matriz $A$ definida abaixo. $\mathbf{A}^T$ é a matriz transposta de $A$: $$ \mathbf{A} = \begin{bmatrix} a & b & c \\ d & e & f \\ \end{bmatrix} $$ $$ \mathbf{A}^T = \begin{bmatrix} a & d \\ b & e \\ c & f \\ \end{bmatrix} $$ Ou seja, as linhas de $\mathbf{A}$ são as colunas de $\mathbf{A}^T$. ``` A = [[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]] nlin = 3 ncol = 4 At = [] ncol_At = 3 nlin_At = 4 for i in range(nlin_At): linha = [] for j in range(ncol_At): transposta = A[j][i] linha.append(transposta) At.append(linha) print(At) B = [] for i in range(nlin_At): linha = [] for j in range(ncol): soma = 0 for k in range(ncol_At): soma = soma + At[i][k]*A[k][j] linha.append(soma) B.append(linha) print(B) for i in range(nlin_At): for j in range(ncol): print(B[i][j], ' ', end = '') print() ``` ### Resultado esperado O seu código deve imprimir exatamente: 66 78 90 102 78 93 108 123 90 108 126 144 102 123 144 165
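The results of the tasks above can be cross-checked with NumPy (a sketch; NumPy is not used elsewhere in this notebook):

```
import numpy as np

A = np.array([[1, 2, 3, 4],
              [4, 5, 6, 7],
              [7, 8, 9, 10]])
B = np.array([[3, 4, 5],
              [6, 7, 8],
              [9, 10, 11],
              [12, 13, 14]])
v = np.array([12, 13, 14, 15])

print(A @ v)     # matrix-vector product -> [140 302 464]
print(A @ B)     # matrix-matrix product -> [[ 90 100 110] [180 202 224] [270 304 338]]
print(A.T @ A)   # A^T A -> the 4x4 matrix expected in the bonus task
```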
``` # Tuples if __name__ == '__main__': n = int(input()) integer_list = map(int,input().split()) t = tuple(integer_list) print(hash(t)) # Lists N = int(input()) lis=list() for _ in range(N): s=input().strip().split(" ") if s[0]=="insert": lis.insert(int(s[1]),int(s[2])) if s[0]=="print": print(lis) if s[0]=="remove": lis.remove(int(s[1])) if s[0]=="append": lis.append(int(s[1])) if s[0]=="sort": lis.sort() if s[0]=="pop": lis.pop() if s[0]=="reverse": lis.reverse() # Python if-else #!/bin/python import math import os import random import re import sys if __name__ == '__main__': n = int(input().strip()) if n%2 != 0: print("Weird") elif n%2 == 0 : if n>=2 and n<=5 : print("Not Weird") elif n>=6 and n<=20: print("Weird") else : print("Not Weird") # Arithmetic Operators if __name__ == '__main__': a = int(input()) b = int(input()) print(a+b) print(a-b) print(a*b) # Python division if __name__ == '__main__': a = int(input()) b = int(input()) print(a//b) print(a/b) # Loops if __name__ == '__main__': n = int(input()) if n>=0 : for i in range(n): print(i**2) # Write a function def is_leap(year): leap = False # Write your logic here if year % 400 == 0 : leap = True return leap else: if year % 100 == 0 : return leap elif year % 4==0 : leap = True return leap else: return leap year = int(input()) print(is_leap(year)) # Print function if __name__ == '__main__': n = int(input()) for i in range(1,n+1): print(i, end= "") # Runners-up score n = int(input()) arr = map(int, input().split()) print(sorted(set(arr))[-2]) # List Comprehension x = int(input()) y = int(input()) z = int(input()) n = int(input()) #x,y,z,n = (int(input()) for _ in range(4)) print(list([i,j,k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if i+j+k != n)) # Nested Lists if __name__ == '__main__': name,score=[],[] for _ in range(int(input())): name.append(input()) score.append(float(input())) print("\n".join(sorted([name[i] for i in range(len(name)) if score[i]==sorted(list(set(score)))[1]]))) # Finding the percentage: if __name__ == '__main__': n = int(input()) student_marks = {} for _ in range(n): name, *line = input().split() scores = list(map(float, line)) student_marks[name] = scores query_name = input() print('%.2f'%(sum(student_marks[query_name])/3)) ```
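The `is_leap` solution above can be written more compactly; an equivalent one-line version with a small self-check (a sketch, not the HackerRank submission) is:

```
def is_leap(year):
    # A year is a leap year if it is divisible by 4,
    # except for century years, which must also be divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

# Quick check against a few known cases
for y, expected in [(2000, True), (1900, False), (2016, True), (2018, False)]:
    assert is_leap(y) == expected
print("is_leap checks passed")
```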
``` %matplotlib inline from pylab import * ``` --- # Get the data * Load the Olivetti Face dataset * Import the smile/no smile reference data ``` from sklearn import datasets faces = datasets.fetch_olivetti_faces() faces.keys() # Display some images for i in range(10): face = faces.images[i] subplot(1, 10, i + 1) imshow(face.reshape((64, 64)), cmap='gray') axis('off') # Download results-smile-GT-BLS.xml from https://blesaux.github.io/teaching/IOGS-machine-learning # Then load results-smile-GT-BLS.xml from computer in the "/content/" directory import json from google.colab import files uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format(name=fn, length=len(uploaded[fn]))) # Open reference data results = json.load(open('/content/results-smile-GT-BLS.xml')) y = list(results.values()) print(y) yy = [int(yy==True) for yy in y] print(yy) # Analyze reference data yes, no = (sum([results[x] == True for x in results]), sum([results[x] == False for x in results])) bar([0, 1], [no, yes]) ylim(0, max(yes, no)) xticks([0.4, 1.4], ['no smile', 'smile']); smiling_indices = [int(i) for i in results if results[i] == True] fig = plt.figure(figsize=(12, 12)) fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05) for i in range(len(smiling_indices)): # plot the images in a matrix of 20x20 p = fig.add_subplot(20, 20, i + 1) p.imshow(faces.images[smiling_indices[i]], cmap=plt.cm.bone) # label the image with the target value p.text(0, 14, "smiling") p.text(0, 60, str(i)) p.axis('off') ``` --- # Feature extraction * Compute Histogram of Gradients (HoGs) features on **all images** * Understand what HoGs are ``` from __future__ import division, print_function from time import time import numpy as np import matplotlib.pyplot as plt from skimage import feature # Compute HoG features hog_vec = [] hog_vis = [] for i in range(len(faces.images)): image = faces.images[i] hvec, hvis = feature.hog(image, visualise=True) hog_vec.append(hvec) hog_vis.append(hvis) print('Number of features of size... ',np.array(hog_vec).shape) # Understand HOG features from random import randint ii = randint(0, len(faces.images)) print(len(faces.images), ii) fig, ax = plt.subplots(1, 2, figsize=(12, 6), subplot_kw=dict(xticks=[], yticks=[])) ax[0].imshow( faces.images[ii], cmap='gray') ax[0].set_title('input image') ax[1].imshow(hog_vis[ii]) ax[1].set_title('visualization of HOG features'); print(hog_vec[ii]) print(hog_vec[ii].shape) print( np.max(hog_vec) ) print( np.min(hog_vec) ) ``` --- # Prepare train and test data * Split data in training / test set Info: https://scikit-learn.org/stable/model_selection.html ``` from sklearn.model_selection import train_test_split hog_train, hog_test, yy_train, yy_test = ... ``` --- # Trees * Built a tree classifier on HOG Features. * Visualize the decision tree * Info: https://scikit-learn.org/stable/modules/tree.html * Evaluate the trained model * Info: https://scikit-learn.org/stable/modules/model_evaluation.html ``` # Train decision tree from sklearn import tree clf = ... # Visualize the decision tree # Test the model on a single image from random import randrange rnd_test = random_index = randrange(len(yy_test)) # Compute predictions for all test, and estimate performance statistics ``` --- # Random Forest: a bagging example In this part, the objective it to build a **custom random forest**. 
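Before filling in the forest skeleton below, here is one possible completion of the train/test split and decision-tree exercises above. It is a sketch only, assuming `hog_vec` and the smile labels `yy` computed in the earlier cells; the hyper-parameters are illustrative choices, not part of the original exercise.

```
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score, confusion_matrix

# Split HOG features and smile labels into train and test sets
hog_train, hog_test, yy_train, yy_test = train_test_split(
    np.array(hog_vec), np.array(yy), test_size=0.25, random_state=0)

# Fit a single decision tree on the HOG features
clf = tree.DecisionTreeClassifier(max_depth=5, random_state=0)
clf.fit(hog_train, yy_train)

# Evaluate on the held-out test set
yy_pred = clf.predict(hog_test)
print("accuracy:", accuracy_score(yy_test, yy_pred))
print(confusion_matrix(yy_test, yy_pred))
```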
**After the course**, check built-in scikit-learn random forests: https://scikit-learn.org/stable/modules/ensemble.html * Train various trees on subsets of the training set * Use a voting procedure to aggregate the individual predictions on the * Evaluate: * Evaluate the custom forest; * Compare with previous decision tree; * Compare with individual trees of the forest. Numpy tips: [np.arange](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.arange.html), [numpy.sum](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.sum.html), [numpy.mean](https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.mean.html), [numpy.where](https://docs.scipy.org/doc/numpy-1.15.0/reference/generated/numpy.where.html) ``` # The training data are: hog_train, yy_train # The test data are: hog_test, yy_test # First prepare several subsets # Second train decision trees on each subset nb_trees = ... forest = [] # Get predictions on the test dataset print(len(forest)) # Vote ```
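One possible way to fill in the custom random-forest skeleton above, as a sketch: bootstrap resampling is used as the subsetting strategy, the individual tree predictions are aggregated by majority vote, and `hog_train`, `yy_train`, `hog_test`, `yy_test` are assumed to be NumPy arrays (for example from the split sketch earlier).

```
import numpy as np
from sklearn import tree

rng_forest = np.random.RandomState(0)
nb_trees = 10
forest = []
n_train = len(yy_train)

# Train each tree on a bootstrap sample (sampling with replacement) of the training set
for _ in range(nb_trees):
    idx = rng_forest.choice(np.arange(n_train), size=n_train, replace=True)
    clf_b = tree.DecisionTreeClassifier(max_depth=5, random_state=0)
    clf_b.fit(hog_train[idx], yy_train[idx])
    forest.append(clf_b)

# Stack the per-tree predictions on the test set: shape (nb_trees, n_test)
all_preds = np.array([clf_b.predict(hog_test) for clf_b in forest])

# Majority vote: predict "smile" when more than half of the trees do
forest_pred = (np.mean(all_preds, axis=0) > 0.5).astype(int)

# Compare the forest with its individual trees
print("bagged forest accuracy:", np.mean(forest_pred == np.asarray(yy_test)))
for i, clf_b in enumerate(forest):
    acc = np.mean(clf_b.predict(hog_test) == np.asarray(yy_test))
    print("tree %d accuracy: %.3f" % (i, acc))
```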
<a href="https://colab.research.google.com/github/agemagician/CodeTrans/blob/main/prediction/single%20task/function%20documentation%20generation/php/base_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> **<h3>Predict the documentation for php code using codeTrans single task training model</h3>** <h4>You can make free prediction online through this <a href="https://huggingface.co/SEBIS/code_trans_t5_base_code_documentation_generation_php">Link</a></h4> (When using the prediction online, you need to parse and tokenize the code first.) **1. Load necessry libraries including huggingface transformers** ``` !pip install -q transformers sentencepiece from transformers import AutoTokenizer, AutoModelWithLMHead, SummarizationPipeline ``` **2. Load the token classification pipeline and load it into the GPU if avilabile** ``` pipeline = SummarizationPipeline( model=AutoModelWithLMHead.from_pretrained("SEBIS/code_trans_t5_base_code_documentation_generation_php"), tokenizer=AutoTokenizer.from_pretrained("SEBIS/code_trans_t5_base_code_documentation_generation_php", skip_special_tokens=True), device=0 ) ``` **3 Give the code for summarization, parse and tokenize it** ``` code = "public static function update ( $ table ) { if ( ! is_array ( $ table ) ) { $ table = json_decode ( $ table , true ) ; } if ( ! SchemaManager :: tableExists ( $ table [ 'oldName' ] ) ) { throw SchemaException :: tableDoesNotExist ( $ table [ 'oldName' ] ) ; } $ updater = new self ( $ table ) ; $ updater -> updateTable ( ) ; }" #@param {type:"raw"} !pip install tree_sitter !git clone https://github.com/tree-sitter/tree-sitter-php from tree_sitter import Language, Parser Language.build_library( 'build/my-languages.so', ['tree-sitter-php'] ) PHP_LANGUAGE = Language('build/my-languages.so', 'php') parser = Parser() parser.set_language(PHP_LANGUAGE) def get_string_from_code(node, lines): line_start = node.start_point[0] line_end = node.end_point[0] char_start = node.start_point[1] char_end = node.end_point[1] if line_start != line_end: code_list.append(' '.join([lines[line_start][char_start:]] + lines[line_start+1:line_end] + [lines[line_end][:char_end]])) else: code_list.append(lines[line_start][char_start:char_end]) def my_traverse(node, code_list): lines = code.split('\n') if node.child_count == 0: get_string_from_code(node, lines) elif node.type == 'string': get_string_from_code(node, lines) else: for n in node.children: my_traverse(n, code_list) return ' '.join(code_list) tree = parser.parse(bytes(code, "utf8")) code_list=[] tokenized_code = my_traverse(tree.root_node, code_list) print("Output after tokenization: " + tokenized_code) ``` **4. Make Prediction** ``` pipeline([tokenized_code]) ```
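To document several PHP functions in one go, the parsing and tokenization steps above can be wrapped into a helper. This is only a sketch that reuses the `parser` and `pipeline` objects created above; the helper name and the example snippet are made up for illustration.

```
def tokenize_php(code_str):
    """Parse a PHP snippet with tree-sitter and return a space-separated token string."""
    lines = code_str.split('\n')
    tokens = []

    def collect(node):
        # Same traversal as above: keep leaves and string nodes, recurse otherwise
        if node.child_count == 0 or node.type == 'string':
            ls, cs = node.start_point
            le, ce = node.end_point
            if ls != le:
                tokens.append(' '.join([lines[ls][cs:]] + lines[ls + 1:le] + [lines[le][:ce]]))
            else:
                tokens.append(lines[ls][cs:ce])
        else:
            for child in node.children:
                collect(child)

    collect(parser.parse(bytes(code_str, "utf8")).root_node)
    return ' '.join(tokens)

# Example: tokenize and summarize a second (hypothetical) snippet
snippet = "public function getName ( ) { return $ this -> name ; }"
print(pipeline([tokenize_php(snippet)]))
```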
```
import primes
```

# The Sieve of Eratosthenes

Sharon, Kynan, Jet

September 26th, 2017

## Description

The goal of this algorithm is to find all prime numbers up to "n". The Sieve of Eratosthenes works just like its name suggests: it sieves out the composite numbers. First, all positive integers up to "n" are generated in a list, starting with 2. Since 2 is a prime number, all of the numbers that are a multiple of 2 are removed. Then we move on to the next number in the list, which is 3. Since 3 is a prime number, all the numbers that are multiples of 3 are removed. This step is repeated until we are left with the prime numbers from 2 up to the largest prime number less than or equal to "n".

The function that we created, in primes.py, returns all prime numbers smaller than a positive integer entered by the user from the terminal. The main function first checks that the input is a positive integer, then calls the eratosthenes function to get all prime numbers smaller than that input, and prints them to the terminal. We decided to use a list to store our data because lists can be extended or reduced easily with the append and remove methods, and we were also more familiar with how lists work.

## Algorithm

The eratosthenes function first generates all positive integers less than n by calling a generator, starting from the number 2. It then removes all multiples of 2, and then all multiples of the next largest remaining (prime) number (using the % mod operator); see the core code below. This is repeated until all remaining numbers have been processed. Finally, the set of remaining (prime) numbers is returned.

'''Python - primes.py
# remove all multiples of the next largest remaining (prime) number (using the % mod operator)
for _ in range(0, lenList - 1):
    i = 0
    prime = True
    while listRM[i] < largestNum:
        if largestNum % listRM[i] == 0:
            prime = False
            break
        else:
            prime = True
        i += 1
    if prime == False:
        listRM.remove(largestNum)
        lenList -= 1
        idxList -= 1
    largestNum = listRM[idxList - 1]
'''

### The mod operator used in the code: $largestNum \% listRM[i] == 0$

## Example 1: return all primes less than 100

```
import primes
primes.eratosthenes(100)
```

## Generating Prime Numbers

In the previous Eratosthenes algorithm, using the function eratosthenes(), we composed a list and then selected the prime numbers from it. In this alternative algorithm, gen_eratosthenes(), we use a generator to yield only prime numbers: we simply request as many prime numbers as we want. Modifying the code to use a generator makes the algorithm much more efficient because we no longer have to loop through the list and remove the numbers that are multiples of primes. We again use a list to store our data because it works well together with the generator and the yield statement for accumulating the sequence of prime numbers.
'''Python - gen_eratosthenes
def prime_check(Num, primeList):
    for primeNum in primeList:
        if Num % primeNum == 0:
            return False
    return True

def gen_eratosthenes():
    primeList = []
    num = 2
    while True:
        primeList.append(num)
        yield num
        num = num + 1
        while prime_check(num, primeList) == False:
            num = num + 1
'''

## Benchmarking Implementations

### first algorithm (eratosthenes)

```
import primes
%timeit primes.eratosthenes(80)
```

### second algorithm (generator)

```
import primes

def try_gen_prime(n):
    g = primes.gen_eratosthenes()
    p = next(g)
    primeList = [p]
    while p < n:
        p = next(g)
        primeList.append(p)
    return primeList

%timeit try_gen_prime(40)
```

The first algorithm uses a generator to create a list from 2 to n and removes values when they are multiples of the primes already in the list, while the second algorithm uses only one generator and the yield statement to produce the prime numbers directly; as a result, the second algorithm is much faster than the first.
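For comparison, the textbook formulation of the sieve marks composites in a boolean array instead of removing elements from a list. The sketch below is an alternative implementation (it is not part of the `primes` module) that can be cross-checked against `primes.eratosthenes`:

```
import primes

def sieve_bool(n):
    """Return all primes smaller than n using a boolean marking array."""
    if n < 3:
        return []
    is_prime = [True] * n
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(n ** 0.5) + 1):
        if is_prime[p]:
            # Mark every multiple of p, starting at p*p, as composite
            for multiple in range(p * p, n, p):
                is_prime[multiple] = False
    return [i for i, flag in enumerate(is_prime) if flag]

# The boolean-array version should agree with the list-removal version above
print(set(sieve_bool(100)) == set(primes.eratosthenes(100)))
%timeit sieve_bool(80)
```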
<a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/08_ee_js_to_ipynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a> Uncomment the following line to install [geemap](https://geemap.org) if needed. ``` # !pip install geemap ``` ## Automatic conversion from Earth Engine JavaScripts to Python scripts ### Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` import geemap geemap.show_youtube('RpIaalFk4H8') ``` ### Convert Earth Eninge JavaScripts to Python scripts ``` import os from geemap.conversion import * # Create a temporary working directory work_dir = os.path.join(os.path.expanduser('~'), 'geemap') # Get Earth Engine JavaScript examples. There are five examples in the geemap package folder. # Change js_dir to your own folder containing your Earth Engine JavaScripts, # such as js_dir = '/path/to/your/js/folder' js_dir = get_js_examples(out_dir=work_dir) # Convert all Earth Engine JavaScripts in a folder recursively to Python scripts. js_to_python_dir(in_dir=js_dir, out_dir=js_dir, use_qgis=True) print("Python scripts saved at: {}".format(js_dir)) ``` ### Convert Earth Engine Python scripts to Jupyter Notebooks ``` # Convert all Earth Engine Python scripts in a folder recursively to Jupyter notebooks. nb_template = get_nb_template() # Get the notebook template from the package folder. py_to_ipynb_dir(js_dir, nb_template) # Execute all Jupyter notebooks in a folder recursively and save the output cells. execute_notebook_dir(in_dir=js_dir) ```
``` """ Concat all data files into one [Training] """ """ This code takes all the text files in a directory and merge them into one """ import pandas as pd import os import glob import numpy as np import multiprocessing import string import random import time import shutil dataDirectory = r"/home/muddi004/muddi/GIANT/downsized-1564-v2/Block-text-files/" outputfilePath = r"/home/muddi004/muddi/citationParser/data/" + 'GIANT-1564-v2-train.txt' datafiles = glob.glob(os.path.join(dataDirectory, "*.txt")) #datafiles = datafiles[0:350] # take 40 files to get sm print(len(datafiles)) with open(outputfilePath,'wb') as wfd: for f in datafiles:#['seg1.txt','seg2.txt','seg3.txt']: with open(f,'rb') as fd: shutil.copyfileobj(fd, wfd) """ Concat all data files into one [Validation] """ """ This code takes all the text files in a directory and merge them into one """ import pandas as pd import os import glob import numpy as np import multiprocessing import string import random import time import shutil dataDirectory = r"/home/muddi004/muddi/GIANT/data-validation/" outputfilePath = r"/home/muddi004/muddi/citationParser/data/" + 'data-validation.txt' datafiles = glob.glob(os.path.join(dataDirectory, "*.txt")) with open(outputfilePath,'wb') as wfd: for f in datafiles:#['seg1.txt','seg2.txt','seg3.txt']: with open(f,'rb') as fd: shutil.copyfileobj(fd, wfd) """ Concat all data files into one [Test] """ """ This code takes all the text files in a directory and merge them into one """ import pandas as pd import os import glob import numpy as np import multiprocessing import string import random import time import shutil dataDirectory = r"/home/muddi004/muddi/GIANT/GIANT-test/Sample-three/BlockTokenizer/" outputfilePath = r"/home/muddi004/muddi/GIANT/GIANT-test/Sample-three/BlockTokenizer/concat/" + 'GIANT-block-test.txt' datafiles = glob.glob(os.path.join(dataDirectory, "*.txt")) with open(outputfilePath,'wb') as wfd: for f in datafiles:#['seg1.txt','seg2.txt','seg3.txt']: with open(f,'rb') as fd: shutil.copyfileobj(fd, wfd) """ Concat all data files into one [Testing code block] """ """ This code takes all the text files in a directory and merge them into one """ import pandas as pd import os import glob import numpy as np import multiprocessing import string import random import time import shutil dataDirectory = r"/home/muddi004/muddi/GIANT/downsized-100/text-files/" outputfilePath = r"/home/muddi004/muddi/GIANT/downsized-100/text-files/cancat/" + 'giant-test.txt' datafiles = glob.glob(os.path.join(dataDirectory, "*.txt")) datafiles = datafiles[:300] with open(outputfilePath,'wb') as wfd: for f in datafiles:#['seg1.txt','seg2.txt','seg3.txt']: with open(f,'rb') as fd: shutil.copyfileobj(fd, wfd) pip install seqeval from seqeval.metrics import accuracy_score from seqeval.metrics import classification_report from seqeval.metrics import f1_score y_true = [['B-PER', 'I-PER', 'B-PUNC', 'I-PER']] y_pred = [['B-PER', 'I-PER', 'B-PUNC', 'I-PER']] #f1_score(y_true, y_pred) print(classification_report(y_true, y_pred)) ```
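The four cells above repeat the same merge loop with different paths; a reusable helper such as the sketch below (the function name and example paths are illustrative) avoids the duplication:

```
import glob
import os
import shutil

def concat_text_files(data_directory, output_file_path, limit=None):
    """Concatenate all .txt files found in data_directory into a single output file.

    If limit is given, only the first `limit` files (in glob order) are merged.
    """
    datafiles = glob.glob(os.path.join(data_directory, "*.txt"))
    if limit is not None:
        datafiles = datafiles[:limit]
    print("merging %d files into %s" % (len(datafiles), output_file_path))
    with open(output_file_path, 'wb') as wfd:
        for f in datafiles:
            with open(f, 'rb') as fd:
                shutil.copyfileobj(fd, wfd)

# Example usage (placeholder paths; adjust to the directories used above)
# concat_text_files("/path/to/text-files/", "/path/to/output/merged.txt", limit=300)
```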
``` # Visualization of the KO+ChIP Gold Standard from: # Miraldi et al. (2018) "Leveraging chromatin accessibility for transcriptional regulatory network inference in Th17 Cells" # TO START: In the menu above, choose "Cell" --> "Run All", and network + heatmap will load # Change "canvas" to "SVG" (drop-down menu in cell below) to enable drag interactions with nodes & labels # More info about jp_gene_viz and user interface instructions are available on Github: # https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/dNetwork%20widget%20overview.ipynb # Info specific to the "Multi-network" view: # https://github.com/simonsfoundation/jp_gene_viz/blob/master/doc/Combined%20widgets.ipynb # directory containing gene expression data and network folder directory = "." # folder containing networks netPath = 'Networks' # name of gene expression file expressionFile = 'Th0_Th17_48hTh.txt' # sample condition for initial gene node color sampleConditionOfInt = 'Th17(48h)' # The starting conditions for each of the networks is a list of tuples. Tuple entries are: # 0. network file name (column format) (as found in directory) # 1. column of the expression matrix that you want the nodes to be colored by # 2. network title, to which we'll add the gene and peak cutoffs # 3. cut off for edge strength, note TRN edges strengths are quantile for 15 TFs/gene, to see top 10 TFs/gene, # increase cutoff to .33, etc. networkInits = [ ('ChIP_A17_KOall_ATh_bias50_maxComb_sp.tsv',sampleConditionOfInt,' Final ChIP/ATAC(Th17)+KO+ATAC(Th) TRN',.93), ('ATAC_Th17_bias50_maxComb_sp.tsv',sampleConditionOfInt,'Final ATAC-only TRN', .93), ("KO75_KOrk_1norm_sp.tsv",sampleConditionOfInt,'KO G.S. (25 TFs)',0), ("KC1p5_sp.tsv",sampleConditionOfInt,'KO-ChIP G.S. (9 TFs)',0)] tfFocus = 1 # If 1, automatically applies the "TF only" function, so we can focus on TFs # If 0, all genes shown # Uncomment to run without install (in binder, for example) import sys if ".." 
not in sys.path: sys.path.append("..") from jp_gene_viz import dNetwork dNetwork.load_javascript_support() from jp_gene_viz import multiple_network from jp_gene_viz import LExpression LExpression.load_javascript_support() networkList = list() # this list will contain heatmap-linked network objects for networkInit in networkInits: networkFile = networkInit[0] curr = LExpression.LinkedExpressionNetwork() print directory + '/' + networkFile curr.load_network(directory + '/' + netPath + '/' + networkFile) networkList.append(curr) # visualize the networks -- HARD CODED for 4 networks: M = multiple_network.MultipleNetworks( [[networkList[0], networkList[1]], [networkList[2], networkList[3]]]) M.svg_width = 500 M.show() # Set network preferences count = 0 for curr in networkList: networkInit = networkInits[count] # get title information + curr column for shading of figures currCol = networkInit[1] titleInf = networkInit[2] threshhold = networkInit[3] # set threshold curr.network.threshhold_slider.value = threshhold curr.network.apply_click(None) curr.network.restore_click(None) if tfFocus: # focus on TF core curr.network.tf_only_click(None) curr.network.layout_click(None) # layout network curr.network.connected_only_click() curr.network.layout_dropdown.value = 'fruchterman_reingold' curr.network.layout_click() # set title curr.network.title_html.value = titleInf # add labels curr.network.labels_button.value=True curr.network.draw_click(None) # Load heatmap curr.load_heatmap(directory + '/' + expressionFile) # color nodes according to a sample column in the gene expression matrix curr.gene_click(None) curr.expression.transform_dropdown.value = 'Z score' curr.expression.apply_transform() curr.expression.col = currCol curr.condition_click(None) count += 1 ```
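The comment in the first cell says that edge strengths are quantiles over 15 TFs per gene and that a cutoff of .33 shows the top 10 TFs per gene. Assuming the convention is cutoff = 1 - k/15 for keeping the top k TFs per gene (an inference from that example, not something stated by jp_gene_viz), a small helper makes the conversion explicit:

```
def quantile_cutoff(top_k, tfs_per_gene=15):
    # Keep edges whose quantile strength exceeds 1 - top_k / tfs_per_gene
    return 1.0 - float(top_k) / tfs_per_gene

# Reproduces the values mentioned in the comments above (illustrative assumption)
print(quantile_cutoff(10))  # ~0.33 -> top 10 TFs per gene
print(quantile_cutoff(1))   # ~0.93 -> top TF per gene, close to the .93 used for the TRNs
```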
# Bagging This notebook introduces a very natural strategy to build ensembles of machine learning models named "bagging". "Bagging" stands for Bootstrap AGGregatING. It uses bootstrap resampling (random sampling with replacement) to learn several models on random variations of the training set. At predict time, the predictions of each learner are aggregated to give the final predictions. First, we will generate a simple synthetic dataset to get insights regarding bootstraping. ``` import pandas as pd import numpy as np # create a random number generator that will be used to set the randomness rng = np.random.RandomState(1) def generate_data(n_samples=30): """Generate synthetic dataset. Returns `data_train`, `data_test`, `target_train`.""" x_min, x_max = -3, 3 x = rng.uniform(x_min, x_max, size=n_samples) noise = 4.0 * rng.randn(n_samples) y = x ** 3 - 0.5 * (x + 1) ** 2 + noise y /= y.std() data_train = pd.DataFrame(x, columns=["Feature"]) data_test = pd.DataFrame( np.linspace(x_max, x_min, num=300), columns=["Feature"]) target_train = pd.Series(y, name="Target") return data_train, data_test, target_train import matplotlib.pyplot as plt import seaborn as sns data_train, data_test, target_train = generate_data(n_samples=30) sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) _ = plt.title("Synthetic regression dataset") ``` The relationship between our feature and the target to predict is non-linear. However, a decision tree is capable of approximating such a non-linear dependency: ``` from sklearn.tree import DecisionTreeRegressor tree = DecisionTreeRegressor(max_depth=3, random_state=0) tree.fit(data_train, target_train) y_pred = tree.predict(data_test) ``` Remember that the term "test" here refers to data that was not used for training and computing an evaluation metric on such a synthetic test set would be meaningless. ``` sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) plt.plot(data_test["Feature"], y_pred, label="Fitted tree") plt.legend() _ = plt.title("Predictions by a single decision tree") ``` Let's see how we can use bootstraping to learn several trees. ## Bootstrap resampling A bootstrap sample corresponds to a resampling with replacement, of the original dataset, a sample that is the same size as the original dataset. Thus, the bootstrap sample will contain some data points several times while some of the original data points will not be present. We will create a function that given `data` and `target` will return a resampled variation `data_bootstrap` and `target_bootstrap`. ``` def bootstrap_sample(data, target): # Indices corresponding to a sampling with replacement of the same sample # size than the original data bootstrap_indices = rng.choice( np.arange(target.shape[0]), size=target.shape[0], replace=True, ) # In pandas, we need to use `.iloc` to extract rows using an integer # position index: data_bootstrap = data.iloc[bootstrap_indices] target_bootstrap = target.iloc[bootstrap_indices] return data_bootstrap, target_bootstrap ``` We will generate 3 bootstrap samples and qualitatively check the difference with the original dataset. 
``` n_bootstraps = 3 for bootstrap_idx in range(n_bootstraps): # draw a bootstrap from the original data data_bootstrap, target_booststrap = bootstrap_sample( data_train, target_train, ) plt.figure() plt.scatter(data_bootstrap["Feature"], target_booststrap, color="tab:blue", facecolors="none", alpha=0.5, label="Resampled data", s=180, linewidth=5) plt.scatter(data_train["Feature"], target_train, color="black", s=60, alpha=1, label="Original data") plt.title(f"Resampled data #{bootstrap_idx}") plt.legend() ``` Observe that the 3 variations all share common points with the original dataset. Some of the points are randomly resampled several times and appear as darker blue circles. The 3 generated bootstrap samples are all different from the original dataset and from each other. To confirm this intuition, we can check the number of unique samples in the bootstrap samples. ``` data_train_huge, data_test_huge, target_train_huge = generate_data( n_samples=100_000) data_bootstrap_sample, target_bootstrap_sample = bootstrap_sample( data_train_huge, target_train_huge) ratio_unique_sample = (np.unique(data_bootstrap_sample).size / data_bootstrap_sample.size) print( f"Percentage of samples present in the original dataset: " f"{ratio_unique_sample * 100:.1f}%" ) ``` On average, ~63.2% of the original data points of the original dataset will be present in a given bootstrap sample. The other ~36.8% are repeated samples. We are able to generate many datasets, all slightly different. Now, we can fit a decision tree for each of these datasets and they all shall be slightly different as well. ``` bag_of_trees = [] for bootstrap_idx in range(n_bootstraps): tree = DecisionTreeRegressor(max_depth=3, random_state=0) data_bootstrap_sample, target_bootstrap_sample = bootstrap_sample( data_train, target_train) tree.fit(data_bootstrap_sample, target_bootstrap_sample) bag_of_trees.append(tree) ``` Now that we created a bag of different trees, we can use each of the trees to predict the samples within the range of data. They shall give slightly different predictions. ``` sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) for tree_idx, tree in enumerate(bag_of_trees): tree_predictions = tree.predict(data_test) plt.plot(data_test["Feature"], tree_predictions, linestyle="--", alpha=0.8, label=f"Tree #{tree_idx} predictions") plt.legend() _ = plt.title("Predictions of trees trained on different bootstraps") ``` ## Aggregating Once our trees are fitted and we are able to get predictions for each of them. In regression, the most straightforward way to combine those predictions is just to average them: for a given test data point, we feed the input feature values to each of the `n` trained models in the ensemble and as a result compute `n` predicted values for the target variable. The final prediction of the ensemble for the test data point is the average of those `n` values. We can plot the averaged predictions from the previous example. 
``` sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) bag_predictions = [] for tree_idx, tree in enumerate(bag_of_trees): tree_predictions = tree.predict(data_test) plt.plot(data_test["Feature"], tree_predictions, linestyle="--", alpha=0.8, label=f"Tree #{tree_idx} predictions") bag_predictions.append(tree_predictions) bag_predictions = np.mean(bag_predictions, axis=0) plt.plot(data_test["Feature"], bag_predictions, label="Averaged predictions", linestyle="-") plt.legend(bbox_to_anchor=(1.05, 0.8), loc="upper left") _ = plt.title("Predictions of bagged trees") ``` The solid red line shows the averaged predictions, which would be the final predictions given by our 'bag' of decision tree regressors. Note that the predictions of the ensemble are more stable because of the averaging operation. As a result, the bag of trees as a whole is less likely to overfit than the individual trees. ## Bagging in scikit-learn Scikit-learn implements the bagging procedure as a "meta-estimator", that is, an estimator that wraps another estimator: it takes a base model that is cloned several times and trained independently on each bootstrap sample. The following code snippet shows how to build a bagging ensemble of decision trees. We set `n_estimators=100` instead of 3 in our manual implementation above to get a stronger smoothing effect. ``` from sklearn.ensemble import BaggingRegressor bagged_trees = BaggingRegressor( base_estimator=DecisionTreeRegressor(max_depth=3), n_estimators=100, ) _ = bagged_trees.fit(data_train, target_train) ``` Let us visualize the predictions of the ensemble on the same interval of data: ``` sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) bagged_trees_predictions = bagged_trees.predict(data_test) plt.plot(data_test["Feature"], bagged_trees_predictions) _ = plt.title("Predictions from a bagging regressor") ``` Because we use 100 trees in the ensemble, the average prediction is indeed slightly smoother but very similar to our previous average plot. It is possible to access the internal models of the ensemble stored as a Python list in the `bagged_trees.estimators_` attribute after fitting. Let us compare the base model predictions with their average: ``` for tree_idx, tree in enumerate(bagged_trees.estimators_): label = "Predictions of individual trees" if tree_idx == 0 else None # we convert `data_test` into a NumPy array to avoid a warning raised in scikit-learn tree_predictions = tree.predict(data_test.to_numpy()) plt.plot(data_test["Feature"], tree_predictions, linestyle="--", alpha=0.1, color="tab:blue", label=label) sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) bagged_trees_predictions = bagged_trees.predict(data_test) plt.plot(data_test["Feature"], bagged_trees_predictions, color="tab:orange", label="Predictions of ensemble") _ = plt.legend() ``` We used a low value of the opacity parameter `alpha` to better appreciate the overlap in the prediction functions of the individual trees. This visualization gives some insight into the uncertainty in the predictions in different areas of the feature space. ## Bagging complex pipelines While we used a decision tree as a base model, nothing prevents us from using any other type of model.
Since we know that the original data generating function is a noisy polynomial transformation of the input variable, let us try to fit a bagged polynomial regression pipeline on this dataset: ``` from sklearn.linear_model import Ridge from sklearn.preprocessing import PolynomialFeatures from sklearn.preprocessing import MinMaxScaler from sklearn.pipeline import make_pipeline polynomial_regressor = make_pipeline( MinMaxScaler(), PolynomialFeatures(degree=4), Ridge(alpha=1e-10), ) ``` This pipeline first scales the data to the 0-1 range with `MinMaxScaler`. Then it extracts degree-4 polynomial features. The resulting features will all stay in the 0-1 range by construction: if `x` lies in the 0-1 range then `x ** n` also lies in the 0-1 range for any value of `n`. Then the pipeline feeds the resulting non-linear features to a regularized linear regression model for the final prediction of the target variable. Note that we intentionally use a small value for the regularization parameter `alpha` as we expect the bagging ensemble to work well with slightly overfit base models. The ensemble itself is simply built by passing the resulting pipeline as the `base_estimator` parameter of the `BaggingRegressor` class: ``` bagging = BaggingRegressor( base_estimator=polynomial_regressor, n_estimators=100, random_state=0, ) _ = bagging.fit(data_train, target_train) for i, regressor in enumerate(bagging.estimators_): # we convert `data_test` into a NumPy array to avoid a warning raised in scikit-learn regressor_predictions = regressor.predict(data_test.to_numpy()) base_model_line = plt.plot( data_test["Feature"], regressor_predictions, linestyle="--", alpha=0.2, label="Predictions of base models" if i == 0 else None, color="tab:blue" ) sns.scatterplot(x=data_train["Feature"], y=target_train, color="black", alpha=0.5) bagging_predictions = bagging.predict(data_test) plt.plot(data_test["Feature"], bagging_predictions, color="tab:orange", label="Predictions of ensemble") plt.ylim(target_train.min(), target_train.max()) plt.legend() _ = plt.title("Bagged polynomial regression") ``` The predictions of this bagged polynomial regression model look qualitatively better than the bagged trees. This is somewhat expected since the base model better reflects our knowledge of the true data generating process. Again, the different shades induced by the overlapping blue lines let us appreciate the uncertainty in the prediction of the bagged ensemble. To conclude this notebook, we note that the bootstrapping procedure is a generic tool of statistics and is not limited to building ensembles of machine learning models. The interested reader can learn more in the [Wikipedia article on bootstrapping](https://en.wikipedia.org/wiki/Bootstrapping_(statistics)).
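As a pointer beyond ensembling, here is a minimal sketch (plain NumPy, with toy data and hypothetical variable names) of the same resampling idea applied to classical statistics: estimating a confidence interval for a sample mean, plus a quick numerical check of the ~63.2% figure quoted earlier.

```
import numpy as np

rng = np.random.RandomState(0)
sample = rng.normal(loc=10.0, scale=2.0, size=500)  # toy observations

# Bootstrap estimate of a 95% confidence interval for the mean.
boot_means = [
    rng.choice(sample, size=sample.size, replace=True).mean()
    for _ in range(2000)
]
low, high = np.percentile(boot_means, [2.5, 97.5])
print(f"mean={sample.mean():.2f}, 95% CI=({low:.2f}, {high:.2f})")

# Quick check of the ~63.2% figure: a given point appears at least once in a
# bootstrap sample with probability 1 - (1 - 1/n)**n, which tends to 1 - 1/e.
n = sample.size
print(f"expected fraction of original points present: {1 - (1 - 1 / n) ** n:.3f}")
```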
### Dependencies for the interactive plots apart from rdkit, oechem and other qc* packages !conda install -c conda-forge plotly -y !conda install -c plotly jupyter-dash -y !conda install -c plotly plotly-orca -y ``` #imports import numpy as np from scipy import stats import fragmenter from openeye import oechem TD_datasets = [ 'Fragment Stability Benchmark', # 'Fragmenter paper', # 'OpenFF DANCE 1 eMolecules t142 v1.0', 'OpenFF Fragmenter Validation 1.0', 'OpenFF Full TorsionDrive Benchmark 1', 'OpenFF Gen 2 Torsion Set 1 Roche 2', 'OpenFF Gen 2 Torsion Set 2 Coverage 2', 'OpenFF Gen 2 Torsion Set 3 Pfizer Discrepancy 2', 'OpenFF Gen 2 Torsion Set 4 eMolecules Discrepancy 2', 'OpenFF Gen 2 Torsion Set 5 Bayer 2', 'OpenFF Gen 2 Torsion Set 6 Supplemental 2', 'OpenFF Group1 Torsions 2', 'OpenFF Group1 Torsions 3', 'OpenFF Primary Benchmark 1 Torsion Set', 'OpenFF Primary Benchmark 2 Torsion Set', 'OpenFF Primary TorsionDrive Benchmark 1', 'OpenFF Rowley Biaryl v1.0', 'OpenFF Substituted Phenyl Set 1', 'OpenFF-benchmark-ligand-fragments-v1.0', 'Pfizer Discrepancy Torsion Dataset 1', 'SMIRNOFF Coverage Torsion Set 1', # 'SiliconTX Torsion Benchmark Set 1', 'TorsionDrive Paper' ] def oeb2oemol(oebfile): """ Takes in oebfile and generates oemolList Parameters ---------- oebfile : String Title of an oeb file Returns ------- mollist : List of objects List of OEMols in the .oeb file """ ifs = oechem.oemolistream(oebfile) mollist = [] for mol in ifs.GetOEGraphMols(): mollist.append(oechem.OEGraphMol(mol)) return mollist def compute_r_ci(wbos, max_energies): return (stats.linregress(wbos, max_energies)[2])**2 def plot_interactive(fileList, t_id): """ Takes in a list of oeb files and plots wbo vs torsion barrier, combining all the datasets and plotting by each tid in the combined dataset Note: ***Plot is interactive (or returns chemical structures) only for the last usage Parameters ---------- fileList: list of strings each string is a oeb file name Eg. 
['rowley.oeb'] or ['rowley.oeb', 'phenyl.oeb'] t_id: str torsion id, eg., 't43' """ import plotly.express as px from jupyter_dash import JupyterDash import dash_core_components as dcc import dash_html_components as html import pandas as pd import plotly.graph_objects as go from dash.dependencies import Input, Output from rdkit import Chem from rdkit.Chem.Draw import MolsToGridImage import base64 from io import BytesIO from plotly.validators.scatter.marker import SymbolValidator import ntpath df = pd.DataFrame(columns = ['tid', 'tb', 'wbo', 'cmiles', 'TDindices', 'filename']) fig = go.Figure({'layout' : go.Layout(height=900, width=1000, xaxis={'title': 'Wiberg Bond Order'}, yaxis={'title': 'Torsion barrier (kJ/mol)'}, #paper_bgcolor='white', plot_bgcolor='rgba(0,0,0,0)', margin={'l': 40, 'b': 200, 't': 40, 'r': 10}, legend={'orientation': 'h', 'y': -0.2}, legend_font=dict(family='Arial', color='black', size=15), hovermode=False, dragmode='select')}) fig.update_xaxes(title_font=dict(size=26, family='Arial', color='black'), ticks="outside", tickwidth=2, tickcolor='black', ticklen=10, tickfont=dict(family='Arial', color='black', size=20), showgrid=False, gridwidth=1, gridcolor='black', mirror=True, linewidth=2, linecolor='black', showline=True) fig.update_yaxes(title_font=dict(size=26, family='Arial', color='black'), ticks="outside", tickwidth=2, tickcolor='black', ticklen=10, tickfont=dict(family='Arial', color='black', size=20), showgrid=False, gridwidth=1, gridcolor='black', mirror=True, linewidth=2, linecolor='black', showline=True) colors = fragmenter.chemi._KELLYS_COLORS colors = colors * 2 raw_symbols = SymbolValidator().values symbols = [] for i in range(0,len(raw_symbols),8): symbols.append(raw_symbols[i]) count = 0 fname = [] for fileName in fileList: molList = [] fname = fileName molList = oeb2oemol(fname) for m in molList: tid = m.GetData("IDMatch") fname = ntpath.basename(fileName) df = df.append({'tid': tid, 'tb': m.GetData("TB"), 'wbo' : m.GetData("WBO"), 'cmiles' : m.GetData("cmiles"), 'TDindices' : m.GetData("TDindices"), 'filename' : fname}, ignore_index = True) x = df[(df.filename == fname) & (df.tid == t_id)].wbo y = df.loc[x.index].tb fig.add_scatter(x=x, y=y, mode="markers", name=fname, marker_color=colors[count], marker_symbol=count, marker_size=13) count += 1 x = df[df.tid == t_id].wbo y = df.loc[x.index].tb slope, intercept, r_value, p_value, std_err = stats.linregress(x, y) print("tid: ", t_id, "r_value: ", r_value, "slope: ", slope, "intercept: ", intercept) fig.add_traces(go.Scatter( x=np.unique(x), y=np.poly1d([slope, intercept])(np.unique(x)), showlegend=False, mode ='lines')) slope_text = 'slope: '+str('%.2f' % slope) r_value = 'r_val: '+str('%.2f' % r_value) fig_text = slope_text + ', '+ r_value fig.add_annotation(text=fig_text, font = {'family': "Arial", 'size': 22, 'color': 'black'}, xref="paper", yref="paper", x=1, y=1, showarrow=False) graph_component = dcc.Graph(id="graph_id", figure=fig) image_component = html.Img(id="structure-image") external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = JupyterDash(__name__) app.layout = html.Div([ html.Div([graph_component]), html.Div([image_component])]) @app.callback( Output('structure-image', 'src'), [Input('graph_id', 'selectedData')]) def display_selected_data(selectedData): max_structs = 40 structs_per_row = 1 empty_plot = "data:image/gif;base64,R0lGODlhAQABAAAAACwAAAAAAQABAAA=" if selectedData: if len(selectedData['points']) == 0: return empty_plot print("# of points selected = ", 
len(selectedData['points'])) xval = [x['x'] for x in selectedData['points']] yval = [x['y'] for x in selectedData['points']] match_df = df[df['tb'].isin(yval) & df['tid'].isin([t_id])] smiles_list = list(match_df.cmiles) name_list = list(match_df.tid) name_list = [] hl_atoms = [] for i in range(len(smiles_list)): print(smiles_list[i]) indices_tup = match_df.iloc[i].TDindices indices_list = [x + 1 for x in list(indices_tup)] hl_atoms.append(indices_list) tid = match_df.iloc[i].tid tor_bar = match_df.iloc[i].tb wbo_tor = match_df.iloc[i].wbo cmiles_str = match_df.iloc[i].cmiles tmp = [str(tid), ':', 'TDindices [', str(indices_tup[0]+1), str(indices_tup[1]+1), str(indices_tup[2]+1), str(indices_tup[3]+1), ']', 'wbo:', str('%.2f'%(wbo_tor)), 'TB:', str('%.2f'%(tor_bar)), '(kJ/mol)'] name_list.append(' '.join(tmp)) mol_list = [Chem.MolFromSmiles(x) for x in smiles_list] print(len(mol_list)) img = MolsToGridImage(mol_list[0:max_structs], subImgSize=(500, 500), molsPerRow=structs_per_row, legends=name_list) # , # highlightAtomLists=hl_atoms) buffered = BytesIO() img.save(buffered, format="PNG", legendFontSize=60) encoded_image = base64.b64encode(buffered.getvalue()) src_str = 'data:image/png;base64,{}'.format(encoded_image.decode()) else: return empty_plot return src_str if __name__ == '__main__': app.run_server(mode='inline', port=8061, debug=True) return fig ``` `rowley_t43 = plot_interactive(['./FF_1.2.1/OpenFF Rowley Biaryl v1.0.oeb'], t_id='t43')` ``` folder_name = './FF_1.3.0-tig-8/' TD_datasets = [ 'Fragment Stability Benchmark', # 'Fragmenter paper', # 'OpenFF DANCE 1 eMolecules t142 v1.0', 'OpenFF Fragmenter Validation 1.0', 'OpenFF Full TorsionDrive Benchmark 1', 'OpenFF Gen 2 Torsion Set 1 Roche 2', 'OpenFF Gen 2 Torsion Set 2 Coverage 2', 'OpenFF Gen 2 Torsion Set 3 Pfizer Discrepancy 2', 'OpenFF Gen 2 Torsion Set 4 eMolecules Discrepancy 2', 'OpenFF Gen 2 Torsion Set 5 Bayer 2', 'OpenFF Gen 2 Torsion Set 6 Supplemental 2', 'OpenFF Group1 Torsions 2', 'OpenFF Group1 Torsions 3', 'OpenFF Primary Benchmark 1 Torsion Set', 'OpenFF Primary Benchmark 2 Torsion Set', 'OpenFF Primary TorsionDrive Benchmark 1', 'OpenFF Rowley Biaryl v1.0', 'OpenFF Substituted Phenyl Set 1', 'OpenFF-benchmark-ligand-fragments-v1.0', 'Pfizer Discrepancy Torsion Dataset 1', 'SMIRNOFF Coverage Torsion Set 1', # 'SiliconTX Torsion Benchmark Set 1', 'TorsionDrive Paper' ] TD_working_oeb = [folder_name+x+'.oeb' for x in TD_datasets] # all_t43 = plot_interactive(TD_working_oeb, t_id='t43') tig_ids = ['TIG2'] for iid in tig_ids: tmp = plot_interactive(TD_working_oeb, t_id=iid) # tmp.write_image(folder_name+"fig_"+str(iid)+".pdf") ```
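For readers unfamiliar with the fitting step buried inside `plot_interactive`, here is a minimal, self-contained sketch (toy arrays, no OpenEye dependency) of the same `scipy.stats.linregress` call and the r² value that `compute_r_ci` reports:

```
import numpy as np
from scipy import stats

# Toy WBO / torsion-barrier pairs standing in for one torsion id
wbos = np.array([0.95, 1.00, 1.05, 1.10, 1.15])
torsion_barriers = np.array([12.0, 15.5, 19.0, 22.8, 26.1])  # kJ/mol

slope, intercept, r_value, p_value, std_err = stats.linregress(wbos, torsion_barriers)
print(f"slope = {slope:.1f} kJ/mol per WBO unit, intercept = {intercept:.1f}")
print(f"r^2 = {r_value ** 2:.3f}")  # the quantity compute_r_ci() returns
```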
``` import requests import sys import pandas as pd import dateutil.parser as dp import json from pandas.io.json import json_normalize # MET Frost client ID client_id = '' ``` #### Get sources (stations) in Bergen ``` # issue an HTTP GET request r = requests.get( 'https://frost.met.no/sources/v0.jsonld', {'ids': None}, auth=(client_id, '') ) def codec_utf8(s): return s.encode('utf-8').decode('utf-8') # should be used for Python 3 # extract some data from the response if r.status_code == 200: for item in r.json()['data']: try: if 'BERGEN' in item['name']: sys.stdout.write('ID: {}\n'.format(item['id'])) sys.stdout.write('Name: {}\n'.format(codec_utf8(item['name']))) if 'geometry' in item: sys.stdout.write('longitude: {}\n'.format(item['geometry']['coordinates'][0])) sys.stdout.write('latitude: {}\n'.format(item['geometry']['coordinates'][1])) if 'municipality' in item: sys.stdout.write('Municipality: {}\n'.format(codec_utf8(item['municipality']))) if 'county' in item: sys.stdout.write('County: {}\n'.format(codec_utf8(item['county']))) sys.stdout.write('Country: {}\n'.format(codec_utf8(item['country']))) if 'externalIds' in item: for ext_id in item['externalIds']: sys.stdout.write('external ID: {}\n'.format(ext_id)) else: sys.stdout.write('no external IDs found\n') print('\n') except: print('\n') else: sys.stdout.write('error:\n') sys.stdout.write('\tstatus code: {}\n'.format(r.status_code)) if 'error' in r.json(): assert(r.json()['error']['code'] == r.status_code) sys.stdout.write('\tmessage: {}\n'.format(r.json()['error']['message'])) sys.stdout.write('\treason: {}\n'.format(r.json()['error']['reason'])) else: sys.stdout.write('\tother error\n') ``` #### Get temperature data ``` endpoint = 'https://frost.met.no/observations/v0.jsonld' for year in range(2013, 2020): parameters = { 'sources': 'SN50540', 'elements': 'air_temperature', 'referencetime': str(year) + '-01-01/' + str(year) + '-12-31', } r = requests.get( 'https://frost.met.no/observations/v0.jsonld', parameters, auth=(client_id, '') ) # Check if the request worked, print out any errors json_data = r.json() if r.status_code == 200: data = json_data['data'] print('Data retrieved from frost.met.no!') else: print('Error! Returned status code %s' % r.status_code) print('Message: %s' % json_data['error']['message']) print('Reason: %s' % json_data['error']['reason']) with open('../../data/weather/florida/json/weather_florida_' + str(year) + '.json', 'w') as f: json.dump(json_data, f) parameters = { 'sources': 'SN50540', 'elements': 'air_temperature', 'referencetime': '2020-01-01/2020-09-08', } r = requests.get( 'https://frost.met.no/observations/v0.jsonld', parameters, auth=(client_id, '') ) # Check if the request worked, print out any errors json_data = r.json() if r.status_code == 200: data = json_data['data'] print('Data retrieved from frost.met.no!') else: print('Error! 
Returned status code %s' % r.status_code) print('Message: %s' % json_data['error']['message']) print('Reason: %s' % json_data['error']['reason']) with open('../../data/weather/florida/json/weather_florida_2020.json', 'w') as f: json.dump(json_data, f) ``` #### Convert to csv ``` import os json_data_path = '../../data/weather/florida/json/' for path in os.listdir(json_data_path): if path[-5:] == '.json': df_current = json_normalize(pd.read_json(json_data_path + path)['data']) df_current['observations'] = df_current.observations.map(lambda x: x[0]) df_current[['elementId', 'exposureCategory', 'level.levelType', 'level.unit', 'level.value', 'performanceCategory', 'qualityCode', 'timeOffset', 'timeResolution', 'timeSeriesId', 'unit', 'value']] = json_normalize(df_current['observations']) df_current.drop('observations', axis=1, inplace=True) print('Unique level values for ' + path + ':', df_current['level.value'].unique().item()) df_current.to_csv('../../data/weather/florida/csv/' + path[:-5] + '.csv', index=False) ```
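Once the yearly CSV files exist, a natural follow-up is to stack them into a single table for analysis. The sketch below only uses the paths created above; the combined file name `weather_florida_all.csv` is hypothetical.

```
import glob
import pandas as pd

csv_dir = '../../data/weather/florida/csv/'
frames = [pd.read_csv(path) for path in sorted(glob.glob(csv_dir + 'weather_florida_*.csv'))]
weather = pd.concat(frames, ignore_index=True)  # one row per observation, all years
print(weather.shape)
weather.to_csv(csv_dir + 'weather_florida_all.csv', index=False)
```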
# Noisy Convolutional Neural Network Example Build a noisy convolutional neural network with TensorFlow v2. - Author: Gagandeep Singh - Project: https://github.com/czgdp1807/noisy_weights Experimental Details - Datasets: The MNIST database of handwritten digits has been used for training and testing. Observations - It has been observed that accuracy of the model isn't affected on testing it with MNIST digits. - The uncertainty expressed by the model is low which is expected since train and test disitributions are same. References - [1] https://github.com/aymericdamien/TensorFlow-Examples/ ``` from __future__ import absolute_import, division, print_function import tensorflow as tf from tensorflow.keras import Model, layers import numpy as np # MNIST dataset parameters. num_classes = 10 # total classes (0-9 digits). # Training parameters. learning_rate = 0.001 training_steps = 200 batch_size = 128 display_step = 10 # Network parameters. conv1_filters = 32 # number of filters for 1st conv layer. conv2_filters = 64 # number of filters for 2nd conv layer. fc1_units = 1024 # number of neurons for 1st fully-connected layer. # Prepare MNIST data. from tensorflow.keras.datasets import mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() # Convert to float32. x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32) # Normalize images value from [0, 255] to [0, 1]. x_train, x_test = x_train / 255., x_test / 255. # Use tf.data API to shuffle and batch data. train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1) # Create TF Model. class ConvNet(Model): # Set layers. def __init__(self): super(ConvNet, self).__init__() # Convolution Layer with 32 filters and a kernel size of 5. self.conv1 = layers.Conv2D(32, kernel_size=5, activation=tf.nn.relu) # Max Pooling (down-sampling) with kernel size of 2 and strides of 2. self.maxpool1 = layers.MaxPool2D(2, strides=2) # Convolution Layer with 64 filters and a kernel size of 3. self.conv2 = layers.Conv2D(64, kernel_size=3, activation=tf.nn.relu) # Max Pooling (down-sampling) with kernel size of 2 and strides of 2. self.maxpool2 = layers.MaxPool2D(2, strides=2) # Flatten the data to a 1-D vector for the fully connected layer. self.flatten = layers.Flatten() # Fully connected layer. self.fc1 = layers.Dense(1024) # Apply Dropout (if is_training is False, dropout is not applied). self.dropout = layers.Dropout(rate=0.5) # Output layer, class prediction. self.out = layers.Dense(num_classes) # Set forward pass. def call(self, x, is_training=False): def add_noise(_layer): noisy_weights = [] for weight in _layer.get_weights(): noisy_weights.append(weight + tf.random.normal(weight.shape, 0., 0.001)) _layer.set_weights(noisy_weights) if not is_training: add_noise(self.conv1) add_noise(self.conv2) add_noise(self.fc1) add_noise(self.out) x = tf.reshape(x, [-1, 28, 28, 1]) x = self.conv1(x) x = self.maxpool1(x) x = self.conv2(x) x = self.maxpool2(x) x = self.flatten(x) x = self.fc1(x) x = self.dropout(x, training=is_training) x = self.out(x) if not is_training: # tf cross entropy expect logits without softmax, so only # apply softmax when not training. x = tf.nn.softmax(x) return x # Build neural network model. conv_net = ConvNet() # Cross-Entropy Loss. # Note that this will apply 'softmax' to the logits. def cross_entropy_loss(x, y): # Convert labels to int 64 for tf cross-entropy function. 
y = tf.cast(y, tf.int64) # Apply softmax to logits and compute cross-entropy. loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x) # Average loss across the batch. return tf.reduce_mean(loss) # Accuracy metric. def accuracy(y_pred, y_true): # Predicted class is the index of highest score in prediction vector (i.e. argmax). correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64)) return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1) # Stochastic gradient descent optimizer. optimizer = tf.optimizers.Adam(learning_rate) # Optimization process. def run_optimization(x, y): # Wrap computation inside a GradientTape for automatic differentiation. with tf.GradientTape() as g: # Forward pass. pred = conv_net(x, is_training=True) # Compute loss. loss = cross_entropy_loss(pred, y) # Variables to update, i.e. trainable variables. trainable_variables = conv_net.trainable_variables # Compute gradients. gradients = g.gradient(loss, trainable_variables) # Update W and b following gradients. optimizer.apply_gradients(zip(gradients, trainable_variables)) # Run training for the given number of steps. for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1): # Run the optimization to update W and b values. run_optimization(batch_x, batch_y) if step % display_step == 0: pred = conv_net(batch_x) loss = cross_entropy_loss(pred, batch_y) acc = accuracy(pred, batch_y) print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc)) # Test model on validation set. pred = conv_net(x_test) print("Test Accuracy: %f" % accuracy(pred, y_test)) # Visualize predictions. import matplotlib.pyplot as plt def compute_entropy(preds): uncertainties = [] for i in range(preds.shape[0]): uncertainties.append(-tf.reduce_mean(tf.math.multiply(preds[i], tf.math.log(preds[i])))) return tf.convert_to_tensor(uncertainties) n_images = 5 test_images = x_test[:n_images] n_samples = 10 predictions = [] for i in range(n_samples): predictions.append(conv_net(test_images)) predictions = tf.convert_to_tensor(predictions) predictions = tf.reduce_mean(predictions, 0) uncertainty = compute_entropy(predictions) print(uncertainty) # Display image and model prediction. for i in range(n_images): plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray') plt.show() print("Model prediction: %i" % np.argmax(predictions.numpy()[i])) ```
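To make the uncertainty numbers above easier to interpret, here is a small NumPy sketch of the same calculation on two hand-made prediction vectors. Note that `compute_entropy` averages `-p * log(p)` over the classes rather than summing, so its values are the usual predictive entropy divided by the number of classes; the sketch exposes both variants.

```
import numpy as np

def predictive_entropy(probs, average=True):
    """Entropy of a class-probability vector; average=True mirrors compute_entropy."""
    terms = -probs * np.log(probs + 1e-12)  # small epsilon guards against log(0)
    return terms.mean() if average else terms.sum()

confident = np.array([0.91] + [0.01] * 9)  # peaked prediction -> low uncertainty
uncertain = np.full(10, 0.1)               # uniform prediction -> high uncertainty
print(predictive_entropy(confident), predictive_entropy(uncertain))
```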
# GitHub Workshop <center><img src="./Images/Git-Largest.jpg"></center> ### About Version Control Systems (VCS) What is “version control”, and why should you use it? Version control is a system that records changes to a file or set of files over time so that you can recall specific versions later. #### Centralised Version Control Systems <center><img src="./Images/image1.PNG" width="500"></center> #### Distributed Version Control Systems - Which is our Git <center><img src="./Images/image2.PNG" width="450"></center> ### Important difference between Git and other VCS The major difference between Git and any other VCS (Subversion and friends included) is the way Git thinks about its data. Git considers the files as #### Snapshots, Not Differences # Git Basics ### The Three States ---------------------------------------- `committed` `modified` and `staged` * Committed means that the data is safely stored in your local database. * Modified means that you have changed the file but have not committed it to your database yet. * Staged means that you have marked a modified file in its current version to go into your next commit *_snapshot_*. ### The Three States <center><img src="./Images/image3.PNG" width="700"></center> ## Installing Git (GitBash) ## Customizing Git Importance of `git config` #### Two Main `config` commands: -------------------------------------------------- * $ `git config --global user.name "John Doe"` * $ `git config --global user.email johndoe@example.com` --------------------------------------------------- *You need to do this only once* An example of `config` to change your editor -------------------------------------------- * $ `git config --global core.editor "'C:/Program Files/Notepad++/notepad++.exe' -multiInst -nosession"` ### Creating/Cloning repository ----------------------------------------------- Two ways of getting a Git repository: * Create/Use one local repository as a Git repository * Clone one that is already created To create a local repository --------------------------------------------- * $ `cd /c/user/my_project` * $ `git init` To clone a repository --------------------------------------------- * $ `git clone <link>` --------------------------------------------- Link to the repository ending with .git. ex: `<link>` - https://github.com/prabhuSub/Text-based-Game-using-Python.git This creates a directory and also initializes a .git repository in the folder ### Recording changes to your repository --------------------------------------- <center><img src="./Images/image4.PNG" width="700"></center> Once my Git repository is set up, I usually start with a first command, which is: ----------------------------------------- * $ `git status` ---------------------------- It gives you an output similar to the below if you have nothing to commit: * $ `git status` ``` On branch master Your branch is up-to-date with 'origin/master'. nothing to commit, working directory clean ``` * $ `git fetch` The `git fetch` command only downloads the data to your local repository — it doesn’t automatically merge it with any of your work or modify what you’re currently working on. You have to merge it manually into your work when you’re ready.
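For example, a typical fetch-then-review-then-merge sequence (the remote and branch names here are just illustrative) looks like:

* $ `git fetch origin`
* $ `git log master..origin/master`
* $ `git merge origin/master`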
------------------------------------------------- * $ `git pull` `git pull` automatically fetches and then merges that remote branch into your current branch * $ `git push <remote> <branch>` If you want to push your master branch to your origin server (again, cloning generally sets up both of those names for you automatically), then you can run this to push any commits you’ve done back up to the server * $ `git tag` Creating tags helps you name versions and later review the version history and the corresponding logs. Ex: <center><img src="./Images/code1.PNG" width="500"></center> ## Git Branching Nearly every VCS has some form of branching support. Branching means you diverge from the main line of development and continue to do work without messing with that main line. #### This is how your actual commits for a file are mapped to the system. <center><img src="./Images/image5.PNG" width="800"></center> #### Now, if you commit more content or changes to your files, the further linking of the commits with their addresses is as shown below: ------------------------ <center><img src="./Images/image6.PNG" width="800"></center> How does Git know what branch you’re currently on? It keeps a special pointer called `HEAD` <center><img src="./Images/image7.PNG" width="600"></center> ### Creating a new branch * $ `git branch testing` This creates a new branch called "testing" <center><img src="./Images/image8.PNG" width="600"></center> Now, as a branch has been created, I would like to switch to it to start working with it * $ `git checkout testing` This moves HEAD to point to the testing branch Always keep in mind how `HEAD` works and where it is pointing. That's the key to not getting confused while working with Git <center><img src="./Images/image9.PNG" width="600"></center> Now, if I perform a `commit`, my `HEAD` will move ahead <center><img src="./Images/image10.PNG" width="600"></center> Now, if I commit by getting into `master` and update something, what do you think will happen? ### The `HEAD` moves to a new address for master <center><img src="./Images/image11.PNG" width="800"></center> To view the branch commits in graphical form, below is the command: * $ `git log --oneline --decorate --graph --all` It will look somewhat like the below, depending on the number of branches and commits you have: <center><img src="./Images/branching.PNG" width="800"></center> Merging your changes from one branch to another. * $ `git checkout master` * $ `git merge hotfix` ``` Updating f42c576..3a0874c Fast-forward index.html | 2 ++ 1 file changed, 2 insertions(+) ``` ### Stashing and Cleaning the branch Stashing takes the dirty state of your working directory — that is, your modified tracked files and staged changes — and saves it on a stack of unfinished changes that you can reapply at any time (even on a different branch) * $ `git stash` Finally, you may not want to stash some work or files in your working directory, but simply get rid of them. The `git clean` command will do this for you. * $ `git clean` #### General sequence of events for a simple multiple-developer Git workflow <center><img src="./Images/image12.PNG" width="450"></center> #### Basic sequence of this managed-team workflow. <center><img src="./Images/image13.PNG" width="500"></center> At the end, I provide the book that I use and always refer to whenever I face any issues with Git. Feel free to use it and learn. 
Below are the links ### [Git Reference Book](Books/progit.pdf) Also, a Git Cheat Sheet for a quick reference ### [Git Cheat Sheet](Books/github-git-cheat-sheet.pdf) ## <center>Thank you</center> <font size="4">MIT License <center><img src="./Images/OSI_Approved_License.png" width="100" align="right"></center> Copyright (c) 2019 Prabhu Subramanian Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.</font>
github_jupyter
On branch master Your branch is up-to-date with 'origin/master'. nothing to commit, working directory clean
0.331877
0.870597
Probability theory is a cornerstone for machine learning. We can think of quantum states as probability distributions with certain properties that make them different from our classical notion of probabilities. Contrasting these properties is an easy and straightforward introduction to the most basic concepts we need in quantum computing. Apart from probability theory, linear algebra is also critical for many learning protocols. As we will see, geometry and probabilities are intrinsically linked in quantum computing, but geometric notions are also familiar in dealing with classical probability distributions. This notebook first talks about classical probabilities and stochastic vectors, and introduces quantum states as a natural generalization. Throughout this course, we will assume finite probability distributions and finite dimensional spaces. This significantly simplifies notation and most quantum computers operate over finite dimensional spaces, so we do not lose much in generality. # Classical probability distributions Let us toss a biased coin. Without getting too technical, we can associate a random variable $X$ with the output: it takes the value 0 for heads and the value 1 for tails. We get heads with probability $P(X=0) = p_0$ and tails with $P(X=1) = p_1$ for each toss of the coin. In classical, Kolmogorovian probability theory, $p_i\geq 0$ for all $i$, and the probabilities sum to one: $\sum_i p_i = 1$. Let's sample this distribution ``` import numpy as np n_samples = 100 p_1 = 0.2 x_data = np.random.binomial(1, p_1, (n_samples,)) print(x_data) ``` We naturally expect that the empirically observed frequencies also sum to one: ``` frequency_of_zeros, frequency_of_ones = 0, 0 for x in x_data: if x: frequency_of_ones += 1/n_samples else: frequency_of_zeros += 1/n_samples print(frequency_of_ones+frequency_of_zeros) ``` Since $p_0$ and $p_1$ must be non-negative, all possible probability distributions are restricted to the positive orthant. The normalization constraint puts every possible distribution on a straight line. This plot describes all possible probability distributions by biased and unbiased coins. ``` import matplotlib.pyplot as plt %matplotlib inline p_0 = np.linspace(0, 1, 100) p_1 = 1-p_0 fig, ax = plt.subplots() ax.set_xlim(-1.2, 1.2) ax.set_ylim(-1.2, 1.2) ax.spines['left'].set_position('center') ax.spines['bottom'].set_position('center') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.set_xlabel("$p_0$") ax.xaxis.set_label_coords(1.0, 0.5) ax.set_ylabel("$p_1$") ax.yaxis.set_label_coords(0.5, 1.0) plt.plot(p_0, p_1) ``` We may also arrange the probabilities in a vector $\vec{p} = \begin{bmatrix} p_0 \\ p_1 \end{bmatrix}$. Here, for notational convenience, we put an arrow above the variable representing the vector, to distinguish it from scalars. You will see that quantum states also have a standard notation that provides convenience, but goes much further in usefulness than the humble arrow here. A vector representing a probability distribution is called a *stochastic vector*. The normalization constraint essentially says that the norm of the vector is restricted to one in the $l_1$ norm. In other words, $||\vec{p}||_1 = \sum_i |p_i| = 1$. This would be the unit circle in the $l_1$ norm, but since $p_i\geq 0$, we are restricted to a quarter of the unit circle, just as we plotted above. 
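The sampling cell above can also be cross-checked numerically: the empirically observed frequencies themselves form a stochastic vector. A small sketch, reusing the `x_data` and `n_samples` defined earlier (the exact numbers will vary with the random draw):
```
counts = np.bincount(x_data, minlength=2)   # number of observed 0s and 1s
p_empirical = counts / n_samples            # empirical stochastic vector
print(p_empirical, p_empirical.sum())       # non-negative entries that sum to one
```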
We can easily verify this with numpy's norm function: ``` p = np.array([[0.8], [0.2]]) np.linalg.norm(p, ord=1) ``` We know that the probability of heads is just the first element in the $\vec{p}$, but since it is a vector, we could use linear algebra to extract it. Geometrically, it means that we project the vector to the first axis. This projection is described by the matrix $\begin{bmatrix} 1 & 0\\0 & 0\end{bmatrix}$. The length in the $l_1$ norm gives the sought probability: ``` Π_0 = np.array([[1, 0], [0, 0]]) np.linalg.norm(Π_0 @ p, ord=1) ``` We can repeat the process to get the probability of tails: ``` Π_1 = np.array([[0, 0], [0, 1]]) np.linalg.norm(Π_1 @ p, ord=1) ``` The two projections play an equivalent role to the values 0 and 1 when we defined the probability distribution. In fact, we could define a new random variable called $\Pi$ that can take the projections $\Pi_0$ and $\Pi_1$ as values and we would end up with an identical probability distribution. This may sound convoluted and unnatural, but the measurement in quantum mechanics is essentially a random variable that takes operator values, such as projections. What happens when we want to transform a probability distribution to another one? For instance, to change the bias of a coin, or to describe the transition of a Markov chain. Since the probability distribution is also a stochastic vector, we can apply a matrix on the vector, where the matrix has to fulfill certain conditions. A left *stochastic matrix* will map stochastic vectors to stochastic vectors when multiplied from the left: its columns add up to one. In other words, it maps probability distributions to probability distributions. For example, starting with a unbiased coin, the map $M$ will transform the distribution to a biased coin: ``` p = np.array([[.5], [.5]]) M = np.array([[0.7, 0.6], [0.3, 0.4]]) np.linalg.norm(M @ p, ord=1) ``` One last concept that will come handy is entropy. A probability distribution's entropy is defined as $H(p) = - \sum_i p_i \log_2 p_i$. Let us plot it over all possible probability distributions of coin tosses: ``` ϵ = 10e-10 p_0 = np.linspace(ϵ, 1-ϵ, 100) p_1 = 1-p_0 H = -(p_0*np.log2(p_0) + p_1*np.log2(p_1)) fig, ax = plt.subplots() ax.set_xlim(0, 1) ax.set_ylim(0, -np.log2(0.5)) ax.set_xlabel("$p_0$") ax.set_ylabel("$H$") plt.plot(p_0, H) plt.axvline(x=0.5, color='k', linestyle='--') ``` Here we can see that the entropy is maximal for the unbiased coin. This is true in general: the entropy peaks for the uniform distribution. In a sense, this is the most unpredictable distribution: if we get heads with probability 0.2, betting tails is a great idea. On the other hand, if the coin is unbiased, then a deterministic strategy is of little help in winning. Entropy quantifies this notion of surprise and unpredictability. # Quantum states A classical coin is a two-level system: it is either heads or tails. At a first look a quantum state is a probability distribution, and the simplest case is a two-level state, which we call a qubit. Just like the way we can write the probability distribution as a column vector, we can write a quantum state as a column vector. For notational convenience that will become apparent later, we write the label of a quantum state in what is called a ket in the Dirac notation. So for instance, for some qubit, we can write $$ |\psi\rangle = \begin{bmatrix} a_0 \\ a_1 \\ \end{bmatrix}. $$ In other words, a ket is just a column vector, exactly like the stochastic vector in the classical case. 
Instead of putting an arrow over the name of the variable to express that it is a vector, we use the ket to say that it is a column vector that represents a quantum state. There's more to this notation, as we will see. The key difference to classical probability distributions and stochastic vectors is the normalization constraint. The square sum of their absolute values adds up to 1: $$ \sqrt{|a_0|^2+|a_1|^2}=1, $$ where $a_0, a_1\in \mathbb{C}$. In other words, we are normalizing in the $l_2$ norm instead of the $l_1$ norm. Furthermore, we are no longer restricted to the positive orthant: the components of the quantum state vector, which we call *probability amplitudes*, are complex valued. Let us introduce two special qubits, corresponding to the canonical basis vectors in two dimensions: $|0\rangle$ and $|1\rangle$. $$ |0\rangle = \begin{bmatrix} 1 \\ 0 \\ \end{bmatrix}, \,\,\, |1\rangle = \begin{bmatrix} 0 \\ 1 \\ \end{bmatrix}. $$ This basis is also called the computational basis in quantum computing. We can expand an arbitrary qubit state in this basis: $$ |\psi\rangle = \begin{bmatrix} a_0 \\ a_1 \\ \end{bmatrix}=a_0\begin{bmatrix} 1 \\ 0 \\ \end{bmatrix} + a_1\begin{bmatrix} 0 \\ 1 \\ \end{bmatrix}= a_0|0\rangle+a_1|1\rangle. $$ This expansion in a basis is called a superposition. If we sample the qubit state, we obtain the outcome 0 with probability $|a_0|^2$, and 1 with probability $|a_1|^2$. This is known as the Born rule; you will learn more about measurements and this rule in a subsequent notebook. For now, let's take a look at how we can simulate classical coin tossing on a quantum computer. Let's start with a completely biased case where we get heads with probability 1. This means that our qubit $|\psi\rangle=|0\rangle$. We create a circuit of a single qubit and a single classical register where the results of the sampling (measurements) go. ``` from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister from qiskit import execute from qiskit import Aer from qiskit.tools.visualization import plot_histogram, plot_bloch_multivector import numpy as np π = np.pi backend = Aer.get_backend('qasm_simulator') q = QuantumRegister(1) c = ClassicalRegister(1) circuit = QuantumCircuit(q, c) ``` Any qubit is initialized in $|0\rangle$, so if we measure it right away, we should get our maximally biased coin. ``` circuit.measure(q, c) ``` Let us execute it a hundred times and study the result ``` job = execute(circuit, backend, shots=100) result = job.result() result.get_counts(circuit) ``` As expected, all of our outcomes are 0. To understand the possible quantum states, we use the Bloch sphere visualization. Since the probability amplitudes are complex and there are two of them for a single qubit, this would require a four-dimensional space. Now since the vectors are normalized, this removes a degree of freedom, allowing a three-dimensional representation with an appropriate embedding. This embedding is the Bloch sphere. It is slightly different than an ordinary sphere in three dimensions: we identify the north pole with the state $|0\rangle$, and the south pole with $|1\rangle$. In other words, two orthogonal vectors appear as if they were on the same axis -- the axis Z. The computational basis is just one basis: the axes X and Y represent two other bases. Any point on the surface of this sphere is a valid quantum state. This is also true the other way around: every pure quantum state is a point on the Bloch sphere. 
Here 'pure' is an important technical term and it essentially means that the state is described by a ket (column vector). Later in the course we will see other states called mixed states that are not described by a ket (you will see later that these are inside the Bloch sphere). To make it less abstract, let's plot our $|0\rangle$ on the Bloch sphere: ``` backend_statevector = Aer.get_backend('statevector_simulator') circuit = QuantumCircuit(q, c) circuit.id(q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) ``` Compare this sphere with the straight line in the positive orthant that describes all classical probability distributions of coin tosses. You can already see that there is a much richer structure in the quantum probability space. Let us pick another point on the Bloch sphere, that is, another distribution. Let's transform the state $|0\rangle$ to $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$. This corresponds to the unbiased coin, since we will get 0 with probability $|\frac{1}{\sqrt{2}}|^2=1/2$, and 1 with the same probability. There are many ways to do this transformation. We pick a rotation around the Y axis by $\pi/2$, which corresponds to the matrix $\frac{1}{\sqrt{2}}\begin{bmatrix} 1 & -1\\1 & 1\end{bmatrix}$. ``` circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) ``` To get an intuition for why it is called a rotation around the Y axis, let's plot it on the Bloch sphere: ``` circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) ``` It does exactly what it says: it rotates from the north pole of the Bloch sphere. Why is it interesting to have complex probability amplitudes instead of non-negative real numbers? To get some insight, take a look at what happens if we apply the same rotation to $|1\rangle$. To achieve this, first we flip $|0\rangle$ to $|1\rangle$ by applying a NOT gate (denoted by X in quantum computing) and then the rotation. ``` circuit = QuantumCircuit(q, c) circuit.x(q[0]) circuit.ry(π/2, q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) ``` You can verify that the result is $\frac{1}{\sqrt{2}}(-|0\rangle + |1\rangle)$. That is, the exact same state as before, except that the first term got a minus sign: it is a negative probability amplitude. Note that the difference cannot be observed from the statistics: ``` circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) ``` It still looks like an approximately unbiased coin. Yet, that negative sign -- or any complex value -- is what models *interference*, a critically important phenomenon where probability amplitudes can interact in a constructive or a destructive way. To see this, if we apply the rotation twice in a row on $|0\rangle$, we get another deterministic output, $|1\rangle$, although in between the two rotations the state was a superposition. 
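Before running the circuit below, the same cancellation can be seen with plain numpy; a sketch, where `Ry` is simply the rotation matrix quoted above (a name introduced here, not part of Qiskit):
```
Ry = (1/np.sqrt(2)) * np.array([[1, -1],
                                [1,  1]])   # rotation around the Y axis by pi/2
ket0 = np.array([1, 0])
ket1 = np.array([0, 1])
print(Ry @ ket0)          # approx [0.707, 0.707]: the unbiased superposition
print(Ry @ ket1)          # approx [-0.707, 0.707]: same probabilities, negative amplitude
print(Ry @ (Ry @ ket0))   # [0, 1]: the |0> amplitudes cancel, leaving |1> deterministically
```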
``` circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) circuit.ry(π/2, q[0]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) ``` Many quantum algorithms exploit interference, for instance, the seminal [Deutsch-Josza algorithm](https://en.wikipedia.org/wiki/Deutsch–Jozsa_algorithm), which is among the simplest to understand its significance. # More qubits and entanglement We have already seen that quantum states are probability distributions normed to 1 in the $l_2$ norm and we got a first peek at interference. If we introduce more qubits, we see another crucial quantum effect emerging. To do that, we first have to define how we write down the column vector for describing two qubits. We use a tensor product, which, in the case of qubits, is equivalent to the Kronecker product. Given two qubits, $|\psi\rangle=\begin{bmatrix}a_0\\a_1\end{bmatrix}$ and $|\psi'\rangle=\begin{bmatrix}b_0\\b_1\end{bmatrix}$, their product is $|\psi\rangle\otimes|\psi'\rangle=\begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix}$. Imagine that you have two registers $q_0$ and $q_1$, each can hold a qubit, and both qubits are in the state $|0\rangle$. Then this composite state would be described by according to this product rule as follows: ``` q0 = np.array([[1], [0]]) q1 = np.array([[1], [0]]) np.kron(q0, q1) ``` This is the $|0\rangle\otimes|0\rangle$ state, which we often abbreviate as $|00\rangle$. The states $|01\rangle$, $|10\rangle$, and $|11\rangle$ are defined analogously, and the four of them give the canonical basis of the four dimensional complex space, $\mathbb{C}^2\otimes\mathbb{C}^2$. Now comes the interesting and counter-intuitive part. In machine learning, we also work with high-dimensional spaces, but we never construct it as a tensor product: it is typically $\mathbb{R}^d$ for some dimension $d$. The interesting part of writing the high-dimensional space as a tensor product is that not all vectors in can be written as a product of vectors in the component space. Take the following state: $|\phi^+\rangle = \frac{1}{\sqrt{2}}(|00\rangle+|11\rangle)$. This vector is clearly in $\mathbb{C}^2\otimes\mathbb{C}^2$, since it is a linear combination of two of the basis vector in this space. Yet, it cannot be written as $|\psi\rangle\otimes|\psi'\rangle$ for some $|\psi\rangle$, $|\psi'\rangle\in\mathbb{C}^2$. To see this, assume that it can be written in this form. Then $$ |\phi^+\rangle = \frac{1}{\sqrt{2}}(|00\rangle+|11\rangle) = \begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix} = a_0b_0|00\rangle + a_0b_1|01\rangle + a_1b_0|10\rangle + a_1b_1|11\rangle. $$ $|01\rangle$ and $|10\rangle$ do not appear on the left-hand side, so their coefficients must be zero: $a_1b_0=0$ and $a_0b_1=0$. This leads to a contradiction, since $a_1$ cannot be zero ($a_1b_1=1$), so $b_0$ must be zero, but $a_0b_0=1$. Therefore $|\phi^+\rangle$ cannot be written as a product. States that cannot be written as a product are called entangled states. This is the mathematical form of describing a phenomenon of strong correlations between random variables that exceed what is possible classically. Entanglement plays a central role in countless quantum algorithms. A simple example is [quantum teleportation](https://en.wikipedia.org/wiki/Quantum_teleportation). We will also see its applications in quantum machine learning protocols. 
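The no-product-decomposition argument above can also be checked numerically: a two-qubit state is a product state exactly when the 2x2 matrix of its amplitudes has a single nonzero singular value (a single Schmidt coefficient). A sketch using only numpy:
```
phi_plus = np.array([1, 0, 0, 1]) / np.sqrt(2)          # (|00> + |11>)/sqrt(2)
product = np.kron(np.array([1, 0]), np.array([0, 1]))   # |0> tensor |1> = |01>
for state in (phi_plus, product):
    schmidt = np.linalg.svd(state.reshape(2, 2), compute_uv=False)
    print(schmidt, "entangled" if np.sum(schmidt > 1e-12) > 1 else "product")
```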
We will have a closer look at entanglement in a subsequent notebook on measurements, but as a teaser, let us look at the measurement statistics of the $|\phi^+\rangle$ state. The explanation of the circuit preparing it will also come in a subsequent notebook. ``` q = QuantumRegister(2) c = ClassicalRegister(2) circuit = QuantumCircuit(q, c) circuit.h(q[0]) circuit.cx(q[0], q[1]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) ``` Notice that 01 or 10 never appear in the measurement statistics. # Further reading Chapter 9 in Quantum Computing since Democritus by Scott Aaronson describes a similar approach to understanding quantum states -- in fact, the interference example was lifted from there.
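As a small supplement to the circuit above, the state it prepares can be reproduced with plain numpy; a sketch using the standard textbook matrices for the Hadamard and CNOT gates with the first qubit as control (Qiskit itself orders qubits little-endian, but the resulting Bell state is the same):
```
H = np.array([[1, 1], [1, -1]]) / np.sqrt(2)   # Hadamard on the first qubit
I2 = np.eye(2)
CNOT = np.array([[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, 0, 1],
                 [0, 0, 1, 0]])                # flips the second qubit when the first is 1
ket00 = np.kron(np.array([1, 0]), np.array([1, 0]))
state = CNOT @ np.kron(H, I2) @ ket00
print(state)   # approx [0.707, 0, 0, 0.707]: only |00> and |11> carry amplitude, matching the histogram
```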
github_jupyter
import numpy as np n_samples = 100 p_1 = 0.2 x_data = np.random.binomial(1, p_1, (n_samples,)) print(x_data) frequency_of_zeros, frequency_of_ones = 0, 0 for x in x_data: if x: frequency_of_ones += 1/n_samples else: frequency_of_zeros += 1/n_samples print(frequency_of_ones+frequency_of_zeros) import matplotlib.pyplot as plt %matplotlib inline p_0 = np.linspace(0, 1, 100) p_1 = 1-p_0 fig, ax = plt.subplots() ax.set_xlim(-1.2, 1.2) ax.set_ylim(-1.2, 1.2) ax.spines['left'].set_position('center') ax.spines['bottom'].set_position('center') ax.spines['right'].set_color('none') ax.spines['top'].set_color('none') ax.set_xlabel("$p_0$") ax.xaxis.set_label_coords(1.0, 0.5) ax.set_ylabel("$p_1$") ax.yaxis.set_label_coords(0.5, 1.0) plt.plot(p_0, p_1) p = np.array([[0.8], [0.2]]) np.linalg.norm(p, ord=1) Π_0 = np.array([[1, 0], [0, 0]]) np.linalg.norm(Π_0 @ p, ord=1) Π_1 = np.array([[0, 0], [0, 1]]) np.linalg.norm(Π_1 @ p, ord=1) p = np.array([[.5], [.5]]) M = np.array([[0.7, 0.6], [0.3, 0.4]]) np.linalg.norm(M @ p, ord=1) ϵ = 10e-10 p_0 = np.linspace(ϵ, 1-ϵ, 100) p_1 = 1-p_0 H = -(p_0*np.log2(p_0) + p_1*np.log2(p_1)) fig, ax = plt.subplots() ax.set_xlim(0, 1) ax.set_ylim(0, -np.log2(0.5)) ax.set_xlabel("$p_0$") ax.set_ylabel("$H$") plt.plot(p_0, H) plt.axvline(x=0.5, color='k', linestyle='--') from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister from qiskit import execute from qiskit import Aer from qiskit.tools.visualization import plot_histogram, plot_bloch_multivector import numpy as np π = np.pi backend = Aer.get_backend('qasm_simulator') q = QuantumRegister(1) c = ClassicalRegister(1) circuit = QuantumCircuit(q, c) circuit.measure(q, c) job = execute(circuit, backend, shots=100) result = job.result() result.get_counts(circuit) backend_statevector = Aer.get_backend('statevector_simulator') circuit = QuantumCircuit(q, c) circuit.id(q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) circuit = QuantumCircuit(q, c) circuit.x(q[0]) circuit.ry(π/2, q[0]) job = execute(circuit, backend_statevector) plot_bloch_multivector(job.result().get_statevector(circuit)) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) circuit = QuantumCircuit(q, c) circuit.ry(π/2, q[0]) circuit.ry(π/2, q[0]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit)) q0 = np.array([[1], [0]]) q1 = np.array([[1], [0]]) np.kron(q0, q1) q = QuantumRegister(2) c = ClassicalRegister(2) circuit = QuantumCircuit(q, c) circuit.h(q[0]) circuit.cx(q[0], q[1]) circuit.measure(q, c) job = execute(circuit, backend, shots=100) plot_histogram(job.result().get_counts(circuit))
0.420838
0.994754
<div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Introduction </h1></div> ![](https://medialibrary.climatecentral.org/uploads/general/2018GreenhouseEffect.png) [Source](https://medialibrary.climatecentral.org/uploads/general/2018GreenhouseEffect.png) <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> GreenHouse Effect </h1></div> ![](https://assets.nrdc.org/sites/default/files/styles/ec_home_alert_large/public/greenhouseeffect_graphic1_2400x1742.png?itok=rNkTHqeP) <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold;">Do you know?</h1> <h3 style="text-align:center;font-weight: bold">1) The three greenhouse gases that are of most concern are Carbon Dioxide (CO<sub>2</sub>), Nitrous Oxide (N<sub>2</sub>O), and Methane (CH<sub>4</sub>).</h3> <h3 style="text-align:center;font-weight: bold">2) . Greenhouse gases are naturally produced; however humans produce additional amounts of certain greenhouse gases.</h3> <h3 style="text-align:center;font-weight: bold">3) Greenhouse gases trap heat in the atmosphere warming the Earth’s temperature above freezing. This is known as the Greenhouse effect.</h3> </div> <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold">About the Dataset</h1> <h3 style="text-align:left;font-weight: bold">A two-column dataset for Greenhouse gas emissions by sector. 
</h3> <ul> <li>GlobalShare: Share of global greenhouse gas emissions (%)</li> <li>Sub-sector</li> <li>Source of content: ourworldindata.org</li> </ul> </div> <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Importing Libraries </h1></div> ``` import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') plt.style.use('dark_background') import numpy as np import pandas as pd import seaborn as sns import plotly.express as px import plotly.graph_objects as go import warnings warnings.simplefilter(action='ignore', category=Warning) ``` <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Importing Dataset </h1></div> ``` dataset = pd.read_csv('GHG-Emissions-by-sector.csv') ``` <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Exploratory Data Analysis </h1></div> ``` dataset.head() dataset.shape ``` ## This dataset contain 29 Rows and 2 Columns ``` dataset.isnull().sum() ``` ## There are no null values ``` dataset.info() dataset.describe().T.style.bar( subset=['mean'], color='#606ff2').background_gradient( subset=['std'], cmap='PuBu').background_gradient(subset=['50%'], cmap='PuBu') sorted_dataset = dataset.sort_values('GlobalShare', ascending=True).reset_index() ``` <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> ✨Visualisation✨ </h1></div> <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Bar Plots </h1></div> ``` fig = px.bar(sorted_dataset, x = 'GlobalShare',color="Sub-sector", template = 'plotly_dark', title = 'Distrbution of GlobalShare') fig.show() plt.figure(figsize=(12, 6)) sns.countplot(y="GlobalShare", data=sorted_dataset, palette='husl'); fig = px.histogram(sorted_dataset, x="GlobalShare" ,marginal="rug",color= 'Sub-sector', hover_data=dataset.columns, template = 'plotly_dark', title = 'Distribution of GlobalShare represented by SubSector') fig.show() ``` ## From above plot we can conclude that most of the Global Shares lies b/w 0-2 and least b/w 8-10 <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Scatter Plot </h1></div> ``` fig = px.scatter(dataset, x="GlobalShare", y=dataset.index, color= 'Sub-sector', log_x=True, size_max=60, template='plotly_dark', title='Scatter plot for Global Share') fig.show() ``` ## From above plot we can conclude that Global Share are Right Skewed <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; 
letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Pie Chart </h1></div> ``` fig = px.pie(dataset, names='Sub-sector', values='GlobalShare', template = 'plotly_dark', title = 'Representation of Sub-Sectors with their Global Shares') fig.show() ``` ## The above plot tells us the Sub-sector Road has the maximum Global Share <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Box Plot </h1></div> ``` fig = px.box(dataset, y="GlobalShare" ,template = 'plotly_dark', title = 'Representation of Global Shares') fig.show() ``` <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Violin Plot </h1></div> ``` fig = px.violin(dataset, y="GlobalShare",template = 'plotly_dark') fig.show() ``` ## From the above plots we can conclude that: 1. There are 3 outliers in global shares 2. Q3 = 4.525 3. Median = 1.9 4. Q1 = 1.225 5. Min value = 0.1 6. Max Value = 11.9 <div style="color:white; display:fill; border-radius:10px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold"> Parallel Plot </h1></div> ``` fig = px.parallel_categories(dataset,template = 'plotly_dark', color = 'GlobalShare') fig.show() ``` ## From the above plot we can identify that some Sub-sectors have the same Global Share <div style="color:white; display:fill; border-radius:15px; font-size:110%; font-family:cursive; letter-spacing:0.5px; background-color:#4ff0d2; color:Black; font-family:cursive; padding:5px 5px 5px 5px; "> <h1 style="text-align:center;font-weight: bold">Please share your feedback in the comment section. I'll be more than happy to hear from you all, and if you liked my work and learned from it, please consider upvoting.</h1> </div>
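The box-plot numbers quoted above can also be read off directly with pandas; a sketch assuming the same `dataset` DataFrame and `GlobalShare` column loaded earlier (the exact values depend on the CSV, and the outlier rule here is the usual 1.5 x IQR fence):
```
q1, median, q3 = dataset['GlobalShare'].quantile([0.25, 0.5, 0.75])
iqr = q3 - q1
upper_fence = q3 + 1.5 * iqr
print(q1, median, q3, upper_fence)
print(dataset.loc[dataset['GlobalShare'] > upper_fence, ['Sub-sector', 'GlobalShare']])  # candidate outliers
```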
github_jupyter
import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') plt.style.use('dark_background') import numpy as np import pandas as pd import seaborn as sns import plotly.express as px import plotly.graph_objects as go import warnings warnings.simplefilter(action='ignore', category=Warning) dataset = pd.read_csv('GHG-Emissions-by-sector.csv') dataset.head() dataset.shape dataset.isnull().sum() dataset.info() dataset.describe().T.style.bar( subset=['mean'], color='#606ff2').background_gradient( subset=['std'], cmap='PuBu').background_gradient(subset=['50%'], cmap='PuBu') sorted_dataset = dataset.sort_values('GlobalShare', ascending=True).reset_index() fig = px.bar(sorted_dataset, x = 'GlobalShare',color="Sub-sector", template = 'plotly_dark', title = 'Distrbution of GlobalShare') fig.show() plt.figure(figsize=(12, 6)) sns.countplot(y="GlobalShare", data=sorted_dataset, palette='husl'); fig = px.histogram(sorted_dataset, x="GlobalShare" ,marginal="rug",color= 'Sub-sector', hover_data=dataset.columns, template = 'plotly_dark', title = 'Distribution of GlobalShare represented by SubSector') fig.show() fig = px.scatter(dataset, x="GlobalShare", y=dataset.index, color= 'Sub-sector', log_x=True, size_max=60, template='plotly_dark', title='Scatter plot for Global Share') fig.show() fig = px.pie(dataset, names='Sub-sector', values='GlobalShare', template = 'plotly_dark', title = 'Representation of Sub-Sectors with their Global Shares') fig.show() fig = px.box(dataset, y="GlobalShare" ,template = 'plotly_dark', title = 'Representation of Global Shares') fig.show() fig = px.violin(dataset, y="GlobalShare",template = 'plotly_dark') fig.show() fig = px.parallel_categories(dataset,template = 'plotly_dark', color = 'GlobalShare') fig.show()
0.499268
0.770594
<img src="images/dask_horizontal.svg" align="right" width="30%"> # Data Storage <img src="images/hdd.jpg" width="20%" align="right"> Efficient storage can dramatically improve performance, particularly when operating repeatedly from disk. Decompressing text and parsing CSV files is expensive. One of the most effective strategies with medium data is to use a binary storage format like HDF5. Often the performance gains from doing this are sufficient so that you can switch back to using Pandas again instead of using `dask.dataframe`. In this section we'll learn how to efficiently arrange and store your datasets in on-disk binary formats. We'll use the following: 1. [Pandas `HDFStore`](http://pandas.pydata.org/pandas-docs/stable/io.html#io-hdf5) format on top of `HDF5` 2. Categoricals for storing text data numerically **Main Take-aways** 1. Storage formats affect performance by an order of magnitude 2. Text data will keep even a fast format like HDF5 slow 3. A combination of binary formats, column storage, and partitioned data turns one-second wait times into 80ms wait times. ## Create data ``` %run prep.py -d accounts ``` ## Read CSV First we read our csv data as before. CSV and other text-based file formats are the most common storage for data from many sources, because they require minimal pre-processing, can be written line-by-line and are human-readable. Since Pandas' `read_csv` is well-optimized, CSVs are a reasonable input, but far from optimized, since reading requires extensive text parsing. ``` import os filename = os.path.join('data', 'accounts.*.csv') filename import dask.dataframe as dd df_csv = dd.read_csv(filename) df_csv.head() ``` ### Write to HDF5 HDF5 and netCDF are binary array formats very commonly used in the scientific realm. Pandas contains a specialized HDF5 format, `HDFStore`. The ``dd.DataFrame.to_hdf`` method works exactly like the ``pd.DataFrame.to_hdf`` method. ``` target = os.path.join('data', 'accounts.h5') target # convert to binary format, takes some time up-front %time df_csv.to_hdf(target, '/data') # same data as before df_hdf = dd.read_hdf(target, '/data') df_hdf.head() ``` ### Compare CSV to HDF5 speeds We do a simple computation that requires reading a column of our dataset and compare performance between CSV files and our newly created HDF5 file. Which do you expect to be faster? ``` %time df_csv.amount.sum().compute() %time df_hdf.amount.sum().compute() ``` Sadly they are about the same, or perhaps even slower. The culprit here is the `names` column, which is of `object` dtype and thus hard to store efficiently. There are two problems here: 1. How do we store text data like `names` efficiently on disk? 2. Why did we have to read the `names` column when all we wanted was `amount`? ### 1. Store text efficiently with categoricals We can use Pandas categoricals to replace our object dtypes with a numerical representation. This takes a bit more time up front, but results in better performance. More on categoricals at the [pandas docs](http://pandas.pydata.org/pandas-docs/stable/categorical.html) and [this blogpost](http://matthewrocklin.com/blog/work/2015/06/18/Categoricals). ``` # Categorize data, then store in HDFStore %time df_hdf.categorize(columns=['names']).to_hdf(target, '/data2') # It looks the same df_hdf = dd.read_hdf(target, '/data2') df_hdf.head() # But loads more quickly %time df_hdf.amount.sum().compute() ``` This is now definitely faster than before. 
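The same effect shows up in memory, independent of any file format: a categorical column stores integer codes plus a small lookup table instead of one Python string per row. A small self-contained pandas sketch (synthetic names, not the `accounts` data):
```
import pandas as pd

names = pd.Series(["Alice", "Bob", "Charlie", "Dan"] * 250_000)   # object dtype
as_category = names.astype("category")                            # integer codes + categories table
print(names.memory_usage(deep=True), as_category.memory_usage(deep=True))
```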
This tells us that it's not only the file type that we use but also how we represent our variables that influences storage performance. How does the performance of reading depend on the scheduler we use? You can try this with threaded, processes and distributed. However this can still be better. We had to read all of the columns (`names` and `amount`) in order to compute the sum of one (`amount`). We'll improve further on this with `parquet`, an on-disk column-store. First though we learn about how to set an index in a dask.dataframe. ### Exercise `fastparquet` is a library for interacting with parquet-format files, which are a very common format in the Big Data ecosystem, and used by tools such as Hadoop, Spark and Impala. ``` target = os.path.join('data', 'accounts.parquet') df_csv.categorize(columns=['names']).to_parquet(target, storage_options={"has_nulls": True}, engine="fastparquet") ``` Investigate the file structure in the resultant new directory - what do you suppose those files are for? `to_parquet` comes with many options, such as compression, whether to explicitly write NULLs information (not necessary in this case), and how to encode strings. You can experiment with these, to see what effect they have on the file size and the processing times, below. ``` ls -l data/accounts.parquet/ df_p = dd.read_parquet(target) # note that column names shows the type of the values - we could # choose to load as a categorical column or not. df_p.dtypes ``` Rerun the sum computation above for this version of the data, and time how long it takes. You may want to try this more than once - it is common for many libraries to do various setup work when called for the first time. ``` %time df_p.amount.sum().compute() ``` When archiving data, it is common to sort and partition by a column with unique identifiers, to facilitate fast look-ups later. For this data, that column is `id`. Time how long it takes to retrieve the rows corresponding to `id==100` from the raw CSV, from HDF5 and parquet versions, and finally from a new parquet version written after applying `set_index('id')`. ``` # df_p.set_index('id').to_parquet(...) ``` ## Remote files Dask can access various cloud- and cluster-oriented data storage services such as Amazon S3 or HDFS Advantages: * scalable, secure storage Disadvantages: * network speed becomes bottleneck The way to set up dataframes (and other collections) remains very similar to before. Note that the data here is available anonymously, but in general an extra parameter `storage_options=` can be passed with further details about how to interact with the remote storage. ```python taxi = dd.read_csv('s3://nyc-tlc/trip data/yellow_tripdata_2015-*.csv', storage_options={'anon': True}) ``` **Warning**: operations over the Internet can take a long time to run. Such operations work really well in a cloud clustered set-up, e.g., amazon EC2 machines reading from S3 or Google compute machines reading from GCS.
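Returning to the indexing exercise a little earlier, one possible way to write and then query the id-sorted parquet version; a sketch only, reusing `os`, `dd` and `df_csv` from above, with `accounts_by_id.parquet` as a made-up target name (whether the lookup touches only one partition depends on the divisions stored with the index):
```
target_by_id = os.path.join('data', 'accounts_by_id.parquet')
df_csv.categorize(columns=['names']).set_index('id').to_parquet(target_by_id, engine="fastparquet")
df_by_id = dd.read_parquet(target_by_id)
%time df_by_id.loc[100].compute()   # index-based lookup on the sorted, partitioned data
```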
github_jupyter
%run prep.py -d accounts import os filename = os.path.join('data', 'accounts.*.csv') filename import dask.dataframe as dd df_csv = dd.read_csv(filename) df_csv.head() target = os.path.join('data', 'accounts.h5') target # convert to binary format, takes some time up-front %time df_csv.to_hdf(target, '/data') # same data as before df_hdf = dd.read_hdf(target, '/data') df_hdf.head() %time df_csv.amount.sum().compute() %time df_hdf.amount.sum().compute() # Categorize data, then store in HDFStore %time df_hdf.categorize(columns=['names']).to_hdf(target, '/data2') # It looks the same df_hdf = dd.read_hdf(target, '/data2') df_hdf.head() # But loads more quickly %time df_hdf.amount.sum().compute() target = os.path.join('data', 'accounts.parquet') df_csv.categorize(columns=['names']).to_parquet(target, storage_options={"has_nulls": True}, engine="fastparquet") ls -l data/accounts.parquet/ df_p = dd.read_parquet(target) # note that column names shows the type of the values - we could # choose to load as a categorical column or not. df_p.dtypes %time df_p.amount.sum().compute() # df_p.set_index('id').to_parquet(...) taxi = dd.read_csv('s3://nyc-tlc/trip data/yellow_tripdata_2015-*.csv', storage_options={'anon': True})
0.240775
0.978426
# A simple example, solved three ways 1. CVXPY + MOSEK 2. SD ADMM 3. Coordinate descent ``` %load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt import pandas as pd from scipy import signal from time import time import seaborn as sns import cvxpy as cvx sns.set_style('darkgrid') import sys sys.path.append('..') from osd import Problem from osd.components import MeanSquareSmall, SmoothSecondDifference, SparseFirstDiffConvex, Boolean from osd.signal_decomp_bcd import run_bcd from osd.utilities import progress from osd.signal_decomp_admm import calc_obj TOL = 5e-6 ``` ## Data generation ``` np.random.seed(42) t = np.linspace(0, 1000, 200) signal1 = np.sin(2 * np.pi * t * 1 / (500.)) signal2 = signal.square(2 * np.pi * t * 1 / (450.)) X_real = np.zeros((3, len(t)), dtype=float) X_real[0] = 0.15 * np.random.randn(len(signal1)) X_real[1] = signal1 X_real[2] = signal2 y = np.sum(X_real, axis=0) K, T = X_real.shape plt.figure(figsize=(10, 6)) plt.plot(t, np.sum(X_real[1:], axis=0), label='true signal minus noise') plt.plot(t, y, alpha=0.5, label='observed signal') plt.legend() plt.show() ``` # CVXPY + MOSEK ``` c1 = MeanSquareSmall(size=T) c2 = SmoothSecondDifference(weight=1e3/T) c3 = Boolean(scale=2, shift=-1) components = [c1, c2, c3] problem1 = Problem(y, components) problem1.decompose(how='cvx') opt_obj_val = problem1.objective_value opt_obj_val problem1.plot_decomposition(X_real=X_real); ``` # SD ADMM ``` problem2 = Problem(y, components) problem2.decompose(how='admm', stopping_tolerance=1e-3) problem2.objective_value problem2.decompose(X_init=problem2.estimates, how='bcd', stopping_tolerance=1e-10) problem2.objective_value problem2.decompose(X_init=problem2.estimates, how='bcd', stopping_tolerance=1e-10) problem2.objective_value plt.figure() plt.plot(np.r_[problem2.admm_result['obj_vals'], problem2.bcd_result['obj_vals']] - problem2.objective_value + 1e-12) # plt.axvline(problem2.admm_result['it'], color='red', ls='--') plt.title('objective value') plt.yscale('log') plt.show() plt.figure() plt.plot(np.r_[problem2.admm_result['optimality_residual'], problem2.bcd_result['optimality_residual']], label='residual') plt.axvline(problem2.admm_result['it'], color='red', ls='--') plt.yscale('log') plt.legend() plt.title('internal optimality residual') plt.show() problem2.plot_decomposition(X_real=X_real); ``` # Coordinate Descent ``` problem3 = Problem(y, components) problem3.decompose(how='bcd', stopping_tolerance=TOL) problem3.objective_value len(problem3.bcd_result['obj_vals']) plt.figure() ref = np.min(np.r_[problem3.bcd_result['obj_vals'], problem2.admm_result['obj_vals']]) plt.plot(problem3.bcd_result['obj_vals'] - ref, label='coordinate descent') plt.plot(problem2.admm_result['obj_vals'] - ref, label='SD ADMM') plt.title('suboptimality as compared to CVXPY') plt.yscale('log') plt.legend() plt.show() plt.figure() plt.plot(problem3.bcd_result['optimality_residual'], label='coordinate descent') plt.plot(problem2.admm_result['optimality_residual'], label='SD ADMM') plt.yscale('log') plt.title('internal optimality residual') plt.legend() plt.show() plt.scatter(problem3.bcd_result['optimality_residual'], problem3.bcd_result['obj_vals'], label='sd-bcd', marker='.') plt.scatter(problem2.admm_result['optimality_residual'], problem2.admm_result['obj_vals'], label='sd-admm', marker='.') plt.xscale('log') plt.yscale('log') # plt.xlim(plt.ylim()) plt.xlabel('optimality residual') plt.ylabel('subpotimality as compared to cvxpy') # plt.gca().set_aspect('equal') plt.legend() 
plt.title('Comparison of algorithm optimality residual\nto actual difference between objective value and CVXPY value'); problem3.plot_decomposition(X_real=X_real); ``` ### Hybrid ``` problem2.objective_value 0.02509511305810197 - 0.02509508363937881 ```
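The "Hybrid" heading above is left mostly empty; the pattern it appears to refer to, a coarse ADMM pass followed by BCD refinement from that estimate, is already demonstrated for `problem2` earlier in this notebook. A sketch of that warm-start pattern, reusing only calls shown above (`problem4` is simply a new name introduced here):
```
problem4 = Problem(y, components)
problem4.decompose(how='admm', stopping_tolerance=1e-3)      # coarse ADMM pass
problem4.decompose(X_init=problem4.estimates, how='bcd',
                   stopping_tolerance=TOL)                   # BCD refinement from the ADMM estimate
print(problem4.objective_value)
```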
github_jupyter
%load_ext autoreload %autoreload 2 import numpy as np import matplotlib.pyplot as plt import pandas as pd from scipy import signal from time import time import seaborn as sns import cvxpy as cvx sns.set_style('darkgrid') import sys sys.path.append('..') from osd import Problem from osd.components import MeanSquareSmall, SmoothSecondDifference, SparseFirstDiffConvex, Boolean from osd.signal_decomp_bcd import run_bcd from osd.utilities import progress from osd.signal_decomp_admm import calc_obj TOL = 5e-6 np.random.seed(42) t = np.linspace(0, 1000, 200) signal1 = np.sin(2 * np.pi * t * 1 / (500.)) signal2 = signal.square(2 * np.pi * t * 1 / (450.)) X_real = np.zeros((3, len(t)), dtype=float) X_real[0] = 0.15 * np.random.randn(len(signal1)) X_real[1] = signal1 X_real[2] = signal2 y = np.sum(X_real, axis=0) K, T = X_real.shape plt.figure(figsize=(10, 6)) plt.plot(t, np.sum(X_real[1:], axis=0), label='true signal minus noise') plt.plot(t, y, alpha=0.5, label='observed signal') plt.legend() plt.show() c1 = MeanSquareSmall(size=T) c2 = SmoothSecondDifference(weight=1e3/T) c3 = Boolean(scale=2, shift=-1) components = [c1, c2, c3] problem1 = Problem(y, components) problem1.decompose(how='cvx') opt_obj_val = problem1.objective_value opt_obj_val problem1.plot_decomposition(X_real=X_real); problem2 = Problem(y, components) problem2.decompose(how='admm', stopping_tolerance=1e-3) problem2.objective_value problem2.decompose(X_init=problem2.estimates, how='bcd', stopping_tolerance=1e-10) problem2.objective_value problem2.decompose(X_init=problem2.estimates, how='bcd', stopping_tolerance=1e-10) problem2.objective_value plt.figure() plt.plot(np.r_[problem2.admm_result['obj_vals'], problem2.bcd_result['obj_vals']] - problem2.objective_value + 1e-12) # plt.axvline(problem2.admm_result['it'], color='red', ls='--') plt.title('objective value') plt.yscale('log') plt.show() plt.figure() plt.plot(np.r_[problem2.admm_result['optimality_residual'], problem2.bcd_result['optimality_residual']], label='residual') plt.axvline(problem2.admm_result['it'], color='red', ls='--') plt.yscale('log') plt.legend() plt.title('internal optimality residual') plt.show() problem2.plot_decomposition(X_real=X_real); problem3 = Problem(y, components) problem3.decompose(how='bcd', stopping_tolerance=TOL) problem3.objective_value len(problem3.bcd_result['obj_vals']) plt.figure() ref = np.min(np.r_[problem3.bcd_result['obj_vals'], problem2.admm_result['obj_vals']]) plt.plot(problem3.bcd_result['obj_vals'] - ref, label='coordinate descent') plt.plot(problem2.admm_result['obj_vals'] - ref, label='SD ADMM') plt.title('suboptimality as compared to CVXPY') plt.yscale('log') plt.legend() plt.show() plt.figure() plt.plot(problem3.bcd_result['optimality_residual'], label='coordinate descent') plt.plot(problem2.admm_result['optimality_residual'], label='SD ADMM') plt.yscale('log') plt.title('internal optimality residual') plt.legend() plt.show() plt.scatter(problem3.bcd_result['optimality_residual'], problem3.bcd_result['obj_vals'], label='sd-bcd', marker='.') plt.scatter(problem2.admm_result['optimality_residual'], problem2.admm_result['obj_vals'], label='sd-admm', marker='.') plt.xscale('log') plt.yscale('log') # plt.xlim(plt.ylim()) plt.xlabel('optimality residual') plt.ylabel('subpotimality as compared to cvxpy') # plt.gca().set_aspect('equal') plt.legend() plt.title('Comparison of algorithm optimality residual\nto actual difference between objective value and CVXPY value'); problem3.plot_decomposition(X_real=X_real); problem2.objective_value 
0.02509511305810197 - 0.02509508363937881
0.419053
0.862757
This is a "Neural Network" toy example which implements the basic logical gates. Here we don't use any method to train the NN model; we just guess the correct weights. It is meant to show how, in principle, a NN works. ``` import math def sigmoid(x): return 1./(1+ math.exp(-x)) def neuron(inputs, weights): return sigmoid(sum([x*y for x,y in zip(inputs,weights)])) def almost_equal(x,y,epsilon=0.001): return abs(x-y) < epsilon ``` ### We "implement" NN that computes OR operation: | x1| x2| OR| |---|---|---| 0 | 0 | 0 0 | 1 | 1 1 | 0 | 1 1 | 1 | 1 ### Input: * x0 = 1 (bias term) * x1,x2 in [0,1] ### Weights: We "guess" e.g. w0 = -10, w1 = 20 and w2 = 20 as weights. ``` def NN_OR(x1,x2): weights =[-10, 20, 20] inputs = [1, x1, x2] return neuron(weights,inputs) print(NN_OR(1,0)) print(NN_OR(0,0)) assert almost_equal(NN_OR(0,0),0) assert almost_equal(NN_OR(0,1),1) assert almost_equal(NN_OR(1,0),1) assert almost_equal(NN_OR(1,1),1) ``` ### Analogically we "implement" NN that computes AND operation: | x1| x2| AND| |---|---|---| 0 | 0 | 0 0 | 1 | 0 1 | 0 | 0 1 | 1 | 1 ### Input: * x0 = 1 (bias term) * x1,x2 in [0,1] ### Weights: We "guess" e.g. w0 = -30, w1 = 20 and w2 = 20 as weights. ``` def NN_AND(x1,x2): weights =[-30, 20, 20] inputs = [1, x1, x2] return neuron(weights,inputs) print(NN_AND(1,0)) print(NN_AND(1,1)) assert almost_equal(NN_AND(0,0),0) assert almost_equal(NN_AND(0,1),0) assert almost_equal(NN_AND(1,0),0) assert almost_equal(NN_AND(1,1),1) ``` ### Analogically we "implement" NN that computes NOT operation: | x | NOT| |---|--- | | 0 | 1 | 1 | 0 ### Input: * x0 = 1 (bias term) * x in [0,1] ### Weights: We "guess" w0 = 20 and w1 = -30. ``` def NN_NOT(x): weights =[20, -30] inputs = [1, x] return neuron(weights,inputs) print(NN_NOT(1)) print(NN_NOT(0)) assert almost_equal(NN_NOT(1),0) assert almost_equal(NN_NOT(0),1) ``` ### XOR operation | x1| x2| XOR| |---|---|---| 0 | 0 | 0 0 | 1 | 1 1 | 0 | 1 1 | 1 | 0 It's known that we cannot express XOR with one layer. XOR is equivalent to (x1 OR x2) AND NOT(x1 AND x2) ### Input: * x0 = 1 (bias term) * x1,x2 in [0,1] We will use a combination of the already existing gates ``` def NN_XOR(x1,x2): first = NN_OR(x1,x2) second = NN_AND(x1,x2) return NN_AND(first, NN_NOT(second)) print(NN_XOR(1,0)) print(NN_XOR(0,0)) print(NN_XOR(1,1)) assert almost_equal(NN_XOR(0,0),0) assert almost_equal(NN_XOR(0,1),1) assert almost_equal(NN_XOR(1,0),1) assert almost_equal(NN_XOR(1,1),0) ```
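Since XOR is also equal to (x1 OR x2) AND (x1 NAND x2), the same trick works with a single NAND neuron in place of the AND+NOT pair; a sketch reusing the functions defined above, where `NN_NAND` and `NN_XOR_v2` are new names introduced here:
```
def NN_NAND(x1, x2):
    # single neuron: large positive bias, negative input weights,
    # so the output is low only when both inputs are 1
    weights = [30, -20, -20]
    inputs = [1, x1, x2]
    return neuron(weights, inputs)

def NN_XOR_v2(x1, x2):
    # XOR(x1, x2) = AND(OR(x1, x2), NAND(x1, x2))
    return NN_AND(NN_OR(x1, x2), NN_NAND(x1, x2))

for a in (0, 1):
    for b in (0, 1):
        assert almost_equal(NN_XOR_v2(a, b), (a + b) % 2)
```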
github_jupyter
import math def sigmoid(x): return 1./(1+ math.exp(-x)) def neuron(inputs, weights): return sigmoid(sum([x*y for x,y in zip(inputs,weights)])) def almost_equal(x,y,epsilon=0.001): return abs(x-y) < epsilon def NN_OR(x1,x2): weights =[-10, 20, 20] inputs = [1, x1, x2] return neuron(weights,inputs) print(NN_OR(1,0)) print(NN_OR(0,0)) assert almost_equal(NN_OR(0,0),0) assert almost_equal(NN_OR(0,1),1) assert almost_equal(NN_OR(1,0),1) assert almost_equal(NN_OR(1,1),1) def NN_AND(x1,x2): weights =[-30, 20, 20] inputs = [1, x1, x2] return neuron(weights,inputs) print(NN_AND(1,0)) print(NN_AND(1,1)) assert almost_equal(NN_AND(0,0),0) assert almost_equal(NN_AND(0,1),0) assert almost_equal(NN_AND(1,0),0) assert almost_equal(NN_AND(1,1),1) def NN_NOT(x): weights =[20, -30] inputs = [1, x] return neuron(weights,inputs) print(NN_NOT(1)) print(NN_NOT(0)) assert almost_equal(NN_NOT(1),0) assert almost_equal(NN_NOT(0),1) def NN_XOR(x1,x2): first = NN_OR(x1,x2) second = NN_AND(x1,x2) return NN_AND(first, NN_NOT(second)) print(NN_XOR(1,0)) print(NN_XOR(0,0)) print(NN_XOR(1,1)) assert almost_equal(NN_XOR(0,0),0) assert almost_equal(NN_XOR(0,1),1) assert almost_equal(NN_XOR(1,0),1) assert almost_equal(NN_XOR(1,1),0)
0.609292
0.978073
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from google.colab import drive drive.mount('/content/drive') df = pd.read_csv('/content/drive/My Drive/dataset/creditcard.csv', encoding='utf8') df.head() df.isnull().sum() ``` There are no missing values in the dataframe ``` df.describe() import seaborn as sns plt.figure(figsize=(15,10)) sns.heatmap(df.corr(), annot = True, fmt = '.1f') corrmat = df.corr() importantfeatures = df.columns[np.abs(corrmat["Class"]) > 0.1] print(importantfeatures) # keep rows with amount less than 2126; larger amounts are just outliers, and this also reduces the number of rows dfffff = df[df.Amount < 2126] fraud = dfffff[dfffff['Class'] == 1].reset_index() non_fraud = dfffff[dfffff['Class'] == 0].reset_index() fraud = fraud.drop(columns= 'index') non_fraud = non_fraud.drop(columns= 'index') g = sns.countplot(df['Class']) g.set_xticklabels(['Not Fraud','Fraud']) plt.show() non_fraud['Amount'].describe() fraud['Amount'].describe() ``` The average fraudulent transaction amount is 122 with a frequency of 492, while the average non-fraudulent amount is 87.32 with a frequency of 284,026, i.e. roughly 1 in 577 transactions is a fraud. We can therefore conclude that the dataset is imbalanced. Also, the maximum transaction amount of 25,691 is so large that it can dominate the other features, so we need to eliminate very high amounts with a quantile-based cutoff. ``` dfffff.Amount.describe() plt.figure(figsize=(16,8)) plt.subplot(1, 2, 1) plt.title('Histogram of Time for non-fraudulent samples, mean = %f' % (non_fraud["Amount"].mean())) sns.distplot(non_fraud["Amount"]) plt.subplot(1, 2, 2) plt.title('Histogram of Time for fraudulent samples, mean = %f' % (fraud["Amount"].mean())) sns.histplot(fraud["Amount"]) ``` The distributions are almost similar on the basis of transaction amount ``` corrmat = dfffff.corr() importantfeatures = dfffff.columns[np.abs(corrmat["Class"]) > 0.1] print(importantfeatures) import seaborn as sns n = 1 plt.figure(figsize=(20,15)) importantfeatures = dfffff.drop(['Class','Time'],axis= 1) for column in list(importantfeatures): plt.subplot(8,4,n) n = n+1 sns.boxplot(dfffff[column]) plt.tight_layout() import statsmodels.api as sm dfffff = dfffff.drop(['Time'], axis = 1) X = dfffff.drop(['Class'], axis = 1) y = dfffff['Class'] X_model = sm.add_constant(X) model= sm.OLS(y, X_model).fit() model.summary() # checking p-values: V20 and V23 have p-values greater than 0.05, so we will remove these columns non_fraud = non_fraud.sample(492) # equalising the number of rows in both classes newdata = pd.concat([fraud,non_fraud],axis = 0) newdata = newdata.drop(['Time'], axis = 1) X = newdata.drop(['Class'], axis = 1) y = newdata['Class'] X_model = sm.add_constant(X) model= sm.OLS(y, X_model).fit() model.summary() # p-values greater than 0.05 are removed at the end, i.e. V12 ``` Taking a threshold of 0.05, we remove the columns with p-values greater than 0.05, i.e. V20 and V23. ``` newdata plt.figure(figsize=(15,10)) sns.heatmap(newdata.corr(), annot = True, fmt = '.1f') # on the equalised dataset we can find correlated pairs '''from sklearn.preprocessing import StandardScaler scaler = StandardScaler() dfx = scaler.fit_transform(dfffff.iloc[:,:-1]) dfx = pd.DataFrame(dfx, columns = list(dfffff)[:-1]) dfxx = pd.concat([dfx,dfffff['Class']],axis = 1).dropna().reset_index(drop = True)''' impolist = [] fraudd = dfffff[dfffff['Class']==1] for i in range(0,576): non_ffraud = dfffff[dfffff['Class']==0].iloc[492*i:492*(i+1)] newdataa = pd.concat([fraudd,non_ffraud],axis = 0) corrmat = newdataa.corr() importantfeatures = newdataa.columns[np.abs(corrmat["Class"]) > 0.7] 
impolist.append(importantfeatures) print(importantfeatures) print(impolist) #check columns having greater correlation with target by dividing normal class datasets into subsets #having equal rows to other class then joining every subset of normal class one by one with fraud and getting correlated values #now we are getting 'V4','V9' ,'V11', 'V12', 'V14' in almost iteration so we will take these columns dfx[0] list(dfffff)[:-1] X = newdata[['V4','V9' ,'V11', 'V12', 'V14']] y = newdata['Class'] X_model = sm.add_constant(X) model= sm.OLS(y, X_model).fit() model.summary() ``` we can conclude that V4,V11,V12,V14 are most collinear with our class ``` X = newdata[['V4' , 'V14']] y = newdata['Class'] X_model = sm.add_constant(X) model= sm.OLS(y, X_model).fit() model.summary() #removed v12 and v9 and v11 becuase v12 is correlated with v14 and in first ols report we are getting v12 as high p value and v9 and v11 are removed on making correlation threshold to 0.8 with target #their is increase in r squared and adj r squared on removing theses y ``` DROP COLUMNS having p values greater than 0.05 * List item * List item ``` from sklearn import metrics from sklearn.naive_bayes import GaussianNB from sklearn.ensemble import RandomForestClassifier from sklearn.svm import SVC from sklearn.neighbors import KNeighborsClassifier from xgboost import XGBClassifier from sklearn.metrics import confusion_matrix , classification_report # again here creating datasets by combining normal subsets and fraud dataset and averaging the scores to get results classifiers = { "GaussianNB": GaussianNB(), "KNearest": KNeighborsClassifier(), "Support Vector Classifier": SVC(), "RandomForestClassifier": RandomForestClassifier(), "XGBClassifier":XGBClassifier() } from sklearn.model_selection import train_test_split from statistics import mean score = [] from sklearn.model_selection import cross_val_score fraudd = dfffff[dfffff['Class']==1] for key, classifier in classifiers.items(): accuracyscore = [] recall = [] precision = [] fscore = [] for i in range(0,575): non_ffraud = dfffff[dfffff['Class']==0].iloc[492*i:492*(i+1)] #non_ffraudtest = dfffff[dfffff['Class']==0].iloc[492*(i+1):492*(i+2)] newdataatrain = pd.concat([fraudd,non_ffraud],axis = 0).dropna() X_train,X_test,y_train,y_test = train_test_split(newdataatrain[['V4' , 'V14']],newdataatrain['Class'],stratify = newdataatrain['Class'], test_size=0.2) X = newdataatrain[['V4' , 'V14']] y = newdataatrain['Class'] #X_test = fraudd[['V4' , 'V14']] #y_test = fraudd['Class'] classifier.fit(X_train, y_train) y_preds = classifier.predict(X_test) training_score = cross_val_score(classifier, X, y, cv=10) accuracyscore.append(round(training_score.mean(), 2) * 100) recall.append(metrics.recall_score(y_test,y_preds)) precision.append(metrics.precision_score(y_test,y_preds)) fscore.append(metrics.f1_score(y_test,y_preds)) print("accuracy",mean(accuracyscore),"recal",mean(recall),"precsion",mean(precision),"fscore",mean(fscore),classifier.__class__.__name__) # ransom forest works well we will use this model accuracyscore = [] recall = [] precision = [] fscore = [] classifier = RandomForestClassifier() for i in range(0,575): non_ffraud = dfffff[dfffff['Class']==0].iloc[492*i:492*(i+1)] #non_ffraudtest = dfffff[dfffff['Class']==0].iloc[492*(i+1):492*(i+2)] newdataatrain = pd.concat([fraudd,non_ffraud],axis = 0).dropna() X_train,X_test,y_train,y_test = train_test_split(newdataatrain[['V4' , 'V14']],newdataatrain['Class'],stratify = newdataatrain['Class'], test_size=0.2) X = newdataatrain[['V4' 
, 'V14']] y = newdataatrain['Class'] #X_test = fraudd[['V4' , 'V14']] #y_test = fraudd['Class'] classifier.fit(X_train, y_train) y_preds = classifier.predict(X_test) training_score = cross_val_score(classifier, X, y, cv=10) accuracyscore.append(round(training_score.mean(), 2) * 100) recall.append(metrics.recall_score(y_test,y_preds)) precision.append(metrics.precision_score(y_test,y_preds)) fscore.append(metrics.f1_score(y_test,y_preds)) import numpy as np for j in np.arange(1,0.5,-0.0000001): accuracyy = [i for i, x in enumerate(accuracyscore) if x > j] recally = [i for i, x in enumerate(recall) if x > j] precisiony = [i for i, x in enumerate(precision) if x > j] fscorey = [i for i, x in enumerate(fscore) if x > j] c = list(set(accuracyy) & set(recally) & set(precisiony) & set(fscorey)) if len(c) > 0: break print(c,j) # we will train on dataset of i = 316 because this is common index of subset of normal which giving maximum score of accuracy recally precision fscore non_ffraud = dfffff[dfffff['Class']==0].iloc[492*316:492*(316+1)] datatotrain = pd.concat([fraudd,non_ffraud],axis = 0).dropna() X = datatotrain[['V4' , 'V14']] y = datatotrain['Class'] X y clf = RandomForestClassifier() #training on dataset created by combining normal class subset and fraud class dataset clf.fit(X, y) import joblib joblib.dump(clf, '/content/drive/My Drive/dataset/assesment.pkl') plt.figure(figsize=(15,10)) sns.heatmap(newdata[['V4' , 'V12', 'V14']].corr(), annot = True, fmt = '.1f') ```
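The evaluation strategy in this notebook, slicing the majority class into fraud-sized chunks, pairing each chunk with all fraud rows, and averaging cross-validation scores, can be hard to see through the repeated boilerplate. Below is a condensed, self-contained sketch of that idea. It deliberately uses synthetic data from `make_classification` instead of `creditcard.csv`, and the chunk cap, tree count, and fold count are illustrative assumptions rather than the notebook's exact settings.

```
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Synthetic imbalanced stand-in for the credit card data (~1% positives)
X, y = make_classification(n_samples=20000, n_features=5, weights=[0.99, 0.01], random_state=0)
df = pd.DataFrame(X, columns=[f"V{i}" for i in range(1, 6)])
df["Class"] = y

fraud = df[df["Class"] == 1]
normal = df[df["Class"] == 0]
chunk = len(fraud)                        # balance every subset 1:1 with the minority class
n_chunks = min(len(normal) // chunk, 20)  # cap the loop so the sketch stays quick

scores = []
for i in range(n_chunks):
    # Pair one majority-class chunk with all minority rows, then cross-validate
    subset = pd.concat([fraud, normal.iloc[i * chunk:(i + 1) * chunk]])
    Xs, ys = subset.drop(columns="Class"), subset["Class"]
    scores.append(cross_val_score(RandomForestClassifier(n_estimators=50), Xs, ys, cv=5).mean())

print(f"mean balanced-subset CV accuracy over {n_chunks} chunks: {np.mean(scores):.3f}")
```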
github_jupyter
0.344003
0.739305
``` import pandas as pd import matplotlib import matplotlib.pyplot as plt %matplotlib inline dataset = pd.read_csv('../data/dataset_total_smells.csv', delimiter=',', index_col=0) print(dataset.shape) dataset.head() Numbers = {'TooManyMethods': [dataset['TooManyMethods'].sum()], 'ExcessiveMethodLength': [dataset['ExcessiveMethodLength'].sum()], 'DataClass': [dataset['DataClass'].sum()], 'ExcessiveClassLength': [dataset['ExcessiveClassLength'].sum()], 'ExcessiveParameterList': [dataset['ExcessiveParameterList'].sum()] } dataframe_numbers = pd.DataFrame(Numbers, index=None) dataframe_numbers.head() plt.figure(figsize=(25,10)) dataframe_numbers.plot(kind='bar',title='CodeSmells') plt.show() print("Repositories: ", dataset["name"].count()) columns = ['TooManyMethods', 'ExcessiveMethodLength', 'DataClass', 'ExcessiveClassLength', 'ExcessiveParameterList'] total_code_smells = sum([dataset[l].sum() for l in columns]) print("Total of Code Smells: ", total_code_smells) print("Average Code Smells per Repository: ", total_code_smells/dataset["name"].count()) def bin_occ(df, label): count = 0 for index, row in df.iterrows(): bin_occ = False if row[label] > 0: ok = True for label_others in columns: if row[label_others] > 0: bin_occ = False break if bin_occ: count += 1 return count Binary_occurrencies = {'OnlyTooManyMethods': [bin_occ(dataset, 'TooManyMethods')], 'OnlyExcessiveMethodLength': [bin_occ(dataset, 'ExcessiveMethodLength')], 'OnlyDataClass': [bin_occ(dataset, 'DataClass')], 'OnlyExcessiveClassLength': [bin_occ(dataset, 'ExcessiveClassLength')], 'OnlyExcessiveParameterList': [bin_occ(dataset, 'ExcessiveParameterList')] } dataframe_bin_occ = pd.DataFrame(Binary_occurrencies, index=None) dataframe_bin_occ.head() plt.figure(figsize=(25,10)) dataframe_bin_occ.plot(kind='bar',title='CodeSmells Binary Occurrencies of Smells') plt.show() def inv_bin_occ(df, label): count = 0 for index, row in df.iterrows(): if row[label] == 0: count += 1 return count Inverse_Binary_occurrencies = {'NotOnlyTooManyMethods': [inv_bin_occ(dataset, 'TooManyMethods')], 'NotOnlyExcessiveMethodLength': [inv_bin_occ(dataset, 'ExcessiveMethodLength')], 'NotOnlyDataClass': [inv_bin_occ(dataset, 'DataClass')], 'NotOnlyExcessiveClassLength': [inv_bin_occ(dataset, 'ExcessiveClassLength')], 'NotOnlyExcessiveParameterList': [inv_bin_occ(dataset, 'ExcessiveParameterList')] } dataframe_bin_occ = pd.DataFrame(Inverse_Binary_occurrencies, index=None) dataframe_bin_occ.head() plt.figure(figsize=(25,10)) dataframe_bin_occ.plot(kind='bar',title='CodeSmells Binary Occurrencies of Smells') plt.show() ``` # Stratified dataset splitting ## Balance train and test set by: - classes frequency - commits - lines of code (retrieve this info) ``` def prepare_dataset_bin(class_): for for class_ in columns: print("Preparing class", class_) ``` # visualizing data ``` plt.figure(figsize=(25,10)) dataset['TooManyMethods'].value_counts().plot(kind='bar',title='TooManyMethods') plt.show() dataset['ExcessiveClassLength'].value_counts().plot(kind='bar',title='ExcessiveClassLength') plt.show() ``` # pre-processing ### calling AST transforming
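Looping back to two helpers defined earlier in this notebook: `bin_occ` sets a flag that is never switched to `True` (the stray `ok = True` looks like a leftover), and `prepare_dataset_bin` is an empty stub with a doubled `for`. The sketch below is a possible correction, not the author's implementation: it assumes, from the `Only...` naming of the counts, that the intent is to count repositories exhibiting exactly one smell, and the `prepare_dataset_bin` body is a hypothetical completion.

```
import pandas as pd

smell_columns = ['TooManyMethods', 'ExcessiveMethodLength', 'DataClass',
                 'ExcessiveClassLength', 'ExcessiveParameterList']

def bin_occ(df, label, columns=smell_columns):
    # Count repositories that exhibit *only* this smell: the labelled column is
    # positive and every other smell column is zero.
    count = 0
    for _, row in df.iterrows():
        if row[label] > 0 and all(row[other] == 0 for other in columns if other != label):
            count += 1
    return count

def prepare_dataset_bin(df, class_, columns=smell_columns):
    # Hypothetical completion of the stub: binarize one smell as the target and
    # keep the remaining smell counts as features.
    out = df[columns].copy()
    out["target"] = (out[class_] > 0).astype(int)
    return out.drop(columns=[class_])

# Tiny toy frame: repo 0 has only TooManyMethods, repo 1 has two smells, repo 2 has none.
toy = pd.DataFrame([[3, 0, 0, 0, 0], [1, 2, 0, 0, 0], [0, 0, 0, 0, 0]], columns=smell_columns)
print(bin_occ(toy, 'TooManyMethods'))                             # -> 1
print(prepare_dataset_bin(toy, 'DataClass')["target"].tolist())   # -> [0, 0, 0]
```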
github_jupyter
import pandas as pd import matplotlib import matplotlib.pyplot as plt %matplotlib inline dataset = pd.read_csv('../data/dataset_total_smells.csv', delimiter=',', index_col=0) print(dataset.shape) dataset.head() Numbers = {'TooManyMethods': [dataset['TooManyMethods'].sum()], 'ExcessiveMethodLength': [dataset['ExcessiveMethodLength'].sum()], 'DataClass': [dataset['DataClass'].sum()], 'ExcessiveClassLength': [dataset['ExcessiveClassLength'].sum()], 'ExcessiveParameterList': [dataset['ExcessiveParameterList'].sum()] } dataframe_numbers = pd.DataFrame(Numbers, index=None) dataframe_numbers.head() plt.figure(figsize=(25,10)) dataframe_numbers.plot(kind='bar',title='CodeSmells') plt.show() print("Repositories: ", dataset["name"].count()) columns = ['TooManyMethods', 'ExcessiveMethodLength', 'DataClass', 'ExcessiveClassLength', 'ExcessiveParameterList'] total_code_smells = sum([dataset[l].sum() for l in columns]) print("Total of Code Smells: ", total_code_smells) print("Average Code Smells per Repository: ", total_code_smells/dataset["name"].count()) def bin_occ(df, label): count = 0 for index, row in df.iterrows(): bin_occ = False if row[label] > 0: ok = True for label_others in columns: if row[label_others] > 0: bin_occ = False break if bin_occ: count += 1 return count Binary_occurrencies = {'OnlyTooManyMethods': [bin_occ(dataset, 'TooManyMethods')], 'OnlyExcessiveMethodLength': [bin_occ(dataset, 'ExcessiveMethodLength')], 'OnlyDataClass': [bin_occ(dataset, 'DataClass')], 'OnlyExcessiveClassLength': [bin_occ(dataset, 'ExcessiveClassLength')], 'OnlyExcessiveParameterList': [bin_occ(dataset, 'ExcessiveParameterList')] } dataframe_bin_occ = pd.DataFrame(Binary_occurrencies, index=None) dataframe_bin_occ.head() plt.figure(figsize=(25,10)) dataframe_bin_occ.plot(kind='bar',title='CodeSmells Binary Occurrencies of Smells') plt.show() def inv_bin_occ(df, label): count = 0 for index, row in df.iterrows(): if row[label] == 0: count += 1 return count Inverse_Binary_occurrencies = {'NotOnlyTooManyMethods': [inv_bin_occ(dataset, 'TooManyMethods')], 'NotOnlyExcessiveMethodLength': [inv_bin_occ(dataset, 'ExcessiveMethodLength')], 'NotOnlyDataClass': [inv_bin_occ(dataset, 'DataClass')], 'NotOnlyExcessiveClassLength': [inv_bin_occ(dataset, 'ExcessiveClassLength')], 'NotOnlyExcessiveParameterList': [inv_bin_occ(dataset, 'ExcessiveParameterList')] } dataframe_bin_occ = pd.DataFrame(Inverse_Binary_occurrencies, index=None) dataframe_bin_occ.head() plt.figure(figsize=(25,10)) dataframe_bin_occ.plot(kind='bar',title='CodeSmells Binary Occurrencies of Smells') plt.show() def prepare_dataset_bin(class_): for for class_ in columns: print("Preparing class", class_) plt.figure(figsize=(25,10)) dataset['TooManyMethods'].value_counts().plot(kind='bar',title='TooManyMethods') plt.show() dataset['ExcessiveClassLength'].value_counts().plot(kind='bar',title='ExcessiveClassLength') plt.show()
0.36659
0.550064
<a href="https://colab.research.google.com/github/Omaromar2255/4433/blob/main/Colab%20RDP/Colab%2033.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # **Colab RDP** : Remote Desktop to Colab Instance > **Warning : Not for Cryptocurrency Mining<br></br>** >**Why are hardware resources such as T4 GPUs not available to me?** The best available hardware is prioritized for users who use Colaboratory interactively rather than for long-running computations. Users who use Colaboratory for long-running computations may be temporarily restricted in the type of hardware made available to them, and/or the duration that the hardware can be used for. We encourage users with high computational needs to use Colaboratory’s UI with a local runtime. Please note that using Colaboratory for cryptocurrency mining is disallowed entirely, and may result in being banned from using Colab altogether. Google Colab can give you Instance with 12GB of RAM and GPU for 12 hours (Max.) for Free users. Anyone can use it to perform Heavy Tasks. To use other similiar Notebooks use my Repository **[Colab Hacks](https://github.com/PradyumnaKrishna/Colab-Hacks)** ``` #@title **Create User** #@markdown Enter Username and Password import os username = "user" #@param {type:"string"} password = "root" #@param {type:"string"} print("Creating User and Setting it up") # Creation of user os.system(f"useradd -m {username}") # Add user to sudo group os.system(f"adduser {username} sudo") # Set password of user to 'root' os.system(f"echo '{username}:{password}' | sudo chpasswd") # Change default shell from sh to bash os.system("sed -i 's/\/bin\/sh/\/bin\/bash/g' /etc/passwd") print(f"User created and configured having username `{username}` and password `{password}`") #@title **RDP** #@markdown It takes 4-5 minutes for installation import os import subprocess #@markdown Visit http://remotedesktop.google.com/headless and copy the command after Authentication CRP = "" #@param {type:"string"} #@markdown Enter a Pin (more or equal to 6 digits) Pin = 123456 #@param {type: "integer"} #@markdown Autostart Notebook in RDP Autostart = False #@param {type: "boolean"} class CRD: def __init__(self, user): os.system("apt update") self.installCRD() self.installDesktopEnvironment() self.installGoogleChorme() self.finish(user) print("\nRDP created succesfully move to https://remotedesktop.google.com/access") @staticmethod def installCRD(): print("Installing Chrome Remote Desktop") subprocess.run(['wget', 'https://dl.google.com/linux/direct/chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE) subprocess.run(['dpkg', '--install', 'chrome-remote-desktop_current_amd64.deb'], stdout=subprocess.PIPE) subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE) @staticmethod def installDesktopEnvironment(): print("Installing Desktop Environment") os.system("export DEBIAN_FRONTEND=noninteractive") os.system("apt install --assume-yes xfce4 desktop-base xfce4-terminal") os.system("bash -c 'echo \"exec /etc/X11/Xsession /usr/bin/xfce4-session\" > /etc/chrome-remote-desktop-session'") os.system("apt remove --assume-yes gnome-terminal") os.system("apt install --assume-yes xscreensaver") os.system("systemctl disable lightdm.service") @staticmethod def installGoogleChorme(): print("Installing Google Chrome") subprocess.run(["wget", "https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE) subprocess.run(["dpkg", 
"--install", "google-chrome-stable_current_amd64.deb"], stdout=subprocess.PIPE) subprocess.run(['apt', 'install', '--assume-yes', '--fix-broken'], stdout=subprocess.PIPE) @staticmethod def finish(user): print("Finalizing") if Autostart: os.makedirs(f"/home/{user}/.config/autostart", exist_ok=True) link = "https://colab.research.google.com/github/PradyumnaKrishna/Colab-Hacks/blob/master/Colab%20RDP/Colab%20RDP.ipynb" colab_autostart = """[Desktop Entry] Type=Application Name=Colab Exec=sh -c "sensible-browser {}" Icon= Comment=Open a predefined notebook at session signin. X-GNOME-Autostart-enabled=true""".format(link) with open(f"/home/{user}/.config/autostart/colab.desktop", "w") as f: f.write(colab_autostart) os.system(f"chmod +x /home/{user}/.config/autostart/colab.desktop") os.system(f"chown {user}:{user} /home/{user}/.config") os.system(f"adduser {user} chrome-remote-desktop") command = f"{CRP} --pin={Pin}" os.system(f"su - {user} -c '{command}'") os.system("service chrome-remote-desktop start") print("Finished Succesfully") try: if CRP == "": print("Please enter authcode from the given link") elif len(str(Pin)) < 6: print("Enter a pin more or equal to 6 digits") else: CRD(username) except NameError as e: print("'username' variable not found, Create a user first") #@title **Google Drive Mount** #@markdown Google Drive can be used as Persistance HDD for files.<br> #@markdown **Choose a method (GDFuse Recommended)** mount_method = "GDFuse" #@param ["GDFuse", "Native"] #@markdown **Options for GDFuse** <br> #@markdown - Visit https://github.com/astrada/google-drive-ocamlfuse/wiki/Team-Drives label = "default" #@param {type:"string"} mount_team_drive = False #@param {type:"boolean"} force_mount = False #@param {type:"boolean"} import os import subprocess class Drive(): creds = {} mountpoint = "" deps = False debug = False def __init__(self, mountpoint="/content/drives", debug=False): os.makedirs(mountpoint, exist_ok=True) self.mountpoint = mountpoint self.debug = debug def _mount_gdfuse(self, mount_dir): os.makedirs(mount_dir, exist_ok=True) subprocess.run( ['google-drive-ocamlfuse', '-o', 'allow_other', '-label', label, mount_dir, ] ) print(f"Drive Mounted at {mount_dir}. 
If you get input/output error, then `team_drive_id` might be wrong or not accessible.") def _unmount_gdfuse(self, mount_dir): subprocess.run( ['fusermount', '-u', mount_dir, ] ) os.rmdir(mount_dir) def auth(self): from google.colab import auth from oauth2client.client import GoogleCredentials auth.authenticate_user() creds = GoogleCredentials.get_application_default() self.creds = { "id": creds.client_id, "secret": creds.client_secret } def gdfuse(self, label, mound_team_drive=False, force_mount=False): import getpass if not self.creds: self.auth() if not self.deps: print("Installing google-drive-ocamlfuse") subprocess.run(['apt', 'install', 'software-properties-common python-software-properties module-init-tools', '-y']) subprocess.run(['add-apt-repository', 'ppa:alessandro-strada/ppa', '-y']) subprocess.run(['apt', 'update']) subprocess.run(['apt', 'install', '--assume-yes', 'google-drive-ocamlfuse']) self.deps = True base_dir = '/root/.gdfuse' config_dir = f'{base_dir}/{label}' mount_dir = f"{self.mountpoint}/{label}" if force_mount and os.path.exists(mount_dir): self._unmount_gdfuse(mount_dir) elif os.path.exists(mount_dir): print("Drive already mounted") return if not os.path.exists(config_dir) or force_mount: print(f"Please, open the following URL in a web browser: https://accounts.google.com/o/oauth2/auth?client_id={self.creds['id']}&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&response_type=code&access_type=offline&approval_prompt=force") vcode = getpass.getpass("Enter the Auth Code: ") subprocess.run( ['google-drive-ocamlfuse', '-headless', '-id', self.creds['id'], '-secret', self.creds['secret'], '-label', label, ], text=True, input=vcode ) if mount_team_drive: team_drive_id = input("Enter Team Drive ID: ") subprocess.run( ['sed', '-i', f's/team_drive_id=.*$/team_drive_id={team_drive_id}/g', f'{config_dir}/config' ] ) else: subprocess.run( ['sed', '-i', f's/team_drive_id=.*$/team_drive_id=/g', f'{config_dir}/config' ] ) self._mount_gdfuse(mount_dir) def native(self): from google.colab import drive mount_dir = f"{self.mountpoint}/Native" drive.mount(mount_dir) if 'drive' not in globals(): try: drive = Drive(f"/home/{username}/drives") except NameError: drive = Drive('/content/drives') if mount_method == "Native": drive.native() if mount_method == "GDFuse": drive.gdfuse(label, mount_team_drive, force_mount) #@title install app media ! sudo apt install vlc -y > /dev/null 2>&1 ! sudo aptitude update > /dev/null 2>&1 ! sudo apt-get install libavcodec-extra-53 > /dev/null 2>&1 ! sudo apt-get install -y xarchiver > /dev/null 2>&1 ! sudo apt-get install winff winff-doc ffmpeg libavcodec-extra > /dev/null 2>&1 ! sudo apt-get install libtxc-dxtn-s2tc0 > /dev/null 2>&1 ! sudo apt-get install mesa-utils > /dev/null 2>&1 ! sudo apt-get install xfce4 xfce4-goodies > /dev/null 2>&1 #@title ins bowr ! sudo apt install firefox > /dev/null 2>&1 ! sudo apt install software-properties-common apt-transport-https wget > /dev/null 2>&1 ! wget -q https://packages.microsoft.com/keys/microsoft.asc -O- | sudo apt-key add - > /dev/null 2>&1 ! sudo add-apt-repository "deb [arch=amd64] https://packages.microsoft.com/repos/edge stable main" > /dev/null 2>&1 ! sudo apt install microsoft-edge-dev > /dev/null 2>&1 ! wget -qO- https://deb.opera.com/archive.key | sudo apt-key add - > /dev/null 2>&1 ! sudo add-apt-repository "deb [arch=i386,amd64] https://deb.opera.com/opera-stable/ stable non-free" > /dev/null 2>&1 ! 
sudo apt install opera-stable #@title ins win ! apt-get update ! sudo dpkg --add-architecture i386 ! wget -nc https://dl.winehq.org/wine-builds/winehq.key; sudo apt-key add winehq.key ! sudo apt-add-repository 'deb https://dl.winehq.org/wine-builds/ubuntu/ bionic main' ! sudo add-apt-repository ppa:cybermax-dexter/sdl2-backport ! sudo apt update && sudo apt install --install-recommends winehq-stable ! sudo apt-get install winetricks ! winetricks dotnet45 #@title **SSH** ! pip install colab_ssh --upgrade &> /dev/null #@markdown Choose a method (Agro Recommended) ssh_method = "Agro" #@param ["Agro", "Ngrok"] #@markdown Copy authtoken from https://dashboard.ngrok.com/auth (only for ngrok) ngrokRegion = "us" #@param ["us", "eu", "ap", "au", "sa", "jp", "in"] def runAgro(): from colab_ssh import launch_ssh_cloudflared launch_ssh_cloudflared(password=password) def runNgrok(): from colab_ssh import launch_ssh from IPython.display import clear_output import getpass ngrokToken = getpass.getpass("Enter the ngrokToken: ") launch_ssh(ngrokToken, password, region=ngrokRegion) clear_output() print("ssh", user, end='@') ! curl -s http://localhost:4040/api/tunnels | python3 -c \ "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'][6:].replace(':', ' -p '))" try: user = username password = password except NameError: print("No user found, using username and password as 'root'") user='root' password='root' if ssh_method == "Agro": runAgro() if ssh_method == "Ngrok": runNgrok() #@title **Colab Shutdown** #@markdown To Kill NGROK Tunnel NGROK = False #@param {type:'boolean'} #@markdown To Sleep Colab Sleep = True #@param {type:'boolean'} if NGROK: ! killall ngrok if Sleep: from time import sleep sleep(43200) ```
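The SSH cell above shells out to `curl` and an inline `python3 -c` to read the ngrok agent's local API. A pure-Python equivalent is sketched below; it assumes the ngrok agent is already running and exposing its default local API on port 4040, which is the same assumption the original one-liner makes.

```
# Query ngrok's local API and print an ssh-ready "host -p port" string.
import json
from urllib.request import urlopen

def ngrok_ssh_target(api_url="http://localhost:4040/api/tunnels"):
    with urlopen(api_url) as resp:
        tunnels = json.load(resp)["tunnels"]
    # public_url looks like "tcp://0.tcp.ngrok.io:12345"; strip the scheme and
    # turn the ":port" suffix into an ssh -p argument, like the curl one-liner does.
    public_url = tunnels[0]["public_url"]
    host, port = public_url.split("://", 1)[1].rsplit(":", 1)
    return f"{host} -p {port}"

# print("ssh", username, "@" + ngrok_ssh_target())  # mirrors the notebook's output format
```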
github_jupyter
0.463201
0.710384
<h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc" style="margin-top: 1em;"><ul class="toc-item"></ul></div> ``` from planet4 import io, markings, plotting %matplotlib inline image_id = 'APF0000gpu' datapath = 'gold_comparison' # datapath = "gold_per_obsid" datapath = 'catalog_1.0b2' plotting.plot_finals(image_id, datapath=datapath) datapath = "gold_per_imageid" datapath = "gold_per_obsid" from planet4.catalog_production import do_cluster_ids from planet4 import projection from planet4.projection import img_x_size, img_y_size import seaborn as sns class FNOTCH_REVIEWER: def __init__(self, image_id, datapath, via_obsid, plot_folder=''): self.image_id = io.check_and_pad_id(image_id) self.datapath = datapath self.via_obsid = via_obsid self.p4id = markings.ImageID(image_id) self.image_name = self.p4id.image_name self.plot_folder = plot_folder if via_obsid: self.pm = io.PathManager(obsid=self.p4id.image_name, datapath=datapath) else: self.pm = io.PathManager(image_id, datapath=datapath) self.db = io.DBManager() self.image_ids = [] self.obsid_data = self.db.get_image_name_markings(self.p4id.image_name) self.check_data_exists() def check_data_exists(self): if self.via_obsid is True: # All should be there in via_obsid productions. return for dx in [-1, 0, 1]: for dy in [-1, 0, 1]: image_id = self.get_other_image_id(dx, dy) if image_id is not None: if not self.pm.clustering_logfile.exists(): print("clustering", image_id) print(self.pm.fanfile) print(self.pm.blotchfile) do_cluster_ids(image_id, savedir=self.datapath, do_obsid=self.via_obsid) @property def tile_coords(self): return self.p4id.tile_coords def get_other_image_id(self, dx, dy): coords = self.p4id.tile_coords data = self.obsid_data.query( "x_tile==@coords[0]+@dx and y_tile==@coords[1]+@dy") try: return data.image_id.iloc[0] except IndexError: return None @property def savename(self): d = Path('plots') / self.plot_folder Path(d).mkdir(exist_ok=True) return f"{d}/{self.image_id}_tiles_separate.png" def plot(self): fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(10,8), sharex=True, sharey=True) self.axes = axes plotting.plot_finals(self.image_id, datapath=self.datapath, ax=axes[1][1], via_obsid=self.via_obsid) axes[1][1].set_title(self.image_id) x = 1 y = 1 for dx in [-1, 0, 1]: for dy in [-1, 0, 1]: image_id = self.get_other_image_id(dx, dy) if image_id is not None: ax = axes[x+dy][y+dx] plotting.plot_finals(image_id, datapath=self.datapath, ax=ax, via_obsid=self.via_obsid) ax.set_title(f"{image_id}, {self.tile_coords[0]+dx}, {self.tile_coords[1]+dy}") fig.tight_layout() fig.subplots_adjust(top=0.91) t = "Fnotching " t = t+"via obsid" if self.via_obsid else t+"via image_id" fig.suptitle(t, fontsize=18) fig.savefig(self.savename, dpi=200) def combine_tiles(self): coords = self.p4id.tile_coords rows = [] for dx in [-1, 0, 1]: cols = [] for dy in [-1, 0, 1]: image_id = self.get_other_image_id(dx, dy) if image_id is not None: self.image_ids.append(image_id) cols.append(markings.ImageID(image_id).subframe) if len(cols) < 2: continue combo = np.vstack([cols[0], cols[1][100:]]) if len(cols) > 2: combo = np.vstack([combo, cols[2][100:]]) rows.append(combo) all_ = np.hstack([rows[0], rows[1][:, 100:]]) if len(rows) > 2: all_ = np.hstack([all_, rows[2][:, 100:]]) return all_ @property def extent(self): p4id = markings.ImageID(self.image_ids[0]) UL = projection.xy_to_hirise(0, 0, *p4id.tile_coords) p4id = markings.ImageID(self.image_ids[-1]) LR = projection.xy_to_hirise(img_x_size, img_y_size, *p4id.tile_coords) return 
[UL[0], LR[0], LR[1], UL[1]] def plot_all_image(self, all_, ax=None): if ax is None: _, ax = plt.subplots(figsize=(len(self.image_ids), 9)) ax.imshow(all_, origin='upper', extent=self.extent, aspect='equal') return ax def plot_all_in_one(self): all_ = self.combine_tiles() ax = self.plot_all_image(all_) palette = sns.color_palette("bright", 10) for id_, c in zip(self.image_ids, palette): plotting.plot_finals(id_, self.datapath, ax, scope='hirise', via_obsid=True, user_color=c) extent = self.extent ax.set_xlim(*extent[:2]) ax.set_ylim(*extent[2:]) for x in range(len(self.image_ids)//3 - 1): offset = (x + 1) * (markings.img_x_size - 100) ax.axvline(extent[0]+offset, linestyle='dashed', lw=1) ax.axvline(extent[0]+offset+100, linestyle='solid', lw=1) ax.axvline(extent[0]+offset+200, linestyle='dashed', lw=1) for y in range(3): offset = (y + 1) * (markings.img_y_size - 100) ax.axhline(extent[3]+offset, linestyle='dashed', lw=1) ax.axhline(extent[3]+offset+100, linestyle='solid', lw=1) ax.axhline(extent[3]+offset+200, linestyle='dashed', lw=1) t = f"{len(self.image_ids)} merged tiles around {self.image_id}" ax.set_title(t) savename = Path('plots') / self.plot_folder / f"{self.image_id}_overlap_merged.png" savename.parent.mkdir(exist_ok=True) plt.gcf().savefig(str(savename), dpi=200) print(f"Saved {savename}") datapath frev = FNOTCH_REVIEWER('gpu', datapath, via_obsid=True, plot_folder='overlap_merged') frev.plot_all_in_one() frev.plot() plotting.plot_four_tiles_finals(frev.p4id.image_name, datapath, 1, 13) d = 'plots/gold_member_comparisons' plt.gca().set_title('c10, 1, 13') plt.savefig(f"{d}/fourtiles_c10.png", dpi=200) obsid_data = db.get_image_name_markings('ESP_020930_0980') obsid_data = db.get_image_name_markings('ESP_012076_0945') image_ids = ["APF0000b20", "APF0000b2s", "APF0000b1r", "APF0000bm0", "APF0000bln", "APF0000bj5", "APF0000bgm", "APF0000bdl", "APF0000ps6", "APF0000ps1", "APF0000pu3", "APF0000psx", "APF0000ps2",] p4id = markings.ImageID(image_ids[0]) frev = FNOTCH_REVIEWER(p4id.imgid, p4id.image_name, via_obsid=True) frev.plot_all_in_one() frev.plot() frev.image_name for id_ in image_ids: p4id = markings.ImageID(id_) frev = FNOTCH_REVIEWER(p4id.imgid, p4id.image_name, via_obsid=True, plot_folder='check_overlap') print(id_) frev.plot() frev.plot_all_in_one() plt.close('all') frev.plot() xmax = obsid_data.x_tile.max() ymax = obsid_data.y_tile.max() def get_tile_image(df, xtile, ytile): filtered = df.query('x_tile=={} and y_tile=={}'.format(xtile, ytile)) return io.get_subframe(filtered.image_url.iloc[0]) def get_four_tiles_df(df, x0, y0): query = ('x_tile > {} and x_tile < {} and y_tile > {} and y_tile < {}'. 
format(x0-1, x0+2, y0-1, y0+2)) return df.query(query).sort_values(by=['x_tile', 'y_tile']) def get_four_tiles_img(df, x0, y0): tiles = [] # loop along columns (= to the right) for xtile in [x0, x0+1]: # loop along rows (= down) for ytile in [y0, y0+1]: tiles.append(get_tile_image(df, xtile, ytile)) # tiles[0] and tiles[1] are the left most tiles # we have overlap of 100 pixels in all directions left = np.vstack([tiles[0], tiles[1][100:]]) right = np.vstack([tiles[2], tiles[3][100:]]) # now slicing on axis=1, because I combine in column-direction all_ = np.hstack([left, right[:, 100:]]) return all_ def browse_images(df): xmax = df.x_tile.max() ymax = df.y_tile.max() def view_image(xtile=1, ytile=1): img = get_four_tiles_img(df, xtile, ytile) print(img.shape) plt.imshow(img, origin='upper', aspect='auto') plt.title(f'x_tile: {xtile}, y_tile: {ytile}') plt.show() interact(view_image, xtile=(1, xmax-1), ytile=(1, ymax-1)) l_s = [180.3, 189.7, 195.1, 199.4, 209.2, 243.8] meta_data = [123, 131, 142, 133, 156, 158] spice = [120.4, 121.5, 130.3, 122.2, 123.8, 122.7 ] plt.plot(l_s, meta_data, '-*', label='meta_data and website') plt.plot(l_s, spice, '-*', label='spice') plt.title("Unprojected North Azimuth discrepancies") plt.xlabel("Solar longitude") plt.ylabel("Unprojected North Azimuth") plt.legend() plt.savefig("north_azimuth_deltas.png", dpi=200) ```
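The stitching done in `combine_tiles` and `get_four_tiles_img` hinges on one detail: adjacent tiles overlap by 100 pixels, so every tile after the first contributes only the slice from index 100 onward. The stand-alone check below demonstrates that on dummy arrays; the tile dimensions are made-up values, not the actual Planet Four subframe size.

```
import numpy as np

tile_h, tile_w, overlap = 648, 800, 100
tiles = [np.full((tile_h, tile_w), i, dtype=np.uint8) for i in range(4)]

# Two tiles stacked vertically share `overlap` rows...
left = np.vstack([tiles[0], tiles[1][overlap:]])
right = np.vstack([tiles[2], tiles[3][overlap:]])
# ...and the two resulting columns share `overlap` columns.
mosaic = np.hstack([left, right[:, overlap:]])

assert left.shape == (2 * tile_h - overlap, tile_w)
assert mosaic.shape == (2 * tile_h - overlap, 2 * tile_w - overlap)
print(mosaic.shape)  # (1196, 1500) for these dummy dimensions
```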
github_jupyter
0.459076
0.715035
![](https://i.imgur.com/eBRPvWB.png) # Practical PyTorch: Translation with a Sequence to Sequence Network and Attention In this project we will be teaching a neural network to translate from French to English. ``` [KEY: > input, = target, < output] > il est en train de peindre un tableau . = he is painting a picture . < he is painting a picture . > pourquoi ne pas essayer ce vin delicieux ? = why not try that delicious wine ? < why not try that delicious wine ? > elle n est pas poete mais romanciere . = she is not a poet but a novelist . < she not not a poet but a novelist . > vous etes trop maigre . = you re too skinny . < you re all alone . ``` ... to varying degrees of success. This is made possible by the simple but powerful idea of the [sequence to sequence network](http://arxiv.org/abs/1409.3215), in which two recurrent neural networks work together to transform one sequence to another. An encoder network condenses an input sequence into a single vector, and a decoder network unfolds that vector into a new sequence. To improve upon this model we'll use an [attention mechanism](https://arxiv.org/abs/1409.0473), which lets the decoder learn to focus over a specific range of the input sequence. # Sequence to Sequence Learning A [Sequence to Sequence network](http://arxiv.org/abs/1409.3215), or seq2seq network, or [Encoder Decoder network](https://arxiv.org/pdf/1406.1078v3.pdf), is a model consisting of two separate RNNs called the **encoder** and **decoder**. The encoder reads an input sequence one item at a time, and outputs a vector at each step. The final output of the encoder is kept as the **context** vector. The decoder uses this context vector to produce a sequence of outputs one step at a time. ![](https://i.imgur.com/tVtHhNp.png) When using a single RNN, there is a one-to-one relationship between inputs and outputs. We would quickly run into problems with different sequence orders and lengths that are common during translation. Consider the simple sentence "Je ne suis pas le chat noir" &rarr; "I am not the black cat". Many of the words have a pretty direct translation, like "chat" &rarr; "cat". However the differing grammars cause words to be in different orders, e.g. "chat noir" and "black cat". There is also the "ne ... pas" &rarr; "not" construction that makes the two sentences have different lengths. With the seq2seq model, by encoding many inputs into one vector, and decoding from one vector into many outputs, we are freed from the constraints of sequence order and length. The encoded sequence is represented by a single vector, a single point in some N dimensional space of sequences. In an ideal case, this point can be considered the "meaning" of the sequence. This idea can be extended beyond sequences. Image captioning tasks take an [image as input, and output a description](https://arxiv.org/abs/1411.4555) of the image (img2seq). Some image generation tasks take a [description as input and output a generated image](https://arxiv.org/abs/1511.02793) (seq2img). These models can be referred to more generally as "encoder decoder" networks. ## The Attention Mechanism The fixed-length vector carries the burden of encoding the the entire "meaning" of the input sequence, no matter how long that may be. With all the variance in language, this is a very hard problem. Imagine two nearly identical sentences, twenty words long, with only one word different. Both the encoders and decoders must be nuanced enough to represent that change as a very slightly different point in space. 
The **attention mechanism** [introduced by Bahdanau et al.](https://arxiv.org/abs/1409.0473) addresses this by giving the decoder a way to "pay attention" to parts of the input, rather than relying on a single vector. For every step the decoder can select a different part of the input sentence to consider.

![](https://i.imgur.com/5y6SCvU.png)

Attention is calculated using the current hidden state and each encoder output, resulting in a vector the same size as the input sequence, called the *attention weights*. These weights are multiplied by the encoder outputs to create a weighted sum of encoder outputs, which is called the *context* vector. The context vector and hidden state are used to predict the next output element.

![](https://i.imgur.com/K1qMPxs.png)

# Requirements

You will need [PyTorch](http://pytorch.org/) to build and train the models, and [matplotlib](https://matplotlib.org/) for plotting training and visualizing attention outputs later. The rest are built-in Python libraries.

```
import unicodedata
import string
import re
import random
import time
import datetime
import math
import socket
hostname = socket.gethostname()

import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence#, masked_cross_entropy
from masked_cross_entropy import *

import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np

%matplotlib inline
```

Here we will also define a constant to decide whether to use the GPU (with CUDA specifically) or the CPU. **If you don't have a GPU, set this to `False`**. Later when we create tensors, this variable will be used to decide whether we keep them on CPU or move them to GPU.

```
USE_CUDA = True
```

## Loading data files

The data for this project is a set of many thousands of English to French translation pairs.

[This question on Open Data Stack Exchange](http://opendata.stackexchange.com/questions/3888/dataset-of-sentences-translated-into-many-languages) pointed me to the open translation site http://tatoeba.org/ which has downloads available at http://tatoeba.org/eng/downloads - and better yet, someone did the extra work of splitting language pairs into individual text files here: http://www.manythings.org/anki/

The English to French pairs are too big to include in the repo, so download `fra-eng.zip`, extract the text file in there, and rename it to `data/eng-fra.txt` before continuing (for some reason the zipfile is named backwards). The file is a tab separated list of translation pairs:

```
I am cold.    Je suis froid.
```

Similar to the character encoding used in the character-level RNN tutorials, we will be representing each word in a language as a one-hot vector, or giant vector of zeros except for a single one (at the index of the word). Compared to the dozens of characters that might exist in a language, there are many many more words, so the encoding vector is much larger. We will however cheat a bit and trim the data to only use a few thousand words per language.

### Indexing words

We'll need a unique index per word to use as the inputs and targets of the networks later. To keep track of all this we will use a helper class called `Lang` which has word &rarr; index (`word2index`) and index &rarr; word (`index2word`) dictionaries, as well as a count of each word (`word2count`). This class includes a function `trim(min_count)` to remove rare words once they are all counted.
```
PAD_token = 0
SOS_token = 1
EOS_token = 2

class Lang:
    def __init__(self, name):
        self.name = name
        self.trimmed = False
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
        self.n_words = 3 # Count default tokens

    def index_words(self, sentence):
        for word in sentence.split(' '):
            self.index_word(word)

    def index_word(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1

    # Remove words below a certain count threshold
    def trim(self, min_count):
        if self.trimmed: return
        self.trimmed = True

        keep_words = []

        for k, v in self.word2count.items():
            if v >= min_count:
                keep_words.append(k)

        print('keep_words %s / %s = %.4f' % (
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))

        # Reinitialize dictionaries
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
        self.n_words = 3 # Count default tokens

        for word in keep_words:
            self.index_word(word)
```

### Reading and decoding files

The files are all in Unicode; to simplify things we will turn Unicode characters to ASCII, make everything lowercase, and trim most punctuation.

```
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicode_to_ascii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )

# Lowercase, trim, and remove non-letter characters
def normalize_string(s):
    s = unicode_to_ascii(s.lower().strip())
    s = re.sub(r"([,.!?])", r" \1 ", s)
    s = re.sub(r"[^a-zA-Z,.!?]+", r" ", s)
    s = re.sub(r"\s+", r" ", s).strip()
    return s
```

To read the data file we will split the file into lines, and then split lines into pairs. The files are all English &rarr; Other Language, so if we want to translate from Other Language &rarr; English I added the `reverse` flag to reverse the pairs.
```
def read_langs(lang1, lang2, reverse=False):
    print("Reading lines...")

    # Read the file and split into lines
#     filename = '../data/%s-%s.txt' % (lang1, lang2)
    filename = '../%s-%s.txt' % (lang1, lang2)
    lines = open(filename).read().strip().split('\n')

    # Split every line into pairs and normalize
    pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines]

    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs

MIN_LENGTH = 3
MAX_LENGTH = 25

def filter_pairs(pairs):
    filtered_pairs = []
    for pair in pairs:
        if len(pair[0]) >= MIN_LENGTH and len(pair[0]) <= MAX_LENGTH \
            and len(pair[1]) >= MIN_LENGTH and len(pair[1]) <= MAX_LENGTH:
                filtered_pairs.append(pair)
    return filtered_pairs
```

The full process for preparing the data is:

* Read text file and split into lines
* Split lines into pairs and normalize
* Filter to pairs of a certain length
* Make word lists from sentences in pairs

```
def prepare_data(lang1_name, lang2_name, reverse=False):
    input_lang, output_lang, pairs = read_langs(lang1_name, lang2_name, reverse)
    print("Read %d sentence pairs" % len(pairs))

    pairs = filter_pairs(pairs)
    print("Filtered to %d pairs" % len(pairs))

    print("Indexing words...")
    for pair in pairs:
        input_lang.index_words(pair[0])
        output_lang.index_words(pair[1])

    print('Indexed %d words in input language, %d words in output' % (input_lang.n_words, output_lang.n_words))
    return input_lang, output_lang, pairs

input_lang, output_lang, pairs = prepare_data('eng', 'fra', True)
```

### Filtering vocabularies

To get something that trains in under an hour, we'll trim the data set a bit. First we will use the `trim` function on each language (defined above) to only include words that are repeated a certain number of times throughout the dataset (this softens the difficulty of learning a correct translation for words that don't appear often).

```
MIN_COUNT = 5

input_lang.trim(MIN_COUNT)
output_lang.trim(MIN_COUNT)
```

### Filtering pairs

Now we will go back to the set of all sentence pairs and remove those with unknown words.

```
keep_pairs = []

for pair in pairs:
    input_sentence = pair[0]
    output_sentence = pair[1]
    keep_input = True
    keep_output = True

    for word in input_sentence.split(' '):
        if word not in input_lang.word2index:
            keep_input = False
            break

    for word in output_sentence.split(' '):
        if word not in output_lang.word2index:
            keep_output = False
            break

    # Remove if pair doesn't match input and output conditions
    if keep_input and keep_output:
        keep_pairs.append(pair)

print("Trimmed from %d pairs to %d, %.4f of total" % (len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs)))
pairs = keep_pairs
```

## Turning training data into Tensors

To train we need to turn the sentences into something the neural network can understand, which of course means numbers. Each sentence will be split into words and turned into a `LongTensor` which represents the index (from the Lang indexes made earlier) of each word. While creating these tensors we will also append the EOS token to signal that the sentence is over.
![](https://i.imgur.com/LzocpGH.png)

```
# Return a list of indexes, one for each word in the sentence, plus EOS
def indexes_from_sentence(lang, sentence):
    return [lang.word2index[word] for word in sentence.split(' ')] + [EOS_token]
```

We can make better use of the GPU by training on batches of many sequences at once, but doing so brings up the question of how to deal with sequences of varying lengths. The simple solution is to "pad" the shorter sentences with some padding symbol (in this case `0`), and ignore these padded spots when calculating the loss.

![](https://i.imgur.com/gGlkEEF.png)

```
# Pad a sequence with the PAD symbol
def pad_seq(seq, max_length):
    seq += [PAD_token for i in range(max_length - len(seq))]
    return seq
```

To create a Variable for a full batch of inputs (and targets) we get a random sample of sequences and pad them all to the length of the longest sequence. We'll keep track of the lengths of each batch in order to un-pad later.

Initializing a `LongTensor` with an array (batches) of arrays (sequences) gives us a `(batch_size x max_len)` tensor - selecting the first dimension gives you a single batch, which is a full sequence. When training the model we'll want a single time step at once, so we'll transpose to `(max_len x batch_size)`. Now selecting along the first dimension returns a single time step across batches.

![](https://i.imgur.com/nBxTG3v.png)

```
def random_batch(batch_size):
    input_seqs = []
    target_seqs = []

    # Choose random pairs
    for i in range(batch_size):
        pair = random.choice(pairs)
        input_seqs.append(indexes_from_sentence(input_lang, pair[0]))
        target_seqs.append(indexes_from_sentence(output_lang, pair[1]))

    # Zip into pairs, sort by length (descending), unzip
    seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True)
    input_seqs, target_seqs = zip(*seq_pairs)

    # For input and target sequences, get array of lengths and pad with 0s to max length
    input_lengths = [len(s) for s in input_seqs]
    input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs]
    target_lengths = [len(s) for s in target_seqs]
    target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs]

    # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
    input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1)
    target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1)

    if USE_CUDA:
        input_var = input_var.cuda()
        target_var = target_var.cuda()

    return input_var, input_lengths, target_var, target_lengths
```

We can test this to see that it will return a `(max_len x batch_size)` tensor for input and target sentences, along with a corresponding list of batch lengths for each (which we will use for masking later).

```
random_batch(2)
```

# Building the models

## The Encoder

<img src="images/encoder-network.png" style="float: right" />

The encoder will take a batch of word sequences, a `LongTensor` of size `(max_len x batch_size)`, and output an encoding for each word, a `FloatTensor` of size `(max_len x batch_size x hidden_size)`.

The word inputs are fed through an [embedding layer `nn.Embedding`](http://pytorch.org/docs/nn.html#embedding) to create an embedding for each word, with size `seq_len x hidden_size` (as if it was a batch of words). This is resized to `seq_len x 1 x hidden_size` to fit the expected input of the [GRU layer `nn.GRU`](http://pytorch.org/docs/nn.html#gru). The GRU will return both an output sequence of size `seq_len x hidden_size` and a final hidden state.
```
class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1):
        super(EncoderRNN, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.dropout = dropout

        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True)

    def forward(self, input_seqs, input_lengths, hidden=None):
        # Note: we run this all at once (over multiple batches of multiple sequences)
        embedded = self.embedding(input_seqs)
        packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths)
        outputs, hidden = self.gru(packed, hidden)
        outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded)
        outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs
        return outputs, hidden
```

## Attention Decoder

### Interpreting the Bahdanau et al. model

[Neural Machine Translation by Jointly Learning to Align and Translate](https://arxiv.org/abs/1409.0473) (Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio) introduced the idea of using attention for seq2seq translation.

Each decoder output is conditioned on the previous outputs and some $\mathbf x$, where $\mathbf x$ consists of the current hidden state (which takes into account previous outputs) and the attention "context", which is calculated below. The function $g$ is a fully-connected layer with a nonlinear activation, which takes as input the values $y_{i-1}$, $s_i$, and $c_i$ concatenated.

$$ p(y_i \mid \{y_1,...,y_{i-1}\},\mathbf{x}) = g(y_{i-1}, s_i, c_i) $$

The current hidden state $s_i$ is calculated by an RNN $f$ with the last hidden state $s_{i-1}$, last decoder output value $y_{i-1}$, and context vector $c_i$.

In the code, the RNN will be a `nn.GRU` layer, the hidden state $s_i$ will be called `hidden`, the output $y_i$ called `output`, and context $c_i$ called `context`.

$$ s_i = f(s_{i-1}, y_{i-1}, c_i) $$

The context vector $c_i$ is a weighted sum of all encoder outputs, where each weight $a_{ij}$ is the amount of "attention" paid to the corresponding encoder output $h_j$.

$$ c_i = \sum_{j=1}^{T_x} a_{ij} h_j $$

... where each weight $a_{ij}$ is a normalized (over all steps) attention "energy" $e_{ij}$ ...

$$ a_{ij} = \dfrac{exp(e_{ij})}{\sum_{k=1}^{T} exp(e_{ik})} $$

... where each attention energy is calculated with some function $a$ (such as another linear layer) using the last hidden state $s_{i-1}$ and that particular encoder output $h_j$:

$$ e_{ij} = a(s_{i-1}, h_j) $$

### Interpreting the Luong et al. models

[Effective Approaches to Attention-based Neural Machine Translation](https://arxiv.org/abs/1508.04025) (Minh-Thang Luong, Hieu Pham, Christopher D. Manning) describe a few more attention models that offer improvements and simplifications. They describe a few "global attention" models, the distinction between them being the way the attention scores are calculated.
The general form of the attention calculation relies on the target (decoder) side hidden state and corresponding source (encoder) side state, normalized over all states to get values summing to 1:

$$ a_t(s) = align(h_t, \bar h_s) = \dfrac{exp(score(h_t, \bar h_s))}{\sum_{s'} exp(score(h_t, \bar h_{s'}))} $$

The specific "score" function that compares two states is either *dot*, a simple dot product between the states; *general*, a dot product between the decoder hidden state and a linear transform of the encoder state; or *concat*, a dot product between a new parameter $v_a$ and a linear transform of the states concatenated together.

$$ score(h_t, \bar h_s) = \begin{cases} h_t ^\top \bar h_s & dot \\ h_t ^\top \textbf{W}_a \bar h_s & general \\ v_a ^\top \textbf{W}_a [ h_t ; \bar h_s ] & concat \end{cases} $$

The modular definition of these scoring functions gives us an opportunity to build a specific attention module that can switch between the different score methods. The input to this module is always the hidden state (of the decoder RNN) and the set of encoder outputs.

### Implementing an attention module

```
class Attn(nn.Module):
    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()

        self.method = method
        self.hidden_size = hidden_size

        if self.method == 'general':
            self.attn = nn.Linear(self.hidden_size, hidden_size)

        elif self.method == 'concat':
            self.attn = nn.Linear(self.hidden_size * 2, hidden_size)
            self.v = nn.Parameter(torch.FloatTensor(1, hidden_size))

    def forward(self, hidden, encoder_outputs):
        max_len = encoder_outputs.size(0)
        this_batch_size = encoder_outputs.size(1)

        # Create variable to store attention energies
        attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S

        if USE_CUDA:
            attn_energies = attn_energies.cuda()

        # For each batch of encoder outputs
        for b in range(this_batch_size):
            # Calculate energy for each encoder output
            for i in range(max_len):
                attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0))

        # Normalize energies to weights in range 0 to 1, resize to 1 x B x S
        return F.softmax(attn_energies).unsqueeze(1)

    def score(self, hidden, encoder_output):
        if self.method == 'dot':
            energy = hidden.dot(encoder_output)
            return energy

        elif self.method == 'general':
            energy = self.attn(encoder_output)
            energy = hidden.dot(energy)
            return energy

        elif self.method == 'concat':
            energy = self.attn(torch.cat((hidden, encoder_output), 1))
            energy = self.v.dot(energy)
            return energy
```

### Implementing the Bahdanau et al. model

In summary our decoder should consist of four main parts - an embedding layer turning an input word into a vector; a layer to calculate the attention energy per encoder output; an RNN layer; and an output layer.

The decoder's inputs are the last RNN hidden state $s_{i-1}$, last output $y_{i-1}$, and all encoder outputs $h_*$.
* embedding layer with inputs $y_{i-1}$
    * `embedded = embedding(last_rnn_output)`
* attention layer $a$ with inputs $(s_{i-1}, h_j)$ and outputs $e_{ij}$, normalized to create $a_{ij}$
    * `attn_energies[j] = attn_layer(last_hidden, encoder_outputs[j])`
    * `attn_weights = normalize(attn_energies)`
* context vector $c_i$ as an attention-weighted average of encoder outputs
    * `context = sum(attn_weights * encoder_outputs)`
* RNN layer(s) $f$ with inputs $(s_{i-1}, y_{i-1}, c_i)$ and internal hidden state, outputting $s_i$
    * `rnn_input = concat(embedded, context)`
    * `rnn_output, rnn_hidden = rnn(rnn_input, last_hidden)`
* an output layer $g$ with inputs $(y_{i-1}, s_i, c_i)$, outputting $y_i$
    * `output = out(embedded, rnn_output, context)`

```
class BahdanauAttnDecoderRNN(nn.Module):
    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
        super(BahdanauAttnDecoderRNN, self).__init__()

        # Define parameters
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.max_length = max_length

        # Define layers
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.dropout = nn.Dropout(dropout_p)
        self.attn = Attn('concat', hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p)
        self.out = nn.Linear(hidden_size * 2, output_size) # takes the concatenated RNN output and context

    def forward(self, word_input, last_hidden, encoder_outputs):
        # Note: we run this one step at a time
        # TODO: FIX BATCHING

        # Get the embedding of the current input word (last output word)
        word_embedded = self.embedding(word_input).view(1, 1, -1) # S=1 x B x N
        word_embedded = self.dropout(word_embedded)

        # Calculate attention weights and apply to encoder outputs
        attn_weights = self.attn(last_hidden[-1], encoder_outputs)
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x 1 x N
        context = context.transpose(0, 1) # 1 x B x N

        # Combine embedded input word and attended context, run through RNN
        rnn_input = torch.cat((word_embedded, context), 2)
        output, hidden = self.gru(rnn_input, last_hidden)

        # Final output layer
        output = output.squeeze(0) # B x N
        context = context.squeeze(0) # B x N
        output = F.log_softmax(self.out(torch.cat((output, context), 1)))

        # Return final output, hidden state, and attention weights (for visualization)
        return output, hidden, attn_weights
```

Now we can build a decoder that plugs this Attn module in after the RNN to calculate attention weights, and apply those weights to the encoder outputs to get a context vector.
```
class LuongAttnDecoderRNN(nn.Module):
    def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout=0.1):
        super(LuongAttnDecoderRNN, self).__init__()

        # Keep for reference
        self.attn_model = attn_model
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout

        # Define layers
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.embedding_dropout = nn.Dropout(dropout)
        self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout)
        self.concat = nn.Linear(hidden_size * 2, hidden_size)
        self.out = nn.Linear(hidden_size, output_size)

        # Choose attention model
        if attn_model != 'none':
            self.attn = Attn(attn_model, hidden_size)

    def forward(self, input_seq, last_hidden, encoder_outputs):
        # Note: we run this one step at a time

        # Get the embedding of the current input word (last output word)
        batch_size = input_seq.size(0)
        embedded = self.embedding(input_seq)
        embedded = self.embedding_dropout(embedded)
        embedded = embedded.view(1, batch_size, self.hidden_size) # S=1 x B x N

        # Get current hidden state from input word and last hidden state
        rnn_output, hidden = self.gru(embedded, last_hidden)

        # Calculate attention from current RNN state and all encoder outputs;
        # apply to encoder outputs to get weighted average
        attn_weights = self.attn(rnn_output, encoder_outputs)
        context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x S=1 x N

        # Attentional vector using the RNN hidden state and context vector
        # concatenated together (Luong eq. 5)
        rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N
        context = context.squeeze(1)       # B x S=1 x N -> B x N
        concat_input = torch.cat((rnn_output, context), 1)
        concat_output = F.tanh(self.concat(concat_input))

        # Finally predict next token (Luong eq. 6, without softmax)
        output = self.out(concat_output)

        # Return final output, hidden state, and attention weights (for visualization)
        return output, hidden, attn_weights
```

## Testing the models

To make sure the encoder and decoder modules are working (and working together) we'll do a full test with a small batch.

```
small_batch_size = 3
input_batches, input_lengths, target_batches, target_lengths = random_batch(small_batch_size)

print('input_batches', input_batches.size()) # (max_len x batch_size)
print('target_batches', target_batches.size()) # (max_len x batch_size)
```

Create models with a small size (a good idea for eyeball inspection):

```
small_hidden_size = 8
small_n_layers = 2

encoder_test = EncoderRNN(input_lang.n_words, small_hidden_size, small_n_layers)
decoder_test = LuongAttnDecoderRNN('general', small_hidden_size, output_lang.n_words, small_n_layers)

if USE_CUDA:
    encoder_test.cuda()
    decoder_test.cuda()
```

To test the encoder, run the input batch through to get per-batch encoder outputs:

```
encoder_outputs, encoder_hidden = encoder_test(input_batches, input_lengths, None)

print('encoder_outputs', encoder_outputs.size()) # max_len x batch_size x hidden_size
print('encoder_hidden', encoder_hidden.size()) # n_layers * 2 x batch_size x hidden_size
```

Then starting with a SOS token, run word tokens through the decoder to get each next word token. Instead of doing this with the whole sequence, it is done one step at a time, to support using its own predictions to make the next prediction. This will be one time step at a time, but batched per time step. In order to get this to work for short padded sequences, the batch size is going to get smaller each time.
```
max_target_length = max(target_lengths)

# Prepare decoder input and outputs
decoder_input = Variable(torch.LongTensor([SOS_token] * small_batch_size))
decoder_hidden = encoder_hidden[:decoder_test.n_layers] # Use last (forward) hidden state from encoder
all_decoder_outputs = Variable(torch.zeros(max_target_length, small_batch_size, decoder_test.output_size))

if USE_CUDA:
    all_decoder_outputs = all_decoder_outputs.cuda()
    decoder_input = decoder_input.cuda()

# Run through decoder one time step at a time
for t in range(max_target_length):
    decoder_output, decoder_hidden, decoder_attn = decoder_test(
        decoder_input, decoder_hidden, encoder_outputs
    )
    all_decoder_outputs[t] = decoder_output # Store this step's outputs
    decoder_input = target_batches[t] # Next input is current target

# Test masked cross entropy loss
loss = masked_cross_entropy(
    all_decoder_outputs.transpose(0, 1).contiguous(),
    target_batches.transpose(0, 1).contiguous(),
    target_lengths
)
print('loss', loss.data[0])
```

# Training

## Defining a training iteration

To train we first run the input sentence through the encoder word by word, and keep track of every output and the latest hidden state. Next the decoder is given the last hidden state of the encoder as its first hidden state, and the `<SOS>` token as its first input. From there we iterate to predict a next token from the decoder.

### Teacher Forcing vs. Scheduled Sampling

"Teacher Forcing", or maximum likelihood sampling, means using the real target outputs as each next input when training. The alternative is using the decoder's own guess as the next input. Using teacher forcing may cause the network to converge faster, but [when the trained network is exploited, it may exhibit instability](http://minds.jacobs-university.de/sites/default/files/uploads/papers/ESNTutorialRev.pdf).

You can observe outputs of teacher-forced networks that read with coherent grammar but wander far from the correct translation - you could think of it as having learned how to listen to the teacher's instructions, without learning how to venture out on its own.

The solution to the teacher-forcing "problem" is known as [Scheduled Sampling](https://arxiv.org/abs/1506.03099), which simply alternates between using the target values and predicted values when training. We will randomly choose to use teacher forcing with an if statement while training - sometimes we'll feed the real target as the input (ignoring the decoder's output), sometimes we'll use the decoder's output.
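As written, the `train()` function in the next cell always feeds the ground-truth token back in, so the `teacher_forcing_ratio` defined in the configuration cell further down is not actually consulted there. The snippet below is only a rough sketch of what the per-step choice described above could look like inside that decoding loop; it reuses the notebook's variable names, and the `else` branch is an assumption for illustration rather than part of the original code.

```
# Sketch only: alternate between teacher forcing and the decoder's own predictions
use_teacher_forcing = random.random() < teacher_forcing_ratio

for t in range(max_target_length):
    decoder_output, decoder_hidden, decoder_attn = decoder(
        decoder_input, decoder_hidden, encoder_outputs
    )
    all_decoder_outputs[t] = decoder_output

    if use_teacher_forcing:
        decoder_input = target_batches[t]          # feed the ground-truth token
    else:
        topv, topi = decoder_output.data.topk(1)   # feed the decoder's own best guess
        decoder_input = Variable(topi.squeeze(1))
        if USE_CUDA:
            decoder_input = decoder_input.cuda()
```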
```
def train(input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):

    # Zero gradients of both optimizers
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    loss = 0 # Added onto for each word

    # Run words through encoder
    encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None)

    # Prepare input and output variables
    decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size))
    decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder

    max_target_length = max(target_lengths)
    all_decoder_outputs = Variable(torch.zeros(max_target_length, batch_size, decoder.output_size))

    # Move new Variables to CUDA
    if USE_CUDA:
        decoder_input = decoder_input.cuda()
        all_decoder_outputs = all_decoder_outputs.cuda()

    # Run through decoder one time step at a time
    for t in range(max_target_length):
        decoder_output, decoder_hidden, decoder_attn = decoder(
            decoder_input, decoder_hidden, encoder_outputs
        )

        all_decoder_outputs[t] = decoder_output
        decoder_input = target_batches[t] # Next input is current target

    # Loss calculation and backpropagation
    loss = masked_cross_entropy(
        all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq
        target_batches.transpose(0, 1).contiguous(), # -> batch x seq
        target_lengths
    )
    loss.backward()

    # Clip gradient norms
    ec = torch.nn.utils.clip_grad_norm(encoder.parameters(), clip)
    dc = torch.nn.utils.clip_grad_norm(decoder.parameters(), clip)

    # Update parameters with optimizers
    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.data[0], ec, dc
```

## Running training

With everything in place we can actually initialize a network and start training.

To start, we initialize models, optimizers, a loss function (criterion), and set up variables for plotting and tracking progress:

```
# Configure models
attn_model = 'dot'
hidden_size = 500
n_layers = 2
dropout = 0.1
batch_size = 100
batch_size = 50

# Configure training/optimization
clip = 50.0
teacher_forcing_ratio = 0.5
learning_rate = 0.0001
decoder_learning_ratio = 5.0
n_epochs = 50000
epoch = 0
plot_every = 20
print_every = 100
evaluate_every = 1000

# Initialize models
encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers, dropout=dropout)
decoder = LuongAttnDecoderRNN(attn_model, hidden_size, output_lang.n_words, n_layers, dropout=dropout)

# Initialize optimizers and criterion
encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)
criterion = nn.CrossEntropyLoss()

# Move models to GPU
if USE_CUDA:
    encoder.cuda()
    decoder.cuda()

import sconce
job = sconce.Job('seq2seq-translate', {
    'attn_model': attn_model,
    'n_layers': n_layers,
    'dropout': dropout,
    'hidden_size': hidden_size,
    'learning_rate': learning_rate,
    'clip': clip,
    'teacher_forcing_ratio': teacher_forcing_ratio,
    'decoder_learning_ratio': decoder_learning_ratio,
})
job.plot_every = plot_every
job.log_every = print_every

# Keep track of time elapsed and running averages
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
```

Plus helper functions to print time elapsed and estimated time remaining, given the current time and progress.
```
def as_minutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def time_since(since, percent):
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
```

# Evaluating the network

Evaluation is mostly the same as training, but there are no targets. Instead we always feed the decoder's predictions back to itself. Every time it predicts a word, we add it to the output string. If it predicts the EOS token we stop there. We also store the decoder's attention outputs for each step to display later.

```
def evaluate(input_seq, max_length=MAX_LENGTH):
    input_seqs = [indexes_from_sentence(input_lang, input_seq)]
    input_lengths = [len(input_seqs[0])]
    input_batches = Variable(torch.LongTensor(input_seqs), volatile=True).transpose(0, 1)

    if USE_CUDA:
        input_batches = input_batches.cuda()

    # Set to not-training mode to disable dropout
    encoder.train(False)
    decoder.train(False)

    # Run through encoder
    encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None)

    # Create starting vectors for decoder
    decoder_input = Variable(torch.LongTensor([SOS_token]), volatile=True) # SOS
    decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder

    if USE_CUDA:
        decoder_input = decoder_input.cuda()

    # Store output words and attention states
    decoded_words = []
    decoder_attentions = torch.zeros(max_length + 1, max_length + 1)

    # Run through decoder
    for di in range(max_length):
        decoder_output, decoder_hidden, decoder_attention = decoder(
            decoder_input, decoder_hidden, encoder_outputs
        )
        decoder_attentions[di,:decoder_attention.size(2)] += decoder_attention.squeeze(0).squeeze(0).cpu().data

        # Choose top word from output
        topv, topi = decoder_output.data.topk(1)
        ni = topi[0][0]
        if ni == EOS_token:
            decoded_words.append('<EOS>')
            break
        else:
            decoded_words.append(output_lang.index2word[ni])

        # Next input is chosen word
        decoder_input = Variable(torch.LongTensor([ni]))
        if USE_CUDA:
            decoder_input = decoder_input.cuda()

    # Set back to training mode
    encoder.train(True)
    decoder.train(True)

    return decoded_words, decoder_attentions[:di+1, :len(encoder_outputs)]
```

We can evaluate random sentences from the training set and print out the input, target, and output to make some subjective quality judgements:

```
def evaluate_randomly():
    [input_sentence, target_sentence] = random.choice(pairs)
    evaluate_and_show_attention(input_sentence, target_sentence)
```

# Visualizing attention

A useful property of the attention mechanism is its highly interpretable outputs. Because it is used to weight specific encoder outputs of the input sequence, we can imagine looking where the network is focused most at each time step.
You could simply run `plt.matshow(attentions)` to see attention output displayed as a matrix, with the columns being input steps and rows being output steps:

```
import io
import torchvision
from PIL import Image
import visdom
vis = visdom.Visdom()

def show_plot_visdom():
    buf = io.BytesIO()
    plt.savefig(buf)
    buf.seek(0)
    attn_win = 'attention (%s)' % hostname
    vis.image(torchvision.transforms.ToTensor()(Image.open(buf)), win=attn_win, opts={'title': attn_win})
```

For a better viewing experience we will do the extra work of adding axes and labels:

```
def show_attention(input_sentence, output_words, attentions):
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # Set up axes
    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)

    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    show_plot_visdom()
    plt.show()
    plt.close()

def evaluate_and_show_attention(input_sentence, target_sentence=None):
    output_words, attentions = evaluate(input_sentence)
    output_sentence = ' '.join(output_words)
    print('>', input_sentence)
    if target_sentence is not None:
        print('=', target_sentence)
    print('<', output_sentence)

    show_attention(input_sentence, output_words, attentions)

    # Show input, target, output text in visdom
    win = 'evaluated (%s)' % hostname
    text = '<p>&gt; %s</p><p>= %s</p><p>&lt; %s</p>' % (input_sentence, target_sentence, output_sentence)
    vis.text(text, win=win, opts={'title': win})
```

# Putting it all together

**TODO** Run `train_epochs` for `n_epochs`

To actually train, we call the train function many times, printing a summary as we go.

*Note:* If you're running this notebook you can **train, interrupt, evaluate, and come back to continue training**. Simply run the notebook starting from the following cell (running from the previous cell will reset the models).

```
# Begin!
ecs = []
dcs = []
eca = 0
dca = 0

while epoch < n_epochs:
    epoch += 1

    # Get training data for this cycle
    input_batches, input_lengths, target_batches, target_lengths = random_batch(batch_size)

    # Run the train function
    loss, ec, dc = train(
        input_batches, input_lengths, target_batches, target_lengths,
        encoder, decoder,
        encoder_optimizer, decoder_optimizer, criterion
    )

    # Keep track of loss
    print_loss_total += loss
    plot_loss_total += loss
    eca += ec
    dca += dc

    job.record(epoch, loss)

    if epoch % print_every == 0:
        print_loss_avg = print_loss_total / print_every
        print_loss_total = 0
        print_summary = '%s (%d %d%%) %.4f' % (time_since(start, epoch / n_epochs), epoch, epoch / n_epochs * 100, print_loss_avg)
        print(print_summary)

    if epoch % evaluate_every == 0:
        evaluate_randomly()

    if epoch % plot_every == 0:
        plot_loss_avg = plot_loss_total / plot_every
        plot_losses.append(plot_loss_avg)
        plot_loss_total = 0

        # TODO: Running average helper
        ecs.append(eca / plot_every)
        dcs.append(dca / plot_every)
        ecs_win = 'encoder grad (%s)' % hostname
        dcs_win = 'decoder grad (%s)' % hostname
        vis.line(np.array(ecs), win=ecs_win, opts={'title': ecs_win})
        vis.line(np.array(dcs), win=dcs_win, opts={'title': dcs_win})
        eca = 0
        dca = 0
```

## Plotting training loss

Plotting is done with matplotlib, using the array `plot_losses` that was created while training.
```
def show_plot(points):
    plt.figure()
    fig, ax = plt.subplots()
    loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)

show_plot(plot_losses)

output_words, attentions = evaluate("je suis trop froid .")
plt.matshow(attentions.numpy())
show_plot_visdom()

evaluate_and_show_attention("elle a cinq ans de moins que moi .")

evaluate_and_show_attention("elle est trop petit .")

evaluate_and_show_attention("je ne crains pas de mourir .")

evaluate_and_show_attention("c est un jeune directeur plein de talent .")

evaluate_and_show_attention("est le chien vert aujourd hui ?")

evaluate_and_show_attention("le chat me parle .")

evaluate_and_show_attention("des centaines de personnes furent arretees ici .")

evaluate_and_show_attention("des centaines de chiens furent arretees ici .")

evaluate_and_show_attention("ce fromage est prepare a partir de lait de chevre .")
```

# Exercises

* Try with a different dataset
    * Another language pair
    * Human &rarr; Machine (e.g. IOT commands)
    * Chat &rarr; Response
    * Question &rarr; Answer
* Replace the embeddings with pre-trained word embeddings such as word2vec or GloVe (see the sketch after this list)
* Try with more layers, more hidden units, and more sentences. Compare the training time and results.
* If you use a translation file where pairs have two of the same phrase (`I am test \t I am test`), you can use this as an autoencoder. Try this:
    * Train as an autoencoder
    * Save only the Encoder network
    * Train a new Decoder for translation from there
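For the pre-trained embedding exercise, one possible starting point (a sketch under assumptions, not part of the original notebook) is to copy an externally built vector matrix into an `nn.Embedding` layer's weights. The `pretrained` matrix below is a placeholder filled with random values; in practice each row would hold the GloVe or word2vec vector for the word at the matching index in `index2word`.

```
# Sketch: seed nn.Embedding with pre-trained vectors (placeholder values used here).
# Rows must follow the Lang word indexes, one row per word in the vocabulary.
pretrained = torch.randn(input_lang.n_words, hidden_size)  # stand-in for real GloVe/word2vec rows

embedding = nn.Embedding(input_lang.n_words, hidden_size)
embedding.weight.data.copy_(pretrained)   # load the vectors
embedding.weight.requires_grad = False    # optionally freeze them
```

The same idea applies to the decoder's output-side embedding. If you freeze the weights, remember to exclude them when constructing the optimizer, since older PyTorch versions refuse to optimize parameters that do not require gradients.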
github_jupyter
[KEY: > input, = target, < output] > il est en train de peindre un tableau . = he is painting a picture . < he is painting a picture . > pourquoi ne pas essayer ce vin delicieux ? = why not try that delicious wine ? < why not try that delicious wine ? > elle n est pas poete mais romanciere . = she is not a poet but a novelist . < she not not a poet but a novelist . > vous etes trop maigre . = you re too skinny . < you re all alone . import unicodedata import string import re import random import time import datetime import math import socket hostname = socket.gethostname() import torch import torch.nn as nn from torch.autograd import Variable from torch import optim import torch.nn.functional as F from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence#, masked_cross_entropy from masked_cross_entropy import * import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np %matplotlib inline USE_CUDA = True I am cold. Je suis froid. PAD_token = 0 SOS_token = 1 EOS_token = 2 class Lang: def __init__(self, name): self.name = name self.trimmed = False self.word2index = {} self.word2count = {} self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"} self.n_words = 3 # Count default tokens def index_words(self, sentence): for word in sentence.split(' '): self.index_word(word) def index_word(self, word): if word not in self.word2index: self.word2index[word] = self.n_words self.word2count[word] = 1 self.index2word[self.n_words] = word self.n_words += 1 else: self.word2count[word] += 1 # Remove words below a certain count threshold def trim(self, min_count): if self.trimmed: return self.trimmed = True keep_words = [] for k, v in self.word2count.items(): if v >= min_count: keep_words.append(k) print('keep_words %s / %s = %.4f' % ( len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index) )) # Reinitialize dictionaries self.word2index = {} self.word2count = {} self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"} self.n_words = 3 # Count default tokens for word in keep_words: self.index_word(word) # Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427 def unicode_to_ascii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) # Lowercase, trim, and remove non-letter characters def normalize_string(s): s = unicode_to_ascii(s.lower().strip()) s = re.sub(r"([,.!?])", r" \1 ", s) s = re.sub(r"[^a-zA-Z,.!?]+", r" ", s) s = re.sub(r"\s+", r" ", s).strip() return s def read_langs(lang1, lang2, reverse=False): print("Reading lines...") # Read the file and split into lines # filename = '../data/%s-%s.txt' % (lang1, lang2) filename = '../%s-%s.txt' % (lang1, lang2) lines = open(filename).read().strip().split('\n') # Split every line into pairs and normalize pairs = [[normalize_string(s) for s in l.split('\t')] for l in lines] # Reverse pairs, make Lang instances if reverse: pairs = [list(reversed(p)) for p in pairs] input_lang = Lang(lang2) output_lang = Lang(lang1) else: input_lang = Lang(lang1) output_lang = Lang(lang2) return input_lang, output_lang, pairs MIN_LENGTH = 3 MAX_LENGTH = 25 def filter_pairs(pairs): filtered_pairs = [] for pair in pairs: if len(pair[0]) >= MIN_LENGTH and len(pair[0]) <= MAX_LENGTH \ and len(pair[1]) >= MIN_LENGTH and len(pair[1]) <= MAX_LENGTH: filtered_pairs.append(pair) return filtered_pairs def prepare_data(lang1_name, lang2_name, reverse=False): input_lang, output_lang, pairs = read_langs(lang1_name, lang2_name, reverse) print("Read %d sentence 
pairs" % len(pairs)) pairs = filter_pairs(pairs) print("Filtered to %d pairs" % len(pairs)) print("Indexing words...") for pair in pairs: input_lang.index_words(pair[0]) output_lang.index_words(pair[1]) print('Indexed %d words in input language, %d words in output' % (input_lang.n_words, output_lang.n_words)) return input_lang, output_lang, pairs input_lang, output_lang, pairs = prepare_data('eng', 'fra', True) MIN_COUNT = 5 input_lang.trim(MIN_COUNT) output_lang.trim(MIN_COUNT) keep_pairs = [] for pair in pairs: input_sentence = pair[0] output_sentence = pair[1] keep_input = True keep_output = True for word in input_sentence.split(' '): if word not in input_lang.word2index: keep_input = False break for word in output_sentence.split(' '): if word not in output_lang.word2index: keep_output = False break # Remove if pair doesn't match input and output conditions if keep_input and keep_output: keep_pairs.append(pair) print("Trimmed from %d pairs to %d, %.4f of total" % (len(pairs), len(keep_pairs), len(keep_pairs) / len(pairs))) pairs = keep_pairs # Return a list of indexes, one for each word in the sentence, plus EOS def indexes_from_sentence(lang, sentence): return [lang.word2index[word] for word in sentence.split(' ')] + [EOS_token] # Pad a with the PAD symbol def pad_seq(seq, max_length): seq += [PAD_token for i in range(max_length - len(seq))] return seq def random_batch(batch_size): input_seqs = [] target_seqs = [] # Choose random pairs for i in range(batch_size): pair = random.choice(pairs) input_seqs.append(indexes_from_sentence(input_lang, pair[0])) target_seqs.append(indexes_from_sentence(output_lang, pair[1])) # Zip into pairs, sort by length (descending), unzip seq_pairs = sorted(zip(input_seqs, target_seqs), key=lambda p: len(p[0]), reverse=True) input_seqs, target_seqs = zip(*seq_pairs) # For input and target sequences, get array of lengths and pad with 0s to max length input_lengths = [len(s) for s in input_seqs] input_padded = [pad_seq(s, max(input_lengths)) for s in input_seqs] target_lengths = [len(s) for s in target_seqs] target_padded = [pad_seq(s, max(target_lengths)) for s in target_seqs] # Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size) input_var = Variable(torch.LongTensor(input_padded)).transpose(0, 1) target_var = Variable(torch.LongTensor(target_padded)).transpose(0, 1) if USE_CUDA: input_var = input_var.cuda() target_var = target_var.cuda() return input_var, input_lengths, target_var, target_lengths random_batch(2) class EncoderRNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1): super(EncoderRNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.dropout = dropout self.embedding = nn.Embedding(input_size, hidden_size) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True) def forward(self, input_seqs, input_lengths, hidden=None): # Note: we run this all at once (over multiple batches of multiple sequences) embedded = self.embedding(input_seqs) packed = torch.nn.utils.rnn.pack_padded_sequence(embedded, input_lengths) outputs, hidden = self.gru(packed, hidden) outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded) outputs = outputs[:, :, :self.hidden_size] + outputs[:, : ,self.hidden_size:] # Sum bidirectional outputs return outputs, hidden class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = 
method self.hidden_size = hidden_size if self.method == 'general': self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == 'concat': self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.FloatTensor(1, hidden_size)) def forward(self, hidden, encoder_outputs): max_len = encoder_outputs.size(0) this_batch_size = encoder_outputs.size(1) # Create variable to store attention energies attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S if USE_CUDA: attn_energies = attn_energies.cuda() # For each batch of encoder outputs for b in range(this_batch_size): # Calculate energy for each encoder output for i in range(max_len): attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0)) # Normalize energies to weights in range 0 to 1, resize to 1 x B x S return F.softmax(attn_energies).unsqueeze(1) def score(self, hidden, encoder_output): if self.method == 'dot': energy = hidden.dot(encoder_output) return energy elif self.method == 'general': energy = self.attn(encoder_output) energy = hidden.dot(energy) return energy elif self.method == 'concat': energy = self.attn(torch.cat((hidden, encoder_output), 1)) energy = self.v.dot(energy) return energy class BahdanauAttnDecoderRNN(nn.Module): def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1): super(BahdanauAttnDecoderRNN, self).__init__() # Define parameters self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout_p = dropout_p self.max_length = max_length # Define layers self.embedding = nn.Embedding(output_size, hidden_size) self.dropout = nn.Dropout(dropout_p) self.attn = Attn('concat', hidden_size) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout_p) self.out = nn.Linear(hidden_size, output_size) def forward(self, word_input, last_hidden, encoder_outputs): # Note: we run this one step at a time # TODO: FIX BATCHING # Get the embedding of the current input word (last output word) word_embedded = self.embedding(word_input).view(1, 1, -1) # S=1 x B x N word_embedded = self.dropout(word_embedded) # Calculate attention weights and apply to encoder outputs attn_weights = self.attn(last_hidden[-1], encoder_outputs) context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x 1 x N context = context.transpose(0, 1) # 1 x B x N # Combine embedded input word and attended context, run through RNN rnn_input = torch.cat((word_embedded, context), 2) output, hidden = self.gru(rnn_input, last_hidden) # Final output layer output = output.squeeze(0) # B x N output = F.log_softmax(self.out(torch.cat((output, context), 1))) # Return final output, hidden state, and attention weights (for visualization) return output, hidden, attn_weights class LuongAttnDecoderRNN(nn.Module): def __init__(self, attn_model, hidden_size, output_size, n_layers=1, dropout=0.1): super(LuongAttnDecoderRNN, self).__init__() # Keep for reference self.attn_model = attn_model self.hidden_size = hidden_size self.output_size = output_size self.n_layers = n_layers self.dropout = dropout # Define layers self.embedding = nn.Embedding(output_size, hidden_size) self.embedding_dropout = nn.Dropout(dropout) self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=dropout) self.concat = nn.Linear(hidden_size * 2, hidden_size) self.out = nn.Linear(hidden_size, output_size) # Choose attention model if attn_model != 'none': self.attn = Attn(attn_model, hidden_size) def forward(self, input_seq, last_hidden, encoder_outputs): # Note: we 
run this one step at a time # Get the embedding of the current input word (last output word) batch_size = input_seq.size(0) embedded = self.embedding(input_seq) embedded = self.embedding_dropout(embedded) embedded = embedded.view(1, batch_size, self.hidden_size) # S=1 x B x N # Get current hidden state from input word and last hidden state rnn_output, hidden = self.gru(embedded, last_hidden) # Calculate attention from current RNN state and all encoder outputs; # apply to encoder outputs to get weighted average attn_weights = self.attn(rnn_output, encoder_outputs) context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x S=1 x N # Attentional vector using the RNN hidden state and context vector # concatenated together (Luong eq. 5) rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N context = context.squeeze(1) # B x S=1 x N -> B x N concat_input = torch.cat((rnn_output, context), 1) concat_output = F.tanh(self.concat(concat_input)) # Finally predict next token (Luong eq. 6, without softmax) output = self.out(concat_output) # Return final output, hidden state, and attention weights (for visualization) return output, hidden, attn_weights small_batch_size = 3 input_batches, input_lengths, target_batches, target_lengths = random_batch(small_batch_size) print('input_batches', input_batches.size()) # (max_len x batch_size) print('target_batches', target_batches.size()) # (max_len x batch_size) small_hidden_size = 8 small_n_layers = 2 encoder_test = EncoderRNN(input_lang.n_words, small_hidden_size, small_n_layers) decoder_test = LuongAttnDecoderRNN('general', small_hidden_size, output_lang.n_words, small_n_layers) if USE_CUDA: encoder_test.cuda() decoder_test.cuda() encoder_outputs, encoder_hidden = encoder_test(input_batches, input_lengths, None) print('encoder_outputs', encoder_outputs.size()) # max_len x batch_size x hidden_size print('encoder_hidden', encoder_hidden.size()) # n_layers * 2 x batch_size x hidden_size max_target_length = max(target_lengths) # Prepare decoder input and outputs decoder_input = Variable(torch.LongTensor([SOS_token] * small_batch_size)) decoder_hidden = encoder_hidden[:decoder_test.n_layers] # Use last (forward) hidden state from encoder all_decoder_outputs = Variable(torch.zeros(max_target_length, small_batch_size, decoder_test.output_size)) if USE_CUDA: all_decoder_outputs = all_decoder_outputs.cuda() decoder_input = decoder_input.cuda() # Run through decoder one time step at a time for t in range(max_target_length): decoder_output, decoder_hidden, decoder_attn = decoder_test( decoder_input, decoder_hidden, encoder_outputs ) all_decoder_outputs[t] = decoder_output # Store this step's outputs decoder_input = target_batches[t] # Next input is current target # Test masked cross entropy loss loss = masked_cross_entropy( all_decoder_outputs.transpose(0, 1).contiguous(), target_batches.transpose(0, 1).contiguous(), target_lengths ) print('loss', loss.data[0]) def train(input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH): # Zero gradients of both optimizers encoder_optimizer.zero_grad() decoder_optimizer.zero_grad() loss = 0 # Added onto for each word # Run words through encoder encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None) # Prepare input and output variables decoder_input = Variable(torch.LongTensor([SOS_token] * batch_size)) decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder 
max_target_length = max(target_lengths) all_decoder_outputs = Variable(torch.zeros(max_target_length, batch_size, decoder.output_size)) # Move new Variables to CUDA if USE_CUDA: decoder_input = decoder_input.cuda() all_decoder_outputs = all_decoder_outputs.cuda() # Run through decoder one time step at a time for t in range(max_target_length): decoder_output, decoder_hidden, decoder_attn = decoder( decoder_input, decoder_hidden, encoder_outputs ) all_decoder_outputs[t] = decoder_output decoder_input = target_batches[t] # Next input is current target # Loss calculation and backpropagation loss = masked_cross_entropy( all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq target_batches.transpose(0, 1).contiguous(), # -> batch x seq target_lengths ) loss.backward() # Clip gradient norms ec = torch.nn.utils.clip_grad_norm(encoder.parameters(), clip) dc = torch.nn.utils.clip_grad_norm(decoder.parameters(), clip) # Update parameters with optimizers encoder_optimizer.step() decoder_optimizer.step() return loss.data[0], ec, dc # Configure models attn_model = 'dot' hidden_size = 500 n_layers = 2 dropout = 0.1 batch_size = 100 batch_size = 50 # Configure training/optimization clip = 50.0 teacher_forcing_ratio = 0.5 learning_rate = 0.0001 decoder_learning_ratio = 5.0 n_epochs = 50000 epoch = 0 plot_every = 20 print_every = 100 evaluate_every = 1000 # Initialize models encoder = EncoderRNN(input_lang.n_words, hidden_size, n_layers, dropout=dropout) decoder = LuongAttnDecoderRNN(attn_model, hidden_size, output_lang.n_words, n_layers, dropout=dropout) # Initialize optimizers and criterion encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate) decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio) criterion = nn.CrossEntropyLoss() # Move models to GPU if USE_CUDA: encoder.cuda() decoder.cuda() import sconce job = sconce.Job('seq2seq-translate', { 'attn_model': attn_model, 'n_layers': n_layers, 'dropout': dropout, 'hidden_size': hidden_size, 'learning_rate': learning_rate, 'clip': clip, 'teacher_forcing_ratio': teacher_forcing_ratio, 'decoder_learning_ratio': decoder_learning_ratio, }) job.plot_every = plot_every job.log_every = print_every # Keep track of time elapsed and running averages start = time.time() plot_losses = [] print_loss_total = 0 # Reset every print_every plot_loss_total = 0 # Reset every plot_every def as_minutes(s): m = math.floor(s / 60) s -= m * 60 return '%dm %ds' % (m, s) def time_since(since, percent): now = time.time() s = now - since es = s / (percent) rs = es - s return '%s (- %s)' % (as_minutes(s), as_minutes(rs)) def evaluate(input_seq, max_length=MAX_LENGTH): input_seqs = [indexes_from_sentence(input_lang, input_seq)] input_lengths = [len(input_seqs[0])] input_batches = Variable(torch.LongTensor(input_seqs), volatile=True).transpose(0, 1) if USE_CUDA: input_batches = input_batches.cuda() # Set to not-training mode to disable dropout encoder.train(False) decoder.train(False) # Run through encoder encoder_outputs, encoder_hidden = encoder(input_batches, input_lengths, None) # Create starting vectors for decoder decoder_input = Variable(torch.LongTensor([SOS_token]), volatile=True) # SOS decoder_hidden = encoder_hidden[:decoder.n_layers] # Use last (forward) hidden state from encoder if USE_CUDA: decoder_input = decoder_input.cuda() # Store output words and attention states decoded_words = [] decoder_attentions = torch.zeros(max_length + 1, max_length + 1) # Run through decoder for di in range(max_length): 
decoder_output, decoder_hidden, decoder_attention = decoder( decoder_input, decoder_hidden, encoder_outputs ) decoder_attentions[di,:decoder_attention.size(2)] += decoder_attention.squeeze(0).squeeze(0).cpu().data # Choose top word from output topv, topi = decoder_output.data.topk(1) ni = topi[0][0] if ni == EOS_token: decoded_words.append('<EOS>') break else: decoded_words.append(output_lang.index2word[ni]) # Next input is chosen word decoder_input = Variable(torch.LongTensor([ni])) if USE_CUDA: decoder_input = decoder_input.cuda() # Set back to training mode encoder.train(True) decoder.train(True) return decoded_words, decoder_attentions[:di+1, :len(encoder_outputs)] def evaluate_randomly(): [input_sentence, target_sentence] = random.choice(pairs) evaluate_and_show_attention(input_sentence, target_sentence) import io import torchvision from PIL import Image import visdom vis = visdom.Visdom() def show_plot_visdom(): buf = io.BytesIO() plt.savefig(buf) buf.seek(0) attn_win = 'attention (%s)' % hostname vis.image(torchvision.transforms.ToTensor()(Image.open(buf)), win=attn_win, opts={'title': attn_win}) def show_attention(input_sentence, output_words, attentions): # Set up figure with colorbar fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(attentions.numpy(), cmap='bone') fig.colorbar(cax) # Set up axes ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90) ax.set_yticklabels([''] + output_words) # Show label at every tick ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) show_plot_visdom() plt.show() plt.close() def evaluate_and_show_attention(input_sentence, target_sentence=None): output_words, attentions = evaluate(input_sentence) output_sentence = ' '.join(output_words) print('>', input_sentence) if target_sentence is not None: print('=', target_sentence) print('<', output_sentence) show_attention(input_sentence, output_words, attentions) # Show input, target, output text in visdom win = 'evaluted (%s)' % hostname text = '<p>&gt; %s</p><p>= %s</p><p>&lt; %s</p>' % (input_sentence, target_sentence, output_sentence) vis.text(text, win=win, opts={'title': win}) # Begin! 
ecs = [] dcs = [] eca = 0 dca = 0 while epoch < n_epochs: epoch += 1 # Get training data for this cycle input_batches, input_lengths, target_batches, target_lengths = random_batch(batch_size) # Run the train function loss, ec, dc = train( input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion ) # Keep track of loss print_loss_total += loss plot_loss_total += loss eca += ec dca += dc job.record(epoch, loss) if epoch % print_every == 0: print_loss_avg = print_loss_total / print_every print_loss_total = 0 print_summary = '%s (%d %d%%) %.4f' % (time_since(start, epoch / n_epochs), epoch, epoch / n_epochs * 100, print_loss_avg) print(print_summary) if epoch % evaluate_every == 0: evaluate_randomly() if epoch % plot_every == 0: plot_loss_avg = plot_loss_total / plot_every plot_losses.append(plot_loss_avg) plot_loss_total = 0 # TODO: Running average helper ecs.append(eca / plot_every) dcs.append(dca / plot_every) ecs_win = 'encoder grad (%s)' % hostname dcs_win = 'decoder grad (%s)' % hostname vis.line(np.array(ecs), win=ecs_win, opts={'title': ecs_win}) vis.line(np.array(dcs), win=dcs_win, opts={'title': dcs_win}) eca = 0 dca = 0 def show_plot(points): plt.figure() fig, ax = plt.subplots() loc = ticker.MultipleLocator(base=0.2) # put ticks at regular intervals ax.yaxis.set_major_locator(loc) plt.plot(points) show_plot(plot_losses) output_words, attentions = evaluate("je suis trop froid .") plt.matshow(attentions.numpy()) show_plot_visdom() evaluate_and_show_attention("elle a cinq ans de moins que moi .") evaluate_and_show_attention("elle est trop petit .") evaluate_and_show_attention("je ne crains pas de mourir .") evaluate_and_show_attention("c est un jeune directeur plein de talent .") evaluate_and_show_attention("est le chien vert aujourd hui ?") evaluate_and_show_attention("le chat me parle .") evaluate_and_show_attention("des centaines de personnes furent arretees ici .") evaluate_and_show_attention("des centaines de chiens furent arretees ici .") evaluate_and_show_attention("ce fromage est prepare a partir de lait de chevre .")
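# --- Not part of the original notebook: a minimal sketch of the "running average
# --- helper" that the TODO in the plotting block above refers to. It keeps an
# --- exponentially weighted average of a scalar (e.g. a loss or gradient norm),
# --- instead of accumulating and re-zeroing eca/dca by hand. The class name,
# --- smoothing factor, and the usage shown in the comment are illustrative
# --- assumptions, not code from the tutorial.
class RunningAverage:
    def __init__(self, smoothing=0.9):
        self.smoothing = smoothing
        self.value = None

    def update(self, x):
        # First observation initializes the average; later ones are blended in.
        if self.value is None:
            self.value = x
        else:
            self.value = self.smoothing * self.value + (1 - self.smoothing) * x
        return self.value

# Hypothetical usage inside the training loop above:
#   smoothed_loss = RunningAverage()
#   ...
#   job.record(epoch, smoothed_loss.update(loss))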
```
```
# Dependencies and Setup
import pandas as pd

# File to Load (Remember to change the path if needed.)
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"

# Read the School Data and Student Data and store into a Pandas DataFrame
school_data_df = pd.read_csv(school_data_to_load)
student_data_df = pd.read_csv(student_data_to_load)

# Cleaning Student Names and Replacing Substrings in a Python String
# Add each prefix and suffix to remove to a list.
prefixes_suffixes = ["Dr. ", "Mr. ", "Ms. ", "Mrs. ", "Miss ", " MD", " DDS", " DVM", " PhD"]

# Iterate through the words in the "prefixes_suffixes" list and replace them with an empty string, "".
for word in prefixes_suffixes:
    student_data_df["student_name"] = student_data_df["student_name"].str.replace(word, "")

# Check names.
student_data_df.head(10)
```

## Deliverable 1: Replace the reading and math scores.

### Replace the 9th grade reading and math scores at Thomas High School with NaN.

```
# Install numpy using conda install numpy or pip install numpy.
# Step 1. Import numpy as np.
import numpy as np

# Step 2. Use the loc method on the student_data_df to select all the reading scores
# from the 9th grade at Thomas High School and replace them with NaN.
student_data_df.loc[
    (student_data_df["school_name"] == "Thomas High School") &
    (student_data_df["grade"] == "9th") &
    (student_data_df["reading_score"] > 0), "reading_score"] = np.nan

student_data_df

# Step 3. Use the loc method on the student_data_df to select all the math scores
# from the 9th grade at Thomas High School and replace them with NaN.
student_data_df.loc[
    (student_data_df["school_name"] == "Thomas High School") &
    (student_data_df["grade"] == "9th") &
    (student_data_df["math_score"] > 0), "math_score"] = np.nan

# Step 4. Check the student data for NaN's.
student_data_df
```

## Deliverable 2: Repeat the school district analysis

### District Summary

```
# Combine the data into a single dataset
school_data_complete_df = pd.merge(student_data_df, school_data_df, how="left", on=["school_name", "school_name"])

# Calculate the Totals (Schools and Students)
school_count = len(school_data_complete_df["school_name"].unique())
student_count = school_data_complete_df["Student ID"].count()

# Calculate the Total Budget
total_budget = school_data_df["budget"].sum()

# Calculate the Average Scores using the "clean_student_data".
average_reading_score = school_data_complete_df["reading_score"].mean()
average_math_score = school_data_complete_df["math_score"].mean()

# Step 1. Get the number of students that are in ninth grade at Thomas High School.
# These students have no grades.
missing_grades = school_data_complete_df[(school_data_complete_df["math_score"].isna())].count()["student_name"]

# Get the total student count
student_count = school_data_complete_df["Student ID"].count()

# Step 2. Subtract the number of students that are in ninth grade at
# Thomas High School from the total student count to get the new total student count.
new_student_count = (student_count - missing_grades)

# Calculate the passing rates using the "clean_student_data".
passing_math_count = school_data_complete_df[(school_data_complete_df["math_score"] >= 70)].count()["student_name"]
passing_reading_count = school_data_complete_df[(school_data_complete_df["reading_score"] >= 70)].count()["student_name"]

# Step 3. Calculate the passing percentages with the new total student count.
passing_math_percentage = passing_math_count/ float(new_student_count) * 100 passing_reading_percentage = passing_reading_count/ float(new_student_count) * 100 # Calculate the students who passed both reading and math. passing_math_reading = school_data_complete_df[(school_data_complete_df["math_score"] >= 70) & (school_data_complete_df["reading_score"] >= 70)] # Calculate the number of students that passed both reading and math. overall_passing_math_reading_count = passing_math_reading["student_name"].count() # Step 4.Calculate the overall passing percentage with new total student count. overall_passing_percentage = overall_passing_math_reading_count/ new_student_count * 100 # Create a DataFrame district_summary_df = pd.DataFrame( [{"Total Schools": school_count, "Total Students": student_count, "Total Budget": total_budget, "Average Math Score": average_math_score, "Average Reading Score": average_reading_score, "% Passing Math": passing_math_percentage, "% Passing Reading": passing_reading_percentage, "% Overall Passing": overall_passing_percentage}]) # Format the "Total Students" to have the comma for a thousands separator. district_summary_df["Total Students"] = district_summary_df["Total Students"].map("{:,}".format) # Format the "Total Budget" to have the comma for a thousands separator, a decimal separator and a "$". district_summary_df["Total Budget"] = district_summary_df["Total Budget"].map("${:,.2f}".format) # Format the columns. district_summary_df["Average Math Score"] = district_summary_df["Average Math Score"].map("{:.1f}".format) district_summary_df["Average Reading Score"] = district_summary_df["Average Reading Score"].map("{:.1f}".format) district_summary_df["% Passing Math"] = district_summary_df["% Passing Math"].map("{:.1f}".format) district_summary_df["% Passing Reading"] = district_summary_df["% Passing Reading"].map("{:.1f}".format) district_summary_df["% Overall Passing"] = district_summary_df["% Overall Passing"].map("{:.1f}".format) # Display the data frame district_summary_df ``` ## School Summary ``` # Determine the School Type per_school_types = school_data_df.set_index(["school_name"])["type"] # Calculate the total student count. per_school_counts = school_data_complete_df["school_name"].value_counts() # Calculate the total school budget and per capita spending per_school_budget = school_data_complete_df.groupby(["school_name"]).mean()["budget"] # Calculate the per capita spending. per_school_capita = per_school_budget / per_school_counts # Calculate the average test scores. per_school_math = school_data_complete_df.groupby(["school_name"]).mean()["math_score"] per_school_reading = school_data_complete_df.groupby(["school_name"]).mean()["reading_score"] # Calculate the passing scores by creating a filtered DataFrame. per_school_passing_math = school_data_complete_df[(school_data_complete_df["math_score"] >= 70)] per_school_passing_reading = school_data_complete_df[(school_data_complete_df["reading_score"] >= 70)] # Calculate the number of students passing math and passing reading by school. per_school_passing_math = per_school_passing_math.groupby(["school_name"]).count()["student_name"] per_school_passing_reading = per_school_passing_reading.groupby(["school_name"]).count()["student_name"] # Calculate the percentage of passing math and reading scores per school. 
per_school_passing_math = per_school_passing_math / per_school_counts * 100 per_school_passing_reading = per_school_passing_reading / per_school_counts * 100 # Calculate the students who passed both reading and math. per_passing_math_reading = school_data_complete_df[(school_data_complete_df["reading_score"] >= 70) & (school_data_complete_df["math_score"] >= 70)] # Calculate the number of students passing math and passing reading by school. per_passing_math_reading = per_passing_math_reading.groupby(["school_name"]).count()["student_name"] # Calculate the percentage of passing math and reading scores per school. per_overall_passing_percentage = per_passing_math_reading / per_school_counts * 100 # Create the DataFrame per_school_summary_df = pd.DataFrame({ "School Type": per_school_types, "Total Students": per_school_counts, "Total School Budget": per_school_budget, "Per Student Budget": per_school_capita, "Average Math Score": per_school_math, "Average Reading Score": per_school_reading, "% Passing Math": per_school_passing_math, "% Passing Reading": per_school_passing_reading, "% Overall Passing": per_overall_passing_percentage}) # per_school_summary_df.head() # Format the Total School Budget and the Per Student Budget per_school_summary_df["Total School Budget"] = per_school_summary_df["Total School Budget"].map("${:,.2f}".format) per_school_summary_df["Per Student Budget"] = per_school_summary_df["Per Student Budget"].map("${:,.2f}".format) # Display the data frame per_school_summary_df THS_summary = (per_school_summary_df.loc[["Thomas High School"]]) # Step 5. Get the number of 10th-12th graders from Thomas High School (THS). THS_tenth_graders = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "10th")].count()["Student ID"] THS_eleventh_graders = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "11th")].count()["Student ID"] THS_twelfth_graders = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "12th")].count()["Student ID"] # Step 6. Get all the students passing math from THS THS_tenth_graders_passing_math = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "10th") & (student_data_df["math_score"] >= 70)].count()["Student ID"] THS_eleventh_graders_passing_math = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "11th") & (student_data_df["math_score"] >= 70)].count()["Student ID"] THS_twelfth_graders_passing_math = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "12th") & (student_data_df["math_score"] >= 70)].count()["Student ID"] # Step 7. 
Get all the students passing reading from THS THS_tenth_graders_passing_reading = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "10th") & (student_data_df["reading_score"] >= 70)].count()["Student ID"] THS_eleventh_graders_passing_reading = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "11th") & (student_data_df["reading_score"] >= 70)].count()["Student ID"] THS_twelfth_graders_passing_reading = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "12th") & (student_data_df["reading_score"] >= 70)].count()["Student ID"] # Step 8. Get all the students passing math and reading from THS THS_tenth_graders_passing_overall = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "10th") & (student_data_df["math_score"] >= 70) & (student_data_df["reading_score"] >= 70)].count()["Student ID"] THS_eleventh_graders_passing_overall = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "11th") & (student_data_df["math_score"] >= 70) & (student_data_df["reading_score"] >= 70)].count()["Student ID"] THS_twelfth_graders_passing_overall = student_data_df.loc[ (student_data_df["school_name"] == "Thomas High School")& (student_data_df["grade"] == "12th") & (student_data_df["math_score"] >= 70) & (student_data_df["reading_score"] >= 70)].count()["Student ID"] # Step 9. Calculate the percentage of 10th-12th grade students passing math from Thomas High School. THS_tenth_graders_passing_math_percentage = THS_tenth_graders_passing_math/ float(THS_tenth_graders) * 100 THS_eleventh_graders_passing_math_percentage = THS_eleventh_graders_passing_math/ float(THS_eleventh_graders) * 100 THS_twelfth_graders_passing_math_percentage = THS_twelfth_graders_passing_math/ float(THS_twelfth_graders) * 100 # Step 10. Calculate the percentage of 10th-12th grade students passing reading from Thomas High School. THS_tenth_graders_passing_reading_percentage = THS_tenth_graders_passing_reading/ float(THS_tenth_graders) * 100 THS_eleventh_graders_passing_reading_percentage = THS_eleventh_graders_passing_reading/ float(THS_eleventh_graders) * 100 THS_twelfth_graders_passing_reading_percentage = THS_twelfth_graders_passing_reading/ float(THS_twelfth_graders) * 100 # Step 11. Calculate the overall passing percentage of 10th-12th grade from Thomas High School. THS_tenth_graders_passing_overall_percentage = THS_tenth_graders_passing_overall/ float(THS_tenth_graders) * 100 THS_eleventh_graders_passing_overall_percentage = THS_eleventh_graders_passing_overall/ float(THS_eleventh_graders) * 100 THS_twelfth_graders_passing_overall_percentage = THS_twelfth_graders_passing_overall/ float(THS_twelfth_graders) * 100 # Step 12. Replace the passing math percent for Thomas High School in the per_school_summary_df. THS_passing_math = (THS_tenth_graders_passing_math + THS_eleventh_graders_passing_math + THS_twelfth_graders_passing_math) THS_studentbody = (THS_tenth_graders + THS_eleventh_graders + THS_twelfth_graders ) THS_passing_math_percentage = THS_passing_math/ THS_studentbody per_school_summary_df.at['Thomas High School','% Passing Math'] = THS_passing_math_percentage * 100 # Step 13. Replace the passing reading percentage for Thomas High School in the per_school_summary_df. 
THS_passing_reading = (THS_tenth_graders_passing_reading + THS_eleventh_graders_passing_reading + THS_twelfth_graders_passing_reading) THS_studentbody = (THS_tenth_graders + THS_eleventh_graders + THS_twelfth_graders ) THS_passing_reading_percentage = THS_passing_reading/ THS_studentbody per_school_summary_df.at['Thomas High School','% Passing Reading'] = THS_passing_reading_percentage * 100 # Step 14. Replace the overall passing percentage for Thomas High School in the per_school_summary_df. THS_passing_overall = (THS_tenth_graders_passing_overall + THS_eleventh_graders_passing_overall + THS_twelfth_graders_passing_overall) THS_studentbody = (THS_tenth_graders + THS_eleventh_graders + THS_twelfth_graders ) THS_passing_overall_percentage = THS_passing_overall/ THS_studentbody per_school_summary_df.at['Thomas High School','% Overall Passing'] = THS_passing_overall_percentage * 100 per_school_summary_df THS_summary = (per_school_summary_df.loc[["Thomas High School"]]) ``` ## High and Low Performing Schools ``` # Sort and show top five schools. top_schools = per_school_summary_df.sort_values([ "% Overall Passing"], ascending = False) top_schools.head() # Sort and show top five schools. bottom_schools = per_school_summary_df.sort_values ([ "% Overall Passing"] , ascending = True) bottom_schools.head() ``` ## Math and Reading Scores by Grade ``` # Create a Series of scores by grade levels using conditionals. ninth_graders = school_data_complete_df[(school_data_complete_df["grade"] == "9th")] tenth_graders = school_data_complete_df[(school_data_complete_df["grade"] == "10th")] eleventh_graders = school_data_complete_df[(school_data_complete_df["grade"] == "11th")] twelfth_graders = school_data_complete_df[(school_data_complete_df["grade"] == "12th")] # Group each school Series by the school name for the average math score. ninth_graders_math_scores = ninth_graders.groupby(["school_name"]).mean()["math_score"] tenth_graders_math_scores = tenth_graders.groupby(["school_name"]).mean()["math_score"] eleventh_graders_math_scores = eleventh_graders.groupby(["school_name"]).mean()["math_score"] twelfth_graders_math_scores = twelfth_graders.groupby(["school_name"]).mean()["math_score"] # Group each school Series by the school name for the average reading score. ninth_graders_reading_scores = ninth_graders.groupby(["school_name"]).mean()["reading_score"] tenth_graders_reading_scores = tenth_graders.groupby(["school_name"]).mean()["reading_score"] eleventh_graders_reading_scores = eleventh_graders.groupby(["school_name"]).mean()["reading_score"] twelfth_graders_reading_scores = twelfth_graders.groupby(["school_name"]).mean()["reading_score"] # Combine each Series for average math scores by school into single data frame. math_scores_by_grade = pd.DataFrame({ "9th": ninth_graders_math_scores, "10th": tenth_graders_math_scores, "11th": eleventh_graders_math_scores, "12th": twelfth_graders_math_scores }) # Combine each Series for average reading scores by school into single data frame. reading_scores_by_grade = pd.DataFrame({ "9th": ninth_graders_reading_scores, "10th": tenth_graders_reading_scores, "11th": eleventh_graders_reading_scores, "12th": twelfth_graders_reading_scores }) # Format each grade column. 
math_scores_by_grade["9th"] = math_scores_by_grade["9th"].map("{:,.1f}".format) math_scores_by_grade["10th"] = math_scores_by_grade["10th"].map("{:,.1f}".format) math_scores_by_grade["11th"] = math_scores_by_grade["11th"].map("{:,.1f}".format) math_scores_by_grade["12th"] = math_scores_by_grade["12th"].map("{:,.1f}".format) # Format each grade column. reading_scores_by_grade["9th"] = reading_scores_by_grade["9th"].map("{:,.1f}".format) reading_scores_by_grade["10th"] = reading_scores_by_grade["10th"].map("{:,.1f}".format) reading_scores_by_grade["11th"] = reading_scores_by_grade["11th"].map("{:,.1f}".format) reading_scores_by_grade["12th"] = reading_scores_by_grade["12th"].map("{:,.1f}".format) # Remove the index. reading_scores_by_grade = reading_scores_by_grade[["9th", "10th", "11th", "12th"]] reading_scores_by_grade.index.name = None # Display the data frame reading_scores_by_grade # Remove the index. math_scores_by_grade = math_scores_by_grade[["9th", "10th", "11th", "12th"]] math_scores_by_grade.index.name = None # Display the data frame math_scores_by_grade ``` ## Scores by School Spending ``` # Establish the spending bins and group names. spending_bins = [0, 585, 630, 645, 675] group_name = ["<$584", "$585 - $629", "$630 - $644" , "$645 - $675"] per_school_summary_df["Spending Ranges (Per Student)"] = pd.cut(per_school_capita, spending_bins, labels = group_name) # Calculate averages for the desired columns. spending_math_scores = per_school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["Average Math Score"] spending_reading_scores = per_school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["Average Reading Score"] spending_passing_math = per_school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Math"] spending_passing_reading = per_school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Passing Reading"] spending_passing_overall = per_school_summary_df.groupby(["Spending Ranges (Per Student)"]).mean()["% Overall Passing"] # Create the DataFrame spending_summary_df = pd.DataFrame({ "Average Math Score" : spending_math_scores, "Average Reading Score" : spending_reading_scores, "% Passing Math" : spending_passing_math, "% Passing Reading" : spending_passing_reading, "% Passing Overall": spending_passing_overall }) # Format the DataFrame spending_summary_df["Average Math Score"] = spending_summary_df["Average Math Score"].map("{:.1f}".format) spending_summary_df["Average Reading Score"] = spending_summary_df["Average Reading Score"].map("{:.1f}".format) spending_summary_df["% Passing Math"] = spending_summary_df["% Passing Math"].map("{:.1f}".format) spending_summary_df["% Passing Reading"] = spending_summary_df["% Passing Reading"].map("{:.1f}".format) spending_summary_df["% Passing Overall"] = spending_summary_df["% Passing Overall"].map("{:.1f}".format) spending_summary_df ``` ## Scores by School Size ``` # Establish the bins. size_bins = [0, 1000, 2000, 5000] group_names = ["Small (<1000)", "Medium (1000-2000)", "Large(2000 -5000)"] # Categorize spending based on the bins. per_school_summary_df["School Size"] = pd.cut(per_school_summary_df["Total Students"], size_bins, labels=group_names) # Calculate averages for the desired columns. 
size_math_scores = per_school_summary_df.groupby(["School Size"]).mean()["Average Math Score"] size_reading_scores = per_school_summary_df.groupby(["School Size"]).mean()["Average Reading Score"] size_passing_math = per_school_summary_df.groupby(["School Size"]).mean()["% Passing Math"] size_passing_reading = per_school_summary_df.groupby(["School Size"]).mean()["% Passing Reading"] size_passing_overall = per_school_summary_df.groupby(["School Size"]).mean()["% Overall Passing"] # Assemble into DataFrame. size_summary_df = pd.DataFrame({ "Average Math Score" : size_math_scores, "Average Reading Score" : size_reading_scores, "% Passing Math" : size_passing_math, "% Passing Reading" : size_passing_reading, "% Passing Overall": size_passing_overall }) # Format the DataFrame size_summary_df["Average Math Score"] = size_summary_df["Average Math Score"].map("{:.1f}".format) size_summary_df["Average Reading Score"] = size_summary_df["Average Reading Score"].map("{:.1f}".format) size_summary_df["% Passing Math"] = size_summary_df["% Passing Math"].map("{:.1f}".format) size_summary_df["% Passing Reading"] = size_summary_df["% Passing Reading"].map("{:.1f}".format) size_summary_df["% Passing Overall"] = size_summary_df["% Passing Overall"].map("{:.1f}".format) size_summary_df ``` ## Scores by School Type ``` # Calculate averages for the desired columns. type_math_scores = per_school_summary_df.groupby(["School Type"]).mean()["Average Math Score"] type_reading_scores = per_school_summary_df.groupby(["School Type"]).mean()["Average Reading Score"] type_passing_math = per_school_summary_df.groupby(["School Type"]).mean()["% Passing Math"] type_passing_reading = per_school_summary_df.groupby(["School Type"]).mean()["% Passing Reading"] type_passing_overall = per_school_summary_df.groupby(["School Type"]).mean()["% Overall Passing"] # Assemble into DataFrame. type_summary_df = pd.DataFrame({ "Average Math Score" : type_math_scores, "Average Reading Score" : type_reading_scores, "% Passing Math" : type_passing_math, "% Passing Reading" : type_passing_reading, "% Passing Overall": type_passing_overall }) # # Format the DataFrame type_summary_df["Average Math Score"] = type_summary_df["Average Math Score"].map("{:.1f}".format) type_summary_df["Average Reading Score"] = type_summary_df["Average Reading Score"].map("{:.1f}".format) type_summary_df["% Passing Math"] = type_summary_df["% Passing Math"].map("{:.1f}".format) type_summary_df["% Passing Reading"] = type_summary_df["% Passing Reading"].map("{:.1f}".format) type_summary_df["% Passing Overall"] = type_summary_df["% Passing Overall"].map("{:.1f}".format) type_summary_df ```
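The school summary above builds each per-school statistic with a separate `groupby(...).mean()[...]` call. Purely as a sketch, and not part of the original analysis, the same per-school averages can be gathered in one pass with a single `groupby`; it assumes the `school_data_complete_df` columns already used in this section, and the renamed output labels are only illustrative.

```
# Not part of the original notebook: a compact alternative to the repeated
# groupby().mean() calls above. NaN scores (the Thomas High School 9th graders)
# are skipped by mean() by default, matching the behavior of the code above.
per_school_means = (
    school_data_complete_df
    .groupby("school_name")[["math_score", "reading_score", "budget"]]
    .mean()
    .rename(columns={
        "math_score": "Average Math Score",
        "reading_score": "Average Reading Score",
        "budget": "Total School Budget",
    })
)
per_school_means.head()
```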
# Exploring overfitting and underfitting

In the previous two examples (movie-review classification and fuel-efficiency prediction), we saw that our model's accuracy on the validation data peaks after training for a number of epochs and then begins to decline.

In other words, the model overfits the training data. Learning how to deal with overfitting matters: although it is often possible to reach high accuracy on the training set, what we really want is a model that generalizes well to test data (data it has never seen before).

The opposite of overfitting is underfitting. Underfitting occurs when there is still room for improvement on the test data, and it can happen for several reasons: the model is not powerful enough, it is over-regularized, or it simply has not been trained long enough, so the network has not yet learned the relevant patterns in the training data.

If we train too long, the model starts to overfit and learns patterns from the training data that do not carry over to the test data. We need to strike a balance; understanding how to train for an appropriate number of epochs, which we explore below, is a useful skill.

The best way to prevent overfitting is to use more training data: a model trained on more data naturally generalizes better. When that is not possible, the next-best solution is to use techniques such as regularization, which constrain the quantity and type of information the model can store. If a network can only memorize a small number of patterns, the optimization process forces it to focus on the most prominent ones, and those tend to generalize better.

In this notebook we explore two common regularization techniques, weight regularization and dropout, and use them to improve our IMDB movie-review classification model.

```
from __future__ import absolute_import, division, print_function, unicode_literals

try:
  # %tensorflow_version only exists in Colab.
  %tensorflow_version 2.x
except Exception:
  pass
import tensorflow as tf
from tensorflow import keras

import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)
```

## Download the IMDB dataset

Rather than using an embedding as before, here we multi-hot encode the sentences. This model will quickly overfit the training set, which makes it useful for demonstrating when overfitting happens and how to fight it.

Multi-hot encoding our lists means turning them into vectors of 0s and 1s. Concretely, it means, for example, turning the sequence [3, 5] into a 10,000-dimensional vector that is all zeros except for indices 3 and 5, which are ones.

```
NUM_WORDS = 10000

(train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS)

def multi_hot_sequences(sequences, dimension):
    # Create an all-zero matrix of shape (len(sequences), dimension)
    results = np.zeros((len(sequences), dimension))
    for i, word_indices in enumerate(sequences):
        results[i, word_indices] = 1.0  # set specific indices of results[i] to 1s
    return results

train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS)
test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS)
```

Let's look at one of the resulting multi-hot vectors. The word indices are sorted by frequency, so we expect more 1-values near index zero, as we can see in this plot:

```
plt.plot(train_data[0])
```

## Demonstrate overfitting

The simplest way to prevent overfitting is to reduce the size of the model, i.e. the number of learnable parameters in the model (which is determined by the number of layers and the number of units per layer). In deep learning, the number of learnable parameters in a model is often referred to as the model's "capacity". Intuitively, a model with more parameters has more "memorization capacity" and can therefore easily learn a perfect dictionary-like mapping between training samples and their targets, a mapping with no generalization power that is useless when making predictions on unseen data.

Always keep this in mind: deep learning models tend to be good at fitting the training data, but the real challenge is generalization, not fitting.

On the other hand, if the network has limited memorization resources, it cannot learn that mapping as easily. To minimize its loss, it has to learn compressed representations with more predictive power. At the same time, if you make the model too small, it will have difficulty fitting the training data. There is a balance between "too much capacity" and "not enough capacity".

Unfortunately, there is no magic formula for determining the right size or architecture of a model (in terms of the number of layers, or the right size for each layer). You will have to experiment with a range of different architectures.

To find an appropriate model size, it is best to start with relatively few layers and parameters, then increase the size of the layers or add new layers until you see diminishing returns in the validation loss. Let's try this on our movie-review classification network.

We will create a simple baseline model using only `Dense` layers, then create smaller and larger versions and compare them.

### Create a baseline model

```
baseline_model = keras.Sequential([
    # `input_shape` is only required here so that `.summary` works.
    keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

baseline_model.compile(optimizer='adam',
                       loss='binary_crossentropy',
                       metrics=['accuracy', 'binary_crossentropy'])

baseline_model.summary()

baseline_history = baseline_model.fit(train_data,
                                      train_labels,
                                      epochs=20,
                                      batch_size=512,
                                      validation_data=(test_data, test_labels),
                                      verbose=2)
```

### Create a smaller model

Let's create a model with fewer hidden units to compare against the baseline model we just created:

```
smaller_model = keras.Sequential([
    keras.layers.Dense(4, activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(4, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

smaller_model.compile(optimizer='adam',
                      loss='binary_crossentropy',
                      metrics=['accuracy', 'binary_crossentropy'])

smaller_model.summary()
```

Train the model with the same data:

```
smaller_history = smaller_model.fit(train_data,
                                    train_labels,
                                    epochs=20,
                                    batch_size=512,
                                    validation_data=(test_data, test_labels),
                                    verbose=2)
```

### Create a bigger model

As an exercise, you can create an even larger model and see how quickly it begins to overfit. Next, let's add to this benchmark a network with much more capacity, far more than the problem warrants:

```
bigger_model = keras.models.Sequential([
    keras.layers.Dense(512, activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(512, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

bigger_model.compile(optimizer='adam',
                     loss='binary_crossentropy',
                     metrics=['accuracy', 'binary_crossentropy'])

bigger_model.summary()
```

And, again, train the model using the same data:

```
bigger_history = bigger_model.fit(train_data, train_labels,
                                  epochs=20,
                                  batch_size=512,
                                  validation_data=(test_data, test_labels),
                                  verbose=2)
```

### Plot the training and validation loss

<!--TODO(markdaoust): This should be a one-liner with tensorboard -->

The solid lines show the training loss and the dashed lines show the validation loss (remember: a lower validation loss indicates a better model). Here, the smaller network begins overfitting later than the baseline model (after 6 epochs rather than 4), and once it starts overfitting, its performance degrades much more slowly.

```
def plot_history(histories, key='binary_crossentropy'):
  plt.figure(figsize=(16,10))

  for name, history in histories:
    val = plt.plot(history.epoch, history.history['val_'+key],
                   '--', label=name.title()+' Val')
    plt.plot(history.epoch, history.history[key], color=val[0].get_color(),
             label=name.title()+' Train')

  plt.xlabel('Epochs')
  plt.ylabel(key.replace('_',' ').title())
  plt.legend()

  plt.xlim([0,max(history.epoch)])

plot_history([('baseline', baseline_history),
              ('smaller', smaller_history),
              ('bigger', bigger_history)])
```

Note that the larger network begins overfitting almost immediately, after only one epoch, and overfits much more severely. The more capacity a network has, the faster it can model the training data (leading to a low training loss), but the more likely it is to overfit (leading to a large gap between training and validation loss).

## Strategies to prevent overfitting

### Add weight regularization

You may be familiar with Occam's razor: given two explanations for something, the explanation most likely to be correct is the "simplest" one, the one that makes the fewest assumptions. This also applies to the models learned by neural networks: given some training data and a network architecture, there are multiple sets of weight values (multiple models) that can explain the data, and simpler models are less likely to overfit than complex ones.

In this context, a "simple model" is a model in which the distribution of parameter values has less entropy (or a model with fewer parameters altogether, as we saw in the previous section). A common way to mitigate overfitting is therefore to constrain the complexity of the network by forcing its weights to take only small values, which makes the distribution of weight values more "regular". This is called "weight regularization", and it is implemented by adding a cost associated with having large weights to the network's loss function. This cost comes in two flavors:

- [L1 regularization](https://developers.google.cn/machine-learning/glossary/#L1_regularization), where the added cost is proportional to the absolute value of the weight coefficients (i.e. to the "L1 norm" of the weights).
- [L2 regularization](https://developers.google.cn/machine-learning/glossary/#L2_regularization), where the added cost is proportional to the square of the weight coefficients (i.e. to the squared "L2 norm" of the weights). L2 regularization is also called weight decay in the context of neural networks. Don't let the different names confuse you: weight decay is mathematically identical to L2 regularization.

L1 regularization introduces sparsity, driving some of the weight parameters to zero. L2 regularization penalizes the weight parameters without making them sparse, which is one reason L2 is more common.

In `tf.keras`, weight regularization is added by passing a weight regularizer instance to a layer as a keyword argument. Let's add L2 weight regularization now.

```
l2_model = keras.models.Sequential([
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

l2_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy', 'binary_crossentropy'])

l2_model_history = l2_model.fit(train_data, train_labels,
                                epochs=20,
                                batch_size=512,
                                validation_data=(test_data, test_labels),
                                verbose=2)
```

```l2(0.001)``` means that every coefficient in the layer's weight matrix adds ```0.001 * weight_coefficient_value**2``` to the total loss of the network. Note that because this penalty is only added at training time, the loss of this network will be much higher at training time than at test time.

Here is the impact of our L2 regularization penalty:

```
plot_history([('baseline', baseline_history),
              ('l2', l2_model_history)])
```

As you can see, the L2-regularized model is much more resistant to overfitting than the baseline model, even though both models have the same number of parameters.

### Add dropout

Dropout is one of the most effective and most commonly used regularization techniques for neural networks, developed by Hinton and his students at the University of Toronto. Dropout, applied to a layer, consists of randomly "dropping out" (i.e. setting to zero) a number of that layer's output features during training. Say a given layer would normally return a vector [0.2, 0.5, 1.3, 0.8, 1.1] for a given input sample during training; after applying dropout, this vector will have a few zero entries distributed at random, e.g. [0, 0.5, 1.3, 0, 1.1]. The "dropout rate" is the fraction of the features that are zeroed out; it is usually set between 0.2 and 0.5. At test time, no units are dropped out; instead, the layer's output values are scaled down by a factor equal to the dropout rate, to balance for the fact that more units are active than at training time.

In `tf.keras`, you can introduce dropout in a network via the `Dropout` layer, which gets applied to the output of the layer right before it.

Let's add two `Dropout` layers to our IMDB network and see how well they do at reducing overfitting:

```
dpt_model = keras.models.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1, activation='sigmoid')
])

dpt_model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy', 'binary_crossentropy'])

dpt_model_history = dpt_model.fit(train_data, train_labels,
                                  epochs=20,
                                  batch_size=512,
                                  validation_data=(test_data, test_labels),
                                  verbose=2)

plot_history([('baseline', baseline_history),
              ('dropout', dpt_model_history)])
```

From the plot above, adding dropout is a clear improvement over the baseline model.

To recap, here are the most common ways to prevent overfitting in neural networks:

- Get more training data
- Reduce the capacity of the network
- Add weight regularization
- Add dropout

Two important approaches not covered in this guide are data augmentation and batch normalization.
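The introduction above notes that knowing how many epochs to train for is a useful skill. One common way to automate that choice, not used in this notebook, is an early-stopping callback. The sketch below is only an illustration: it reuses the baseline architecture from above, and the `patience` value is an arbitrary choice.

```
# Not part of the original tutorial: stop training once the validation loss
# stops improving, instead of always running the fixed 20 epochs.
early_stop = keras.callbacks.EarlyStopping(
    monitor='val_loss',          # watch the validation loss
    patience=2,                  # allow 2 epochs without improvement
    restore_best_weights=True)   # roll back to the best epoch's weights

es_model = keras.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])

es_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy', 'binary_crossentropy'])

es_history = es_model.fit(train_data, train_labels,
                          epochs=20,
                          batch_size=512,
                          validation_data=(test_data, test_labels),
                          callbacks=[early_stop],
                          verbose=2)
```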
github_jupyter
from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow as tf from tensorflow import keras import numpy as np import matplotlib.pyplot as plt print(tf.__version__) NUM_WORDS = 10000 (train_data, train_labels), (test_data, test_labels) = keras.datasets.imdb.load_data(num_words=NUM_WORDS) def multi_hot_sequences(sequences, dimension): # Create an all-zero matrix of shape (len(sequences), dimension) results = np.zeros((len(sequences), dimension)) for i, word_indices in enumerate(sequences): results[i, word_indices] = 1.0 # set specific indices of results[i] to 1s return results train_data = multi_hot_sequences(train_data, dimension=NUM_WORDS) test_data = multi_hot_sequences(test_data, dimension=NUM_WORDS) plt.plot(train_data[0]) baseline_model = keras.Sequential([ # `input_shape` is only required here so that `.summary` works. keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)), keras.layers.Dense(16, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) baseline_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy']) baseline_model.summary() baseline_history = baseline_model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_data=(test_data, test_labels), verbose=2) smaller_model = keras.Sequential([ keras.layers.Dense(4, activation='relu', input_shape=(NUM_WORDS,)), keras.layers.Dense(4, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) smaller_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy']) smaller_model.summary() smaller_history = smaller_model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_data=(test_data, test_labels), verbose=2) bigger_model = keras.models.Sequential([ keras.layers.Dense(512, activation='relu', input_shape=(NUM_WORDS,)), keras.layers.Dense(512, activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) bigger_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy','binary_crossentropy']) bigger_model.summary() bigger_history = bigger_model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_data=(test_data, test_labels), verbose=2) def plot_history(histories, key='binary_crossentropy'): plt.figure(figsize=(16,10)) for name, history in histories: val = plt.plot(history.epoch, history.history['val_'+key], '--', label=name.title()+' Val') plt.plot(history.epoch, history.history[key], color=val[0].get_color(), label=name.title()+' Train') plt.xlabel('Epochs') plt.ylabel(key.replace('_',' ').title()) plt.legend() plt.xlim([0,max(history.epoch)]) plot_history([('baseline', baseline_history), ('smaller', smaller_history), ('bigger', bigger_history)]) l2_model = keras.models.Sequential([ keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001), activation='relu', input_shape=(NUM_WORDS,)), keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'), keras.layers.Dense(1, activation='sigmoid') ]) l2_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', 'binary_crossentropy']) l2_model_history = l2_model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_data=(test_data, test_labels), verbose=2) plot_history([('baseline', baseline_history), ('l2', l2_model_history)]) dpt_model = keras.models.Sequential([ 
keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)), keras.layers.Dropout(0.5), keras.layers.Dense(16, activation='relu'), keras.layers.Dropout(0.5), keras.layers.Dense(1, activation='sigmoid') ]) dpt_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy','binary_crossentropy']) dpt_model_history = dpt_model.fit(train_data, train_labels, epochs=20, batch_size=512, validation_data=(test_data, test_labels), verbose=2) plot_history([('baseline', baseline_history), ('dropout', dpt_model_history)])
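# --- Optional follow-up (not in the original notebook): early stopping ---
# A minimal sketch that reuses NUM_WORDS, train_data/train_labels,
# test_data/test_labels, baseline_history and plot_history defined above.
# It stops training once the validation loss stops improving, which is another
# common way to limit overfitting alongside smaller models, L2 and dropout.
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                           restore_best_weights=True)
es_model = keras.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])
es_model.compile(optimizer='adam', loss='binary_crossentropy',
                 metrics=['accuracy', 'binary_crossentropy'])
es_history = es_model.fit(train_data, train_labels, epochs=20, batch_size=512,
                          validation_data=(test_data, test_labels),
                          verbose=2, callbacks=[early_stop])
plot_history([('baseline', baseline_history), ('early stopping', es_history)])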
# Homework 2 (SIO 211A) ``` # imports import numpy as np from matplotlib import pyplot as plt import seaborn as sns import unyt from geopy import distance ``` # Init ``` sns.set_theme() sns.set_style('whitegrid') sns.set_context('notebook') ``` # Problem 1 ## Sorry to make you click on another link, but this made writing a lot easier: ### https://docs.google.com/document/d/1U-OxdKkUfociPIs3qceVuuon0T3CdHk7LPMUecIAE4Y/edit?usp=sharing # Problem 2 ## (a) Rossby radius ## $R = \frac{\sqrt{gH}}{f}$ ``` g = 9.8 * unyt.m / unyt.s**2 H = 1000 * unyt.m f = 1e-4 / unyt.s R = np.sqrt(g*H)/f R.to('km') ``` ## (b) Kelvin wave time ## Our dispersion relation: ## $\omega = \sqrt{gH} k_y$ [Note, I use $k_y$ instead of $\ell$] ## Group velocity ## $C_g = \frac{\partial \omega}{\partial k} = \sqrt{gH}$ ``` C_g = np.sqrt(g*H) C_g ``` ### Distance ``` cali = (34.448113, -120.471439) equador = (-0.9375935827364111, -80.7313159322607) d = distance.distance(cali, equador).km * unyt.km d t = d / C_g t.to('hr') ``` ### It takes approximately 16 hours to travel the distance. ## (c) Energy ### K.E. ## $E_{\rm KE} = \frac{H}{2} |\vec u|^2$ ### For our cross-shore waves, $\vec u = v \hat y$ and we have ## $v = \sqrt{gH} \exp(x/R) \, G(yc-t)$ ### with $x=0$ the shore. ### This gives: ## $E_{\rm KE} = \frac{1}{4} gH^2 \exp(2x/R)$ ### with the extra factor of 1/2 from the time average. ## P.E. ## $E_{\rm PE} = \frac{1}{2} g \eta^2$ ## $\eta = H \exp(x/R) \, G(yc-t)$ ## Giving.. ## $E_{\rm PE} = \frac{1}{4} g H^2 \exp(2x/R)$ ### and we have equi-partition as for the SWE ### Numerically.. ``` # At x=0 E_x0 = g * H**2 / 4. E_x0 ``` ---- # (3) Shallow water wave reflection and transmission ## Boundary at $x=0$ with incoming wave in region with height $H_1$ and height $H_2>H_1$ at $x>0$ ## Shallow wave equations: ## $\eta_I = \eta_{0,I} \exp i(k_{x,I} x + k_{y,I}y - \omega_I t)$ ### with $\eta_{0,I} = 1$ and ## $u_I = \frac{g k_{x,I}}{\omega_I} \eta_{0,I} \exp i(k_{x,I} x + k_{y,I}y - \omega_I t)$ ## and dispersion relation ## $\omega_I^2 = g H_1 [k_{x,I}^2 + k_{y,I}^2]$ ## (a) Solve the equations for $\eta_{0,R}, \eta_{0,T}$ etc. ### Boundary condition 1: $\eta_I = \eta_R + \eta_T$ at $x=0$ ### This means: ## ${\rm e}^{i (k_{y,I} y - \omega_I t)} = \eta_{0,R} {\rm e}^{i (k_{y,R} y - \omega_R t)} + \eta_{0,T} {\rm e}^{i (k_{y,T} y - \omega_T t)} $ ### For this to hold at all times, we must have $\omega_I = \omega_R = \omega_T = \omega$ ### Similarly, to hold at all $y$ we have: $k_{y,I} = k_{y,R} = k_{y,T} = k_y$ ### This then requires: ## $1 = \eta_{0,R} + \eta_{0,T}$ ### The dispersion relation gives us values for $k_x$: ## $k_x = \sqrt{\omega^2/gH - k_y}$ ### Because $k_y$ is the same for all waves and $H=H_1$ for $\eta_I$ and $\eta_R$ ### Therefore, ## $k_{x,I} = \sqrt{\omega^2/gH_1 - k_y}$ ### and clearly ## $k_{x,R} = - k_{x,I}$ ### Last, we have: ## $k_{x,T} = \sqrt{\omega^2/gH_2 - k_y}$ ### Boundary condition 2: $Hu$ is continuous ### This gives us: ## $\frac{g}{\omega} k_{x,I} H_1 = \frac{g}{\omega} k_{x,R} H_1 \, \eta_{0,R} + \frac{g}{\omega} k_{x,T} H_2 \, \eta_{0,T}$ ### The $g/\omega$ terms vanish and we can substitute in $k_{x,R} = - k_{x,I}$ and $1 = \eta_{0,R} + \eta_{0,T}$ ### Massaging... 
## $k_{x,I} H_1 = -k_{x,I} H_1 \, (1-\eta_{0,T}) + k_{x,T} H_2 \, \eta_{0,T}$ ## $\eta_{0,T} = \frac{2 k_{x,I} H_1}{k_{x,I}H_1 + k_{x,T} H_2}$ ## Regarding $\theta$ which we define off the normal: ## $\tan \theta_I = k_{x,I}/k_y$ ### and ## $\theta_I = \theta_R$ ### Meanwhile, ## $\tan \theta_T = k_{x,T}/k_y$ ### Given $H_2>H_1$ then $k_{x,T} < k_{x,I}$ and $\theta_T < \theta_I$ ## (b) Can the $\theta$ relationship be generalized? ### Recognizing $\tan \theta = \sin \theta / \cos\theta$ and that $k_y$ is the same in our denominator, we have: ## $\sin\theta_I / \sin\theta_T = k_{x,I}/k_{x,T}$ ## This is a form of Snell's Law. ## (c) For $H_2>H_1$ is total reflection possible? ### Critical angle is $\theta_T = 90$ deg which requires: ## $\sin\theta_I = k_{x,I}/k_{x,T}$ ### For $H_2 > H_1$, we have $k_{x,T} < k_{x,I}$ so there is no such angle. ## (d) For $H_1>H_2$, the angle does exist and is: ## $\theta_I = \sin^{-1} (k_{x,I}/k_{x,T})$ ---- # (4) Rectangular lake with depth of $H=50 \, \rm m$ and width $L$ ## Our dispersion relation is: ## $\omega^2 = gH k^2$ ``` g = 9.8 * unyt.m / unyt.s**2 H = 50 * unyt.m ``` ## (a) Consider a semi-diurnal period, i.e. $T = 12$ hrs ### $\omega = 2\pi/T$ ``` omega_a = 2 * np.pi / (12*unyt.hr) omega_a.to('1/s') ``` ### This gives ## $k = \omega / \sqrt{gH}$ ``` k_a = omega_a / np.sqrt(g*H) k_a ``` ### Our standing wave has wave numbers: ## $k_x = \frac{n\pi}{L}$ ### or ## $L = \frac{n \pi}{k}$ ``` L_a = np.pi / k_a L_a.to('km') ``` ## (b) Diurnal period -- $T = 1 \, \rm day$ ``` omega_b = 2 * np.pi / (24*unyt.hr) omega_b.to('1/s') k_b = omega_b / np.sqrt(g*H) k_b L_b = np.pi / k_b L_b.to('km') ``` ## (c) Do such bodies exist?! ### Lake Superior is $\approx 80,000 \, \rm sq km.$ or $L \approx 300 \, \rm km$ ### This is smaller than even the first case. And we emphasize that we took $n=1$, i.e. $L$ is only larger for the other harmonics.
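As an optional numerical check (not part of the original solution), the sketch below evaluates the Problem 3 transmission amplitude and the refraction angles using the expressions and sign conventions derived above. The depths, frequency and along-shore wavenumber are made-up example values.

```
# Numerical check of the Problem 3 relations (assumed example values,
# following the conventions derived above).
import numpy as np

g = 9.8                       # m/s^2
H1, H2 = 100.0, 400.0         # depths on either side of x=0 [m] (assumed)
omega = 2*np.pi/(12*3600.0)   # semi-diurnal frequency [1/s] (assumed)
k_y = 1e-6                    # along-shore wavenumber [1/m] (assumed)

# Cross-shore wavenumbers from the dispersion relation
k_xI = np.sqrt(omega**2/(g*H1) - k_y**2)
k_xT = np.sqrt(omega**2/(g*H2) - k_y**2)

# Transmission and reflection amplitudes (with eta_0I = 1)
eta_0T = 2*k_xI*H1/(k_xI*H1 + k_xT*H2)
eta_0R = 1 - eta_0T

# Angles off the normal, tan(theta) = k_x/k_y as defined above
theta_I = np.degrees(np.arctan2(k_xI, k_y))
theta_T = np.degrees(np.arctan2(k_xT, k_y))

print(f"eta_0T = {eta_0T:.3f}, eta_0R = {eta_0R:.3f}")
print(f"theta_I = {theta_I:.1f} deg, theta_T = {theta_T:.1f} deg")
```

With $H_2 > H_1$ this gives $\theta_T < \theta_I$, consistent with the Snell's-law form above.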
# Ex2 - Getting and Knowing your Data Check out [Chipotle Exercises Video Tutorial](https://www.youtube.com/watch?v=lpuYZ5EUyS8&list=PLgJhDSE2ZLxaY_DigHeiIDC1cD09rXgJv&index=2) to watch a data scientist go through the exercises This time we are going to pull data directly from the internet. Special thanks to: https://github.com/justmarkham for sharing the dataset and materials. ### Step 1. Import the necessary libraries ``` import pandas as pd import numpy as np ``` ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv). ### Step 3. Assign it to a variable called chipo. ``` url = 'https://raw.githubusercontent.com/justmarkham/DAT8/master/data/chipotle.tsv' chipo = pd.read_csv(url, sep = '\t') ``` ### Step 4. See the first 10 entries ``` chipo.head(10) ``` ### Step 5. What is the number of observations in the dataset? ``` # Solution 1 chipo.shape[0] # entries <= 4622 observations # Solution 2 chipo.info() # entries <= 4622 observations ``` ### Step 6. What is the number of columns in the dataset? ``` chipo.shape[1] ``` ### Step 7. Print the name of all the columns. ``` chipo.columns ``` ### Step 8. How is the dataset indexed? ``` chipo.index ``` ### Step 9. Which was the most-ordered item? ``` c = chipo.groupby('item_name') c = c.sum() c = c.sort_values(['quantity'], ascending=False) c.head(1) ``` ### Step 10. For the most-ordered item, how many items were ordered? ``` c = chipo.groupby('item_name') c = c.sum() c = c.sort_values(['quantity'], ascending=False) c.head(1) ``` ### Step 11. What was the most ordered item in the choice_description column? ``` c = chipo.groupby('choice_description').sum() c = c.sort_values(['quantity'], ascending=False) c.head(1) # Diet Coke 159 ``` ### Step 12. How many items were orderd in total? ``` total_items_orders = chipo.quantity.sum() total_items_orders ``` ### Step 13. Turn the item price into a float #### Step 13.a. Check the item price type ``` chipo.item_price.dtype ``` #### Step 13.b. Create a lambda function and change the type of item price ``` dollarizer = lambda x: float(x[1:-1]) chipo.item_price = chipo.item_price.apply(dollarizer) ``` #### Step 13.c. Check the item price type ``` chipo.item_price.dtype ``` ### Step 14. How much was the revenue for the period in the dataset? ``` revenue = (chipo['quantity']* chipo['item_price']).sum() print('Revenue was: $' + str(np.round(revenue,2))) ``` ### Step 15. How many orders were made in the period? ``` orders = chipo.order_id.value_counts().count() orders ``` ### Step 16. What is the average revenue amount per order? ``` # Solution 1 chipo['revenue'] = chipo['quantity'] * chipo['item_price'] order_grouped = chipo.groupby(by=['order_id']).sum() order_grouped.mean()['revenue'] # Solution 2 chipo.groupby(by=['order_id']).sum().mean()['revenue'] ``` ### Step 17. How many different items are sold? ``` chipo.item_name.value_counts().count() ```
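As an optional aside (not part of the original exercise set), a few of the steps above can also be written more compactly. The snippet below assumes `chipo` has been loaded and `item_price` already converted to float as in Step 13.

```
# Optional, more compact alternatives (assumes chipo is loaded and item_price is float)

# Step 9/10: most-ordered item and its total quantity
most_ordered = chipo.groupby('item_name')['quantity'].sum().sort_values(ascending=False)
print(most_ordered.head(1))

# Step 15/16: number of orders and average revenue per order, without a helper column
per_order_revenue = (chipo['quantity'] * chipo['item_price']).groupby(chipo['order_id']).sum()
print('Orders:', per_order_revenue.size)
print('Average revenue per order: $', round(per_order_revenue.mean(), 2))

# Step 17: number of distinct items sold
print('Distinct items:', chipo['item_name'].nunique())
```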
# Simulation of Ball drop and Spring mass damper system "Simulation of dynamic systems for dummies". <img src="for_dummies.jpg" width="200" align="right"> This is a very simple description of how to do time simulations of a dynamic system using SciPy ODE (Ordinary Differnetial Equation) Solver. ``` from scipy.integrate import odeint import numpy as np import matplotlib.pyplot as plt ``` ## Simulation of a static system to introduce ODEint Define a method that takes a system state and describe how this state will change in time. The method does this by returning time derivatives for each state. The ODE solver will use these time derivatives to calculate new states, for the next time step. Here is a method that takes a system to simulate a train that travels with constant speed: (The system has only one state, the position of the train) ``` V_start = 150*10**3/3600 # [m/s] Train velocity at start def train(states,t): # states: # [x] x = states[0] # Position of train dxdt = V_start # The position state will change by the speed of the train # Time derivative of the states: d_states_dt = np.array([dxdt]) return d_states_dt x_start = 0 # [m] Train position at start # The states at start of the simulation, the train is traveling with constant speed V at position x = 0. states_0 = np.array([x_start]) # Create a time vector for the simulation: t = np.linspace(0,10,100) # Simulate with the "train" method and start states for the times in t: states = odeint(func = train,y0 = states_0,t = t) # The result is the time series of the states: x = states[:,0] fig,ax = plt.subplots() ax.plot(t,x,label = 'Train position') ax.set_title('Train traveling at constant speed') ax.set_xlabel('time [s]') ax.set_ylabel('x [m]') a = ax.legend() ``` The speed can hower be a state too: ``` def train_2_states(states,t): # states: # [x,V] x = states[0] # Position of train V = states[1] # Speed of train dxdt = V # The position state will change by the speed of the train dVdt = 0 # The velocity will not change (No acceleration) # Time derivative of the states: d_states_dt = np.array([dxdt,dVdt]) return d_states_dt # The states at start of the simulation, the train is traveling with constant speed V at position x = 0. states_0 = np.array([x_start,V_start]) # Create a time vector for the simulation: t = np.linspace(0,10,100) # Simulate with the "train" method and start states for the times in t: states = odeint(func = train_2_states,y0 = states_0,t = t) # The result is the time series of the states: x = states[:,0] dxdt = states[:,1] fig,axes = plt.subplots(ncols = 2) fig.set_size_inches(11,5) ax = axes[0] ax.plot(t,x,label = 'Train position') ax.set_title('Train traveling at constant speed') ax.set_xlabel('time [s]') ax.set_ylabel('x [m]') a = ax.legend() ax = axes[1] ax.plot(t,dxdt,label = 'Train speed') ax.set_title('Train traveling at constant speed') ax.set_xlabel('time [s]') ax.set_ylabel('dx/dt [m/s]') a = ax.legend() ``` ## Ball drop Here is a system where the speed is not constant. A simulation of a ball drop under the influence of gravity force. 
``` g = 9.81 m = 1 def ball_drop(states,t): # states: # [x,v] # F = g*m = m*dv/dt # --> dv/dt = (g*m) / m x = states[0] dxdt = states[1] dvdt = (g*m) / m d_states_dt = np.array([dxdt,dvdt]) return d_states_dt states_0 = np.array([0,0]) t = np.linspace(0,10,100) states = odeint(func = ball_drop,y0 = states_0,t = t) x = states[:,0] dxdt = states[:,1] fig,axes = plt.subplots(ncols = 2) fig.set_size_inches(11,5) ax = axes[0] ax.plot(t,x,label = 'Ball position') ax.set_title('Ball drop') ax.set_xlabel('time [s]') ax.set_ylabel('x [m]') a = ax.legend() ax = axes[1] ax.plot(t,dxdt,label = 'Ball speed') ax.set_title('Ball drop') ax.set_xlabel('time [s]') ax.set_ylabel('dx/dt [m/s]') a = ax.legend() ``` Simulating in air, where the ball has a resistance due aerodynamic drag. ``` cd = 0.01 def ball_drop_air(states,t): # states: # [x,u] # F = g*m - cd*u = m*du/dt # --> du/dt = (g*m - cd*u**2) / m x = states[0] u = states[1] dxdt = u dudt = (g*m - cd*u**2) / m d_states_dt = np.array([dxdt,dudt]) return d_states_dt states = odeint(func = ball_drop_air,y0 = states_0,t = t) x_air = states[:,0] dxdt_air = states[:,1] fig,axes = plt.subplots(ncols = 2) fig.set_size_inches(11,5) ax = axes[0] ax.plot(t,x,label = 'Vacuum') ax.plot(t,x_air,label = 'Air') ax.set_title('Ball drop in vacuum and air') ax.set_xlabel('time [s]') ax.set_ylabel('x [m]') a = ax.legend() ax = axes[1] ax.plot(t,dxdt,label = 'Vacuum') ax.plot(t,dxdt_air,label = 'Air') ax.set_title('Ball drop in vacuum and air') ax.set_xlabel('time [s]') ax.set_ylabel('dx/dt [m/s]') a = ax.legend() ``` The very classical dynamic system with a spring, a mass and a damper. ![title](spring_mass_damp.png) ``` k = 3 # The stiffnes of the spring (relates to position) c = 0.1 # Damping term (relates to velocity) m = 0.1 # The mass (relates to acceleration) def spring_mass_damp(states,t): # states: # [x,v] # F = -k*x -c*v = m*dv/dt # --> dv/dt = (-kx -c*v) / m x = states[0] dxdt = states[1] dvdt = (-k*x -c*dxdt) / m d_states_dt = np.array([dxdt,dvdt]) return d_states_dt y0 = np.array([1,0]) t = np.linspace(0,10,100) states = odeint(func = spring_mass_damp,y0 = y0,t = t) x = states[:,0] dxdt = states[:,1] fig,ax = plt.subplots() ax.plot(t,x) ax.set_title('Spring mass damper simulation') ax.set_xlabel('time [s]') a = ax.set_ylabel('x [m]') ``` Also add a gravity force ``` g = 9.81 def spring_mass_damp_g(states,t): # states: # [x,v] # F = g*m -k*x -c*v = m*dv/dt # --> dv/dt = (g*m -kx -c*v) / m x = states[0] dxdt = states[1] dvdt = (g*m -k*x -c*dxdt) / m d_states_dt = np.array([dxdt,dvdt]) return d_states_dt states_g = odeint(func = spring_mass_damp_g,y0 = y0,t = t) x_g = states_g[:,0] dxdt_g = states_g[:,1] fig,ax = plt.subplots() ax.plot(t,x,label = 'No gravity force') ax.plot(t,x_g,label = 'Gravity force') ax.set_title('Spring mass damper simulation with and without gravity') ax.set_xlabel('time [s]') ax.set_ylabel('x [m]') a = ax.legend() ``` ## SymPy solution ``` import sympy as sym import sympy.physics.mechanics as me from sympy.physics.vector import init_vprinting init_vprinting(use_latex='mathjax') x, v = me.dynamicsymbols('x v') m, c, k, g, t = sym.symbols('m c k g t') ceiling = me.ReferenceFrame('C') O = me.Point('O') P = me.Point('P') O.set_vel(ceiling, 0) P.set_pos(O, x * ceiling.x) P.set_vel(ceiling, v * ceiling.x) P.vel(ceiling) damping = -c * P.vel(ceiling) stiffness = -k * P.pos_from(O) gravity = m * g * ceiling.x forces = damping + stiffness + gravity forces ```
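As an optional cross-check (not in the original notebook), the under-damped spring-mass-damper has a closed-form solution, so the `odeint` result can be compared against it directly. The sketch below reuses the same parameters as above (k=3, c=0.1, m=0.1, x(0)=1, v(0)=0).

```
# Analytical check of the spring-mass-damper simulation (same parameters as above)
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

k, c, m = 3.0, 0.1, 0.1
wn = np.sqrt(k/m)              # undamped natural frequency
zeta = c/(2*np.sqrt(k*m))      # damping ratio (< 1 here, so under-damped)
wd = wn*np.sqrt(1 - zeta**2)   # damped frequency

t = np.linspace(0, 10, 100)
x0, v0 = 1.0, 0.0
# Closed-form under-damped solution for x(0)=x0, v(0)=v0
x_exact = np.exp(-zeta*wn*t)*(x0*np.cos(wd*t) + (v0 + zeta*wn*x0)/wd*np.sin(wd*t))

def spring_mass_damp(states, t):
    x, v = states
    return np.array([v, (-k*x - c*v)/m])

x_num = odeint(spring_mass_damp, [x0, v0], t)[:, 0]

fig, ax = plt.subplots()
ax.plot(t, x_num, label='odeint')
ax.plot(t, x_exact, '--', label='analytical')
ax.set_xlabel('time [s]')
ax.set_ylabel('x [m]')
a = ax.legend()
```

The two curves should lie on top of each other, which is a quick way to confirm the state derivatives are written correctly.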
## 控制迷宫寻宝机器人 在这个项目中,你将使用刚刚学到的知识,尝试根据要求,编写代码,来控制一个机器人,在模拟环境中行走,并找到目标宝藏。 机器人所在的模拟环境中,会包含这样几个因素:机器人的起点、障碍物、宝藏箱。你的任务包括: 1. 分析模拟环境的数据 2. 控制机器人随机行动 3. (可选)控制机器人走到终点 * 一个良好的含有注释的代码,可以让你的程序可读性更高,尝试为你自己的代码添加相应的注释。 --- --- ## 第一节 分析模拟环境的数据 首先,只有足够了解机器人所在的环境,我们的机器人才能成功找到目标宝藏,因此首先我们来对机器人所在环境的数据进行分析。在这个部分,会考察你对数据结构、控制流的了解。 ### 1.1 理解模拟环境数据的储存格式 首先我们思考这样的问题:如何存储模拟环境的数据呢? 我们将我们的模拟环境抽象成一个格子世界,每个格子按照坐标编号进行标记;每个格子中会有四个情况,分别为普通格子(可通行)、机器人的起点(可通行)、障碍物(不可通行)、宝藏箱(目标点)。例如,一个模拟环境就可以抽象成3行4列的格子世界,并按这按这样的方法进行存储: ``` environment = [[0,0,0,2], [1,2,0,0], [0,2,3,2]] ``` 我们用了一个列表来保存虚拟世界的数据。外层列表中的每一个元素依然是一个列表,它代表模拟环境中每行的数据。而对于这个列表中的每个元素都是一个数,它们的含义是: - 0: 普通格子(可通行) - 1: 机器人的起点(可通行) - 2: 障碍物(不可通行) - 3: 宝藏箱(目标点) 那么,根据上述的数据,这个迷宫的第二行第一列,是我们机器人的起点。 __注:我们描述的迷宫的坐标位置(第一行第一列),和迷宫下标索引的值(如 `(0,0)`)是不一样的,请注意下标的问题。__ 如下的代码,使用了辅助函数,读取了模拟环境的数据,并保存在了 `env_data` 变量中。 ``` import helper env_data = helper.fetch_maze() ``` --- **任务1:**在如下代码中,请写代码获得这些值: 1. 模拟环境的长和宽 2. 模拟环境中第3行第6列元素 ``` #TODO 1模拟环境的行数 rows = None rows = len(env_data) #TODO 2模拟环境的列数 columns = None columns = len(env_data[0]) #TODO 3取出模拟环境第三行第六列的元素 row_3_col_6 = None row_3_col_6 = env_data[2][5] print("迷宫共有", rows, "行", columns, "列,第三行第六列的元素是", row_3_col_6) ``` --- ## 1.2 分析模拟环境数据 接着我们需要对模拟环境的中的数据进行分析。请根据如下的指示,计算相应的值。 --- **任务2:**在如下代码中,请计算模拟环境中,第一行和第三列的障碍物个数。 提示:*可以用循环完成。* ``` #TODO 4计算模拟环境中,第一行的的障碍物个数。 number_of_barriers_row1 = None number_of_barriers_row1 = len([i for i in env_data[0] if i == 2]) #TODO 5计算模拟环境中,第三列的的障碍物个数。 number_of_barriers_col3 = None number_of_barriers_col3 = len([i for i in range(len(env_data)) if env_data[i][2] == 2]) print("迷宫中,第一行共有", number_of_barriers_row1, "个障碍物,第三列共有", number_of_barriers_col3, "个障碍物。") ``` --- **任务3:**在如下代码中: 1. 创建一个名为 `loc_map` 的字典,它有两个键值,分别为 `start` 和 `destination`,对应的值分别为起点和目标点的坐标,它们以如 `(1,1)` 的形式保存为元组。 2. 从字典中取出 `start` 对应的值,保存在 `robot_current_loc` 对应的变量中,这个变量表示小车现在的位置。 ``` loc_map = {} #TODO 6按照上述要求创建字典 # 使用enumerate()循环获取env_data的 每一行的索引row_idx 和 嵌套小列表nested_list for row_idx,nested_list in enumerate(env_data): # 查询 1 在嵌套的列表中的索引值 即为y值 其row_idx作为x值 开始位置 if 1 in nested_list: loc_map['start'] = (row_idx, nested_list.index(1)) # 查询 3 在嵌套的列表中的索引值 即为y值 其row_idx作为x值 结束位置 if 3 in nested_list: loc_map['destination'] = (row_idx, nested_list.index(3)) robot_current_loc = None #TODO 7保存机器人当前的位置 robot_current_loc = loc_map['start'] ``` --- --- ## 第二节 控制机器人随机漫步 在这一步中,你需发出指令,控制机器人在环境中随机行动。它会考察你对控制流、调用函数的知识。 ## 2.1 控制机器人行动 我们的机器人能够执行四个动作:向上走 `u`、向下走 `d`、向左走 `l`、向右走 `r`。但是,由于有障碍,很多时候机器人的行动并不能成功。所以在这里,你需要实现一个函数,来判断机器人在某个位置,执行某个移动动作是否可行。 --- **任务4:**在下方代码中,实现名为 `is_move_valid_special` 的函数,它有两个输入,分别为机器人所在的位置坐标 `loc`,以及即将执行的动作 `act`,如 `(1,1)` 及 `u`。接着它的返回是一个布尔值,表明小车在 `loc` 位置下,是否可以执行动作 `act`。 提示1:*可以读取上方定义的 `env_data` 变量,来读取模拟环境的数据。* 提示2:*在实现函数后,请删去下方的 `pass` 代码。* 提示3:*我们需要处理边界的情况,即机器人走到了虚拟环境边界时,是不能够走出虚拟环境的。* ``` def is_move_valid_special(loc, act): """ Judge wether the robot can take action act at location loc. Keyword arguments: loc -- tuple, robots current location act -- string, robots meant action 这里函数名已经从sepcial改为special了 原来似乎是拼错了? 感觉写的有些略复杂了 用了多分支 想知道是否有更加简洁的方法 谢谢!!! 
""" #TODO 8 if act == 'u': if loc[0] == 0: return False # 上边界处理 elif env_data[loc[0]-1][loc[1]] == 2: return False # 上移障碍物判断 else: return True # 可走 elif act == 'd': if loc[0] == len(env_data) - 1: return False # 下边界处理 elif env_data[loc[0]+1][loc[1]] == 2: return False # 下移障碍物判断 else: return True # 可走 elif act == 'l': if loc[1] == 0: return False # 左边界处理 elif env_data[loc[0]][loc[1]-1] == 2: return False # 左移障碍物判断 else: return True # 可走 elif act == 'r': if loc[1] == len(env_data[0])-1: return False # 右边界处理 elif env_data[loc[0]][loc[1]+1] == 2: return False # 右移障碍物判断 else: return True # 可走 else: print('Wrong parameter!') # 参数错误情况 ``` --- **任务5:**在下方代码中,重新实现一个名为 `is_move_valid` 的函数,它有三个输入,分别为模拟环境的数据 `env_data`、机器人所在的位置坐标 `loc`、以及即将执行的动作 `act`。它的返回值与此前一样,是一个布尔值,表明小车在给定的虚拟环境中的 `loc` 位置下,是否可以执行动作 `act`。 ``` def is_move_valid(env, loc, act): """ Judge wether the robot can take action act at location loc. Keyword arguments: env -- list, the environment data loc -- tuple, robots current location act -- string, robots meant action """ #TODO 9 if act == 'u': if loc[0] == 0: return False # 上边界处理 elif env[loc[0]-1][loc[1]] == 2: return False # 上移障碍物判断 else: return True # 可走 elif act == 'd': if loc[0] == len(env_data) - 1: return False # 下边界处理 elif env[loc[0]+1][loc[1]] == 2: return False # 下移障碍物判断 else: return True # 可走 elif act == 'l': if loc[1] == 0: return False # 左边界处理 elif env[loc[0]][loc[1]-1] == 2: return False # 左移障碍物判断 else: return True # 可走 elif act == 'r': if loc[1] == len(env_data[0])-1: return False # 右边界处理 elif env[loc[0]][loc[1]+1] == 2: return False # 右移障碍物判断 else: return True # 可走 else: print('Wrong parameter!') # 参数错误情况 ``` --- **任务6:**请回答:**在任务4及任务5中的实现的两个函数中,`env_data` 这个变量有什么不同?** 提示:_可以尝试从变量作用域的角度回答该问题。_ **回答:** 这里感觉有些奇怪 env_data是个全局变量 任务4定义函数中可以使用这个变量由于其作用域是全局的 而任务5中它作为了形参(看注释形参应该是env 但是实际是env_data 有点懵?改成了env)出现在is_move_valid函数中 当然实参也是它 这块不知道如何从变量作用域回答?希望得到解答!感谢!! --- ## 2.2 机器人可行动作 --- **任务7:**编写一个名为 `valid_actions` 的函数。它有两个输入,分别为虚拟环境的数据 `env_data`,以及机器人所在的位置 `loc`,输出是一个列表,表明机器人在这个位置所有的可行动作。 提示:*可以尝试调用上方定义的`is_move_valid`函数。* ``` ## TODO 10 从头定义、实现你的函数 def valid_actions(env,loc): """ Return all valid actions at location loc. Keyword arguments: env -- list, the environment data loc -- tuple, robots current location """ return [i for i in ['u','d','l','r'] if is_move_valid(env,loc,i)] ``` --- ## 2.3 移动机器人 当机器人收到一个动作的时候,你机器人的位置应发生相应的变化。 **任务8:**编写一个名为 `move_robot` 的函数,它有两个输入,分别为机器人当前所在的位置 `loc` 和即将执行的动作 `act`。接着会返回机器人执行动作之后的新位置 `new_loc`。 ``` ##TODO 11 从头定义、实现你的函数 def mov_robot(loc,act): """ According to the action act to move the robot to the new location new_loc. Keyword arguments: loc -- tuple, robots current location act -- string, robots meant action """ new_loc = None if is_move_valid(env_data,loc,act): if act == 'u': new_loc = (loc[0]-1, loc[1]) elif act == 'd': new_loc = (loc[0]+1, loc[1]) elif act == 'l': new_loc = (loc[0], loc[1]-1) elif act == 'r': new_loc = (loc[0], loc[1]+1) else: print('Wrong action!') return new_loc ``` --- ## 2.4 随机移动机器人 接着,我们尝试在虚拟环境中随机移动机器人,看看会有什么效果。 **任务9:**编写一个名为 `random_choose_actions` 的函数,它有两个输入,分别为虚拟环境的数据 `env_data`,以及机器人所在的位置 `loc`。机器人会执行一个300次的循环,每次循环,他会执行以下任务: 1. 利用上方定义的 `valid_actions` 函数,找出当前位置下,机器人可行的动作; 2. 利用 `random` 库中的 `choice` 函数,从机器人可行的动作中,随机挑选出一个动作; 3. 接着根据这个动作,利用上方定义的 `move_robot` 函数,来移动机器人,并更新机器人的位置; 4. 
当机器人走到终点时,输出“在第n个回合找到宝藏!”。 提示:如果机器人无法在300个回合内找到宝藏的话,试试看增大这个数字,也许会有不错的效果 :P ``` ##TODO 12 从头实现你的函数 import random def random_choose_actions(env,loc): """ Randomly move the robot at current location loc until find destination Keyword arguments: env -- list, the environment data loc -- tuple, robots current location """ # 将机器人当前位置(起点)赋值给cur_loc cur_loc = loc for i in range(300): # 随机从有效移动的列表中选择一个移动方向 并使之移动 random_act = random.choice(valid_actions(env_data,cur_loc)) # 更新cur_loc cur_loc = mov_robot(cur_loc,random_act) # 如果cur_loc作为坐标传入env_data时候得到代表宝藏的3值 即停止 if env_data[cur_loc[0]][cur_loc[1]] == 3: print('在第',i,'个回合找到宝藏!') break # 运行 random_choose_actions(env_data, robot_current_loc) ``` --- --- ## (可选)第三节 控制机器人走到终点 ## 3.1 控制机器人走到终点 在这里,你将综合上述的知识,编码控制机器人走到终点。这个任务对刚刚入门的你来说可能有些挑战,所以它是一个选做题。 **任务10**:尝试实现一个算法,能够对给定的模拟环境,输出机器人的行动策略,使之能够走到终点。 提示:_你可以尝试参考 [A星算法](https://zh.wikipedia.org/wiki/A*%E6%90%9C%E5%AF%BB%E7%AE%97%E6%B3%95) 。_ 以及以下参考资料: * https://baike.baidu.com/item/A%2A算法 * https://blog.csdn.net/hitwhylz/article/details/23089415 ``` ##TODO 13 实现你的算法 ''' 尝试自己实现A*算法 不过由于逻辑等问题 失败了 此段代码作废!!! 自己水平尚浅 还需要学习。。。请见谅 def a_star_special(start_node,env): open_list = [] closed_list = [] cur_loc = start_node h_dic = {} g_dic = {} F_dic = {} x = 0 # 将起点 start_node 加入 open_list 等待探索 open_list.append(start_node) count = 0 while loc_map['destination'] not in open_list: for i in open_list: # 计算open_list里所有点到终点的曼哈顿距离 并加入h_dic h_dic[i] = abs(loc_map['destination'][0]-i[0]) + abs(loc_map['destination'][1]-i[1]) print("h_dic:",h_dic) # 计算open_list里所有点距离起点的距离 加入g_dic[i] # 由于这里机器人不能斜着走 只能上下左右 所以g恒+1 g_dic[i] = count print("g_dic:",g_dic) # 将两个字典的对应位置的值求和 并存入F_dic # 参考了:https://segmentfault.com/q/1010000000683968/ F_dic = dict(Counter(h_dic) + Counter(g_dic)) print("F_dic",F_dic) # 从open_list中移除当前节点 open_list.remove(cur_loc) print("open_list remove",open_list) # 当前处理节点更新为F最小的那个 按x[1]即值排序 寻找最小的那个值 然后取出键赋值给cur_loc # 参考了:https://zhidao.baidu.com/question/439448738.html if x < 1: cur_loc = min(F_dic.items(), key=lambda x:x[1])[0] print("cur_loc",cur_loc) x = 3 else: F_dic.pop(min(F_dic.items(), key=lambda x:x[1])[0]) print("F_dic",F_dic) cur_loc = min(F_dic.items(), key=lambda x:x[1])[0] print("cur_loc",cur_loc) # 将当前节点移动到closed_list已经估算距离 closed_list.append(cur_loc) # 对新节点周围点计算 # 遍历可移动位置 找出当前位置周围的方格 count += 1 print("count",count) for j in valid_actions(env,cur_loc): if mov_robot(cur_loc,j) in closed_list: continue elif mov_robot(cur_loc,j) not in open_list: open_list.append(mov_robot(cur_loc,j)) # 设置当前位置为这几个节点的父节点f_loc f_loc = mov_robot(cur_loc,j) print("父节点",f_loc) # 将这些放个的h值算出 并加入h_dic h_dic[mov_robot(cur_loc,j)] = abs(loc_map['destination'][0]-mov_robot(cur_loc,j)[0]) + abs(loc_map['destination'][1]-mov_robot(cur_loc,j)[1]) g_dic[mov_robot(cur_loc,j)] = count cur_loc = f_loc print("open_list",open_list) a_star_special(loc_map['start'],env_data) ''' ``` > 注意: 当你写完了所有的代码,并且回答了所有的问题。你就可以把你的 iPython Notebook 导出成 HTML 文件。你可以在菜单栏,这样导出**File -> Download as -> HTML (.html)**把这个 HTML 和这个 iPython notebook 一起做为你的作业提交。
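As an optional alternative for Task 10 (the A* attempt above was abandoned), here is a minimal breadth-first-search sketch. It is self-contained and uses the small 3x4 example maze from the introduction; the same function can be applied to the real maze by passing `env_data` together with the coordinates stored in `loc_map`.

```
# Optional BFS shortest-path sketch for Task 10 (a simpler alternative to A*).
# Grid convention as above: 0/1/3 are passable, 2 is an obstacle.
from collections import deque

def bfs_path(env, start, goal):
    """Return a list of moves ('u','d','l','r') from start to goal, or None."""
    moves = {'u': (-1, 0), 'd': (1, 0), 'l': (0, -1), 'r': (0, 1)}
    rows, cols = len(env), len(env[0])
    queue = deque([(start, [])])
    visited = {start}
    while queue:
        (r, c), path = queue.popleft()
        if (r, c) == goal:
            return path
        for act, (dr, dc) in moves.items():
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and env[nr][nc] != 2 and (nr, nc) not in visited:
                visited.add((nr, nc))
                queue.append(((nr, nc), path + [act]))
    return None  # no path found

example_env = [[0, 0, 0, 2],
               [1, 2, 0, 0],
               [0, 2, 3, 2]]
print(bfs_path(example_env, (1, 0), (2, 2)))  # e.g. ['u', 'r', 'r', 'd', 'd']
```

Because BFS explores the grid level by level, the first path it returns is also a shortest one, so no heuristic is needed for a maze of this size.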
# Heart Rate Varability (HRV) NeuroKit2 is the most comprehensive software for computing HRV indices, and the list of features is available below: | Domains | Indices | NeuroKit | heartpy | HRV | pyHRV | | |-------------------|:-------:|:---------------:|:-------:|:---:|:-----:|---| | Time Domain | | | | | | | | | | CVNN | ✔️ | | | | | | | CVSD | ✔️ | | | | | | | MAD | | ✔️ | | | | | | MHR | | | ✔️ | | | | | MRRI | | | ✔️ | | | | | NNI parameters | | | | ✔️ | | | | ΔNNI parameters | | | | ✔️ | | | | MadNN | ✔️ | | | | | | | MeanNN | ✔️ | | | | | | | MedianNN | ✔️ | | | | | | | MCVNN | ✔️ | | | | | | | pNN20 | ✔️ | ✔️ | | ✔️ | | | | pNN50 | ✔️ | ✔️ | ✔️ | ✔️ | | | | RMSSD | ✔️ | ✔️ | ✔️ | ✔️ | | | | SDANN | | | | ✔️ | | | | SDNN | ✔️ | ✔️ | ✔️ | ✔️ | | | | SDNN_index | | | | ✔️ | | | | SDSD | ✔️ | ✔️ | ✔️ | ✔️ | | | | TINN | ✔️ | | | ✔️ | | Frequency Domain | | | | | | | | | | ULF | ✔️ | | | ✔️ | | | | VLF | ✔️ | | ✔️ | ✔️ | | | | LF | ✔️ | ✔️ | ✔️ | ✔️ | | | | LFn | ✔️ | | ✔️ | ✔️ | | | | LF Peak | | | | ✔️ | | | | LF Relative | | | | ✔️ | | | | HF | ✔️ | ✔️ | ✔️ | ✔️ | | | | HFnu | ✔️ | | ✔️ | ✔️ | | | | HF Peak | | | | ✔️ | | | | HF Relative | | | | ✔️ | | | | LF/HF | ✔️ | ✔️ | ✔️ | ✔️ | | Non-Linear Domain | | | | | | | | | | SD1 | ✔️ | ✔️ | ✔️ | ✔️ | | | | SD2 | ✔️ | ✔️ | ✔️ | ✔️ | | | | S | ✔️ | ✔️ | | ✔️ | | | | SD1/SD2 | ✔️ | ✔️ | | ✔️ | | | | SampEn | ✔️ | | | ✔️ | | | | DFA | | | | ✔️ | | | | CSI | ✔️ | | | | | | | Modified CSI | ✔️ | | | | | | | CVI | ✔️ | | | | ## Compute HRV features This example can be referenced by [citing the package](https://github.com/neuropsychology/NeuroKit#citation). The example shows how to use NeuroKit2 to compute heart rate variability (HRV) indices in the time-, frequency-, and non-linear domain. ``` # Load the NeuroKit package and other useful packages import neurokit2 as nk import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = [15, 9] # Bigger images ``` ## Download Dataset First, let's download the resting rate data (sampled at 100Hz) using `nk.data()`. ``` data = nk.data("bio_resting_5min_100hz") data.head() # Print first 5 rows ``` You can see that it consists of three different signals, pertaining to ECG, PPG (an alternative determinant of heart rate as compared to ECG), and RSP (respiration). Now, let's extract the ECG signal in the shape of a vector (i.e., a one-dimensional array), and find the peaks using [ecg_peaks()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.ecg_peaks). ``` # Find peaks peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100) ``` *Note: It is critical that you specify the correct sampling rate of your signal throughout many processing functions, as this allows NeuroKit to have a time reference.* This produces two elements, `peaks` which is a DataFrame of same length as the input signal in which occurences of R-peaks are marked with 1 in a list of zeros. `info` is a dictionary of the sample points at which these R-peaks occur. HRV is the temporal variation between consecutive heartbeats (**RR intervals**). Here, we will use `peaks` i.e. occurrences of the heartbeat peaks, as the input argument in the following HRV functions to extract the indices. ## Time-Domain Analysis First, let's extract the time-domain indices. 
``` # Extract clean EDA and SCR features hrv_time = nk.hrv_time(peaks, sampling_rate=100, show=True) hrv_time ``` These features include the RMSSD (square root of the mean of the sum of successive differences between adjacent RR intervals), MeanNN (mean of RR intervals) so on and so forth. You can also visualize the distribution of R-R intervals by specifying `show=True` in [hrv_time()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.hrv_time). ## Frequency-Domain Analysis Now, let's extract the frequency domain features, which involve extracting for example the spectral power density pertaining to different frequency bands. Again, you can visualize the power across frequency bands by specifying `show=True` in [hrv_frequency()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.hrv_frequency). ``` hrv_freq = nk.hrv_frequency(peaks, sampling_rate=100, show=True) hrv_freq ``` ## Non-Linear Domain Analysis Now, let's compute the non-linear indices with [hrv_nonlinear()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.hrv_nonlinear). ``` hrv_non = nk.hrv_nonlinear(peaks, sampling_rate=100, show=True) hrv_non ``` This will produce a Poincaré plot which plots each RR interval against the next successive one. ## All Domains Finally, if you'd like to extract HRV indices from all three domains, you can simply input `peaks` into [hrv()](https://neurokit2.readthedocs.io/en/latest/functions.html#neurokit2.hrv), where you can specify `show=True` to visualize the combination of plots depicting the RR intervals distribution, power spectral density for frequency domains, and the poincare scattergram. ``` hrv_indices = nk.hrv(peaks, sampling_rate=100, show=True) hrv_indices ``` ## Resources There are several other packages more focused on HRV in which you might find a more in depth explanation and documentation. See their documentation here: - [HeartPy](https://python-heart-rate-analysis-toolkit.readthedocs.io/en/latest/) - [HRV](https://hrv.readthedocs.io/en/latest/) - [pyHRV](https://pyhrv.readthedocs.io/en/latest/_pages/api/nonlinear.html)
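As a small addendum (not part of the original example), two of the time-domain indices reported by `hrv_time()` are straightforward to compute by hand from the R-peak sample positions, which can help demystify the output. The sketch below uses synthetic peak positions; with the data above you would instead take the peak samples stored in `info`.

```
# Manual computation of two time-domain indices from R-peak sample positions
# (synthetic peaks here; with real data, use the R-peak samples found above).
import numpy as np

sampling_rate = 100
# Synthetic R-peak positions: roughly 1 s apart with some jitter (assumed example)
rng = np.random.default_rng(42)
rr_seconds = 1.0 + 0.05*rng.standard_normal(300)
peak_samples = np.cumsum(np.round(rr_seconds*sampling_rate)).astype(int)

rr_ms = np.diff(peak_samples)/sampling_rate*1000     # RR intervals in ms
sdnn = np.std(rr_ms, ddof=1)                         # SDNN: std of RR intervals
rmssd = np.sqrt(np.mean(np.diff(rr_ms)**2))          # RMSSD: rms of successive differences

print(f"SDNN  ~ {sdnn:.1f} ms")
print(f"RMSSD ~ {rmssd:.1f} ms")
```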
``` import pandas as pd import numpy as np import math trips = pd.read_csv('../data/raw/201501-hubway-tripdata.csv') trips = trips.append(pd.read_csv('../data/raw/201502-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201503-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201504-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201505-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201506-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201507-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201508-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201509-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201510-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201511-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201512-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201601-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201602-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201603-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201604-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201605-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201606-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201607-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201608-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201609-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201610-hubway-tripdata.csv')) trips = trips.append(pd.read_csv('../data/raw/201611-hubway-tripdata.csv')) print(trips.shape) trips.columns #drop unwanted features and rename station features trips = trips.drop(['tripduration', 'bikeid', 'usertype', 'birth year', 'gender'], axis=1) trips = trips.rename(columns={'start station id': 'start_station_id', 'start station name': 'start_station_name', 'start station latitude': 'start_station_latitude', 'start station longitude': 'start_station_longitude', 'end station id': 'end_station_id', 'end station name': 'end_station_name', 'end station latitude': 'end_station_latitude', 'end station longitude': 'end_station_longitude'}) trips.dtypes #coerce all end station ids to numbers. 
if not possible, then set NaN trips['end_station_id'] = pd.to_numeric(trips['end_station_id'], errors='coerce') print('Number of trips with unknown end station ID:', trips[pd.isnull(trips['end_station_id'])].shape[0]) trips = trips[pd.notnull(trips['end_station_id'])] trips = trips.astype({'start_station_id': int, 'end_station_id': int}) #station ID 1 is Hubway's warehouse and can be removed warehouse_trips = (trips['start_station_id'] == 1) | (trips['end_station_id'] == 1) print('Number of trips with station ID = 1:', trips[warehouse_trips].shape[0]) trips = trips[warehouse_trips == False] trips['end_station_latitude'] = pd.to_numeric(trips['end_station_latitude'], errors='coerce') print('Number of trips with unknown end station latitude:', trips[pd.isnull(trips['end_station_latitude'])].shape[0]) trips['end_station_longitude'] = pd.to_numeric(trips['end_station_longitude'], errors='coerce') print('Number of trips with unknown end station longitude:', trips[pd.isnull(trips['end_station_longitude'])].shape[0]) trips['starttime'] = trips['starttime'].apply(pd.Timestamp) trips['stoptime'] = trips['stoptime'].apply(pd.Timestamp) trips.dtypes #remove trips from or to "virtual" locations outside Boston BOSTON_LATITUDE = 42.355428 BOSTON_LONGITUDE = -71.069786 out_of_bounds = ((trips['start_station_latitude'] > (BOSTON_LATITUDE + 1)) | (trips['start_station_latitude'] < (BOSTON_LATITUDE - 1)) | (trips['end_station_latitude'] > (BOSTON_LATITUDE + 1)) | (trips['end_station_latitude'] < (BOSTON_LATITUDE - 1)) | (trips['start_station_longitude'] > (BOSTON_LONGITUDE + 1)) | (trips['start_station_longitude'] < (BOSTON_LONGITUDE - 1)) | (trips['end_station_longitude'] > (BOSTON_LONGITUDE + 1)) | (trips['end_station_longitude'] < (BOSTON_LONGITUDE - 1))) print("Number of trips with station locations outside of Boston: ", trips[out_of_bounds].shape[0]) trips = trips[out_of_bounds == False] #remove trips that end in December 2016 print("Number of trips ending in December 2016: ", trips[trips['stoptime'] >= pd.Timestamp(2016, 12, 1, 0, 0, 0)].shape[0]) trips = trips[trips['stoptime'] < pd.Timestamp(2016, 12, 1, 0, 0, 0)] trips.head() print("Number of trips:", trips.shape[0]) trips = trips.set_index('starttime') trips.to_csv('../data/raw/201501-201611-hubway-tripdata.csv') ```
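A side note on the loading cell above: `DataFrame.append` is deprecated in recent pandas releases (and removed in pandas 2.0), so the same concatenation is now usually written with `pd.concat`. A minimal sketch, assuming the monthly files are named exactly as above and live in `../data/raw/`:

```
# Equivalent loading with pd.concat instead of the deprecated DataFrame.append.
# Assumes the monthly CSVs (2015-01 through 2016-11) live in ../data/raw/.
import pandas as pd

months = pd.period_range('2015-01', '2016-11', freq='M')
files = [f"../data/raw/{m.strftime('%Y%m')}-hubway-tripdata.csv" for m in months]
trips = pd.concat((pd.read_csv(f) for f in files), ignore_index=True)
print(trips.shape)
```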
<center><h1><b><span style="color:blue">Histogramming & visualisation</span></b></h1></center> &nbsp;<br> This is not an under-statement - **histograms are ubiquitous in Particle Physics!** This being said, and as far as the (general) scientific Python ecosystem is concerned, histograms aren't quite seen as first-class citizens ... Would you then be surprised to know that histogramming saw a lot of developments in our field? ### **Quick intro to the following packages** - `histoprint` - pretty print of NumPy (and other) histograms to the console. - `boost-histogram` - fast and flexible multi-dimensional histograms and profiles. - `Hist` - analyst-friendly front-end for boost-histogram. - `mplhep` - HEP domain-specific add-ons to matplotlib. **Important note:** much of the functionality in these packages significantly enhances the general scientific ecosystem and is at all not (HEP) domain specific! **Important note (2):** the fact that there is some overlap in some of the functionality these packages provide may be overwhelming at first - no different from the fact that very many Python libraries exist for visualisation out there, actually. Remember, the development of these packages are largely driven by the community (needs and feedback). See this as a motivation to contribute towards the most useful and best features. &nbsp;<br> <center><h2><b><span style="color:green">histoprint - pretty print of NumPy (and other) histograms to the console</span></b></h2></center> An introduction to `histoprint` very largely based on the README, see https://github.com/scikit-hep/histoprint. The package is mainly meant to be used on the console; else for quick and trivial displays of 1-dimensional histograms. ``` import numpy as np from histoprint import text_hist, print_hist ``` `text_hist(...)`, the simplest ever thin wrapper for `numpy.histogram`: ``` text_hist( np.random.randn(1000), bins=[-5, -3, -2, -1, -0.5, 0, 0.5, 1, 2, 3, 5], title="Variable bin widths" ) h_numpy = np.histogram(np.random.normal(0, 1, 100000), bins=20, range=(-5, 5)) h_numpy A = np.random.randn(1000) - 2 B = np.random.randn(1000) C = np.random.randn(1000) + 2 D = np.random.randn(500) * 2 histA = np.histogram(A, bins=15, range=(-5, 5)) histB = np.histogram(B, bins=15, range=(-5, 5)) histC = np.histogram(C, bins=15, range=(-5, 5)) histD = np.histogram(D, bins=15, range=(-5, 5)) histAll = ([histA[0], histB[0], histC[0], histD[0]], histA[1]) # print_hist can be used to print multiple histograms at once # (or just to print a single one as returned by numpy.histogram) print_hist(histAll, title="Overlays", labels="ABCDE") print_hist( histAll, title="Stacks", stack=True, symbols=" ", bg_colors="rgbcmy", labels="ABCDE", ) print_hist( histAll, title="Summaries", symbols=r"=|\/", fg_colors="0", bg_colors="0", labels=["AAAAAAAAAAAAAAAA", "B", "CCCCCCCCCCCCC", "D"], summary=True, ) ``` **Note:** the last example does not use terminal colors, so it can be copied as text. ### **1. Command-line interface** `Histoprint` comes with a simple command-line interface to create histograms of tabulated data. It can read in files or take data directly from STDIN: ``` !histoprint --help ``` ### **2. Interoperability - support for other histogram types** `Histoprint` can directly plot other (more fancy) types of histograms if they follow the PlottableProtocol conventions, or offer a way of being converted to the NumPy format. Currently this means they have to expose a numpy() or to_numpy() method. 
Both the TH1 histograms of uproot4 and the histograms of boost-histogram are supported like this: ``` import boost_histogram as bh hist = bh.Histogram(bh.axis.Regular(20, -3, 3)) hist.fill(np.random.randn(1000)) print_hist(hist, title="Boost Histogram") import uproot file = uproot.open("http://scikit-hep.org/uproot3/examples/Event.root") hist = file["htime"] print_hist(hist, title="uproot TH1") ``` &nbsp;<br><center><img src="images/logo_boost-histogram.png" alt="boost-histogram package logo" style="width: 150px;"/></center> <center><h2><b><span style="color:green">Fast and flexible multi-dimensional histograms and profiles</span></b></h2></center> * Python bindings for the C++14 Boost.Histogram library. * No external dependency. * Very fast and feature-rich. * Multi-dimensional histograms and profiles. * Supports weighted and unweighted data. * And much much more, see the [docs](https://boost-histogram.readthedocs.io/). <div class="alert alert-info"> <b>Acknowledgements</b> This mini-tutorial is largely based on material by Henry Schreiner (Princeton University), with minor modifications. </div> ### **1. Basic 1D histogram** ``` import boost_histogram as bh import numpy as np import matplotlib.pyplot as plt ``` Let's generate some data and create a histogram: ``` data1 = np.random.normal(3.5, 2.5, size=1_000_000) h1 = bh.Histogram(bh.axis.Regular(40, -2, 10)) h1.fill(data1) ``` Let's explicitly check to see how many entries are in the histogram: ``` h1.sum() ``` What happened to the missing items? They are in the underflow and overflow bins ;-): ``` h1.sum(flow=True) ``` As in ROOT, overflow bins are on by default. We can turn them off, but they enable some powerful things such as projections. Make sure things visually look OK: ``` plt.bar(h1.axes[0].centers, h1, width=h1.axes[0].widths); ``` From now on, let's be lazy with a helper function for plotting: ``` plothist = lambda h: plt.bar(h.axes[0].centers, h, width=h.axes[0].widths); ``` ### **2. Drop-in replacement for NumPy** ``` bins2, edges2 = bh.numpy.histogram(data1, bins=10) bins2, edges2 ``` ### **3. "Extra dimensions"** The same API works for multiple dimensions. This is an area where the library shines particularly well. ``` h2 = bh.Histogram( bh.axis.Regular(150, -1.5, 1.5), bh.axis.Regular(100, -1, 1) ) h2 def make_2D_data(*, mean=(0,0), widths=(1,1), size=1_000_000): cov = np.asarray(widths) * np.eye(2) return np.random.multivariate_normal(mean, cov, size=size).T data3x = make_2D_data(mean=[-.75, .5], widths=[.2, 0.02]) data3y = make_2D_data(mean=[.75, .5], widths=[.2, 0.02]) ``` From here on we will be using `.reset()` before a `.fill()`, just to make sure each cell in the notebook can be rerun. ``` h2.reset() h2.fill(*data3x) h2.fill(*data3y) ``` Again, let's make plotting a little function: ``` def plothist2d(h): X, Y = h.axes.edges X, Y = np.broadcast_arrays(X, Y) return plt.pcolormesh(X, Y, h) ``` > The above code is the same as doing: > > ```python X, Y = np.meshgrid(*(a.edges for a in h.axes), indexing='ij') ``` ``` plothist2d(h2); ``` Let's try a 3D histogram: ``` data3d = [np.random.normal(size=1_000_000) for _ in range(3)] h3d = bh.Histogram( bh.axis.Regular(150, -5, 5), bh.axis.Regular(100, -5, 5), bh.axis.Regular(100, -5, 5) ) h3d.fill(*data3d) ``` Projection of the first two axes: ``` plothist2d(h3d.project(0,1)); ``` ### **4. Unified Histogram Indexing** Let's explore the boost-histogram UHI syntax. 
We will reuse the previous 2D histogram from part 3: ``` plothist2d(h2); ``` I can see that I want y from 0.25 to 0.75, in data coordinates: ``` plothist2d(h2[:, bh.loc(.25):bh.loc(.75)]); ``` What's the contents of a bin? ``` h2[100,87] ``` How about in data coordinates? ``` h2[bh.loc(.5), bh.loc(.75)] ``` > Note: to get the coordinates manually: > > ```python h2.axes[0].index(.5) == 100 h2.axes[1].index(.75) == 87 ``` Let's look at one part and rebin: ``` plothist2d(h2[:50:bh.rebin(2), 50::bh.rebin(2)]); ``` What is the value at `(-.75, .5)`? ``` h2[bh.loc(-.75), bh.loc(.5)] ``` ### **5. Fancier axis types - a circular axis example** ``` h = bh.Histogram(bh.axis.Regular(30, 0, 2*np.pi, circular=True)) h.fill(np.random.uniform(0, np.pi*4, size=300)) ax = plt.subplot(111, polar=True) plothist = lambda h: plt.bar(h.axes[0].centers, h, width=h.axes[0].widths) plothist(h); ``` ### **6. Advanced usage - storage and accumulators** Explore: - The extensive list of histogram operations available. - The rich set of storage types (Double, Int64, Weight, etc.). - The accumulators defined (Sum, WeightedSum, etc.). Storages store accumulators. A simple example: ``` mean = bh.accumulators.Mean() mean.fill([0.3, 0.4, 0.5]) h = bh.Histogram(bh.axis.Regular(10, 0, 10), storage=bh.storage.Mean()) h.fill([2.5] * 3, sample=[0.3, 0.4, 0.5]) print_hist(h.to_numpy()) ``` &nbsp;<br><center><img src="images/logo_mplhep.png" alt="mplhep package logo" style="width:170px;"/></center> <center><h2><b><span style="color:green">HEP domain-specific add-ons to matplotlib</span></b></h2></center> ### Package motivation - Collection of wrappers for `matplotlib` to more easily produce plots typically needed in HEP. - Collect styles that are compatible with current collaboration recommendations (ROOT-package-like plots for large [LHC](https://home.cern/science/accelerators/large-hadron-collider) experiments ATLAS, CMS, etc.). ``` import numpy as np import matplotlib.pyplot as plt import mplhep ``` ### **1. Some basics** Plotting of 1D histograms made easy for "standard" and commonly used looks in HEP: ``` plt.style.use([mplhep.style.ROOT, mplhep.style.firamath]) # see style down in this notebook h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10) f, axs = plt.subplots(2, 2, sharex=True, sharey=True) axs = axs.flatten() axs[0].set_title("Default", fontsize=18) mplhep.histplot(h, bins, ax=axs[0]) axs[1].set_title("Plot Edges", fontsize=18) mplhep.histplot(h, bins, edges=True, ax=axs[1]) axs[2].set_title("Plot Errorbars", fontsize=18) mplhep.histplot(h, bins, yerr=np.sqrt(h), ax=axs[2]) axs[3].set_title("Filled Histogram", fontsize=18) mplhep.histplot(h, bins, histtype='fill', ax=axs[3]) plt.tight_layout() plt.show() ``` It is just as easy to overlay 2, or more, 1D histograms: ``` h, bins = np.histogram(np.random.normal(10, 3, 400), bins = 10) f, axs = plt.subplots(2, 2, sharex=True, sharey=True) axs = axs.flatten() axs[0].set_title("Default Overlay", fontsize=18) mplhep.histplot([h, 1.5*h], bins, ax=axs[0]) axs[1].set_title("Default Overlay w/ Errorbars", fontsize=18) mplhep.histplot([h, 1.5*h], bins, yerr=[np.sqrt(h), np.sqrt(1.5*h)], ax=axs[1]) axs[2].set_title("Automatic Errorbars", fontsize=18) mplhep.histplot([h, 1.5*h], bins, yerr=True, ax=axs[2]) axs[3].set_title("With Labels", fontsize=18) mplhep.histplot([h, 1.5*h], bins, yerr=True, ax=axs[3], label=["First", "Second"]) axs[3].legend(fontsize=15) plt.tight_layout() plt.show() ``` In other situations one rather needs to stack a series of 1D histograms. 
Here is how this can be done: ``` f, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(9, 9)) axs = axs.flatten() axs[0].set_title("Default", fontsize=18) mplhep.histplot([h, 1.5*h], bins, stack=True, ax=axs[0]) axs[1].set_title("Plot Edges", fontsize=18) mplhep.histplot([h, 1.5*h], bins, edges=True, stack=True,ax=axs[1]) axs[2].set_title("Plot Errorbars", fontsize=18) mplhep.histplot([h, 1.5*h], bins, yerr=np.sqrt(h), stack=True, ax=axs[2]) axs[3].set_title("Filled Histogram", fontsize=18) mplhep.histplot([1.5*h, h], bins, histtype='fill', stack=True, ax=axs[3]) plt.tight_layout() plt.show() ``` ### **2. Fancier plots** Likewise, `mplhep` provides a simple way to plot 2D histograms. ``` fig, ax = plt.subplots() xedges = np.arange(0, 11.5, 1.5) yedges = [0, 2, 3, 4, 6, 7] x = np.random.normal(5, 1.5, 100) y = np.random.normal(4, 1, 100) mplhep.hist2dplot(*np.histogram2d(x, y, bins=(xedges, yedges)), labels=True); # This would work just as well: # H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) # mplhep.hist2dplot(H, xedges, yedges, labels=True); ``` There's no trouble if starting from a `boost-histogram` histogram - just convert it on-the-fly: ``` h2 = bh.Histogram( bh.axis.Variable(xedges), bh.axis.Variable(yedges) ) h2.fill(x,y) mplhep.hist2dplot(*h2.to_numpy()); ``` In fact, is the conversion at all necessary? Not anymore, as Boost histograms are supported: ``` mplhep.hist2dplot(h2); ``` ### **3. Styling** As said above, several styles are predefined. Here is a simple example: ``` x = np.random.uniform(0, 10, 240) y = np.random.normal(512, 112, 240) z = np.random.normal(0.5, 0.1, 240) plt.style.use([mplhep.style.ROOT, mplhep.style.firamath]) # styles can be chained f, ax = plt.subplots() ax.scatter(x,y, c=z, label='XSX') mplhep.cms.label(loc=0) plt.show() ``` &nbsp;<br><center><img src="images/logo_Hist.png" style="width: 150px;"/></center> <center><h2><b><span style="color:green">Analyst friendly front-end for boost-histogram</span></b></h2></center> - `Hist` is a powerful, friendly and analysis-focused histogramming tool based on `boost-histogram`. - It provides some cool shortcuts for histogram creation, plotting tools, and new ideas. Let's explore the library a bit with some examples. See also the [GitHub repository](https://github.com/scikit-hep/hist) for full documentation. ``` import hist from hist import Hist import numpy as np ``` ### **1. Cool representations in notebooks** ``` Hist.new.Reg(50, 1, 2).Double().fill(np.random.normal(1.5, 0.3, 10_000)) h2 = Hist.new.Reg(50, 0, 2, name='My preferred x-axis title').Reg(50, 10, 20).Double().fill( np.random.normal(1, 0.5, 10_000), np.random.normal(15, 3, 10_000) ) h2 Hist.new.Reg(50, 0, 2).Reg(50, 10, 20).Reg(2, 3, 4).Double() ``` Based on `boost-histogram`’s Axis, `Hist` supports six types of axis (Regular, Boolean, Variable, Integer, IntCategory and StrCategory), with additional names and labels. You can checkout `hist.axis` or use the shortcut definitions instead: ``` # Add the axes using the shortcut method h = ( Hist.new.Reg(10, -5, 5, overflow=False, underflow=False, name="A") .Bool(name="B") .Var(range(10), name="C") .Int(-5, 5, overflow=False, underflow=False, name="D") .IntCat(range(10), name="E") .StrCat(["T", "F"], name="F") .Double() ) h ``` ### **2. 
Handy plotting functions** ``` data1 = np.random.normal(3.5, 2.5, size=1_000_000) h1 = Hist(hist.axis.Regular(40, -2, 10, name="Variable x"), storage=hist.storage.Double()) h1.fill(data1) ``` The histogram "knows how to plot itself" - very handy for quick checks: ``` h1.plot(color="darkviolet", lw=3, histtype='fill') import matplotlib.pyplot as plt # plot2d fig, ax = plt.subplots(figsize=(6, 6)) h2.plot2d(ax=ax, cmap="plasma") plt.show() ``` This being said, `Hist` talks well with the HEP plotting library `mplhep`: ``` h = bh.Histogram( bh.axis.Regular(2, 0, 1, metadata="x"), bh.axis.Regular(4, 0, 1, metadata="y")) h.fill( [.2, .4, .3], [.3, .5, .2]) h mplhep.hist2dplot(h) ``` Did we not say "handy plotting functions" and a "powerful, friendly and analysis-focused histogramming tool"? Check this out: ``` h = Hist( hist.axis.Regular(50, -5, 5, name="S", label="s [units]", flow=False), hist.axis.Regular(50, -5, 5, name="W", label="w [units]", flow=False), ) import numpy as np s_data = np.random.normal(size=10_000) + np.ones(10_000) w_data = np.random.normal(size=10_000) s_data = np.random.normal(1, 0.5, 100_000) w_data = np.random.normal(13, 3, 100_000) h = Hist.new.Reg(50, -5, 5, name="S", label="s [units]", flow=False).Reg(50, -20, 20, name="W", label="w [units]", flow=False).Double().fill( s_data, w_data) # plot2d_full plt.figure(figsize=(8, 8)) h.plot2d_full( main_cmap="coolwarm", top_ls="--", top_color="orange", top_lw=2, side_ls=":", side_lw=2, side_color="steelblue", ) plt.show() ``` Now we really are mixing all libraries - checking the projections with `mplhep`: ``` import mplhep fig, axs = plt.subplots(1, 2, figsize=(12, 5)) mplhep.histplot(h.project("S"), ax=axs[0]) mplhep.histplot(h.project("W"), ax=axs[1]) ``` Pull plots are very commonly used in HEP, e.g. to make sure that a toy study produces unbiased results and returns adequate uncertainties. `Hist` hence provides a powerful and flexible method for pull studies, called `.plot_pull()`. The method can be passed in a Callable object to be used to fit the plot. For the sake of argument, let's define a Gaussian function (following the `Hist` documentation): ``` # The unumpy module of the uncertainties package deals with uncertainties in arrays from uncertainties import unumpy as unp def pdf(x, a=1 / np.sqrt(2 * np.pi), x0=0, sigma=1, offset=0): exp = unp.exp if a.dtype == np.dtype("O") else np.exp # 'O' for a Python object return a * exp(-((x - x0) ** 2) / (2 * sigma ** 2)) + offset fig = plt.figure(figsize=(10, 8)) # Fill a histogram with 500 entries hh = hist.Hist( hist.axis.Regular( 50, -5, 5, name="X", label="x [units]", underflow=False, overflow=False ) ).fill(np.random.normal(size=500)) # Plot the histogram (data points) together with the fitted curve, and the pull plot main_ax_artists, subplot_ax_artists = hh.plot_pull( pdf, # Normal distributions are so commonly used in pull distributions that the str aliases "normal", "gauss", and "gaus" are supported as well! eb_ecolor="steelblue", eb_mfc="steelblue", eb_mec="steelblue", eb_fmt="o", eb_ms=6, eb_capsize=1, eb_capthick=2, eb_alpha=0.8, fp_c="hotpink", fp_ls="-", fp_lw=2, fp_alpha=0.8, bar_fc="royalblue", pp_num=5, pp_fc="royalblue", pp_alpha=0.5, pp_ec=None, ub_alpha=0.5, ) plt.ylim((-2,2)) plt.show() ```
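The `.plot_pull()` call above hides the arithmetic behind a pull. As a rough sketch of that arithmetic only (not the code `Hist` runs internally, and assuming Poisson bin uncertainties plus a crude normalisation of the fitted curve), the per-bin pulls for the `hh` histogram and the `pdf` function defined above can be computed by hand:

```
# Per-bin pull sketch: pull_i = (observed_i - expected_i) / sigma_i, with sigma_i ~ sqrt(n_i).
counts = hh.values()
centers = hh.axes[0].centers
bin_width = hh.axes[0].widths[0]
# scale the unit Gaussian to the histogram: amplitude = N_entries * bin_width / sqrt(2*pi)
expected = pdf(centers, a=counts.sum() * bin_width / np.sqrt(2 * np.pi))
sigma = np.sqrt(np.where(counts > 0, counts, 1))  # avoid dividing by zero in empty bins
pulls = (counts - expected) / sigma

plt.bar(centers, pulls, width=bin_width)
plt.ylabel("pull = (data - fit) / sigma")
plt.show()
```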
``` import numpy as np import os import torch import torchvision import torchvision.transforms as transforms ### Load dataset - Preprocessing DATA_PATH = '/tmp/data' BATCH_SIZE = 64 def load_mnist(path, batch_size): if not os.path.exists(path): os.mkdir(path) trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))]) train_set = torchvision.datasets.MNIST(root=path, train=True, transform=trans, download=True) test_set = torchvision.datasets.MNIST(root=path, train=False, transform=trans, download=True) train_loader = torch.utils.data.DataLoader( dataset=train_set, batch_size=batch_size, shuffle=True) test_loader = torch.utils.data.DataLoader( dataset=test_set, batch_size=batch_size, shuffle = False) return train_loader, test_loader train_loader, test_loader = load_mnist(DATA_PATH, BATCH_SIZE) ### Build network IN_SIZE = 28*28 HIDDEN_SIZE = 50 OUT_SIZE = 10 LR=0.001 class Net(torch.nn.Module): def __init__(self): super(Net, self).__init__() self.l1 = torch.nn.Linear(IN_SIZE , HIDDEN_SIZE) self.l2 = torch.nn.Linear(HIDDEN_SIZE, OUT_SIZE) def forward(self, x): x = x.view(-1, IN_SIZE) x = torch.relu(self.l1(x)) y_logits = self.l2(x) return y_logits net = Net() criterion = torch.nn.CrossEntropyLoss(reduction='sum') opti = torch.optim.SGD(net.parameters(), lr=LR) ### Training NEPOCHS = 5 for epoch in range(NEPOCHS): for batch_idx, (X, y) in enumerate(train_loader): net.zero_grad() y_logits = net(X) loss = criterion(y_logits, y) loss.backward() opti.step() preds = torch.empty(len(train_loader.dataset)) y = torch.empty(len(train_loader.dataset)) loss = 0 for batch_idx, (bX, by) in enumerate(train_loader): y_logits = net(bX) bloss = criterion(y_logits, by) bpreds = torch.argmax(y_logits, dim=1) preds[batch_idx*BATCH_SIZE:batch_idx*BATCH_SIZE+len(bX)] = bpreds y[batch_idx*BATCH_SIZE:batch_idx*BATCH_SIZE+len(bX)] = by loss += bloss acc = y.eq(preds).sum().float() / len(y) print('Epoch {}: Loss = {}, Accuracy = {}'.format(epoch+1, loss.data, acc)) ### Evaluate preds = torch.empty(len(test_loader.dataset)) y = torch.empty(len(test_loader.dataset)) loss = 0 for batch_idx, (bX, by) in enumerate(test_loader): y_logits = net(bX) bloss = criterion(y_logits, by) bpreds = torch.argmax(y_logits, dim=1) preds[batch_idx*BATCH_SIZE:batch_idx*BATCH_SIZE+len(bX)] = bpreds y[batch_idx*BATCH_SIZE:batch_idx*BATCH_SIZE+len(bX)] = by loss += bloss acc = y.eq(preds).sum().float() / len(y) print('Test Accuracy = {}'.format(acc)) ```
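One detail worth flagging in the loops above: the evaluation passes still track gradients and leave the network in training mode. A hedged refinement (the helper name is ours; it reuses the `net`, `criterion`, and loader objects defined above) is to wrap evaluation in `torch.no_grad()` and switch the model to `eval()`:

```
# Evaluation helper: disables gradient tracking and puts the model in eval mode,
# then restores training mode. Reuses net, criterion and the data loaders from above.
def evaluate(net, loader, criterion):
    net.eval()
    total_loss, correct, n = 0.0, 0, 0
    with torch.no_grad():
        for bX, by in loader:
            y_logits = net(bX)
            total_loss += criterion(y_logits, by).item()
            correct += (torch.argmax(y_logits, dim=1) == by).sum().item()
            n += len(by)
    net.train()
    return total_loss, correct / n

test_loss, test_acc = evaluate(net, test_loader, criterion)
print('Test Accuracy = {}'.format(test_acc))
```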
``` import os import json import random import csv import math import pandas as pd from sklearn.preprocessing import MinMaxScaler file_loc = 'mprobc_100kb.txt' with open(file_loc) as input_: stripped = [line.strip() for line in input_] lines = [s.split('\t')[1:] for s in stripped if s] header = [] final_list=[] for counter,line in enumerate(lines[1:]): abc = line[0] header.append(abc) new_list = [abc] + [int(math.fabs(math.floor(float(x)))) for x in line[1:]] final_list.append(new_list) with open('mprobc_chr1_100kb.csv', 'wt') as out_file: tsv_writer = csv.writer(out_file, delimiter=',') for i in final_list: tsv_writer.writerow(i) df = pd.read_csv('mprobc_chr1_100kb.csv',delimiter=',',header=None,index_col=None) df.head() df.describe() df.info() header = df.pop(0) df.head() ``` ### Normalization Scaling The values between zero and one ``` scaler = MinMaxScaler() scaled_values = scaler.fit_transform(df) df.loc[:,:] = scaled_values df.describe() frequency_threshold = 0.050 df2 = df thres_inter_tar = [] thres_inter_sou = [] frequency = [] size = [] for chr_loc, i in enumerate(df2.iterrows()): list_ = list(i[1]) # print(i[1]) for counter,j in enumerate(list_): if j >= frequency_threshold: # print(j) if header[counter] == header[chr_loc]: continue else: thres_inter_tar.append(header[counter]) frequency.append(j) thres_inter_sou.append(header[chr_loc]) size.append(random.randint(1,9)) else: continue df_write = pd.DataFrame({ 'source':thres_inter_tar, 'target':thres_inter_sou, 'weight':frequency, 'size':size, }) df_write.to_csv('cis.csv',index=False) df_write import os import json import random import csv import math import pandas as pd from sklearn.preprocessing import MinMaxScaler file_loc = 'mprobc_100kb.txt' with open(file_loc) as input_: stripped = [line.strip() for line in input_] lines = [s.split('\t')[1:] for s in stripped if s] header = [] final_list=[] for counter,line in enumerate(lines[1:]): abc = line[0] header.append(abc) new_list = [abc] + [int(math.fabs(math.floor(float(x)))) for x in line[1:]] final_list.append(new_list) with open('mprobc_100kb.csv', 'wt') as out_file: tsv_writer = csv.writer(out_file, delimiter=',') for i in final_list: tsv_writer.writerow(i) df = pd.read_csv('mprobc_100kb.csv',delimiter=',',header=None,index_col=None) header = df.pop(0) scaler = MinMaxScaler() scaled_values = scaler.fit_transform(df) df.loc[:,:] = scaled_values frequency_threshold = 0.050 thres_inter_tar = [] thres_inter_sou = [] frequency = [] for chr_loc, i in enumerate(df.iterrows()): list_ = list(i[1]) # print(i[1]) for counter,j in enumerate(list_): if j >= frequency_threshold: # print(j) if header[counter] == header[chr_loc]: continue else: thres_inter_tar.append(header[counter]) frequency.append(j) thres_inter_sou.append(header[chr_loc]) else: continue df_write = pd.DataFrame({ 'source':thres_inter_tar, 'target':thres_inter_sou, 'weight':frequency, }) df_write.to_csv('interactions.csv',index=False) ```
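The nested loops above rebuild the edge list one cell at a time, which scales poorly for larger matrices. As a sketch of a vectorized alternative (assuming, as the loops do, that `df` is square and its column order matches the row order stored in `header`; variable names other than those are ours):

```
# Vectorized construction of the thresholded interaction list using stack().
mat = df.copy()
mat.index = header.values    # row labels
mat.columns = header.values  # column labels, same ordering as the rows
edges = mat.stack().reset_index()
edges.columns = ['target', 'source', 'weight']  # row label -> target, column label -> source
edges = edges[(edges['weight'] >= frequency_threshold) & (edges['source'] != edges['target'])]
edges[['source', 'target', 'weight']].head()
```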
# Strong Edges Network VS ACDC Fully Connected Network ``` import DSGRN from DSGRN import * import cProfile import sys sys.setrecursionlimit(10**8) sys.path.insert(0,'../src') import PhenotypeGraphFun import CondensationGraph_iter import ReducePhenotypeGraph import PhenotypeGraphviz import pickle import Hb_Kni_high2low database_s = Database("/home/elizabeth/Desktop/ACDC/ACDC_StrongEdges.db") network_s = Network("/home/elizabeth/Desktop/ACDC/ACDC_StrongEdges") parameter_graph_s = ParameterGraph(network_s) database_f = Database("/home/elizabeth/Desktop/ACDC/ACDC_Fullconn.db") network_f = Network("/home/elizabeth/Desktop/ACDC/ACDC_Fullconn") parameter_graph_f = ParameterGraph(network_f) Table(["Strong Edges", "Fully Connected"], [[DrawGraph(network_s),DrawGraph(network_f)], ["Parameter Graph size:", "Parameter Graph size:"], [parameter_graph_s.size(),parameter_graph_f.size()]]) with open('StrongEdges_edges_0.pkl','rb') as file: edges_s = pickle.load(file) with open('Fullconn_edges_0.pkl','rb') as file: edges_f = pickle.load(file) len(edges_s) len(edges_f) cond_s, redu_params_s, scc_s = condensation_graph_optimized(edges_s) cond_f, redu_params_f, scc_f = condensation_graph_optimized(edges_f) PGI = [(3,6), (3,6), (300, 2400), (300, 2400), (300, 2400), (36000, 48000), (36000, 48000),(36000, 48000), (138, 120) ] Table(["Strong Edges", "Fully Connected"], [[PhenotypeGraphviz(database_s, network_s, cond_s, 'svg', 's'), PhenotypeGraphviz(database_f, network_f, cond_f, 'svg', 'f')]]) Table(["Strong Edges MG's from Cond", "Fully Connected MG's from Cond"], [[DrawGraph(MorseGraph(DomainGraph(parameter_graph_s.parameter(i)))), DrawGraph(MorseGraph(DomainGraph(parameter_graph_f.parameter(j))))] for i,j in PGI]) Table(["Layer","Strong Edges scc size", "Fully Connected scc size", "Difference (s-f)"], [ [i , len(scc_s[i][0]), len(scc_f[i][0]), len(scc_s[i][0]) - len(scc_f[i][0]) ] for i in scc_s]) Table(["Layer","Strong Edges scc size %", "Fully Connected scc size %", "Difference (s-f)"], [ [i , round((len(scc_s[i][0])/parameter_graph_s.size())*100,2), round((len(scc_f[i][0])/parameter_graph_f.size())*100,2), round((len(scc_s[i][0])/parameter_graph_s.size())*100-(len(scc_f[i][0])/parameter_graph_f.size())*100,2) ] for i in scc_s]) ``` ### Looking at only Hb high in first layer and Kni low in last layer ``` with open('StrongEdges_edges_0r.pkl','rb') as file: edges_sr = pickle.load(file) with open('Fullconn_edges_0r.pkl','rb') as file: edges_fr = pickle.load(file) cond_sr, redu_params_sr, scc_sr = condensation_graph_optimized(edges_sr) cond_fr, redu_params_fr, scc_fr = condensation_graph_optimized(edges_fr) Table(["Fully Connected with Constraints"], [[PhenotypeGraphviz(database_f, network_f, cond_fr, 'svg', 'f')]]) AP35 = {"Hb":[0,1], "Gt":3, "Kr":0, "Kni":0} AP37 = {"Hb":1, "Gt":[0,3], "Kr":0, "Kni":0} AP40 = {"Hb":1, "Gt":0, "Kr":[0,1], "Kni":0} AP45 = {"Hb":[0,1], "Gt":0, "Kr":1, "Kni":0} AP47 = {"Hb":[0,1], "Gt":0, "Kr":1, "Kni":0} AP51 = {"Hb":0, "Gt":0, "Kr":1, "Kni":[0,3]} AP57 = {"Hb":0, "Gt":0, "Kr":[0,1], "Kni":3} AP61 = {"Hb":0, "Gt":0, "Kr":[0,1], "Kni":3} AP63 = {"Hb":0, "Gt":[0,3], "Kr":0, "Kni":3} AP67 = {"Hb":0, "Gt":3, "Kr":0, "Kni":[0,3]} D = [[AP37], [AP40], [AP45], [AP47], [AP51], [AP57], [AP61], [AP63], [AP67]] paramslist = get_paramslist_optimized(database, D, '=') len(paramslist[0]) len(scc_s[0][0]) ```
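For what it's worth, the per-layer comparison shown with `Table` above can also be collected into a pandas DataFrame for further manipulation; a small sketch reusing the `scc_s`, `scc_f`, and parameter graph objects defined above (the DataFrame and column names are ours):

```
import pandas as pd

# Gather the per-layer scc sizes and their share of each parameter graph.
rows = []
for layer in scc_s:
    size_s = len(scc_s[layer][0])
    size_f = len(scc_f[layer][0])
    rows.append({'layer': layer,
                 'strong_scc_size': size_s,
                 'fullconn_scc_size': size_f,
                 'strong_scc_pct': round(100 * size_s / parameter_graph_s.size(), 2),
                 'fullconn_scc_pct': round(100 * size_f / parameter_graph_f.size(), 2)})
scc_summary = pd.DataFrame(rows)
scc_summary
```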
**Database Systems 2017 (ITWS-6960)** **Final Project** **Stephan Zednik** ``` import re import pandas import matplotlib %matplotlib inline ``` # Recipe Data ``` from recipes import RecipeData ``` Open a connection to the recipes database ``` recipes = RecipeData() recipes.is_connected() ``` Use the ``RecipeData`` fluent interface to query recipe data from MongoDB *query - find 5 recipes for alcoholic eggnog that are under 400 calories* ``` recipes.query().title_contains("eggnog").category("alcoholic").max_calories(400).run().head(5) ``` The ``.show()`` method can be used with the fluent interface to show what the compiled pymongo query looks like ``` recipes.query().title_contains("eggnog").category("alcoholic").max_calories(400).show() ``` # Product Data ``` from products import ProductData ``` Open a connection to the products database ``` products = ProductData("host='localhost' dbname='foodfacts' user='foodfacts' password='foodfacts'") ``` Use the ``ProductData`` fluent interface to query for product data from the PostgreSQL db *query - find Kroger-brand products whose name includes 'Enriched Flour'* ``` products.query().name_includes("Enriched Flour").brand("Kroger").run() ``` As with the ``RecipeData`` API, the ``.show()`` method can be used to show the SQL query that would be executed against the PostgreSQL db for a given query construction. ``` products.query().name_includes("Enriched Flour").brand("Kroger").show() ``` The fluent API supports some methods being called multiple times so a query can indicate that more than 1 category for a given product should be matched against. This is treated as an AND over the constraints. The ProductData API does not currently support the ability to provide OR over multiple values, but this could be added at a later time. ``` products.query().category("alcoholic").category("christmas").max_calories(500).show() products.query().category("alcoholic").category("christmas").max_calories(500).run() ``` ## SQL Injection The [psycopg2](http://initd.org/psycopg/docs/usage.html#passing-parameters-to-sql-queries) library is used to escape query parameters for the purpose of defending against SQL-injection attacks. *Test resistance to SQL injection* ``` products.query().name_includes("'); SELECT * from products;").show() ``` The ``'`` of the injection is escaped so the entire injection attack is treated as a literal value instead of as part of a DQL (SELECT), DML (UPDATE, DELETE), or DDL (DROP) command. ``` products.query().name_includes("'); SELECT * from products;").run() products.query().brand("); DROP products; COMMIT; ").run() ``` note - I am a little surprised that the resulting DataFrame does not have columns (something I may look into later...) but a follow-up query over products will show that the ``DROP products;`` query was not successful. ``` # find all products that include peanuts products.query().ingredient("peanut").brand("Kroger").run() ``` # Distributed Queries - finding products based on recipe ingredients I would like to run a query to find potential products to use as ingredients in a recipe. To implement this query I will have to run a query for recipe data against the MongoDB database and a query for product data against the PostgreSQL database and then join the results of the two queries. 
I will write a ``DistributedQuery`` class that provides a fluent interface that allows the user to construct queries to run against the products and recipes databases and then performs a join on recipes.ingredient to product.name and returns a joined panda DataFrame. *example - find kroger-brand products that can be used as ingredients in highly-rated (4+) brunch recipes that contain eggs and bacon and the recipe is under 500 calories* ``` DistributedQuery(products, recipes)\ .recipe_category("brunch")\ .recipe_ingredients_contains_all_of(["bacon", "egg"])\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Kroger")\ .run() ``` To complicate matters, the recipe ingredients array and product name fields do not join nicely. The ingredient field often contains amount and other descriptive information that makes it difficult to match against product names with high degrees of confidence. The product name fields also occasionally include brand and other descriptive information that make it difficult to automatically match against a generic ingredient name. To perform the join I will have to break apart the ingredient and product name fields, join on subsets of the field and then compute a weight value to provide a simple estimation of the confidence of the match. If my key matching criteria is too strict it will miss products that make sense for an ingredient need, if the key matching criteria is too loose there will be a very large number of false matches. I will compute weights on the joins and select (by trial and error) a reasonable default minimum weight to use to filter out low-confidence joins. I will also add a method to the fluent interface ``.min_weight(number)`` that will allow the user to adjust the weight value used to filter out low-confidence joins. 
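To make the weighting concrete before looking at the implementation, here is a tiny worked example (the ingredient and product strings are made up; the key breakdown and the `len(key)/len(field)` weight mirror the `get_text_breakdown` and `compute_weight` helpers defined in the class below):

```
# Hypothetical match: recipe ingredient "1 cup whole milk" vs product name "Whole Milk".
# Suffix keys for the ingredient: "milk", "whole milk", "cup whole milk", "1 cup whole milk";
# suffix keys for the product name: "milk", "whole milk". Rows join on the shared keys.
key = "whole milk"
ingredient = "1 cup whole milk"
product_name = "Whole Milk"

ingredient_key_weight = len(key) / len(product_name)  # 10 / 10 = 1.0
name_key_weight = len(key) / len(ingredient)          # 10 / 16 = 0.625
print(ingredient_key_weight, name_key_weight)         # both above the 0.2 default cut
```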
``` class DistributedQuery: '''Provides a fluent API to construct and run queries against the products and recipe databases and does a 'fuzzy' join of the query results on (recipes.ingredients, products.name)''' def __init__(self, products, recipes): self.products_query = products.query() self.recipes_query = recipes.query() self.min_merge_weight = 0.2 @staticmethod def get_text_breakdown(ingredient): if not ingredient: return [] ingredient = ingredient.strip()#.split(',')[0] _last = len(ingredient) run = True parts = [] while run: idx = ingredient.rfind(" ", 0, _last) _last = idx if idx == -1: parts.append(ingredient.lower()) run = False else: parts.append(ingredient[idx:].strip(" ,()-").lower()) return parts def min_weight(self, weight): self.min_merge_weight = weight return self def product_category(self, category): self.products_query = self.products_query.category(category) return self def product_max_calories(self, max_calories): self.products_query = self.products_query.max_calories(max_calories) return self def product_brand(self, brand): self.products_query = self.products_query.brand(brand) return self def recipe_max_calories(self, max_calories): self.recipes_query = self.recipes_query.max_calories(max_calories) return self def recipe_min_rating(self, min_rating): self.recipes_query = self.recipes_query.min_rating(min_rating) return self def recipe_ingredients_contains_all_of(self, ingredients): self.recipes_query = self.recipes_query.ingredient_all_of(ingredients) return self def recipe_category(self, category): self.recipes_query = self.recipes_query.category(category) return self def recipe_category_any_of(self, categories): self.recipes_query = self.recipes_query.category_any_of(categories) return self def recipe_category_all_of(self, categories): self.recipes_query = self.recipes_query.category_all_of(categories) return self @staticmethod def products_add_name_key(df): df = df.add_prefix("product_") rows = [] _ = df.apply(lambda row: [rows.append(row.tolist()+[name_part]) for name_part in DistributedQuery.get_text_breakdown(row["product_name"])], axis=1) return pandas.DataFrame(rows, columns=df.columns.tolist()+['name_key']).drop_duplicates() @staticmethod def recipes_add_ingredient_key(df): df = df.add_prefix("recipe_") rows = [] _ = df.apply(lambda row: [rows.append(row.tolist()+[ingredient]) for ingredient in row["recipe_ingredients"]], axis=1) _df = pandas.DataFrame(rows, columns=df.columns.tolist()+['recipe_ingredient']).drop_duplicates(['recipe_title','recipe_ingredient']) rows = [] for index, row in _df.iterrows(): for part in DistributedQuery.get_text_breakdown(row["recipe_ingredient"]): rows.append(row.tolist()+[part]) return pandas.DataFrame(rows, columns=_df.columns.tolist()+['ingredient_key']).drop_duplicates(['recipe_title', 'recipe_ingredient', 'ingredient_key']) @staticmethod def compute_weight(key, field): if not key or not field: return 0 if pandas.isnull(key) or pandas.isnull(field): return 0 return len(key)/len(field) @staticmethod def compute_merge_weight(df): df['ingredient_key_weight'] = df.apply(lambda row: DistributedQuery.compute_weight(row['ingredient_key'], row['product_name']), axis=1) df['name_key_weight'] = df.apply(lambda row: DistributedQuery.compute_weight(row['name_key'], row['recipe_ingredient']), axis=1) return df @staticmethod def merge_recipes_and_products(df_recipes, df_products): df_recipes2 = DistributedQuery.recipes_add_ingredient_key(df_recipes) df_products2 = DistributedQuery.products_add_name_key(df_products) df = 
pandas.merge(df_recipes2, df_products2, how='left', left_on="ingredient_key", right_on="name_key", suffixes=('_recipe','_product')) df = DistributedQuery.compute_merge_weight(df) df.drop('ingredient_key', axis=1, inplace=True) df.drop('name_key', axis=1, inplace=True) df = df.drop_duplicates(['recipe_title', 'recipe_ingredient', 'product_brands', 'product_name']) return df def run(self): '''run the queries, merge via 'fuzzy join' and return the merged result as a pandas DataFrame ''' df_recipes = self.recipes_query.run() df_products = self.products_query.run() df_merged = self.merge_recipes_and_products(df_recipes, df_products) if self.min_merge_weight > 0: df_merged = df_merged.loc[(df_merged['ingredient_key_weight'] > self.min_merge_weight) & (df_merged['name_key_weight'] > self.min_merge_weight)] return df_merged def show(self): '''return a dictionary showing the recipe and product queries that would be run and merged''' return {"products": self.products_query.show(), "recipes": self.recipes_query.show()} default_columns = ['recipe_title', 'recipe_ingredient', 'recipe_rating', 'recipe_calories', 'product_name', 'product_brands', 'ingredient_key_weight', 'name_key_weight'] def filter_by_recipe_title(df, recipe, columns=default_columns): '''helper function to easily filter query results by a recipe name''' return df.loc[df['recipe_title'].str.contains(recipe)][columns] def simplify(df, columns=default_columns): '''helper function to reduce data frame to a subset of columns''' return df[columns] ``` *query - find kroger-brand products that can be used as ingredients in highly-rated (4+) brunch recipes that contain eggs and bacon and for which recipe is under 500 calories* ``` %time df = DistributedQuery(products, recipes)\ .recipe_category("brunch")\ .recipe_ingredients_contains_all_of(["bacon", "egg"])\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Kroger")\ .run() simplify(df) ``` *Re-run the query, this time looking for Safeway-brand products* ``` %time df2 = DistributedQuery(products, recipes)\ .recipe_category("brunch")\ .recipe_ingredients_contains_all_of(["bacon", "egg"])\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Safeway")\ .run() simplify(df2) ``` try the product, recipe, and distributedQuery interfaces yourself! examples: - ``products.query().name_includes("honey").brand("Trader Joe's").max_sugar(75).run()`` - ``recipes.query().category_any_of(["easter", "christmas", "thanksgiving"]).min_rating(4).run()`` - ``DistributedQuery(products, recipes)\ .recipe_category("christmas")\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Trader Joe's")\ .run()``
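The ``.min_weight()`` method described earlier but not demonstrated above can be used to trade recall for precision; for example, re-running the Kroger brunch query with a stricter cut than the 0.2 default:

```
# Same brunch query as above, but keep only joins where both weights exceed 0.4
df_strict = DistributedQuery(products, recipes)\
    .recipe_category("brunch")\
    .recipe_ingredients_contains_all_of(["bacon", "egg"])\
    .recipe_min_rating(4)\
    .recipe_max_calories(500)\
    .product_brand("Kroger")\
    .min_weight(0.4)\
    .run()

simplify(df_strict)
```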
github_jupyter
import re import pandas import matplotlib %matplotlib inline from recipes import RecipeData recipes = RecipeData() recipes.is_connected() recipes.query().title_contains("eggnog").category("alcoholic").max_calories(400).run().head(5) recipes.query().title_contains("eggnog").category("alcoholic").max_calories(400).show() from products import ProductData products = ProductData("host='localhost' dbname='foodfacts' user='foodfacts' password='foodfacts'") products.query().name_includes("Enriched Flour").brand("Kroger").run() products.query().name_includes("Enriched Flour").brand("Kroger").show() products.query().category("alcoholic").category("christmas").max_calories(500).show() products.query().category("alcoholic").category("christmas").max_calories(500).run() products.query().name_includes("'); SELECT * from products;").show() products.query().name_includes("'); SELECT * from products;").run() products.query().brand("); DROP products; COMMIT; ").run() # find all products that include peanuts products.query().ingredient("peanut").brand("Kroger").run() DistributedQuery(products, recipes)\ .recipe_category("brunch")\ .recipe_ingredients_contains_all_of(["bacon", "egg"])\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Kroger")\ .run() class DistributedQuery: '''Provides a fluent API to construct and run queries against the products and recipe databases and does a 'fuzzy' join of the query results on (recipes.ingredients, products.name)''' def __init__(self, products, recipes): self.products_query = products.query() self.recipes_query = recipes.query() self.min_merge_weight = 0.2 @staticmethod def get_text_breakdown(ingredient): if not ingredient: return [] ingredient = ingredient.strip()#.split(',')[0] _last = len(ingredient) run = True parts = [] while run: idx = ingredient.rfind(" ", 0, _last) _last = idx if idx == -1: parts.append(ingredient.lower()) run = False else: parts.append(ingredient[idx:].strip(" ,()-").lower()) return parts def min_weight(self, weight): self.min_merge_weight = weight return self def product_category(self, category): self.products_query = self.products_query.category(category) return self def product_max_calories(self, max_calories): self.products_query = self.products_query.max_calories(max_calories) return self def product_brand(self, brand): self.products_query = self.products_query.brand(brand) return self def recipe_max_calories(self, max_calories): self.recipes_query = self.recipes_query.max_calories(max_calories) return self def recipe_min_rating(self, min_rating): self.recipes_query = self.recipes_query.min_rating(min_rating) return self def recipe_ingredients_contains_all_of(self, ingredients): self.recipes_query = self.recipes_query.ingredient_all_of(ingredients) return self def recipe_category(self, category): self.recipes_query = self.recipes_query.category(category) return self def recipe_category_any_of(self, categories): self.recipes_query = self.recipes_query.category_any_of(categories) return self def recipe_category_all_of(self, categories): self.recipes_query = self.recipes_query.category_all_of(categories) return self @staticmethod def products_add_name_key(df): df = df.add_prefix("product_") rows = [] _ = df.apply(lambda row: [rows.append(row.tolist()+[name_part]) for name_part in DistributedQuery.get_text_breakdown(row["product_name"])], axis=1) return pandas.DataFrame(rows, columns=df.columns.tolist()+['name_key']).drop_duplicates() @staticmethod def recipes_add_ingredient_key(df): df = df.add_prefix("recipe_") rows = [] _ = 
df.apply(lambda row: [rows.append(row.tolist()+[ingredient]) for ingredient in row["recipe_ingredients"]], axis=1) _df = pandas.DataFrame(rows, columns=df.columns.tolist()+['recipe_ingredient']).drop_duplicates(['recipe_title','recipe_ingredient']) rows = [] for index, row in _df.iterrows(): for part in DistributedQuery.get_text_breakdown(row["recipe_ingredient"]): rows.append(row.tolist()+[part]) return pandas.DataFrame(rows, columns=_df.columns.tolist()+['ingredient_key']).drop_duplicates(['recipe_title', 'recipe_ingredient', 'ingredient_key']) @staticmethod def compute_weight(key, field): if not key or not field: return 0 if pandas.isnull(key) or pandas.isnull(field): return 0 return len(key)/len(field) @staticmethod def compute_merge_weight(df): df['ingredient_key_weight'] = df.apply(lambda row: DistributedQuery.compute_weight(row['ingredient_key'], row['product_name']), axis=1) df['name_key_weight'] = df.apply(lambda row: DistributedQuery.compute_weight(row['name_key'], row['recipe_ingredient']), axis=1) return df @staticmethod def merge_recipes_and_products(df_recipes, df_products): df_recipes2 = DistributedQuery.recipes_add_ingredient_key(df_recipes) df_products2 = DistributedQuery.products_add_name_key(df_products) df = pandas.merge(df_recipes2, df_products2, how='left', left_on="ingredient_key", right_on="name_key", suffixes=('_recipe','_product')) df = DistributedQuery.compute_merge_weight(df) df.drop('ingredient_key', axis=1, inplace=True) df.drop('name_key', axis=1, inplace=True) df = df.drop_duplicates(['recipe_title', 'recipe_ingredient', 'product_brands', 'product_name']) return df def run(self): '''run the queries, merge via 'fuzzy join' and return the merged result as a pandas DataFrame ''' df_recipes = self.recipes_query.run() df_products = self.products_query.run() df_merged = self.merge_recipes_and_products(df_recipes, df_products) if self.min_merge_weight > 0: df_merged = df_merged.loc[(df_merged['ingredient_key_weight'] > self.min_merge_weight) & (df_merged['name_key_weight'] > self.min_merge_weight)] return df_merged def show(self): '''return a dictionary showing the recipe and product queries that would be run and merged''' return {"products": self.products_query.show(), "recipes": self.recipes_query.show()} default_columns = ['recipe_title', 'recipe_ingredient', 'recipe_rating', 'recipe_calories', 'product_name', 'product_brands', 'ingredient_key_weight', 'name_key_weight'] def filter_by_recipe_title(df, recipe, columns=default_columns): '''helper function to easily filter query results by a recipe name''' return df.loc[df['recipe_title'].str.contains(recipe)][columns] def simplify(df, columns=default_columns): '''helper function to reduce data frame to a subset of columns''' return df[columns] %time df = DistributedQuery(products, recipes)\ .recipe_category("brunch")\ .recipe_ingredients_contains_all_of(["bacon", "egg"])\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Kroger")\ .run() simplify(df) %time df2 = DistributedQuery(products, recipes)\ .recipe_category("brunch")\ .recipe_ingredients_contains_all_of(["bacon", "egg"])\ .recipe_min_rating(4)\ .recipe_max_calories(500)\ .product_brand("Safeway")\ .run() simplify(df2)
## SEAI 2021 - Python - Lab 1
# Intro to Python

Vincenzo Nardelli - Niccolò Salvini

# DBSCAN

DBSCAN stands for *Density-based spatial clustering of applications with noise*. It is a data clustering algorithm developed in 1996 by Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. It is a non-parametric, density-based clustering algorithm: given a set of points in some space, it groups together points that are closely packed (points with many nearby neighbours), flagging as outliers the points that lie alone in low-density regions (whose nearest neighbours are too far away).

The major benefits with respect to k-means clustering and hierarchical clustering:

- The user is not required to set the number of clusters in advance (no need for the elbow method, used to find the right number of clusters by minimising the SSD)
- It is very robust to outliers, which methods such as SVMs largely suffer from
- It can also recover non-spherical clusters.

<br>

In order to fit a DBSCAN the user is required to set 2 parameters:

1. **eps**: the maximum distance between two observations for them to be considered neighbours, i.e. part of the same dense region
2. **minPts**: the minimum number of neighbouring observations required to form a cluster.

In this notebook we are going to cluster with DBSCAN, demonstrating better performance than the other two algorithms.
<br><br>
At first let's import the required libraries

```
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_moons

plt.rcParams["figure.figsize"] = (14, 10)
sns.set()
```

Remember the function <span style="font-family: Monaco">make_blobs</span>? <span style="font-family: Monaco">make_moons</span> randomly generates moons of data instead of blobs

```
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)

plt.scatter(X[:,0],X[:,1])
plt.show()
```

With this kind of data it is impossible to reach a decent clustering with the two algorithms already presented. Let's see this behaviour.

## K-means

```
from sklearn.cluster import KMeans

km = KMeans(n_clusters=2)
km.fit(X)
y_km = km.predict(X)

plt.scatter(X[:, 0], X[:, 1], c=y_km,cmap='viridis')
plt.show()
```

## Hierarchical Clustering (agglomerative)

```
from sklearn.cluster import AgglomerativeClustering

ac = AgglomerativeClustering(n_clusters=2, linkage="complete")
y_ac = ac.fit_predict(X)

plt.scatter(X[:, 0], X[:, 1], c=y_ac,cmap='viridis')
plt.show()
```

## DBSCAN

Now let's try DBSCAN. As already pointed out, **we are not required to set the number of clusters**; instead **eps** and **minPts** are the two parameters of interest.

```
from sklearn.cluster import DBSCAN

dbscan = DBSCAN(eps=0.25, min_samples=5)
y_dbscan = dbscan.fit_predict(X)

plt.scatter(X[:, 0], X[:, 1], c=y_dbscan,cmap='viridis')
#core_pts = dbscan.components_
#plt.scatter(x=core_pts[:, 0], y=core_pts[:, 1], c='red', s=200, alpha=0.5);
plt.show()
```

DBSCAN did its best and perfectly separates the data into the 2 "moon" clusters.

## DBSCAN limits

The main problem of DBSCAN is the tuning of its two parameters, which can be difficult when the clusters have wildly different densities. That happens when the data is unbalanced, i.e. the numerosity of one cluster is much greater than that of the other (in a binary scenario). A common heuristic for picking **eps** is the k-distance ("elbow") plot; a minimal sketch is given after the references.

## References

- Wikipedia Contributors (2021). DBSCAN. [online] Wikipedia. Available at: https://en.wikipedia.org/wiki/DBSCAN [Accessed 23 Apr. 2021].
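The `eps=0.25` used in the DBSCAN cell above was picked by hand. As a minimal sketch of the k-distance heuristic mentioned in the limits section (not part of the original lab), one can sort the distance of every point to its `min_samples`-th nearest neighbour and look for the knee of the curve:

```python
# Minimal sketch (not from the original lab): the k-distance plot heuristic
# for choosing eps, with k tied to min_samples.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.neighbors import NearestNeighbors

X, _ = make_moons(n_samples=200, noise=0.05, random_state=0)

min_pts = 5
nn = NearestNeighbors(n_neighbors=min_pts).fit(X)

# kneighbors(X) counts each point as its own nearest neighbour, so the last
# column is the distance to the (min_pts - 1)-th true neighbour.
distances, _ = nn.kneighbors(X)
k_dist = np.sort(distances[:, -1])

plt.plot(k_dist)
plt.xlabel('points sorted by k-distance')
plt.ylabel('distance to the {}-th nearest neighbour'.format(min_pts))
plt.show()

# The "knee" of this curve suggests a reasonable eps; it can then be compared
# against the eps=0.25 used in the DBSCAN fit above.
```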
# Numpy " NumPy is the fundamental package for scientific computing with Python. It contains among other things: * a powerful N-dimensional array object * sophisticated (broadcasting) functions * useful linear algebra, Fourier transform, and random number capabilities " -- From the [NumPy](http://www.numpy.org/) landing page. Before learning about numpy, we introduce.. ### The NXOR Function Many of the exercises involve working with the $\mathrm{NXOR} \colon \; [-1, 1]^2 \rightarrow \{-1, +1\}$ function defined as $$ (x_1, x_2) \longmapsto \mathrm{sgn}(x_1 \cdot x_2) .$$ where for $x_1 \cdot x_2 = 0$ we let $\mathrm{NXOR}(x_1, x_2) = -1$. We can visualize this function as ![A set of points in \[-1, +1\]^2 with green and red markers denoting the value assigned to them by the NXOR function](https://github.com/tmlss2018/PracticalSessions/blob/master/assets/nxor_labels.png?raw=true) where each point in $ [-1, 1]^2$ is marked by green (+1) or red (-1) according to the value assigned to it by the NXOR function. Over the course of the intro lab exercises we will 1. Generate such data with numpy. 2. Create the plot above with matplotlib. 3. Train a model to learn this function. ### Setup and imports. Run the following cell. ``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np ``` ### Random numbers in numpy ``` np.random.random((3, 2)) # Array of shape (3, 2), entries uniform in [0, 1). ``` Note that (as usual in computing) numpy produces pseudo-random numbers based on a seed, or more precisely a random state. In order to make random sequences and calculations based on reproducible, use * the [`np.random.seed()`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.seed.html) function to set the default global seed, or * the [`np.random.RandomState`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.random.RandomState.html) class which is a container for a pseudo-random number generator and exposes methods for generating random numbers. ``` np.random.seed(0) print(np.random.random(2)) # Reset the global random state to the same state. np.random.seed(0) print(np.random.random(2)) ``` ### Numpy Array Operations 1 There are a large number of operations you can run on any numpy array. Here we showcase some common ones. ``` # Create one from hard-coded data: ar = np.array([ [0.0, 0.2], [0.9, 0.5], [0.3, 0.7], ], dtype=np.float64) # float64 is the default. print('The array:\n', ar) print() print('data type', ar.dtype) print('transpose\n', ar.T) print('shape', ar.shape) print('reshaping an array', ar.reshape((6))) ``` Many numpy operations are available both as np module functions as well as array methods. For example, we can also reshape as ``` print('reshape v2', np.reshape(ar, (6, 1))) ``` ### Numpy Indexing and selectors Here are some basic indexing examples from numpy. ``` ar ar[0, 1] # row, column ar[:, 1] # slices: select all elements across the first (0th) axis. ar[1:2, 1] # slices with syntax from:to, selecting [from, to). ar[1:, 1] # Omit `to` to go all the way to the end ar[:2, 1] # Omit `from` to start from the beginning ar[0:-1, 1] # Use negative indexing to count elements from the back. ``` We can also pass boolean arrays as indices. These will exactly define which elements to select. ``` ar[np.array([ [True, False], [False, True], [True, False], ])] ``` Boolean arrays can be created with logical operations, then used as selectors. Logical operators apply elementwise. 
``` ar_2 = np.array([ # Nearly the same as ar [0.0, 0.1], [0.9, 0.5], [0.0, 0.7], ]) # Where ar_2 is smaller than ar, let ar_2 be -inf. ar_2[ar_2 < ar] = -np.inf ar_2 ``` ### Numpy Operations 2 ``` print('array:\n', ar) print() print('sum across axis 0 (rows):', ar.sum(axis=0)) print('mean', ar.mean()) print('min', ar.min()) print('row-wise min', ar.min(axis=1)) ``` We can also take element-wise minimums between two arrays. We may want to do this when "clipping" values in a matrix, that is, setting any values larger than, say, 0.6, to 0.6. We would do this in numpy with.. ### Broadcasting (and selectors) ``` np.minimum(ar, 0.6) ``` Numpy automatically turns the scalar 0.6 into an array the same size as `ar` in order to take element-wise minimum. Broadcasting can save us a lot of typing, but in complicated cases it may require a good understanding of the exact rules followed. Some references: * [Numpy page that explains broadcasting](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html) * [Similar content with some visualizations](http://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc) Here we follow with a selection of other useful broadcasting examples. ``` # Centering our array. print('centered array:\n', ar - np.mean(ar)) ``` Note that `np.mean()` was a scalar, but it is automatically subtracted from every element. We can write the minimum function ourselves, as well. ``` clipped_ar = ar.copy() # So that ar is not modified. clipped_ar[clipped_ar > 0.6] = 0.6 clipped_ar ``` A few things happened here: 1. 0.6 was broadcast in for the greater than (>) operation 2. The greater than operation defined a selector, selecting a subset of the elements of the array 3. 0.6 was broadcast to the right number of elements for assignment. Vectors may also be broadcast into matrices. ``` vec = np.array([1, 2]) ar + vec ``` Here the shapes of the involved arrays are: ``` ar (2d array): 2 x 2 vec (1d array): 2 Result (2d array): 2 x 2 ``` When either of the dimensions compared is one (even implicitly, like in the case of `vec`), the other is used. In other words, dimensions with size 1 are stretched or “copied” to match the other. Here, this meant that the `[1, 2]` row was repeated to match the number of rows in `ar`, then added together. If there is a shape mismatch, you will be informed. To try, uncomment the line below and run it. ``` #ar + np.array([[1, 2, 3]]) ``` #### Exercise Broadcast and add the vector `[10, 20, 30]` across the columns of `ar`. You should get ``` array([[10. , 10.2], [20.9, 20.5], [30.3, 30.7]]) ``` ``` #@title Code # Recall that you can use vec.shape to verify that your array has the # shape you expect. ### Your code here ### #@title Solution vec = np.array([[10], [20], [30]]) ar + vec ``` ### `np.newaxis` We can use another numpy feature, `np.newaxis` to simply form the column vector that was required for the example above. It adds a singleton dimension to arrays at the desired location: ``` vec = np.array([1, 2]) vec.shape vec[np.newaxis, :].shape vec[:, np.newaxis].shape ``` Now you know more than enough to generate some example data for our `NXOR` function. ### Exercise: Generate Data for NXOR Write a function `get_data(num_examples)` that returns two numpy arrays * `inputs` of shape `num_examples x 2` with points selected uniformly from the $[-1, 1]^2$ domain. * `labels` of shape `num_examples` with the associated output of `NXOR`. ``` #@title Code def get_data(num_examples): # Replace with your code. 
return np.zeros((num_examples, 2)), np.zeros((num_examples)) #@title Solution # Solution 1. def get_data(num_examples): inputs = 2*np.random.random((num_examples, 2)) - 1 labels = np.prod(inputs, axis=1) labels[labels <= 0] = -1 labels[labels > 0] = 1 return inputs, labels # Solution 1. # def get_data(num_examples): # inputs = 2*np.random.random((num_examples, 2)) - 1 # labels = np.sign(np.prod(inputs, axis=1)) # labels[labels == 0] = -1 # return inputs, labels get_data(4) ``` ## That's all, folks! For now. ``` ```
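The exercise list at the top of this notebook also promises a matplotlib plot of the NXOR data. A minimal sketch, assuming the `get_data` solution above has been run, colouring +1 points green and -1 points red as in the reference figure:

```python
# Minimal sketch (assumes the get_data solution above): scatter plot of the
# generated NXOR data, green for +1 and red for -1.
import matplotlib.pyplot as plt

inputs, labels = get_data(1000)

plt.scatter(inputs[labels == 1, 0], inputs[labels == 1, 1],
            c='green', marker='.', label='+1')
plt.scatter(inputs[labels == -1, 0], inputs[labels == -1, 1],
            c='red', marker='.', label='-1')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.legend()
plt.show()
```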
<a href="https://colab.research.google.com/github/Bhavani-Rajan/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module3-make-explanatory-visualizations/LS_DS_123_Make_Explanatory_Visualizations_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # ASSIGNMENT ### 1) Replicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit). Get caught up to where we got our example in class and then try and take things further. How close to "pixel perfect" can you make the lecture graph? Once you have something that you're proud of, share your graph in the cohort channel and move on to the second exercise. ### 2) Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/). **WARNING**: There are a lot of very custom graphs and tables at the above link. I **highly** recommend not trying to reproduce any that look like a table of values or something really different from the graph types that we are already familiar with. Search through the posts until you find a graph type that you are more or less familiar with: histogram, bar chart, stacked bar chart, line chart, [seaborn relplot](https://seaborn.pydata.org/generated/seaborn.relplot.html), etc. Recreating some of the graphics that 538 uses would be a lot easier in Adobe photoshop/illustrator than with matplotlib. - If you put in some time to find a graph that looks "easy" to replicate you'll probably find that it's not as easy as you thought. - If you start with a graph that looks hard to replicate you'll probably run up against a brick wall and be disappointed with your afternoon. ``` # Your Work Here ## basics of matplotlib from youtube video. import matplotlib.pyplot as plt x1 = [1,2,3,4,5] y1 = [1,4,9,16,25] x2 = [6,7,8,9,10] y2 = [1,8,27,64,125] #plt.plot(x1,y1,label='square') #plt.plot(x2,y2,label='cube') plt.bar(x1,y1,label='square',color='r') plt.bar(x2,y2,label='cube',color='c') plt.xlabel('Actual') plt.ylabel('y axis') plt.title('squared Vs cubed values') plt.legend() plt.show() #example from fivethirtyeight website from IPython.display import display, Image url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png' example = Image(url=url, width=400) display(example) # code that we did in the morning! 
import matplotlib.pyplot as plt import numpy as np import pandas as pd plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) #got the color matched using hex value fake.plot.bar(color='#E67141', width=0.9); import matplotlib.pyplot as plt import matplotlib.ticker as mtick # select the style plt.style.use('fivethirtyeight') #generate our figure fig = plt.figure() # generate our axes (center section) for a plot ax = fake.plot.bar(color='#ED713A', width=0.9) # to set the background to white in color ax.set(facecolor='white') # also for the fig fig.patch.set(facecolor='white') fig.patch.set_alpha(0.1) ax.patch.set_alpha(0.1) # multiple way of adding title #fig.suptitle('title added to fig') #ax.text(x=1,y=35,s='title using ax.text') #ax.set_title('title using ax.set_title'); # There is more than one way to set a title on a graph # matplotlib is not "pythonic" # fig.suptitle('test title') # ax.set_title("title") # If we want a really custom title # We can just use a text annotation and make it look like a title ax.text(x=-2.2, y=46, s="'An Inconvenient Sequel: Truth To Power' is divisive", fontweight='bold', fontsize=12); ax.text(x=-2.2, y=43, s="IMDb ratings for the film as of Aug. 29", fontsize=11) # Set our axis labels (These are just text objects that have been rotated!!) ax.set_ylabel("Percent of total votes", fontsize=9, fontweight='bold', labelpad=10) ax.set_xlabel("Rating", fontsize=9, fontweight='bold', labelpad=10) # Fix our tick lables. Yet again, these are text and use the text parameters! ax.set_xticklabels(range(1,11), rotation=0) ax.set_yticks(range(0,50,10)) ax.set_yticklabels(range(0, 50, 10)) # to format the percentage sign on y axis fmt = '%.0f%%' # Format you want the ticks, e.g. '40%' yticks = mtick.FormatStrFormatter(fmt) ax.yaxis.set_major_formatter(yticks) # to set the tick color to grey ax.tick_params(axis='x', colors='gray') ax.tick_params(axis='y', colors='gray') plt.show() ``` # STRETCH OPTIONS ### 1) Reproduce one of the following using the matplotlib or seaborn libraries: - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) - or another example of your choice! ### 2) Make more charts! Choose a chart you want to make, from [Visual Vocabulary - Vega Edition](http://ft.com/vocabulary). Find the chart in an example gallery of a Python data visualization library: - [Seaborn](http://seaborn.pydata.org/examples/index.html) - [Altair](https://altair-viz.github.io/gallery/index.html) - [Matplotlib](https://matplotlib.org/gallery.html) - [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html) Reproduce the chart. [Optionally, try the "Ben Franklin Method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes. Take notes. Consider sharing your work with your cohort! ``` # More Work Here ```
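For the "make more charts" stretch option, here is a minimal sketch that stays grounded in data already defined in this notebook: the same `fake` ratings series, redrawn as a horizontal bar chart (the orientation is the only new choice; the colour and style are the ones used above):

```python
# Minimal sketch for the "make more charts" stretch: reuse the `fake` ratings
# series from the lesson code above, but draw it as a horizontal bar chart.
# The 'fivethirtyeight' style was already selected earlier in this notebook.
ax = fake.plot.barh(color='#ED713A')

ax.set_xlabel('Percent of total votes')
ax.set_ylabel('Rating')
ax.set_title('IMDb ratings, horizontal remix')

plt.show()
```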
```
import pylab as pl
import pandas as pd
import numpy as np

import matplotlib.pyplot as plt
import matplotlib.cm as cm

import astropy.constants as const
import astropy.units as u

from astropy.table import Table
from tools.legacy import cutout
from IPython.display import Image
```

# Beyond the obvious

There are few things in the Universe more impressive than a giant cluster, with up to 1000 galaxies packed into a small volume.

```
Image(filename='images/legacy_tsz.png')
```

But how do we know that these galaxies share the same space, rather than being chance projections on the sky of galaxies lying at very different distances? With their spectra, of course! By measuring the redshift of each galaxy, we find that they are close neighbours both along the line of sight and in projection on the sky. That makes clusters incredibly rare and worthy of dedicated attention. Perhaps there is more to them than meets the eye?

```
Image(filename='images/optical_retriever.jpg', width=500)
Image(filename='images/ir_retriever.jpg', width=500)
```

What differences are there between these two images? Take a minute to think about it!

```
Image(filename='images/thinking.jpeg', width=500)
```

Got an answer yet? No? Fine, take another minute (hint: there is a clue in the file name).

```
Image(filename='images/still_thinking.jpg', width=500)
Image(filename='images/happy.jpg', width=500)
```

Ah, you've got it! That's right: when we put on special infrared glasses we see a measure of the temperature of our favourite dog, rather than how reflective its coat is, as we do in "the visible". So, what do we see in a galaxy cluster outside the visible range?

```
Image(filename='images/ChandraClusters.jpg', width=1000)
```

Wow! Well, that was worth it for the image alone. What have we learned? By observing at a wavelength much _shorter_ than the visible, we see events happening at very high energy. Here, in the X-ray, we see the very hot 'intracluster medium' that fills the space between the galaxies. This gas is simply hydrogen and helium, like what would light a Bunsen burner, but it is far too hot to collapse and form stars.

Taking a picture like this was a big deal. The atmosphere is opaque to X-rays, so it was not until the 1950s, when we could go to space, that we got to see this completely new world. Remember, until SpaceX came along, satellites were a big deal, since rocket launches cost a lot of money. Rockets also have to be small to minimise weight and air resistance, so telescopes in space have to be small; we will see more about [SpaceX](https://www.spacex.com/) in another notebook.

```
Image(filename='images/Launch.jpg', width=1000)
```

But the designers of the [Chandra](https://www.nasa.gov/mission_pages/chandra/main/index.html) satellite decided that this new frontier, X-ray astronomy, was worth exploring.

```
Image(filename='images/Chandra.png', width=1000)
```

So, is there an infrared image of a galaxy cluster? Of course; it would give us information about gas that is a bit less hot. But can we be smarter and find new approaches that do not depend on the gas being hot enough to emit radiation? Sure, we are astronomers, that is our job!

What if we look for the effect that the presence of the cluster has on light emitted by other sources? That would do the job, right? As usual, Einstein gave us the first hint by predicting that light is deflected by the matter it passes as it travels through the Universe. So much for Newton and his straight lines! For a very distant background source, we would see several images as the light bends around the lens:

```
Image(filename='images/StrongLensDiagram.jpg', width=1000)
```

For a perfect alignment of the source and the lens, we can even see a complete Einstein ring, made up of overlapping images:

```
Image(filename='images/StrongLens.jpeg', width=1000)
```

This is incredibly useful since, according to Einstein, the radius of the ring tells us exactly the mass enclosed within it.

```
Image(filename='images/ClusterStrongLensing.jpg', width=1000)
```

For a cluster, rather than a single source, things get complicated: in practice we only see partial arcs, produced by the combined effect of all the matter present.

# The cosmic shadow theatre

Let's keep thinking outside the box a little longer. Instead of the light from distant galaxies, what about the deflection of light coming from the 'Big Bang' itself? Our first notebook introduced the expansion of the Universe and the recent detection of cosmic acceleration. What happens when we run time backwards?

```
Image(filename='images/CMB_Cone.jpg', width=1000)
```

If galaxies are moving apart from each other today, then their distribution must have been even denser in the past. The one mistake to avoid here is thinking there is a single point onto which everything collapses. Rather, all of space becomes denser at the same rate, as matter from farther out gets closer and closer. Things were so hot and so dense in the past that light was emitted everywhere, but it was trapped, as hydrogen continuously absorbed and re-emitted every photon produced. Once the Universe cooled enough, the photons were set free! Since then they have been travelling for 13.8 billion years to reach us, and this is the picture:

```
Image(filename='images/PlanckCMB.jpg', width=1000)
```

Here, the colour fluctuations represent temperature differences, caused by density differences in the volume from which each photon started its journey. The keenest among you will notice that it took another space telescope, [Planck](https://es.wikipedia.org/wiki/Planck_(satélite)), to make these measurements. As the Universe expands, the colour of the light reddens at the same rate. The Universe is very old, so the CMB has a very long wavelength. Is this CMB light also deflected by matter on its way to us? Of course, and that could be the topic of another notebook! But the most interesting thing here is the new effect that the electrons in the cluster also scatter the CMB light from one colour (wavelength) to another.

# The astronomers' golden rule

$\Large \theta = 1.22 \left ( \frac{\lambda}{D} \right ) $

If you have a telescope with a mirror of diameter $D$ [cm], observing light of wavelength $\lambda$ [cm], the angular size of the sources that can be clearly identified (resolved) is $\theta$ [radians] _at best_. Let's see this in action.

```
space = {'CHANDRA': {'BAND': 'XRAY', 'WAVELENGTH': 1.2, 'DIAMETER': 1.2}}  # [Angstroms, Meters].

space['PLANCK'] = {'BAND': 'MICROWAVE', 'FREQUENCY': 44, 'DIAMETER': 1.6}  # [GigaHertz, Meters].
space['PLANCK']['WAVELENGTH'] = 2.9979e8 / space['PLANCK']['FREQUENCY'] / 1.e9  # [Meters].
space['PLANCK']['WAVELENGTH'] *= 1.e10  # [Angstroms].

ground = {'DESI': {'BAND': 'GRZ', 'WAVELENGTH': 4000., 'DIAMETER': 3.8}}  # Revisit.

for altitude in [space, ground]:
    for key in altitude.keys():
        diam = 100. * altitude[key]['DIAMETER']     # [cm]
        wave = 1.e-8 * altitude[key]['WAVELENGTH']  # [cm]

        theta = 1.22 * wave / diam  # [radians]
        theta *= 180. / np.pi       # [degrees]
        theta *= 60.                # [arcmin]

        altitude[key]['BEAMSIZE'] = theta

        print('{: <16}\t{:.1f}\t{:.1e}'.format(key, diam, theta))
```

Now, DESI never actually reaches this 'diffraction-limited' resolution, since the blurring produced by the atmosphere dominates. Planck, meanwhile, has relatively poor resolution: even the smallest source in the Universe would appear to Planck as $\simeq(1/6)$ of the size of the full Moon.

```
Image(filename='images/MoonPlanck.jpg', width=1000)
```

That sucks when you are hunting for galaxy clusters across the Universe! So the [Atacama Cosmology Telescope](https://en.wikipedia.org/wiki/Atacama_Cosmology_Telescope) (ACT) team (see also the [project page](https://act.princeton.edu/)) strapped their gear to the nearest llama (not literally!) and set out to build a _ground-based_ telescope. Why? Because you cannot fit a 6 m telescope in a rocket!

```
Image(filename='images/ACT.jpg', width=1000)
```

It is fair to say that ACT looks more like a bucket than a conventional telescope! But it does a great job of finding clusters.

```
Image(filename='images/act_louis+16.jpg', width=600)
```

Pretty psychedelic, no? The bacteria-like patterns in red and blue are the primary CMB, just as in the Planck case. The circled points are resolved clusters identified by ACT. How? Because CMB light that passed through a cluster changed its frequency by interacting with the electrons there. The result is a shadow cast by the cooler gas around the cluster galaxies! We have only known about this [thermal Sunyaev-Zel'dovich effect](https://es.wikipedia.org/wiki/Efecto_Siunyáiev-Zeldóvich) for _50_ years or so.

This scattering is similar to the reason the sky is bluer far from the Sun and pinker close to it, particularly during sunsets. The air deflects, or "scatters", the blue light from the Sun, so the sky looks blue away from the Sun; close to it, because the air has scattered the blue light away, only the pinker light reaches your eyes.

Let's use ACT's high resolution to zoom in on the cluster:

```
decs = np.loadtxt('dat/comptony/decmap_ra0.96_dec2.08.txt')
ras = np.loadtxt('dat/comptony/ramap_ra0.96_dec2.08.txt')
ys = np.loadtxt('dat/comptony/stampmap_ra0.96_dec2.08.txt')

plt.imshow(ys, extent=(ras.min(), ras.max(), decs.min(), decs.max()), interpolation='nearest', cmap=cm.gist_rainbow)
```

The real beauty is that Planck and ACT measure the cluster at _different_ wavelengths, though not as different as Chandra or DESI would. Given our understanding of the scattering between the CMB photons and the cluster electrons, we can predict what this would look like in the different wavelength 'channels'.

We will need some constants that are common in astronomy:

```
# Boltzmann constant, https://es.wikipedia.org/wiki/Constante_de_Boltzmann
kB = const.k_B.value

# Planck constant, https://es.wikipedia.org/wiki/Constante_de_Planck
hh = const.h.value
```

This little cell gave us some of the most important quantities in physics! We also need the observed mean temperature of the CMB.

```
TT = 2.725  # Mean CMB temperature [K].

def theta(y, nu):
    # Eqn. 1 of https://arxiv.org/pdf/astro-ph/0402115.pdf.
    x = hh * nu / kB / TT
    return - 2 * y * (2. - 0.5 * x / np.tanh(x / 2.))

fig, axes = plt.subplots(1, 3, figsize=(15, 5))

for i, nu in enumerate([98., 148., 220.]):
    cluster_map = theta(ys, 1.e9 * nu)

    axes[i].imshow(cluster_map, extent=(ras.min(), ras.max(), decs.min(), decs.max()), interpolation='nearest', cmap=cm.gist_rainbow)
    axes[i].set_title('{:.1f} GHz'.format(nu))
```

The fact that the observed temperature change of the photons in a given direction goes from negative to positive (as the frequency increases) tells us that this is a genuine cluster. It is a good thing we managed to find these 'warm' baryons locked up in clusters with the CMB and ACT; until then we only knew where [50% of the atoms!](https://en.wikipedia.org/wiki/Missing_baryon_problem) were.

# BREAK

That was a lot! Take a break and a coffee as a reward for your hard work so far. When you come back, we will summarise why it takes two ground-based telescopes and two satellites to better understand dark energy with galaxy clusters.

```
Image(filename='images/Fireworks.jpg', width=1000)
```

Rested enough? Good, back to work! If you have not gone through nbody.ipynb yet, do it now and come back here afterwards. We will pick a cluster that DESI will observe soon. DESI had a little sibling, the [BOSS](http://www.sdss3.org/surveys/boss.php) survey ([see also this recent note](https://www.fisica.unam.mx/es/noticias.php?id=1301)), which already observed this cluster. Once DESI really gets going, it will observe the same number of galaxies as the whole BOSS survey _in one month_.

```
fig, ax = plt.subplots(1,1, figsize=(10,5))

cutout(ax, 0.96, 2.08)
```

By obtaining redshifts for the galaxies in the cluster, we can determine which galaxies are just chance projections on the sky.

```
clusters = Table.read('dat/tsz_boss_cluster_zs.fits')
clusters.sort('RA')
clusters
```

If we know the size of a typical cluster, then galaxies that are not close enough to the mean redshift should not be counted as cluster members.

```
# Mean redshift of the cluster.
meanz = np.mean(clusters['Z'])

# Typical difference from meanz for a galaxy in the cluster.
stdz = np.std(clusters['Z'])

in_cluster = ((clusters['Z'] - meanz) / stdz) < 1.0

pl.plot(clusters['RA'][in_cluster], clusters['Dec'][in_cluster], marker='.', lw=0.0, c='b', alpha=0.6, label='In Cluster')
pl.plot(clusters['RA'][~in_cluster], clusters['Dec'][~in_cluster], marker='.', lw=0.0, c='r', alpha=0.6, label='Spy!')

pl.xlabel('RA')
pl.ylabel('DEC')

pl.legend(loc=2)
```

If we know how many galaxies there are in the cluster, we can make a rough estimate of how much dark matter is associated with it. It is a tricky method, and not for everyone! A much better, nobler method is to use measurements of Einstein rings to determine the mass via gravitational lensing. Why do we care?

Because n-body simulations tell us that the number of clusters in a given mass range is really sensitive to the kind of Dark Energy there is in the Universe. This is great, since if we get the accounting right we will learn a lot about Dark Energy. But if we make small mistakes, then the implications of our conclusions about dark energy will be really wrong... not just a little wrong.

```
Image(filename='images/Mistake.jpg', width=600)
```

And what about the CMB data? Well, if we have really good samples of masses derived from gravitational lensing, then we can calibrate the relation between the mass and the frequency shift observed in the CMB. That will give us mass estimates for a much larger sample across the Universe, under a few conditions.
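The notebook stops short of computing a lensing mass itself. As a rough illustration of the earlier statement that the Einstein ring radius gives the mass enclosed by it, here is the standard point-lens relation $M(\theta_E) = \frac{c^2}{4G}\,\theta_E^2\,\frac{D_l D_s}{D_{ls}}$ evaluated with astropy; the redshifts and ring radius below are made-up placeholder values, not measurements of this cluster:

```python
# Minimal sketch (assumed numbers, not a measurement): the mass enclosed by an
# Einstein ring, M = (theta_E^2 c^2 / 4G) * D_l * D_s / D_ls.
import astropy.units as u
import astropy.constants as const
from astropy.cosmology import Planck15

z_lens, z_source = 0.5, 2.0            # assumed cluster and background redshifts
theta_E = (10. * u.arcsec).to(u.rad)   # assumed Einstein radius

# Angular diameter distances to the lens, to the source, and between the two.
D_l = Planck15.angular_diameter_distance(z_lens)
D_s = Planck15.angular_diameter_distance(z_source)
D_ls = Planck15.angular_diameter_distance_z1z2(z_lens, z_source)

M = (theta_E.value**2 * const.c**2 / (4. * const.G)) * D_l * D_s / D_ls

print('Mass enclosed by the ring ~ {:.2e}'.format(M.to(u.Msun)))
```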
# Load X values ``` import os dir = [x[0] for x in os.walk('/Users/apple/Desktop/eeglab14_1_2b/participant')] dir.sort() dir.reverse() dir.pop() dir.reverse() print(len(dir)) import glob filenames = [] for x in dir: temp = glob.glob(x + "/*.png") temp.sort() filenames.append(temp) print(len(filenames)) from matplotlib import image from matplotlib import pyplot import numpy as np from PIL import Image X = [] for i in range(len(filenames)): for j in range(int(len(filenames[i]) / 2)): #print(len(filenames[i])) #print(j) data = Image.open(filenames[i].pop()) arr = np.array(data) result = np.zeros((32,32)) toadd = np.zeros((32,32,4)) for k in range(arr.shape[2]): result[:arr[:,:,k].shape[0],:arr[:,:,k].shape[1]] = arr[:,:,k] toadd[:,:,k] = result #print(data.shape) X.append(toadd) print(len(X)) print(X[1].shape) ``` # Load Y ``` import pandas as pd df = pd.read_csv('/Users/apple/Desktop/eeglab14_1_2b/participant_ratings.csv', sep=r'\s*,\s*',engine = 'python', na_values = '?') df.dropna() Y_chart = pd.get_dummies(df, drop_first=True) Y = Y_chart['Valence'].tolist() print(len(Y)) #print(Y) for i in range(len(Y)): if Y[i] < 5: Y[i] = 0 else: Y[i] = 1 print(len(Y)) ``` # Training and Testing ``` from random import shuffle XY = [] for i in range(len(X)): XY.append((X[i], Y[i])) shuffle(XY) print(len(XY)) X = [x[0] for x in XY] Y = [x[1] for x in XY] import numpy as np Xarray = np.asarray(X) Yarray = np.asarray(Y) print(Xarray.shape) print(Yarray.shape) X_train = Xarray[:1240,:,:,:] Y_train = Yarray[:1240] print(X_train.shape) print(Y_train.shape) X_test = Xarray[1240:,:,:] Y_test = Yarray[1240:] print(X_test.shape) print(Y_test.shape) print('Image dataset have shape =', X_train.shape) print('Image dataset has min/mean/std/max = %.2f/%.2f/%.2f/%.2f'%(X_train.min(), X_train.mean(), X_train.std(), X_train.max())) print('') print('Train label has shape =', Y_train.shape) print('Training label has min/mean/std/max = %.2f/%.2f/%.2f/%.2f'%(Y_train.min(), Y_train.mean(), Y_train.std(), Y_train.max())) def normalize_data(data): data = data / data.max() return data X_train = normalize_data(X_train) X_test = normalize_data(X_test) print('Image dataset have shape =', X_train.shape) print('Image dataset has min/mean/std/max = %.2f/%.2f/%.2f/%.2f'%(X_train.min(), X_train.mean(), X_train.std(), X_train.max())) print('') print('Train label has shape =', Y_train.shape) print('Training label has min/mean/std/max = %.2f/%.2f/%.2f/%.2f'%(Y_train.min(), Y_train.mean(), Y_train.std(), Y_train.max())) ``` # Visualization ``` print(len(Y_train)) X_train_0 = X_train[:,:,:,0] X_train_1 = X_train[:,:,:,1] X_train_2 = X_train[:,:,:,2] X_train_3 = X_train[:,:,:,3] X_train = X_train[:,:,:,0:3] X_test = X_test[:,:,:,0:3] print(X_train_0.shape) print(len(Y_test)) print(X_test.shape) print(X_train.shape) %matplotlib inline import matplotlib.pyplot as plt import numpy as np class_names = ['Positive', 'Negative'] plt.figure(figsize=(7,7)) for i in range(10): plt.subplot(4,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X_train_0[i], cmap=plt.cm.binary) plt.xlabel(class_names[Y_train[i]]) plt.show() plt.figure(figsize=(7,7)) for i in range(10): plt.subplot(4,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X_train_1[i], cmap=plt.cm.binary) plt.xlabel(class_names[Y_train[i]]) plt.show() plt.figure(figsize=(7,7)) for i in range(10): plt.subplot(4,5,i+1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(X_train_2[i], cmap=plt.cm.binary) plt.xlabel(class_names[Y_train[i]]) plt.show() import torch 
import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data as utils transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) tensor_x = torch.stack([torch.Tensor(i) for i in X_train]) # transform to torch tensors tensor_y = torch.from_numpy(Y_train) trainset = utils.TensorDataset(tensor_x,tensor_y) # create your datset trainloader = utils.DataLoader(trainset, batch_size= 40) tensor_x_test = torch.stack([torch.Tensor(i) for i in X_test]) # transform to torch tensors tensor_y_test = torch.from_numpy(Y_test) testset = utils.TensorDataset(tensor_x_test,tensor_y_test) # create your datset testloader = utils.DataLoader(testset, batch_size=40) device = torch.device("cpu") classes = ('Positive', 'Negative') def imshow(img): img = img / 2 + 0.5 # Unnormalize. npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() dataiter = iter(trainloader) images, labels = dataiter.next() print(images.size()) images = images.permute(0, 3, 1, 2) print(images.size()) temp = images.numpy() print(len(temp[0])) print(len(temp[0][0])) print(len(temp[0][0][0])) # Show images. rows = 1 columns = 1 fig=plt.figure() for i in range(1): fig.add_subplot(rows, columns, i+1) plt.title(classes[labels[i]]) img = images[i] / 2 + 0.5 # this is for unnormalize the image img = torchvision.transforms.ToPILImage()(img) plt.imshow(img) plt.show() ``` # My Attempt ``` num_classes = 2 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 10, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(10, 20, 3, padding=1) self.fc1 = nn.Linear(20 * 8 * 8, 100) self.fc2 = nn.Linear(100, num_classes) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 20 * 8 * 8) x = F.relu(self.fc1(x)) x = self.fc2(x) return x net = Net() net.to(device) loss_func = nn.CrossEntropyLoss() opt = torch.optim.Adam(net.parameters(), lr=0.0005) #opt = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) avg_losses = [] epochs = 1000 print_freq = 15 for epoch in range(epochs): running_loss = 0.0 for i, data in enumerate(trainloader, 0): inputs, labels = data inputs = inputs.permute(0, 3, 1, 2) inputs, labels = inputs.to(device), labels.to(device) opt.zero_grad() outputs = net(inputs) loss = loss_func(outputs, labels) loss.backward() opt.step() # Print statistics. running_loss += loss.item() if i % print_freq == print_freq - 1: # Print every several minibatches. avg_loss = running_loss / print_freq print('[epoch: {}, i: {:5d}] avg mini-batch loss: {:.3f}'.format(epoch, i, avg_loss)) avg_losses.append(avg_loss) running_loss = 0.0 print('Finished Training.') plt.plot(avg_losses) plt.xlabel('mini-batch index / {}'.format(print_freq)) plt.ylabel('avg. mini-batch loss') plt.show() net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images = images.to(device) labels = labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(total) print('Accuracy of the network on the 40 test images: %d %%' % (100 * correct / total)) # Get test accuracy for each class. class_correct = list(0. for i in range(10)) class_total = list(0. 
for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(40): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(2): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) tensor_x = torch.stack([torch.Tensor(i) for i in X_train]) # transform to torch tensors tensor_y = torch.from_numpy(Y_train) trainset = utils.TensorDataset(tensor_x,tensor_y) # create your datset trainloader = utils.DataLoader(trainset, batch_size= 40) tensor_x_test = torch.stack([torch.Tensor(i) for i in X_test]) # transform to torch tensors tensor_y_test = torch.from_numpy(Y_test) testset = utils.TensorDataset(tensor_x_test,tensor_y_test) # create your datset testloader = utils.DataLoader(testset, batch_size=40) device = torch.device("cpu") classes = ('Positive', 'Negative') num_classes = 2 class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 10, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(10, 20, 3, padding=1) self.fc1 = nn.Linear(20 * 8 * 8, 100) self.fc2 = nn.Linear(100, num_classes) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 20 * 8 * 8) x = F.relu(self.fc1(x)) x = self.fc2(x) return x net = Net() net.to(device) loss_func = nn.CrossEntropyLoss() opt = optim.Adadelta(net.parameters(), lr=0.5, rho=0.9, eps=1e-06, weight_decay=0) avg_losses = [] epochs = 500 print_freq = 15 for epoch in range(epochs): running_loss = 0.0 for i, data in enumerate(trainloader, 0): inputs, labels = data inputs = inputs.permute(0, 3, 1, 2) inputs, labels = inputs.to(device), labels.to(device) opt.zero_grad() # Forward step. outputs = net(inputs) loss = loss_func(outputs, labels) loss.backward() opt.step() # Print statistics. running_loss += loss.item() if i % print_freq == print_freq - 1: # Print every several minibatches. avg_loss = running_loss / print_freq print('[epoch: {}, i: {:5d}] avg mini-batch loss: {:.3f}'.format(epoch, i, avg_loss)) avg_losses.append(avg_loss) running_loss = 0.0 print('Finished Training.') plt.plot(avg_losses) plt.xlabel('mini-batch index / {}'.format(print_freq)) plt.ylabel('avg. mini-batch loss') plt.show() net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images = images.to(device) labels = labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(total) print('Accuracy of the network on the 40 test images: %d %%' % (100 * correct / total)) # Get test accuracy for each class. class_correct = list(0. for i in range(10)) class_total = list(0. 
for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(40): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(2): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) ``` # LeNET ``` transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) tensor_x = torch.stack([torch.Tensor(i) for i in X_train]) # transform to torch tensors tensor_y = torch.from_numpy(Y_train) trainset = utils.TensorDataset(tensor_x,tensor_y) # create your datset trainloader = utils.DataLoader(trainset, batch_size= 40) tensor_x_test = torch.stack([torch.Tensor(i) for i in X_test]) # transform to torch tensors tensor_y_test = torch.from_numpy(Y_test) testset = utils.TensorDataset(tensor_x_test,tensor_y_test) # create your datset testloader = utils.DataLoader(testset, batch_size=40) device = torch.device("cpu") classes = ('Positive', 'Negative') num_classes = 2 class Net(nn.Module): def __init__(self): super(Net, self).__init__() # input channel = 3, output channel = 6, kernel_size = 5 # input size = (32, 32), output size = (28, 28) self.conv1 = nn.Conv2d(3, 6, 5) # input channel = 6, output channel = 16, kernel_size = 5 # input size = (14, 14), output size = (10, 10) self.conv2 = nn.Conv2d(6, 16, 5) # input dim = 16*5*5, output dim = 120 self.fc1 = nn.Linear(16 * 5 * 5, 120) # input dim = 120, output dim = 40 self.fc2 = nn.Linear(120, 40) # input dim = 40, output dim = 2 self.fc3 = nn.Linear(40, num_classes) def forward(self, x): # pool size = 2 # input size = (28, 28), output size = (14, 14), output channel = 6 x = F.max_pool2d(F.relu(self.conv1(x)), 2) # pool size = 2 # input size = (10, 10), output size = (5, 5), output channel = 16 x = F.max_pool2d(F.relu(self.conv2(x)), 2) # flatten as one dimension x = x.view(x.shape[0], -1) # input dim = 16*5*5, output dim = 120 x = F.relu(self.fc1(x)) # input dim = 120, output dim = 40 x = F.relu(self.fc2(x)) # input dim = 40, output dim = 2 x = self.fc3(x) return x net = Net().to(device) loss_func = nn.CrossEntropyLoss() # We use stochastic gradient descent (SGD) as optimizer. opt = torch.optim.Adam(net.parameters(), lr=0.0005) avg_losses = [] # Avg. losses. epochs = 1000 # Total epochs. print_freq = 15 # Print frequency. for epoch in range(epochs): # Loop over the dataset multiple times. running_loss = 0.0 # Initialize running loss. for i, data in enumerate(trainloader, 0): # Get the inputs. inputs, labels = data inputs = inputs.permute(0, 3, 1, 2) # Move the inputs to the specified device. inputs, labels = inputs.to(device), labels.to(device) # Zero the parameter gradients. opt.zero_grad() # Forward step. outputs = net(inputs) #print(outputs) #print(labels) loss = loss_func(outputs, labels) # Backward step. loss.backward() # Optimization step (update the parameters). opt.step() # Print statistics. running_loss += loss.item() if i % print_freq == print_freq - 1: # Print every several minibatches. avg_loss = running_loss / print_freq print('[epoch: {}, i: {:5d}] avg mini-batch loss: {:.3f}'.format(epoch, i, avg_loss)) avg_losses.append(avg_loss) running_loss = 0.0 print('Finished Training.') plt.plot(avg_losses) plt.xlabel('mini-batch index / {}'.format(print_freq)) plt.ylabel('avg. 
mini-batch loss') plt.show() net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images = images.to(device) labels = labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(total) print('Accuracy of the network on the 40 test images: %d %%' % (100 * correct / total)) # Get test accuracy for each class. class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(40): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(2): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) tensor_x = torch.stack([torch.Tensor(i) for i in X_train]) # transform to torch tensors tensor_y = torch.from_numpy(Y_train) trainset = utils.TensorDataset(tensor_x,tensor_y) # create your datset trainloader = utils.DataLoader(trainset, batch_size= 40) tensor_x_test = torch.stack([torch.Tensor(i) for i in X_test]) # transform to torch tensors tensor_y_test = torch.from_numpy(Y_test) testset = utils.TensorDataset(tensor_x_test,tensor_y_test) # create your datset testloader = utils.DataLoader(testset, batch_size=40) device = torch.device("cpu") classes = ('Positive', 'Negative') num_classes = 2 class Net(nn.Module): def __init__(self): super(Net, self).__init__() # input channel = 3, output channel = 6, kernel_size = 5 # input size = (32, 32), output size = (28, 28) self.conv1 = nn.Conv2d(3, 6, 5) # input channel = 6, output channel = 16, kernel_size = 5 # input size = (14, 14), output size = (10, 10) self.conv2 = nn.Conv2d(6, 16, 5) # input dim = 16*5*5, output dim = 120 self.fc1 = nn.Linear(16 * 5 * 5, 120) # input dim = 120, output dim = 40 self.fc2 = nn.Linear(120, 40) # input dim = 40, output dim = 2 self.fc3 = nn.Linear(40, num_classes) def forward(self, x): # pool size = 2 # input size = (28, 28), output size = (14, 14), output channel = 6 x = F.max_pool2d(F.relu(self.conv1(x)), 2) # pool size = 2 # input size = (10, 10), output size = (5, 5), output channel = 16 x = F.max_pool2d(F.relu(self.conv2(x)), 2) # flatten as one dimension x = x.view(x.shape[0], -1) # input dim = 16*5*5, output dim = 120 x = F.relu(self.fc1(x)) # input dim = 120, output dim = 40 x = F.relu(self.fc2(x)) # input dim = 40, output dim = 2 x = self.fc3(x) return x net = Net().to(device) loss_func = nn.CrossEntropyLoss() # We use stochastic gradient descent (SGD) as optimizer. #opt = torch.optim.Adam(net.parameters(), lr=0.001) opt = optim.Adadelta(net.parameters(), lr=1.0, rho=0.9, eps=1e-06, weight_decay=0) avg_losses = [] # Avg. losses. epochs = 500 # Total epochs. print_freq = 15 # Print frequency. for epoch in range(epochs): # Loop over the dataset multiple times. running_loss = 0.0 # Initialize running loss. for i, data in enumerate(trainloader, 0): # Get the inputs. inputs, labels = data inputs = inputs.permute(0, 3, 1, 2) # Move the inputs to the specified device. inputs, labels = inputs.to(device), labels.to(device) # Zero the parameter gradients. opt.zero_grad() # Forward step. 
outputs = net(inputs) loss = loss_func(outputs, labels) # Backward step. loss.backward() # Optimization step (update the parameters). opt.step() # Print statistics. running_loss += loss.item() if i % print_freq == print_freq - 1: # Print every several minibatches. avg_loss = running_loss / print_freq print('[epoch: {}, i: {:5d}] avg mini-batch loss: {:.3f}'.format(epoch, i, avg_loss)) avg_losses.append(avg_loss) running_loss = 0.0 print('Finished Training.') plt.plot(avg_losses) plt.xlabel('mini-batch index / {}'.format(print_freq)) plt.ylabel('avg. mini-batch loss') plt.show() net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images = images.to(device) labels = labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(total) print('Accuracy of the network on the 40 test images: %d %%' % (100 * correct / total)) # Get test accuracy for each class. class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(40): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(2): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) ``` # My Second Approach ``` transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) tensor_x = torch.stack([torch.Tensor(i) for i in X_train]) # transform to torch tensors tensor_y = torch.from_numpy(Y_train) trainset = utils.TensorDataset(tensor_x,tensor_y) # create your datset trainloader = utils.DataLoader(trainset, batch_size= 40) tensor_x_test = torch.stack([torch.Tensor(i) for i in X_test]) # transform to torch tensors tensor_y_test = torch.from_numpy(Y_test) testset = utils.TensorDataset(tensor_x_test,tensor_y_test) # create your datset testloader = utils.DataLoader(testset, batch_size=40) device = torch.device("cpu") classes = ('Positive', 'Negative') class Net(nn.Module): def __init__(self): super(Net, self).__init__() # 1 input image channel, 6 output channels, 3x3 square convolution # kernel self.conv1 = nn.Conv2d(3, 6, 3) self.conv2 = nn.Conv2d(6, 16, 3) # an affine operation: y = Wx + b self.fc1 = nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension self.fc2 = nn.Linear(120, 40) self.fc3 = nn.Linear(40, 2) def forward(self, x): # Max pooling over a (2, 2) window x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # If the size is a square you can only specify a single number x = F.max_pool2d(F.relu(self.conv2(x)), 2) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def num_flat_features(self, x): size = x.size()[1:] # all dimensions except the batch dimension num_features = 1 for s in size: num_features *= s return num_features net = Net() net.to(device) loss_func = nn.CrossEntropyLoss() # We use stochastic gradient descent (SGD) as optimizer. opt = torch.optim.Adam(net.parameters(), lr=0.0005) #opt = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) avg_losses = [] # Avg. losses. epochs = 1000 # Total epochs. print_freq = 15 # Print frequency. 
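# Note (added comment): the Compose/Normalize transform defined above is never applied,
# because TensorDataset does not take a transform argument. The batches therefore arrive
# exactly as built from the numpy arrays, i.e. NHWC (batch, height, width, channel),
# which is why each batch is permuted to NCHW below before reaching the Conv2d layers.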
for epoch in range(epochs): # Loop over the dataset multiple times. running_loss = 0.0 # Initialize running loss. for i, data in enumerate(trainloader, 0): # Get the inputs. inputs, labels = data inputs = inputs.permute(0, 3, 1, 2) # Move the inputs to the specified device. inputs, labels = inputs.to(device), labels.to(device) # Zero the parameter gradients. opt.zero_grad() # Forward step. outputs = net(inputs) #print(outputs) #print(labels) loss = loss_func(outputs, labels) # Backward step. loss.backward() # Optimization step (update the parameters). opt.step() # Print statistics. running_loss += loss.item() if i % print_freq == print_freq - 1: # Print every several minibatches. avg_loss = running_loss / print_freq print('[epoch: {}, i: {:5d}] avg mini-batch loss: {:.3f}'.format(epoch, i, avg_loss)) avg_losses.append(avg_loss) running_loss = 0.0 print('Finished Training.') plt.plot(avg_losses) plt.xlabel('mini-batch index / {}'.format(print_freq)) plt.ylabel('avg. mini-batch loss') plt.show() net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images = images.to(device) labels = labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(total) print('Accuracy of the network on the 40 test images: %d %%' % (100 * correct / total)) # Get test accuracy for each class. class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(40): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(2): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) tensor_x = torch.stack([torch.Tensor(i) for i in X_train]) # transform to torch tensors tensor_y = torch.from_numpy(Y_train) trainset = utils.TensorDataset(tensor_x,tensor_y) # create your datset trainloader = utils.DataLoader(trainset, batch_size= 40) tensor_x_test = torch.stack([torch.Tensor(i) for i in X_test]) # transform to torch tensors tensor_y_test = torch.from_numpy(Y_test) testset = utils.TensorDataset(tensor_x_test,tensor_y_test) # create your datset testloader = utils.DataLoader(testset, batch_size=40) device = torch.device("cpu") classes = ('Positive', 'Negative') class Net(nn.Module): def __init__(self): super(Net, self).__init__() # 3 input image channel, 6 output channels, 3x3 square convolution # kernel self.conv1 = nn.Conv2d(3, 6, 3) self.conv2 = nn.Conv2d(6, 16, 3) # an affine operation: y = Wx + b self.fc1 = nn.Linear(16 * 6 * 6, 120) # 6*6 from image dimension self.fc2 = nn.Linear(120, 40) self.fc3 = nn.Linear(40, 2) def forward(self, x): # Max pooling over a (2, 2) window x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) x = F.max_pool2d(F.relu(self.conv2(x)), 2) x = x.view(-1, self.num_flat_features(x)) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x def num_flat_features(self, x): size = x.size()[1:] # all dimensions except the batch dimension num_features = 1 for s in size: num_features *= s return num_features net = Net() net.to(device) loss_func = 
nn.CrossEntropyLoss() # We use stochastic gradient descent (SGD) as optimizer. #opt = torch.optim.Adam(net.parameters(), lr=0.001) opt = optim.Adadelta(net.parameters(), lr=0.5, rho=0.9, eps=1e-06, weight_decay=0) avg_losses = [] # Avg. losses. epochs = 500 # Total epochs. print_freq = 15 # Print frequency. for epoch in range(epochs): # Loop over the dataset multiple times. running_loss = 0.0 # Initialize running loss. for i, data in enumerate(trainloader, 0): # Get the inputs. inputs, labels = data inputs = inputs.permute(0, 3, 1, 2) # Move the inputs to the specified device. inputs, labels = inputs.to(device), labels.to(device) # Zero the parameter gradients. opt.zero_grad() # Forward step. outputs = net(inputs) loss = loss_func(outputs, labels) # Backward step. loss.backward() # Optimization step (update the parameters). opt.step() # Print statistics. running_loss += loss.item() if i % print_freq == print_freq - 1: # Print every several minibatches. avg_loss = running_loss / print_freq print('[epoch: {}, i: {:5d}] avg mini-batch loss: {:.3f}'.format(epoch, i, avg_loss)) avg_losses.append(avg_loss) running_loss = 0.0 print('Finished Training.') plt.plot(avg_losses) plt.xlabel('mini-batch index / {}'.format(print_freq)) plt.ylabel('avg. mini-batch loss') plt.show() net.eval() correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images = images.to(device) labels = labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(total) print('Accuracy of the network on the 40 test images: %d %%' % (100 * correct / total)) # Get test accuracy for each class. class_correct = list(0. for i in range(10)) class_total = list(0. 
for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data images = images.permute(0, 3, 1, 2) images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(40): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(2): print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i])) ``` # Flatten ML ``` X_trainFlat = [x.flatten() for x in X_train] X_testFlat = [x.flatten() for x in X_test] from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn import linear_model from sklearn.tree import DecisionTreeClassifier cls = DecisionTreeClassifier(random_state=0) cls.fit(X_trainFlat, Y_train) #print(cls) pred = cls.predict(X_testFlat) pred = cls.predict(X_testFlat) correct = 0 for x in range(len(pred)): if pred[x] == Y_test[x]: correct+=1 percentage = correct/len(Y_test) print("Decision Tree accuracy: " + str(percentage)) print() cls = RandomForestClassifier() cls.fit(X_trainFlat, Y_train) #print(cls) pred = cls.predict(X_testFlat) pred = cls.predict(X_testFlat) correct = 0 for x in range(len(pred)): if pred[x] == Y_test[x]: correct+=1 percentage = correct/len(Y_test) print("Random Forest accuracy: " + str(percentage)) print() cls = AdaBoostClassifier() cls.fit(X_trainFlat, Y_train) #print(cls) pred = cls.predict(X_testFlat) pred = cls.predict(X_testFlat) correct = 0 for x in range(len(pred)): if pred[x] == Y_test[x]: correct+=1 percentage = correct/len(Y_test) print("Ada Boost accuracy: " + str(percentage)) print() cls = linear_model.SGDClassifier(max_iter=1000, tol=1e-3) cls.fit(X_trainFlat, Y_train) #print(cls) pred = cls.predict(X_testFlat) correct = 0 for x in range(len(pred)): if pred[x] == Y_test[x]: correct+=1 percentage = correct/len(Y_test) print("SGD Classifier accuracy: " + str(percentage)) print() cls = LogisticRegression() cls.fit(X_trainFlat, Y_train) #print(cls) pred = cls.predict(X_testFlat) correct = 0 for x in range(len(pred)): if pred[x] == Y_test[x]: correct+=1 percentage = correct/len(Y_test) print("Logistic Regression accuracy: " + str(percentage)) X_train_vec = [] for i in range(X_train.shape[0]): flat = X_train[i,:,:,:].flatten() arr = np.array2string(flat) X_train_vec.append(arr) X_test_vec = [] for i in range(X_test.shape[0]): flat = X_test[i,:,:,:].flatten() arr = np.array2string(flat) X_test_vec.append(arr) from sklearn.feature_extraction.text import TfidfVectorizer count_vect = TfidfVectorizer(ngram_range=(1,3)) trainX = count_vect.fit_transform(X_train_vec) from sklearn import preprocessing le = preprocessing.LabelEncoder() le.fit(Y_train) target_labels = le.classes_ trainy = le.transform(Y_train) from sklearn.linear_model import LogisticRegression from sklearn.model_selection import GridSearchCV param_grid = {'C': [0.01, 0.1, 1, 5, 10, 25]} print("grid search start") grid = GridSearchCV(LogisticRegression(random_state=0, solver='lbfgs',class_weight = 'balanced', max_iter=10000), param_grid, cv=5) print("done grid search") grid.fit(trainX, trainy) print("Best cross-validation score: {:.2f}".format(grid.best_score_)) print("Best parameters: ", grid.best_params_) print("Best estimator: ", grid.best_estimator_) cls = grid.best_estimator_ cls.fit(trainX, trainy) import sklearn.metrics testX = count_vect.transform(X_test_vec) yp = cls.predict(testX) testy = 
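# Note (added comment): SGDClassifier above is a linear classifier (hinge loss by default)
# trained with stochastic gradient descent; max_iter and tol only bound the optimisation,
# they do not change the underlying model.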
le.transform(Y_test) acc = sklearn.metrics.accuracy_score(testy, yp) print("Accuracy for Test for the NLP method is " + str(acc)) ```
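The grid-searched model above is summarised by a single accuracy number. As an optional check (a sketch, not part of the original notebook), the `testy`, `yp`, `le`, and `target_labels` objects already defined above are enough to print a confusion matrix and per-class precision/recall with scikit-learn:

```
from sklearn.metrics import classification_report, confusion_matrix

# Per-class breakdown for the TF-IDF + logistic-regression model evaluated above.
# target_labels comes from the LabelEncoder fitted on Y_train.
print(confusion_matrix(testy, yp))
print(classification_report(testy, yp,
                            labels=list(range(len(target_labels))),
                            target_names=[str(c) for c in target_labels]))
```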
# Allegheny County Jail Statistics
## Author: Solomon Heisey
### Goal
The goal of this project is to expose statistical trends present at the Allegheny County Jail from 2015 to 2021. DISCLAIMER: this project does not hypothesize why certain trends appear; it is only meant to document them and educate others about their presence. The dataset is built from over 5,000,000 records collected through daily jail censuses and is provided by the Western Pennsylvania Regional Data Center.
```
import matplotlib.pyplot as plt
import pandas as pd
```
For the sake of interactive display in Jupyter, matplotlib will be enabled inline.
```
%matplotlib inline

url = 'http://tools.wprdc.org/downstream/66cdcd57-6c92-4aaa-8800-0ed9d8f03e22'
file_name = './data.csv'

# Use the locally cached copy if present; otherwise download and cache it.
try:
    df_all = pd.read_csv(file_name, sep=',', engine='python', parse_dates=['census_date'])
except FileNotFoundError:
    df_all = pd.read_csv(url, sep=',', engine='python', parse_dates=['census_date'])
    df_all.to_csv(file_name)

# Expand the single-letter race and gender codes into readable labels.
df_all['race'] = df_all['race'].replace(['A'], 'Asian or Pacific Islander')
df_all['race'] = df_all['race'].replace(['B'], 'Black or African American')
df_all['race'] = df_all['race'].replace(['H'], 'Hispanic')
df_all['race'] = df_all['race'].replace(['I'], 'American Indian or Alaskan Native')
df_all['race'] = df_all['race'].replace(['U'], 'Unknown')
df_all['race'] = df_all['race'].replace(['W'], 'White')
df_all['race'] = df_all['race'].replace(['x'], 'Biracial')
df_all['race'] = df_all['race'].replace(['O'], 'All others')
df_all['gender'] = df_all['gender'].replace(['M'], 'Male')
df_all['gender'] = df_all['gender'].replace(['F'], 'Female')

df_all['census_month'] = df_all['census_date'].dt.month
df_all['census_year'] = df_all['census_date'].dt.year
```
### Gender, Race, and Age Breakdown by Month
```
df_gender_month = df_all.groupby(['census_month', 'gender'])['gender'].count().unstack()
df_gender_month.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Month',
                         title='Gender Breakdown by Month', figsize=(12, 6))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()

df_race_month = df_all.groupby(['census_month', 'race'])['race'].count().unstack()
df_race_month.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Month',
                       title='Race Breakdown by Month', figsize=(12, 6))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()

age_groups = pd.cut(df_all['age_at_booking'], bins=[19, 40, 65, 100])
df_age_month = pd.crosstab(df_all['census_month'], age_groups)
df_age_month.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Month',
                      title='Age Breakdown by Month', figsize=(12, 5))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()
```
### Gender, Race, and Age Breakdown by Year
```
df_gender_year = df_all.groupby(['census_year', 'gender'])['gender'].count().unstack()
df_gender_year.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Year',
                        title='Gender Breakdown by Year', figsize=(7, 7))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()

df_race_year = df_all.groupby(['census_year', 'race'])['race'].count().unstack()
df_race_year.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Year',
                      title='Race Breakdown by Year', figsize=(7, 7))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()

age_groups = pd.cut(df_all['age_at_booking'], bins=[18, 40, 65, 100])
df_age_year = pd.crosstab(df_all['census_year'], age_groups)
df_age_year.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Year',
                     title='Age Breakdown by Year', figsize=(7, 7))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()
```
### Inmate Population from 2015 to 2021
```
df_age_gender = pd.crosstab(age_groups, df_all['gender'])
df_age_gender.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Age Group',
                       title='Gender and Age Breakdown from 2015 to 2021', figsize=(3, 9))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()

df_gender_race = df_all.groupby(['race', 'gender'])['race'].count().unstack()
df_gender_race.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Race',
                        title='Race and Gender Breakdown from 2015 to 2021', figsize=(7, 8))
plt.legend(loc=(1.04, 0))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.show()

df_age_race = pd.crosstab(df_all['race'], age_groups)
df_age_race.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Race',
                     title='Age and Race Breakdown from 2015 to 2021', figsize=(7, 7))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.legend(loc=(1.04, 0))
plt.show()

temp = pd.crosstab(index=[df_all['gender'], df_all['race']], columns=age_groups)
temp.plot.bar(stacked=True, ylabel='Total Inmates', xlabel='Gender, Race',
              title='Age, Race, and Gender Breakdown from 2015 to 2021', figsize=(14, 6))
plt.gcf().axes[0].yaxis.get_major_formatter().set_scientific(False)
plt.legend(loc=(1.04, 0))
plt.show()
```
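One readability note: the monthly and yearly breakdowns above use slightly different lower bin edges (19 vs. 18), and the legends show raw pandas `Interval` objects. A small optional sketch follows (assuming the same `df_all` frame; the label strings and the `age_groups_named` variable are arbitrary choices, not part of the original notebook) that names the bins so the legends read more naturally:

```
# Named age bins make the stacked-bar legends easier to read than raw Interval objects.
age_groups_named = pd.cut(df_all['age_at_booking'],
                          bins=[18, 40, 65, 100],
                          labels=['18-40', '41-65', '66-100'])
pd.crosstab(df_all['census_year'], age_groups_named).plot.bar(
    stacked=True, ylabel='Total Inmates', xlabel='Year',
    title='Age Breakdown by Year', figsize=(7, 7))
plt.legend(loc=(1.04, 0))
plt.show()
```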
```
class Stock():
    def __init__(self, stock_name, stock_price, stock_type, stock_amount):
        '''Initialize stock info

        Attributes: stock_name, stock_price, stock_type, stock_amount
        '''
        self.name = stock_name
        self.amount = stock_amount
        self.price = stock_price
        self.type = stock_type

    def __str__(self):
        """Show the stock info

        Returns:
            string: stock's name, type, amount and price
        """
        return ('Stock name: %s, Type: %s, Amount: %s, Price: %s' % (
            self.name, self.type, self.amount, self.price))

    # current value of one stock: buys ('B') count positive, everything else negative
    def current_value(self):
        return self.price * self.amount if self.type == 'B' else (-1) * self.price * self.amount


class Order():
    def __init__(self, order_id, stock, user_id):
        '''order info'''
        self.order_id = order_id
        self.stock = stock
        self.user_id = user_id

    # show the order info
    def __str__(self):
        return ('order id: %s, %s, user_id: %s' % (
            self.order_id, self.stock, self.user_id))

    # current value of one order
    def current_summary(self):
        current_value = self.stock.current_value()
        print('order id: {}, current_summary: {}'.format(self.order_id, current_value))
        return current_value

    # user info
    def user(self):
        return self.user_id

    def order_user_map(self):
        # return the mapping instead of discarding it (the original assigned it to a local only)
        return {self.user_id: self.order_id}


class Exchange():
    def __init__(self):
        self.orders = []

    # def add(self, stock):
    #     if stock in self.stocks_list:
    #         pass
    #     else:
    #         self.stocks_list.append(stock)

    # create an order
    def create(self, order):
        if order in self.orders:
            pass
        else:
            self.orders.append(order)

    # cancel an order
    def cancel(self, order):
        if order in self.orders:
            self.orders.remove(order)
        else:
            print('There is no existing order in the exchange')

    # check all orders info in the exchange
    def current_market_summary(self):
        for order in self.orders:
            print(order)

    # amount of all orders in the exchange
    def total_portfolio(self):
        current_portfolio = 0
        for order in self.orders:
            order_value = order.current_summary()
            current_portfolio += order_value
        return current_portfolio

    # all orders info and total amount for one specific user_id
    def search(self, user_id):
        user_summary = 0
        for order in self.orders:
            if user_id == order.user():  # exact match; `in` would also match substrings
                user_summary += order.current_summary()
        if user_summary == 0:
            return 'Not Found'
        return 'User id: {}. Summary for this user: {}'.format(user_id, user_summary)


MSFT = Stock('MSFT', 140, 'B', 10)
GOOG = Stock('GOOG', 3000, 'B', 20)
APPL = Stock('APPL', 300, 'B', 15)
AMAZ = Stock('AMAZ', 3100, 'S', 100)

order_1 = Order('01', MSFT, 'haha01')
order_1.current_summary()
order_2 = Order('10', GOOG, 'haha05')
order_3 = Order('8', APPL, 'haha05')
print(order_1)

exchange_1 = Exchange()
exchange_1.create(order_1)
exchange_1.create(order_2)
exchange_1.current_market_summary()
exchange_1.create(order_3)
exchange_1.search('haha05')
exchange_1.cancel(order_2)
exchange_1.current_market_summary()

# Note: dict literals keep only the last value for a duplicated key,
# so the first 'haha01' entry below is silently dropped.
order_user_dict = {'haha01': {'order id': '01', 'Stock name': 'MSFT', 'Type': 'B', 'Amount': '10', 'Price': '140'},
                   'haha01': {'order id': '02', 'Stock name': 'MSFT', 'Type': 'B', 'Amount': '20', 'Price': '140'}}
order_user_dict
# exchange_dict  # never defined in this notebook; evaluating it raises a NameError
```
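The `order_user_map` stub above hints at keeping a user-to-order mapping. One possible extension is sketched below (a sketch only; `ExchangeWithIndex`, its `orders_by_user` attribute, and the usage lines are hypothetical additions, not part of the notebook's API). It maintains a per-user index so `search` no longer has to scan every order:

```
from collections import defaultdict

class ExchangeWithIndex(Exchange):
    def __init__(self):
        super().__init__()
        self.orders_by_user = defaultdict(list)  # user_id -> [Order, ...]

    def create(self, order):
        if order not in self.orders:
            self.orders.append(order)
            self.orders_by_user[order.user()].append(order)

    def cancel(self, order):
        if order in self.orders:
            self.orders.remove(order)
            self.orders_by_user[order.user()].remove(order)
        else:
            print('There is no existing order in the exchange')

    def search(self, user_id):
        orders = self.orders_by_user.get(user_id, [])
        if not orders:
            return 'Not Found'
        user_summary = sum(order.current_summary() for order in orders)
        return 'User id: {}. Summary for this user: {}'.format(user_id, user_summary)

# Example usage with the objects defined above:
# ex = ExchangeWithIndex()
# ex.create(order_1); ex.create(order_3)
# print(ex.search('haha05'))
```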
``` import numpy as np import pandas as pd import mxnet as mx import matplotlib.pyplot as plt import plotly.plotly as py import logging logging.basicConfig(level=logging.DEBUG) train1=pd.read_csv('../data/train.csv') train1.shape train1.iloc[0:4, 0:15] train=np.asarray(train1.iloc[0:33600,:]) cv=np.asarray(train1.iloc[33600:,:]) _train=train[:,1:] _train.shape _cv=cv[:,1:] _cv.shape trainx=np.reshape(_train, (_train.shape[0],1,28,28))/255 cvx=np.reshape(_cv, (_cv.shape[0],1,28,28))/255 ix=3 img=np.asarray(np.matrix(trainx[ix,0,:,:])) plt.imshow(img, cmap='Greys_r') plt.show() trainy=np.asarray(train[:,0]) cvy=np.asarray(cv[:,0]) trainy.shape ``` FULLY CONNECTED NEURAL NETWORK =========================== ``` data = mx.sym.var('data') Y= mx.symbol.Variable('softmax_label') # first fullc layer flatten = mx.sym.flatten(data=data) fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500) nlin3 = mx.sym.Activation(data=fc1, act_type="relu") # output fullc fc3 = mx.sym.FullyConnected(data=nlin3, num_hidden=10) # Softmax output SNN = mx.symbol.SoftmaxOutput(data=fc3, label=Y, name="SNN") SNN_model = mx.mod.Module(symbol=SNN, label_names =['softmax_label'], context=mx.cpu()) batch_size = 100 train_iter = mx.io.NDArrayIter(trainx, trainy, batch_size, shuffle=True) val_iter = mx.io.NDArrayIter(cvx, cvy, batch_size) SNN_model.fit(train_iter, # train data eval_data=val_iter, # validation data optimizer='sgd', optimizer_params={'learning_rate':0.05, 'momentum': 0.9}, eval_metric='acc', batch_end_callback = mx.callback.Speedometer(batch_size=batch_size, frequent=200), num_epoch=15) ``` DEEP FULLY CONNECTED NEURAL NETWORK =========================== ``` data = mx.sym.var('data') Y= mx.symbol.Variable('softmax_label') # first fullc layer flatten = mx.sym.flatten(data=data) fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500) nlin1 = mx.sym.Activation(data=fc1, act_type="relu") # second fullc layer fc2 = mx.symbol.FullyConnected(data=nlin1, num_hidden=500) nlin2 = mx.sym.Activation(data=fc2, act_type="relu") # third fullc layer fc3 = mx.symbol.FullyConnected(data=nlin2, num_hidden=500) nlin3 = mx.sym.Activation(data=fc3, act_type="relu") # output fullc fc4 = mx.sym.FullyConnected(data=nlin3, num_hidden=10) # Softmax output DNN = mx.symbol.SoftmaxOutput(data=fc4, label=Y, name="DNN") DNN_model = mx.mod.Module(symbol=DNN, label_names =['softmax_label'], context=mx.cpu()) batch_size = 100 train_iter = mx.io.NDArrayIter(trainx, trainy, batch_size, shuffle=True) val_iter = mx.io.NDArrayIter(cvx, cvy, batch_size) DNN_model.fit(train_iter, # train data eval_data=val_iter, # validation data optimizer='sgd', optimizer_params={'learning_rate':0.05, 'momentum': 0.9}, eval_metric='acc', batch_end_callback = mx.callback.Speedometer(batch_size=batch_size, frequent=200), num_epoch=15) ``` CONVOLUTIONAL NEURAL NETWORK =========================== ``` data = mx.sym.var('data') Y= mx.symbol.Variable('softmax_label') # first conv layer conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20) nlin1 = mx.sym.Activation(data=conv1, act_type="relu") pool1 = mx.sym.Pooling(data=nlin1, pool_type="max", kernel=(2,2), stride=(2,2)) drop1 = mx.symbol.Dropout(data=pool1,p=0.5) # second conv layer conv2 = mx.sym.Convolution(data=drop1, kernel=(5,5), num_filter=40) nlin2 = mx.sym.Activation(data=conv2, act_type="relu") pool2 = mx.sym.Pooling(data=nlin2, pool_type="max", kernel=(2,2), stride=(2,2)) drop2 = mx.symbol.Dropout(data=pool2,p=0.5) # first fullc layer flatten = mx.sym.flatten(data=drop2) fc1 = 
mx.symbol.FullyConnected(data=flatten, num_hidden=500) nlin3 = mx.sym.Activation(data=fc1, act_type="relu") # output fullc fc2 = mx.sym.FullyConnected(data=nlin3, num_hidden=10) # Softmax output CNN = mx.symbol.SoftmaxOutput(data=fc2, label=Y, name="CNN") CNN_model = mx.mod.Module(symbol=CNN, label_names =['softmax_label'], context=mx.cpu()) batch_size = 100 train_iter = mx.io.NDArrayIter(trainx, trainy, batch_size, shuffle=True) val_iter = mx.io.NDArrayIter(cvx, cvy, batch_size) CNN_model.fit(train_iter, # train data eval_data=val_iter, # validation data optimizer='sgd', optimizer_params={'learning_rate':0.05, 'momentum': 0.9}, eval_metric='acc', batch_end_callback = mx.callback.Speedometer(batch_size=batch_size, frequent=200), num_epoch=15) ```
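The notebook stops after training; to see how a trained network performs beyond the per-epoch log, the sketch below (an addition, not part of the original) uses the Module API's `score` and `predict` methods on the validation iterator. It assumes `CNN_model`, `val_iter` and `cvy` from the cells above, and that the installed MXNet version exposes these Module methods; the same calls should also work for `SNN_model` and `DNN_model`.

```
# Added sketch: report validation accuracy and inspect a few predictions
# for the trained CNN.
acc = CNN_model.score(val_iter, ['acc'])   # list of (metric_name, value) pairs
print(acc)

probs = CNN_model.predict(val_iter)        # (n_validation, 10) class scores
predicted = probs.asnumpy().argmax(axis=1)
print('predicted:', predicted[:10])
print('actual   :', cvy[:10].astype(int))
```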
# **Álgebra Linear** ``` import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D v1 = [2,5] v2 = [1,6,8] v1 ,v2 type(v1) v3 = np.array([8,3,9]) type(v3) v3.shape[0] v3.shape v4=np.array([1.+2.j, 3.+4.j, 5, 6.j], dtype=complex) v4 type(v4) ``` # **Lendo elementos de um array** ``` a = np.array([7,5,3,9,0,2]) a a[0] a[1:] a[1:4] a[-1] a[-3] a[-6] a[-3:-1] ``` #**Plotando um vetor** ``` v = [3,5] u = [1,2,3] plt.plot(v) plt.plot([0,v[0]] , [0,v[1]]) ``` # **Plota um vetor 2D** ``` plt.plot([0,v[0]] , [0,v[1]]) plt.plot([8,-8] , [0,0] , 'k--') plt.plot([0,0] , [8,-8] , 'k--') plt.grid() plt.axis((-8, 8, -8, 8)) plt.show() ``` # **Plota um vetor 3D** ``` fig = plt.figure() ax = Axes3D(fig) ax.plot([0,u[0]],[0,u[1]],[0,u[2]]) #plt.axis('equal') ax.plot([0, 0],[0, 0],[-5, 5],'k--') ax.plot([0, 0],[-5, 5],[0, 0],'k--') ax.plot([-5, 5],[0, 0],[0, 0],'k--') plt.show() ``` # **Soma de vetores** ``` v1 = np.array([2,-4]) v2 = np.array([4,6]) v3 = v1+v2 v3 = np.add(v1,v2) print('V3 =' ,v3) plt.plot([0,v1[0]] , [0,v1[1]] , 'r' , label = 'v1') plt.plot([0,v2[0]] , [0,v2[1]], 'b' , label = 'v2') plt.plot([0,v3[0]] , [0,v3[1]] , 'g' , label = 'v3') plt.plot([8,-8] , [0,0] , 'k--') plt.plot([0,0] , [8,-8] , 'k--') plt.grid() plt.axis((-8, 8, -8, 8)) plt.legend() plt.show() plt.plot([0,v1[0]] , [0,v1[1]] , 'r' , label = 'vetor v1') plt.plot([0,v2[0]]+v1[0] , [0,v2[1]]+v1[1], 'b' , label = 'vetor v2') plt.plot([0,v3[0]] , [0,v3[1]] , 'g' , label = 'vetor v3') plt.plot([8,-8] , [0,0] , 'k--') plt.plot([0,0] , [8,-8] , 'k--') plt.grid() plt.axis((-8, 8, -8, 8)) plt.legend() plt.show() ``` # **Multiplicação de vetor por um escalar** ``` u1 = np.array([4,4]) a = .5 u2 = u1*a plt.plot([0,u1[0]] , [0,u1[1]] , 'r' , label = 'v1') plt.plot([0,u2[0]] , [0,u2[1]], 'b--' , label = 'v2') plt.plot([8,-8] , [0,0] , 'k--') plt.plot([0,0] , [8,-8] , 'k--') plt.grid() plt.axis((-8, 8, -8, 8)) plt.legend() plt.show() u1 = np.array([4,4]) a = -.3 u2 = u1*a plt.plot([0,u1[0]] , [0,u1[1]] , 'r' , label = 'v1') plt.plot([0,u2[0]] , [0,u2[1]], 'b' , label = 'v2') plt.plot([8,-8] , [0,0] , 'k--') plt.plot([0,0] , [8,-8] , 'k--') plt.grid() plt.axis((-8, 8, -8, 8)) plt.legend() plt.show() ``` # **Multiplicação de vetores** ``` a1 = [2,4,6] a2 = [3,5,1] print(np.multiply(a1,a2)) ``` # **Produto interno** ``` a1 = np.array([2,4,6]) a2 = np.array([3,5,1]) dotp = a1@a2 print(" Dot product - ",dotp) dotp = np.dot(a1,a2) print(" Dot product usign np.dot",dotp) dotp = np.inner(a1,a2) print(" Dot product usign np.inner", dotp) dotp = sum(np.multiply(a1,a2)) print(" Dot product usign np.multiply & sum",dotp) dotp = np.matmul(a1,a2) print(" Dot product usign np.matmul",dotp) dotp = 0 for i in range(len(a1)): dotp = dotp + a1[i]*a2[i] print(" Dot product usign for loop" , dotp) ``` # **Tamanho de um vetor** ``` v3 = np.array([1,2,3,4,5,6,7]) length = np.sqrt(np.dot(v3,v3)) length v3 = np.array([1,2,3,4,5,6,7]) length = np.sqrt(sum(np.multiply(v3,v3))) length v3 = np.array([1,2,3,4,5,6,7]) length = np.sqrt(np.matmul(v3,v3)) length ``` # **Vetor normalizado** ``` v1 = [1,1] length_v1 = np.sqrt(np.dot(v1,v1)) norm_v1 = v1/length_v1 length_v1 , norm_v1 v1 = [1,1] norm_v1 = v1/np.linalg.norm(v1) norm_v1 ``` # **Ângulo entre vetores** ``` #First Method v1 = np.array([5,5]) v2 = np.array([-5,5]) ang = np.rad2deg(np.arccos( np.dot(v1,v2) / (np.linalg.norm(v1)*np.linalg.norm(v2)))) plt.plot([0,v1[0]] , [0,v1[1]] , 'r' , label = 'v1') plt.plot([0,v2[0]]+v1[0] , [0,v2[1]]+v1[1], 'b' , label = 'v2') plt.plot([15,-15] , 
[0,0] , 'k--') plt.plot([0,0] , [15,-15] , 'k--') plt.grid() plt.axis((-15, 15, -15, 15)) plt.legend() plt.title('Angulo entre vetores - %s' %ang) plt.show() #Second Method v1 = np.array([5,5]) v2 = np.array([-5,5]) lengthV1 = np.sqrt(np.dot(v1,v1)) lengthV2 = np.sqrt(np.dot(v2,v2)) ang = np.rad2deg(np.arccos( np.dot(v1,v2) / (lengthV1 * lengthV2))) print('Angulo entre vetores - %s' %ang) v1 = np.array([5,2,-3]) v2 = np.array([7,-1,2]) fig = plt.figure() ax = Axes3D(fig) ax.plot([0, v1[0]],[0, v1[1]],[0, v1[2]],'b') ax.plot([0, v2[0]],[0, v2[1]],[0, v2[2]],'r') ang = np.rad2deg(np.arccos( np.dot(v1,v2) / (np.linalg.norm(v1)*np.linalg.norm(v2)) )) plt.title('Angulo entre vetores: %s degrees.' %ang) ``` # **Produtos interno e externo** ``` v1 = np.array([1,2,1]) v2 = np.array([2,1,2]) np.inner(v1,v2) print("\n Inner Product ==> \n", np.inner(v1,v2)) print("\n Outer Product ==> \n", np.outer(v1,v2)) ``` # **Produto vetorial** ``` v1 = np.array([7,0,0]) v2 = np.array([0,7,0]) print("\nVector Cross Product ==> \n", np.cross(v1,v2)) ``` # **Operações com matrizes** # **Criação de matriz** ``` A = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]) A type(A) A.dtype B = np.array([[2.7,6.03,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]) B type(B) B.dtype A.shape A[0,] A[:,0] A[0,0] A[0][0] A[1:3 , 1:3] ``` # **Matriz de zeros** ``` np.zeros(36).reshape(6,6) np.zeros((6,6)) ``` # **Matriz de 1's** ``` np.ones(25).reshape(5,5) np.ones((5,5)) ``` # **Matriz de números aleatórios** ``` X = np.random.random((3,4)) X ``` # **Matriz identidade** ``` I = np.eye(7) I ``` # **Matriz diagonal** ``` D = np.diag([1,2,3,4,5,6,7]) D ``` # **Matrizes triangulares (superior e inferior)** ``` M = np.random.randn(5,5) U = np.triu(M) L = np.tril(M) print("matriz aleatória \n" , M) print("\n") print("matriz triangular inferior \n" , L) print("\n") print("matriz triangular superior \n" , U) ``` # **Concatenação de matrizes** ``` A = np.array([[1,2] , [3,4] ,[5,6]]) B = np.array([[9,2] , [3,-3]]) C = np.concatenate((A,B)) C , C.shape , type(C) , C.dtype np.full((7,7) , 4) M = np.array([[1,2,3],[4,-3,6],[8,7,0]]) M M.flatten() ``` # **Soma de matrizes** ``` M = np.array([[1,2,3],[4,-3,6],[7,8,0]]) N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("\n Primeira matriz (M) ==> \n", M) print("\n Segunda matriz (N) ==> \n", N) C = M+N print("\n soma (M+N) ==> \n", C) # ou C = np.add(M,N,dtype = np.float64) print("\n soma usando np.add ==> \n", C) ``` # **Subtração de matrizes** ``` M = np.array([[1,2,3],[4,-3,6],[7,8,0]]) N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("\n Primeira matriz (M) ==> \n", M) print("\n Segunda matriz (N) ==> \n", N) C = M-N print("\n Subtração (M-N) ==> \n", C) # ou C = np.subtract(M,N,dtype = np.float64) print("\n Subtração usando np.subtract ==> \n", C) ``` # **Multiplicação de matriz por escalar** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) C = 20 print("\n Matriz (M) ==> \n", M) print("\n Multiplicação por escalar ==> \n", C*M) # ou print("\n Multiplicação por escalar usando np.multiply ==> \n", np.multiply(C,M)) ``` # **Transposta de uma matriz** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) print("\n Matriz (M) ==> \n", M) print("\n Transposta de M ==> \n", np.transpose(M)) # ou print("\nTransposta de M ==> \n", M.T) ``` # **Determinante de uma matriz** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) print("\n Matriz (M) ==> \n", M) print("\n Determinante de M ==> ", np.linalg.det(M)) ``` # **Posto de uma matriz** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) print("\n Matriz (M) ==> \n", M) 
print("\n Posto de M ==> ", np.linalg.matrix_rank(M)) ``` # **Traço de uma matriz** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) print("\n Matriz (M) ==> \n", M) print("\n Traço de M ==> ", np.trace(M)) ``` # **Inversa de uma matriz** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) print("\n Matriz (M) ==> \n", M) print("\n Inversa de M ==> \n", np.linalg.inv(M)) ``` # **Multiplicação de matrizes (pontual)** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("\n Primeira matriz (M) ==> \n", M) print("\n Segunda matriz (N) ==> \n", N) print("\n Multiplication pontual de M e N ==> \n", M*N) # ou print("\n Multiplication pontual de M e N ==> \n", np.multiply(M,N)) ``` # **Produto escalar matricial** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("\n Primeira matriz (M) \n", M) print("\n Segunda matriz (N) \n", N) print("\n Produto escalar \n", M@N) # ou print("\n Produto escala usando np.matmul \n", np.matmul(M,N)) # ou print("\n Produto escala usando np.dot \n", np.dot(M,N)) ``` # **"Divisão" de Matrizes** ``` M = np.array([[1,9,3],[2,-7,6],[8,7,0]]) N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("\n Primeira matriz (M) \n", M) print("\n Segunda matriz (N) \n", N) print("\n Divisão (M/N) \n", M/N) # ou print("\n Divisão (M/N) \n", np.divide(M,N)) ``` # **Soma de todos elementos da matriz** ``` N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("Matriz (N) \n", N) print ("Soma de todos elementos da matriz") print (np.sum(N)) ``` # **Adição com base na coluna** ``` N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("Matriz (N) ==> \n", N) print ("Adição com base na coluna") print (np.sum(N,axis=0)) ``` # **Adição com base na linha** ``` N = np.array([[5,5,5],[6,6,6],[7,7,7]]) print("Matriz (N) ==> \n", N) print ("Adição com base na linha") print (np.sum(N,axis=1)) ``` # **Produto de Kronecker de matrizes** ``` M1 = np.array([[1,2,3] , [4,5,6]]) M1 M2 = np.array([[10,10,10],[10,10,10]]) M2 np.kron(M1,M2) ``` # **Multiplicação matriz-vetor** ``` A = np.array([[1,2,3] ,[4,5,6]]) v = np.array([10,20,30]) print ("Multiplicação matriz-vetor \n", A*v) ``` # **Produto escalar matriz-vetor** ``` A = np.array([[1,2,3] ,[4,5,6]]) v = np.array([10,20,30]) print ("Produto escalar matriz-vetor \n" , A@v) ``` # **Potências de matriz** ``` M1 = np.array([[1,2],[5,6]]) M1 # Matriz na potencia 3 M1@M1@M1 # Matriz na potencia 3 usando np.linalg.matrix_power np.linalg.matrix_power(M1,3) ``` # **Tensores** ``` # Criando um Tensor T1 = np.array([ [[1,2,3], [4,5,6], [7,8,9]], [[10,11,12], [13,14,15], [16,17,18]], [[19,20,21], [22,23,24], [25,26,27]], ]) T1 T2 = np.array([ [[0,0,0] , [0,0,0] , [0,0,0]], [[2,2,2] , [2,2,2] , [2,2,2]], [[4,4,4] , [4,4,4] , [4,4,4]] ]) T2 ``` # **Soma de tensores** ``` A = T1+T2 A ``` # **Subtração de tensores** ``` S = T1-T2 S np.subtract(T1,T2) ``` # **Produto de tensores (baseado em elementos)** ``` P = T1*T2 P np.multiply(T1,T2) ``` # **"Divisão" de tensores (baseado em elementos)** ``` D = T1/T2 D np.divide(T1,T2) ``` # **Produto escalar de tensores** ``` T1 T2 np.tensordot(T1,T2) ``` # **Solução de sistemas lineares (AX=B)** ``` A = np.array([[4,-2,3] , [1,-5,6] , [-7,8,9]]) A B = np.random.random((3,1)) B # Primeiro metodo X = np.dot(np.linalg.inv(A) , B) X # Segundo Metodo X = np.matmul(np.linalg.inv(A) , B) X # Terceiro metodo X = np.linalg.inv(A)@B X # Quarto metodo X = np.linalg.solve(A,B) X ```
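To close the linear-system section, the added check below (not in the original material) verifies that the computed `X` really satisfies AX = B and that the inverse-based route and `np.linalg.solve` agree; it only assumes `A`, `B` and `X` from the cells above.

```
# Added verification: X should reproduce B when multiplied by A,
# and the two solution routes should agree up to floating-point rounding.
print(np.allclose(A @ X, B))
print(np.allclose(np.linalg.inv(A) @ B, np.linalg.solve(A, B)))
```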
``` # Hidden code cell for setup # Imports and setup import astropixie import astropixie_widgets import enum import ipywidgets import numpy astropixie_widgets.config.setup_notebook() from astropixie.data import pprint as show_data_in_table from numpy import intersect1d as stars_in_both class SortOrder(enum.Enum): BrightestToDimmest = enum.auto() DimmestToBrightest = enum.auto() HottestToCoolest = enum.auto() CoolestToHottest = enum.auto() def filter_star_data(data, sortOrder, percent=100): if sortOrder in [SortOrder.BrightestToDimmest, SortOrder.DimmestToBrightest]: order = 'luminosity' elif sortOrder in [SortOrder.HottestToCoolest, SortOrder.CoolestToHottest]: order = 'temperature' sortedData = numpy.sort(data, axis=None, order=order) if sortOrder in [SortOrder.HottestToCoolest, SortOrder.BrightestToDimmest]: sortedData = sortedData[::-1] filteredStarCount = int(len(sortedData) * percent / 100) return sortedData[0:filteredStarCount] ``` ## Introduction and Background Today you will be using a data visualization tool called the [H-R Diagram](https://en.wikipedia.org/wiki/Hertzsprung–Russell_diagram), first developed more than a century ago by [Ejnar Hertzsprung](https://en.wikipedia.org/wiki/Ejnar_Hertzsprung) from Denmark, and [Henry Norris Russell](https://en.wikipedia.org/wiki/Henry_Norris_Russell), an American. The H-R Diagram will enable you to create your own "window" to the stars and explore what it can reveal about star properties such as size, temperature, and energy output. In order to accurately compare stars to each other and measure properties such as their energy outputs, it is important to account for the fact that two stars of the same brightness will look very different if one is farther away from Earth than the other. One way to address this issue is to collect data from a group of stars in a [star cluster](https://en.wikipedia.org/wiki/Star_cluster), in which all the stars are the same distance away. Today you will collect and analyze data for the stars in one cluster, which will allow you to determine the variation that exists in stellar properties. In this investigation, the term [luminosity](https://en.wikipedia.org/wiki/Luminosity) refers to the total energy output from a star per unit of time. Luminosity is typically reported as a ratio of the star's energy output compared to the energy emitted by the Sun. For example, a star with a _solar luminosity_ of "10" emits ten times more energy than the Sun. # Procedure and Data First call up the information and data for your star cluster. *Type in the name of your cluster and press Enter:* ``` # Hidden code cell cluster = astropixie.data.Berkeley20SDSS() hr_diagram = astropixie_widgets.visual.SHRD(cluster) hr_diagram.show() def show_data_in_hr_diagram(data): # Note, pulling the ranges from the H-R diagram widget. astropixie_widgets.visual.hr_diagram_from_data(data, hr_diagram.x_range, hr_diagram.y_range) ``` #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) *Make your best estimate of which stars in the image belong to the cluster.* #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/click_16x.svg) *Use your mouse to outline the boundary of the cluster.* #### You will now see a plot of all the data for the stars you selected displayed on an H-R Diagram. #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Notice that most stars occupy a region stretching from the upper left to the lower right of the diagram. 
This is known as the [main sequence](https://cnx.org/contents/LnN76Opl@13.153:EVgehrPG@9/The-HR-Diagram). #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/question_16x.svg) Answer the next question about the main sequence of your cluster. ``` # Hidden code cell for question astropixie_widgets.question.show_question("1. Where on the main sequence are stars the most numerous? What color are these stars?") ``` #### You will now begin to work with code to define the characteristics of the stars in the cluster. The code in the gray below calls up all data in the cluster and displays it as a data table. #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/click_16x.svg) Click on the gray code box below, and hold down SHIFT and press ENTER to run this code. ``` all_star_data = hr_diagram.filtered_data show_data_in_table(all_star_data) ``` #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/pencil_16x.svg) Run the code below to sort the data by temperature. Record the maximum and minimum temperatures: ``` # The code below sorts the star data from coolest stars to hottest stars, # and stores it in the new list named 'coolToHot' coolToHot = filter_star_data(all_star_data, SortOrder.CoolestToHottest) # Show the new list 'coolToHot' in a table. show_data_in_table(coolToHot) astropixie_widgets.question.show_question("2. Record the hottest and coolest temperatures for the stars in your cluster.") ``` #### Now you will use code to define a selected set of stars on the H-R Diagram. The next box contains code that displays only the hottest stars (hottest = top 20% of cluster data ordered by temperature). #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where the stars appear on the diagram: ``` hottestStars = filter_star_data(all_star_data, SortOrder.HottestToCoolest, percent=20) show_data_in_hr_diagram(hottestStars) ``` #### The next box contains code that displays only the coolest stars (coolest = bottom 20% of cluster data ordered by temperature). #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where the stars appear on the diagram: ``` coolestStars = filter_star_data(all_star_data, SortOrder.CoolestToHottest, percent=20) show_data_in_hr_diagram(coolestStars) ``` #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/pencil_16x.svg) The next code box sorts the data by luminosity (brightness). Run it and record the maximum and minimum luminosity values: ``` # The code below sorts the star data from dimmest stars to the brightest stars, # and stores it in the new list named 'dimToBright' dimToBright = filter_star_data(all_star_data, SortOrder.DimmestToBrightest) # Show the new list 'dimToBright' in a table. show_data_in_table(dimToBright) astropixie_widgets.question.show_question("3. Record the largest and smallest luminosities for the stars in your cluster.") ``` #### The next box contains code that displays only the most luminous (brightest) stars (brightest = top 20% of cluster data ordered by luminosity). #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where the stars appear on the diagram: ``` brightestStars = filter_star_data(all_star_data, SortOrder.BrightestToDimmest, percent=20) show_data_in_hr_diagram(brightestStars) ``` #### The next box contains code that displays only the least luminous (dimmest) stars (dimmest = bottom 20% of cluster data ordered by luminosity). 
#### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where the stars appear on the diagram: ``` dimmestStars = filter_star_data(all_star_data, SortOrder.DimmestToBrightest, percent=20) show_data_in_hr_diagram(dimmestStars) ``` #### It's possible to define a set of stars that share two common characterisitics. #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/question_16x.svg) For the next questions (4a, b, c and d), answer each part by first running the code then describing the area of the H-R Diagram where the stars appear. Answer with a combination of two of these words: *left, right, top, bottom*, and a color or colors. #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where cool and dim stars appear on the diagram: ``` coolAndDimStars = stars_in_both(coolestStars, dimmestStars) show_data_in_hr_diagram(coolAndDimStars) astropixie_widgets.question.show_question("4a. Where on the H-R diagram are cool, dim stars located? What color are these stars?") ``` #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where cool and bright stars appear on the diagram: ``` coolAndBrightStars = stars_in_both(coolestStars, brightestStars) show_data_in_hr_diagram(coolAndBrightStars) astropixie_widgets.question.show_question("4b. Where on the H-R diagram are bright, cool stars located? What color are these stars?") ``` #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where hot and bright stars appear on the diagram: ``` hotAndBrightStars = stars_in_both(hottestStars, brightestStars) show_data_in_hr_diagram(hotAndBrightStars) astropixie_widgets.question.show_question("4c. Where on the H-R diagram are hot, bright stars located? What color are these stars?") ``` #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/eye_16x.svg) Run the code and observe where hot and dim stars appear on the diagram: ``` hotAndDimStars = stars_in_both(hottestStars, dimmestStars) show_data_in_hr_diagram(hotAndDimStars) astropixie_widgets.question.show_question("4d. Where on the H-R diagram are dim, hot stars located? What color are these stars?") ``` # ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/question_16x.svg) Discuss and report #### *Take a few minutes with your partner or small group to investigate and discuss the following:* ``` astropixie_widgets.question.show_question("5. The Sun’s surface temperature is about 6000K. Suppose a main sequence star has a temperature three times greater than the Sun’s. How much more luminous than the Sun is the hotter star? Use your diagram to estimate an answer.") astropixie_widgets.question.show_question("6. Two stars have the same luminosity but differ in color. What physical property of the stars could explain this?") astropixie_widgets.question.show_question("7. Two giant stars have the same luminosity. One is yellow and the other is orange. Which one is larger? Explain your reasoning.") astropixie_widgets.question.show_question("8. What physical property of stars could explain why stars in the lower left of the H-R Diagram are dimmer than the stars in the upper left, since they are both very hot?") ``` ### *Be prepared to report out and discuss your observations.* # ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/pencil_16x.svg) Summary ``` astropixie_widgets.question.show_question("9. 
Now that you have had a chance to discuss your observations, write a summary in the text box below that explains what you have learned about star temperatures, sizes and luminosities.", rows=6) ``` ## Challenge Problem #### Use what you have learned to write code that will display only the *blue stars* in your cluster. #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/pencil_16x.svg) Write your code in the box below, and hold down SHIFT and press ENTER to run your code. You can edit your code and run it as many times as you like. #### ![Click](//d38piav16dazdc.cloudfront.net/data/icons/sized/svg/question_16x.svg) Answer the next question based on the code you wrote. ``` astropixie_widgets.question.show_question("10. What percentage (number value) did you enter to display only the blue stars in your data set?") ```
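One possible sketch for the challenge problem is shown below; it is an added illustration rather than the official answer, and it simply reuses `filter_star_data`, `SortOrder` and `show_data_in_hr_diagram` from the earlier cells. Blue stars sit at the hot end of the main sequence, so selecting a small top percentage of the temperature-ordered data isolates them; the `percent=10` value is only an example and the right cutoff depends on your cluster.

```
# Added sketch of one way to display only the blue (hottest) stars.
# The percentage is a placeholder - adjust it until only blue stars remain.
blueStars = filter_star_data(all_star_data, SortOrder.HottestToCoolest, percent=10)
show_data_in_hr_diagram(blueStars)
```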
``` # run this cell to check your Python version is OK for this notebook! import sys def check_python_version_above_3_6(): major = sys.version_info.major minor = sys.version_info.minor if major < 3 or minor < 6: print('ERROR you need to run this notebook with Python 3.6 or above (as f-strings used)') print('ERROR current Python version is {}.{}'.format(major, minor)) print('ERROR Please see:\n', ' https://canvas.anglia.ac.uk/courses/15139/pages/azure-notebooks-switching-kernel\n' ' for information on switching kernel on Azure Notebooks') else: print('Python version {}.{} you are good to go'.format(major, minor)) check_python_version_above_3_6() ``` # BLAST method for database searching This practical uses the simple BLAST algorithm 'MyBlast' from Chapter 7 of Rocha and Ferreira (2018) Bioinformatics Algorithms. It is not supposed to be a fully-fledged program but aims only to show the initial steps in the database search method. The first step is to create a database. This will be a text file containing a set of strings representing the sequencees to be searched. Each sequence is short enough in the test data to be given as a single line in the file. The authors provide a test data file callled `seqBlast.txt` that is supplied with this notebook for available from https://github.com/ARU-Bioinf-CMA-2020/tw6 The function read_database opens that file and creates the database. ``` # run this cell to define function read_database def read_database (filename): """" reads the sequences to search from a text file and returns as a list of strings. """ file = open (filename) db = [] for line in file: db.append(line.rstrip()) file.close() return db # run this cell to use read_database to read in entries from seqBlast.txt # and check that 5 entries read. db = read_database("seqBlast.txt") assert len(db) == 5 ``` The next step is to pre-process the query sequence. The authors call this producing a 'map' of all word of a particular size (an adjustable parameter here - although remember that larger words are more demanding). A table of these can be a Python dictionary - as that naturally reproduces the hashing method described in the lecture. ``` # run this cell to define the build_map function def build_map(query, w): """ preprocesses the query to store the positions of words """ res = {} for i in range(len(query)-w+1): subseq = query[i:i+w] if subseq in res: res[subseq].append(i) else: res[subseq] = [i] return res ``` In the authors' simplified version of the algorithm they do not use a substitution table but instead a simple scoring function similar to the one used for the Smith-Waterman example. The function gives a positive score only to perfect hits. As explained in the lecture this has to be the default for many protein sequence words in BLOSUM62 as well. The next function get_hits will scan a sequence and find all matches of the words from the query map. These are considered 'hits'. ``` # run this cell to define get_hits def get_hits (seq, map_, w): """ scans the sequence for word hit in map returns tuples: index of match in query with the index of match in sequence """ res = [] # list of tuples for i in range(len(seq)-w+1): subseq = seq[i:i+w] if subseq in map_: l = map_[subseq] for ind in l: res.append( (ind,i) ) return res ``` The next step is to extend the hits found by the previous function. Here the the hit is extended while the new aligned positions score greated than or equal to half of the new positions. No gaps are used. 
But a check needs to be made that the extension has not reached the end of either the query or the sequence. ``` def extends_hit(seq, hit, query, w): """ the hit positions are extended based on the sequence and query matches on either side returns a tuple: (the starting index of the alignment on the query, the starting index of the alignment on the sequences, the size of the alignment, and the score - that is the number of matching characters). """ stq, sts = hit[0], hit[1] ## extend hit forward matfw = 0 k=0 bestk = 0 while 2*matfw >= k and stq+w+k < len(query) and sts+w+k < len(seq): if query[stq+w+k] == seq[sts+w+k]: matfw+=1 bestk = k+1 k += 1 size = w + bestk ## extend hit backwards k = 0 matbw = 0 bestk = 0 while 2*matbw >= k and stq > k and sts > k: if query[stq-k-1] == seq[sts-k-1]: matbw+=1 bestk = k+1 k+=1 size += bestk return (stq-bestk, sts-bestk, size, w+matfw+matbw) ``` The output for the extended hit is an alignment starting positions in the query and in the sequence, its size and finally a count of identity matches added to the word length. The words from hits are all fully identical by definition in this implementation. The identity count is a measure of the final score for the extended alignment. The extend_hit function is applied to all the hits in turn by the following function hit_best_score. It then returns the top scoring one. ``` def hit_best_score(seq, query, m, w): """ the hit positions are extended based on the sequence and query matches on either side returns the best alignment the query an a given sequence as a tuple: (the starting index of the alignment on the query, the starting index of the alignment on the sequence, the size of the alignment, and the score - that is the number of matching characters). """ hits = get_hits(seq, m, w) bestScore = -1.0 best = () for h in hits: ext = extends_hit(seq, h, query, w) score = ext[3] if score > bestScore or (score== bestScore and ext[2] < best[2]): bestScore = score best = ext return best ``` The final step is to apply the previous functions to compare the query with all the sequences in the database. The best overall alignment is found for sequences with hits. The result is a tuple similar to the ones above. ``` def best_alignment(db, query, w): """ compare the query with all the sequences in the database all significant scores >=0 are returned returns the best alignment the query an a given sequence as a tuple: (the starting index of the alignment on the query, the starting index of the alignment on the sequence, the size of the alignment, the score - that is the number of matching characters, The index of the sequence with the best alignment). """ m = build_map(query, w) bestScore = -1.0 res = (0,0,0,0,0) for k in range(0,len(db)): bestSeq = hit_best_score(db[k], query, m, w) if bestSeq != (): score = bestSeq[3] if score > bestScore or (score== bestScore and bestSeq[2] < res[2]): bestScore = score res = bestSeq[0], bestSeq[1], bestSeq[2], bestSeq[3], k if bestScore < 0: return () else: return res ``` Applying it to a database from the test data file. 
The standard DNA word length of 11 is used: ``` query = "ggggcgacgacggcgacgaatgatg" result = best_alignment(db, query, 11) query_start, best_sequence_start, size, score, best_sequence_index = result # unpack result print(f'alignment score {score} that is the number of matching characters') print(f'alignment size {size} bases') print(f'starting index of the alignment on the query {query_start}') print(f'the best sequence is {best_sequence_index}') print(f'starting index of the alignment on the best sequence {best_sequence_start}') # now your job is to print out the alignment between the query and the best sequence that has been identified # this should look like: # cgacgacggcgacgaatgatg # |||||||| |||||||||||| # cgacgacgacgacgaatgatg # the vertical lines can be produced using the highlight_line function: def highlight_line(first_seq, second_seq): """ for the two sequences returns a line where matching letters are highlighted with | except if the letter are a gap """ joins = ['|' if a == b and a != '-' else ' ' for a, b in zip(first_seq, second_seq)] return ''.join(joins) assert highlight_line('A', 'A') == '|' assert highlight_line('AAAA', 'AAAA') == '||||' assert highlight_line('AAAA', 'AGGA') == '| |' assert highlight_line('AA-AA', 'AA-AA') == '|| ||' ``` # optional advanced exercise Try out a longer query - does your code for printing the alignment work for this. ``` query = "gacgcctcgcgctcgcgcgctgaggcaaaaaaaaaaaaaaaaaaaatcggatagctagctgagcgctcgatagcgcgttcgctgcatcgcgtatagcgctgaagctcccggcgagctgtctgtaaatcggatctcatctcgctctatcct" r = best_alignment(db, query, 11) result = best_alignment(db, query, 11) query_start, best_sequence_start, size, score, best_sequence_index = result # unpack result print(f'alignment score {score} that is the number of matching characters') print(f'alignment size {size} bases') print(f'starting index of the alignment on the query {query_start}') print(f'the best sequence is {best_sequence_index}') print(f'starting index of the alignment on the best sequence {best_sequence_start}') # ```
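The exercise above asks for the alignment printout; one way to do it (an added sketch, not the notebook's official solution) is to slice the matched regions out of the query and the best database sequence using the indices returned by `best_alignment`, then reuse `highlight_line` for the middle row. No gap handling is needed because this simplified BLAST only produces ungapped alignments. It assumes the variables unpacked in the cell above are still in scope.

```
# Added sketch: print the ungapped alignment found by best_alignment.
aligned_query = query[query_start:query_start + size]
aligned_hit = db[best_sequence_index][best_sequence_start:best_sequence_start + size]
print(aligned_query)
print(highlight_line(aligned_query, aligned_hit))
print(aligned_hit)
```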
github_jupyter
# run this cell to check your Python version is OK for this notebook! import sys def check_python_version_above_3_6(): major = sys.version_info.major minor = sys.version_info.minor if major < 3 or minor < 6: print('ERROR you need to run this notebook with Python 3.6 or above (as f-strings used)') print('ERROR current Python version is {}.{}'.format(major, minor)) print('ERROR Please see:\n', ' https://canvas.anglia.ac.uk/courses/15139/pages/azure-notebooks-switching-kernel\n' ' for information on switching kernel on Azure Notebooks') else: print('Python version {}.{} you are good to go'.format(major, minor)) check_python_version_above_3_6() # run this cell to define function read_database def read_database (filename): """" reads the sequences to search from a text file and returns as a list of strings. """ file = open (filename) db = [] for line in file: db.append(line.rstrip()) file.close() return db # run this cell to use read_database to read in entries from seqBlast.txt # and check that 5 entries read. db = read_database("seqBlast.txt") assert len(db) == 5 # run this cell to define the build_map function def build_map(query, w): """ preprocesses the query to store the positions of words """ res = {} for i in range(len(query)-w+1): subseq = query[i:i+w] if subseq in res: res[subseq].append(i) else: res[subseq] = [i] return res # run this cell to define get_hits def get_hits (seq, map_, w): """ scans the sequence for word hit in map returns tuples: index of match in query with the index of match in sequence """ res = [] # list of tuples for i in range(len(seq)-w+1): subseq = seq[i:i+w] if subseq in map_: l = map_[subseq] for ind in l: res.append( (ind,i) ) return res def extends_hit(seq, hit, query, w): """ the hit positions are extended based on the sequence and query matches on either side returns a tuple: (the starting index of the alignment on the query, the starting index of the alignment on the sequences, the size of the alignment, and the score - that is the number of matching characters). """ stq, sts = hit[0], hit[1] ## extend hit forward matfw = 0 k=0 bestk = 0 while 2*matfw >= k and stq+w+k < len(query) and sts+w+k < len(seq): if query[stq+w+k] == seq[sts+w+k]: matfw+=1 bestk = k+1 k += 1 size = w + bestk ## extend hit backwards k = 0 matbw = 0 bestk = 0 while 2*matbw >= k and stq > k and sts > k: if query[stq-k-1] == seq[sts-k-1]: matbw+=1 bestk = k+1 k+=1 size += bestk return (stq-bestk, sts-bestk, size, w+matfw+matbw) def hit_best_score(seq, query, m, w): """ the hit positions are extended based on the sequence and query matches on either side returns the best alignment the query an a given sequence as a tuple: (the starting index of the alignment on the query, the starting index of the alignment on the sequence, the size of the alignment, and the score - that is the number of matching characters). """ hits = get_hits(seq, m, w) bestScore = -1.0 best = () for h in hits: ext = extends_hit(seq, h, query, w) score = ext[3] if score > bestScore or (score== bestScore and ext[2] < best[2]): bestScore = score best = ext return best def best_alignment(db, query, w): """ compare the query with all the sequences in the database all significant scores >=0 are returned returns the best alignment the query an a given sequence as a tuple: (the starting index of the alignment on the query, the starting index of the alignment on the sequence, the size of the alignment, the score - that is the number of matching characters, The index of the sequence with the best alignment). 
""" m = build_map(query, w) bestScore = -1.0 res = (0,0,0,0,0) for k in range(0,len(db)): bestSeq = hit_best_score(db[k], query, m, w) if bestSeq != (): score = bestSeq[3] if score > bestScore or (score== bestScore and bestSeq[2] < res[2]): bestScore = score res = bestSeq[0], bestSeq[1], bestSeq[2], bestSeq[3], k if bestScore < 0: return () else: return res query = "ggggcgacgacggcgacgaatgatg" result = best_alignment(db, query, 11) query_start, best_sequence_start, size, score, best_sequence_index = result # unpack result print(f'alignment score {score} that is the number of matching characters') print(f'alignment size {size} bases') print(f'starting index of the alignment on the query {query_start}') print(f'the best sequence is {best_sequence_index}') print(f'starting index of the alignment on the best sequence {best_sequence_start}') # now your job is to print out the alignment between the query and the best sequence that has been identified # this should look like: # cgacgacggcgacgaatgatg # |||||||| |||||||||||| # cgacgacgacgacgaatgatg # the vertical lines can be produced using the highlight_line function: def highlight_line(first_seq, second_seq): """ for the two sequences returns a line where matching letters are highlighted with | except if the letter are a gap """ joins = ['|' if a == b and a != '-' else ' ' for a, b in zip(first_seq, second_seq)] return ''.join(joins) assert highlight_line('A', 'A') == '|' assert highlight_line('AAAA', 'AAAA') == '||||' assert highlight_line('AAAA', 'AGGA') == '| |' assert highlight_line('AA-AA', 'AA-AA') == '|| ||' query = "gacgcctcgcgctcgcgcgctgaggcaaaaaaaaaaaaaaaaaaaatcggatagctagctgagcgctcgatagcgcgttcgctgcatcgcgtatagcgctgaagctcccggcgagctgtctgtaaatcggatctcatctcgctctatcct" r = best_alignment(db, query, 11) result = best_alignment(db, query, 11) query_start, best_sequence_start, size, score, best_sequence_index = result # unpack result print(f'alignment score {score} that is the number of matching characters') print(f'alignment size {size} bases') print(f'starting index of the alignment on the query {query_start}') print(f'the best sequence is {best_sequence_index}') print(f'starting index of the alignment on the best sequence {best_sequence_start}') #
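# One possible way to finish the exercise above (a sketch, not the model answer):
# slice the aligned regions out of the query and the best-matching database sequence
# using the values already unpacked from best_alignment, then print the highlight_line
# output between them, reproducing the format shown in the comments above.
aligned_query = query[query_start:query_start + size]
aligned_best_seq = db[best_sequence_index][best_sequence_start:best_sequence_start + size]
print(aligned_query)
print(highlight_line(aligned_query, aligned_best_seq))
print(aligned_best_seq)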
# Compare Robustness ## Set up the Environment ``` # Import everything that's needed to run the notebook import os import pickle import dill import pathlib import datetime import random import time from IPython.display import display, Markdown, Latex import pandas as pd import numpy as np from sklearn.pipeline import Pipeline from sklearn.base import BaseEstimator, TransformerMixin from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer from sklearn.neural_network import MLPClassifier import scipy.stats import matplotlib.pyplot as plt from matplotlib.lines import Line2D import util import robust from ipynb.fs.defs.descriptor_based_neural_networks import DescriptorBuilder from ipynb.fs.defs.construct_sbnn import SBNNPreprocessor from sklearn.model_selection import learning_curve plt.rc('xtick', labelsize=15) plt.rc('ytick', labelsize=15) plt.rc('axes', labelsize=15) # Define the path to the configuration dictionary config_path = 'configuration.p' # Load the configuration dictionary with open(config_path, 'rb') as f: configuration = pickle.load(f) # Get the paths to the relevant directories data_directory_path = configuration['data']['directory_path'] classifiers_directory_path = configuration['classifiers']['directory_path'] ``` ## Load the Storages of Results and Reports ``` dbnn_storage1 = dbnn_storage dbnn_storage = {} results_directory_path = configuration['results']['directory_path'] path = os.path.join(results_directory_path, 'dbnn_results.p') with open(path, 'rb') as f: dbnn_storage['results'] = pickle.load(f) reports_directory_path = configuration['reports']['directory_path'] path = os.path.join(reports_directory_path, 'dbnn') path = os.path.join(path, 'dbnn_reports.p') with open(path, 'rb') as f: dbnn_storage['reports'] = pickle.load(f) ``` ## Load the DBNNs ``` with open('dbnns1.p', 'rb') as f: dbnns = dill.load(f) ``` ## Load and Prepare Set $\mathcal{F}$ ``` # Define the dictionary to store the actual datasets, indexed by their names datasets = {} # Load the datasets for set_name in ['F-left', 'F-right', 'F-central', 'F-symmetric']: set_path = configuration['data']['datasets'][set_name]['path'] print('Loading {} from {}'.format(set_name, set_path)) datasets[set_name] = util.load_from_file(set_path) print('Done.') for set_name in datasets: labels = [sample.pop() for sample in datasets[set_name]] samples = datasets[set_name] datasets[set_name] = {'samples' : samples, 'labels' : labels} ``` ## Load the Tests ``` # Make a dictionary to hold the tests test_classifiers = {} # Specify the classical tests codes = ['SW', 'SF', 'LF', 'JB', 'DP', 'AD', 'CVM', 'FSSD'] # Load the classical tests for test_code in codes: test, statistic = util.get_test(test_code) for alpha in [0.01, 0.05]: test_classifiers[(test_code, alpha)] = util.TestClassifier(test, statistic, alpha) # Although SBNN is not technically a test, consider it too. 
with open(os.path.join('classifiers', 'sbnn.p'), 'rb') as f: sbnn = pickle.load(f) test_classifiers[('SBNN', '/')] = sbnn codes += ['SBNN'] # Specify the robustified tests robust_codes = ['MMRT1', 'MMRT2', 'TTRT1', 'TTRT2', 'RSW', 'RLM'] # Load the robustified tests for test_code in robust_codes: test, statistic = robust.get_robust_test(test_code) for alpha in [0.01, 0.05]: test_classifiers[(test_code, alpha)] = util.TestClassifier(test, statistic, alpha) ``` ## Evaluate the Tests ``` # Specify the sample sizes n_range = range(10, 101, 10) # Specify the metrics to calculate metrics = ['TNR'] # Evaluate the tests on each group of samples in set F for group in ['F-left', 'F-right', 'F-central', 'F-symmetric']: print(group) samples = datasets[group]['samples'] labels = datasets[group]['labels'] # Create a dictionary to store the results all_test_results = {} for (test_code, alpha) in test_classifiers: # Evaluate the tests (and SBNN) print('\t', test_code, alpha, end='') # Get the test test_clf = test_classifiers[(test_code, alpha)] # Evaluate it start = time.time() test_results_df = util.evaluate_pretty(samples, labels, test_clf, metrics=metrics, n_range=n_range, index='n') end = time.time() # Show how long its evaluation took and display the results print('\t', end - start) display(test_results_df.T) # Memorize the results all_test_results[(test_code, alpha)] = test_results_df # Put the results into the storage for persistence for key in all_test_results: test_results = all_test_results[key] memory = dbnn_storage['results']['comparison'].get(group, {}) memory[key] = test_results dbnn_storage['results']['comparison'][group] = memory ``` ## Create the Dataframes of Results ``` F_results = {} for group in dbnn_storage['results']['comparison']: if group[0] != 'F': continue print(group) results = dbnn_storage['results']['comparison'][group] results_dict = {test_key: results[test_key]['TNR'] for test_key in results} results_df = pd.concat(results_dict, axis=1) results_df = results_df[sorted(results_df.columns)] for name in sorted(dbnns.keys()): if '0.01' in name: new_name = 'DBNN$_{0.01}$' alpha = 0.01 elif '0.05' in name: new_name = 'DBNN$_{0.05}$' alpha = 0.05 elif 'opt' in name: new_name = 'DBNN$_{opt}$' alpha = '/' elif '0.1' in name: continue else: new_name = 'DBNN' alpha = '/' results_df[(new_name, alpha)] = dbnn_storage['results']['evaluation'][name][group]['TNR'] # list(sorted(dbnns.keys())) results_df = results_df[[col for col in results_df.columns]] F_results[group] = results_df display(results_df.T) ``` ## Make $\LaTeX$ Tables and Plot the Figures ``` #(F_results['F-left'].xs('/', level=1, axis=1) <= 0.05*2).T#.sum(axis=0) #F_results['F-symmetric'].xs('/', level=1, axis=1) competitors = list(test_classifiers.keys()) dbnn_cols = [('DBNN', '/'), ('DBNN$_{opt}$', '/'), ('DBNN$_{0.01}$', 0.01), ('DBNN$_{0.05}$', 0.05)] selected_results = {} for group in F_results: print(group) df_competition = F_results[group][competitors].T.sort_values(by='overall', ascending=True).head(5) df_dbnn = F_results[group][dbnn_cols].T selected_results[group] = df_dbnn.append(df_competition) display(selected_results[group]) figures = {'reports' : {'comparison' : {}}} for group in selected_results: df = selected_results[group].T fig = df[df.index != 'overall'].plot(kind='line', style=['o-', 'v-', '^-', 's-', 'D--', 'p--', 'x--', 'X-.', 'd--'], #color=['navy', 'darkred', 'red', 'orangered', 'orange'], linewidth=3, markersize=13, figsize=(10,7), use_index=True) plt.legend(fontsize=11) plt.ylabel('$TNR$') 
plt.legend(bbox_to_anchor=(0, 1.01), loc='lower left', ncol=5) plt.tight_layout() #plt.plot(range(0, 101, 100), [0.05, 0.05]) latex = util.get_latex_table(F_results[group].T, float_format='$%.2f$', index=True, caption=group, label=group) dbnn_storage['reports']['comparison'][group] = {'fig' : fig, 'latex': latex} figures['reports']['comparison'][group] = {'fig' : fig} print(latex) ``` ## Save ``` results_directory_path = configuration['results']['directory_path'] path = os.path.join(results_directory_path, 'dbnn_results.p') with open(path, 'wb') as f: pickle.dump(dbnn_storage['results'], f) reports_directory_path = configuration['reports']['directory_path'] path = os.path.join(reports_directory_path, 'dbnn') pathlib.Path(*path.split(os.sep)).mkdir(parents=True, exist_ok=True) reports_directory_path = path path = os.path.join(reports_directory_path, 'dbnn_reports.p') with open(path, 'wb') as f: pickle.dump(dbnn_storage['reports'], f) util.traverse_and_save(figures, reports_directory_path) ```
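The `util.TestClassifier` wrapper that turns each statistical test into something that can be scored with classification metrics lives in the project's `util` module, which is not part of this notebook. As a rough, hypothetical sketch of the idea (not the actual implementation), such a wrapper only needs to threshold the test's p-value at the chosen significance level:

```
import numpy as np

class PValueTestClassifier:
    """Hypothetical sketch: wrap a normality test so it behaves like a classifier.

    `test` is assumed to be a callable returning (statistic, p_value) for a 1-D sample;
    a sample is labelled 1 ("normal") when the test fails to reject at level alpha.
    """

    def __init__(self, test, statistic_name, alpha):
        self.test = test
        self.statistic_name = statistic_name
        self.alpha = alpha

    def predict(self, samples):
        # One prediction per sample: fail to reject normality -> 1, reject -> 0.
        return np.array([int(self.test(np.asarray(s))[1] > self.alpha) for s in samples])
```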
# Gaussian Density Filtering for predicting mid-price rise in LOBs

In this notebook we present results of predicting whether the mid price will rise in the next tick for LOBs. For data preparation we use GDF filters with various parameters $r$ and $\sigma$. For prediction we use SVM - we will use all 50 GDF features as predictors, which are described in later parts. We will refer to this approach as `GDF+SVM`. We compare our results to an approach which used logistic regression with queue imbalance as the feature. We will refer to this approach as `logit+queue-imbalance`.

The order is as follows:

1. Data description
2. Methodology
    * GDF parameter choice
3. Results
    * `GDF+SVM` validation vs training set results
    * `logit+queue-imbalance` vs `GDF+SVM` on training set
    * `logit+queue-imbalance` vs `GDF+SVM` on validation set
    * test set results
4. Conclusions

```
%matplotlib inline

from ast import literal_eval

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.cluster import KMeans
from sklearn.svm import SVC
from typing import Tuple

import numpy as np
import itertools
import os

from collections import OrderedDict

from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from scipy.stats import norm

import plotly.offline as py
import plotly.figure_factory as ff
import plotly.graph_objs as go

import warnings

from lob_data_utils import lob, db_result, roc_results

sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
py.init_notebook_mode(connected=True)
```

## Data

We use data from 2013-09 till 2013-11 for the 53 stocks which have at least 10000 datapoints - we use exactly 10000 datapoints per stock. First we normalize the data, then we apply GDF filters (described in the `gdf_approach_description.ipynb` notebook).

For the GDF filters we need to choose parameters:

* $\sigma$ - how wide the Gaussian curve is
* $r$ - how tall the Gaussian curve is
* $K$ - into how many segments we divide the data

We have chosen:

* $\sigma \in \{0.1, 0.2, 0.3, 0.4, 0.5\}$
* $r \in \{ 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1 \}$
* $K = 50$

We used a grid approach, so we generated the data for every pair of the parameters $r$ and $\sigma$. We split the data into three contiguous datasets:

* training data - 60% of datapoints
* validation data - 20% of datapoints
* testing data - 20% of datapoints

```
r = 0.05
s = 0.2
stock = '11234'
K = 50
length = 10000
filename = 'data_gdf/gdf_{}_len{}_r{}_s{}_K{}.csv'.format(stock, length, r, s, K)
df_data_example = pd.read_csv(filename)
df_data_example.head()

gdf_columns = ['gdf_' + str(i) for i in range(0, 50)]
plt.figure(figsize=(16, 8))
ax = sns.boxplot(df_data_example[gdf_columns])
_ = ax.set_xticklabels(gdf_columns, rotation=45)
plt.title('Distribution of gdf features for stock ' + str(stock))
plt.xlabel('GDF feature')
plt.ylabel('')

gdf_features_columns = ['gdf_' + str(i) for i in range(23, 27)]
df_data_example[gdf_features_columns].iloc[0:50].plot(figsize=(16,8), marker='.', alpha=0.5)
plt.title('Gdf features')
plt.xlabel('GDF feature')
plt.ylabel('value')
```

## Methodology

### Methodology for the `GDF+SVM` approach

First we need to find a way to prepare the data for classification; the right choice of parameters:

* $\sigma \in \{0.1, 0.2, 0.3, 0.4, 0.5\}$
* $r \in \{ 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1 \}$

is crucial. After that we train an SVM with the `rbf` kernel on the *training* set, with the GDF filters (using the chosen $\sigma$ and $r$ parameters) applied.
Next we hypertune `gamma` and `C` parameters on *validation* dataset. After that we pick the best parameters for training algorithm and test how it performs on *testing* dataset. In this notebook we pick 50 `gdf` features (all of them) for prediction obtained by applying **GDF** to the datasets. They are choosen, because they don't have so wide spread of values as the rest and they are in the "middle". For choosing the SVM parameters we will use grid-search approach with parameters: * $\gamma \in (1, 10, 100, 1000, 10000)$ * $ C \in (1, 10, 100, 1000, 10000)$ We will use **ROC area score** for evaluating the performance of the classifiers. We will compare our results to results obtained by using `logit+queue-imbalance` *logistic regression* with **queue imbalance** feature (which was described in ealier work). ``` s_parameters = [0.1, 0.2, 0.3, 0.4, 0.5] r_parameters = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1] length = 10000 stocks = list(roc_results.results_10000.keys()) K1 = 0 K2 = 50 K = 50 df_res = pd.DataFrame() for r in r_parameters: for s in s_parameters: for stock in stocks: try: filename = 'data_res/res_{}_len{}_r{}_s{}_K{}.csv'.format(stock, length, r, s, K) df = pd.read_csv(filename) df_res = df_res.append(df, ignore_index=True) except Exception as e: pass df_res.drop('Unnamed: 0', axis=1, inplace=True) df_res = df_res.apply(pd.to_numeric) df_res.dropna(axis=0, inplace=True) df_res.head() stocks = list(df_res['stock'].unique()) print('Number of stocks in datasets:', len(stocks)) ``` #### Results dataset We introduce dataframe with results which will have columns: * `stock` - stock id * `K` - number of segments for GDF filters * `s` - $\sigma$ parameter for GDF filters * `r` - scale parameter for GDF filters * `C` - C parameter for `GDF_SVM` * `gamma` - $\gamma$ parameter for `GDF_SVM` * `roc_cv` - ROC area score for `GDF_SVM`(r, s, K, C, gamma) on validation set * `roc_train` - ROC area score for `GDF_SVM`(r, s, K, C, gamma) on training set * `roc_log_cv` - ROC area score for `logit+queue-imbalance` on validation set * `roc_log_train` - ROC area score for `logit+queue-imbalance` on training set * `roc_cv_diff` - ROC area score difference between `GDF_SVM` and `logit+queue-imbalance` on validation set * `roc_train_diff` - ROC area score difference between `GDF_SVM` and `logit+queue-imbalance` on training set * `cv_train_diff` - ROC area score difference for `GDF_SVM` between result on validation and training set ``` df_res['roc_log_cv'] = np.zeros(len(df_res)) - 1 df_res['roc_log_train'] = np.zeros(len(df_res)) - 1 roc_log_cv = [] res_log = roc_results.result_cv_10000 for idx, row in df_res.iterrows(): roc_log_cv.append(res_log[str(int(row['stock']))]) df_res['roc_log_cv'] = roc_log_cv roc_log_train = [] res_log = roc_results.results_10000 for idx, row in df_res.iterrows(): roc_log_train.append(res_log[str(int(row['stock']))]) df_res['roc_log_train'] = roc_log_train df_res['roc_cv_diff'] = df_res['roc_cv'] - df_res['roc_log_cv'] df_res['roc_train_diff'] = df_res['roc_train'] - df_res['roc_log_train'] df_res['cv_train_diff'] = df_res['roc_cv'] - df_res['roc_train'] df_res.head() ``` ### Paramters for GDF filters In this section we want to analyze the influence of the choice of the $r$ and $\sigma$ parameters on the quality of predictions. The $r$ and $\sigma$ parameters are used in the data preparation stage to calculate gdf features ($r$ is how tall is gaussian curve, $\sigma$ is how wide it is). 
We prepared the datasets with all pairs of these parameters: * $\sigma \in \{0.1, 0.2, 0.3, 0.4, 0.5\} $ * $r \in \{ 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1 \}$ Noisy plot below presents the distributions of the **ROC area scores** for different pairs of $r$ and $\sigma$ parameters. We can observe that the first quartle (Q1) has bigger values when $\sigma$ (`s` on the picture) is smaller. ``` r_s_dict = OrderedDict() for r in r_parameters: for s in s_parameters: r_s_dict['r={}, s={}'.format(r, s)] = df_res[df_res['r'] == r][df_res['s'] == s]['roc_cv'].values plt.figure(figsize=(16, 8)) ax = sns.boxplot(data=list(r_s_dict.values())) plt.ylabel('ROC area score') plt.xlabel('Parameters r and s') _ = ax.set_xticklabels(list(r_s_dict.keys()), rotation=45) plt.title('ROC area score distribution for different r and s parameters') r_s_dict = OrderedDict() for r in r_parameters: for s in [0.1]: r_s_dict['r={}'.format(r, s)] = df_res[df_res['r'] == r][df_res['s'] == s]['roc_cv'].values plt.figure(figsize=(16, 8)) ax = sns.swarmplot(data=list(r_s_dict.values())) plt.ylabel('ROC area score') plt.xlabel('Parameters r') _ = ax.set_xticklabels(list(r_s_dict.keys()), rotation=45) plt.title('ROC area score distribution for different r and fixed s=0.1 (small)') ``` There doesn't seem to be a big difference between different $r$ parameters when the $\sigma$ parameter is small. Anyway we can check also programmatically what is the best using two metrics: * number of stocks which have worse results than `logit+queue-imbalance` on validation set * mean of ROC area scores on validation set (the highest will be considered the best) We could also calculate sums of mean errors on validation/training scores, but we prefer to have more "generalized" approach, which will bring improvement for all stocks. #### Number of stocks which have worse results than `logit+queue-imbalance` on validation set We will check for how many stocks we underperformed and we will pick $r$ and $\sigma$ for which this number is the smallest one. We will compare our `GDF+SVM` against **logit queue-imbalance** approach. ``` number_of_worse = [] for s in s_parameters: for r in r_parameters: df_r_s = df_res[df_res['s'] == s][df_res['r'] == r] df_r_s = df_r_s.sort_values(by='roc_cv_diff', ascending=False) df_r_s = df_r_s.groupby(by='stock').apply(lambda df: df.nlargest(1, columns=['roc_cv_diff'])) worse_num = len(df_r_s[df_r_s['roc_cv_diff'] < 0]) number_of_worse.append({'s': s, 'r': r, 'worse_num': worse_num}) df_worse_num = pd.DataFrame(number_of_worse) ax = df_worse_num[['worse_num']].plot(kind='bar', figsize=(16,8)) ax_cols = ['s={}, r={}'.format(v[0], v[1]) for v in df_worse_num[['s', 'r']].values] _ = ax.set_xticklabels(ax_cols, rotation=45) plt.title('Number of stocks with worse result than logit on validation set') plt.ylabel('Number') plt.xlabel('GDF parameters') print('The best parameters are:') best_gdf_parameters = df_worse_num.nsmallest(1, columns=['worse_num']) best_gdf_parameters ``` Looking at the whole results data set with the best parameters above we underperform by 0.4% on average, with std deviation 3% (in `roc_cv_diff` column we have *difference* between `GDF+SVM` approach scores versus `logit+queue-imbalance`). This result is not bad since we look at all trained classifiers scores instead. It means that hypertunning SVM parameters is a good next step to have an improvement. 
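The SVM hyper-parameter search itself is not shown in this notebook (the ROC scores analysed here are loaded from the precomputed `data_res/*.csv` files). Under the grid described in the methodology it would look roughly like the sketch below; the `df_train` and `df_cv` frames, their column layout and the way the label is stored are illustrative assumptions, not the project's actual code.

```
# Sketch of the gamma/C grid search over the validation ROC area score (illustrative only).
# Assumes df_train and df_cv are the training and validation parts of one stock's GDF
# dataset, with feature columns gdf_0..gdf_49 and a binary mid_price_indicator label.
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score

gdf_cols = ['gdf_{}'.format(i) for i in range(50)]
best = {'roc_cv': -1}
for gamma in [1, 10, 100, 1000, 10000]:
    for C in [1, 10, 100, 1000, 10000]:
        clf = SVC(kernel='rbf', C=C, gamma=gamma)
        clf.fit(df_train[gdf_cols], df_train['mid_price_indicator'])
        score = roc_auc_score(df_cv['mid_price_indicator'], clf.predict(df_cv[gdf_cols]))
        if score > best['roc_cv']:
            best = {'roc_cv': score, 'C': C, 'gamma': gamma}
```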
```
df_res_fixed = df_res[df_res['s'] == best_gdf_parameters.iloc[0]['s']][df_res['r'] == best_gdf_parameters.iloc[0]['r']]
df_res_fixed = df_res_fixed.sort_values(by='roc_cv_diff', ascending=False)
df_res_fixed[['roc_cv', 'roc_train', 'roc_log_train', 'roc_log_cv', 'roc_cv_diff']].describe()
```

##### Stocks for which we underperform:

```
df_bests_fixed = df_res_fixed.sort_values(by='roc_cv', ascending=False)
df_bests_fixed = df_bests_fixed.groupby(by='stock').apply(lambda df: df.nlargest(1, columns=['roc_cv_diff']))
df_bests_fixed = df_bests_fixed.sort_values(by='roc_cv', ascending=False)
print('Number of worse stocks', len(df_bests_fixed[df_bests_fixed['roc_cv_diff'] < 0]), 'per', len(df_bests_fixed['stock'].unique()))
df_worst_fixed = df_bests_fixed[df_bests_fixed['roc_cv_diff'] < 0]
df_worst_fixed
```

#### Mean of `roc_cv` scores

We will check for which parameters $r$ and $\sigma$ we get the highest mean ROC area score on the validation set.

```
roc_cv_means = []
for s in s_parameters:
    for r in r_parameters:
        df_r_s = df_res[df_res['s'] == s][df_res['r'] == r]
        df_r_s = df_r_s.sort_values(by='roc_cv_diff', ascending=False)
        roc_cv_mean = df_r_s['roc_cv'].mean()
        roc_cv_means.append({'s': s, 'r': r, 'roc_cv_mean': roc_cv_mean})
df_roc_cv_means = pd.DataFrame(roc_cv_means)

ax = df_roc_cv_means[['roc_cv_mean']].plot(kind='bar', figsize=(16,8))
ax_cols = ['s={}, r={}'.format(v[0], v[1]) for v in df_roc_cv_means[['s', 'r']].values]
_ = ax.set_xticklabels(ax_cols, rotation=45)
plt.title('Mean ROC area score on validation set')
plt.ylabel('ROC area score')
plt.xlabel('GDF parameters')

print('The best parameters are:')
best_gdf_parameters_mean = df_roc_cv_means.nlargest(3, columns=['roc_cv_mean'])
best_gdf_parameters_mean

df_res_fixed_mean = df_res[df_res['s'] == best_gdf_parameters_mean.iloc[0]['s']][df_res['r'] == best_gdf_parameters_mean.iloc[0]['r']]
df_res_fixed_mean = df_res_fixed_mean.sort_values(by='roc_cv_diff', ascending=False)
df_res_fixed_mean[['roc_cv', 'roc_train', 'roc_log_train', 'roc_log_cv', 'roc_cv_diff']].describe()
```

## Results

If the parameters $r$ and $\sigma$ for the `GDF+SVM` approach are not chosen carefully, we can get deceptively good scores on the training and validation sets. Training and validation scores can also disagree: one can be very high while the other is less than 0.5 (the no-skill baseline). This means the approach is prone to **overfitting**, and we need to be careful when choosing parameters.

```
df_bests = df_res_fixed.sort_values(by='roc_cv', ascending=False)
df_bests = df_bests.groupby(by='stock').first()
df_bests = df_bests.sort_values(by='roc_cv')
df_bests.head()
```

### Results `GDF+SVM`: training vs validation set

`GDF+SVM` usually performs better on the training set than on the validation set. It is easy to see that the model can overfit, since some training results are close to 1.0.

```
dff = df_bests[['roc_train', 'roc_cv', 'cv_train_diff']].sort_values(by='cv_train_diff')
dff = dff.sort_values(by=['cv_train_diff'])
dff[['roc_train', 'roc_cv']].plot(kind='bar', figsize=(16, 8))
plt.legend()
plt.title('ROC area scores on training and validation set (sorted by differences)')
print('Mean square error:', dff['cv_train_diff'].sum() / 2)
```

### Results `GDF+SVM` vs `logit+queue-imbalance` on training set

In most cases `GDF+SVM` performs **worse** on the training set than `logit+queue-imbalance`.
``` dff = df_bests[['roc_train', 'roc_log_train', 'roc_train_diff']].sort_values(by='roc_train_diff') dff[['roc_train', 'roc_log_train']].plot(kind='bar', figsize=(16, 8)) plt.legend() plt.title('ROC area scores on training set (sorted by differences)') print('Mean square error:', dff['roc_train_diff'].sum() / 2) dff['gdf_log_train_diff'] = dff['roc_train'] - dff['roc_log_train'] dff['gdf_log_train_diff'].plot(kind='hist', label='(GDF - log) on training set') plt.legend() plt.title('Density of differences between ROC area score for GDF+SVM and logit+queue-imbalance') df_worse_train = df_bests[df_bests['roc_train'] < df_bests['roc_log_train']] df_worse_train = df_worse_train.sort_values(by='roc_train_diff') ax = df_worse_train[['roc_train_diff']].plot(kind='bar', figsize=(16,8)) print('Number of undreperforming stocks on training set:', len(df_worse_train)) plt.title('Logit and SVM score difference on training set') plt.ylabel('ROC area score diff') plt.xlabel('Stock') ``` ### Results `GDF+SVM` vs `logit+queue-imbalance` on validation set `GDF+SVM` approach performs better on validation set on most of the stocks. ``` dff = df_bests[['roc_cv', 'roc_log_cv', 'roc_cv_diff']].sort_values(by='roc_cv_diff') dff[['roc_cv', 'roc_log_cv']].plot(kind='bar', figsize=(16, 8)) plt.legend() plt.title('ROC area scores on validation set') print('Mean square error:', dff['roc_cv_diff'].sum() / 2) dff['gdf_log_cv_diff'] = dff['roc_cv'] - dff['roc_log_cv'] dff['gdf_log_cv_diff'].plot(kind='hist', label='(GDF - log) on validation set') plt.legend() plt.title('Density of differences between ROC area score for GDF+SVM and logit+queue-imbalance') df_worse_train = df_bests[df_bests['roc_cv'] < df_bests['roc_log_cv']] df_worse_train['roc_cv_diff'] = df_worse_train['roc_cv'] - df_worse_train['roc_log_cv'] df_worse_train = df_worse_train.sort_values(by='roc_cv_diff') ax = df_worse_train[['roc_cv_diff']].plot(kind='bar', figsize=(16,8)) print('Number of worse stocks on validation set:', len(df_worse_train)) plt.title('Logit and SVM score difference on validation set') plt.ylabel('ROC area score diff') plt.xlabel('Stock') ``` ### Can we improve? Before we took `GDF+SVM`'s with the same $r$ and $\sigma$. It might be that for stocks we underperform we could use different GDF parameteres and get better results. We will consider results from `df_res` dataframe (described in [data-description](#Data-description)) which have too big difference (so, not in *interdecile range*) between validation score and training score as outliers (because `GDF+SVM`'s with their parameters *overfit* for sure). 
``` def reject_outliers_iqr(df, col='roc_cv', scale=1.5): q1, q3 = np.percentile(df[col], [25, 75]) iqr = q3 - q1 lower_bound = q1 - (iqr * scale) upper_bound = q3 + (iqr * scale) print('Length before removing outliers', len(df)) print('Rejecting items from ', col, 'lower than ', lower_bound, ' and higher than', upper_bound) d = df.where((df[col] > lower_bound) & (df[col] < upper_bound)) d.dropna(inplace=True) print('Length after removing outliers', len(d)) return d df_res_clean = pd.DataFrame(df_res) df_res_clean = reject_outliers_iqr(df_res_clean, col='cv_train_diff') worse_stocks = df_worse_train.index better_idx = [] df_worse_better = pd.DataFrame(columns=df_res.columns) for s in worse_stocks: row_dict = df_res_clean[df_res_clean['stock'] == int(s)].nlargest( 1, columns=['roc_cv']).to_dict(orient='records') df_worse_better = df_worse_better.append(row_dict, ignore_index=True) ax = df_worse_better.sort_values(by='roc_cv_diff', ascending=False)['roc_cv_diff'].plot(kind='bar') _ = ax.set_xticklabels(df_worse_better['stock'], rotation=45) plt.title('Improved GDF+SVM (not fixed parameters) vs logit+queue-imbalance') plt.xlabel('Stock') plt.ylabel('ROC area score difference') df_worse_better.sort_values(by='roc_cv_diff', ascending=False) ``` ### Results on testing set Bellow we present results on testing set for `GDF+SVM` approach with the best parameters (so if we were underperforming we pick the best GDF parameters we have seen, otherwise we use fixed ones). ``` def svm_classification(df, gdf_columns, C=1000, gamma=1): clf = SVC(kernel='rbf', C=C, gamma=gamma) X = df.loc[:, gdf_columns] y = df['mid_price_indicator'].values.reshape(-1, 1) y[0] = 0 clf.fit(X, y) return clf def predict(clf, df, gdf_columns): predictions = clf.predict(df.loc[:, gdf_columns]) try: return roc_auc_score(predictions, df['mid_price_indicator']) except Exception as e: pass return -1 stocks = df_bests.index gdf_columns = ['gdf_' + str(i) for i in range(0, 50)][K1:K2] dfs = {} dfs_test = {} clf = {} length = 10000 for stock in stocks: if stock in worse_stocks: r = df_worse_better[df_worse_better['stock'] == stock]['r'].values[0] s = df_worse_better[df_worse_better['stock'] == stock]['s'].values[0] gamma = df_worse_better[df_worse_better['stock'] == stock]['gamma'].values[0] C = df_worse_better[df_worse_better['stock'] == stock]['C'].values[0] else: r = df_bests.loc[int(stock)]['r'] s = df_bests.loc[int(stock)]['s'] gamma = df_bests.loc[int(stock)]['gamma'] C = df_bests.loc[int(stock)]['C'] try: filename = 'gdf_{}_len{}_r{}_s{}_K{}'.format(stock, length, r, s, K) dfs[stock], _, dfs_test[stock] = lob.load_prepared_data( filename, data_dir='data_gdf/', cv=True, length=length) clf[stock] = svm_classification(dfs[stock], gdf_columns=gdf_columns, C=C, gamma=gamma) except Exception as e: print(filename, e) continue roc_score = {} for stock in stocks: roc_score[stock] = predict(clf[stock], dfs_test[stock], gdf_columns) ``` #### GDF+SVM in training validation and test results It seems that for 2 stocks our prediction failed completely (we predicted only one class), which means that `GDF+SVM` approach is prone to overfitting. ``` df_bests['roc_test'] = np.zeros(len(df_bests)) - 1 for idx, row in df_bests.iterrows(): df_bests['roc_test'].loc[idx]= roc_score[idx] df_bests = df_bests.sort_values(by='roc_test') ax = df_bests[['roc_test', 'roc_cv', 'roc_train']].plot(kind='bar', figsize=(16, 8)) ``` On average we perform **6% worse** on testing set than on validation set. Standard deviation is quite high. 
```
(df_bests[df_bests['roc_test'] != -1]['roc_test'] - df_bests[df_bests['roc_test'] != -1]['roc_cv']).describe()
```

#### GDF+SVM vs logit+queue-imbalance on testing set

The `logit+queue-imbalance` approach always produces a usable prediction, which is not true for `GDF+SVM`.

```
df_bests.reindex()
l = []
for idx, row in df_bests.iterrows():
    l.append(roc_results.result_test_10000[str(idx)])
df_bests.assign(roc_log_test2 = l)
df_bests['roc_log_test'] = np.array(l)
df_bests = df_bests.sort_values(by='roc_log_test')
ax = df_bests[['roc_test', 'roc_log_test']].plot(kind='bar', figsize=(16, 8))
```

The `GDF+SVM` approach is **worse by 2%** on the testing set than the `logit+queue-imbalance` approach, if we treat the stocks for which we were not able to compute proper predictions as outliers.

```
(df_bests[df_bests['roc_test'] != -1]['roc_test'] - df_bests[df_bests['roc_test'] != -1]['roc_log_test']).describe()
```

## Conclusions

In this notebook we used 50 features from datasets converted using `GDF` filters. The results on the testing set showed that the `GDF+SVM` approach is about 2% worse than the `logit+queue-imbalance` approach on the mid-price rise classification problem. We also noticed that the `GDF+SVM` approach might not give results for all stocks; it is very prone to *overfitting*. The GDF parameters ($r$ and $\sigma$) are crucial in preventing this later *overfitting*. We noticed that the best results are for $r=0.04$ and $\sigma = 0.1$. `GDF+SVM` with 50 features gives the worst results.
## Example. Probability of a girl birth given placenta previa **Analysis using a uniform prior distribution** ``` %matplotlib inline import arviz as az import matplotlib.pyplot as plt import numpy as np import pymc as pm from scipy.special import expit az.style.use('arviz-darkgrid') %config Inline.figure_formats = ['retina'] %load_ext watermark births = 987 fem_births = 437 with pm.Model() as model_1: theta = pm.Uniform('theta', lower=0, upper=1) obs = pm.Binomial('observed', n=births, p=theta, observed=fem_births) with model_1: trace_1 = pm.sample(draws=20_000, tune=50_000) az.plot_trace(trace_1); df = az.summary(trace_1, round_to=4) df ``` The summary shows the mean and the standard deviation, it also shows the 95% posterior interval [0.4112, 0.4732]. The next plot is the plot for the posterior distribution. ``` az.plot_posterior(trace_1); # same as pm.plot_posterior() ``` The true posterior distribution is $\textsf{Beta}(438, 544)$. Let's compare it with the one we found using `pymc`. ``` from scipy.stats import beta x = np.linspace(0, 1, 1000) y = beta.pdf(x, 438, 544) mean_t = df['mean'].values[0] sd_t = df['sd'].values[0] alpha_t = (mean_t**2 * (1 - mean_t)) / (sd_t**2) - mean_t beta_t = (1 - mean_t) * (mean_t * (1 - mean_t) / sd_t**2 - 1) y_pred = beta.pdf(x, alpha_t, beta_t) plt.figure(figsize=(10, 5)) plt.plot(x, y, label='True', linewidth=5) plt.plot(x, y_pred, 'o', label='Predicted', linewidth=4, alpha=0.6) plt.legend() plt.title('The posterior distribution') plt.xlabel(r'$\theta$', fontsize=14); ``` Just like in the book, `phi` is the ratio of male to female births and `trans` is the logit transform of `theta`. ``` with pm.Model() as model_2: theta = pm.Uniform('theta', lower=0, upper=1) trans = pm.Deterministic('trans', pm.logit(theta)) phi = pm.Deterministic('phi', (1 - theta) / theta) obs = pm.Binomial('observed', n=births, p=theta, observed=fem_births) ``` Try looking at the model's test point to see if your model has problems. ``` model_2.check_test_point() ``` For comparison's sake, we change the value for `observed` to a negative number to see what happens: ``` with pm.Model() as model_2_bad: theta = pm.Uniform('theta', lower=0, upper=1) trans = pm.Deterministic('trans', pm.logit(theta)) phi = pm.Deterministic('phi', (1 - theta) / theta) obs = pm.Binomial('observed', n=births, p=theta, observed=-2) model_2_bad.check_test_point() with model_2: trace_2 = pm.sample(draws=5000, tune=2000) az.plot_trace(trace_2); df2 = az.summary(trace_2, round_to=4) df2 ``` You can plot the posterior distribution for the logit transform, `trans`; the male-to-female sex ratio, `phi`. ``` fig, axes = plt.subplots(ncols=2, nrows=1, figsize=(11, 4)) az.plot_posterior(trace_2, var_names=['trans', 'phi'], ax=axes); ``` If you want the interval for `trans`, you have to invert the 95% interval on the logit scale ``` lldd = expit(df2.loc['trans','hpd_3%']) llii = expit(df2.loc['trans','hpd_97%']) print(f'The interval is [{lldd:.3f}, {llii:.3f}]') ``` **Analysis using a nonconjugate prior distribution** And with a custom prior distribution, a triangular one with a uniform distribution to the left and a uniform distribution to the right. 
``` import theano.tensor as tt def triangular(central_num, width): left_num = central_num - width right_num = central_num + width theta = pm.Triangular('theta', lower=left_num, upper=right_num, c=central_num) # Comment these lines to see some changes if tt.lt(left_num, theta): theta = pm.Uniform('theta1', lower=0, upper=left_num) if tt.gt(right_num, theta): theta = pm.Uniform('theta2', lower=right_num, upper=1) return theta ``` Remember, you can play with `width`. In this case, `width=0.09` ``` central_num = 0.485 width = 0.09 with pm.Model() as model_3: theta = triangular(central_num, width) obs = pm.Binomial('observed', n=births, p=theta, observed=fem_births) with model_3: trace_3 = pm.sample(draws=15_000, tune=15_000, target_accept=0.95) az.plot_trace(trace_3, var_names=['theta']); az.summary(trace_3, var_names='theta', round_to=4) ``` And the posterior distribution for `theta` is this. ``` az.plot_posterior(trace_3, var_names='theta'); ``` ## Estimating a rate from Poisson data: an idealized example ``` with pm.Model() as poisson_model: theta = pm.Gamma('theta', alpha=3, beta=5) post = pm.Poisson('post', mu=2 * theta, observed=3) poisson_model.check_test_point() pm.model_to_graphviz(poisson_model) with poisson_model: trace_poisson = pm.sample(draws=20_000, tune=10_000, target_accept=0.95) az.plot_trace(trace_poisson); df4 = az.summary(trace_poisson, round_to=4) df4 ``` The plot of the posterior distribution ``` pm.plot_posterior(trace_poisson); ``` The true posterior distribution is $\textsf{Gamma}(6,7)$. Let's compare it with the one we found using `pymc`. ``` from scipy.stats import gamma x = np.linspace(0, 3, 1000) y = gamma.pdf(x, 6, scale=1/7) mean_t = df4['mean'].values[0] sd_t = df4['sd'].values[0] alpha_t = mean_t**2 / sd_t**2 beta_t = mean_t / sd_t**2 y_pred = gamma.pdf(x, alpha_t, scale=1/beta_t) plt.figure(figsize=(10, 5)) plt.plot(x, y, 'k', label='True', linewidth=7) plt.plot(x, y_pred, 'C1', label='Predicted', linewidth=3, alpha=0.9) plt.legend() plt.title('The posterior distribution') plt.xlabel(r'$\theta$', fontsize=14); ``` If we add additional data, `mu` changes. ``` with pm.Model() as poisson_model_2: theta = pm.Gamma('theta', alpha=3, beta=5) post = pm.Poisson('post', mu=20 * theta, observed=30) with poisson_model_2: trace_poisson_2 = pm.sample(draws=10_000, tune=15_000, target_accept=0.95) az.plot_trace(trace_poisson_2); df5 = pm.summary(trace_poisson_2, round_to=4) df5 az.plot_posterior(trace_poisson_2); ``` The true posterior distribution is $\textsf{Gamma}(33, 25)$ ``` x = np.linspace(0, 3, 1000) y = gamma.pdf(x, 33, scale=1/25) # How you write alpha and beta mean_t = df5['mean'].values[0] sd_t = df5['sd'].values[0] alpha_t = mean_t**2 / sd_t**2 beta_t = mean_t / sd_t**2 y_pred = gamma.pdf(x, alpha_t, scale=1/beta_t) plt.figure(figsize=(10, 5)) plt.plot(x, y, 'k', label='True', linewidth=5) plt.plot(x, y_pred, 'oC1', label='Predicted', alpha=0.15) plt.legend() plt.title('The posterior distribution') plt.xlabel(r'$\theta$', fontsize=14); val = np.mean(trace_poisson_2['theta'] >= 1) print(f'The posterior probability that theta exceeds 1.0 is {val:.2f}.') %watermark -iv -v -p theano,scipy,matplotlib,arviz -m ```
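The "true" posteriors quoted above follow from Gamma-Poisson conjugacy: if $\theta \sim \textsf{Gamma}(\alpha, \beta)$ and $y \mid \theta \sim \textsf{Poisson}(x\theta)$, then

$$p(\theta \mid y) \propto \theta^{\alpha-1}e^{-\beta\theta}\,(x\theta)^{y}e^{-x\theta} \propto \theta^{\alpha+y-1}e^{-(\beta+x)\theta},$$

that is, $\theta \mid y \sim \textsf{Gamma}(\alpha+y,\ \beta+x)$. With the $\textsf{Gamma}(3,5)$ prior this gives $\textsf{Gamma}(3+3,\ 5+2)=\textsf{Gamma}(6,7)$ for the first model ($x=2$, $y=3$) and $\textsf{Gamma}(3+30,\ 5+20)=\textsf{Gamma}(33,25)$ for the second ($x=20$, $y=30$), matching the densities plotted against the sampled posteriors.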
github_jupyter
# Experiment 1: _All VS Some_

Comparing the results of 20 KNN classification rounds between the model trained with all 18 channels and the models trained with only the 6 (and 8) channels that performed best.

## Importing libraries

```
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# list of str ['01', '02', ..., '24']
file_range = ['{:0>2}'.format(chb) for chb in range(1, 25)]

# Loading the seeds
seed_array = np.load('./20_seeds.npy')
print(f'seeds: {seed_array}')
```

## Generating the data

```
all_vs_some_df = pd.DataFrame()

for file in file_range:
    # Loading the chb file
    chb_df = pd.read_csv(f'./chb{file}/chb{file}_com_dwt.csv')
    target_s = chb_df.pop('target')

    some_6_channels = ['2', '3', '6', '7', '14', '15']
    some_8_channels = some_6_channels + ['5', '1']

    # Filtering the chb columns that refer to the channels in some_6_channels
    columns_from_6_channels = [column for column in chb_df.columns
                               if column.split('-')[-1] in some_6_channels]
    # Filtering the chb columns that refer to the channels in some_8_channels
    columns_from_8_channels = [column for column in chb_df.columns
                               if column.split('-')[-1] in some_8_channels]

    # For the 20 rounds with all channels
    X_all = chb_df.values
    # For the 20 rounds with the 6 channels
    X_6_channels = chb_df[columns_from_6_channels].values
    # For the 20 rounds with the 8 channels
    X_8_channels = chb_df[columns_from_8_channels].values
    # The target is the same in all three cases
    y = target_s.values

    acc_from_all = []
    acc_from_6_ch = []
    acc_from_8_ch = []

    for seed in seed_array:
        # Splitting the data
        X_train_all, X_test_all, y_train_all, y_test_all = train_test_split(
            X_all, y, test_size=0.3, random_state=seed)
        X_train_6_ch, X_test_6_ch, y_train_6_ch, y_test_6_ch = train_test_split(
            X_6_channels, y, test_size=0.3, random_state=seed)
        X_train_8_ch, X_test_8_ch, y_train_8_ch, y_test_8_ch = train_test_split(
            X_8_channels, y, test_size=0.3, random_state=seed)

        # Instantiating and training the classifiers
        model_all = KNeighborsClassifier().fit(X_train_all, y_train_all)
        model_6_ch = KNeighborsClassifier().fit(X_train_6_ch, y_train_6_ch)
        model_8_ch = KNeighborsClassifier().fit(X_train_8_ch, y_train_8_ch)

        # Saving the accuracies
        acc_from_all.append(model_all.score(X_test_all, y_test_all))
        acc_from_6_ch.append(model_6_ch.score(X_test_6_ch, y_test_6_ch))
        acc_from_8_ch.append(model_8_ch.score(X_test_8_ch, y_test_8_ch))

    # Building the accuracy dataframe
    acc_df = pd.DataFrame()
    acc_df['all channels'] = acc_from_all
    acc_df['2_3_6_7_14_15 channels'] = acc_from_6_ch
    acc_df['2_3_6_7_14_15_5_1 channels'] = acc_from_8_ch

    # Reuse the acc_df labels so the summary columns match the channel subsets actually used
    channels = list(acc_df.columns)
    temp_df_list = []
    for column, mean, std in zip(channels, acc_df.mean().values, acc_df.std().values):
        temp_df_list.append(
            pd.DataFrame(
                data=[mean, std],
                index=[f'{column}-mean', f'{column}-std'],
                dtype=np.float64
            ).transpose()
        )

    chb = pd.DataFrame(data=[f'{file}'], columns=['chb'], dtype=np.float64)
    all_vs_some_df = all_vs_some_df.append(chb.join(temp_df_list), ignore_index=True).sort_values(by='chb')

all_vs_some_df.to_csv(path_or_buf='./csvs/allVSsome.csv', index=False)
```

## Applying a column-wise gradient

The higher a value is relative to its column, the darker the cell background.

```
all_vs_some_df = pd.read_csv('./csvs/allVSsome.csv')
all_vs_some_df.style.background_gradient(cmap='Blues', subset=all_vs_some_df.columns[1:]).hide_index()
```

## Applying a row-wise gradient

The higher a value is relative to its row, the darker the cell background.

```
only_means = [column for column in all_vs_some_df.columns if 'mean' in column.split('-')]
all_vs_some_df[['chb'] + only_means].style.background_gradient(cmap='Blues', axis='columns', subset=only_means).hide_index()

all_vs_some_df[only_means].boxplot(figsize=(12, 6))
all_vs_some_df[only_means].describe()
```
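The summary above reduces each patient to one mean accuracy per channel subset, so a paired test can indicate whether the all-channels and 6-channel configurations really differ. This is a sketch that is not part of the original experiment; it assumes the `-mean` column naming produced above, that the first two mean columns are the all-channels and 6-channel results, and that `scipy` is available.

```
import pandas as pd
from scipy.stats import wilcoxon

# Hypothetical follow-up: paired comparison of per-patient mean accuracies
# from the CSV written by the cell above.
df = pd.read_csv('./csvs/allVSsome.csv')
mean_cols = [c for c in df.columns if c.endswith('-mean')]

# Assumes mean_cols[0] is the all-channels mean and mean_cols[1] the 6-channel mean
stat, p_value = wilcoxon(df[mean_cols[0]], df[mean_cols[1]])
print(f'Wilcoxon signed-rank: statistic={stat:.3f}, p-value={p_value:.4f}')
```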
# Klasyfikatory ### Pakiety ``` import pandas as pd import numpy as np import category_encoders as ce import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV, train_test_split from sklearn.metrics import recall_score from sklearn.pipeline import Pipeline from sklearn.metrics import precision_score from sklearn import metrics from sklearn.preprocessing import StandardScaler import random import warnings warnings.filterwarnings('ignore') from xgboost import XGBClassifier ``` ### Ustawienie ziarna gwarantuje reprodukowalność wyników ``` np.random.seed(123) ``` ### Wczytanie danych ``` data = pd.read_csv('australia.csv') data = pd.DataFrame(data) ``` ### Sprawdzenia zrównoważenia zbioru ze względu na zmienną celu, w celu doboru odpowiednich miar oceny klasyfikacji ``` data.filter(["RainTomorrow"]).hist() ``` Dane są niezrównoważone, więc jako miary jakości predykcji klasy przez klasyfikatory będziemy używać precision i recall (nie accuracy) # 1. Podział na zbiory treningowy i testowy * 80% obserwacji należy do zbioru treningowego, pozostałe do testowego. * Odseparowujemy zmienną celu od zmiennych objaśniających. ``` X_train, X_test, Y_train, Y_test = train_test_split(data.drop('RainTomorrow', axis=1), data['RainTomorrow']) scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) ``` # 2. Klasyfikatory ## 2.1. Random Forest ``` # Stworzenie modelu rf_classifier = RandomForestClassifier(n_estimators=1000, max_depth=8, max_features='sqrt', bootstrap=True, min_samples_leaf=2, min_samples_split=2, random_state=42) ``` ### Objaśnienie wybranych hiperparametrów Wybrane hiperparametry: **n_estimators** - ilość drzew składowych (int, domyślnie 100) **max_depth** - maksymalna głębokość drzewa (int, domyślnie None) **min_samples_split** - minimalna liczba obserwacji wymagana do podziału węzła wewnętrznego (int/float, domyślnie 2) **min_samples_leaf** - minimalna liczba obserwacji wymagana względem liścia (int, domyślnie 1) **max_features** - ilość cech branych pod uwagę podczas poszukiwania najlepszego podziału (string). Jeśli “sqrt”, to max_features=sqrt(n_features), jeśli “log2”, to max_features=log2(n_features). **bootstrap** - stosowanie bootstrap (bool, domyślnie True) **random_state** - kontroluje losowość procedury bootstrap ``` # Trenowanie modelu rf_classifier.fit(X_train, Y_train) # Predykcja klasy predict_class1 = rf_classifier.predict(X_test) # Predykcja prawdopodobieństwa predict_proba1 = rf_classifier.predict_proba(X_test)[:, 1] ``` ## 2.2. XGBoost ``` # Stworzenie modelu xgb_classifier = XGBClassifier(n_estimators = 1000, booster = 'gbtree', colsample_bytree = 0.8, max_depth = 5, gamma = 1.5, min_child_weight = 1, subsample = 0.8, random_state = 42) ``` ### Objaśnienie wybranych hiperparametrów **booster** - typ boostera, gbtree, gblinear lub dart. Dwa pierwsze oparte na modelach drzewiastych, ostatni na funkcjach liniowych. **colsample_bytree** - to frakcja kolumn (losowo wybranych), które zostaną wykorzystane przy konstruowaniu każdego drzewa. **gamma** - odpowiada za częstość przycinania drzewa, im wyższa gamma, tym częściej przycinamy. **min_child_weight** - minimalna waga wymagana względem 'dziecka' **subsample** - odpowiada części obserwacji (wierszy) do podpróbkowania na każdym etapie. Domyślnie jest ustawiony na 1, co oznacza, że używamy wszystkich wierszy. Pozostałe jak wyżej. 
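A minimal sketch (not part of the original notebook): instead of fixing these hyperparameters by hand, a small grid could be searched with the `GridSearchCV` already imported above. The parameter ranges and `scoring='recall'` are illustrative assumptions, and `X_train`/`Y_train` are the scaled training data from the split above.

```
# Illustrative grid; values are assumptions, not tuned results
param_grid = {
    'max_depth': [3, 5, 7],
    'min_child_weight': [1, 5],
    'subsample': [0.8, 1.0],
}
search = GridSearchCV(
    XGBClassifier(n_estimators=200, booster='gbtree', random_state=42),
    param_grid,
    scoring='recall',  # recall, because the target variable is imbalanced (see above)
    cv=3,
)
search.fit(X_train, Y_train)
print(search.best_params_, search.best_score_)
```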
``` # Trenowanie modelu xgb_classifier.fit(X_train, Y_train) # Predykcja klasy predict_class2 = xgb_classifier.predict(X_test) # Predykcja prawdopodobieństwa predict_proba2 = xgb_classifier.predict_proba(X_test)[:, 1] ``` ## 2.3. Regresja logistyczna ``` from sklearn.linear_model import LogisticRegression # Budowanie modelu lr_classifier = LogisticRegression(penalty = 'l1', class_weight='balanced', C = 0.01, solver = 'saga') ``` ### Objaśnienie wybranych hiperparametrów **penalty** - norma kary. **class_weight** - wagi klas. **C** - odwrotny parametr regularyzacji, wyższe wartości C odpowiadają mniejszej regularyzacji. **solver** - algorytm używany w procedurze optymalizacji. ``` lr_classifier.fit(X_train, Y_train) # Predykcja klasy predict_class3 = lr_classifier.predict(X_test) # Predykcja prawdopodobieństwa predict_proba3 = lr_classifier.predict_proba(X_test)[:, 1] ``` # 3. Zestawienie rezultatów ## 3.1. Miary precision i recall Wartości miary recall na zbiorze testowym dla klasyfikatorów Random Forest, XGBoost i regresji logistycznej wynoszą odpowiednio: ``` pd.DataFrame({"Metoda" : ["Random Forest", "XGBoost", "Regresja logistyczna"], "Recall" : [recall_score(Y_test, predict_class1), recall_score(Y_test, predict_class2), recall_score(Y_test, predict_class3)], "Precision" : [precision_score(Y_test, predict_class1, average='macro'), precision_score(Y_test, predict_class2, average='macro'), precision_score(Y_test, predict_class3, average='macro')]}) ``` ## Krzywe Precision-Recall ``` from sklearn.metrics import precision_recall_curve from sklearn.metrics import plot_precision_recall_curve disp1 = plot_precision_recall_curve(rf_classifier, X_test, Y_test) disp1.ax_.set_title('Krzywa Precision-Recall dla klasyfikatora Random Forest') disp2 = plot_precision_recall_curve(xgb_classifier, X_test, Y_test) disp2.ax_.set_title('Krzywa Precision-Recall dla klasyfikatora XGBoost') disp3 = plot_precision_recall_curve(lr_classifier, X_test, Y_test) disp3.ax_.set_title('Krzywa Precision-Recall dla regresji logistycznej') ``` ## Wnioski: Przy predykcji klasy okazuje się, że osiągnięte przez klasyfikatory wartości precision i recall są zróżnicowane. Względem miary precision najlepsze rezultaty osiągnął Random Forest, natomiast względem recall regresja logistyczna. Moim zdaniem, ocena który klasyfikator jest lepszy od pozostałych zależy od konkretnej sytuacji i zmiennej celu, którą chcemy przewidywać. Mianowicie ważne jest to, czy chcemy aby klasyfikator wykrywał jak największą frakcję wyników dodatnich (maksymalizacja recall), czy aby jak największa część obserwacji wskazanych przez klasyfikator jako dodatnia faktycznie taka była (maksymalizacja precision). W przypadku przewidywania deszczu, skupiłabym się bardziej na zmiennej recall. ## 3.2. 
Krzywa ROCR i miara AUC ``` # Krzywa ROCR fpr1, tpr1, thresholds1 = metrics.roc_curve(Y_test, predict_proba1) # false & true positive rates fpr2, tpr2, thresholds2 = metrics.roc_curve(Y_test, predict_proba2) # false & true positive rates fpr3, tpr3, thresholds3 = metrics.roc_curve(Y_test, predict_proba3) # false & true positive rates plt.figure() plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr1, tpr1, label='Random Forest') plt.plot(fpr2, tpr2, label='XGBoost') plt.plot(fpr3, tpr3, label='Regresja Logistyczna') plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.legend(loc='best') plt.show() # Miara AUC pd.DataFrame({"Klasyfikator" : ["Random Forest", "XGBoost", "Regresja Logistyczna"], "AUC": [metrics.auc(fpr1, tpr1), metrics.auc(fpr2, tpr2), metrics.auc(fpr3, tpr3)]}) ``` ## Wnioski Rezultaty osiągnięte przez klasyfikatory w przypadku predykcji prawdopodobieństwa są niemalże identyczne (krzywe ROCR pokrywają się) i dość wysokie. Jak widać najwyższy wynik względem miary AUC osiągnęła regresja logistyczna, nie mniej jednak, różnice są znikome. Jak wynika z literatury, wyniki wszystkich klasyfikatorów świadczą o tym, że każdy z nich może być postrzegany jako co najmniej dobry, a nawet bardzo dobry. # Część dodatkowa - model regresyjny # Regresja liniowa ``` import category_encoders as ce from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error ``` ### Wczytanie danych ``` df = pd.read_csv('allegro-api-transactions.csv') df = pd.DataFrame(df) df = df.drop(['lp', 'date'], axis = 1) Y = df.price cols = ['categories', 'seller','it_location', 'main_category'] ``` ## 1. Target Encoding ``` te = ce.TargetEncoder(cols = cols) # Podział na zbiory treningowy i testowy train_X1, test_X1, train_Y1, test_Y1 = train_test_split(df.drop('price', axis=1), df['price']) # Kodowanie po podziale na podzbiory encoded_train_X1 = te.fit_transform(train_X1, train_Y1) encoded_test_X1 = te.transform(test_X1, test_Y1) # Model linreg1 = LinearRegression() linreg1.fit(encoded_train_X1, train_Y1) # Predykcja y_pred1 = linreg1.predict(encoded_test_X1) ``` ## 2. James Stein Encoding ``` js = ce.james_stein.JamesSteinEncoder(df, cols = cols) # Podział na zbiory treningowy i testowy train_X2, test_X2, train_Y2, test_Y2 = train_test_split(df.drop('price', axis=1), df['price']) # Kodowanie po podziale na podzbiory encoded_train_X2 = js.fit_transform(train_X2, train_Y2) encoded_test_X2 = js.transform(test_X2, test_Y2) # Model linreg2 = LinearRegression() linreg2.fit(encoded_train_X2, train_Y2) # Predykcja y_pred2 = linreg2.predict(encoded_test_X2) ``` ## 3. 
CatBoost Encoding ``` cb = ce.CatBoostEncoder(cols = cols) # Podział na zbiory treningowy i testowy train_X3, test_X3, train_Y3, test_Y3 = train_test_split(df.drop('price', axis=1), df['price']) # Kodowanie po podziale na podzbiory train_X3_copy = train_X3.copy() train_Y3_copy = train_Y3.copy() test_X3_copy = test_X3.copy() test_Y3_copy = test_Y3.copy() ## Losowa permutacja wierszy (zabieg zalecane w dokumentacji) train_permutation = np.random.permutation(len(train_X3_copy)) train_X3_copy = train_X3_copy.iloc[train_permutation].reset_index(drop = True) train_Y3_copy = train_Y3_copy.iloc[train_permutation].reset_index(drop = True) test_permutation = np.random.permutation(len(test_X3_copy)) test_X3_copy = test_X3_copy.iloc[test_permutation].reset_index(drop = True) test_Y3_copy = test_Y3_copy.iloc[test_permutation].reset_index(drop = True) ## Kodowanie encoded_train_X3 = cb.fit_transform(train_X3_copy, train_Y3_copy) encoded_test_X3 = cb.transform(test_X3_copy, test_Y3_copy) # Model linreg3 = LinearRegression() linreg3.fit(encoded_train_X3, train_Y3_copy) # Predykcja y_pred3 = linreg3.predict(encoded_test_X3) ``` ## Zestawienie wyników ``` pd.DataFrame({"Metoda" : ["Target Encoding", "James Stein Encoding", "CatBoost Encoding"], "R2" : [r2_score(test_Y1, y_pred1), r2_score(test_Y2, y_pred2), r2_score(test_Y3_copy, y_pred3)], "RMSE" : [mean_squared_error(test_Y1, y_pred1, squared=False), mean_squared_error(test_Y2, y_pred2, squared=False), mean_squared_error(test_Y3_copy, y_pred3, squared=False)]}) ``` ## Wnioski Zgodnie z początkową intuicją rezultaty regresji względem rozważanych miar, dla danych poddanych podobnym metodom kodowania zmiennych kategorycznych, są zróżnicowane w nieznacznym stopniu. Pierwsze co rzuca się w oczy w powyższych wynikach, to niskie wartości współczynników determinacji. Wartość R<sup>2</sup> wynosząca ok. 0.1-0.15, gdzie maksymalna możliwa wartość wynosi 1, jest niezwykle mała. Pokazuje, że nasze modele regresji praktycznie nie przedstawiają zmienności próbki. Wartości błędów średniokwadratowych dla rozważanych encodingów są nieco bardziej zróżnicowane. Nie mniej jednak w tym wywołaniu, najlepiej względem obydwu miar wypadł CatBoost Encoding. (Odnoszę się do konkretnego wywołania, ponieważ wyniki nie są ściśle reprodukowalne)
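The remark above that the results are not strictly reproducible can be addressed by evaluating all three encoders on one shared, fixed train/test split, so that differences come from the encoding rather than from the random partition. This is a sketch under the assumption that `df` and `cols` from the cells above are still available; the `random_state` value is arbitrary.

```
import category_encoders as ce
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

# One shared split for all encoders
X_tr, X_te, y_tr, y_te = train_test_split(df.drop('price', axis=1), df['price'],
                                           test_size=0.25, random_state=123)

encoders = [('Target', ce.TargetEncoder(cols=cols)),
            ('James Stein', ce.JamesSteinEncoder(cols=cols)),
            ('CatBoost', ce.CatBoostEncoder(cols=cols))]

for name, enc in encoders:
    model = LinearRegression().fit(enc.fit_transform(X_tr, y_tr), y_tr)
    pred = model.predict(enc.transform(X_te))
    print(f'{name}: R2={r2_score(y_te, pred):.3f}, '
          f'RMSE={mean_squared_error(y_te, pred, squared=False):.2f}')
```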
# Shortcuts para Jupyter lab Con la celda en azul podemos: añadir una celda hacia arriba pulsando a añadir una celda hacia abajo pulsando b copiar una celda completa pulsando c borrar una celda pulsando d(x2) ejecutar y avanzar una celda pulsando SHIFT + INTRO ejecutar la celda seleccionada pulsando CONTROL + INTRO volver al estado anterior pulsando z para comentar y descomentar un bloque de código CONTROL + MENOS para ver una pequeña ayuda a las funciones: nos colocamos dentro de la funcion y hacemos SHIFT + TAB # Como escribir en markdown [markdown](https://markdown.es/sintaxis-markdown/) # Introducción a Python * Lenguaje de programación interpretado -> código legible. * Programación orientada a objetos * Programación funcional * Lenguaje de programación multiusos ## Preparando el entorno Para proceder a la preparación del entorno vamos a utilizar Anaconda. Anaconda es un gestor de paquetes que instala librerias que son utilizadas en Daata Science. Además es un gestor de entornos. Además de utilizar Anaconda en algun momento utilizaremos pip, otro gestor de paquetes. Para la instalación vamos a utlizara las instrucciones que se encientran aquí. https://docs.anaconda.com/anaconda/install/ ``` from IPython.display import IFrame ``` ## Comprobación del entorno En este ejercicio vamos a comprobar que tenemos correctamente configurado el entorno. - Comprobar la versión de python instalada. - Obtener la lista de paquetes instalada usando conda ``` IFrame(src = "https://kapeli.com/cheat_sheets/Conda.docset/Contents/Resources/Documents/index", width=800, height=400) # Esta celda solo funcionará para los usuarios de mac o linux, en windows el comando es dir !pwd !conda info ``` # Como abordar el proceso de desarrollo. Antes de empezar, es necesario entender cuales son los pasos necesarios para desarollar software. Qué debemos hacer? 1. Análisis del problema 2. Especificaciones 3. Diseño de la solución 4. Implementación 5. Pruebas Es importante entender el problema: - Que datos vamos a tener. Como nos van a llegar - Que esperamos obtener - Cuales son las transformaciones/acciones que tenemos que realizar? Especificaciones - Debemos definir de manera rápida que queremos obtener. - Cuales sera´n los inputs/oputpus. como es la interacción Diseño de la solución - Aquí comenzamos a definir los componentes que tendrá nuestra solución - Como modelaremos los elementos de la misma de acuerdo a las capacidades que nos den las herramientas que vamos a usar - Definiremos funciones y métodos y/o clases. - definiremos las funcionalidades esperadas para cada método o función. Implementación - Aquí es cuando codificamos. - Tendremos que tener en cuenta que herramientas podemos reutilizar, que librerias - Codificamos Pruebas - En el caso de Data Science, vamos a aproximarnos al proceso de una forma muy funcional y secuencial, así que aordaremos las pruebas sobre la propia ejecución, pero es común definir durante el diseño las pruebas unitarias y de integración, o incluso utilizar aproximaciones orientadas a testing como TDD. # Elementos de un programa **Comentarios** - permiten incluir descripciones al código sobre los objetivos, que se hace en cada lugar, etc. Es muy importante usarlos, porque nuestra memoria es peor de lo que creemos. **Nombres** - las funciones, variables, metodos, etc reciben identificadores. Son Case Sensitive, y debemos tratar de describir lo que estamos nombrando. **Expresiones** - Las expresiones son fragmentos de código que producen o calculan nuevos datos. 
Pueden ser desde la más sencilla, que es un literal hasta formulas más complejas o que ejecutan funciones o metodos. **Output Statments** - Permiten mostrar los datos, variables y resultados de expresiones. **Assigment statements** - permiten asignar valores a variables. En general tienen el formato <variable> = <expresion> **Assigning input** - Son casos de asignación que recojen entrada de los usuarios en general por el interfaz del programa. # Tipos de datos en python ## Números enteros ``` x = 9 x x = int(10) x ``` ### ¿Qué podemos hacer? ``` 4 x = 5 2 + 4, 2 / 4, 6 - 10, 4 * 4, 3**3 # ¿Qué resultado será? int(3/4) ``` ## Números decimales ``` x = .3 x x = float(3.5) # atención a que el separador decimal es el '.' x x = 1e-16 x ``` ### ¿Qué podemos hacer? ``` 2.4 + 4.1, 2.3 / 4.06, 6 - 10.8, 4.3 * 4.1, 3.4**3.7 ``` ## Cadenas de texto ``` x = """ hola """ x x = 'Mi primera cadena de texto' x x = str(7) x ``` ### ¿Qué podemos hacer? ``` x = 'Mi primera cadena de texto' x.lower(), x.upper() x.split(' ') x = ' eSto sE lleVaba En tiEmpOs quE nO QuiEro RecorDar ' x.strip().capitalize() x = 'Hola' y = 'Mundo' x + y, (x + y)*2 'G00000000000L'.replace('0', 'O') # Buscar que métodos tiene asociado el tipo str ``` ### Formateo de strings en python ``` name = 'Esteban Sánchez' years = 25 print(f'Hola me llamo {name} y tengo {years} años') cadena = 'La factura del curso de %s es de %0.2f euros' % ('data science'.upper(), int(12.345)) print(cadena) ``` ## Booleanos ``` x = True y = False x, y ``` ### ¿Qué podemos hacer? ``` x = bool(1) y = bool(0) x, y x, y = bool('true'), bool('True') print(x, y) xx, yy = bool('false'), bool('False') print(xx, yy) xxx = bool('') print(xxx) ``` Hasta aqui hemos visto diferentes formas de asignar a una variable a un determinado tipo de dato, para ello hemos usado la asignación directa o funciones asociadas a ese tipo de dato. Sin embargo hay funciones como el 'print()' que no devuelven nada, solo enseñan resultado. Lo veremos en el siguiente bloque. ## Para completar ``` # Podemos ver siempre de que tipo es la variable que estamos tratando con la función 'type()' type(7), type(5.5), type(False), type('str') # Además python cuenta con un tipo de datos especial que se llama None # este tipo de dato se usa cuando no se quiere especificar nada. Y no # soporta ningún tipo de operación. x = None type(x), bool(x) None x = 7 val = type(x) print(val) return_value = print('abc') # ¿Que va a pasar con la siguiente línea? print(return_value) ``` # Ayudas ``` # Con el cursor dentro del método tambien podríamos ver la ayuda si pulsamos SHIFT + TABULADOR # str.replace() str.replace? 
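# (Added illustration, an assumption rather than part of the original cell.)
# dir() is another way to explore what is available on an object or type:
print([m for m in dir(str) if not m.startswith('_')][:10])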
help(str.replace) ``` # Operaciones aritméticas | Operator | Name | Description | |--------------|----------------|--------------------------------------------------------| | ``a + b`` | Addition | Sum of ``a`` and ``b`` | | ``a - b`` | Subtraction | Difference of ``a`` and ``b`` | | ``a * b`` | Multiplication | Product of ``a`` and ``b`` | | ``a / b`` | True division | Quotient of ``a`` and ``b`` | | ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts | | ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` | | ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` | | ``-a`` | Negation | The negative of ``a`` | | ``+a`` | Unary plus | ``a`` unchanged (rarely used) | Estas operaciones se pueden usar como la matemática básica que conocemos de primero se ejecutan los paréntesis, tras ello la multiplicación y la división y por último las operaciones aditivas. ## Floor division ``` print(f"La 'floor division' es una división normal solo que el resultado es el número entero de la división. " \ f"Hacer 10 // 3 = {10 // 3} es lo mismo que hacer el int(10 / 3) = {int(10/3)} que hicimos antes.") ``` ## Modulus La operación módulo devuelve el resto de una división ``` 10 % 5, 4 % 3, 3 % 4, 4 % 7 ``` **Ejercicio 1**: Escriber una función en la cual dado un número mayor que cero siempre devuelva el entero superior ``` def entero_superior(num): x = int(num) incremento = int((1 % num)) ret = x + incremento return ret n = entero_superior(1.1) n ``` ## Para completar ``` # Podemos hacer una suma o una resta a una variable numérica de una forma más simple incr = 2 value_sum = 2 value_sum += incr print('Suma rapida: ', value_sum) value_res = 2 value_res -= incr print('Resta rapida: ', value_res) value_mult = 7 value_mult *= incr print('Multiplicación rapida: ', value_mult) ``` **Ejercicio 2:** Escribe una función que calcule las soluciones de un polinomio de segundo grado: ``` def soluciones_segundo_grado(a, b, c): sol1 = (-b + (b**2 - 4*a*c)**(.5))/(2*a) sol2 = (-b - (b**2 - 4*a*c)**(.5))/(2*a) return sol1, sol2 assert soluciones_segundo_grado(1, -3, 2) == (2.0, 1.0) ``` # Operaciones lógicas ## Operaciones de comparación Estas operaciones siempre devuelven True o False por lo tanto devuelven booleanos. Las operaciones de comparación son: | Operation | Description || Operation | Description | |---------------|-----------------------------------||---------------|--------------------------------------| | ``a == b`` | ``a`` equal to ``b`` || ``a != b`` | ``a`` not equal to ``b`` | | ``a < b`` | ``a`` less than ``b`` || ``a > b`` | ``a`` greater than ``b`` | | ``a <= b`` | ``a`` less than or equal to ``b`` || ``a >= b`` | ``a`` greater than or equal to ``b`` | ``` x, y = 6, 7 # comprobar con booleanos print(f'¿Es x mayor que y? {x > y}') print(f'¿Es x menor que y? {x < y}') print(f'¿Es x igual que y? {x == y}') print(f'¿Es x diferente de y? {x != y}') x, y = 'Casa', 'Casa' print(f'¿Es x mayor que y? {x > y}') print(f'¿Es x menor que y? {x < y}') print(f'¿Es x igual que y? {x == y}') print(f'¿Es x diferente de y? {x != y}') # ¿Cuál será mayor casa o taza? print("barco" >= "moto") print('osa' < 'oso') print('mas' < 'mascar') print('as' >= 'As') ``` La comparación de cadenas de texto se hace de manera lexicográfica, esto es que se ordena semánticamente. Y además las mayúsculas son menores que las minúsculas. Depende del código ascii ``` 'X' < 'x' '!' < '?' 
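# (Added illustration.) The orderings above follow the characters' Unicode/ASCII
# code points, which ord() makes explicit:
print(ord('X'), ord('x'))   # 88 120 -> uppercase letters sort before lowercase
print(ord('!'), ord('?'))   # 33 63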
print("123" == 123) print(int("123") == 123) print("123" == str(123)) int('n') x = 10 y = 7 z = 3 # ¿Qué saldrá de los siguientes prints? print(x < 5) print(x == y + z) print(x != x) print(y == 10 - 3) print(x**2 <= 100) ``` ### IF-ELIF-ELSE La estructura general de este tipo de algoritmos es la siguiente: ```python if condicion_1 == True: ... elif condicion_2 == 8: ... else: ... ``` Lo cual se lee: Si se cumple la condicion_1 hazme el bloque de código que hay justo debajo e identado, si esta condición no se cumple mira a ver si la condicion_2 lo hace y me calculas el bloque de código que hay debajo e identado. En el caso de que no se cumplan las condiciones 1 y 2 entonces me haces el bloque de código que está identado bajo el else. Esto se usa para validar variables y seleccionar lógicas a aplicar. ##### **EL TAMAÑO (de la identación) IMPORTA** En C por ejemplo un bloque de código es: ``` C // C code for(int i=0; i<100; i++) { // curly braces indicate code block total += i; } ``` mientras que en python no usamos llaves ni puntos y comas para explicitar un bloque de código o un fin de línea. En python cuando queremos especificar un bloque de código (como por ejemplo lo que ejecuta un IF-ELIF-ELSE, lo que ejecuta una función o lo que ejecuta un bucle) debemos acabar la línea con ':' y la siguiente empezarla con un tabulador o 4 espacios blancos. Tomemos el siguiente ejemplo de código como visual para ver esto de la identación: ```python for i in _list: if i: return 'do_something1' elif (i == 7) and (i <= 10): return 'do_something2' else: return 'do_something3' ``` ``` INPUT = 8 if INPUT == 7: print('La variable x es numérica y es 7') elif ((type(INPUT) != str) and (7 < INPUT < 10)): print('La variable x está entre 7 y 10 abiertos') elif type(INPUT) == str: print('La variable x es una cadena de texto') else: print('Esta lógica no la tengo contemplada') ``` Tambien tenemos opción de hacer condiciones anidadas ``` input_ = 8 if input_ > 7: print('El input de nuestro programa es mayor que 7') print('¿Será mayor o igual de 10?') if input_ >= 10: print('\t ¡Sí!') else: print('\t Pues va a ser que no') else: print('El número es menor o igual a 7') ``` **Ejercicio 3**: Escriber una función en la cual dado un número mayor o igual a 1 imprima el entero superior y validar si el número metido cumple la condición: ``` def entero_superior(num): x = int(num) if num < 1: print('El número metido es menor que 1 por lo tanto no cumple la condición') elif x == num: print(num) else: incremento = int((1 % num)) print(x + incremento) entero_superior(-2) ``` ## Operaciones de identidad y pertenencia | Operator | Description | |---------------|---------------------------------------------------| | ``a is b`` | True if ``a`` and ``b`` are identical objects | | ``a is not b``| True if ``a`` and ``b`` are not identical objects | | ``a in b`` | True if ``a`` is a member of ``b`` | | ``a not in b``| True if ``a`` is not a member of ``b`` | | ``a or b`` | True if a condition satisfies one or two of them | ``` num = int(input('Elige número')) if num > 0 and num < 10: print("Gracias") else: print("Número incorrecto") string = str(input('Dime un número')).lower() if string == 'dos' or string == 'tres': print('gracias') else: print('numero incorrecto') nombre = str(input("¿Cual es el nombre de nuestro planeta? ")).upper() if nombre == "TIERRA": print("¡Correcto!") else: print("Incorrecto") nombre = input("¿Cual es el nombre de nuestro planeta? 
") if nombre == "tierra" or nombre == "Tierra": print("¡Correcto!") else: print("Incorrecto") x = input('Dame algo') type(x) x = int(input("Introduce un número: ")) if not x < 0: print("El número es positivo") ``` ## Operaciones bitwise | Operator | Name | Description | |--------------|-----------------|---------------------------------------------| | ``a & b`` | Bitwise AND | Bits defined in both ``a`` and ``b`` | | <code>a &#124; b</code>| Bitwise OR | Bits defined in ``a`` or ``b`` or both | | ``a ^ b`` | Bitwise XOR | Bits defined in ``a`` or ``b`` but not both | | ``a << b`` | Bit shift left | Shift bits of ``a`` left by ``b`` units | | ``a >> b`` | Bit shift right | Shift bits of ``a`` right by ``b`` units | | ``~a`` | Bitwise NOT | Bitwise negation of ``a`` | ``` x, y = 'Tiça', 'pleta' if (x == 'Tierra') ^ (y == 'planeta'): print('Solo se cumple uno de los dos') elif (x == 'Tierra') & (y == 'planeta'): print('Se cumplen los dos') elif (x == 'Tierra') | (y == 'planeta'): print('Solo se cumple uno de os dos') else: print('Suerte') # ¿Qué es lo que no está del todo correcto en la lógica de aqui? Pista: Se ejecuta en orden y solo la parte de la condición que hace True. ``` # Estructura de datos ## Listas ``` l = list() print(type(l), l) l = [] l ``` ### ¿Qué podemos hacer? ``` ll = ['1', '2'] # Para rellenar un lista se puede usar el siguiente método para listas: l.append(0) l.append('hola') l.append(list('12')) l.append([6, 7, 8, '9', 'diez']) print(l) # con el método append estamos añadiendo a la posición final de la lista el elemento que queremos ``` Como podemos ver no hay restricción en lo que se refiere al tipo de dato que contiene una lista. Podemos además indexar valores de una lista dadas sus posiciones: ``` print('l[0]=', l[0], '\n', 'l[0:2]=', l[0:2], '\n', 'l[4:]=', l[1:], '\n', 'l[-2:]=', l[-2:], '\n', 'l[-1]=', l[-1]) ``` Podemos además calcular el número de elementos que tiene una lista de manera directa: ``` print(f'La lista l tiene {len(l)} elementos') ``` Supongamos una lista de números donde cada elemento es la probabilidad de sacar un número de un dado: ``` p_d = [1/6] * 6 p_d ll = ['hola', ' ', 'mundos'] print(ll[0], ll[1], ll[2]) ``` Entonces para comprobar que la suma de las probabilidades es la unidad, podemos hacer: ``` sum(p_d) ``` ### Precisión decimal Una cosa a tener en cuenta es la precisión decimal, estamos tratando con ordenadores que no dejan de tener un espacio en memoria finito. Pongamos un ejemplo: ``` 0.1 + 0.2 == 0.3 ``` No es un comportamiento único de python, es debido a como guarda la información python. Si pintamos los números con mayor precisión veremos lo que pasa: ``` print("0.1 = {0:.17f}".format(0.1)) print("0.2 = {0:.17f}".format(0.2)) print("0.3 = {0:.17f}".format(0.3)) ``` We're accustomed to thinking of numbers in decimal (base-10) notation, so that each fraction must be expressed as a sum of powers of 10: $$ 1 /8 = 1\cdot 10^{-1} + 2\cdot 10^{-2} + 5\cdot 10^{-3} $$ In the familiar base-10 representation, we represent this in the familiar decimal expression: $0.125$. Computers usually store values in binary notation, so that each number is expressed as a sum of powers of 2: $$ 1/8 = 0\cdot 2^{-1} + 0\cdot 2^{-2} + 1\cdot 2^{-3} $$ In a base-2 representation, we can write this $0.001_2$, where the subscript 2 indicates binary notation. The value $0.125 = 0.001_2$ happens to be one number which both binary and decimal notation can represent in a finite number of digits. 
In the familiar base-10 representation of numbers, you are probably familiar with numbers that can't be expressed in a finite number of digits. For example, dividing $1$ by $3$ gives, in standard decimal notation: $$ 1 / 3 = 0.333333333\cdots $$ The 3s go on forever: that is, to truly represent this quotient, the number of required digits is infinite! Similarly, there are numbers for which binary representations require an infinite number of digits. For example: $$ 1 / 10 = 0.00011001100110011\cdots_2 $$ Just as decimal notation requires an infinite number of digits to perfectly represent $1/3$, binary notation requires an infinite number of digits to represent $1/10$. Python internally truncates these representations at 52 bits beyond the first nonzero bit on most systems. This rounding error for floating-point values is a necessary evil of working with floating-point numbers. The best way to deal with it is to always keep in mind that floating-point arithmetic is approximate, and *never* rely on exact equality tests with floating-point values. **Ejercicio 4:** Calcula la media de un vector numérico ret = sum(l) / len(l) ``` round(3.456, 1) def media_vector(l): ret = sum(l) / len(l) return ret assert media_vector([1, 2, 3]) == 2 ``` Tambien podemos calcular la longitud de una cadena de caracteres además de indexar caracteres: ``` media_vector([1.2, 300, 678238, 4.5]) x = 'Hola Mundo!' len(x), x[: 4], x[-6:] ``` Podemos crear listas de números enteros consecutivos: ``` # Crea una lista con 10 elementos. x = range(11) type(x) list(x) list(x) x = list(range(11, 17)) x x = list(range(10, 20, 2)) x x = list(range(30, 3, -3)) x [1, 2] + [3, 4] # Además podemos hacer las operaciones rápidas. [1, 2, 3, 4, 5, 6, 5, 5] ``` Uniendo operadores lógicos, podemos comprobar si un valor está en una lista: ``` x = 40 l = [1, 2, 3, 4] if x in l: print(f'El número {x} está en la lista') else: print(f'El número {x} no está en la list') ``` Dado que consideramos cadenas de texto como listas podemos tambien ver si una cadena está dentro de otra: ``` text = 'Hola Mundo' if 'Mundo' in text: print('Se encuentra') ``` Las listas además no son inmutables, esto es que puedes cambiar valores preexistentes: ``` my_list = [1, 2, 3, 4, 5] print(my_list) my_list[0] = my_list[0] * 10 print(my_list) # Podemos insertar un valor en una posición determinada my_list = ['Pau', 'Marc', 'Navarro', 'Llull', 'Ricky'] my_list.insert(2, 'Sergio R') print(my_list) # Podemos luego eliminar una posición de una lista del my_list[3] print(my_list) # Podemos guardarnos el valor eliminado dropped = my_list.pop(4) print(my_list) print(dropped) banquillo = list() banquillo.append(my_list.pop(2)) banquillo x1 = int(input('Numero 1')) x2 = int(input('Numero 2')) x3 = int(input('Numero 3')) l = list() l.append(x1) l.append(x2) l.append(x3) print(l) x1 = 1 x2 = 2 x3 = 3 l = [x1, x2, x3] print(l) ``` **Ejercicio 5:** Programa un sistema que meta 3 números en una lista. Después, imprime la lista. **Ejercicio 6:** Implementa un programa que tenga como input una posición en la lista, y elimine el elemento en esa posición. Si la posición no existe en la lista (es menor que cero o mayor que la longitud), entonces imprime un mensaje indicando que ha habido un error. 
``` pos = 1 list_ = [2, 4, 5, 6, 'hola', [45]] if not len(list_) <= pos: del list_[pos] print(list_) else: print(f'No existe el elemento {pos} de la lista') l = [1, 2] l.extend([1, 23]) l.sort(reverse=True) ``` **Ejercicio 7:** Implementa un algoritmo en el que dada una lista desordenada de números, te devuelva la misma lista ordenada decrecientemente. (Pista: es un método de las listas --> una sola línea de código es la implementación) ``` l = [2, 7,3, 4, 57, 9, 0] l.sort(reverse=True) l ``` ## Sets ``` l = [2, 2, 1] l set(l), type(set(l)) s = set(l) s[1] ``` Los sets son como las listas solo que están ordenadas y no permiten elementos iguales además no se pueden cambiar sus valores dado que son inmutables ``` text = """ En un lugar de la Mancha, de cuyo nombre no quiero acordarme, no ha mucho tiempo que vivía un hidalgo de los de lanza en astillero, adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca que carnero, salpicón las más noches, duelos y quebrantos los sábados, lentejas los viernes, algún palomino de añadidura los domingos, consumían las tres partes de su hacienda. El resto della concluían sayo de velarte, calzas de velludo para las fiestas con sus pantuflos de lo mismo, los días de entre semana se honraba con su vellori de lo más fino. Tenía en su casa una ama que pasaba de los cuarenta, y una sobrina que no llegaba a los veinte, y un mozo de campo y plaza, que así ensillaba el rocín como tomaba la podadera. Frisaba la edad de nuestro hidalgo con los cincuenta años, era de complexión recia, seco de carnes, enjuto de rostro; gran madrugador y amigo de la caza. Quieren decir que tenía el sobrenombre de Quijada o Quesada (que en esto hay alguna diferencia en los autores que deste caso escriben), aunque por conjeturas verosímiles se deja entender que se llama Quijana; pero esto importa poco a nuestro cuento; basta que en la narración dél no se salga un punto de la verdad. """ ``` **Ejercicio 8:** Crea un algoritmo que dado el texto anterior me devuelva una lista con todas las palabras que aparecen. Reemplaza los signos de puntuación (. . , ;) por nada ``` text2 = text.lower().replace(',', '').replace(';', '').replace('.', '').replace('\n', '').replace('(', '').replace(')', '') l = text2.split(' ') set(l) ``` # Tuplas Hay dos maneras de inicializar una tupla ``` t = tuple() t t = () t ``` Tradicionalmente la tupla la componían dos elementos aunque ahora una tupla puede ser de N elementos ``` t = (1, 2, 3, 4, 'hola') t # Podemos acceder a un indice de la tupla t[0] # Podemos hacer slicing sobre tuplas t[1:-2] # Pero no podemos cambiar el valor de los elementos de las tuplas t[1] = 2 ``` # Diccionarios ``` d = {} d d = dict() d ``` Los diccionarios son unas estructuras para organizar datos muy interesantes dado que se organizan modo clave: valor. ``` animales = {'vertebrados': ['delfin', 'ballena', 'homo sapiens'], 'invertebrados': ['cucaracha', 'mariposa']} animales animales['numeros'] = 1 animales # Hay dos formas de pedirle a un diccionario que te devuelva el valor dada una clave print(animales['vertebrados']) print(animales.get('invertebrados')) # En el caso de la segunda manera, si la clave no estuviera en el diccionario nos devuelve un None. 
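# (Added illustration.) .get() also accepts a default to return for missing keys,
# which avoids both the None and the KeyError shown below:
print(animales.get('con-alas', []))   # -> [] instead of None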
De la segunda manera falla el algoritmo print(animales.get('con-alas')) print(animales['con-alas']) # Podemos ver todas las claves de un diccionario print(animales.keys()) # Podemos ver todos los valores de un diccionario print(animales.values()) alumnos = {1: 'Reyes', 2: 'Oda', 3: 'Micaela', 4: 'Rafael', 5: 'Alfonso'} alumnos # Si viniera un alumno nuevo podríamos añadirlo al diccionario de alumnos de la siguiente forma: alumnos[6] = 'Nuevo alumno' alumnos['clave'] = 'Nuevo alumno 2' alumnos d = {} d d['uno'] = 1 d['dos'] = 2 d['tres'] = 3 d # Podemos combinar diccionarios accediendo a sus valores psoe = {'lider_rojo': 'Pedro Sánchez'} podemos = {'lider_morado': 'Pablo Iglesias'} nuevo_gobierno = {**psoe, **podemos} nuevo_gobierno total = {'Madrid': 4e6} andalucia = {'Andalucia': 8e6} total.update(andalucia) total ``` Una cualidad muy importante de los diccionarios es que las claves que guarda son únicas. Lo vemos con un ejemplo: ``` dictionary = {'clave1': 'valor1', 'clave2': 'valor2'} dictionary # Como podemos ver en este ejemplo se elimina el valor asociado a la clave anterior dictionary['clave1'] = 'nuevo_valor1' dictionary # Otro ejemplo más: d = {('pt-022020', '1'): '10', ('pt-022020', '2'): '6'} d d[('pt-022020', '2')] = '7' d ``` # Bucles ## While Mientras se cumpla una condición me vas a ejecutar el siguiente bloque de código ``` count = 0 while count < 5: print('Suma uno a la variable count') print(f'count_pre={count}') count += 1 print(f'count_post={count}') cond = True x = 0 # while cond == True: while cond: x += 1 if x >= 7: # La cláusula break para el bucle break elif x >= 2: # La clausula continue hace que no se # ejecute más código y pase a la siguiente iteración continue else: print(x) print('Es menor de 2') cond = True x = 0 # while cond == True: (esta línea es igual que poner la de abajo dado que el while # evalua a True) while cond: x += 1 if x >= 7: break else: pass cond = True x = 0 while cond: x += 1 if x >= 100: cond = False else: # La cláusula pass hace nada (es como vale pues pasa por aqui pero no hagas nada) pass ``` **Ejercicio 9:** Crea un algoritmo que me devuelva en una lista los números impares menores o iguales a 100: ``` # Forma 1 (no cuenta el 0 como número impar o par dado que lo primero que haces en la lógica # es sumar 1) x=0 lista=list() while x < 100: x+= 1 if x%2 !=0: lista.append(x) else: pass print (lista) # Forma 2 (En este caso si que cuenta el 0 dado que el sumar uno se encuentra al final) count = 0 l_impar = list() while count <= 100: resto = count % 2 if resto == 1: l_impar.append(count) else: pass count += 1 print(l_impar) count = 0 l_impar = list() while count <= 100: # resto = count % 2 if count % 2 == 1: l_impar.append(count) elif count % 2 == 0: pass count += 1 print(l_impar) # Esta es una manera lenta de ejecutar un algoritmo dado que calculamos el módulo de un valor # dos veces, por lo tanto realizamos dos veces la misma operación # Rápido count = 0 l_impar = list() while count <= 100: resto = count % 2 if resto == 1: l_impar.append(count) elif resto == 0: pass count += 1 print(l_impar) # Esta es una manera rapida de ejecutar un algoritmo dado que calculamos el módulo de un valor # una vez, por lo tanto realizamos una sola vez la operación módulo # Para calcular el tiempo que tarda vamos a meterlo en dos funciones # y vamos a calcular el tiempo. 
def lento(): count = 0 l_impar = list() while count <= 100: # resto = count % 2 if count % 2 == 1: l_impar.append(count) elif count % 2 == 0: pass count += 1 return l_impar def rapido(): count = 0 l_impar = list() while count <= 100: resto = count % 2 if resto == 1: l_impar.append(count) elif resto == 0: pass count += 1 return l_impar # La primera parte de la expresión de abajo se llaman comandos mágico y ese sirve para calcular # el tiempo de ejecución %timeit rapido() %timeit lento() ``` Podemos ver más información sobre comandos mágicos [blog](https://www.analyticslane.com/2019/04/12/seis-comandos-magicos-de-jupyter-notebooks/) o [pagina oficial](https://ipython.readthedocs.io/en/stable/interactive/magics.html) # Bucle For ``` l = list('asfasfdsdf') for idx in range(0, len(l)): print(l[idx]) l = list(range(10)) for elemento in l: print(elemento) l = ['Montaña', 'Monte', 'Llanura', 'Playa'] for index, value in enumerate(l, start=1): print(index, value) ``` **Ejercicio 10**: Crea una cuenta atrás que acabe en un explosión ``` count = 5 while count: print(count) count -= 1 if not count: print('BUUUUUUM') ``` Recordad que todos los números son True excepto el 0 que es False ``` d = {1: 'Casa', 2: 'Chalet', 3: 'Piso', 4: 'Ladrillo', 5: 'Hormigón', 6: 'Arcilla'} tipos_de_casa = list() materiales = list() for key, value in d.items(): if key <=3: tipos_de_casa.append(value) else: materiales.append(value) print(tipos_de_casa) print(materiales) string = 'Hola mundillo' for char in string: print(char) entero = 745 for ent in entero: print(ent) ``` Los diccionario, las listas y los sets son iterables, esto es que puedes aplicar bucles dentro de ellas ## List comprehension En python nunca creamos una lista como: ```python c = 0 l = list() while c < 9: l.append(c) ``` Si no que hacemos: ``` l = [_ for _ in range(0, 9)] l l = ['123T', '457T', '876Y'] l_endswith_t = [_ for _ in l if _.endswith('T')] l_endswith_t l = ['123T', '457T', '876Y'] l_endswith_t = [_ for _ in l if _.endswith('T')] l_endswith_t x = None y = 'Hola' if x is None else 'Adios' y ``` La estructura para las list comprehension son: ```python variable = [valor_que_añades for elemento_dentro_de_lo_que_recorres in lista_a_recorrer] ``` Con una condición añadida quedaría: ```python var = [vqa for eddlqr in lar if condición_1] ``` Con una condicion if else: ```python var = [vqa1 if cond1 else vqa2 for eddlqr in lar] ``` ``` [_ if _.endswith('a') else _.upper() for _ in ['aa', 'bb']] # Este bloque de código de aqui abajo x = False y = 1 if x == True else 0 print(y) # Es igual que este otro bloque x = True if x == True: y = 1 else: y = 0 print(y) # La diferencia reside en que el primero es más pythonico ``` # Keywords ``` help('keywords') ``` Lo de arriba es un extracto de algunas palabras clave que tiene reservadas python porque hacen alguna función ``` locals() globals() # Al hacer esto lo que nos hacemos es cargarnos la funcionalidad de la palabra list en este caso por # ello es recomendable no usar estas palabras como nombres de variables. Podemos usar algo como lo que # hay en la celda de abajo list = list('asd') print(list) f = list() f # He tenido que resetear el kernel porque anteriormente me he cargado la funcionalidad list_ = list('asd') print(list_) f = list() f ``` # Asignación de variables. Copiado y referenciado. 
Como ya sabemos una variable se asigna por ejemplo de la siguiente manera ``` x = [5, 6, 7] ``` Ahora bien, tambien podemos asignar el valor de una variable a otra variable siendo las dos iguales: ``` y = x print(x, y) x == y ``` En este caso no estamos creando una nueva variable y con el valor de la variable en x si no que estamos haciendo que x se refiera a y, por lo que si cambia y cambiaría también x. Esto se hace así para evitar consumir memoria y guardar dos veces la mimas información. ``` del x[0] print(x) print(y) ``` # Funciones Vamos a ver como se estructura una función. ``` def media_vector(vector): """ Esta función sirve para calcular la media de un vector dado. Parameters: vector (list[float]): Lista de N números sobre los cuales se hará una media Returns: float: Media del vector """ num = sum(vector) den = len(vector) return num/den media_vector([1, 2, 4]) ``` Una función se define siempre empezando por 'def'. Tras esto podemos ver el nombre de la función en color azul que normalmente será snake_case. Entre paréntesis se encuentran los argumentos/variables/inputs de la función, para el ejemplo anterior es uno solo que tiene de nombre 'vector'. Tras esto ponemos los ':' para especificar que el código que viene a continuación pertenece a la función. Se observa también un comentario escrito entre 3 comillas dobles que equivale al docstring o información de lo que hace la función. Tras esto vendría el algoritmo propiamente dicho o los cálculos y finalmente si queremos que nuestra función de un resultado de vuelta que podamos asignarle a una variable debemos poner un return. **Ejercicio 11**: Escribe una función que a una cadena de texto reemplaze los signos de puntuación ['.', ',', ';', ':', '!', '¡', '?', '¿', '(', ')', '-'] por la cadena vacía ``` s = 'Hola! ¿Como te llamas?' def suma(x=2, y=1): return x + y suma(1, 9) def elimina_puntuacion(string, signos_puntuacion=['.', ',', '!']): for signo in signos_puntuacion: string = string.replace(signo, '') return string texto = elimina_puntuacion(s, ['.', ',', '!', '?']) texto def elimina_puntuacion(string): signos_puntuacion = ['.', ',', ';', ':', '!', '¡', '?', '¿', '(', ')', '-'] for signo in signos_puntuacion: string = string.replace(signo, '') return string texto = elimina_puntuacion(s) texto ``` Además de las funciones como acabamos de ver, tambien podemos definir funciones con parámetros por defecto. Esto es como sigue: ``` def elevar_un_numero(num, pot=2): """ Esta función eleva un número a N (por defecto N=2) Parameters: num (float): Número base que se eleva pot (float): Potencia a la que se eleva Return: float: Potencia del número num elevado a pot """ calc = num ** pot return calc elevar_un_numero(11, 3) # Si escribimos solo num = elevar_un_numero(5) print(f'Nos eleva al cuadrado 5**2={num}') # Pero podemos cambiar el valor de la potencia, especificando esa variable por posición. Si lo hacemos de esta manera # la posición es muy importante ya que si cambias los números de posición cambia la lógica de la función. num = elevar_un_numero(5, 3) print(f'Nos eleva al cubo 5**3={num}') # O podemos cambiarlo por nombre de variable. En este caso como se referencia a la variable por nombre, da igual el orden. 
num = elevar_un_numero(num=7, pot=2)
print(f'Nos eleva al cuadrado 7**2={num}')

num = elevar_un_numero(pot=7, num=2)
print(f'Nos eleva a la séptima 2**7={num}')
```

**Ejercicio 12**: Haz una función que te devuelva (aproximadamente) el salario neto y, además, la media de lo que cobrarías al mes si fueran 12 o 14 pagas, así como lo que quedaría de pagas extraordinarias en caso de que fueran 12 pagas. [distribución de IRPF](https://factorialhr.es/blog/calcular-sueldo-neto-bruto/). Las variables de entrada son el salario bruto y las pagas que hay (por defecto poner 14).

```
b = 65000

tramos = {
    b > 60000: .45,
    35201 < b <= 60000: .37,
    20001 < b <= 35201: .3,
    12450 < b <= 20001: .24}.get(True, .19)

def IRPF(bruto, pagas=14):
    b = bruto
    hacienda = 0
    cond = True
    while cond:
        tramos = {
            b > 60000: [.45, 60000],
            35201 < b <= 60000: [.37, 35201],  # 37 % según la tabla de tramos enlazada
            20001 < b <= 35201: [.30, 20001],
            12450 < b <= 20001: [.24, 12450]}.get(True, [.19, False])
        if tramos[1] is not False:
            hacienda += (b - tramos[1]) * tramos[0]
            b -= (b - tramos[1])
        else:
            hacienda += b * tramos[0]
            b -= b * tramos[0]
        cond = tramos[1]
    neto = bruto - hacienda
    return neto, [neto/pagas for _ in range(0, 12)]

IRPF(21000, 12)

def IRPF(bruto, pagas=14):
    hacienda = 0
    b = bruto
    cond = True
    while cond:
        if b > 60000:
            hacienda += (b-60000)*.45
            b -= (b-60000)
        elif b > 35201:
            hacienda += (b - 35201)*.37  # 37 % según la tabla de tramos enlazada
            b -= (b - 35201)
        elif b > 20001:
            hacienda += (b - 20001)*.30
            b -= (b - 20001)
        elif b > 12450:
            hacienda += (b - 12450)*.24
            b -= (b - 12450)
        else:
            hacienda += b*.19
            b -= b*.19
            cond = False
    neto = bruto - hacienda
    return neto, [neto/pagas for _ in range(0, 12)]

IRPF(21000, 12)
```

Las variables que crees dentro de una función son específicas de ese espacio: no se pueden usar fuera de ella. Sin embargo, las variables que creemos fuera de las funciones sí se pueden usar dentro de estas. Veámoslo con un ejemplo:

```
# Funciona!
a = 7

def divide_entre_siete(x):
    return x/a

res = divide_entre_siete(7)
res

# No funciona! (print(aaa) da un NameError porque aaa solo existe dentro de la función)
def divide_entre_siete(x):
    aaa = 7
    return x/aaa

res = divide_entre_siete(7)
print(res)
print(aaa)
```

## Argumentos flexibles: ``*args`` & ``**kwargs``

A veces no queremos especificar el número de variables que tiene nuestra función; para ello se usan estos argumentos, que capturan todos los argumentos que queramos.

```
def catch_all(*args, **kwargs):
    print("args =", args)
    print("kwargs = ", kwargs)

catch_all(1, 2, 3, a=4, b=5)

catch_all('a', keyword=2)
```

El carácter ``*`` significa «expande esta secuencia» mientras que ``**`` significa «expande este diccionario».

```
inputs = (1, 2, 3)
keywords = {'pi': 3.14}

catch_all(*inputs, **keywords)

def elevar(n, p):
    return n**p

elevar(3, p=5)

# Esto falla: los argumentos posicionales no pueden ir después de los argumentos con nombre
catch_all(a=7, 5, 6)

catch_all(1, 2, 3)
print('#'*7)
catch_all(a=1, b=2)

def media_vector(*args):
    return sum(args)/len(args)

media_vector(1,2,3,4,5,6,7,8,9,10)
```

# Clases

**Definición del profe**: Las clases son agrupaciones de funciones (dentro de una clase a las funciones se les llama métodos) que realizan operaciones similares, por lo que son buenas para agrupar funcionalidades parecidas.

**Definición de alguien que sabe**: Las clases proveen una forma de empaquetar datos y funcionalidad juntos. Al crear una nueva clase, se crea un nuevo tipo de objeto, permitiendo crear nuevas instancias de ese tipo. Cada instancia de clase puede tener atributos adjuntos para mantener su estado. Las instancias de clase también pueden tener métodos (definidos por su clase) para modificar su estado.
Instanciar un objeto es llamar a una clase (como si llamaras a una función), y ejecutar un método de una clase es ejecutar una función que está definida dentro de esa clase. Veamos un ejemplo:

```
class MiPrimeraClase():
    """
    Mi primera clase.
    """

    def __init__(self):
        """
        Este método se ejecuta en el momento en el que instancias la clase
        """
        self.lista_compra = []

    def anadir(self, prod):
        """
        Este es un método que puede usarse tantas veces como se requiera
        """
        self.lista_compra.append(prod)

# Instancio el objeto (llamo a la clase) MiPrimeraClase
clase = MiPrimeraClase()
print(clase)

# Puedo ver las variables que tiene la clase:
print(clase.lista_compra)

# Puedo además usar el método de la clase para añadir objetos a esta lista
clase.anadir('Pan')
clase.anadir('Uvas')

# Y ver como ha cambiado esta variable de la clase
print(clase.lista_compra)
```

El **self** que escribimos en todos los métodos es una variable muda (esto quiere decir que al llamar al método no hace falta declararla) porque hace referencia a la propia instancia, es decir, a todo lo que se contiene dentro del ecosistema de la clase.

En el ejemplo anterior hemos podido ver la variable lista_compra porque era una variable pública; esto, sin embargo, puede no ser así si definimos las variables como privadas. Veamos un ejemplo:

```
class Coche():

    def __init__(self, marca):
        self.marca = marca
        self.__color = 'Gris Antracita'  # Anteceder al nombre de la variable dos barras bajas indica variable privada

coche = Coche(marca='Toyota Hybrid')
coche.marca

# Falla porque esa variable es privada
coche.__color
```

Sin embargo, esta variable privada sí que puede ser usada dentro del ecosistema de la clase. Por ejemplo:

```
class Coche():

    def __init__(self, marca):
        self.marca = marca
        self.__color = 'Gris Antracita'  # Anteceder al nombre de la variable dos barras bajas indica variable privada

    def get_color(self):
        return self.__color

coche = Coche(marca='Toyota Hybrid')
print(coche.marca)
print(coche.get_color())
```

Dado que la variable color es privada no podemos cambiar el valor de esta... ¿o sí?
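Un apunte al margen (no está en el material original): en Python lo «privado» no es estricto. Las dos barras bajas solo activan el llamado *name mangling*, que renombra el atributo internamente. Un boceto mínimo, usando el objeto `coche` de la celda anterior:

```python
# El atributo __color se guarda internamente como _Coche__color,
# por eso coche.__color falla pero esto (aunque no es recomendable) funciona:
print(coche._Coche__color)  # 'Gris Antracita'
```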
**Ejercicio 13**: A la clase Coche añadir un método que permita cambiar el valor de la variable privada color

También las funciones pueden ser privadas, por lo que solo podrían usarse dentro del ecosistema de la clase

```
class Coche():

    def __init__(self, marca):
        self.marca = marca
        self.__color = 'Gris Antracita'  # Anteceder al nombre de la variable dos barras bajas indica variable privada

    def get_color(self):
        return self.__color

    def __change_color(self, new_color):
        self.__color = new_color

    def change_c(self, nc):
        nc = nc.capitalize()
        if nc in ['Rojo', 'Gris Antracita', 'Azul']:
            self.__change_color(new_color=nc)
        else:
            raise ValueError('El color no es ni Rojo ni Gris Antracita ni Azul')

c = Coche('Lexus')

# Esto falla
c.__change_color('Rojo')

# Pero podemos hacer esto
c.change_c('Rojo')
print(c.get_color())
```

Podríamos crear dentro de las clases también métodos que no usan el ecosistema de la clase (métodos estáticos):

```
class Coche():

    def __init__(self, marca):
        self.marca = marca
        self.__color = 'Gris Antracita'  # Anteceder al nombre de la variable dos barras bajas indica variable privada

    def get_color(self):
        return self.__color

    def __change_color(self, new_color):
        self.__color = new_color

    def change_c(self, nc):
        nc = nc.capitalize()
        if nc in ['Rojo', 'Gris Antracita', 'Azul']:
            self.__change_color(new_color=nc)
        else:
            raise ValueError('El color no es ni Rojo ni Gris Antracita ni Azul')

    @staticmethod  # <-- A esto se le denomina decorador de la función
    def ruedas_de_repuesto():  # No tiene el self por lo que no pertenece al ecosistema de la clase
        return 1

r = Coche('Lexus')
r.ruedas_de_repuesto()

class Coche():

    def __init__(self, marca):
        self.marca = marca
        self.__color = 'Gris Antracita'  # Anteceder al nombre de la variable dos barras bajas indica variable privada

    def get_color(self):
        """
        A este tipo de métodos se les denomina getter dado que obtienen valores de variables de clases
        """
        return self.__color

    def __change_color(self, new_color):
        """
        A este tipo de métodos se les denomina setter porque establecen variables dentro de la clase
        """
        self.__color = new_color

    def change_c(self, nc):
        nc = nc.capitalize()
        if nc in ['Rojo', 'Gris Antracita', 'Azul']:
            self.__change_color(new_color=nc)
        else:
            raise ValueError('El color no es ni Rojo ni Gris Antracita ni Azul')

    @staticmethod
    def ruedas_de_repuesto():
        return 1

    @staticmethod
    def ruedas_total():
        return ruedas_de_repuesto + 4

# No funciona porque dentro de ruedas_total no se puede usar ruedas_de_repuesto como un nombre suelto
# (habría que llamarlo, por ejemplo, como Coche.ruedas_de_repuesto()). ¿Cómo podríamos hacer para poder usarlo?
rt = Coche('Audi')
rt.ruedas_total()

# Se pueden crear además varios objetos de la misma clase sin problema
coches = [Coche(marca) for marca in ['Toyota', 'Lexus', 'BMW', 'Renault']]
coches

coches[2].get_color()
```

### Programación Orientada a Objetos

Las clases son una parte de lo que se denomina programación orientada a objetos (OOP). La idea es tratar de hacer bloques de código reusables para luego aprovechar al máximo estas funcionalidades. Para entender esto vamos a ver la *Herencia* en las clases y veremos cómo al final podremos crear un grafo de clases.

## Herencia

```
class Rocket():
    # Simula el viaje de un cohete para un juego
    # o para simulaciones fisicas.

    def __init__(self, x=0, y=0):
        # Posicion inicial del cohete
        self.x = x
        self.y = y

    def move_rocket(self, x_increment=0, y_increment=1):
        # Mueve el cohete en las unidades que se quieras. Por defecto
        # se mueve solo 1 en el eje y
        self.x += x_increment
        self.y += y_increment

    def get_distance(self, other_rocket):
        # Podemos darle a la función otro objeto Rocket y que
        # calcule la distancia entre ellos (distancia euclídea).
        distance = ((self.x-other_rocket.x)**2+(self.y-other_rocket.y)**2)**.5
        return distance
r1 = Rocket(5,7)
r2 = Rocket(1,0)

r2.get_distance(r1)
```

**Ejercicio 14**: Modifica el método get_distance para que se le pasen desde fuera del ecosistema de la clase dos objetos Rocket y que haga el mismo cálculo.

```
class Rocket():
    # Simula el viaje de un cohete para un juego
    # o para simulaciones fisicas.

    def __init__(self, x=0, y=0):
        # Posicion inicial del cohete
        self.x = x
        self.y = y

    def move_rocket(self, x_increment=0, y_increment=1):
        # Mueve el cohete en las unidades que se quieras. Por defecto
        # se mueve solo 1 en el eje y
        self.x += x_increment
        self.y += y_increment

    @staticmethod
    def get_distance(one_rocket, other_rocket):
        # Podemos darle a la función dos objetos Rocket y que
        # calcule la distancia entre ellos (distancia euclídea).
        distance = ((one_rocket.x-other_rocket.x)**2+(one_rocket.y-other_rocket.y)**2)**.5
        return distance

r1 = Rocket(5,7)
r2 = Rocket(1,0)

# Al ser ahora un método estático hay que pasarle los dos cohetes
Rocket.get_distance(r1, r2)
```

Vamos a crear ahora una clase (hijo) que hereda de la clase (padre) Rocket.

```
class Shuttle(Rocket):  # <-- Como hereda de la clase Rocket debemos pasarsela como "variable" de Shuttle
    # Simula los despegues

    def __init__(self, x=0, y=0, flights_completed=0):
        super().__init__(x, y)  # <-- Esta línea es la que "me carga" en el ecosistema de Shuttle todo lo que había en (este caso) Rocket;
                                #     por ello necesita este init las variables x e y para darselas a la clase padre.
        self.flights_completed = flights_completed

# Para traer del padre al hijo debe tener la llamada a super().__init__ del ejemplo anterior (en el init).
# Si hay dos herencias diferentes, habrá que llamar al __init__ de cada clase padre.
cohete = Shuttle(x=4, y=8.8, flights_completed=3)
cohete.x, cohete.y

# Como podemos ver la variable cohete tiene acceso a todos los métodos de Rocket
cohete.move_rocket()
cohete.x, cohete.y

# Y además podremos acceder a los atributos que tiene Shuttle per se
cohete.flights_completed
```

De esta manera podremos reutilizar siempre la clase Rocket y podríamos añadir más funcionalidades que hereden de esta. Hay que tener en cuenta que si a la clase hijo le metemos un método o variable que se llame igual que uno de la clase padre, el del hijo sobrescribirá el del padre.

**Ejercicio 15**: Crea una clase Persona que contenga nombre, edad, apellidos y sexo; además añade métodos que te devuelvan toda esta información mediante prints. Tras esto crea una clase Estudiante y otra Profesor que hereden de Persona, donde se añadan en una las materias que cursa y en la otra las materias que imparte, respectivamente, además del nombre del rol (si la persona es Estudiante o Profesor). Finalmente crea una última clase que herede de las otras dos y que añada destrezas que tendría que tener un estudiante o un profesor.
```
class Persona():

    def __init__(self, nombre, apellidos, edad, sexualidad):
        self.nombre = nombre.title()
        self.apellidos = apellidos.title()
        self.edad = edad
        self.sexualidad = sexualidad

    def print_persona(self):
        print(f"Esta persona es {self.nombre} {self.apellidos} tiene {self.edad} años y es {self.sexualidad}")

p1 = Persona(nombre="Fulanito", apellidos="de tal", edad="20", sexualidad="no binario")
p2 = Persona(nombre="Menganito", apellidos="de cual", edad="40", sexualidad="binario")

p1.print_persona()
p2.print_persona()

class Estudiante(Persona):

    def __init__(self, nombre, apellidos, edad, sexualidad, rol, id_, asignatura):
        super().__init__(nombre, apellidos, edad, sexualidad)
        self.__rol = "Estudiante"
        self.id = id_
        self.asignaturas = asignatura

    def print_alumno(self):
        print(f"El alumno {self.nombre} {self.apellidos} tiene las siguientes asignaturas: {self.asignaturas}")

p3 = Estudiante(nombre="Fulanito", apellidos="de tal", edad="20", sexualidad="no binario", rol="Estudiante",
                id_="992037", asignatura=("Álgebra", "Matemáticas", "Nuevas tecnologías"))
p3.print_alumno()

class Profesor(Persona):

    def __init__(self, nombre, apellidos, edad, sexualidad, rol, id_, asignatura):
        super().__init__(nombre, apellidos, edad, sexualidad)
        self.__rol = "Profesor"
        self.id = id_
        self.asignaturas = asignatura

    def print_profesor(self):
        print(f"El profesor {self.nombre} {self.apellidos} tiene las siguientes asignaturas: {self.asignaturas}")

p1 = Profesor(nombre="Fulanito", apellidos="de tal", edad="20", sexualidad="no binario", rol="Profesor",
              id_="992037", asignatura=("Álgebra", "Matemáticas", "Nuevas tecnologías"))
p1.print_profesor()

class Destrezas(Profesor, Estudiante):

    def __init__(self, nombre, apellidos, edad, sexualidad, rol, id_, asignatura, destreza):
        # Al llamar explícitamente al __init__ de las clases padre hay que pasar también self
        Profesor.__init__(self, nombre, apellidos, edad, sexualidad, rol, id_, asignatura)
        Estudiante.__init__(self, nombre, apellidos, edad, sexualidad, rol, id_, asignatura)
        # self.__rol es privado de cada clase padre (name mangling), así que usamos el parámetro rol
        if rol == "Estudiante":
            self.destrezas = destreza
        else:
            self.destrezas = destreza

    def print_destrezas(self):
        print(f"{self.nombre} {self.apellidos} tiene las siguientes destrezas: {self.destrezas}")
```

# Diferencias entre OOP y programación funcional

```
# OOP
def factorial(n):
    ret = 1
    while n > 0:
        ret *= n
        n -= 1
    return ret

factorial(3)

# Programación funcional
def factorial_rec(n):
    """
    Usa la propia función que define para hallar recursivamente el cálculo
    """
    if n == 0:
        return 1
    else:
        return n*factorial_rec(n-1)

factorial_rec(3)
```

# Módulos y paquetes

La ventaja competitiva que tiene python con respecto a muchos lenguajes es la comunidad. Esta comunidad provee una infinidad de paquetes de terceros (third party) que hacen de python un lenguaje universal para infinidad de desarrollos. Además de estos, python también contiene paquetes base, que podemos ver todos [clicando aqui](https://docs.python.org/3/library/); estos no tenemos por qué instalarlos antes de usarlos. Algunos ejemplos son:

1. os and sys: Tools for interfacing with the operating system, including navigating file directory structures and executing shell commands.
2. math and cmath: Mathematical functions and operations on real and complex numbers
3. itertools: Tools for constructing and interacting with iterators and generators
4. functools: Tools that assist with functional programming
5. random: Tools for generating pseudorandom numbers
6. pickle: Tools for object persistence: saving objects to and loading objects from disk
7. json and csv: Tools for reading JSON-formatted and CSV-formatted files (ver el pequeño ejemplo de más abajo).
8. urllib: Tools for doing HTTP and other web requests.
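Como muestra rápida de uno de estos módulos de la librería estándar (un boceto mínimo que no forma parte del material original), el módulo json permite pasar de diccionarios de Python a texto JSON y viceversa:

```python
import json

persona = {'nombre': 'Fulanito', 'edad': 20}

texto = json.dumps(persona)      # dict -> cadena de texto JSON
print(texto)                     # {"nombre": "Fulanito", "edad": 20}

recuperado = json.loads(texto)   # cadena JSON -> dict
print(recuperado['nombre'])      # Fulanito
```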
Todos los imports suelen escribirse al principio del código

### Importar explicitamente

```
import math  # Tienes que explicitar que quieres de la librería math la función sqrt (raíz cuadrada)

math.sqrt(121)
```

### Importar explicitamente con alias

```
import random as r  # Importas una librería poniendole un alias que luego usarás para llamar a las funciones

r.randint(0, 3)  # <-- Devuelveme un entero aleatorio entre 0 y 3 (ambos incluidos)
```

### Importar funciones concretas

```
from math import pi, cos

cos(pi)
```

### Importar implicitamente

```
from math import *  # De la librería math importame todo

sin(pi)  # Da del orden de 1e-16 en lugar de 0 exacto por la precisión de la máquina
```

### Importar third party

```
import numpy as np
```

No podemos importarla porque no la tenemos en nuestro entorno virtual. Para ello podemos usar conda (Anaconda) o pip (Python) para instalar estos paquetes que no tenemos de base en el entorno virtual. Muchos de los paquetes están en conda; sin embargo, habrá otros que solo estén en pip. La forma de usar uno u otro por terminal es la siguiente

```
!conda install -y numpy
!pip install pandas
# !conda install -y numpy <-- Es posible especificarle una versión concreta
# !pip install pandas <-- Es posible especificarle una versión concreta

!pip install astropy
```

# Ficheros

Los ordenadores modernos tienen, en general, muchas maneras de almacenar la información. Una de ellas es la memoria RAM, que sólo está en uso cuando el ordenador está en uso, recibiendo energía. Es muy rápida, pero la información se pierde si no tiene electricidad.

Otra manera es mediante discos duros (o, más recientemente, unidades de estado sólido, más modernas). Este tipo de dispositivos tiene la particularidad de que no necesitan un aporte de energía como la memoria RAM para funcionar, por lo que pueden almacenar información mucho tiempo sin consumo. Cuestan menos por unidad de memoria (es decir, almacenar un bit en un disco duro es más barato que en RAM), aunque son relativamente lentos. Pero, dado que no es razonable estar gastando energía constantemente, los necesitamos.

En la mayoría de sistemas operativos que se usan actualmente, los datos en el disco duro están almacenados en forma de ficheros, que residen en directorios o carpetas. Así, si queremos tener datos externos a nuestro programa, la manera ideal es crear archivos con esos datos, y usarlos desde nuestro código. Vamos a aprender a usar ficheros en Python para almacenar información y leerla.

Veamos un ejemplo de cómo llamar a un fichero

## Leer ficheros

```
!pwd

fichero = open('data/san_manuel_bueno_martir.html')  # <-- Podemos llamarlo con una ruta relativa o absoluta
# Hacer cosas con el fichero
fichero.close()

fichero = open('data/san_manuel_bueno_martir.html')  # Abrimos el fichero para lectura
contenido = fichero.read()  # Leemos el contenido y lo guardamos en una variable
print(contenido)  # Imprimimos el contenido
fichero.close()  # Cerramos el fichero

fichero = open('data/san_manuel_bueno_martir.html')
contenido = fichero.read()
lineas = contenido.splitlines()
i = 0
for linea in lineas:
    print(linea)
    i += 1
    if i == 5:
        break
fichero.close()
```
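Un apunte que no aparece en el material original: en la práctica lo habitual es abrir los ficheros con el gestor de contexto `with`, que se encarga de cerrarlos automáticamente aunque algo falle por el camino. Un boceto mínimo sobre el mismo fichero de ejemplo:

```python
# with cierra el fichero al salir del bloque, incluso si dentro se produce un error
with open('data/san_manuel_bueno_martir.html') as fichero:
    primeras_lineas = fichero.read().splitlines()[:5]

print(primeras_lineas)
```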
En realidad hay muchos tipos de ficheros. Una división típica es la diferencia entre los ficheros de texto plano y los archivos binarios. Un archivo de texto plano es un archivo en el que todo lo que hay es texto. Por ejemplo, un programa en Python estará guardado en un archivo de texto plano. En un fichero binario, hay unos y ceros. Aunque técnicamente en un archivo de texto plano también hay unos y ceros, en los archivos binarios esos unos y ceros no tienen por qué identificar texto.

## Escribir ficheros

```
fichero = open("data/fichero.txt", "w")  # Atención a la 'w'!
fichero.write("primera línea\n")
fichero.write("segunda línea\n")
fichero.close()

fichero = open("data/fichero.txt", 'r')
print(fichero.read())
```

# Funciones anónimas

Las funciones anónimas (funciones lambda) son otra manera de definir funciones de una forma más corta

```
def suma(x, y):
    return x + y
```

Es lo mismo que

```
z = lambda x, y: x + y

z(4, 6)
```

Es útil cuando queremos pasar funciones como argumentos de otras funciones. Por ejemplo:

```
# Podemos hacerlo porque una lista es ordenable
sorted([2, 5, 1])

# Pero una lista de diccionarios no se puede ordenar directamente (los diccionarios no son comparables entre sí)
data = [{'first':'Guido', 'last':'Van Rossum', 'YOB':1956},
        {'first':'Grace', 'last':'Hopper', 'YOB':1906},
        {'first':'Alan', 'last':'Turing', 'YOB':1912}]

sorted(data)

# Sin embargo lo que sí podemos hacer aprovechando lo dicho anteriormente es
help(sorted)

sorted(data, key=lambda item: item['first'])  # <-- Ordena alfabéticamente

sorted(data, key=lambda item: item['YOB'])  # <-- Ordena numéricamente
```

# **OPCIONAL**

# Expresiones regulares

## Flexible Pattern Matching with Regular Expressions

The methods of Python's ``str`` type give you a powerful set of tools for formatting, splitting, and manipulating string data. But even more powerful tools are available in Python's built-in *regular expression* module. Regular expressions are a huge topic; there are entire books written on the topic (including Jeffrey E.F. Friedl’s [*Mastering Regular Expressions, 3rd Edition*](http://shop.oreilly.com/product/9780596528126.do)), so it will be hard to do justice within just a single subsection. My goal here is to give you an idea of the types of problems that might be addressed using regular expressions, as well as a basic idea of how to use them in Python. I'll suggest some references for learning more in [Further Resources on Regular Expressions](#Further-Resources-on-Regular-Expressions).

Fundamentally, regular expressions are a means of *flexible pattern matching* in strings. If you frequently use the command-line, you are probably familiar with this type of flexible matching with the "``*``" character, which acts as a wildcard. For example, we can list all the IPython notebooks (i.e., files with extension *.ipynb*) with "Python" in their filename by using the "``*``" wildcard to match any characters in between:

```
!ls *Python*.ipynb
```

Regular expressions generalize this "wildcard" idea to a wide range of flexible string-matching syntaxes. The Python interface to regular expressions is contained in the built-in ``re`` module; as a simple example, let's use it to duplicate the functionality of the string ``split()`` method:

```
line = 'the quick brown fox jumped over a lazy dog'  # definimos la cadena de ejemplo antes de usarla
print(line)

import re
regex = re.compile('\s+')
regex.split(line)
```

Here we've first *compiled* a regular expression, then used it to *split* a string. Just as Python's ``split()`` method returns a list of all substrings between whitespace, the regular expression ``split()`` method returns a list of all substrings between matches to the input pattern. In this case, the input is ``"\s+"``: "``\s``" is a special character that matches any whitespace (space, tab, newline, etc.), and the "``+``" is a character that indicates *one or more* of the entity preceding it. Thus, the regular expression matches any substring consisting of one or more spaces.
The ``split()`` method here is basically a convenience routine built upon this *pattern matching* behavior; more fundamental is the ``match()`` method, which will tell you whether the beginning of a string matches the pattern: ``` for s in [" ", "abc ", " abc"]: if regex.match(s): print(repr(s), "matches") else: print(repr(s), "does not match") ``` Like ``split()``, there are similar convenience routines to find the first match (like ``str.index()`` or ``str.find()``) or to find and replace (like ``str.replace()``). We'll again use the line from before: ``` line = 'the quick brown fox jumped over a lazy dog' ``` With this, we can see that the ``regex.search()`` method operates a lot like ``str.index()`` or ``str.find()``: ``` line.index('fox') regex = re.compile('fox') match = regex.search(line) match.start() ``` Similarly, the ``regex.sub()`` method operates much like ``str.replace()``: ``` line.replace('fox', 'BEAR') regex.sub('BEAR', line) ``` With a bit of thought, other native string operations can also be cast as regular expressions. ### A more sophisticated example But, you might ask, why would you want to use the more complicated and verbose syntax of regular expressions rather than the more intuitive and simple string methods? The advantage is that regular expressions offer *far* more flexibility. Here we'll consider a more complicated example: the common task of matching email addresses. I'll start by simply writing a (somewhat indecipherable) regular expression, and then walk through what is going on. Here it goes: ``` email = re.compile('\w+@\w+\.[a-z]{3}') ``` Using this, if we're given a line from a document, we can quickly extract things that look like email addresses ``` text = "To email Guido, try guido@python.org or the older address guido@google.com." email.findall(text) ``` (Note that these addresses are entirely made up; there are probably better ways to get in touch with Guido). We can do further operations, like replacing these email addresses with another string, perhaps to hide addresses in the output: ``` email.sub('--@--.--', text) ``` Finally, note that if you really want to match *any* email address, the preceding regular expression is far too simple. For example, it only allows addresses made of alphanumeric characters that end in one of several common domain suffixes. So, for example, the period used here means that we only find part of the address: ``` email.findall('barack.obama@whitehouse.gov') ``` This goes to show how unforgiving regular expressions can be if you're not careful! If you search around online, you can find some suggestions for regular expressions that will match *all* valid emails, but beware: they are much more involved than the simple expression used here! ### Basics of regular expression syntax The syntax of regular expressions is much too large a topic for this short section. Still, a bit of familiarity can go a long way: I will walk through some of the basic constructs here, and then list some more complete resources from which you can learn more. My hope is that the following quick primer will enable you to use these resources effectively. 
#### Simple strings are matched directly If you build a regular expression on a simple string of characters or digits, it will match that exact string: ``` regex = re.compile('ion') regex.findall('Great Expectations') ``` #### Some characters have special meanings While simple letters or numbers are direct matches, there are a handful of characters that have special meanings within regular expressions. They are: ``` . ^ $ * + ? { } [ ] \ | ( ) ``` We will discuss the meaning of some of these momentarily. In the meantime, you should know that if you'd like to match any of these characters directly, you can *escape* them with a back-slash: ``` regex = re.compile(r'\$') regex.findall("the cost is $20") ``` The ``r`` preface in ``r'\$'`` indicates a *raw string*; in standard Python strings, the backslash is used to indicate special characters. For example, a tab is indicated by ``"\t"``: ``` print('a\tb\tc') ``` Such substitutions are not made in a raw string: ``` print(r'a\tb\tc') ``` For this reason, whenever you use backslashes in a regular expression, it is good practice to use a raw string. #### Special characters can match character groups Just as the ``"\"`` character within regular expressions can escape special characters, turning them into normal characters, it can also be used to give normal characters special meaning. These special characters match specified groups of characters, and we've seen them before. In the email address regexp from before, we used the character ``"\w"``, which is a special marker matching *any alphanumeric character*. Similarly, in the simple ``split()`` example, we also saw ``"\s"``, a special marker indicating *any whitespace character*. Putting these together, we can create a regular expression that will match *any two letters/digits with whitespace between them*: ``` regex = re.compile(r'\w\s\w') regex.findall('the fox is 9 years old') ``` This example begins to hint at the power and flexibility of regular expressions. The following table lists a few of these characters that are commonly useful: | Character | Description || Character | Description | |-----------|-----------------------------||-----------|---------------------------------| | ``"\d"`` | Match any digit || ``"\D"`` | Match any non-digit | | ``"\s"`` | Match any whitespace || ``"\S"`` | Match any non-whitespace | | ``"\w"`` | Match any alphanumeric char || ``"\W"`` | Match any non-alphanumeric char | This is *not* a comprehensive list or description; for more details, see Python's [regular expression syntax documentation](https://docs.python.org/3/library/re.html#re-syntax). #### Square brackets match custom character groups If the built-in character groups aren't specific enough for you, you can use square brackets to specify any set of characters you're interested in. For example, the following will match any lower-case vowel: ``` regex = re.compile('[aeiou]') regex.split('consequential') ``` Similarly, you can use a dash to specify a range: for example, ``"[a-z]"`` will match any lower-case letter, and ``"[1-3]"`` will match any of ``"1"``, ``"2"``, or ``"3"``. For instance, you may need to extract from a document specific numerical codes that consist of a capital letter followed by a digit. You could do this as follows: ``` regex = re.compile('[A-Z][0-9]') regex.findall('1043879, G2, H6') ``` #### Wildcards match repeated characters If you would like to match a string with, say, three alphanumeric characters in a row, it is possible to write, for example, ``"\w\w\w"``. 
Because this is such a common need, there is a specific syntax to match repetitions – curly braces with a number: ``` regex = re.compile(r'\w{3}') regex.findall('The quick brown fox') ``` There are also markers available to match any number of repetitions – for example, the ``"+"`` character will match *one or more* repetitions of what precedes it: ``` regex = re.compile(r'\w+') regex.findall('The quick brown fox') ``` The following is a table of the repetition markers available for use in regular expressions: | Character | Description | Example | |-----------|-------------|---------| | ``?`` | Match zero or one repetitions of preceding | ``"ab?"`` matches ``"a"`` or ``"ab"`` | | ``*`` | Match zero or more repetitions of preceding | ``"ab*"`` matches ``"a"``, ``"ab"``, ``"abb"``, ``"abbb"``... | | ``+`` | Match one or more repetitions of preceding | ``"ab+"`` matches ``"ab"``, ``"abb"``, ``"abbb"``... but not ``"a"`` | | ``{n}`` | Match ``n`` repetitions of preeeding | ``"ab{2}"`` matches ``"abb"`` | | ``{m,n}`` | Match between ``m`` and ``n`` repetitions of preceding | ``"ab{2,3}"`` matches ``"abb"`` or ``"abbb"`` | With these basics in mind, let's return to our email address matcher: ``` email = re.compile(r'\w+@\w+\.[a-z]{3}') ``` We can now understand what this means: we want one or more alphanumeric character (``"\w+"``) followed by the *at sign* (``"@"``), followed by one or more alphanumeric character (``"\w+"``), followed by a period (``"\."`` – note the need for a backslash escape), followed by exactly three lower-case letters. If we want to now modify this so that the Obama email address matches, we can do so using the square-bracket notation: ``` email2 = re.compile(r'[\w.]+@\w+\.[a-z]{3}') email2.findall('barack.obama@whitehouse.gov') ``` We have changed ``"\w+"`` to ``"[\w.]+"``, so we will match any alphanumeric character *or* a period. With this more flexible expression, we can match a wider range of email addresses (though still not all – can you identify other shortcomings of this expression?). #### Parentheses indicate *groups* to extract For compound regular expressions like our email matcher, we often want to extract their components rather than the full match. This can be done using parentheses to *group* the results: ``` email3 = re.compile(r'([\w.]+)@(\w+)\.([a-z]{3})') text = "To email Guido, try guido@python.org or the older address guido@google.com." email3.findall(text) ``` As we see, this grouping actually extracts a list of the sub-components of the email address. We can go a bit further and *name* the extracted components using the ``"(?P<name> )"`` syntax, in which case the groups can be extracted as a Python dictionary: ``` email4 = re.compile(r'(?P<user>[\w.]+)@(?P<domain>\w+)\.(?P<suffix>[a-z]{3})') match = email4.match('guido@python.org') match.groupdict() ``` #### *BIBLIOGRAFÍA* [Learning Python 5th - O'Reilly](https://learning.oreilly.com/library/view/learning-python-5th/9781449355722/titlepage01.html) [Python Crash Course 2th - O'Reilly](https://learning.oreilly.com/library/view/python-crash-course/9781492071266/xhtml/cover.xhtml) [Internet](https://google.es) [Stackoverflow](https://stackoverflow.com) [Medium](https://medium.com) [Towards Data Science](https://towardsdatascience.com) [Data Science Stack Exchange](https://datascience.stackexchange.com) [Kaggle](https://kaggle.com) ``` 4/2 is int ```
github_jupyter
from IPython.display import IFrame IFrame(src = "https://kapeli.com/cheat_sheets/Conda.docset/Contents/Resources/Documents/index", width=800, height=400) # Esta celda solo funcionará para los usuarios de mac o linux, en windows el comando es dir !pwd !conda info x = 9 x x = int(10) x 4 x = 5 2 + 4, 2 / 4, 6 - 10, 4 * 4, 3**3 # ¿Qué resultado será? int(3/4) x = .3 x x = float(3.5) # atención a que el separador decimal es el '.' x x = 1e-16 x 2.4 + 4.1, 2.3 / 4.06, 6 - 10.8, 4.3 * 4.1, 3.4**3.7 x = """ hola """ x x = 'Mi primera cadena de texto' x x = str(7) x x = 'Mi primera cadena de texto' x.lower(), x.upper() x.split(' ') x = ' eSto sE lleVaba En tiEmpOs quE nO QuiEro RecorDar ' x.strip().capitalize() x = 'Hola' y = 'Mundo' x + y, (x + y)*2 'G00000000000L'.replace('0', 'O') # Buscar que métodos tiene asociado el tipo str name = 'Esteban Sánchez' years = 25 print(f'Hola me llamo {name} y tengo {years} años') cadena = 'La factura del curso de %s es de %0.2f euros' % ('data science'.upper(), int(12.345)) print(cadena) x = True y = False x, y x = bool(1) y = bool(0) x, y x, y = bool('true'), bool('True') print(x, y) xx, yy = bool('false'), bool('False') print(xx, yy) xxx = bool('') print(xxx) # Podemos ver siempre de que tipo es la variable que estamos tratando con la función 'type()' type(7), type(5.5), type(False), type('str') # Además python cuenta con un tipo de datos especial que se llama None # este tipo de dato se usa cuando no se quiere especificar nada. Y no # soporta ningún tipo de operación. x = None type(x), bool(x) None x = 7 val = type(x) print(val) return_value = print('abc') # ¿Que va a pasar con la siguiente línea? print(return_value) # Con el cursor dentro del método tambien podríamos ver la ayuda si pulsamos SHIFT + TABULADOR # str.replace() str.replace? help(str.replace) print(f"La 'floor division' es una división normal solo que el resultado es el número entero de la división. " \ f"Hacer 10 // 3 = {10 // 3} es lo mismo que hacer el int(10 / 3) = {int(10/3)} que hicimos antes.") 10 % 5, 4 % 3, 3 % 4, 4 % 7 def entero_superior(num): x = int(num) incremento = int((1 % num)) ret = x + incremento return ret n = entero_superior(1.1) n # Podemos hacer una suma o una resta a una variable numérica de una forma más simple incr = 2 value_sum = 2 value_sum += incr print('Suma rapida: ', value_sum) value_res = 2 value_res -= incr print('Resta rapida: ', value_res) value_mult = 7 value_mult *= incr print('Multiplicación rapida: ', value_mult) def soluciones_segundo_grado(a, b, c): sol1 = (-b + (b**2 - 4*a*c)**(.5))/(2*a) sol2 = (-b - (b**2 - 4*a*c)**(.5))/(2*a) return sol1, sol2 assert soluciones_segundo_grado(1, -3, 2) == (2.0, 1.0) x, y = 6, 7 # comprobar con booleanos print(f'¿Es x mayor que y? {x > y}') print(f'¿Es x menor que y? {x < y}') print(f'¿Es x igual que y? {x == y}') print(f'¿Es x diferente de y? {x != y}') x, y = 'Casa', 'Casa' print(f'¿Es x mayor que y? {x > y}') print(f'¿Es x menor que y? {x < y}') print(f'¿Es x igual que y? {x == y}') print(f'¿Es x diferente de y? {x != y}') # ¿Cuál será mayor casa o taza? print("barco" >= "moto") print('osa' < 'oso') print('mas' < 'mascar') print('as' >= 'As') 'X' < 'x' '!' < '?' print("123" == 123) print(int("123") == 123) print("123" == str(123)) int('n') x = 10 y = 7 z = 3 # ¿Qué saldrá de los siguientes prints? print(x < 5) print(x == y + z) print(x != x) print(y == 10 - 3) print(x**2 <= 100) if condicion_1 == True: ... elif condicion_2 == 8: ... else: ... 
mientras que en python no usamos llaves ni puntos y comas para explicitar un bloque de código o un fin de línea. En python cuando queremos especificar un bloque de código (como por ejemplo lo que ejecuta un IF-ELIF-ELSE, lo que ejecuta una función o lo que ejecuta un bucle) debemos acabar la línea con ':' y la siguiente empezarla con un tabulador o 4 espacios blancos. Tomemos el siguiente ejemplo de código como visual para ver esto de la identación: Tambien tenemos opción de hacer condiciones anidadas **Ejercicio 3**: Escriber una función en la cual dado un número mayor o igual a 1 imprima el entero superior y validar si el número metido cumple la condición: ## Operaciones de identidad y pertenencia | Operator | Description | |---------------|---------------------------------------------------| | ``a is b`` | True if ``a`` and ``b`` are identical objects | | ``a is not b``| True if ``a`` and ``b`` are not identical objects | | ``a in b`` | True if ``a`` is a member of ``b`` | | ``a not in b``| True if ``a`` is not a member of ``b`` | | ``a or b`` | True if a condition satisfies one or two of them | ## Operaciones bitwise | Operator | Name | Description | |--------------|-----------------|---------------------------------------------| | ``a & b`` | Bitwise AND | Bits defined in both ``a`` and ``b`` | | <code>a &#124; b</code>| Bitwise OR | Bits defined in ``a`` or ``b`` or both | | ``a ^ b`` | Bitwise XOR | Bits defined in ``a`` or ``b`` but not both | | ``a << b`` | Bit shift left | Shift bits of ``a`` left by ``b`` units | | ``a >> b`` | Bit shift right | Shift bits of ``a`` right by ``b`` units | | ``~a`` | Bitwise NOT | Bitwise negation of ``a`` | # Estructura de datos ## Listas ### ¿Qué podemos hacer? Como podemos ver no hay restricción en lo que se refiere al tipo de dato que contiene una lista. Podemos además indexar valores de una lista dadas sus posiciones: Podemos además calcular el número de elementos que tiene una lista de manera directa: Supongamos una lista de números donde cada elemento es la probabilidad de sacar un número de un dado: Entonces para comprobar que la suma de las probabilidades es la unidad, podemos hacer: ### Precisión decimal Una cosa a tener en cuenta es la precisión decimal, estamos tratando con ordenadores que no dejan de tener un espacio en memoria finito. Pongamos un ejemplo: No es un comportamiento único de python, es debido a como guarda la información python. Si pintamos los números con mayor precisión veremos lo que pasa: We're accustomed to thinking of numbers in decimal (base-10) notation, so that each fraction must be expressed as a sum of powers of 10: $$ 1 /8 = 1\cdot 10^{-1} + 2\cdot 10^{-2} + 5\cdot 10^{-3} $$ In the familiar base-10 representation, we represent this in the familiar decimal expression: $0.125$. Computers usually store values in binary notation, so that each number is expressed as a sum of powers of 2: $$ 1/8 = 0\cdot 2^{-1} + 0\cdot 2^{-2} + 1\cdot 2^{-3} $$ In a base-2 representation, we can write this $0.001_2$, where the subscript 2 indicates binary notation. The value $0.125 = 0.001_2$ happens to be one number which both binary and decimal notation can represent in a finite number of digits. In the familiar base-10 representation of numbers, you are probably familiar with numbers that can't be expressed in a finite number of digits. 
For example, dividing $1$ by $3$ gives, in standard decimal notation: $$ 1 / 3 = 0.333333333\cdots $$ The 3s go on forever: that is, to truly represent this quotient, the number of required digits is infinite! Similarly, there are numbers for which binary representations require an infinite number of digits. For example: $$ 1 / 10 = 0.00011001100110011\cdots_2 $$ Just as decimal notation requires an infinite number of digits to perfectly represent $1/3$, binary notation requires an infinite number of digits to represent $1/10$. Python internally truncates these representations at 52 bits beyond the first nonzero bit on most systems. This rounding error for floating-point values is a necessary evil of working with floating-point numbers. The best way to deal with it is to always keep in mind that floating-point arithmetic is approximate, and *never* rely on exact equality tests with floating-point values. **Ejercicio 4:** Calcula la media de un vector numérico ret = sum(l) / len(l) Tambien podemos calcular la longitud de una cadena de caracteres además de indexar caracteres: Podemos crear listas de números enteros consecutivos: Uniendo operadores lógicos, podemos comprobar si un valor está en una lista: Dado que consideramos cadenas de texto como listas podemos tambien ver si una cadena está dentro de otra: Las listas además no son inmutables, esto es que puedes cambiar valores preexistentes: **Ejercicio 5:** Programa un sistema que meta 3 números en una lista. Después, imprime la lista. **Ejercicio 6:** Implementa un programa que tenga como input una posición en la lista, y elimine el elemento en esa posición. Si la posición no existe en la lista (es menor que cero o mayor que la longitud), entonces imprime un mensaje indicando que ha habido un error. **Ejercicio 7:** Implementa un algoritmo en el que dada una lista desordenada de números, te devuelva la misma lista ordenada decrecientemente. (Pista: es un método de las listas --> una sola línea de código es la implementación) ## Sets Los sets son como las listas solo que están ordenadas y no permiten elementos iguales además no se pueden cambiar sus valores dado que son inmutables **Ejercicio 8:** Crea un algoritmo que dado el texto anterior me devuelva una lista con todas las palabras que aparecen. Reemplaza los signos de puntuación (. . , ;) por nada # Tuplas Hay dos maneras de inicializar una tupla Tradicionalmente la tupla la componían dos elementos aunque ahora una tupla puede ser de N elementos # Diccionarios Los diccionarios son unas estructuras para organizar datos muy interesantes dado que se organizan modo clave: valor. Una cualidad muy importante de los diccionarios es que las claves que guarda son únicas. 
Lo vemos con un ejemplo: # Bucles ## While Mientras se cumpla una condición me vas a ejecutar el siguiente bloque de código **Ejercicio 9:** Crea un algoritmo que me devuelva en una lista los números impares menores o iguales a 100: Podemos ver más información sobre comandos mágicos [blog](https://www.analyticslane.com/2019/04/12/seis-comandos-magicos-de-jupyter-notebooks/) o [pagina oficial](https://ipython.readthedocs.io/en/stable/interactive/magics.html) # Bucle For **Ejercicio 10**: Crea una cuenta atrás que acabe en un explosión Recordad que todos los números son True excepto el 0 que es False Los diccionario, las listas y los sets son iterables, esto es que puedes aplicar bucles dentro de ellas ## List comprehension En python nunca creamos una lista como: Si no que hacemos: La estructura para las list comprehension son: Con una condición añadida quedaría: Con una condicion if else: # Keywords Lo de arriba es un extracto de algunas palabras clave que tiene reservadas python porque hacen alguna función # Asignación de variables. Copiado y referenciado. Como ya sabemos una variable se asigna por ejemplo de la siguiente manera Ahora bien, tambien podemos asignar el valor de una variable a otra variable siendo las dos iguales: En este caso no estamos creando una nueva variable y con el valor de la variable en x si no que estamos haciendo que x se refiera a y, por lo que si cambia y cambiaría también x. Esto se hace así para evitar consumir memoria y guardar dos veces la mimas información. # Funciones Vamos a ver como se estructura una función. Una función se define siempre empezando por 'def'. Tras esto podemos ver el nombre de la función en color azul que normalmente será snake_case. Entre paréntesis se encuentran los argumentos/variables/inputs de la función, para el ejemplo anterior es uno solo que tiene de nombre 'vector'. Tras esto ponemos los ':' para especificar que el código que viene a continuación pertenece a la función. Se observa también un comentario escrito entre 3 comillas dobles que equivale al docstring o información de lo que hace la función. Tras esto vendría el algoritmo propiamente dicho o los cálculos y finalmente si queremos que nuestra función de un resultado de vuelta que podamos asignarle a una variable debemos poner un return. **Ejercicio 11**: Escribe una función que a una cadena de texto reemplaze los signos de puntuación ['.', ',', ';', ':', '!', '¡', '?', '¿', '(', ')', '-'] por la cadena vacía Además de las funciones como acabamos de ver, tambien podemos definir funciones con parámetros por defecto. Esto es como sigue: **Ejercicio12**: Haz una función que te devuelva (aproximadamente) el salario neto, y además la media de lo que cobrarías al mes si fueran 12 o 14 pagas como lo que quedaría de pagas extraordinarias en caso de que fueran 12 pagas. [distribución de IRPF](https://factorialhr.es/blog/calcular-sueldo-neto-bruto/). La variable de entrada son el salario bruto y las pagas que hay (por defecto poner 14). Las variables que crees dentro de una función son específica de ese espacio, no se pueden usar dentro. Sin embargo las variables que creemos fuera de funciones si se pueden usar para esta. Veamoslo con un ejemplo: ## Argumentos flexibles: ``*args`` & ``**kwargs`` A veces no queremos especificar el número de variables que tiene nuestra función, para ello se usan estos argumentos para capturar todos los argumentos que queramos. El caracter ``*`` significa expande esta secuencia mientras que el parámetro ``**`` significa expande este diccionario. 
# Clases **Definición del profe**: Las clases son agrupaciones de funciones (dentro de una clase a las funciones se les llama métodos) que realizan operaciones similares, por lo que son buenas para agrupar funcionalidades parecidas. **Definición de alguien que sabe**: Las clases proveen una forma de empaquetar datos y funcionalidad juntos. Al crear una nueva clase, se crea un nuevo tipo de objeto, permitiendo crear nuevas instancias de ese tipo. Cada instancia de clase puede tener atributos adjuntos para mantener su estado. Las instancias de clase también pueden tener métodos (definidos por su clase) para modificar su estado. A una instancia de un objeto se le llama a llamar a una clase (como si llamas a una función) y a ejecutar un método de una clase es a ejecutar una función que hay dentro de esa clase. Veamos un ejemplo: El **self** que escribimos en todos los métodos es una variable muda (esto quiere decir que al llamar al método no hace falta declararla) por que hace referencia a todo lo que se contenga dentro del ecosistema de la clase. En el ejemplo anterior hemos podido ver la variable lista_compra porque era una variable pública, esto sin embargo puede no ser así definiendo las variables como privadas. Veamos un ejemplo: Sin embargo esta variable privada si que puede ser usada dentro del ecosistema de la clase. Por ejemplo Dado que la variable color es privada no podemos cambiar el valor de esta... ¿o sí? **Ejercicio 13**: A la clase Coche añadir un método que permita cambiar el valor de la variable privada color Tambien las funciones pueden ser privadas por lo que solo podrían usarse dentro del ecosistema de la clase Podríamos crea dentro de clases tambien métodos que se usan solo fuera del ecosistema de la clase: ### Programación Orientada a Objetos Las clases son una parte de lo que se denomina programación orientada a objetos (OOP). La idea es tratar de hacer bloques de código reusable para luego aprovechar al máximo estas funcionalidades. Para entender esto vamos a ver la *Herencia* en las clases y vamos a ver como al final podremos crear un grafo de clases. ## Herencia **Ejercicio 14**: Modifica el método get_distance para se le pasen por fuera del ecosistema de la clases dos objetos Rocket y que haga el mismo cálculo. Vamos a crear ahora una clase (hijo) que hereda de la clase (padre) Rocket. De esta manera podremos reutilizar siempre la clase Rocket y podríamos añadir más funcionalidades que hereden de estas. Hay que tener en cuenta que si a la clase hijo le metemos un método o variable que se llame igual que uno de la clase padre, el del hijo sobreescribirá el del padre. **Ejercicio 15**: Crea una clase Persona que contenga nombre, edad, apellidos y sexo además añade métodos que te devuelvan toda esta información mediante prints. Tras esto crea una clase Estudiante y otra Profesor que hereden de persona donde se añadan en uno las materias que da y en el otro las materias que da respectivamente además del nombre de la clase (si la persona es Estudiante o Profesor). Finalmente crea una última clase que herede de las otras dos y que añada destrezas que tendría que tener un estudiante o un profesor. # Diferencias entre OOP y programación funcional # Módulos y paquetes La ventaja competitiva que tiene python con respecto a muchos lenguajes es la comunidad. 
Esta comunidad provee una infinidad de paquete de terceros (third party) que hacen de python un lenguaje universal para infinidad de desarrollos, además de estos tambien python contiene paquetes base que podemos verlas todas [clicando aqui](https://docs.python.org/3/library/) todas estas no tenemos porque preinstalarlas antes de usarlas, algunos ejemplos son: 1. os and sys: Tools for interfacing with the operating system, including navigating file directory structures and executing shell commands. 2. math and cmath: Mathematical functions and operations on real and complex numbers 3. itertools: Tools for constructing and interacting with iterators and generators 4. functools: Tools that assist with functional programming 5. random: Tools for generating pseudorandom numbers 6. pickle: Tools for object persistence: saving objects to and loading objects from disk 7. json and csv: Tools for reading JSON-formatted and CSV-formatted files. 8. urllib: Tools for doing HTTP and other web requests. Todos los imports suelen escribirse al principio del código ### Importar explicitamente ### Importar explicitamente con alias ### Importar funciones concretas ### Importar implicitamente ### Importar third party No podemos importarla porque no la tenemos en nuestro entorno virtual. Para ello podemos usar conda (Anaconda) o pip (Python) para instalar estos paquetes que no tenemos de base en el entorno virtual. Muchos de los paquetes están en conda, sin embargo habrá otros que solo estén en pip. La forma de usar uno u otro por terminal es la siguiente # Ficheros Los ordenadores modernos tienen, en general, muchas maneras de almacenar la información. Una de ellas es la memoria RAM, que sólo está en uso cuando el ordenador está en uso, recibiendo energía. Es muy rápida, pero la información se pierde si no tiene electricidad. Otra manera es mediante discos duros (o, más recientemente, unidades de estado sólido, más modernas). Este tipo de dispositivos tiene la particularidad de que no necesitan un aporte de energía como la memoria RAM para funcionar, por lo que pueden almacenar información mucho tiempo sin consumo. Cuestan menos por unidad de memoria (es decir, almacenar un bit en un disco duro es más barato que en RAM), y además son relativamente lentos. Pero, dado que no es razonable estar gastando energía constantemente, los necesitamos. En la mayoría de sistemas operativos que se usan actualmente, los datos en el disco duro están almacenados en forma de ficheros, que residen en directorios o carpetas. Así, si queremos tener datos externos a nuestro programa, la manera ideal es crear archivos con esos datos, y usarlos desde nuestro código. Vamos a aprender a usar ficheros en Python para almacenar información y leerla. Veamos un ejemplo de como llamar a un fichero ## Leer ficheros En realidad hay muchos tipos de ficheros. Una división típica es la diferencia entre ficheros de texto plano y los archivos binarios. Un archivo de texto plano es un archivo en el que todo lo que hay es texto. Por ejemplo, un programa en Python estará guardado en un archivo de texto plano. En un fichero binario, hay unos y ceros. Aunque técnicamente en un archivo de texto plano también hay unos y ceros, en los archivos binarios esos unos y ceros no tienen por qué identificar texto. ## Escribir ficheros # Funciones anónimas Las funciones anónimas (funciones lambda) es otra manera de definir funciones de una manera más corta Es lo mismo que Es util cuando queremos pasar funciones como argumentos de otras funciones. 
Por ejmplo: # **OPCIONAL** # Expresiones regulares ## Flexible Pattern Matching with Regular Expressions The methods of Python's ``str`` type give you a powerful set of tools for formatting, splitting, and manipulating string data. But even more powerful tools are available in Python's built-in *regular expression* module. Regular expressions are a huge topic; there are there are entire books written on the topic (including Jeffrey E.F. Friedl’s [*Mastering Regular Expressions, 3rd Edition*](http://shop.oreilly.com/product/9780596528126.do)), so it will be hard to do justice within just a single subsection. My goal here is to give you an idea of the types of problems that might be addressed using regular expressions, as well as a basic idea of how to use them in Python. I'll suggest some references for learning more in [Further Resources on Regular Expressions](#Further-Resources-on-Regular-Expressions). Fundamentally, regular expressions are a means of *flexible pattern matching* in strings. If you frequently use the command-line, you are probably familiar with this type of flexible matching with the "``*``" character, which acts as a wildcard. For example, we can list all the IPython notebooks (i.e., files with extension *.ipynb*) with "Python" in their filename by using the "``*``" wildcard to match any characters in between: Regular expressions generalize this "wildcard" idea to a wide range of flexible string-matching sytaxes. The Python interface to regular expressions is contained in the built-in ``re`` module; as a simple example, let's use it to duplicate the functionality of the string ``split()`` method: Here we've first *compiled* a regular expression, then used it to *split* a string. Just as Python's ``split()`` method returns a list of all substrings between whitespace, the regular expression ``split()`` method returns a list of all substrings between matches to the input pattern. In this case, the input is ``"\s+"``: "``\s``" is a special character that matches any whitespace (space, tab, newline, etc.), and the "``+``" is a character that indicates *one or more* of the entity preceding it. Thus, the regular expression matches any substring consisting of one or more spaces. The ``split()`` method here is basically a convenience routine built upon this *pattern matching* behavior; more fundamental is the ``match()`` method, which will tell you whether the beginning of a string matches the pattern: Like ``split()``, there are similar convenience routines to find the first match (like ``str.index()`` or ``str.find()``) or to find and replace (like ``str.replace()``). We'll again use the line from before: With this, we can see that the ``regex.search()`` method operates a lot like ``str.index()`` or ``str.find()``: Similarly, the ``regex.sub()`` method operates much like ``str.replace()``: With a bit of thought, other native string operations can also be cast as regular expressions. ### A more sophisticated example But, you might ask, why would you want to use the more complicated and verbose syntax of regular expressions rather than the more intuitive and simple string methods? The advantage is that regular expressions offer *far* more flexibility. Here we'll consider a more complicated example: the common task of matching email addresses. I'll start by simply writing a (somewhat indecipherable) regular expression, and then walk through what is going on. 
Here it goes: Using this, if we're given a line from a document, we can quickly extract things that look like email addresses (Note that these addresses are entirely made up; there are probably better ways to get in touch with Guido). We can do further operations, like replacing these email addresses with another string, perhaps to hide addresses in the output: Finally, note that if you really want to match *any* email address, the preceding regular expression is far too simple. For example, it only allows addresses made of alphanumeric characters that end in one of several common domain suffixes. So, for example, the period used here means that we only find part of the address: This goes to show how unforgiving regular expressions can be if you're not careful! If you search around online, you can find some suggestions for regular expressions that will match *all* valid emails, but beware: they are much more involved than the simple expression used here! ### Basics of regular expression syntax The syntax of regular expressions is much too large a topic for this short section. Still, a bit of familiarity can go a long way: I will walk through some of the basic constructs here, and then list some more complete resources from which you can learn more. My hope is that the following quick primer will enable you to use these resources effectively. #### Simple strings are matched directly If you build a regular expression on a simple string of characters or digits, it will match that exact string: #### Some characters have special meanings While simple letters or numbers are direct matches, there are a handful of characters that have special meanings within regular expressions. They are: We will discuss the meaning of some of these momentarily. In the meantime, you should know that if you'd like to match any of these characters directly, you can *escape* them with a back-slash: The ``r`` preface in ``r'\$'`` indicates a *raw string*; in standard Python strings, the backslash is used to indicate special characters. For example, a tab is indicated by ``"\t"``: Such substitutions are not made in a raw string: For this reason, whenever you use backslashes in a regular expression, it is good practice to use a raw string. #### Special characters can match character groups Just as the ``"\"`` character within regular expressions can escape special characters, turning them into normal characters, it can also be used to give normal characters special meaning. These special characters match specified groups of characters, and we've seen them before. In the email address regexp from before, we used the character ``"\w"``, which is a special marker matching *any alphanumeric character*. Similarly, in the simple ``split()`` example, we also saw ``"\s"``, a special marker indicating *any whitespace character*. Putting these together, we can create a regular expression that will match *any two letters/digits with whitespace between them*: This example begins to hint at the power and flexibility of regular expressions. 
The following table lists a few of these characters that are commonly useful: | Character | Description || Character | Description | |-----------|-----------------------------||-----------|---------------------------------| | ``"\d"`` | Match any digit || ``"\D"`` | Match any non-digit | | ``"\s"`` | Match any whitespace || ``"\S"`` | Match any non-whitespace | | ``"\w"`` | Match any alphanumeric char || ``"\W"`` | Match any non-alphanumeric char | This is *not* a comprehensive list or description; for more details, see Python's [regular expression syntax documentation](https://docs.python.org/3/library/re.html#re-syntax). #### Square brackets match custom character groups If the built-in character groups aren't specific enough for you, you can use square brackets to specify any set of characters you're interested in. For example, the following will match any lower-case vowel: Similarly, you can use a dash to specify a range: for example, ``"[a-z]"`` will match any lower-case letter, and ``"[1-3]"`` will match any of ``"1"``, ``"2"``, or ``"3"``. For instance, you may need to extract from a document specific numerical codes that consist of a capital letter followed by a digit. You could do this as follows: #### Wildcards match repeated characters If you would like to match a string with, say, three alphanumeric characters in a row, it is possible to write, for example, ``"\w\w\w"``. Because this is such a common need, there is a specific syntax to match repetitions – curly braces with a number: There are also markers available to match any number of repetitions – for example, the ``"+"`` character will match *one or more* repetitions of what precedes it: The following is a table of the repetition markers available for use in regular expressions: | Character | Description | Example | |-----------|-------------|---------| | ``?`` | Match zero or one repetitions of preceding | ``"ab?"`` matches ``"a"`` or ``"ab"`` | | ``*`` | Match zero or more repetitions of preceding | ``"ab*"`` matches ``"a"``, ``"ab"``, ``"abb"``, ``"abbb"``... | | ``+`` | Match one or more repetitions of preceding | ``"ab+"`` matches ``"ab"``, ``"abb"``, ``"abbb"``... but not ``"a"`` | | ``{n}`` | Match ``n`` repetitions of preeeding | ``"ab{2}"`` matches ``"abb"`` | | ``{m,n}`` | Match between ``m`` and ``n`` repetitions of preceding | ``"ab{2,3}"`` matches ``"abb"`` or ``"abbb"`` | With these basics in mind, let's return to our email address matcher: We can now understand what this means: we want one or more alphanumeric character (``"\w+"``) followed by the *at sign* (``"@"``), followed by one or more alphanumeric character (``"\w+"``), followed by a period (``"\."`` – note the need for a backslash escape), followed by exactly three lower-case letters. If we want to now modify this so that the Obama email address matches, we can do so using the square-bracket notation: We have changed ``"\w+"`` to ``"[\w.]+"``, so we will match any alphanumeric character *or* a period. With this more flexible expression, we can match a wider range of email addresses (though still not all – can you identify other shortcomings of this expression?). #### Parentheses indicate *groups* to extract For compound regular expressions like our email matcher, we often want to extract their components rather than the full match. This can be done using parentheses to *group* the results: As we see, this grouping actually extracts a list of the sub-components of the email address. 
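A hedged sketch of the constructs just described: custom character classes, ranges, repetition counts, and grouping. The final lines preview the named-group syntax introduced in the next paragraph, and the example strings are my own.

```python
import re

# Square brackets: custom character sets and ranges
print(re.findall('[aeiou]', 'consequential'))       # ['o', 'e', 'u', 'e', 'i', 'a']
print(re.findall('[A-Z][0-9]', '1043879, G2, H6'))  # ['G2', 'H6']

# Curly braces: an explicit repetition count
print(re.findall(r'\w{3}', 'The quick brown fox'))  # ['The', 'qui', 'bro', 'fox']

# Parentheses group sub-components; findall() then returns tuples of the groups
email3 = re.compile(r'([\w.]+)@(\w+)\.([a-z]{3})')
text = "To email Guido, try guido@python.org or the older address guido@google.com."
print(email3.findall(text))
# [('guido', 'python', 'org'), ('guido', 'google', 'com')]

# Named groups, the (?P<name>) syntax described next, expose a dictionary instead
email4 = re.compile(r'(?P<user>[\w.]+)@(?P<domain>\w+)\.(?P<suffix>[a-z]{3})')
print(email4.match('guido@python.org').groupdict())
# {'user': 'guido', 'domain': 'python', 'suffix': 'org'}
```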
We can go a bit further and *name* the extracted components using the ``"(?P<name> )"`` syntax, in which case the groups can be extracted as a Python dictionary:

#### *BIBLIOGRAPHY*

[Learning Python, 5th Edition - O'Reilly](https://learning.oreilly.com/library/view/learning-python-5th/9781449355722/titlepage01.html)
[Python Crash Course, 2nd Edition - O'Reilly](https://learning.oreilly.com/library/view/python-crash-course/9781492071266/xhtml/cover.xhtml)
[Internet](https://google.es)
[Stack Overflow](https://stackoverflow.com)
[Medium](https://medium.com)
[Towards Data Science](https://towardsdatascience.com)
[Data Science Stack Exchange](https://datascience.stackexchange.com)
[Kaggle](https://kaggle.com)
>>> Work in Progress (Following are the lecture notes of Prof Percy Liang/Prof Dorsa Sadigh - CS221 - Stanford. This is my interpretation of his excellent teaching and I take full responsibility of any misinterpretation/misinformation provided herein.) ## Lecture 5: Search 1 - Dynamic Programming, Uniform Cost Search | Stanford CS221 State based models - we learn search/state based problems here - we completed reflex based models earlier <img src="images/01_modelTypes.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- For reflex based models - Model - can be Linear Predictor, or NN - Inference - was simple, evaluate the NN function say - Learning - how to use gd or sgd - Will learn the same way for state based models ----- ### Application of Search problems - Route finding - Objective - Shortest - Fastest - Most scenic - Actions - left, right, straight - Robot motion planning - Objective - go from point A to point B - fastest - most energy efficient - safest - most expressive - Actions - different joint - translation joints - rotation joints - Games - Objective - Rubik - 15 puzzle - Action - move pieces ---- ### Difference between reflex and search based model - Classifier (reflex based models) - based on input, find f, output was a single label - $x \rightarrow \fbox{f} \rightarrow \text{single action } y \in ${+1, -1} - Search problem (state based models) - given a input/state, and given that I have that state, we want an output that is a sequence of actions - _key idea_ - consider future consequences of an action - $x \rightarrow \fbox{f} \rightarrow \text{action sequence} (a_{1}, a_{2},a_{3},a_{4},...)$ ---- ### Roadmap - Tree search - Dynamic programming - Uniform cost search ----- ### Search Problem - Farmer - (7) - Steps - create library of actions - create search tree (what if?) - explore other solutions as well, which can be better - this can also be formulated as optimization problem - Definition - starting state(s) - Action(s) - possible actions - Cost(s,a) - cost of action - Succ(s,a) - successor - IsEnd(s) - reached end state <img src="images/05_farmerProb.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- ### Transportation Problem - Problem - streets with blocks numbered 1 to n - walking from s to s+1 takes 1 minute - taking magic tram from s to 2s takes 2 minute - how to travel from 1 to n in least time? - How to define the initial state? 
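To make the five-part definition concrete before the figures and algorithm tables that follow, here is a minimal sketch of the transportation problem written against that interface (start state, IsEnd, successors with actions and costs). The class and method names are my own choices, not necessarily those used in the course code.

```python
class TransportationProblem:
    """Travel from block 1 to block N.
    Actions: 'walk' s -> s+1 (1 minute), 'tram' s -> 2*s (2 minutes)."""

    def __init__(self, N):
        self.N = N

    def start_state(self):
        return 1

    def is_end(self, state):
        return state == self.N

    def succ_and_cost(self, state):
        # Returns a list of (action, successor state, cost) triples
        results = []
        if state + 1 <= self.N:
            results.append(('walk', state + 1, 1))
        if 2 * state <= self.N:
            results.append(('tram', 2 * state, 2))
        return results


problem = TransportationProblem(N=10)
print(problem.succ_and_cost(3))  # [('walk', 4, 1), ('tram', 6, 2)]
```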
<img src="images/05_transportationProb.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- ### Algorithms - Backtracking search | Algorithms | Cost | Time | Space | | --- | --- | --- | --- | | Backtracking Search | Any | O($b^{D}$) | O(D) | ----- <img src="images/05_transportationProb2.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ - To set the recursion in python ```python import sys sys.setrecursionlimit 100000 ``` ----- ### Algorithm - Depth first search(DFS) | Algorithms | Cost | Time | Space | | --- | --- | --- | --- | | Backtracking Search | Any | O($b^{D}$) | O(D) | | DFS | 0 | O($b^{D}$) | O(D) | - DFS puts in a restriction that cost has to be 0 - Once it finds a solution, it does not look over the entire tree - cost of edge is 0 - the worst case scenario is still the same, but it performs better comparatively - _Backtracking search + stop when you find the first end state_ - _Action costs(s,a) = 0_ ----- ### Algorithm - Breadth first search(BFS) | Algorithms | Cost | Time | Space | | --- | --- | --- | --- | | Backtracking | Any | O($b^{D}$) | O(D) | | DFS | 0 | O($b^{D}$) | O(D) | | BFS | const $\geq0$ | O($b^{d}$) | O($b^{d}$) | - useful when cost is some constant - All the edges have the same cost - search layer by layer and find the solution, so in that sense better than DFS that it doesn't have to search till the very bottom leaf nodes - it might happen that search finds solution in the 2nd layer, and wont look further - limited to a reduced depth (d<D), so the time complexity improves - store every thing because the current node information may be needed later to find child node, so the space complexity is lot worse ----- ### Algorithm - DFS with Iterative Deepening | Algorithms | Cost | Time | Space | | --- | --- | --- | --- | | Backtracking | Any | O($b^{D}$) | O(D) | | DFS | 0 | O($b^{D}$) | O(D) | | BFS | const $\geq0$ | O($b^{d}$) | O($b^{d}$) | | DFS-ID | const $\geq0$ | O($b^{d}$) | O(d) | - combine the benefits of DFS and BFS - goes level by level like BFS, for every level it runs a full DFS - if you find solution early on, its good that you have run few DFS - Analogy - dog with a leash, where you extend the leash everytime if it does not find anything - extending the leash is synonymous to extending the levels ----- ### Disadvantages - these searches have exponential time - we try to avoid using DFS-ID, but not always - the exponential time can be brought down to polynomial time using dynamic programming ----- ### Dynamic programming - suppose there is a state _s_, and we are interested in reaching at end state, but we take action a to reach state _s'_. 
- the cost required to arrive at state s' is _cost(s,a)_ - from state s', we take bunch of actions to arrive at end state _End_ - the objective is to calculate the future cost of state s _FutureCost(s)_ - in the same way, the future cost from state s' to reach end state is _FutureCost(s')_ - this saves exponential space and time - key idea is to think of how to define the state - _a state is a summary of all the past actions sufficient to choose future actions optimally_ - the future cost will be <img src="images/05_dynProgCost2.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- ### Problem - Route finding - find the minimum cost path from city 1 to city n, only moving forward - it costs $c_{ij}$ to go from i to j - the picture below is the representation of problem statement - future cost is recursive and only depends on state - if we save it, we dont have to recompute - _future cost only depends on current city, which is enough to compute future cost_ <img src="images/05_dynProgRouteFinding.png" width=200 height=200> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- ### Algorithm - Route finding <img src="images/05_dynProgRouteFindingAlgo.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- - Assumption here is - works only for acyclic graphs - there is a natural ordering that exists here regarding future costs, so cycle is not possible here - does not work for cyclic graphs ----- ### Adding constraints to the problem - can't visit three odd cities in a row - City 1 -> City 3 -> ~~City 7~~ - Now the current city state is not enough - One possible solution is > S = (Previous city, Current city) > |S| = $N^{2}$ - Here the problem is there are N possible combinations here, which results in exponential cost - Other possible solution is > S = (if prev city was odd(a counter True/False), current city) > |S| = 2N - 2 comes from the fact that we have two choices, if previous city was odd or even - N comes from the fact that we have N choices for current city <img src="images/05_dynProgRouteFindingAlgo2.png" width=200 height=200> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- ### Adding another constraint to the problem - Problem - travel from city 1 to city n, visiting at least 3 odd cities - Solution - Possible option - (# of odd cities, current city) - all we care about is 3+ odd cities - if we keep track of all odd cities > $|S| = N/2 * N = N^{2}/2$ - but if we keep track of 3+ odd cities > $|S| = 3 * N = 3N$ > S = min((# of odd cities, 3), Current city) ----- ### Uniform cost search (UCS) - When do we use UCS - when the step costs are not the same and we are interested in optimal path - very similar to Dijkstra's algorithm - we have 3 states we need to keep track of - Explored state - the state we have found optimal path of - things we are sure about - we are done with it - Frontier state - we have computed it, but not sure if that is the best way of all - still finding out how to get there cheaply - its a known unknown - Unexplored state - unexplored part of states - dont know how to get there - its an unknown unknown ----- ### Problem <img src="images/05_uniformCostSearchProb.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ ----- ### Algorithm <img src="images/06_uniformCostSearchAlgo.png" width=400 height=400> $\tiny{\text{YouTube-Stanford-CS221-Dorsa Sadigh}}$ -----
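To tie the section together, here is a hedged sketch of three of the algorithms discussed above, written against the `TransportationProblem` interface sketched earlier (it assumes that class is defined): plain backtracking search (any costs, O($b^{D}$) time), memoized dynamic programming of the future cost (valid for acyclic state graphs), and uniform cost search with an explored set and a priority-queue frontier. Note also that the recursion-limit call shown in the notes needs parentheses: `sys.setrecursionlimit(100000)`.

```python
import heapq
import sys
from functools import lru_cache

sys.setrecursionlimit(100000)  # correct call form, with parentheses


def backtracking_search(problem):
    # Explore the whole search tree; keep the cheapest action sequence found
    best = {'cost': float('inf'), 'history': None}

    def recurse(state, history, total_cost):
        if problem.is_end(state):
            if total_cost < best['cost']:
                best['cost'], best['history'] = total_cost, history
            return
        for action, new_state, cost in problem.succ_and_cost(state):
            recurse(new_state, history + [(action, new_state)], total_cost + cost)

    recurse(problem.start_state(), [], 0)
    return best['cost'], best['history']


def dynamic_programming(problem):
    # FutureCost(s) = 0 if IsEnd(s), else min over actions of Cost(s,a) + FutureCost(Succ(s,a))
    # Memoization turns the exponential tree search into one computation per state.
    @lru_cache(maxsize=None)
    def future_cost(state):
        if problem.is_end(state):
            return 0
        return min(cost + future_cost(new_state)
                   for _, new_state, cost in problem.succ_and_cost(state))

    return future_cost(problem.start_state())


def uniform_cost_search(problem):
    # frontier: states reached but not yet settled; explored: states with a proven optimal cost
    frontier = [(0, problem.start_state())]
    explored = set()
    while frontier:
        past_cost, state = heapq.heappop(frontier)
        if state in explored:
            continue
        explored.add(state)
        if problem.is_end(state):
            return past_cost
        for _, new_state, cost in problem.succ_and_cost(state):
            if new_state not in explored:
                heapq.heappush(frontier, (past_cost + cost, new_state))
    return float('inf')


problem = TransportationProblem(N=10)  # assumes the class from the earlier sketch
print(backtracking_search(problem)[0])  # 6
print(dynamic_programming(problem))     # 6
print(uniform_cost_search(problem))     # 6
```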
# Statistics :label:`sec_statistics` Undoubtedly, to be a top deep learning practitioner, the ability to train the state-of-the-art and high accurate models is crucial. However, it is often unclear when improvements are significant, or only the result of random fluctuations in the training process. To be able to discuss uncertainty in estimated values, we must learn some statistics. The earliest reference of *statistics* can be traced back to an Arab scholar Al-Kindi in the $9^{\mathrm{th}}$-century, who gave a detailed description of how to use statistics and frequency analysis to decipher encrypted messages. After 800 years, the modern statistics arose from Germany in 1700s, when the researchers focused on the demographic and economic data collection and analysis. Today, statistics is the science subject that concerns the collection, processing, analysis, interpretation and visualization of data. What is more, the core theory of statistics has been widely used in the research within academia, industry, and government. More specifically, statistics can be divided to *descriptive statistics* and *statistical inference*. The former focus on summarizing and illustrating the features of a collection of observed data, which is referred to as a *sample*. The sample is drawn from a *population*, denotes the total set of similar individuals, items, or events of our experiment interests. Contrary to descriptive statistics, *statistical inference* further deduces the characteristics of a population from the given *samples*, based on the assumptions that the sample distribution can replicate the population distribution at some degree. You may wonder: “What is the essential difference between machine learning and statistics?” Fundamentally speaking, statistics focuses on the inference problem. This type of problems includes modeling the relationship between the variables, such as causal inference, and testing the statistically significance of model parameters, such as A/B testing. In contrast, machine learning emphasizes on making accurate predictions, without explicitly programming and understanding each parameter's functionality. In this section, we will introduce three types of statistics inference methods: evaluating and comparing estimators, conducting hypothesis tests, and constructing confidence intervals. These methods can help us infer the characteristics of a given population, i.e., the true parameter $\theta$. For brevity, we assume that the true parameter $\theta$ of a given population is a scalar value. It is straightforward to extend to the case where $\theta$ is a vector or a tensor, thus we omit it in our discussion. ## Evaluating and Comparing Estimators In statistics, an *estimator* is a function of given samples used to estimate the true parameter $\theta$. We will write $\hat{\theta}_n = \hat{f}(x_1, \ldots, x_n)$ for the estimate of $\theta$ after observing the samples {$x_1, x_2, \ldots, x_n$}. We have seen simple examples of estimators before in section :numref:`sec_maximum_likelihood`. If you have a number of samples from a Bernoulli random variable, then the maximum likelihood estimate for the probability the random variable is one can be obtained by counting the number of ones observed and dividing by the total number of samples. Similarly, an exercise asked you to show that the maximum likelihood estimate of the mean of a Gaussian given a number of samples is given by the average value of all the samples. 
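As a quick numerical aside, here is a sketch of those two maximum likelihood estimators using plain NumPy (the chapter's own code cells use the mxnet `np`); the parameter values are arbitrary illustrative choices.

```python
import numpy as np

rng = np.random.default_rng(0)

# Bernoulli(p): the maximum likelihood estimate of p is the fraction of ones observed
flips = rng.binomial(n=1, p=0.3, size=1000)
p_hat = flips.mean()

# Gaussian mean: the maximum likelihood estimate is the sample average
draws = rng.normal(loc=2.0, scale=1.5, size=1000)
mu_hat = draws.mean()

print(p_hat, mu_hat)  # both should land close to the true values 0.3 and 2.0
```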
These estimators will almost never give the true value of the parameter, but ideally for a large number of samples the estimate will be close. As an example, we show below the true density of a Gaussian random variable with mean zero and variance one, along with a collection samples from that Gaussian. We constructed the $y$ coordinate so every point is visible and the relationship to the original density is clearer. ``` import random from mxnet import np, npx from d2l import mxnet as d2l npx.set_np() # Sample datapoints and create y coordinate epsilon = 0.1 random.seed(8675309) xs = np.random.normal(loc=0, scale=1, size=(300,)) ys = [ np.sum( np.exp(-(xs[:i] - xs[i])**2 / (2 * epsilon**2)) / np.sqrt(2 * np.pi * epsilon**2)) / len(xs) for i in range(len(xs))] # Compute true density xd = np.arange(np.min(xs), np.max(xs), 0.01) yd = np.exp(-xd**2 / 2) / np.sqrt(2 * np.pi) # Plot the results d2l.plot(xd, yd, 'x', 'density') d2l.plt.scatter(xs, ys) d2l.plt.axvline(x=0) d2l.plt.axvline(x=np.mean(xs), linestyle='--', color='purple') d2l.plt.title(f'sample mean: {float(np.mean(xs)):.2f}') d2l.plt.show() ``` There can be many ways to compute an estimator of a parameter $\hat{\theta}_n$. In this section, we introduce three common methods to evaluate and compare estimators: the mean squared error, the standard deviation, and statistical bias. ### Mean Squared Error Perhaps the simplest metric used to evaluate estimators is the *mean squared error (MSE)* (or $l_2$ loss) of an estimator can be defined as $$\mathrm{MSE} (\hat{\theta}_n, \theta) = E[(\hat{\theta}_n - \theta)^2].$$ :eqlabel:`eq_mse_est` This allows us to quantify the average squared deviation from the true value. MSE is always non-negative. If you have read :numref:`sec_linear_regression`, you will recognize it as the most commonly used regression loss function. As a measure to evaluate an estimator, the closer its value to zero, the closer the estimator is close to the true parameter $\theta$. ### Statistical Bias The MSE provides a natural metric, but we can easily imagine multiple different phenomena that might make it large. Two fundamentally important are fluctuation in the estimator due to randomness in the dataset, and systematic error in the estimator due to the estimation procedure. First, let us measure the systematic error. For an estimator $\hat{\theta}_n$, the mathematical illustration of *statistical bias* can be defined as $$\mathrm{bias}(\hat{\theta}_n) = E(\hat{\theta}_n - \theta) = E(\hat{\theta}_n) - \theta.$$ :eqlabel:`eq_bias` Note that when $\mathrm{bias}(\hat{\theta}_n) = 0$, the expectation of the estimator $\hat{\theta}_n$ is equal to the true value of parameter. In this case, we say $\hat{\theta}_n$ is an unbiased estimator. In general, an unbiased estimator is better than a biased estimator since its expectation is the same as the true parameter. It is worth being aware, however, that biased estimators are frequently used in practice. There are cases where unbiased estimators do not exist without further assumptions, or are intractable to compute. This may seem like a significant flaw in an estimator, however the majority of estimators encountered in practice are at least asymptotically unbiased in the sense that the bias tends to zero as the number of available samples tends to infinity: $\lim_{n \rightarrow \infty} \mathrm{bias}(\hat{\theta}_n) = 0$. ### Variance and Standard Deviation Second, let us measure the randomness in the estimator. 
Recall from :numref:`sec_random_variables`, the *standard deviation* (or *standard error*) is defined as the squared root of the variance. We may measure the degree of fluctuation of an estimator by measuring the standard deviation or variance of that estimator. $$\sigma_{\hat{\theta}_n} = \sqrt{\mathrm{Var} (\hat{\theta}_n )} = \sqrt{E[(\hat{\theta}_n - E(\hat{\theta}_n))^2]}.$$ :eqlabel:`eq_var_est` It is important to compare :eqref:`eq_var_est` to :eqref:`eq_mse_est`. In this equation we do not compare to the true population value $\theta$, but instead to $E(\hat{\theta}_n)$, the expected sample mean. Thus we are not measuring how far the estimator tends to be from the true value, but instead we measuring the fluctuation of the estimator itself. ### The Bias-Variance Trade-off It is intuitively clear that these two main components contribute to the mean squared error. What is somewhat shocking is that we can show that this is actually a *decomposition* of the mean squared error into these two contributions plus a third one. That is to say that we can write the mean squared error as the sum of the square of the bias, the variance and the irreducible error. $$ \begin{aligned} \mathrm{MSE} (\hat{\theta}_n, \theta) &= E[(\hat{\theta}_n - \theta)^2] \\ &= E[(\hat{\theta}_n)^2] + E[\theta^2] - 2E[\hat{\theta}_n\theta] \\ &= \mathrm{Var} [\hat{\theta}_n] + E[\hat{\theta}_n]^2 + \mathrm{Var} [\theta] + E[\theta]^2 - 2E[\hat{\theta}_n]E[\theta] \\ &= (E[\hat{\theta}_n] - E[\theta])^2 + \mathrm{Var} [\hat{\theta}_n] + \mathrm{Var} [\theta] \\ &= (E[\hat{\theta}_n - \theta])^2 + \mathrm{Var} [\hat{\theta}_n] + \mathrm{Var} [\theta] \\ &= (\mathrm{bias} [\hat{\theta}_n])^2 + \mathrm{Var} (\hat{\theta}_n) + \mathrm{Var} [\theta].\\ \end{aligned} $$ We refer the above formula as *bias-variance trade-off*. The mean squared error can be divided into three sources of error: the error from high bias, the error from high variance and the irreducible error. The bias error is commonly seen in a simple model (such as a linear regression model), which cannot extract high dimensional relations between the features and the outputs. If a model suffers from high bias error, we often say it is *underfitting* or lack of *flexibilty* as introduced in (:numref:`sec_model_selection`). The high variance usually results from a too complex model, which overfits the training data. As a result, an *overfitting* model is sensitive to small fluctuations in the data. If a model suffers from high variance, we often say it is *overfitting* and lack of *generalization* as introduced in (:numref:`sec_model_selection`). The irreducible error is the result from noise in the $\theta$ itself. ### Evaluating Estimators in Code Since the standard deviation of an estimator has been implementing by simply calling `a.std()` for a tensor `a`, we will skip it but implement the statistical bias and the mean squared error. ``` # Statistical bias def stat_bias(true_theta, est_theta): return (np.mean(est_theta) - true_theta) # Mean squared error def mse(data, true_theta): return (np.mean(np.square(data - true_theta))) ``` To illustrate the equation of the bias-variance trade-off, let us simulate of normal distribution $\mathcal{N}(\theta, \sigma^2)$ with $10,000$ samples. Here, we use a $\theta = 1$ and $\sigma = 4$. As the estimator is a function of the given samples, here we use the mean of the samples as an estimator for true $\theta$ in this normal distribution $\mathcal{N}(\theta, \sigma^2)$ . 
``` theta_true = 1 sigma = 4 sample_len = 10000 samples = np.random.normal(theta_true, sigma, sample_len) theta_est = np.mean(samples) theta_est ``` Let us validate the trade-off equation by calculating the summation of the squared bias and the variance of our estimator. First, calculate the MSE of our estimator. ``` mse(samples, theta_true) ``` Next, we calculate $\mathrm{Var} (\hat{\theta}_n) + [\mathrm{bias} (\hat{\theta}_n)]^2$ as below. As you can see, the two values agree to numerical precision. ``` bias = stat_bias(theta_true, theta_est) np.square(samples.std()) + np.square(bias) ``` ## Conducting Hypothesis Tests The most commonly encountered topic in statistical inference is hypothesis testing. While hypothesis testing was popularized in the early 20th century, the first use can be traced back to John Arbuthnot in the 1700s. John tracked 80-year birth records in London and concluded that more men were born than women each year. Following that, the modern significance testing is the intelligence heritage by Karl Pearson who invented $p$-value and Pearson's chi-squared test, William Gosset who is the father of Student's t-distribution, and Ronald Fisher who initialed the null hypothesis and the significance test. A *hypothesis test* is a way of evaluating some evidence against the default statement about a population. We refer the default statement as the *null hypothesis* $H_0$, which we try to reject using the observed data. Here, we use $H_0$ as a starting point for the statistical significance testing. The *alternative hypothesis* $H_A$ (or $H_1$) is a statement that is contrary to the null hypothesis. A null hypothesis is often stated in a declarative form which posits a relationship between variables. It should reflect the brief as explicit as possible, and be testable by statistics theory. Imagine you are a chemist. After spending thousands of hours in the lab, you develop a new medicine which can dramatically improve one's ability to understand math. To show its magic power, you need to test it. Naturally, you may need some volunteers to take the medicine and see whether it can help them learn math better. How do you get started? First, you will need carefully random selected two groups of volunteers, so that there is no difference between their math understanding ability measured by some metrics. The two groups are commonly referred to as the test group and the control group. The *test group* (or *treatment group*) is a group of individuals who will experience the medicine, while the *control group* represents the group of users who are set aside as a benchmark, i.e., identical environment setups except taking this medicine. In this way, the influence of all the variables are minimized, except the impact of the independent variable in the treatment. Second, after a period of taking the medicine, you will need to measure the two groups' math understanding by the same metrics, such as letting the volunteers do the same tests after learning a new math formula. Then, you can collect their performance and compare the results. In this case, our null hypothesis will be that there is no difference between the two groups, and our alternate will be that there is. This is still not fully formal. There are many details you have to think of carefully. For example, what is the suitable metrics to test their math understanding ability? How many volunteers for your test so you can be confident to claim the effectiveness of your medicine? How long should you run the test? 
How do you decide if there is a difference between the two groups? Do you care about the average performance only, or also the range of variation of the scores? And so on. In this way, hypothesis testing provides a framework for experimental design and reasoning about certainty in observed results. If we can now show that the null hypothesis is very unlikely to be true, we may reject it with confidence. To complete the story of how to work with hypothesis testing, we need to now introduce some additional terminology and make some of our concepts above formal. ### Statistical Significance The *statistical significance* measures the probability of erroneously rejecting the null hypothesis, $H_0$, when it should not be rejected, i.e., $$ \text{statistical significance }= 1 - \alpha = 1 - P(\text{reject } H_0 \mid H_0 \text{ is true} ).$$ It is also referred to as the *type I error* or *false positive*. The $\alpha$, is called as the *significance level* and its commonly used value is $5\%$, i.e., $1-\alpha = 95\%$. The significance level can be explained as the level of risk that we are willing to take, when we reject a true null hypothesis. :numref:`fig_statistical_significance` shows the observations' values and probability of a given normal distribution in a two-sample hypothesis test. If the observation data example is located outsides the $95\%$ threshold, it will be a very unlikely observation under the null hypothesis assumption. Hence, there might be something wrong with the null hypothesis and we will reject it. ![Statistical significance.](../img/statistical-significance.svg) :label:`fig_statistical_significance` ### Statistical Power The *statistical power* (or *sensitivity*) measures the probability of reject the null hypothesis, $H_0$, when it should be rejected, i.e., $$ \text{statistical power }= 1 - \beta = 1 - P(\text{ fail to reject } H_0 \mid H_0 \text{ is false} ).$$ Recall that a *type I error* is error caused by rejecting the null hypothesis when it is true, whereas a *type II error* is resulted from failing to reject the null hypothesis when it is false. A type II error is usually denoted as $\beta$, and hence the corresponding statistical power is $1-\beta$. Intuitively, statistical power can be interpreted as how likely our test will detect a real discrepancy of some minimum magnitude at a desired statistical significance level. $80\%$ is a commonly used statistical power threshold. The higher the statistical power, the more likely we are to detect true differences. One of the most common uses of statistical power is in determining the number of samples needed. The probability you reject the null hypothesis when it is false depends on the degree to which it is false (known as the *effect size*) and the number of samples you have. As you might expect, small effect sizes will require a very large number of samples to be detectable with high probability. While beyond the scope of this brief appendix to derive in detail, as an example, want to be able to reject a null hypothesis that our sample came from a mean zero variance one Gaussian, and we believe that our sample's mean is actually close to one, we can do so with acceptable error rates with a sample size of only $8$. However, if we think our sample population true mean is close to $0.01$, then we'd need a sample size of nearly $80000$ to detect the difference. We can imagine the power as a water filter. 
In this analogy, a high power hypothesis test is like a high quality water filtration system that will reduce harmful substances in the water as much as possible. On the other hand, a smaller discrepancy is like a low quality water filter, where some relative small substances may easily escape from the gaps. Similarly, if the statistical power is not of enough high power, then the test may not catch the smaller discrepancy. ### Test Statistic A *test statistic* $T(x)$ is a scalar which summarizes some characteristic of the sample data. The goal of defining such a statistic is that it should allow us to distinguish between different distributions and conduct our hypothesis test. Thinking back to our chemist example, if we wish to show that one population performs better than the other, it could be reasonable to take the mean as the test statistic. Different choices of test statistic can lead to statistical test with drastically different statistical power. Often, $T(X)$ (the distribution of the test statistic under our null hypothesis) will follow, at least approximately, a common probability distribution such as a normal distribution when considered under the null hypothesis. If we can derive explicitly such a distribution, and then measure our test statistic on our dataset, we can safely reject the null hypothesis if our statistic is far outside the range that we would expect. Making this quantitative leads us to the notion of $p$-values. ### $p$-value The $p$-value (or the *probability value*) is the probability that $T(X)$ is at least as extreme as the observed test statistic $T(x)$ assuming that the null hypothesis is *true*, i.e., $$ p\text{-value} = P_{H_0}(T(X) \geq T(x)).$$ If the $p$-value is smaller than or equal to a predefined and fixed statistical significance level $\alpha$, we may reject the null hypothesis. Otherwise, we will conclude that we are lack of evidence to reject the null hypothesis. For a given population distribution, the *region of rejection* will be the interval contained of all the points which has a $p$-value smaller than the statistical significance level $\alpha$. ### One-side Test and Two-sided Test Normally there are two kinds of significance test: the one-sided test and the two-sided test. The *one-sided test* (or *one-tailed test*) is applicable when the null hypothesis and the alternative hypothesis only have one direction. For example, the null hypothesis may state that the true parameter $\theta$ is less than or equal to a value $c$. The alternative hypothesis would be that $\theta$ is greater than $c$. That is, the region of rejection is on only one side of the sampling distribution. Contrary to the one-sided test, the *two-sided test* (or *two-tailed test*) is applicable when the region of rejection is on both sides of the sampling distribution. An example in this case may have a null hypothesis state that the true parameter $\theta$ is equal to a value $c$. The alternative hypothesis would be that $\theta$ is not equal to $c$. ### General Steps of Hypothesis Testing After getting familiar with the above concepts, let us go through the general steps of hypothesis testing. 1. State the question and establish a null hypotheses $H_0$. 2. Set the statistical significance level $\alpha$ and a statistical power ($1 - \beta$). 3. Obtain samples through experiments. The number of samples needed will depend on the statistical power, and the expected effect size. 4. Calculate the test statistic and the $p$-value. 5. 
Make the decision to keep or reject the null hypothesis based on the $p$-value and the statistical significance level $\alpha$. To conduct a hypothesis test, we start by defining a null hypothesis and a level of risk that we are willing to take. Then we calculate the test statistic of the sample, taking an extreme value of the test statistic as evidence against the null hypothesis. If the test statistic falls within the reject region, we may reject the null hypothesis in favor of the alternative. Hypothesis testing is applicable in a variety of scenarios such as the clinical trails and A/B testing. ## Constructing Confidence Intervals When estimating the value of a parameter $\theta$, point estimators like $\hat \theta$ are of limited utility since they contain no notion of uncertainty. Rather, it would be far better if we could produce an interval that would contain the true parameter $\theta$ with high probability. If you were interested in such ideas a century ago, then you would have been excited to read "Outline of a Theory of Statistical Estimation Based on the Classical Theory of Probability" by Jerzy Neyman :cite:`Neyman.1937`, who first introduced the concept of confidence interval in 1937. To be useful, a confidence interval should be as small as possible for a given degree of certainty. Let us see how to derive it. ### Definition Mathematically, a *confidence interval* for the true parameter $\theta$ is an interval $C_n$ that computed from the sample data such that $$P_{\theta} (C_n \ni \theta) \geq 1 - \alpha, \forall \theta.$$ :eqlabel:`eq_confidence` Here $\alpha \in (0, 1)$, and $1 - \alpha$ is called the *confidence level* or *coverage* of the interval. This is the same $\alpha$ as the significance level as we discussed about above. Note that :eqref:`eq_confidence` is about variable $C_n$, not about the fixed $\theta$. To emphasize this, we write $P_{\theta} (C_n \ni \theta)$ rather than $P_{\theta} (\theta \in C_n)$. ### Interpretation It is very tempting to interpret a $95\%$ confidence interval as an interval where you can be $95\%$ sure the true parameter lies, however this is sadly not true. The true parameter is fixed, and it is the interval that is random. Thus a better interpretation would be to say that if you generated a large number of confidence intervals by this procedure, $95\%$ of the generated intervals would contain the true parameter. This may seem pedantic, but it can have real implications for the interpretation of the results. In particular, we may satisfy :eqref:`eq_confidence` by constructing intervals that we are *almost certain* do not contain the true value, as long as we only do so rarely enough. We close this section by providing three tempting but false statements. An in-depth discussion of these points can be found in :cite:`Morey.Hoekstra.Rouder.ea.2016`. * **Fallacy 1**. Narrow confidence intervals mean we can estimate the parameter precisely. * **Fallacy 2**. The values inside the confidence interval are more likely to be the true value than those outside the interval. * **Fallacy 3**. The probability that a particular observed $95\%$ confidence interval contains the true value is $95\%$. Sufficed to say, confidence intervals are subtle objects. However, if you keep the interpretation clear, they can be powerful tools. ### A Gaussian Example Let us discuss the most classical example, the confidence interval for the mean of a Gaussian of unknown mean and variance. 
Suppose we collect $n$ samples $\{x_i\}_{i=1}^n$ from our Gaussian $\mathcal{N}(\mu, \sigma^2)$. We can compute estimators for the mean and standard deviation by taking $$\hat\mu_n = \frac{1}{n}\sum_{i=1}^n x_i \;\text{and}\; \hat\sigma^2_n = \frac{1}{n-1}\sum_{i=1}^n (x_i - \hat\mu)^2.$$ If we now consider the random variable $$ T = \frac{\hat\mu_n - \mu}{\hat\sigma_n/\sqrt{n}}, $$ we obtain a random variable following a well-known distribution called the *Student's t-distribution on* $n-1$ *degrees of freedom*. This distribution is very well studied, and it is known, for instance, that as $n\rightarrow \infty$, it is approximately a standard Gaussian, and thus by looking up values of the Gaussian c.d.f. in a table, we may conclude that the value of $T$ is in the interval $[-1.96, 1.96]$ at least $95\%$ of the time. For finite values of $n$, the interval needs to be somewhat larger, but are well known and precomputed in tables. Thus, we may conclude that for large $n$, $$ P\left(\frac{\hat\mu_n - \mu}{\hat\sigma_n/\sqrt{n}} \in [-1.96, 1.96]\right) \ge 0.95. $$ Rearranging this by multiplying both sides by $\hat\sigma_n/\sqrt{n}$ and then adding $\hat\mu_n$, we obtain $$ P\left(\mu \in \left[\hat\mu_n - 1.96\frac{\hat\sigma_n}{\sqrt{n}}, \hat\mu_n + 1.96\frac{\hat\sigma_n}{\sqrt{n}}\right]\right) \ge 0.95. $$ Thus we know that we have found our $95\%$ confidence interval: $$\left[\hat\mu_n - 1.96\frac{\hat\sigma_n}{\sqrt{n}}, \hat\mu_n + 1.96\frac{\hat\sigma_n}{\sqrt{n}}\right].$$ :eqlabel:`eq_gauss_confidence` It is safe to say that :eqref:`eq_gauss_confidence` is one of the most used formula in statistics. Let us close our discussion of statistics by implementing it. For simplicity, we assume we are in the asymptotic regime. Small values of $N$ should include the correct value of `t_star` obtained either programmatically or from a $t$-table. ``` # Number of samples N = 1000 # Sample dataset samples = np.random.normal(loc=0, scale=1, size=(N,)) # Lookup Students's t-distribution c.d.f. t_star = 1.96 # Construct interval mu_hat = np.mean(samples) sigma_hat = samples.std(ddof=1) (mu_hat - t_star * sigma_hat / np.sqrt(N), mu_hat + t_star * sigma_hat / np.sqrt(N)) ``` ## Summary * Statistics focuses on inference problems, whereas deep learning emphasizes on making accurate predictions without explicitly programming and understanding. * There are three common statistics inference methods: evaluating and comparing estimators, conducting hypothesis tests, and constructing confidence intervals. * There are three most common estimators: statistical bias, standard deviation, and mean square error. * A confidence interval is an estimated range of a true population parameter that we can construct by given the samples. * Hypothesis testing is a way of evaluating some evidence against the default statement about a population. ## Exercises 1. Let $X_1, X_2, \ldots, X_n \overset{\text{iid}}{\sim} \mathrm{Unif}(0, \theta)$, where "iid" stands for *independent and identically distributed*. Consider the following estimators of $\theta$: $$\hat{\theta} = \max \{X_1, X_2, \ldots, X_n \};$$ $$\tilde{\theta} = 2 \bar{X_n} = \frac{2}{n} \sum_{i=1}^n X_i.$$ * Find the statistical bias, standard deviation, and mean square error of $\hat{\theta}.$ * Find the statistical bias, standard deviation, and mean square error of $\tilde{\theta}.$ * Which estimator is better? 1. For our chemist example in introduction, can you derive the 5 steps to conduct a two-sided hypothesis testing? 
Assume the statistical significance level is $\alpha = 0.05$ and the statistical power is $1 - \beta = 0.8$. 1. Run the confidence interval code with $N=2$ and $\alpha = 0.5$ for $100$ independently generated datasets, and plot the resulting intervals (in this case `t_star = 1.0`). You will see several very short intervals which are very far from containing the true mean $0$. Does this contradict the interpretation of the confidence interval? Do you feel comfortable using short intervals to indicate high-precision estimates? [Discussions](https://discuss.d2l.ai/t/419)
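To complement the exercises, here is a hedged numerical sketch of three ideas from this section that have no accompanying code above: the sample-size calculation behind the "$8$ versus roughly $80{,}000$" figures in the power discussion (assuming a two-sided $z$-test on the mean of a variance-$1$ Gaussian), a two-sided $z$-test $p$-value, and a simulation of the coverage interpretation of the $95\%$ confidence interval. It uses plain NumPy/SciPy rather than the mxnet `np` used in the chapter's code cells.

```python
import numpy as np
from scipy.stats import norm

# Sample size for a two-sided z-test on the mean of a variance-1 Gaussian
def required_n(effect_size, alpha=0.05, power=0.8):
    z_alpha = norm.ppf(1 - alpha / 2)  # ~1.96 for alpha = 0.05
    z_power = norm.ppf(power)          # ~0.84 for power = 0.8
    return ((z_alpha + z_power) / effect_size) ** 2

print(required_n(1.0))   # ~7.85   -> about 8 samples
print(required_n(0.01))  # ~78500  -> close to 80,000 samples

# Two-sided p-value for H0: mean == mu_0, with known standard deviation sigma
def z_test_p_value(samples, mu_0=0.0, sigma=1.0):
    z = (np.mean(samples) - mu_0) / (sigma / np.sqrt(len(samples)))
    return 2 * (1 - norm.cdf(abs(z)))

rng = np.random.default_rng(0)
print(z_test_p_value(rng.normal(0.0, 1.0, size=50)))  # typically well above 0.05: keep H0
print(z_test_p_value(rng.normal(0.5, 1.0, size=50)))  # typically below 0.05: reject H0

# Coverage check: roughly 95% of intervals mu_hat +- 1.96 * sigma_hat / sqrt(n)
# should contain the true mean
mu, sigma, n, trials, covered = 0.0, 1.0, 50, 1000, 0
for _ in range(trials):
    x = rng.normal(mu, sigma, size=n)
    half_width = 1.96 * x.std(ddof=1) / np.sqrt(n)
    covered += (x.mean() - half_width <= mu <= x.mean() + half_width)
print(covered / trials)  # roughly 0.95
```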
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-data-transfer.png) # Azure Machine Learning Pipeline with DataTransferStep This notebook is used to demonstrate the use of DataTransferStep in an Azure Machine Learning Pipeline. In certain cases, you will need to transfer data from one data location to another. For example, your data may be in Azure SQL Database and you may want to move it to Azure Data Lake storage. Or, your data is in an ADLS account and you want to make it available in the Blob storage. The built-in **DataTransferStep** class helps you transfer data in these situations. The below examples show how to move data between different storage types supported in Azure Machine Learning. ## Data transfer currently supports following storage types: | Data store | Supported as a source | Supported as a sink | | --- | --- | --- | | Azure Blob Storage | Yes | Yes | | Azure Data Lake Storage Gen 1 | Yes | Yes | | Azure Data Lake Storage Gen 2 | Yes | Yes | | Azure SQL Database | Yes | Yes | | Azure Database for PostgreSQL | Yes | Yes | | Azure Database for MySQL | Yes | Yes | ## Azure Machine Learning and Pipeline SDK-specific imports ``` import os import azureml.core from azureml.core.compute import ComputeTarget, DataFactoryCompute from azureml.exceptions import ComputeTargetException from azureml.core import Workspace, Experiment from azureml.pipeline.core import Pipeline from azureml.core.datastore import Datastore from azureml.data.data_reference import DataReference from azureml.pipeline.steps import DataTransferStep # Check core SDK version number print("SDK version:", azureml.core.VERSION) ``` ## Initialize Workspace Initialize a workspace object from persisted configuration. If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure the config file is present at .\config.json If you don't have a config.json file, please go through the [configuration Notebook](https://aka.ms/pl-config) first. This sets you up with a working config file that has information on your workspace, subscription id, etc. ``` ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') ``` ## Register Datastores and create DataReferences For background on registering your data store, consult this article: https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-access-data > Please make sure to update the following code examples with appropriate values. ### Azure Blob Storage > Since Blob Storage can contain a file and directory with the same name, you can use **source_reference_type** and **destination_reference_type** optional arguments in DataTransferStep constructor to explicitly specify whether you're referring to the file or the directory. 
``` from msrest.exceptions import HttpOperationError blob_datastore_name='MyBlobDatastore' account_name=os.getenv("BLOB_ACCOUNTNAME_62", "<my-account-name>") # Storage account name container_name=os.getenv("BLOB_CONTAINER_62", "<my-container-name>") # Name of Azure blob container account_key=os.getenv("BLOB_ACCOUNT_KEY_62", "<my-account-key>") # Storage account key try: blob_datastore = Datastore.get(ws, blob_datastore_name) print("Found Blob Datastore with name: %s" % blob_datastore_name) except HttpOperationError: blob_datastore = Datastore.register_azure_blob_container( workspace=ws, datastore_name=blob_datastore_name, account_name=account_name, # Storage account name container_name=container_name, # Name of Azure blob container account_key=account_key) # Storage account key print("Registered blob datastore with name: %s" % blob_datastore_name) blob_data_ref = DataReference( datastore=blob_datastore, data_reference_name="blob_test_data", path_on_datastore="testdata") ``` ### Azure Data Lake Storage Gen1 Please consult the following articles for detailed steps on setting up service principal authentication and assigning correct permissions to Data Lake Storage account: https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-using-active-directory https://docs.microsoft.com/en-us/azure/data-factory/connector-azure-data-lake-store#use-service-principal-authentication ``` datastore_name='MyAdlsDatastore' subscription_id=os.getenv("ADL_SUBSCRIPTION_62", "<my-subscription-id>") # subscription id of ADLS account resource_group=os.getenv("ADL_RESOURCE_GROUP_62", "<my-resource-group>") # resource group of ADLS account store_name=os.getenv("ADL_STORENAME_62", "<my-datastore-name>") # ADLS account name tenant_id=os.getenv("ADL_TENANT_62", "<my-tenant-id>") # tenant id of service principal client_id=os.getenv("ADL_CLIENTID_62", "<my-client-id>") # client id of service principal client_secret=os.getenv("ADL_CLIENT_SECRET_62", "<my-client-secret>") # the secret of service principal try: adls_datastore = Datastore.get(ws, datastore_name) print("Found datastore with name: %s" % datastore_name) except HttpOperationError: adls_datastore = Datastore.register_azure_data_lake( workspace=ws, datastore_name=datastore_name, subscription_id=subscription_id, # subscription id of ADLS account resource_group=resource_group, # resource group of ADLS account store_name=store_name, # ADLS account name tenant_id=tenant_id, # tenant id of service principal client_id=client_id, # client id of service principal client_secret=client_secret) # the secret of service principal print("Registered datastore with name: %s" % datastore_name) adls_data_ref = DataReference( datastore=adls_datastore, data_reference_name="adls_test_data", path_on_datastore="testdata") ``` ### Azure Data Lake Storage Gen2 Please consult the following article for detailed steps on setting up service principal authentication and assigning correct permissions to Data lake Storage Gen2 account: https://docs.microsoft.com/en-us/azure/data-factory/connector-azure-data-lake-storage#service-principal-authentication ``` adlsgen2_datastore_name = 'myadlsgen2datastore' account_name=os.getenv("ADLSGEN2_ACCOUNTNAME_62", "<my-account-name>") # ADLS Gen2 account name tenant_id=os.getenv("ADLSGEN2_TENANT_62", "<my-tenant-id>") # tenant id of service principal client_id=os.getenv("ADLSGEN2_CLIENTID_62", "<my-client-id>") # client id of service principal client_secret=os.getenv("ADLSGEN2_CLIENT_SECRET_62", 
"<my-client-secret>") # the secret of service principal try: adlsgen2_datastore = Datastore.get(ws, adlsgen2_datastore_name) print("Found ADLS Gen2 datastore with name: %s" % adlsgen2_datastore_name) except: adlsgen2_datastore = Datastore.register_azure_data_lake_gen2( workspace=ws, datastore_name=adlsgen2_datastore_name, filesystem='test', # Name of ADLS Gen2 filesystem account_name=account_name, # ADLS Gen2 account name tenant_id=tenant_id, # tenant id of service principal client_id=client_id, # client id of service principal client_secret=client_secret) # the secret of service principal print("Registered datastore with name: %s" % adlsgen2_datastore_name) adlsgen2_data_ref = DataReference( datastore=adlsgen2_datastore, data_reference_name='adlsgen2_test_data', path_on_datastore='testdata') ``` ### Azure SQL Database For enabling service principal authentication for an Azure SQL Database, please follow this section in Azure Data Factory documentation: https://docs.microsoft.com/en-us/azure/data-factory/connector-azure-sql-database#service-principal-authentication > Note: When copying data **to** an Azure SQL Database, data will be _appended_ to an existing table. We also expect the source file to have a header row and the names should exactly match with column names in destination table. ``` sql_datastore_name="MySqlDatastore" server_name=os.getenv("SQL_SERVERNAME_62", "<my-server-name>") # Name of SQL server database_name=os.getenv("SQL_DATBASENAME_62", "<my-database-name>") # Name of SQL database client_id=os.getenv("SQL_CLIENTNAME_62", "<my-client-id>") # client id of service principal with permissions to access database client_secret=os.getenv("SQL_CLIENTSECRET_62", "<my-client-secret>") # the secret of service principal tenant_id=os.getenv("SQL_TENANTID_62", "<my-tenant-id>") # tenant id of service principal try: sql_datastore = Datastore.get(ws, sql_datastore_name) print("Found sql database datastore with name: %s" % sql_datastore_name) except HttpOperationError: sql_datastore = Datastore.register_azure_sql_database( workspace=ws, datastore_name=sql_datastore_name, server_name=server_name, database_name=database_name, client_id=client_id, client_secret=client_secret, tenant_id=tenant_id) print("Registered sql databse datastore with name: %s" % sql_datastore_name) from azureml.data.sql_data_reference import SqlDataReference sql_query_data_ref = SqlDataReference( datastore=sql_datastore, data_reference_name="sql_query_data_ref", sql_query="select top 1 * from TestData") ``` ### Azure Database for PostgreSQL ``` psql_datastore_name="MyPostgreSqlDatastore" server_name=os.getenv("PSQL_SERVERNAME_62", "<my-server-name>") # Name of PostgreSQL server database_name=os.getenv("PSQL_DATBASENAME_62", "<my-database-name>") # Name of PostgreSQL database user_id=os.getenv("PSQL_USERID_62", "<my-user-id>") # user id user_password=os.getenv("PSQL_USERPW_62", "<my-user-password>") # user password try: psql_datastore = Datastore.get(ws, psql_datastore_name) print("Found PostgreSQL database datastore with name: %s" % psql_datastore_name) except HttpOperationError: psql_datastore = Datastore.register_azure_postgre_sql( workspace=ws, datastore_name=psql_datastore_name, server_name=server_name, database_name=database_name, user_id=user_id, user_password=user_password) print("Registered PostgreSQL databse datastore with name: %s" % psql_datastore_name) from azureml.data.sql_data_reference import SqlDataReference psql_query_data_ref = SqlDataReference( datastore=psql_datastore, 
data_reference_name="psql_query_data_ref", sql_query="SELECT * FROM testtable") ``` ### Azure Database for MySQL ``` mysql_datastore_name="MySqlDatastore" server_name=os.getenv("MYSQL_SERVERNAME_62", "<my-server-name>") # Name of MySQL server database_name=os.getenv("MYSQL_DATBASENAME_62", "<my-database-name>") # Name of MySQL database user_id=os.getenv("MYSQL_USERID_62", "<my-user-id>") # user id user_password=os.getenv("MYSQL_USERPW_62", "<my-user-password>") # user password try: mysql_datastore = Datastore.get(ws, mysql_datastore_name) print("Found MySQL database datastore with name: %s" % mysql_datastore_name) except HttpOperationError: mysql_datastore = Datastore.register_azure_my_sql( workspace=ws, datastore_name=mysql_datastore_name, server_name=server_name, database_name=database_name, user_id=user_id, user_password=user_password) print("Registered MySQL databse datastore with name: %s" % mysql_datastore_name) from azureml.data.sql_data_reference import SqlDataReference mysql_query_data_ref = SqlDataReference( datastore=mysql_datastore, data_reference_name="mysql_query_data_ref", sql_query="SELECT * FROM testtable") mysql_table_data_ref = SqlDataReference( datastore=mysql_datastore, data_reference_name="mysql_table_data_ref", sql_table="testtable") ``` ## Setup Data Factory Account ``` data_factory_name = 'adftest' def get_or_create_data_factory(workspace, factory_name): try: return DataFactoryCompute(workspace, factory_name) except ComputeTargetException as e: if 'ComputeTargetNotFound' in e.message: print('Data factory not found, creating...') provisioning_config = DataFactoryCompute.provisioning_configuration() data_factory = ComputeTarget.create(workspace, factory_name, provisioning_config) data_factory.wait_for_completion() return data_factory else: raise e data_factory_compute = get_or_create_data_factory(ws, data_factory_name) print("Setup Azure Data Factory account complete") ``` ## Create a DataTransferStep **DataTransferStep** is used to transfer data between Azure Blob, Azure Data Lake Store, and Azure SQL database. - **name:** Name of module - **source_data_reference:** Input connection that serves as source of data transfer operation. - **destination_data_reference:** Input connection that serves as destination of data transfer operation. - **compute_target:** Azure Data Factory to use for transferring data. - **allow_reuse:** Whether the step should reuse results of previous DataTransferStep when run with same inputs. Set as False to force data to be transferred again. Optional arguments to explicitly specify whether a path corresponds to a file or a directory. These are useful when storage contains both file and directory with the same name or when creating a new destination path. - **source_reference_type:** An optional string specifying the type of source_data_reference. Possible values include: 'file', 'directory'. When not specified, we use the type of existing path or directory if it's a new path. - **destination_reference_type:** An optional string specifying the type of destination_data_reference. Possible values include: 'file', 'directory'. When not specified, we use the type of existing path or directory if it's a new path. 
``` transfer_adls_to_blob = DataTransferStep( name="transfer_adls_to_blob", source_data_reference=adls_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute) print("Data transfer step created") transfer_adlsgen2_to_blob = DataTransferStep( name='transfer_adlsgen2_to_blob', source_data_reference=adlsgen2_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute) transfer_sql_to_blob = DataTransferStep( name="transfer_sql_to_blob", source_data_reference=sql_query_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute, destination_reference_type='file') transfer_psql_to_blob = DataTransferStep( name="transfer_psql_to_blob", source_data_reference=psql_query_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute, destination_reference_type='file') transfer_mysql_to_blob = DataTransferStep( name="transfer_mysql_to_blob", source_data_reference=mysql_query_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute) print("Data transfer step created for Sql server, PostgreSQL and MySQL") ``` ## Build and Submit the Experiment ``` pipeline_01 = Pipeline( description="data_transfer_01", workspace=ws, steps=[transfer_adls_to_blob]) pipeline_run_01 = Experiment(ws, "Data_Transfer_example_01").submit(pipeline_01) pipeline_run_01.wait_for_completion() pipeline_02 = Pipeline( description="data_transfer_02", workspace=ws, steps=[transfer_sql_to_blob,transfer_psql_to_blob, transfer_adlsgen2_to_blob]) pipeline_run_02 = Experiment(ws, "Data_Transfer_example_02").submit(pipeline_02) pipeline_run_02.wait_for_completion() ``` ### View Run Details ``` from azureml.widgets import RunDetails RunDetails(pipeline_run_01).show() from azureml.widgets import RunDetails RunDetails(pipeline_run_02).show() ``` # Next: Databricks as a Compute Target To use Databricks as a compute target from Azure Machine Learning Pipeline, a DatabricksStep is used. This [notebook](https://aka.ms/pl-databricks) demonstrates the use of a DatabricksStep in an Azure Machine Learning Pipeline.
github_jupyter
import os import azureml.core from azureml.core.compute import ComputeTarget, DataFactoryCompute from azureml.exceptions import ComputeTargetException from azureml.core import Workspace, Experiment from azureml.pipeline.core import Pipeline from azureml.core.datastore import Datastore from azureml.data.data_reference import DataReference from azureml.pipeline.steps import DataTransferStep # Check core SDK version number print("SDK version:", azureml.core.VERSION) ws = Workspace.from_config() print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n') from msrest.exceptions import HttpOperationError blob_datastore_name='MyBlobDatastore' account_name=os.getenv("BLOB_ACCOUNTNAME_62", "<my-account-name>") # Storage account name container_name=os.getenv("BLOB_CONTAINER_62", "<my-container-name>") # Name of Azure blob container account_key=os.getenv("BLOB_ACCOUNT_KEY_62", "<my-account-key>") # Storage account key try: blob_datastore = Datastore.get(ws, blob_datastore_name) print("Found Blob Datastore with name: %s" % blob_datastore_name) except HttpOperationError: blob_datastore = Datastore.register_azure_blob_container( workspace=ws, datastore_name=blob_datastore_name, account_name=account_name, # Storage account name container_name=container_name, # Name of Azure blob container account_key=account_key) # Storage account key print("Registered blob datastore with name: %s" % blob_datastore_name) blob_data_ref = DataReference( datastore=blob_datastore, data_reference_name="blob_test_data", path_on_datastore="testdata") datastore_name='MyAdlsDatastore' subscription_id=os.getenv("ADL_SUBSCRIPTION_62", "<my-subscription-id>") # subscription id of ADLS account resource_group=os.getenv("ADL_RESOURCE_GROUP_62", "<my-resource-group>") # resource group of ADLS account store_name=os.getenv("ADL_STORENAME_62", "<my-datastore-name>") # ADLS account name tenant_id=os.getenv("ADL_TENANT_62", "<my-tenant-id>") # tenant id of service principal client_id=os.getenv("ADL_CLIENTID_62", "<my-client-id>") # client id of service principal client_secret=os.getenv("ADL_CLIENT_SECRET_62", "<my-client-secret>") # the secret of service principal try: adls_datastore = Datastore.get(ws, datastore_name) print("Found datastore with name: %s" % datastore_name) except HttpOperationError: adls_datastore = Datastore.register_azure_data_lake( workspace=ws, datastore_name=datastore_name, subscription_id=subscription_id, # subscription id of ADLS account resource_group=resource_group, # resource group of ADLS account store_name=store_name, # ADLS account name tenant_id=tenant_id, # tenant id of service principal client_id=client_id, # client id of service principal client_secret=client_secret) # the secret of service principal print("Registered datastore with name: %s" % datastore_name) adls_data_ref = DataReference( datastore=adls_datastore, data_reference_name="adls_test_data", path_on_datastore="testdata") adlsgen2_datastore_name = 'myadlsgen2datastore' account_name=os.getenv("ADLSGEN2_ACCOUNTNAME_62", "<my-account-name>") # ADLS Gen2 account name tenant_id=os.getenv("ADLSGEN2_TENANT_62", "<my-tenant-id>") # tenant id of service principal client_id=os.getenv("ADLSGEN2_CLIENTID_62", "<my-client-id>") # client id of service principal client_secret=os.getenv("ADLSGEN2_CLIENT_SECRET_62", "<my-client-secret>") # the secret of service principal try: adlsgen2_datastore = Datastore.get(ws, adlsgen2_datastore_name) print("Found ADLS Gen2 datastore with name: %s" % adlsgen2_datastore_name) except: adlsgen2_datastore = 
Datastore.register_azure_data_lake_gen2( workspace=ws, datastore_name=adlsgen2_datastore_name, filesystem='test', # Name of ADLS Gen2 filesystem account_name=account_name, # ADLS Gen2 account name tenant_id=tenant_id, # tenant id of service principal client_id=client_id, # client id of service principal client_secret=client_secret) # the secret of service principal print("Registered datastore with name: %s" % adlsgen2_datastore_name) adlsgen2_data_ref = DataReference( datastore=adlsgen2_datastore, data_reference_name='adlsgen2_test_data', path_on_datastore='testdata') sql_datastore_name="MySqlDatastore" server_name=os.getenv("SQL_SERVERNAME_62", "<my-server-name>") # Name of SQL server database_name=os.getenv("SQL_DATBASENAME_62", "<my-database-name>") # Name of SQL database client_id=os.getenv("SQL_CLIENTNAME_62", "<my-client-id>") # client id of service principal with permissions to access database client_secret=os.getenv("SQL_CLIENTSECRET_62", "<my-client-secret>") # the secret of service principal tenant_id=os.getenv("SQL_TENANTID_62", "<my-tenant-id>") # tenant id of service principal try: sql_datastore = Datastore.get(ws, sql_datastore_name) print("Found sql database datastore with name: %s" % sql_datastore_name) except HttpOperationError: sql_datastore = Datastore.register_azure_sql_database( workspace=ws, datastore_name=sql_datastore_name, server_name=server_name, database_name=database_name, client_id=client_id, client_secret=client_secret, tenant_id=tenant_id) print("Registered sql databse datastore with name: %s" % sql_datastore_name) from azureml.data.sql_data_reference import SqlDataReference sql_query_data_ref = SqlDataReference( datastore=sql_datastore, data_reference_name="sql_query_data_ref", sql_query="select top 1 * from TestData") psql_datastore_name="MyPostgreSqlDatastore" server_name=os.getenv("PSQL_SERVERNAME_62", "<my-server-name>") # Name of PostgreSQL server database_name=os.getenv("PSQL_DATBASENAME_62", "<my-database-name>") # Name of PostgreSQL database user_id=os.getenv("PSQL_USERID_62", "<my-user-id>") # user id user_password=os.getenv("PSQL_USERPW_62", "<my-user-password>") # user password try: psql_datastore = Datastore.get(ws, psql_datastore_name) print("Found PostgreSQL database datastore with name: %s" % psql_datastore_name) except HttpOperationError: psql_datastore = Datastore.register_azure_postgre_sql( workspace=ws, datastore_name=psql_datastore_name, server_name=server_name, database_name=database_name, user_id=user_id, user_password=user_password) print("Registered PostgreSQL databse datastore with name: %s" % psql_datastore_name) from azureml.data.sql_data_reference import SqlDataReference psql_query_data_ref = SqlDataReference( datastore=psql_datastore, data_reference_name="psql_query_data_ref", sql_query="SELECT * FROM testtable") mysql_datastore_name="MySqlDatastore" server_name=os.getenv("MYSQL_SERVERNAME_62", "<my-server-name>") # Name of MySQL server database_name=os.getenv("MYSQL_DATBASENAME_62", "<my-database-name>") # Name of MySQL database user_id=os.getenv("MYSQL_USERID_62", "<my-user-id>") # user id user_password=os.getenv("MYSQL_USERPW_62", "<my-user-password>") # user password try: mysql_datastore = Datastore.get(ws, mysql_datastore_name) print("Found MySQL database datastore with name: %s" % mysql_datastore_name) except HttpOperationError: mysql_datastore = Datastore.register_azure_my_sql( workspace=ws, datastore_name=mysql_datastore_name, server_name=server_name, database_name=database_name, user_id=user_id, 
user_password=user_password) print("Registered MySQL databse datastore with name: %s" % mysql_datastore_name) from azureml.data.sql_data_reference import SqlDataReference mysql_query_data_ref = SqlDataReference( datastore=mysql_datastore, data_reference_name="mysql_query_data_ref", sql_query="SELECT * FROM testtable") mysql_table_data_ref = SqlDataReference( datastore=mysql_datastore, data_reference_name="mysql_table_data_ref", sql_table="testtable") data_factory_name = 'adftest' def get_or_create_data_factory(workspace, factory_name): try: return DataFactoryCompute(workspace, factory_name) except ComputeTargetException as e: if 'ComputeTargetNotFound' in e.message: print('Data factory not found, creating...') provisioning_config = DataFactoryCompute.provisioning_configuration() data_factory = ComputeTarget.create(workspace, factory_name, provisioning_config) data_factory.wait_for_completion() return data_factory else: raise e data_factory_compute = get_or_create_data_factory(ws, data_factory_name) print("Setup Azure Data Factory account complete") transfer_adls_to_blob = DataTransferStep( name="transfer_adls_to_blob", source_data_reference=adls_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute) print("Data transfer step created") transfer_adlsgen2_to_blob = DataTransferStep( name='transfer_adlsgen2_to_blob', source_data_reference=adlsgen2_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute) transfer_sql_to_blob = DataTransferStep( name="transfer_sql_to_blob", source_data_reference=sql_query_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute, destination_reference_type='file') transfer_psql_to_blob = DataTransferStep( name="transfer_psql_to_blob", source_data_reference=psql_query_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute, destination_reference_type='file') transfer_mysql_to_blob = DataTransferStep( name="transfer_mysql_to_blob", source_data_reference=mysql_query_data_ref, destination_data_reference=blob_data_ref, compute_target=data_factory_compute) print("Data transfer step created for Sql server, PostgreSQL and MySQL") pipeline_01 = Pipeline( description="data_transfer_01", workspace=ws, steps=[transfer_adls_to_blob]) pipeline_run_01 = Experiment(ws, "Data_Transfer_example_01").submit(pipeline_01) pipeline_run_01.wait_for_completion() pipeline_02 = Pipeline( description="data_transfer_02", workspace=ws, steps=[transfer_sql_to_blob,transfer_psql_to_blob, transfer_adlsgen2_to_blob]) pipeline_run_02 = Experiment(ws, "Data_Transfer_example_02").submit(pipeline_02) pipeline_run_02.wait_for_completion() from azureml.widgets import RunDetails RunDetails(pipeline_run_01).show() from azureml.widgets import RunDetails RunDetails(pipeline_run_02).show()
0.217836
0.897201
# The Rational Speech Act framework (This tutorial was written by Noah Goodman and Eli Bingham) Human language depends on the assumption of *cooperativity*, that speakers attempt to provide relevant information to the listener; listeners can use this assumption to reason *pragmatically* about the likely state of the world given the utterance chosen by the speaker. The Rational Speech Act framework formalizes these ideas using probabilistic decision making and reasoning. Note: This notebook must be run against Pyro 4392d54a220c328ee356600fb69f82166330d3d6 or later. ``` #first some imports import torch torch.set_default_dtype(torch.float64) # double precision for numerical stability import collections import argparse import matplotlib.pyplot as plt import pyro import pyro.distributions as dist import pyro.poutine as poutine from search_inference import factor, HashingMarginal, memoize, Search ``` Before we can define RSA, we specify a helper function that wraps up inference. `Marginal` takes an un-normalized stochastic function, constructs the distribution over execution traces by using `Search`, and constructs the marginal distribution on return values (via `HashingMarginal`). ``` def Marginal(fn): return memoize(lambda *args: HashingMarginal(Search(fn).run(*args))) ``` The RSA model captures recursive social reasoning -- a listener thinks about a speaker who thinks about a listener.... To start, the `literal_listener` simply imposes that the utterance is true. Mathematically: $$P_\text{Lit}(s|u) \propto {\mathcal L}(u,s)P(s)$$ In code: ``` @Marginal def literal_listener(utterance): state = state_prior() factor("literal_meaning", 0. if meaning(utterance, state) else -999999.) return state ``` Next the cooperative speaker chooses an utterance to convey a given state to the literal listener. Mathematically: $$P_S(u|s) \propto [P_\text{Lit}(s|u) P(u)]^\alpha$$ In the code below, the `utterance_prior` captures the cost of producing an utterance, while the `pyro.sample` expression captures that the literal listener guesses the right state (`obs=state` indicates that the sampled value is observed to be the correct `state`). We use `poutine.scale` to raise the entire execution probability to the power of `alpha` -- this yields a softmax decision rule with optimality parameter `alpha`. ``` @Marginal def speaker(state): alpha = 1. with poutine.scale(scale=torch.tensor(alpha)): utterance = utterance_prior() pyro.sample("listener", literal_listener(utterance), obs=state) return utterance ``` Finally, we can define the pragmatic_listener, who infers which state is likely, given that the speaker chose a given utterance. Mathematically: $$P_L(s|u) \propto P_S(u|s) P(s)$$ In code: ``` @Marginal def pragmatic_listener(utterance): state = state_prior() pyro.sample("speaker", speaker(state), obs=utterance) return state ``` Now let's set up a simple world by filling in the priors. We imagine there are 4 objects each either blue or red, and the possible utterances are "none are blue", "some are blue", "all are blue". We take the prior probabilities for the number of blue objects and the utterance to be uniform.
``` total_number = 4 def state_prior(): n = pyro.sample("state", dist.Categorical(probs=torch.ones(total_number+1) / (total_number+1))) return n def utterance_prior(): ix = pyro.sample("utt", dist.Categorical(probs=torch.ones(3) / 3)) return ["none","some","all"][ix] ``` Finally, the meaning function (notated $\mathcal L$ above): ``` meanings = { "none": lambda N: N==0, "some": lambda N: N>0, "all": lambda N: N==total_number, } def meaning(utterance, state): return meanings[utterance](state) ``` Now let's see if it works: how does the pragmatic listener interpret the "some" utterance? ``` #silly plotting helper: def plot_dist(d): support = d.enumerate_support() data = [d.log_prob(s).exp().item() for s in d.enumerate_support()] names = support ax = plt.subplot(111) width=0.3 bins = map(lambda x: x-width/2,range(1,len(data)+1)) ax.bar(bins,data,width=width) ax.set_xticks(map(lambda x: x, range(1,len(data)+1))) ax.set_xticklabels(names,rotation=45, rotation_mode="anchor", ha="right") interp_dist = pragmatic_listener("some") plot_dist(interp_dist) ``` Yay, we get a *scalar implicature*: "some" is interpreted as likely not including all 4. Try looking at the `literal_listener` too -- no implicature.
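For example, pointing the same plotting helper at the literal listener shows the difference (a quick check using only functions defined above):

```
# The literal listener spreads probability roughly uniformly over 1..4 blue
# objects for "some" -- no scalar implicature at this level of reasoning.
literal_dist = literal_listener("some")
plot_dist(literal_dist)
```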
github_jupyter
#first some imports import torch torch.set_default_dtype(torch.float64) # double precision for numerical stability import collections import argparse import matplotlib.pyplot as plt import pyro import pyro.distributions as dist import pyro.poutine as poutine from search_inference import factor, HashingMarginal, memoize, Search def Marginal(fn): return memoize(lambda *args: HashingMarginal(Search(fn).run(*args))) @Marginal def literal_listener(utterance): state = state_prior() factor("literal_meaning", 0. if meaning(utterance, state) else -999999.) return state @Marginal def speaker(state): alpha = 1. with poutine.scale(scale=torch.tensor(alpha)): utterance = utterance_prior() pyro.sample("listener", literal_listener(utterance), obs=state) return utterance @Marginal def pragmatic_listener(utterance): state = state_prior() pyro.sample("speaker", speaker(state), obs=utterance) return state total_number = 4 def state_prior(): n = pyro.sample("state", dist.Categorical(probs=torch.ones(total_number+1) / total_number+1)) return n def utterance_prior(): ix = pyro.sample("utt", dist.Categorical(probs=torch.ones(3) / 3)) return ["none","some","all"][ix] meanings = { "none": lambda N: N==0, "some": lambda N: N>0, "all": lambda N: N==total_number, } def meaning(utterance, state): return meanings[utterance](state) #silly plotting helper: def plot_dist(d): support = d.enumerate_support() data = [d.log_prob(s).exp().item() for s in d.enumerate_support()] names = support ax = plt.subplot(111) width=0.3 bins = map(lambda x: x-width/2,range(1,len(data)+1)) ax.bar(bins,data,width=width) ax.set_xticks(map(lambda x: x, range(1,len(data)+1))) ax.set_xticklabels(names,rotation=45, rotation_mode="anchor", ha="right") interp_dist = pragmatic_listener("some") plot_dist(interp_dist)
0.46223
0.992184
# Get Started Here are some sample queries. See what BQX can do. ## Initialization ``` from bqx.query import Query as Q from bqx.parts import Table as T, Column as C from bqx.func import SUM ``` # 1. Simple examples ## 1.1 Make simple query. ``` q = Q().SELECT('name').FROM('sample_table') print(q.getq()) ``` ## 1.2 Get rid of quotes using Aliases. ``` sample_table = T('sample_table') name = C('name') q = Q().SELECT(name).FROM(sample_table) print(q.getq()) ``` ## 1.3 You'll want WHERE clause. Column alias has overridden operators. It provides syntax highlighting feature on conditions. ``` sample_table = T('sample_table') name = C('name') q = Q().SELECT(name).FROM(sample_table).WHERE(name == 'Hatsune Miku') print(q.getq()) ``` ## 1.4 SUM of column? Of course! ``` sample_table = T('sample_table') name = C('name') score = C('score') score_sum = SUM(score) q = Q().SELECT(name, score_sum).FROM(sample_table).WHERE(name == 'Hatsune Miku').GROUP_BY(score) print(q.getq()) ``` # 2. BQX's special features ## 2.1 Keep it partial. Use it later. Put your query in in-complete state (we call it 'partial query'). Generate variety of queries with Python's power. ``` sample_tables = [T('table_foo'), T('table_bar'), T('table_baz')] name = C('name') q = Q().SELECT(name) # Query without FROM??? for table in sample_tables: print(q.FROM(table).getq()) # Now it's complete query print() ``` ## 2.2 Escape from bracket hell. I guess you have ever seen a nested query in nested query in nested query with bunch of AS clauses like: ``` sql SELECT average, name FROM ( SELECT pid, (a+b+c)/3 AS average, name FROM ( SELECT x.pid AS pid, x.a AS a, x.b AS b, x.c AS c, y.name AS name FROM [dataset.x] AS x INNER JOIN [dataset.y] as y ON x.pid = y.pid ) ) ORDER BY name ``` Here is a solution to this. **Sub query reference** feature and **Auto alias** feature is used. ``` # Call AS function manually to define AS clause. x = T('table_x').AS('x') y = T('table_y').AS('y') # You don't have to call AS func all time. # If you say auto_alias is True, AS clause will be auto-generated # next to columns like 'x.pid', 'x.a', 'x.b', 'x.c' declared below. q1 = ( Q(auto_alias=True) .SELECT(x.pid, x.a, x.b, x.c, y.name) .FROM(x) .INNER_JOIN(y) .ON(x.pid == y.pid)) pid, name, a, b, c = C('pid'), C('name'), C('a'), C('b'), C('c') average_calc = ((a + b + c) / 3).AS('average') q2 = ( Q() .SELECT(pid, average_calc, name) .FROM(q1)) average = C('average') q3 = ( Q() .SELECT(average, name) .FROM(q2) .ORDER_BY(name)) print(q3.getq()) ``` ## 2.3 I WANT MORE, MORE SIMPLE QUERY!!! BQX have **SELECT chain** feature for simplification. Literally you *can* chain SELECT clauses and omit FROM clauses. Here is another example which provides identical query shown above, with shorter code. ``` x = T('table_x').AS('x') y = T('table_y').AS('y') pid, name, average, a, b, c = C('pid'), C('name'), C('average'), C('a'), C('b'), C('c') average_calc = ((a + b + c) / 3).AS('average') q = ( Q(auto_alias=True) .SELECT(x.pid, x.a, x.b, x.c, y.name) .FROM(x) .INNER_JOIN(y) .ON(x.pid == y.pid) .SELECT(pid, average_calc, name) .SELECT(average, name) .ORDER_BY(name)) print(q.getq()) ```
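The pieces above can also be combined, for instance a partial query from 2.1 plus the WHERE clause from 1.3. This is a small sketch assuming clauses chain on a reused partial query exactly as they do in the single-query examples:

```
sample_tables = [T('table_foo'), T('table_bar'), T('table_baz')]
name = C('name')

q = Q().SELECT(name)  # partial query, completed per table below
for table in sample_tables:
    print(q.FROM(table).WHERE(name == 'Hatsune Miku').getq())
    print()
```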
github_jupyter
from bqx.query import Query as Q from bqx.parts import Table as T, Column as C from bqx.func import SUM q = Q().SELECT('name').FROM('sample_table') print(q.getq()) sample_table = T('sample_table') name = C('name') q = Q().SELECT(name).FROM(sample_table) print(q.getq()) sample_table = T('sample_table') name = C('name') q = Q().SELECT(name).FROM(sample_table).WHERE(name == 'Hatsune Miku') print(q.getq()) sample_table = T('sample_table') name = C('name') score = C('score') score_sum = SUM(score) q = Q().SELECT(name, score_sum).FROM(sample_table).WHERE(name == 'Hatsune Miku').GROUP_BY(score) print(q.getq()) sample_tables = [T('table_foo'), T('table_bar'), T('table_baz')] name = C('name') q = Q().SELECT(name) # Query without FROM??? for table in sample_tables: print(q.FROM(table).getq()) # Now it's complete query print() Here is a solution to this. **Sub query reference** feature and **Auto alias** feature is used. ## 2.3 I WANT MORE, MORE SIMPLE QUERY!!! BQX have **SELECT chain** feature for simplification. Literally you *can* chain SELECT clauses and omit FROM clauses. Here is another example which provides identical query shown above, with shorter code.
0.375477
0.887644
# 07.03 - NEURAL NETWORKS ``` !wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py import init; init.init(force_download=False); init.get_weblink() import numpy as np import matplotlib.pyplot as plt from local.lib import mlutils from IPython.display import Image %matplotlib inline ``` ## Reinterpreting logistic regression observe how we can represent the same logistic regression expression we saw before. ``` ## KEEPOUTPUT Image("local/imgs/logreg.png", width=600) ``` This also goes by the weird name of **[The Perceptron](https://en.wikipedia.org/wiki/Perceptron)** Recall that **matrix and vector** multiplication and `numpy`**broadcasting** are very convenient to make this operation for a single element or for the full dataset. $$\hat{y}^{(i)} = \text{sigm}(\mathbf{x}^{(i)}\cdot \mathbf{w} + b)$$ $$\hat{\mathbf{y}} = \text{sigm}(\mathbf{X} \cdot \mathbf{w} + b)$$ with $$\text{sigm}(z)=\frac{1}{1+e^{-z}}$$ See and understand this with the following random dataset ($\mathbf{X}$) and parameters ($\theta$). $m$ is the number of data items we have (rows), and $n$ is the number of attributes per data point (columns). This can be seen as a **EXTREME SIMPLIFICATION** of a [biological neuron](https://en.wikipedia.org/wiki/Neuron). ``` ## KEEPOUTPUT m,n = 1000,5 X = np.random.normal(size=(m,n)) t = np.random.normal(size=n) b = np.random.normal() sigmoid = lambda z: 1/(1+np.exp(-z)) print ("X\n",X) print ("\n\nt\n", t) print ("\n\nb\n", b) X.shape, t.shape ``` logistic regression prediction in one line of code for the full dataset ``` ## KEEPOUTPUT y_hat = sigmoid(X.dot(t)+b) print (y_hat[:10]) y_hat.shape ``` ## Neural networks $\rightarrow$ Assembling logistic regression units we can have several perceptrons together from the same input ``` ## KEEPOUTPUT Image("local/imgs/mlp.png", width=600) ``` what are the sizes of the symbols above? **HINT**: Matrix multiplication has to match up. - $\mathbf{x}^{(i)} \;\;\;\in \mathbb{R}^n$ - $\mathbf{W}^0 \;\;\;\in \mathbb{R}^{n\times 3}\rightarrow\;\;$each column contains the weights of one logistic regression unit. - $\mathbf{b}^0 \;\;\;\;\in \mathbb{R}^3\;\;\;\rightarrow\;\;$one per logistic regression unit. - $\mathbf{z}^{(i)[0]} \in \mathbb{R}^3\;\;\;\rightarrow\;\;$one output per logistic regression unit. - $\mathbf{W}^1 \;\;\in \mathbb{R}^{3}\;\;\;\rightarrow\;\;$this is a regular logistic regression unit, but its input comes from the previous layer. - $\mathbf{b}^1 \; \;\;\in \mathbb{R}\;\;\;\;\;\rightarrow\;\;$like a regular logistic regression unit. 
- $\hat{y}^{(i)}\; \;\in \mathbb{R}\;\;\;\;\;\rightarrow\;\;$the network output. We can have different activation functions: - $\mathbf{z}^{(i)[0]} = \text{tanh}(\mathbf{x}^{(i)}\cdot \mathbf{W}^0+\mathbf{b}^0)$ - $\mathbf{z}^{(i)[1]} = \hat{\mathbf{y}}^{(i)} = \text{sigm}(\mathbf{z}^{(i)[0]}\cdot \mathbf{W}^1+\mathbf{b}^1)$ ``` sigmoid = lambda z: 1/(1+np.exp(-z)) relu = lambda z: z*(z>0) ## KEEPOUTPUT xr = np.linspace(-10,10,100) plt.figure(figsize=(15,3)) plt.subplot(131) plt.axhline(0, color="black") plt.axvline(0, color="black") plt.plot(xr, sigmoid(xr) ,lw=2) plt.grid(); plt.title("sigmoid function") plt.subplot(132) plt.axhline(0, color="black") plt.axvline(0, color="black") plt.plot(xr, np.tanh(xr) ,lw=2) plt.grid(); plt.title("tanh function") plt.subplot(133) plt.axhline(0, color="black") plt.axvline(0, color="black") plt.plot(xr, relu(xr) ,lw=2) plt.grid(); plt.title("relu function") ``` in general - $\text{sigm}$ is good for output units (can be interpreted as probability) - $\text{tanh}$ is good for hidden layers in small networks (neg and pos contributions) - $\text{relu}$ is good for hidden layers in large (deep learning) networks (easier to train) **Observe** the capacity of neural networks to create classification frontiers. The following set of weights was obtained **AFTER** training a neural network with the [sklearn moons](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html) dataset: - input data has two columns - the hidden layer has four neurons (logistic units) Try to understand **the shapes** as we are computing ALL predictions for a dataset **SIMULTANEOUSLY**, using `numpy` vectorized operations. ``` ## KEEPOUTPUT b0,b1,W0,W1 = (np.array([-12.89987776, 10.35173209, 11.65978321, -7.55016811]), -17.36405931876728, np.array([[19.04548787, -8.65065699, 14.28282749, -9.44291219], [15.44773976, 5.09753522, -3.12074945, 10.5002505 ]]), np.array([-42.17763359,-34.87459471, 7.21432064,-36.52606503])) print ("W0:\n",W0) print ("\nb0:\n",b0) print ("\nW1:\n",W1) print ("\nb1:\n",b1) W0.shape, b0.shape, W1.shape, type(b1) ## KEEPOUTPUT from sklearn.datasets import make_moons X,y = make_moons(300, noise=.15) plt.scatter(X[:,0][y==0], X[:,1][y==0], color="blue", label="class 0", alpha=.5) plt.scatter(X[:,0][y==1], X[:,1][y==1], color="red", label="class 1", alpha=.5) ``` This is the **NEURAL NETWORK** prediction function. Observe that the output is a sigmoid function and we convert it into a {0,1} classification prediction by simply thresholding it at 0.5. See the sigmoid function graph above to understand why. ``` predict = lambda X: (sigmoid(np.tanh(X.dot(W0)+b0).dot(W1)+b1)>.5).astype(int) from local.lib import mlutils ## KEEPOUTPUT mlutils.plot_2Ddata_with_boundary(predict, X, y) ```
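As a quick check of the shapes discussed above, the forward pass can be unrolled layer by layer. This is a small sketch reusing `X`, `y`, `W0`, `b0`, `W1`, `b1` and `sigmoid` exactly as defined above:

```
# hidden layer: (300, 2) @ (2, 4) + (4,)  -> (300, 4)
z0 = np.tanh(X.dot(W0) + b0)
# output layer: (300, 4) @ (4,) + scalar -> (300,)
y_hat = sigmoid(z0.dot(W1) + b1)
print(X.shape, z0.shape, y_hat.shape)

# fraction of moons points the pretrained weights classify correctly
print("accuracy:", ((y_hat > .5).astype(int) == y).mean())
```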
github_jupyter
!wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1/main/content/init.py import init; init.init(force_download=False); init.get_weblink() import numpy as np import matplotlib.pyplot as plt from local.lib import mlutils from IPython.display import Image %matplotlib inline ## KEEPOUTPUT Image("local/imgs/logreg.png", width=600) ## KEEPOUTPUT m,n = 1000,5 X = np.random.normal(size=(m,n)) t = np.random.normal(size=n) b = np.random.normal() sigmoid = lambda z: 1/(1+np.exp(-z)) print ("X\n",X) print ("\n\nt\n", t) print ("\n\nb\n", b) X.shape, t.shape ## KEEPOUTPUT y_hat = sigmoid(X.dot(t)+b) print (y_hat[:10]) y_hat.shape ## KEEPOUTPUT Image("local/imgs/mlp.png", width=600) sigmoid = lambda z: 1/(1+np.exp(-z)) relu = lambda z: z*(z>0) ## KEEPOUTPUT xr = np.linspace(-10,10,100) plt.figure(figsize=(15,3)) plt.subplot(131) plt.axhline(0, color="black") plt.axvline(0, color="black") plt.plot(xr, sigmoid(xr) ,lw=2) plt.grid(); plt.title("sigmoid function") plt.subplot(132) plt.axhline(0, color="black") plt.axvline(0, color="black") plt.plot(xr, np.tanh(xr) ,lw=2) plt.grid(); plt.title("tanh function") plt.subplot(133) plt.axhline(0, color="black") plt.axvline(0, color="black") plt.plot(xr, relu(xr) ,lw=2) plt.grid(); plt.title("relu function") ## KEEPOUTPUT b0,b1,W0,W1 = (np.array([-12.89987776, 10.35173209, 11.65978321, -7.55016811]), -17.36405931876728, np.array([[19.04548787, -8.65065699, 14.28282749, -9.44291219], [15.44773976, 5.09753522, -3.12074945, 10.5002505 ]]), np.array([-42.17763359,-34.87459471, 7.21432064,-36.52606503])) print ("W0:\n",W0) print ("\nb0:\n",b0) print ("\nW1:\n",W1) print ("\nb1:\n",b1) W0.shape, b0.shape, W1.shape, type(b1) ## KEEPOUTPUT from sklearn.datasets import make_moons X,y = make_moons(300, noise=.15) plt.scatter(X[:,0][y==0], X[:,1][y==0], color="blue", label="class 0", alpha=.5) plt.scatter(X[:,0][y==1], X[:,1][y==1], color="red", label="class 1", alpha=.5) predict = lambda X: (sigmoid(np.tanh(X.dot(W0)+b0).dot(W1)+b1)>.5).astype(int) from local.lib import mlutils ## KEEPOUTPUT mlutils.plot_2Ddata_with_boundary(predict, X, y)
0.367384
0.92976
# Style Transfer on ONNX Models with OpenVINO ![Neural Style Transfer network output](https://user-images.githubusercontent.com/77325899/147354137-4fc9e79e-0195-4927-9608-0e3f17973d75.png) This notebook demonstrates [Fast Neural Style Transfer](https://github.com/onnx/models/tree/master/vision/style_transfer/fast_neural_style) on ONNX models with OpenVINO. Style Transfer models mix the content of an image with the style of another image. For this notebook, we use five pretrained models, for the following styles: Mosaic, Rain Princess, Candy, Udnie and Pointilism. The models are from the [ONNX Model Repository](https://github.com/onnx/models) and are based on the research paper [Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https://arxiv.org/abs/1603.08155) by Justin Johnson, Alexandre Alahi and Li Fei-Fei. ## Preparation ### Imports ``` import sys from enum import Enum from pathlib import Path import cv2 import matplotlib.pyplot as plt import numpy as np from IPython.display import HTML, FileLink, clear_output, display from openvino.runtime import Core, PartialShape from yaspin import yaspin sys.path.append("../utils") from notebook_utils import download_file ``` ### Download Models The `Style` Enum lists the supported styles with url, title and model path properties. Models for all supported styles will be downloaded to `MODEL_DIR` if they have not been downloaded before. ``` BASE_URL = "https://github.com/onnx/models/raw/main/vision/style_transfer/fast_neural_style/model" MODEL_DIR = "model" class Style(Enum): MOSAIC = "mosaic" RAIN_PRINCESS = "rain-princess" CANDY = "candy" UDNIE = "udnie" POINTILISM = "pointilism" def __init__(self, *args): self.model_path = Path(f"{self.value}-9.onnx") self.title = self.value.replace("-", " ").title() self.url = f"{BASE_URL}/{self.model_path}" for style in Style: if not Path(f"{MODEL_DIR}/{style.model_path}").exists(): download_file(style.url, directory=MODEL_DIR) ``` ### Load Image Load an image with OpenCV and convert it to RGB. The style transfer model will be resized to the image shape. This gives the most detailed results, but for larger images, inference will take longer and use more memory. The `resize_to_max` function optionally resizes the image to a maximum size. ``` IMAGE_FILE = "data/coco_square.jpg" image = cv2.cvtColor(cv2.imread(IMAGE_FILE), cv2.COLOR_BGR2RGB) def resize_to_max(image: np.ndarray, max_side: int) -> np.ndarray: """ Resize image to an image where the largest side has a maximum length of max_side while keeping aspect ratio. Example: if an original image has width and height of (1000, 500) and max_side is 300, the resized image will have a width and height of (300, 150). :param image: Array of image to resize :param max_side: Maximum length of largest image side :return: Resized image """ if max(image.shape) <= max_side: new_image = image else: index = np.argmax(image.shape) factor = max_side / image.shape[index] height, width = image.shape[:2] new_height, new_width = int(factor * height), int(factor * width) new_image = cv2.resize(image, (new_width, new_height)) return new_image # Uncomment the line below to resize large images to a max side length to improve inference speed. # image = resize_to_max(image=image, max_side=1024) ``` ## Do Inference and Show Results For all five models: do inference, convert the result to an 8-bit image, show the results, and save the results to disk. ``` # Set SAVE_RESULTS to False to disable saving the result images. 
SAVE_RESULTS = True # find reasonable dimensions for matplotlib plot wh_ratio = image.shape[1] / image.shape[0] figwidth = 15 figheight = (figwidth * 0.75) // wh_ratio # Create matplotlib plot and show source image fig, ax = plt.subplots(2, 3, figsize=(figwidth, figheight)) axs = ax.ravel() axs[0].imshow(image) axs[0].set_title("Source Image") axs[0].axis("off") # Create Core instance, prepare output folder ie = Core() output_folder = Path("output") output_folder.mkdir(exist_ok=True) # Transpose input image to network dimensions and extract image name and shape input_image = np.expand_dims(image.transpose(2, 0, 1), axis=0) image_name = Path(IMAGE_FILE).stem image_shape_str = f"{image.shape[1]}x{image.shape[0]}" file_links = [] for i, style in enumerate(Style): # Load model and get model info model = ie.read_model(model=Path(MODEL_DIR) / style.model_path) input_key = list(model.inputs)[0] # Reshape network to image shape and load network to device model.reshape({input_key: PartialShape([1, 3, image.shape[0], image.shape[1]])}) compiled_model = ie.compile_model(model=model, device_name="CPU") output_key = list(compiled_model.outputs)[0] # Do inference with yaspin(text=f"Doing inference on {style.title} model") as sp: request = compiled_model.create_infer_request() request.infer(inputs={input_key.any_name: input_image}) result = request.get_output_tensor(output_key.index).data result = compiled_model([input_image])[output_key] sp.ok("✔") # Convert inference result to image shape and apply postprocessing # Postprocessing is described in the model documentation: # https://github.com/onnx/models/tree/master/vision/style_transfer/fast_neural_style result = result.squeeze().transpose(1, 2, 0) result = np.clip(result, 0, 255).astype(np.uint8) # Show the result axs[i + 1].imshow(result) axs[i + 1].set_title(style.title) axs[i + 1].axis("off") # Optionally save results to disk if SAVE_RESULTS: image_path = f"{image_name}_{style.model_path.stem}_{image_shape_str}.png" output_path = output_folder / image_path cv2.imwrite(str(output_path), cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) file_link = FileLink(output_path, result_html_prefix=f"{style.title} image: ") file_link.html_link_str = "<a href='%s' download>%s</a>" file_links.append(file_link) del model del compiled_model clear_output(wait=True) fig.tight_layout() plt.show() if SAVE_RESULTS: output_path = output_folder / f"{image_name}_{image_shape_str}_style_transfer.jpg" fig.savefig(str(output_path), dpi=300, bbox_inches="tight", pad_inches=0.1) file_link = FileLink(output_path, result_html_prefix="Overview image: ") file_link.html_link_str = "<a href='%s' download>%s</a>" file_links.append(file_link) display(HTML("Saved image files:")) for file_link in file_links: display(HTML(file_link._repr_html_())) ```
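Tying back to the earlier remark that larger images make inference slower and more memory-hungry, the effect of `resize_to_max` can be checked on a dummy array standing in for a real image (a small sketch; the array shape here is only an illustrative assumption):

```
dummy = np.zeros((1000, 500, 3), dtype=np.uint8)  # stand-in for a 1000x500 RGB image
resized = resize_to_max(image=dummy, max_side=300)
print(dummy.shape, "->", resized.shape)  # largest side capped at 300, aspect ratio preserved
```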
github_jupyter
import sys from enum import Enum from pathlib import Path import cv2 import matplotlib.pyplot as plt import numpy as np from IPython.display import HTML, FileLink, clear_output, display from openvino.runtime import Core, PartialShape from yaspin import yaspin sys.path.append("../utils") from notebook_utils import download_file BASE_URL = "https://github.com/onnx/models/raw/main/vision/style_transfer/fast_neural_style/model" MODEL_DIR = "model" class Style(Enum): MOSAIC = "mosaic" RAIN_PRINCESS = "rain-princess" CANDY = "candy" UDNIE = "udnie" POINTILISM = "pointilism" def __init__(self, *args): self.model_path = Path(f"{self.value}-9.onnx") self.title = self.value.replace("-", " ").title() self.url = f"{BASE_URL}/{self.model_path}" for style in Style: if not Path(f"{MODEL_DIR}/{style.model_path}").exists(): download_file(style.url, directory=MODEL_DIR) IMAGE_FILE = "data/coco_square.jpg" image = cv2.cvtColor(cv2.imread(IMAGE_FILE), cv2.COLOR_BGR2RGB) def resize_to_max(image: np.ndarray, max_side: int) -> np.ndarray: """ Resize image to an image where the largest side has a maximum length of max_side while keeping aspect ratio. Example: if an original image has width and height of (1000, 500) and max_side is 300, the resized image will have a width and height of (300, 150). :param image: Array of image to resize :param max_side: Maximum length of largest image side :return: Resized image """ if max(image.shape) <= max_side: new_image = image else: index = np.argmax(image.shape) factor = max_side / image.shape[index] height, width = image.shape[:2] new_height, new_width = int(factor * height), int(factor * width) new_image = cv2.resize(image, (new_width, new_height)) return new_image # Uncomment the line below to resize large images to a max side length to improve inference speed. # image = resize_to_max(image=image, max_side=1024) # Set SAVE_RESULTS to False to disable saving the result images. 
SAVE_RESULTS = True # find reasonable dimensions for matplotlib plot wh_ratio = image.shape[1] / image.shape[0] figwidth = 15 figheight = (figwidth * 0.75) // wh_ratio # Create matplotlib plot and show source image fig, ax = plt.subplots(2, 3, figsize=(figwidth, figheight)) axs = ax.ravel() axs[0].imshow(image) axs[0].set_title("Source Image") axs[0].axis("off") # Create Core instance, prepare output folder ie = Core() output_folder = Path("output") output_folder.mkdir(exist_ok=True) # Transpose input image to network dimensions and extract image name and shape input_image = np.expand_dims(image.transpose(2, 0, 1), axis=0) image_name = Path(IMAGE_FILE).stem image_shape_str = f"{image.shape[1]}x{image.shape[0]}" file_links = [] for i, style in enumerate(Style): # Load model and get model info model = ie.read_model(model=Path(MODEL_DIR) / style.model_path) input_key = list(model.inputs)[0] # Reshape network to image shape and load network to device model.reshape({input_key: PartialShape([1, 3, image.shape[0], image.shape[1]])}) compiled_model = ie.compile_model(model=model, device_name="CPU") output_key = list(compiled_model.outputs)[0] # Do inference with yaspin(text=f"Doing inference on {style.title} model") as sp: request = compiled_model.create_infer_request() request.infer(inputs={input_key.any_name: input_image}) result = request.get_output_tensor(output_key.index).data result = compiled_model([input_image])[output_key] sp.ok("✔") # Convert inference result to image shape and apply postprocessing # Postprocessing is described in the model documentation: # https://github.com/onnx/models/tree/master/vision/style_transfer/fast_neural_style result = result.squeeze().transpose(1, 2, 0) result = np.clip(result, 0, 255).astype(np.uint8) # Show the result axs[i + 1].imshow(result) axs[i + 1].set_title(style.title) axs[i + 1].axis("off") # Optionally save results to disk if SAVE_RESULTS: image_path = f"{image_name}_{style.model_path.stem}_{image_shape_str}.png" output_path = output_folder / image_path cv2.imwrite(str(output_path), cv2.cvtColor(result, cv2.COLOR_BGR2RGB)) file_link = FileLink(output_path, result_html_prefix=f"{style.title} image: ") file_link.html_link_str = "<a href='%s' download>%s</a>" file_links.append(file_link) del model del compiled_model clear_output(wait=True) fig.tight_layout() plt.show() if SAVE_RESULTS: output_path = output_folder / f"{image_name}_{image_shape_str}_style_transfer.jpg" fig.savefig(str(output_path), dpi=300, bbox_inches="tight", pad_inches=0.1) file_link = FileLink(output_path, result_html_prefix="Overview image: ") file_link.html_link_str = "<a href='%s' download>%s</a>" file_links.append(file_link) display(HTML("Saved image files:")) for file_link in file_links: display(HTML(file_link._repr_html_()))
0.610453
0.951051
``` !wget https://download.pytorch.org/tutorial/hymenoptera_data.zip -P data/ !unzip -d data data/hymenoptera_data.zip import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models from torchvision import transforms as T import numpy as np import matplotlib.pyplot as plt import time import os import copy plt.ion() data_transforms = { 'train': T.Compose([ T.RandomResizedCrop(224), T.RandomHorizontalFlip(), T.ToTensor(), T.Normalize( [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] ) ]), 'val': T.Compose([ T.Resize(256), T.CenterCrop(224), T.ToTensor(), T.Normalize( [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] ) ]) } data_dir = './data/hymenoptera_data' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def imshow(inp, title=None): inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.01) inputs, classes = next(iter(dataloaders['train'])) out = torchvision.utils.make_grid(inputs) imshow(out, title=[class_names[x] for x in classes]) def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_weights = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) for phase in ['train', 'val']: if phase == 'train': model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for inputs, labels in dataloaders[phase]: inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) if phase == 'train': loss.backward() optimizer.step() running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_weights = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) model.load_state_dict(best_model_weights) return model def visualize_model(model, num_images=6): was_training = model.training model.eval() images_so_far = 0 fig = plt.figure() with torch.no_grad(): for i, (inputs, labels) in enumerate(dataloaders['val']): inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _, preds = torch.max(outputs, 1) for j in range(inputs.size()[0]): images_so_far += 1 ax = plt.subplot(num_images//2, 2, images_so_far) ax.axis('off') ax.set_title('predicted: {}'.format(class_names[preds[j]])) imshow(inputs.cpu().data[j]) if images_so_far == num_images:
model.train(mode=was_training) return model.train(mode=was_training) model_ft = models.resnet18(pretrained=True) num_features = model_ft.fc.in_features model_ft.fc = nn.Linear(num_features, 2) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25) visualize_model(model_ft) model_conv = models.resnet18(pretrained=True) for param in model_conv.parameters(): param.requires_grad = False num_features = model_conv.fc.in_features model_conv.fc = nn.Linear(num_features, 2) model_conv = model_conv.to(device) criterion = nn.CrossEntropyLoss() optim_conv = optim.SGD(model_conv.parameters(), lr=0.001, momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optim_conv, step_size=7, gamma=0.1) model_conv = train_model(model_conv, criterion, optim_conv, exp_lr_scheduler, num_epochs=25) visualize_model(model_conv) ```
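The notebook ends here; below is a minimal sketch of single-image inference with the fine-tuned model. It assumes the validation images are `.jpg` files under `data/hymenoptera_data/val/` (the layout created by the unzip step above) and adds `glob`/`PIL` imports that are not used elsewhere in this notebook:

```
import glob
from PIL import Image

# pick any validation image; its true class is simply the name of its folder
img_path = glob.glob("data/hymenoptera_data/val/*/*.jpg")[0]
img = Image.open(img_path).convert("RGB")
x = data_transforms['val'](img).unsqueeze(0).to(device)  # same preprocessing as the val loader

model_ft.eval()
with torch.no_grad():
    pred = model_ft(x).argmax(dim=1).item()
print(img_path, "->", class_names[pred])
```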
github_jupyter
!wget https://download.pytorch.org/tutorial/hymenoptera_data.zip -P data/ !unzip -d data data/hymenoptera_data.zip import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import torchvision from torchvision import datasets, models from torchvision import transforms as T import numpy as np import matplotlib.pyplot as plt import time import os import copy plt.ion() data_transforms = { 'train': T.Compose([ T.RandomResizedCrop(224), T.RandomHorizontalFlip(), T.ToTensor(), T.Normalize( [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] ) ]), 'val': T.Compose([ T.Resize(256), T.CenterCrop(224), T.ToTensor(), T.Normalize( [0.485, 0.456, 0.406], [0.229, 0.224, 0.225] ) ]) } data_dir = './data/hymenoptera_data' image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']} dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4, shuffle=True, num_workers=4) for x in ['train', 'val']} dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']} class_names = image_datasets['train'].classes device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") def imshow(inp, title=None): inp = inp.numpy().transpose((1, 2, 0)) mean = np.array([0.485, 0.456, 0.406]) std = np.array([0.229, 0.224, 0.225]) inp = std * inp + mean inp = np.clip(inp, 0, 1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.01) inputs, classes = next(iter(dataloaders['train'])) out = torchvision.utils.make_grid(inputs) imshow(out, title=[class_names[x] for x in classes]) def train_model(model, criterion, optimizer, scheduler, num_epochs=25): since = time.time() best_model_weights = copy.deepcopy(model.state_dict()) best_acc = 0.0 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch, num_epochs - 1)) print('-' * 10) for phase in ['train', 'val']: if phase == 'train': model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for inputs, labels in dataloaders[phase]: inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) if phase == 'train': loss.backward() optimizer.step() running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_size[phase] print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc)) if phase == 'val' and epoch_acc > best_acc: best_acc = epoch_acc best_model_weights = copy.deepcopy(model.state_dict()) print() time_elapsed = time.time() - since print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60)) print('Best val Acc: {:4f}'.format(best_acc)) model.load_state_dict(best_model_weights) return model def visualize_model(model, num_images=6): was_training = model.training model.eval() images_so_far = 0 fig = plt.figure() with torch.no_grad(): for i, (inputs, labels) in enumerate(dataloaders['val']): inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _, preds = torch.max(outputs, 1) for j in range(inputs.size()[0]): images_so_far += 1 ax = plt.subplot(num_images//2, 2, images_so_far) ax.axis('off') ax.set_title('predicted: {}'.format(class_names[preds[j]])) imshow(inputs.cpu().data[j]) if images_so_far == num_images: model.train(mode=was_training) 
return model.train(mode=was_training) model_ft = models.resnet18(pretrained=True) num_features = model_ft.fc.in_features model_ft.fc = nn.Linear(num_features, 2) model_ft = model_ft.to(device) criterion = nn.CrossEntropyLoss() optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1) model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25) visualize_model(model_ft) model_conv = models.resnet18(pretrained=True) for param in model_conv.parameters(): param.requires_grad = False num_features = model_conv.fc.in_features model_conv.fc = nn.Linear(num_features, 2) model_conv = model_conv.to(device) criterion = nn.CrossEntropyLoss() optim_conv = optim.SGD(model_conv.parameters(), lr=0.001, momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optim_conv, step_size=7, gamma=0.1) model_conv = train_model(model_conv, criterion, optim_conv, exp_lr_scheduler, num_epochs=25) visualize_model(model_conv)
0.836688
0.817319
# Sentence Transformers 학습과 활용 본 노트북에서는 `klue/roberta-base` 모델을 **KLUE** 내 **STS** 데이터셋을 활용하여 모델을 훈련하는 예제를 다루게 됩니다. 학습을 통해 얻어질 `sentence-klue-roberta-base` 모델은 입력된 문장의 임베딩을 계산해 유사도를 예측하는데 사용할 수 있게 됩니다. 학습 과정 이후에는 간단한 예제 코드를 통해 모델이 어떻게 활용되는지도 함께 알아보도록 할 것입니다. 모든 소스 코드는 [`sentence-transformers`](https://github.com/UKPLab/sentence-transformers) 원 라이브러리를 참고하였습니다. 먼저, 노트북을 실행하는데 필요한 라이브러리를 설치합니다. 모델 훈련을 위해서는 `sentence-transformers`가, 학습 데이터셋 로드를 위해서는 `datasets` 라이브러리의 설치가 필요합니다. ``` !pip install sentence-transformers datasets ``` ## Sentence Transformers 학습 노트북을 실행하는데 필요한 라이브러리들을 모두 임포트합니다. ``` import math import logging from datetime import datetime import torch from torch.utils.data import DataLoader from datasets import load_dataset from sentence_transformers import SentenceTransformer, LoggingHandler, losses, models, util from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator from sentence_transformers.readers import InputExample ``` 학습 경과를 지켜보는데 사용될 *logger* 를 초기화합니다. ``` logging.basicConfig( format="%(asctime)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO, handlers=[LoggingHandler()], ) ``` 학습에 필요한 정보를 변수로 기록합니다. 본 노트북에서는 `klue-roberta-base` 모델을 활용하지만, https://huggingface.co/klue 페이지에서 더 다양한 사전학습 언어 모델을 확인하실 수 있습니다. ``` model_name = "klue/roberta-base" ``` 모델 정보 외에도 학습에 필요한 하이퍼 파라미터를 정의합니다. ``` train_batch_size = 32 num_epochs = 4 model_save_path = "output/training_klue_sts_" + model_name.replace("/", "-") + "-" + datetime.now().strftime("%Y-%m-%d_%H-%M-%S") ``` 앞서 정의한 사전학습 언어 모델을 로드합니다. `sentence-transformers`는 HuggingFace의 `transformers`와 호환이 잘 이루어지고 있기 때문에, [모델 허브](https://huggingface.co/models)에 올라와있는 대부분의 언어 모델을 임베딩을 추출할 *Embedder* 로 사용할 수 있습니다. ``` embedding_model = models.Transformer(model_name) ``` *Embedder* 에서 추출된 토큰 단위 임베딩들을 가지고 문장 임베딩을 어떻게 계산할 것인지를 결정하는 *Pooler* 를 정의합니다. 여러 Pooling 기법이 있겠지만, 예제 노트북에서는 **Mean Pooling**을 사용하기로 합니다. **Mean Pooling**이란 모델이 반환한 모든 토큰 임베딩을 더해준 후, 더해진 토큰 개수만큼 나누어 문장을 대표하는 임베딩으로 사용하는 기법을 의미합니다. ``` pooler = models.Pooling( embedding_model.get_word_embedding_dimension(), pooling_mode_mean_tokens=True, pooling_mode_cls_token=False, pooling_mode_max_tokens=False, ) ``` *Embedder* 와 *Pooler* 를 정의했으므로, 이 두 모듈로 구성된 하나의 모델을 정의합니다. `modules`에 입력으로 들어가는 모듈이 순차적으로 임베딩 과정에 사용이 된다고 생각하시면 됩니다. ``` model = SentenceTransformer(modules=[embedding_model, pooler]) ``` 이제 학습에 사용될 KLUE STS 데이터셋을 다운로드 및 로드합니다. ``` datasets = load_dataset("klue", "sts") ``` 다운로드 혹은 로드 후 얻어진 `datasets` 객체를 살펴보면, 훈련 데이터와 검증 데이터가 포함되어 있는 것을 확인할 수 있습니다. ``` datasets ``` 각 예시 데이터는 아래와 같이 두 개의 문장과 두 문장의 유사도를 라벨로 지니고 있습니다. ``` datasets["train"][0] ``` 이제 테스트에 활용할 데이터를 얻어야 할 차례입니다. 위에서 살펴본 바와 같이 KLUE 내 STS 데이터셋은 테스트 데이터셋을 포함하고 있지 않습니다. 따라서 실습의 원활한 진행을 위해 다른 벤치마크 STS 데이터셋인 KorSTS 데이터셋을 다운로드 및 로드하여 사용하도록 하겠습니다. (\* 두 데이터셋은 제작 과정이 엄밀히 다르므로, KLUE STS 데이터에 대해 학습된 모델이 KorSTS 테스트셋에 대해 기록하는 점수은 사실상 큰 의미가 없을 수 있습니다. 전체적인 훈련 프로세스의 이해를 돕기 위해 사용한다고 생각해주시는게 좋습니다.) ``` testsets = load_dataset("kor_nlu", "sts") ``` KorSTS 데이터셋은 훈련, 검증 그리고 테스트셋을 지니고 있습니다. ``` testsets ``` KorSTS의 예시 데이터도 마찬가지로 두 문장과 두 문장 간 유사도를 지니고 있습니다. ``` testsets["test"][0] ``` 이제 앞서 얻어진 데이터셋을 `sentence-transformers` 훈련 양식에 맞게 변환해주는 작업을 거쳐야 합니다. 두 데이터 모두 0점에서 5점 사이의 값으로 유사도가 기록되었기 때문에, 0.0 ~ 1.0 스케일로 정규화를 시켜주는 작업을 거치게 됩니다. (\* KorSTS 내 테스트셋의 경우 `None`으로 기록된 문장이 몇 개 존재하여, `None`을 걸러주는 조건이 추가되었습니다.) 
``` train_samples = [] dev_samples = [] test_samples = [] # KLUE STS 내 훈련, 검증 데이터 예제 변환 for phase in ["train", "validation"]: examples = datasets[phase] for example in examples: score = float(example["labels"]["label"]) / 5.0 # 0.0 ~ 1.0 스케일로 유사도 정규화 inp_example = InputExample( texts=[example["sentence1"], example["sentence2"]], label=score, ) if phase == "validation": dev_samples.append(inp_example) else: train_samples.append(inp_example) # KorSTS 내 테스트 데이터 예제 변환 for example in testsets["test"]: score = float(example["score"]) / 5.0 if example["sentence1"] and example["sentence2"]: inp_example = InputExample( texts=[example["sentence1"], example["sentence2"]], label=score, ) test_samples.append(inp_example) ``` 앞선 로직을 통해 각 데이터 예제는 다음과 같이 `InputExample` 객체로 변환되게 됩니다. ``` train_samples[0].texts, train_samples[0].label test_samples[0].texts, test_samples[0].label ``` 이제 학습에 사용될 `DataLoader`와 **Loss**를 설정해주도록 합니다. `CosineSimilarityLoss`는 입력된 두 문장의 임베딩 간 코사인 유사도와 골드 라벨 간 차이를 통해 계산되게 됩니다. ``` train_dataloader = DataLoader( train_samples, shuffle=True, batch_size=train_batch_size, ) train_loss = losses.CosineSimilarityLoss(model=model) ``` 모델 검증에 활용할 **Evaluator** 를 정의해줍니다. 앞서 얻어진 검증 데이터를 활용하여, 모델의 문장 임베딩 간 코사인 유사도가 얼마나 골드 라벨에 가까운지 계산하는 역할을 수행합니다. ``` evaluator = EmbeddingSimilarityEvaluator.from_input_examples( dev_samples, name="sts-dev", ) ``` 모델 학습에 사용될 **Warm up Steps**를 설정합니다. 다양한 방법으로 스텝 수를 결정할 수 있겠지만, 예제 노트북에서는 원 예제 코드를 따라 훈련 배치 수의 10% 만큼으로 값을 설정합니다. ``` warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up logging.info(f"Warmup-steps: {warmup_steps}") ``` 이제 앞서 얻어진 객체, 값들을 가지고 모델의 훈련을 진행합니다. `sentence-transformers`에서는 다음과 같이 `fit` 함수를 통해 간단히 모델의 훈련과 검증이 가능합니다. 훈련 과정을 통해 매 에폭 마다 얻어지는 체크포인트에 대해 *Evaluator* 가 학습된 모델의 코사인 유사도와 골드 라벨 간 피어슨, 스피어만 상관 계수를 계산해 기록을 남기게 됩니다. ``` model.fit( train_objectives=[(train_dataloader, train_loss)], evaluator=evaluator, epochs=num_epochs, evaluation_steps=1000, warmup_steps=warmup_steps, output_path=model_save_path, ) ``` 학습이 완료되었다면 이제 학습된 모델을 테스트 할 시간입니다. 앞서 KorSTS 데이터를 활용해 구축한 테스트 데이터셋을 앞서와 마찬가지로 *Evaluator* 로 초기화해주도록 합니다. ``` model = SentenceTransformer(model_save_path) test_evaluator = EmbeddingSimilarityEvaluator.from_input_examples(test_samples, name='sts-test') ``` 이제 테스트 *Evaluator* 를 활용하여 테스트셋에 대해 각 상관 계수를 계산하도록 합니다. ``` test_evaluator(model, output_path=model_save_path) ``` 역시 검증 데이터에 비해 좋지 않은 점수를 기록하였습니다. KLUE 내 검증 데이터셋 중 일부를 샘플링하여 테스트셋으로 활용하는 방안도 있겠지만, 본 노트북은 전체 훈련 프로세스를 파악하는데 초점을 맞추었으므로 실험을 마치도록 합니다. ## Sentence Transformers 활용 ### 시맨틱 서치 입력된 문장 간 유사도를 쉽고 빠르게 구할 수 있도록 설계된 `sentence-transformers`를 이용한다면 임베딩을 활용해 다양한 어플리케이션을 고안할 수 있습니다. 먼저 여러 문장 후보군이 주어졌을 때, 입력된 문장과 가장 유사한 문장을 계산하는 예제를 살펴보도록 합시다. 이를 위해 검색의 대상이 되는 문장 후보군을 다음과 같이 정의할 필요가 있습니다. 이후, 정의된 문장 후보군을 미리 임베딩합니다. ``` docs = [ "1992년 7월 8일 손흥민은 강원도 춘천시 후평동에서 아버지 손웅정과 어머니 길은자의 차남으로 태어나 그곳에서 자랐다.", "형은 손흥윤이다.", "춘천 부안초등학교를 졸업했고, 춘천 후평중학교에 입학한 후 2학년때 원주 육민관중학교 축구부에 들어가기 위해 전학하여 졸업하였으며, 2008년 당시 FC 서울의 U-18팀이었던 동북고등학교 축구부에서 선수 활동 중 대한축구협회 우수선수 해외유학 프로젝트에 선발되어 2008년 8월 독일 분데스리가의 함부르크 유소년팀에 입단하였다.", "함부르크 유스팀 주전 공격수로 2008년 6월 네덜란드에서 열린 4개국 경기에서 4게임에 출전, 3골을 터뜨렸다.", "1년간의 유학 후 2009년 8월 한국으로 돌아온 후 10월에 개막한 FIFA U-17 월드컵에 출전하여 3골을 터트리며 한국을 8강으로 이끌었다.", "그해 11월 함부르크의 정식 유소년팀 선수 계약을 체결하였으며 독일 U-19 리그 4경기 2골을 넣고 2군 리그에 출전을 시작했다.", "독일 U-19 리그에서 손흥민은 11경기 6골, 2부 리그에서는 6경기 1골을 넣으며 재능을 인정받아 2010년 6월 17세의 나이로 함부르크의 1군 팀 훈련에 참가, 프리시즌 활약으로 함부르크와 정식 계약을 한 후 10월 18세에 함부르크 1군 소속으로 독일 분데스리가에 데뷔하였다.", ] document_embeddings = model.encode(docs) ``` 이제 입력 문장을 임베딩 할 차례입니다. 
``` query = "손흥민은 어린 나이에 유럽에 진출하였다." query_embedding = model.encode(query) ``` 아래는 입력된 문장의 임베딩과 미리 임베딩 된 후보군 문장 임베딩 간 유사도를 계산해 유사도가 높은 순서대로 `top_k` 개 문장을 뽑아주는 예제 코드입니다. `top_k`는 전체 문장 후보군의 개수를 넘지 않아야 하므로, `min()` 함수를 통해 예외 처리를 해줍니다. ``` top_k = min(5, len(docs)) # 입력 문장 - 문장 후보군 간 코사인 유사도 계산 후, cos_scores = util.pytorch_cos_sim(query_embedding, document_embeddings)[0] # 코사인 유사도 순으로 `top_k` 개 문장 추출 top_results = torch.topk(cos_scores, k=top_k) print(f"입력 문장: {query}") print(f"\n<입력 문장과 유사한 {top_k} 개의 문장>\n") for i, (score, idx) in enumerate(zip(top_results[0], top_results[1])): print(f"{i+1}: {docs[idx]} {'(유사도: {:.4f})'.format(score)}\n") ``` 반환된 문장 중 `유럽`, `어린 나이` 등의 키워드가 없음애도 높은 유사도로 두 문장이 반환된 것을 확인할 수 있습니다. ### 클러스터링 `sentence-transformers`를 통해 얻어진 임베딩을 활용해 클러스터링을 수행할 수도 있습니다. 다양한 클러스터링 기법의 적용이 가능하겠지만, 본 노트북에서는 **k-Means** 클러스터링을 수행한 결과를 살펴보도록 합니다. 예제 수행을 위해 `scikit-learn`의 설치가 추가로 필요합니다. ``` # !pip install scikit-learn from sklearn.cluster import KMeans ``` 마찬가지로 앞서 구축한 문장 후보군들에 대해 임베딩을 수행합니다. 이후, `num_clusters` 변수를 통해 클러스터의 개수를 설정한 후 임베딩을 활용한 **k-Means** 클러스터링을 수행하도록 합니다. ``` document_embeddings = model.encode(docs) num_clusters = 3 k_means = KMeans(n_clusters=num_clusters) k_means.fit(document_embeddings) ``` 이제 클러스터링을 통해 각 문장이 어떤 클러스터에 포함되었는지 확인해봅시다. ``` cluster_assignment = k_means.labels_ cluster_assignment ``` 클러스터링 결과를 토대로 각 문장을 클러스터로 분리한 후, 결과를 출력합니다. ``` # 클러스터 개수 만큼 문장을 담을 리스트 초기화 clustered_sentences = [[] for _ in range(num_clusters)] # 클러스터링 결과를 돌며 각 클러스터에 맞게 문장 삽입 for sentence_id, cluster_id in enumerate(cluster_assignment): clustered_sentences[cluster_id].append(docs[sentence_id]) for i, cluster in enumerate(clustered_sentences): result = "\n".join(cluster) print(f"< 클러스터 {i+1} >\n{result}\n") ``` 국내 관련 문장과 해외 관련 문장, 가족 관계와 같은 느낌으로 클러스터가 형성된 것을 확인할 수 있습니다. 지금까지 `sentence-transformers`를 학습하는 과정을 KLUE STS 데이터셋을 통해 알아보았습니다. `sentence-transformers`는 다양한 문장 임베딩 기법과 이를 활용한 응용 사례에 대해서 크게 고민하는 **UKPLab**에서 관리되는 라이브러리이니 만큼 앞으로 더 발전하고 관리될 가능성이 높은 도구입니다. 본 노트북을 통해 습득한 지식이 여러분의 업무와 학습에 도움이 되었으면 좋겠습니다. ``` 허 훈 (huffonism@gmail.com) ``` APPENDIX: 앞서 학습된 모델을 HuggingFace 모델 허브에 업로드하였으니, 아래 예제와 같이 사용이 가능합니다. ``` import torch from sentence_transformers import SentenceTransformer, util model = SentenceTransformer("Huffon/sentence-klue-roberta-base") docs = [ "1992년 7월 8일 손흥민은 강원도 춘천시 후평동에서 아버지 손웅정과 어머니 길은자의 차남으로 태어나 그곳에서 자랐다.", "형은 손흥윤이다.", "춘천 부안초등학교를 졸업했고, 춘천 후평중학교에 입학한 후 2학년때 원주 육민관중학교 축구부에 들어가기 위해 전학하여 졸업하였으며, 2008년 당시 FC 서울의 U-18팀이었던 동북고등학교 축구부에서 선수 활동 중 대한축구협회 우수선수 해외유학 프로젝트에 선발되어 2008년 8월 독일 분데스리가의 함부르크 유소년팀에 입단하였다.", "함부르크 유스팀 주전 공격수로 2008년 6월 네덜란드에서 열린 4개국 경기에서 4게임에 출전, 3골을 터뜨렸다.", "1년간의 유학 후 2009년 8월 한국으로 돌아온 후 10월에 개막한 FIFA U-17 월드컵에 출전하여 3골을 터트리며 한국을 8강으로 이끌었다.", "그해 11월 함부르크의 정식 유소년팀 선수 계약을 체결하였으며 독일 U-19 리그 4경기 2골을 넣고 2군 리그에 출전을 시작했다.", "독일 U-19 리그에서 손흥민은 11경기 6골, 2부 리그에서는 6경기 1골을 넣으며 재능을 인정받아 2010년 6월 17세의 나이로 함부르크의 1군 팀 훈련에 참가, 프리시즌 활약으로 함부르크와 정식 계약을 한 후 10월 18세에 함부르크 1군 소속으로 독일 분데스리가에 데뷔하였다.", ] document_embeddings = model.encode(docs) query = "손흥민은 어린 나이에 유럽에 진출하였다." query_embedding = model.encode(query) top_k = min(5, len(docs)) # 입력 문장 - 문장 후보군 간 코사인 유사도 계산 후, cos_scores = util.pytorch_cos_sim(query_embedding, document_embeddings)[0] # 코사인 유사도 순으로 `top_k` 개 문장 추출 top_results = torch.topk(cos_scores, k=top_k) print(f"입력 문장: {query}") print(f"\n<입력 문장과 유사한 {top_k} 개의 문장>\n") for i, (score, idx) in enumerate(zip(top_results[0], top_results[1])): print(f"{i+1}: {docs[idx]} {'(유사도: {:.4f})'.format(score)}\n") ```
# VacationPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.

```
%matplotlib widget
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os

# Import API key
from api_keys import g_key

gmaps.configure(api_key = g_key)
```

### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame

```
weather_data = pd.read_csv("output_data/cities.csv")
weather_data
```

### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.

```
fig = gmaps.figure()

#Set locations for the heat map.
locations = weather_data[["Lat", "Lng"]]

#Set humidity as weight for the heat map.
humidity = weather_data["Humidity"]

#Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity, max_intensity=10, point_radius=1, dissipating=False)

fig.add_layer(heat_layer)

fig
```

### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows with null values.

```
new_weather_data = weather_data.loc[(weather_data["Cloudiness"] == 0) \
                                    & (weather_data["Humidity"] < 65) \
                                    & (weather_data["Max Temp"] > 70) & (weather_data["Max Temp"] < 80) \
                                    & (weather_data["Wind Speed"] < 10)]

# Drop rows with null values and keep the filtered result
new_weather_data = new_weather_data.dropna()
new_weather_data
```

### Hotel Map
* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels within 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.

```
hotel_df = new_weather_data.filter(["City", "Country", "Lat", "Lng"])
hotel_df["Hotel Name"] = ""
hotel_df

#Set params (the key must be the imported g_key variable, not the literal string "g_key")
params = {"radius": 5000, "types": "lodging", "key": g_key}

#Loop through hotel_df and find hotels in each city:
for index, row in hotel_df.iterrows():

    base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"

    lat = row["Lat"]
    lng = row["Lng"]

    #Change locations while keeping the params same
    params["location"] = f"{lat},{lng}"

    #make request
    name_address = requests.get(base_url, params=params).json()

    try:
        hotel_df.loc[index, "Hotel Name"] = name_address["results"][0]["name"]
    except (KeyError, IndexError):
        print("Missing field/result... skipping")

# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]

# Add marker layer ontop of heat map
marker_layer = gmaps.marker_layer(locations, info_box_content = hotel_info)

fig.add_layer(marker_layer)

# Display Map
fig
```
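When the Places lookup above is run over a long city list, it can be worth being a little more defensive about HTTP failures and API quotas. The helper below is a hedged sketch built around the same Nearby Search endpoint, not part of the original homework solution; the function name and the one-second pause are illustrative choices, and it assumes the `g_key` imported earlier.

```
import time
import requests

def find_first_hotel(lat, lng, api_key, radius=5000):
    """Return the name of the first lodging result near (lat, lng), or None."""
    base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    params = {
        "location": f"{lat},{lng}",
        "radius": radius,
        "types": "lodging",
        "key": api_key,  # pass the actual key value, not the literal string "g_key"
    }
    response = requests.get(base_url, params=params)
    time.sleep(1)  # small pause between calls to stay well under the rate limit
    if response.status_code != 200:
        return None
    results = response.json().get("results", [])
    return results[0]["name"] if results else None

# Example usage with the DataFrame built above:
# hotel_df["Hotel Name"] = [find_first_hotel(r["Lat"], r["Lng"], g_key) for _, r in hotel_df.iterrows()]
```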
**Chapter 1 – The Machine Learning landscape** _This is the code used to generate some of the figures in chapter 1._ <table align="left"> <td> <a href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/01_the_machine_learning_landscape.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> </td> <td> <a target="_blank" href="https://kaggle.com/kernels/welcome?src=https://github.com/ageron/handson-ml2/blob/master/01_the_machine_learning_landscape.ipynb"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" /></a> </td> </table> # Code example 1-1 Although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead. ``` # Python ≥3.5 is required import sys assert sys.version_info >= (3, 5) # Scikit-Learn ≥0.20 is required import sklearn assert sklearn.__version__ >= "0.20" ``` This function just merges the OECD's life satisfaction data and the IMF's GDP per capita data. It's a bit too long and boring and it's not specific to Machine Learning, which is why I left it out of the book. ``` def prepare_country_stats(oecd_bli, gdp_per_capita): oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"] oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value") gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True) gdp_per_capita.set_index("Country", inplace=True) full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita, left_index=True, right_index=True) full_country_stats.sort_values(by="GDP per capita", inplace=True) remove_indices = [0, 1, 6, 8, 33, 34, 35] keep_indices = list(set(range(36)) - set(remove_indices)) return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices] ``` The code in the book expects the data files to be located in the current directory. I just tweaked it here to fetch the files in datasets/lifesat. 
``` import os datapath = os.path.join("datasets", "lifesat", "") # To plot pretty figures directly within Jupyter %matplotlib inline import matplotlib as mpl mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # Download the data import urllib.request DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/" os.makedirs(datapath, exist_ok=True) for filename in ("oecd_bli_2015.csv", "gdp_per_capita.csv"): print("Downloading", filename) url = DOWNLOAD_ROOT + "datasets/lifesat/" + filename urllib.request.urlretrieve(url, datapath + filename) # Code example import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.linear_model # Load the data oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',') gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',delimiter='\t', encoding='latin1', na_values="n/a") # Prepare the data country_stats = prepare_country_stats(oecd_bli, gdp_per_capita) X = np.c_[country_stats["GDP per capita"]] y = np.c_[country_stats["Life satisfaction"]] # Visualize the data country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction') plt.show() # Select a linear model model = sklearn.linear_model.LinearRegression() # Train the model model.fit(X, y) # Make a prediction for Cyprus X_new = [[22587]] # Cyprus' GDP per capita print(model.predict(X_new)) # outputs [[ 5.96242338]] ``` Replacing the Linear Regression model with k-Nearest Neighbors (in this example, k = 3) regression in the previous code is as simple as replacing these two lines: ```python import sklearn.linear_model model = sklearn.linear_model.LinearRegression() ``` with these two: ```python import sklearn.neighbors model = sklearn.neighbors.KNeighborsRegressor(n_neighbors=3) ``` ``` # Select a 3-Nearest Neighbors regression model import sklearn.neighbors model1 = sklearn.neighbors.KNeighborsRegressor(n_neighbors=3) # Train the model model1.fit(X,y) # Make a prediction for Cyprus print(model1.predict(X_new)) # outputs [[5.76666667]] ``` # Note: you can ignore the rest of this notebook, it just generates many of the figures in chapter 1. Create a function to save the figures. ``` # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "fundamentals" IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID) os.makedirs(IMAGES_PATH, exist_ok=True) def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300): path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension) print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format=fig_extension, dpi=resolution) ``` Make this notebook's output stable across runs: ``` np.random.seed(42) ``` # Load and prepare Life satisfaction data If you want, you can get fresh data from the OECD's website. Download the CSV from http://stats.oecd.org/index.aspx?DataSetCode=BLI and save it to `datasets/lifesat/`. ``` oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',') oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"] oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value") oecd_bli.head(2) oecd_bli["Life satisfaction"].head() ``` # Load and prepare GDP per capita data Just like above, you can update the GDP per capita data if you want. Just download data from http://goo.gl/j1MSKe (=> imf.org) and save it to `datasets/lifesat/`. 
``` gdp_per_capita = pd.read_csv(datapath+"gdp_per_capita.csv", thousands=',', delimiter='\t', encoding='latin1', na_values="n/a") gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True) gdp_per_capita.set_index("Country", inplace=True) gdp_per_capita.head(2) full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita, left_index=True, right_index=True) full_country_stats.sort_values(by="GDP per capita", inplace=True) full_country_stats full_country_stats[["GDP per capita", 'Life satisfaction']].loc["United States"] remove_indices = [0, 1, 6, 8, 33, 34, 35] keep_indices = list(set(range(36)) - set(remove_indices)) sample_data = full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices] missing_data = full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[remove_indices] sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3)) plt.axis([0, 60000, 0, 10]) position_text = { "Hungary": (5000, 1), "Korea": (18000, 1.7), "France": (29000, 2.4), "Australia": (40000, 3.0), "United States": (52000, 3.8), } for country, pos_text in position_text.items(): pos_data_x, pos_data_y = sample_data.loc[country] country = "U.S." if country == "United States" else country plt.annotate(country, xy=(pos_data_x, pos_data_y), xytext=pos_text, arrowprops=dict(facecolor='black', width=0.5, shrink=0.1, headwidth=5)) plt.plot(pos_data_x, pos_data_y, "ro") plt.xlabel("GDP per capita (USD)") save_fig('money_happy_scatterplot') plt.show() sample_data.to_csv(os.path.join("datasets", "lifesat", "lifesat.csv")) sample_data.loc[list(position_text.keys())] import numpy as np sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3)) plt.xlabel("GDP per capita (USD)") plt.axis([0, 60000, 0, 10]) X=np.linspace(0, 60000, 1000) plt.plot(X, 2*X/100000, "r") plt.text(40000, 2.7, r"$\theta_0 = 0$", fontsize=14, color="r") plt.text(40000, 1.8, r"$\theta_1 = 2 \times 10^{-5}$", fontsize=14, color="r") plt.plot(X, 8 - 5*X/100000, "g") plt.text(5000, 9.1, r"$\theta_0 = 8$", fontsize=14, color="g") plt.text(5000, 8.2, r"$\theta_1 = -5 \times 10^{-5}$", fontsize=14, color="g") plt.plot(X, 4 + 5*X/100000, "b") plt.text(5000, 3.5, r"$\theta_0 = 4$", fontsize=14, color="b") plt.text(5000, 2.6, r"$\theta_1 = 5 \times 10^{-5}$", fontsize=14, color="b") save_fig('tweaking_model_params_plot') plt.show() from sklearn import linear_model lin1 = linear_model.LinearRegression() Xsample = np.c_[sample_data["GDP per capita"]] ysample = np.c_[sample_data["Life satisfaction"]] lin1.fit(Xsample, ysample) t0, t1 = lin1.intercept_[0], lin1.coef_[0][0] t0, t1 sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3)) plt.xlabel("GDP per capita (USD)") plt.axis([0, 60000, 0, 10]) X=np.linspace(0, 60000, 1000) plt.plot(X, t0 + t1*X, "b") plt.text(5000, 3.1, r"$\theta_0 = 4.85$", fontsize=14, color="b") plt.text(5000, 2.2, r"$\theta_1 = 4.91 \times 10^{-5}$", fontsize=14, color="b") save_fig('best_fit_model_plot') plt.show() cyprus_gdp_per_capita = gdp_per_capita.loc["Cyprus"]["GDP per capita"] print(cyprus_gdp_per_capita) cyprus_predicted_life_satisfaction = lin1.predict([[cyprus_gdp_per_capita]])[0][0] cyprus_predicted_life_satisfaction sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3), s=1) plt.xlabel("GDP per capita (USD)") X=np.linspace(0, 60000, 1000) plt.plot(X, t0 + t1*X, "b") plt.axis([0, 60000, 0, 10]) plt.text(5000, 7.5, r"$\theta_0 = 4.85$", fontsize=14, 
color="b") plt.text(5000, 6.6, r"$\theta_1 = 4.91 \times 10^{-5}$", fontsize=14, color="b") plt.plot([cyprus_gdp_per_capita, cyprus_gdp_per_capita], [0, cyprus_predicted_life_satisfaction], "r--") plt.text(25000, 5.0, r"Prediction = 5.96", fontsize=14, color="b") plt.plot(cyprus_gdp_per_capita, cyprus_predicted_life_satisfaction, "ro") save_fig('cyprus_prediction_plot') plt.show() sample_data[7:10] (5.1+5.7+6.5)/3 backup = oecd_bli, gdp_per_capita def prepare_country_stats(oecd_bli, gdp_per_capita): oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"] oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value") gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True) gdp_per_capita.set_index("Country", inplace=True) full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita, left_index=True, right_index=True) full_country_stats.sort_values(by="GDP per capita", inplace=True) remove_indices = [0, 1, 6, 8, 33, 34, 35] keep_indices = list(set(range(36)) - set(remove_indices)) return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices] # Code example import matplotlib.pyplot as plt import numpy as np import pandas as pd import sklearn.linear_model # Load the data oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',') gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',delimiter='\t', encoding='latin1', na_values="n/a") # Prepare the data country_stats = prepare_country_stats(oecd_bli, gdp_per_capita) X = np.c_[country_stats["GDP per capita"]] y = np.c_[country_stats["Life satisfaction"]] # Visualize the data country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction') plt.show() # Select a linear model model = sklearn.linear_model.LinearRegression() # Train the model model.fit(X, y) # Make a prediction for Cyprus X_new = [[22587]] # Cyprus' GDP per capita print(model.predict(X_new)) # outputs [[ 5.96242338]] oecd_bli, gdp_per_capita = backup missing_data position_text2 = { "Brazil": (1000, 9.0), "Mexico": (11000, 9.0), "Chile": (25000, 9.0), "Czech Republic": (35000, 9.0), "Norway": (60000, 3), "Switzerland": (72000, 3.0), "Luxembourg": (90000, 3.0), } sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(8,3)) plt.axis([0, 110000, 0, 10]) for country, pos_text in position_text2.items(): pos_data_x, pos_data_y = missing_data.loc[country] plt.annotate(country, xy=(pos_data_x, pos_data_y), xytext=pos_text, arrowprops=dict(facecolor='black', width=0.5, shrink=0.1, headwidth=5)) plt.plot(pos_data_x, pos_data_y, "rs") X=np.linspace(0, 110000, 1000) plt.plot(X, t0 + t1*X, "b:") lin_reg_full = linear_model.LinearRegression() Xfull = np.c_[full_country_stats["GDP per capita"]] yfull = np.c_[full_country_stats["Life satisfaction"]] lin_reg_full.fit(Xfull, yfull) t0full, t1full = lin_reg_full.intercept_[0], lin_reg_full.coef_[0][0] X = np.linspace(0, 110000, 1000) plt.plot(X, t0full + t1full * X, "k") plt.xlabel("GDP per capita (USD)") save_fig('representative_training_data_scatterplot') plt.show() full_country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(8,3)) plt.axis([0, 110000, 0, 10]) from sklearn import preprocessing from sklearn import pipeline poly = preprocessing.PolynomialFeatures(degree=60, include_bias=False) scaler = preprocessing.StandardScaler() lin_reg2 = linear_model.LinearRegression() pipeline_reg = pipeline.Pipeline([('poly', poly), ('scal', scaler), ('lin', lin_reg2)]) pipeline_reg.fit(Xfull, yfull) 
curve = pipeline_reg.predict(X[:, np.newaxis]) plt.plot(X, curve) plt.xlabel("GDP per capita (USD)") save_fig('overfitting_model_plot') plt.show() full_country_stats.loc[[c for c in full_country_stats.index if "W" in c.upper()]]["Life satisfaction"] gdp_per_capita.loc[[c for c in gdp_per_capita.index if "W" in c.upper()]].head() plt.figure(figsize=(8,3)) plt.xlabel("GDP per capita") plt.ylabel('Life satisfaction') plt.plot(list(sample_data["GDP per capita"]), list(sample_data["Life satisfaction"]), "bo") plt.plot(list(missing_data["GDP per capita"]), list(missing_data["Life satisfaction"]), "rs") X = np.linspace(0, 110000, 1000) plt.plot(X, t0full + t1full * X, "r--", label="Linear model on all data") plt.plot(X, t0 + t1*X, "b:", label="Linear model on partial data") ridge = linear_model.Ridge(alpha=10**9.5) Xsample = np.c_[sample_data["GDP per capita"]] ysample = np.c_[sample_data["Life satisfaction"]] ridge.fit(Xsample, ysample) t0ridge, t1ridge = ridge.intercept_[0], ridge.coef_[0][0] plt.plot(X, t0ridge + t1ridge * X, "b", label="Regularized linear model on partial data") plt.legend(loc="lower right") plt.axis([0, 110000, 0, 10]) plt.xlabel("GDP per capita (USD)") save_fig('ridge_model_plot') plt.show() ```
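As a closing cross-check (an addition, not part of the original notebook), the sketch below reproduces the 3-Nearest-Neighbors prediction for Cyprus by hand: it picks the three countries in `sample_data` whose GDP per capita is closest to Cyprus's and averages their life satisfaction, which should agree with the `(5.1+5.7+6.5)/3` computation shown earlier.

```
# Absolute GDP-per-capita distance of every sample country to Cyprus
gdp_distance = (sample_data["GDP per capita"] - cyprus_gdp_per_capita).abs()

# The three nearest neighbors and the mean of their life satisfaction scores
nearest3 = sample_data.loc[gdp_distance.nsmallest(3).index]
print(nearest3)
print("3-NN prediction for Cyprus:", nearest3["Life satisfaction"].mean())
```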
``` !pip install coremltools # Initialise packages from u2net import U2NETP import coremltools as ct from coremltools.proto import FeatureTypes_pb2 as ft import torch import torch.nn as nn from torch.autograd import Variable import os import numpy as np from PIL import Image from torchvision import transforms from skimage import io, transform class WrappedModel(nn.Module): def __init__(self): super(WrappedModel, self).__init__() self.model = U2NETP(3,1) self.model.load_state_dict(torch.load("u2netp.pth", map_location=torch.device('cpu'))) self.model.cpu() self.model.eval() def normPRED(self, d): ma = torch.max(d) mi = torch.min(d) dn = (d-mi)/(ma-mi) return dn def forward(self, x): d1,d2,d3,d4,d5,d6,d7 = self.model(x) ''' d1 = self.normPRED(d1) d2 = self.normPRED(d2) d3 = self.normPRED(d3) d4 = self.normPRED(d4) d5 = self.normPRED(d5) d6 = self.normPRED(d6) d7 = self.normPRED(d7) ''' return d1,d2,d3,d4,d5,d6,d7 from torchvision import transforms def save_output(pred, image): print(pred.shape) predict = pred predict = predict.squeeze() print(predict.shape) predict_np = predict.cpu().data.numpy() im = Image.fromarray(predict_np * 255).convert('RGB') imo = im.resize((image.size[0],image.size[1]),resample=Image.BILINEAR) display(imo) def tensor_lab(sample): image = sample tmpImg = np.zeros((image.shape[0],image.shape[1],3)) image = image/np.max(image) tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229 tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224 tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225 # change the r,g,b to b,r,g from [0,255] to [0,1] tmpImg = tmpImg.transpose((2, 0, 1)) return torch.from_numpy(tmpImg) # Pre-processing def input_test_image(image_name): inputs_test = Image.open(image_name) inputs_test = inputs_test.resize((320, 320)) inputs_test = np.asarray(inputs_test) inputs_test = tensor_lab(inputs_test) inputs_test = inputs_test.unsqueeze_(0) inputs_test = inputs_test.type(torch.FloatTensor) return inputs_test ''' def input_test_image(image_name): inputs_test = Image.open(image_name) inputs_test = inputs_test.resize((320, 320)) inputs_test = transforms.ToTensor()(inputs_test).unsqueeze_(0) inputs_test = inputs_test.type(torch.FloatTensor) return inputs_test ''' image_name = "0002-01.jpg" input_image = input_test_image(image_name) net = WrappedModel() d1,d2,d3,d4,d5,d6,d7 = net(input_image) save_output(d1, Image.open(image_name)) # Initialise Baseline UNETP model. net = U2NETP(3,1) device = torch.device('cpu') net.load_state_dict(torch.load("u2netp.pth", map_location=device)) net.cpu() net.eval() # Trace the model. 
image_name = "0002-01.jpg"
input_image = input_test_image(image_name)
print(input_image.shape)

traced_model = torch.jit.trace(net, input_image)

# Convert the model
_inputs = ct.ImageType(
    name="input_1",
    shape=input_image.shape,
    bias=[-0.485/0.229, -0.456/0.224, -0.406/0.225],
    scale=1.0/255.0
)

model = ct.convert(traced_model, inputs=[_inputs])

# Add metadata
model.short_description = "U2-Net: Going Deeper with Nested U-Structure for Salient Object Detection"
model.license = "Apache 2.0"
model.author = "Qin, Xuebin and Zhang, Zichen and Huang, Chenyang and Dehghan, Masood and Zaiane, Osmar and Jagersand, Martin"

# Rename inputs
spec = model.get_spec()

ct.utils.rename_feature(spec, "input_1", "in_0")
ct.utils.rename_feature(spec, "2169", "out_a0")
ct.utils.rename_feature(spec, "2170", "out_a1")
ct.utils.rename_feature(spec, "2171", "out_a2")
ct.utils.rename_feature(spec, "2172", "out_a3")
ct.utils.rename_feature(spec, "2173", "out_a4")
ct.utils.rename_feature(spec, "2174", "out_a5")
ct.utils.rename_feature(spec, "2175", "out_a6")

model = ct.models.MLModel(spec)
model.save("u2netp_temp.mlmodel")

# Re-open model for modification
model = ct.models.MLModel("u2netp_temp.mlmodel")

# Get the model specifications
spec = model.get_spec()

# Change model input and save
input = spec.description.input[0]
input.type.imageType.colorSpace = ft.ImageFeatureType.BGR
input.type.imageType.height = 320
input.type.imageType.width = 320

ct.utils.save_spec(spec, "u2netp_temp_new_input.mlmodel")

# Re-open model for modification
model = ct.models.MLModel("u2netp_temp_new_input.mlmodel")
spec = model.get_spec()
spec_layers = getattr(spec, spec.WhichOneof("Type")).layers

output_layers = spec_layers[476:]  # Get only the last output layers, may change with full-size U^2net

# Append new linear-activation layers that rescale each output from [0, 1] to [0, 255]
new_layers = []
layernum = 0
for layer in output_layers:

    new_layer = spec_layers.add()
    new_layer.name = 'out_p' + str(layernum)
    new_layers.append('out_p' + str(layernum))
    new_layer.activation.linear.alpha = 255
    new_layer.activation.linear.beta = 0
    new_layer.input.append('out_a' + str(layernum))
    new_layer.output.append('out_p' + str(layernum))

    output_description = next(x for x in spec.description.output if x.name==output_layers[layernum].output[0])
    output_description.name = new_layer.name
    layernum = layernum + 1

# Specify the outputs as grayscale images.
for output in spec.description.output:

    if output.name not in new_layers:
        continue

    if output.type.WhichOneof('Type') != 'multiArrayType':
        raise ValueError("%s is not a multiarray type" % output.name)

    array_shape = tuple(output.type.multiArrayType.shape)
    # print(array_shape)
    # print(output.type)
    output.type.imageType.colorSpace = ft.ImageFeatureType.ColorSpace.Value('GRAYSCALE')
    output.type.imageType.width = 320
    output.type.imageType.height = 320

updated_model = ct.models.MLModel(spec)

# Attach the metadata to the updated model object that is actually saved
updated_model.short_description = "U2-Net: Going Deeper with Nested U-Structure for Salient Object Detection"
updated_model.license = "Apache 2.0"
updated_model.author = "Qin, Xuebin and Zhang, Zichen and Huang, Chenyang and Dehghan, Masood and Zaiane, Osmar and Jagersand, Martin"

updated_model.save("updated_model.mlmodel")
```
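As a final sanity check, the converted model can be exercised directly from Python. The sketch below is an addition to the original conversion script, not part of it: `MLModel.predict` only runs on macOS, the input/output names assume the renaming performed above, and because the input color space was set to BGR the channel order of the PIL image may need swapping depending on how the model is fed at runtime.

```
import coremltools as ct
from PIL import Image

# Load the saved model and the same test image used for tracing
mlmodel = ct.models.MLModel("updated_model.mlmodel")
img = Image.open("0002-01.jpg").resize((320, 320))

# On macOS this returns a dict keyed by the renamed outputs (out_p0 ... out_p6);
# each value is expected to be a 320x320 grayscale PIL image of a predicted saliency map
prediction = mlmodel.predict({"in_0": img})
prediction["out_p0"].save("saliency_d1.png")
```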