좜처: https://blog.breezymind.com/2018/03/02/sklearn-feature_extraction-text-2/ ``` import pandas as pd import numpy as np pd.options.mode.chained_assignment = None np.random.seed(0) from konlpy.tag import Mecab mecab = Mecab() from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.metrics.pairwise import linear_kernel, cosine_similarity # tokenizer : λ¬Έμž₯μ—μ„œ 색인어 μΆ”μΆœμ„ μœ„ν•΄ λͺ…사,동사,μ•ŒνŒŒλ²³,숫자 μ •λ„μ˜ λ‹¨μ–΄λ§Œ λ½‘μ•„μ„œ normalization, stemming μ²˜λ¦¬ν•˜λ„λ‘ 함 def tokenizer(raw, pos=["NNG","NNP"], stopword=['수','퀄리티','λ„μ‹œ','λΆ„','μ „λ¬Έ','μŠ€νƒ€','λ…„','원',\ 'μ›”','ν™”','수','λͺ©','금','μ‹œ','μ•€','일','그램','λ¬Έ'] ): return [ word for word, tag in mecab.pos(raw) if len(word) > 1 and tag in pos and word not in stopword ] # ν…ŒμŠ€νŠΈ λ¬Έμž₯ df = pd.read_csv("word2vec_wrangling.csv") df.head() import re def preprocessing(text): # κ°œν–‰λ¬Έμž 제거 text = re.sub('\\\\n', ' ', text) # 특수문자 제거 # νŠΉμˆ˜λ¬Έμžλ‚˜ 이λͺ¨ν‹°μ½˜ 등은 λ•Œλ‘œλŠ” 의미λ₯Ό 갖기도 ν•˜μ§€λ§Œ μ—¬κΈ°μ—μ„œλŠ” μ œκ±°ν–ˆμŠ΅λ‹ˆλ‹€. # text = re.sub('[?.,;:|\)*~`’!^\-_+<>@\#$%&-=#}β€»]', '', text) # ν•œκΈ€, 영문, 숫자만 남기고 λͺ¨λ‘ μ œκ±°ν•˜λ„λ‘ ν•©λ‹ˆλ‹€. # text = re.sub('[^κ°€-νž£γ„±-γ…Žγ…-γ…£a-zA-Z0-9]', ' ', text) # ν•œκΈ€, 영문만 남기고 λͺ¨λ‘ μ œκ±°ν•˜λ„λ‘ ν•©λ‹ˆλ‹€. text = re.sub('[^κ°€-νž£γ„±-γ…Žγ…-γ…£a-zA-Z]', ' ', text) return text %time rawdata = df['Content_txt'].apply(preprocessing) vectorize = CountVectorizer( tokenizer=tokenizer, min_df=10 # 예제둜 보기 μ’‹κ²Œ 1번 μ •λ„λ§Œ λ…ΈμΆœλ˜λŠ” 단어듀은 λ¬΄μ‹œν•˜κΈ°λ‘œ ν–ˆλ‹€ # min_df = 0.01 : λ¬Έμ„œμ˜ 1% 미만으둜 λ‚˜νƒ€λ‚˜λŠ” 단어 λ¬΄μ‹œ # min_df = 10 : λ¬Έμ„œμ— 10개 미만으둜 λ‚˜νƒ€λ‚˜λŠ” 단어 λ¬΄μ‹œ # max_df = 0.80 : λ¬Έμ„œμ˜ 80% 이상에 λ‚˜νƒ€λ‚˜λŠ” 단어 λ¬΄μ‹œ # max_df = 10 : 10개 μ΄μƒμ˜ λ¬Έμ„œμ— λ‚˜νƒ€λ‚˜λŠ” 단어 λ¬΄μ‹œ ) # λ¬Έμž₯μ—μ„œ λ…ΈμΆœλ˜λŠ” feature(νŠΉμ§•μ΄ λ λ§Œν•œ 단어) 수λ₯Ό ν•©ν•œ Document Term Matrix(μ΄ν•˜ DTM) 을 λ¦¬ν„΄ν•œλ‹€ X = vectorize.fit_transform(rawdata) print( 'fit_transform, (sentence {}, feature {})'.format(X.shape[0], X.shape[1]) ) # fit_transform, (sentence 5, feature 7) print(type(X)) # <class 'scipy.sparse.csr.csr_matrix'> print(X.toarray()) # [[0, 1, 2, 0, 0, 0, 1], # [0, 1, 1, 0, 0, 0, 2], # [1, 0, 0, 2, 1, 1, 0], # [1, 0, 0, 1, 0, 0, 0], # [0, 0, 0, 3, 1, 1, 0]] # λ¬Έμž₯μ—μ„œ 뽑아낸 feature λ“€μ˜ λ°°μ—΄ features = vectorize.get_feature_names() # 박쑰은 νŠœν† λ¦¬μ–Ό μ΄μ–΄λ‚˜κ°€λ €κ³  함 vectorizer = vectorize feature_vector = X feature_vector.shape vocab = vectorizer.get_feature_names() print(len(vocab)) vocab[:10] # 각 λ¦¬λ·°λ§ˆλ‹€ λ“±μž₯ν•˜λŠ” 단어에 λΉˆλ„μˆ˜κ°€ ν‘œν˜„λ©λ‹ˆλ‹€. 0 은 λ“±μž₯ν•˜μ§€ μ•ŠμŒμ„ μ˜λ―Έν•©λ‹ˆλ‹€. pd.DataFrame(feature_vector[:10].toarray(), columns=vocab).head() # μœ„μ—μ„œ κ΅¬ν•œ 단어벑터λ₯Ό λ”ν•˜λ©΄ 단어가 μ „μ²΄μ—μ„œ λ“±μž₯ν•˜λŠ” 횟수λ₯Ό μ•Œ 수 μžˆμŠ΅λ‹ˆλ‹€. # 벑터화 된 ν”Όμ²˜λ₯Ό 확인해 λ΄„ # Bag of words 에 λͺ‡ 개의 단어가 λ“€μ–΄μžˆλŠ”μ§€ 확인 dist = np.sum(feature_vector, axis=0) df_freq = pd.DataFrame(dist, columns=vocab) df_freq # ν–‰κ³Ό μ—΄μ˜ 좕을 T둜 λ°”κΏ”μ£Όκ³  λΉˆλ„μˆ˜λ‘œ μ •λ ¬ df_too_much = df_freq.T.sort_values(by=0, ascending=False).head(50) df_too_much df_too_much.T too_much_list = list(df_too_much.T.columns.values) print(too_much_list) # df_freq_T = df_freq.T.reset_index() # df_freq_T.columns = ["course", "freq"] # # κ°•μ˜λͺ…을 토큰 3개둜 μ€‘λ³΅μ œκ±°ν•˜κΈ° μœ„ν•΄, κ°•μ’Œλͺ…μ—μ„œ μ§€μ‹κ³΅μœ μžμ˜ 이름을 λΉˆλ¬Έμžμ—΄λ‘œ λ³€κ²½ # df_freq_T["course_find"] = df_freq_T["course"].str.replace("홍정λͺ¨μ˜", "") # df_freq_T["course_find"] = df_freq_T["course_find"].apply(lambda x : " ". 
join(x.split()[:4])) # df_freq_T.sort_values(["course_find", "freq"], ascending=False).head(10) ``` TF-IDF ``` from sklearn.feature_extraction.text import TfidfTransformer transformer = TfidfTransformer(smooth_idf=False) transformer %%time feature_tfidf = transformer.fit_transform(feature_vector) feature_tfidf.shape feature_tfidf # 각 rowμ—μ„œ 전체 단어가방에 μžˆλŠ” μ–΄νœ˜μ—μ„œ λ“±μž₯ν•˜λŠ” 단어에 λŒ€ν•œ one-hot-vector에 TF-IDF κ°€μ€‘μΉ˜ 반영 tfidf_freq = pd.DataFrame(feature_tfidf.toarray(), columns=vocab) tfidf_freq.head() df_tfidf = pd.DataFrame(tfidf_freq.sum()) df_tfidf_top = df_tfidf.sort_values(by=0, ascending=False) df_tfidf_top.head(50) df_tfidf_bottom = df_tfidf.sort_values(by=0, ascending=True) df_tfidf_bottom.head(10) # 쀑간에 μƒλž΅λ˜λŠ” 단어λ₯Ό μžμ„Ένžˆ 보고자 ν•  λ•Œ # 제일 μ“Έλͺ¨ μ—†λŠ” 단어듀 뽑아내기 top_list = [] for t in df_tfidf_top.index[:50]: top_list.append(t) print(top_list) ['μš”κ°€','ν•„λΌν…ŒμŠ€','λ‹€μ΄μ–΄νŠΈ',''] list_too_much1 = ['μš΄λ™', 'μš”κ°€', 'ν•„λΌν…ŒμŠ€', 'λŒ„μŠ€', 'λ‹€μ΄μ–΄νŠΈ', 'ν—¬μŠ€', '발레', 'λ²ˆμ§€', 'ν”ŒλΌμž‰', '살사', 'λ‹ˆμŠ€', '볡싱', 'μˆ˜μ—…', 'μ—¬μž', 'νŒ¨λ“€', 'μ•„μΏ μ•„', '일상', 'μ‹œκ°„', 'λ³΄λ“œ', 'μ„œν•‘', '수영', 'ν”ΌνŠΈ', '강사', 'λ°”λ””', '자이둜', '였늘', '피닝', 'λ‚¨μž', '레슨', 'μ·¨λ―Έ', '점핑', 'μ—¬ν–‰', 'νšŒμ›', '건강', '맛집', '라틴', 'ν‚€μ¦ˆ', 'ꡐ윑', 'κ°€λŠ₯', '크둜슀', 'μ†Œν†΅', '바이크', '감사', '기ꡬ', 'λΆ€μ‚°', '토닉', 'ꡐ정', '슀포츠', 'κ·Έλ£Ή', 'ν›„ν”„', '진행', 'μ‹œμž‘', '검도', '볼링', 'λ“±μ‚°', '수련', '클라이밍', '클럽', '개인', '학원', '사진', 'μ„œν”„', '데일리', 'νŠΈλ ˆμ΄λ‹', 'μ„Όν„°', 'μŠ€νŠœλ””μ˜€', '상담', '수영μž₯', 'μ—μ–΄λ‘œλΉ…', '무용', 'λ“œλŸΌ', 'λ™μš”', 'ν”Όν‹°', '트램', '탕가', 'μ‘°κΉ…', '곡연', 'νŒŒμš΄λ“œ', '야사', '폴린', 'κ΄‘μ£Ό', 'μˆ˜μ€‘', '피지', 'λ™ν˜ΈνšŒ', 'μ‚¬λžŒ', 'ν‚₯볡싱', '휘트', 'λŒ€κ΅¬', '카페', 'μ§€λ„μž', 'μ„ μˆ˜', 'ν•«μš”κ°€', 'ν”ŒλΌμ΄', 'νƒ€μš”', 'κΈ°λΆ€', 'ν•˜λ£¨', '행볡', '일산', '사이클', 'μ²΄ν˜•'] list_too_much_tf_idf = ['μš΄λ™', 'μš”κ°€', 'ν•„λΌν…ŒμŠ€', 'λŒ„μŠ€', 'λ²ˆμ§€', 'λ‹€μ΄μ–΄νŠΈ', '볡싱', '발레', 'λ‹ˆμŠ€', 'μˆ˜μ—…', 'ν”ŒλΌμž‰', 'ν—¬μŠ€', '살사', 'μ—¬μž', 'ν”ΌνŠΈ', 'μ‹œκ°„', '강사', '일상', '점핑', 'μ·¨λ―Έ', 'λ°”λ””', 'ꡐ윑', '였늘', '레슨', 'λ‚¨μž', 'ν‚€μ¦ˆ', 'νšŒμ›', 'λ³΄λ“œ', '맛집', 'κ°€λŠ₯', 'ꡐ정', '라틴', 'μ—¬ν–‰', '건강', '피닝', '진행', 'μ„œν•‘', 'κ·Έλ£Ή', 'ν”ŒλΌμ΄', 'μž μ‹€', '개인', 'λΆ€μ‚°', 'μŠ€νŠœλ””μ˜€', 'μ‹œμž‘', '기ꡬ', '자이둜', '학원', '상담', 'μ„Όν„°', '감사', '피지', 'μ†Œν†΅', 'μ§€λ„μž', 'κ³Όμ •', '수련', '무용', 'λŒ€κ΅¬', '자격증', '클럽', '수영', 'ν™λŒ€', '볼링', '일산', 'νŠΈλ ˆμ΄λ‹', 'λ™μš”', 'λ™ν˜ΈνšŒ', '토닉', '사진', 'μ˜μƒ', '크둜슀', 'μ˜€ν”ˆ', 'μ‚¬λžŒ', '데일리', '방솑', 'κ΄‘μ£Ό', '선생', 'μžμ„Έ', '마음', 'μ²΄ν˜•', '문의', '행볡', 'μ‚¬λž‘', '카페', 'κ·Όλ ₯', 'λ™μž‘', '탕가', '클래슀', 'λ“œλŸΌ', '친ꡬ', '카톑', '무료', 'μ „ν™”', '슀포츠', '곡연', '야사', '할인', 'ν•˜λ£¨', 'μ—μ–΄λ‘œλΉ…', '등둝', 'μ€€λΉ„'] list_too_much_200 = list_too_much1 + list_too_much_tf_idf bulyongeo_list = list(set(list_too_much_200)) print(bulyongeo_list) ```
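The notebook imports `linear_kernel` and `cosine_similarity` but never uses them. Below is a minimal sketch (not part of the original tutorial) of how the Mecab-based `tokenizer`, the preprocessed `rawdata`, and the stopword list `bulyongeo_list` assembled above could feed a TF-IDF document-similarity step; every name reused here is one defined in the cells above.

```
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Build a TF-IDF matrix with the custom tokenizer and the collected stopword list.
tfidf = TfidfVectorizer(
    tokenizer=tokenizer,        # Mecab noun tokenizer defined above
    stop_words=bulyongeo_list,  # over-frequent terms gathered from the count and TF-IDF rankings
    min_df=10,
)
tfidf_matrix = tfidf.fit_transform(rawdata)

# Pairwise cosine similarity between documents; row i holds the similarities of document i.
doc_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)
print(doc_sim.shape)
```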
``` import numpy as np import pandas as pd import wisps import wisps.simulations as wispsim import matplotlib.pyplot as plt from astropy.io import fits, ascii from astropy.table import Table %matplotlib inline bigf= wisps.get_big_file() bigf=bigf[bigf.snr1>=3] #3dhst data from astropy.io import ascii hst3d= ascii.read('/users/caganze/3dhst_master.phot.v4.1/3dhst_master.phot.v4.1.cat').to_pandas() #hst3d def magnitude(flux): return 25.0-2.5*np.log10(flux) def mag_err(combined): #combined is a pandas table with flux and flux_error if np.isnan(combined['flux']): return np.nan else: return abs(0.434*2.5*combined['flux_error']/combined['flux']) for k in ['F125', 'F606', 'F140', 'F160', 'F814']: hst3d[k]=magnitude(hst3d['f_{}W'.format(k)]) pnts=pd.read_pickle(wisps.OUTPUT_FILES+'/pointings_correctedf110.pkl') maglimits=pd.DataFrame([x.mag_limits for x in pnts]) maglimits['imexp']=[x.imag_exptime for x in pnts] #exposure_times=[np.nanmean(x.exposure_time) for x in pnts] maglimits['pointing']=[x.name for x in pnts] maglimits['survey']=[x.survey for x in pnts] maglimits_wisp=maglimits[maglimits.survey=='wisps'] maglimits_hst3d=maglimits[maglimits.survey!='wisps'] wisp_ph=fits.open('/users/caganze/WISPPhotometricCatalog_v1.2.fits') wt=Table(wisp_ph[1].data) bigf_3d=bigf[bigf.survey=='HST3D'] bigf_w=bigf[bigf.survey != 'HST3D'] mag_grid=np.linspace(15, 30, 1000) fig, ax=plt.subplots(ncols=2, figsize=(8, 4)) h=ax[0].hist(bigf_3d.F140.values, range=[19, 30], bins=32, histtype='step', density=False) h=ax[0].hist(hst3d.F140.values, range=[15, 30], bins=32, histtype='step', density=False) h=ax[1].hist(bigf_3d.F160.values, range=[15, 30], bins=32, histtype='step', density=False) h=ax[1].hist(hst3d.F160.values, range=[15, 30], bins=32, histtype='step', density=False) plt.tight_layout() fig, (ax, ax1)=plt.subplots(ncols=2, nrows=2, figsize=(8, 4), sharex='col') h=ax[0].hist(bigf_3d.F140.values, range=[19, 30], bins=32, histtype='step', density=True, label='with spectra') h=ax[0].hist(hst3d.F140.values, range=[19, 30], bins=32, histtype='step', density=True, label='all') kde0=wisps.kernel_density(bigf_3d.F140[bigf_3d.F140.between(15, 30)], \ bw_method=0.1).pdf(mag_grid) kde1=wisps.kernel_density(hst3d.F140[hst3d.F140.between(15, 30)], bw_method=0.1).pdf(mag_grid) ax[0].plot(mag_grid, kde0) ax[0].plot(mag_grid, kde1) ax1[0].plot(mag_grid, kde1/kde0) h=ax[1].hist(bigf_3d.F160.values, range=[19, 30], bins=32, histtype='step', density=True, label='with spectra') h=ax[1].hist(hst3d.F160.values, range=[19, 30], bins=32, histtype='step', density=True, label='all') kde2=wisps.kernel_density(bigf_3d.F160[bigf_3d.F160.between(15, 30)], \ bw_method=0.1).pdf(mag_grid) kde3=wisps.kernel_density(hst3d.F160[hst3d.F160.between(15, 30)], bw_method=0.1).pdf(mag_grid) ax[1].plot(mag_grid, kde2) ax[1].plot(mag_grid, kde3) ax1[1].plot(mag_grid, kde3/kde2) for x in maglimits_hst3d.F140.dropna().values: ax[0].axvline(x, alpha=0.1) ax[1].axvline(x, alpha=0.1) for x in maglimits_hst3d.F160.dropna().values: ax1[0].axvline(x, alpha=0.1) ax1[1].axvline(x, alpha=0.1) plt.tight_layout() ax1[0].set_xlabel('F140W', fontsize=18) ax1[1].set_xlabel('F160W', fontsize=18) ax[0].legend() for a in ax1: a.set_yscale('log') #maglimits_hst3d.F140.dropna().values fig, ax=plt.subplots(ncols=3, figsize=(12, 4)) h=ax[0].hist(bigf_w.F110.values, range=[19, 30], bins=32, histtype='step') h=ax[0].hist(np.array(wt['MAG_AUTO_F110W']), range=[19, 30], bins=32, histtype='step') h=ax[1].hist(bigf_w.F140.values, range=[19, 27], bins=32, histtype='step') 
h=ax[1].hist(np.array(wt['MAG_AUTO_F140W']), range=[19, 30], bins=32, histtype='step') h=ax[2].hist(bigf_w.F160.values, range=[19, 30], bins=32, histtype='step') h=ax[2].hist(np.array(wt['MAG_AUTO_F160W']), range=[19, 30], bins=32, histtype='step') plt.tight_layout() fig, (ax, ax1)=plt.subplots(ncols=3,nrows=2, figsize=(12, 6), sharex='col') h=ax[0].hist(bigf_w.F110.values, range=[19, 30], bins=32, histtype='step', density=True) h=ax[0].hist(np.array(wt['MAG_AUTO_F110W']), range=[19, 30], bins=32, histtype='step', density=True) wkde0=wisps.kernel_density(bigf_w.F110[bigf_w.F110.between(15, 30)], \ bw_method=0.1).pdf(mag_grid) wkde1=wisps.kernel_density(np.array(wt['MAG_AUTO_F110W'])\ [np.logical_and(np.array(wt['MAG_AUTO_F110W']) >15,\ np.array(wt['MAG_AUTO_F110W']) <30)], bw_method=0.1).pdf(mag_grid) ax[0].plot(mag_grid, wkde0) ax[0].plot(mag_grid, wkde1) ax1[0].plot(mag_grid, wkde1/wkde0) h=ax[1].hist(bigf_w.F140.values, range=[19, 30], bins=32, histtype='step', density=True) h=ax[1].hist(np.array(wt['MAG_AUTO_F140W']), range=[19, 30], bins=32, histtype='step', density=True) wkde2=wisps.kernel_density(bigf_w.F140[bigf_w.F140.between(15, 30)], \ bw_method=0.1).pdf(mag_grid) wkde3=wisps.kernel_density(np.array(wt['MAG_AUTO_F140W'])\ [np.logical_and(np.array(wt['MAG_AUTO_F140W']) >15,\ np.array(wt['MAG_AUTO_F140W']) <30)], bw_method=0.1).pdf(mag_grid) ax[1].plot(mag_grid, wkde2) ax[1].plot(mag_grid, wkde3) ax1[1].plot(mag_grid, wkde3/wkde2) h=ax[-1].hist(bigf_w.F160.values, range=[19, 30], bins=32, histtype='step', density=True) h=ax[-1].hist(np.array(wt['MAG_AUTO_F160W']), range=[19, 30], bins=32, histtype='step', density=True) wkde4=wisps.kernel_density(bigf_w.F160[bigf_w.F160.between(15, 30)], \ bw_method=0.1).pdf(mag_grid) wkde5=wisps.kernel_density(np.array(wt['MAG_AUTO_F160W'])\ [np.logical_and(np.array(wt['MAG_AUTO_F160W']) >15,\ np.array(wt['MAG_AUTO_F160W']) <30)], bw_method=0.1).pdf(mag_grid) ax[-1].plot(mag_grid, wkde4) ax[-1].plot(mag_grid, wkde5) ax1[-1].plot(mag_grid, wkde5/wkde4) for x in maglimits_wisp.F110.dropna().values: ax[0].axvline(x, alpha=0.1) ax1[0].axvline(x, alpha=0.1) for x in maglimits_wisp.F140.dropna().values: ax[1].axvline(x, alpha=0.1) ax1[1].axvline(x, alpha=0.1) for x in maglimits_wisp.F160.dropna().values: ax[-1].axvline(x, alpha=0.1) ax1[-1].axvline(x, alpha=0.1) plt.tight_layout() ax1[0].set_xlabel('F110W', fontsize=18) ax1[1].set_xlabel('F140W', fontsize=18) ax1[-1].set_xlabel('F160W', fontsize=18) for a in ax1: a.set_yscale('log') ```
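For reference, the constant in `mag_err` comes from standard first-order error propagation on the magnitude definition $m = 25 - 2.5\log_{10} F$:

$\sigma_m = \dfrac{2.5}{\ln 10}\,\dfrac{\sigma_F}{F} \approx 0.434 \times 2.5\,\dfrac{\sigma_F}{F},$

so the `0.434*2.5*flux_error/flux` expression in the code is simply $\sigma_m$ with $1/\ln 10 \approx 0.434$.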
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt import json import io ``` # Import data from json file to dataframe ##### 1. load json files and convert to three dataframe ``` business_json_file = 'business.json' user_json_file = 'user.json' review_json_file = 'review.json' business = [] user = [] review = [] for line in open(business_json_file, 'r'): business.append(json.loads(line)) for line in open(user_json_file, 'r'): user.append(json.loads(line)) for line in open(review_json_file, 'r'): review.append(json.loads(line)) business_df = pd.DataFrame.from_records(business) user_df = pd.DataFrame.from_records(user) review_df = pd.DataFrame.from_records(review) business_df.head(3) user_df.head(3) review_df.head(3) ``` # Exploratary Data Analysis ### 1.business analytics ``` business_df_state = business_df.groupby('state').count() business_df_state business_df_state.sort_values('business_id',ascending = False)[:10] business_df_category = business_df.groupby('categories').count() business_df_category.sort_values('business_id',ascending = False)[:10] business_df_stars = business_df.groupby('stars').count() business_df_stars ``` #### (1οΌ‰visualize the stars ``` business_df_stars.sns ``` ### 2.User analytics ``` user_df['yelp_age'] = user_df['yelping_since'].apply(lambda x: x[:4]) user_df.groupby('yelp_age').count().sort_values('user_id',ascending = False)[0:10] ``` ### 3.Review Analytics # Get some test data (review) ``` business_test = business_df[:5000] review_test = review_df[:5000] user_test = user_df[:5000] ``` # Sentiment Analysis (review) #### 1. import packages ``` from nltk.tokenize import word_tokenize, RegexpTokenizer import nltk.classify.util from nltk.classify import NaiveBayesClassifier import nltk from nltk.sentiment.vader import SentimentIntensityAnalyzer ``` #### 2. Using NLTK extract the reviews' Sentiment ``` review_test[:3] sid = SentimentIntensityAnalyzer() def sentiment_extractor(x): ss = sid.polarity_scores(x) sentiment = {} for k in ss: sentiment[k] = ss[k] return sentiment review_test['sentiment'] = review_test['text'].apply(sentiment_extractor) new_review_test = review_test.loc[:,['business_id','user_id','cool','funny','useful','stars','sentiment']] # seperate the sentiment score new_review_test['negative'] = new_review_test['sentiment'].apply(lambda x: x['neg']) new_review_test['neutral'] = new_review_test['sentiment'].apply(lambda x: x['neu']) new_review_test['positive'] = new_review_test['sentiment'].apply(lambda x: x['pos']) new_review_test['compound'] = new_review_test['sentiment'].apply(lambda x: x['compound']) new_review_test[:5000].to_csv('review_sentiment.csv', sep='\t',index = False) ```
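The `business_df_stars.sns` access in the "(1) visualize the stars" cell is not a valid DataFrame attribute and will raise an `AttributeError`. A minimal sketch of the intended star-rating bar chart, using only the pandas/matplotlib plotting already imported above (the choice of `business_id` as the count column is an assumption):

```
# Bar chart of how many businesses fall in each star-rating bucket.
# business_df_stars holds per-column counts indexed by 'stars' after groupby().count().
business_df_stars['business_id'].plot(kind='bar')
plt.xlabel('stars')
plt.ylabel('number of businesses')
plt.show()
```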
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All). Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below: ``` NAME = "" COLLABORATORS = "" ``` --- <!--NOTEBOOK_HEADER--> *This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks); content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).* <!--NAVIGATION--> < [Command Reference](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/A.00-Appendix-A.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Cleaning pdb files](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/C.00-Appendix-C.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/B.00-Appendix-B.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a> # Residue Parameter Files Parameter files describing the chemical and structural properties of each residue is found in the PyRosetta package in the `database/chemical/residue_type_sets` directory. The full-atom residue parameters are stored in the `/fa_standard/residue_types` directory. As an example, the parameter file for threonine is shown below. ``` from IPython.display import Image Image('./Media/res-param-1.png',width='700') from IPython.display import Image Image('./Media/res-param-2.png',width='700') ``` The centroid residue parameters can be found in the `/centroid/residue_types` directory. The centroid parameter file for Threonine is shown below. ``` from IPython.display import Image Image('./Media/centroid-res-param.png',width='700') ``` <!--NAVIGATION--> < [Command Reference](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/A.00-Appendix-A.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Cleaning pdb files](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/C.00-Appendix-C.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/B.00-Appendix-B.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
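The attributes stored in these parameter files are exposed through residue objects at runtime. The following is a small illustrative sketch (not taken from the appendix itself) that builds a single-threonine pose and prints a few of the properties defined in `THR.params`; it assumes a standard PyRosetta installation:

```
import pyrosetta
pyrosetta.init("-mute all")  # quiet initialization

# A pose containing a single threonine residue.
pose = pyrosetta.pose_from_sequence("T")
res = pose.residue(1)

print(res.name())    # residue type name (terminal patches applied automatically)
print(res.natoms())  # number of atoms listed in the parameter file
for i in range(1, res.natoms() + 1):
    print(res.atom_name(i))  # atom names from the ATOM lines of THR.params
```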
# Development of Deep Learning Guided Genetic Algorithm for Material Design Optimization

Kuanlin Chen, PhD student of the Schulman Lab<br> Advisor: Rebecca Schulman, PhD<br> Johns Hopkins University

**Keywords: Machine Learning, Deep Learning, Computer Vision, Numeric Simulation, Multi-Objective Optimization**

***

#### Summary:

We developed a genetic algorithm to efficiently search the large parameter space involved in designing our digit automata. The algorithm starts with an initial population of automata designs generated from a random seed. Each design in the population is then simulated to find all possible geometric outputs under the sixteen actuation combinations and scored with a deep learning model. To fully utilize each image during scoring, every image is rotated to twenty different angles, and the rotation with the highest digit score is selected to represent the final class and score of that image. We thus obtain a 2D array documenting which digits are formed and the score for each digit. We next developed a custom loss function to evaluate the performance of each design, defined as follows:

$Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 0}^{i = 9}[1.001 - (score\_for\_digit\_i)]$

The loss function captures both the **diversity** and the **similarity to real digits** of the digits formed: designs whose outputs resemble a larger number of high-quality digits are more likely to be preserved. During the selection stage, we eliminate 80% of the designs in the population, keeping the designs with the lowest 20% of loss scores. These survivors are passed to a mutation function to repopulate a new generation. For mutation we used the **single-parent mutation method**, in which the genetic information of each descendant comes from a single surviving design of the previous selection. During mutation, each design has a fifty percent chance of randomly updating the strip segment lengths while preserving the actuator pattern, and a fifty percent chance of mutating the actuator pattern, in which case half of the pattern is randomly changed. Each surviving design generates four descendants, so the population returns to its original size after every round of selection and mutation. The algorithm iterates this cycle of population generation, selection, and mutation until it reaches the generation limit, then outputs the optimized designs.

For the even-digit and odd-digit automaton searches, we slightly tweaked the loss function and mutation function to obtain fabricable results. We first added a rule to the mutation function to keep new designs within a reasonable number of patterning steps, avoiding designs that are overly complex and unpatternable. The custom fabrication-step calculation function `fab_steps_strip_requires` computes the cumulative number of unique actuator systems within each layer and eliminates mutations that require more than six fabrication steps. Because this constraint limits the complexity of the outputs that can be formed, we searched separately for an even-digit automaton and an odd-digit automaton, changing the loss function for each of the two searches as shown below, and derived the final optimized outputs.
$Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 1, 3, 5, 7, 9}[1.001 - (score\_for\_digit\_i)]$ $Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 0, 2, 4, 6, 8}[1.001 - (score\_for\_digit\_i)]$ ``` # Package Importing import csv, math, os, time, copy, matplotlib, datetime, keras import tensorflow as tf import numpy as np import matplotlib.pyplot as plt from keras.datasets import mnist from keras.models import Sequential, load_model from keras.layers import Dense, Dropout, Flatten from keras.layers.convolutional import Conv2D, MaxPooling2D from keras.utils import np_utils from scipy import io as spio from scipy.ndimage import gaussian_filter from scipy.stats import bernoulli from math import log10, floor from skimage import transform, exposure print(keras.__version__) # 2.4.3 print(tf.__version__) # 2.2.0 ``` #### Set Up Material Simulation Environment ``` %run ./Numeric_Simulation_of_Material_Behavior.ipynb # Sample Design segment_lengths_ex_four_types = [938, 954, 1022, 843, 931, 722, 702, 655, 1066, 947] segment_identities_ex_four_types = [[2,3,2,3,2,3,4,0,1,4],[4,4,3,1,3,4,4,1,3,2]] sampleStrip = ActuatorStrip(segment_lengths_ex_four_types, segment_identities_ex_four_types, four_t_rocs, four_t_ctls) cnn_digit_model = load_model("Deep_Learning_Classifier_v3.h5") ``` ### Multi-Objective Optimization Loss Function $Loss = 5000 * (number\_of\_digits\_formed) * \sum_{i = 0}^{i = 9}[1.001 - (score\_for\_digit\_i)]$ This loss function computes the **diversity** and the **similarity to real digits** for the digits formed. Designs that outputs images resembling a larger number of high-quality digits are more likely to be preserved. ``` def loss_on_dig_sim_var_v2(strip, rotation = 20): """ - loss on digit similarity and variety - default 20 rotations - Final design score list would be stored in a np array, documenting the max score for each digit. - Reconsider np.log as log would greatly lock into one design for a perfect 1 or 7, as log(1-1) = -inf The loss is then calculated by the following: digits_for_score = np.log(1 - digit_recs) * 5000 * digits_form) loss = np.sum(digits_for_score) Where we're taking the log of each value's distance against one, and scaling with the digit numbers that it can form. For design that can form more digit number and higher similairy, they can get lower loss. 
""" strip.analysis(rotation = rotation); output = strip.selected dig_score = np.zeros(shape=(10, )) if output.keys(): for i in output.keys(): dig_score[i] = output[i]["score"] # did 1.001 - dig_score as -inf often appears when 1 is in dig_score loss = np.sum(np.log(1.001 - dig_score) * 5000 * np.count_nonzero(dig_score)) return loss loss_on_dig_sim_var_v2(sampleStrip, rotation = 4) # -2080099.4708855439 if 0, messed up, restart kernal ``` ### Updated Mutation Function controlling fabrication complexity ``` def fab_steps_strip_requires(identities): top_steps = np.size(np.unique(identities[0])) bottom_steps = np.size(np.unique(identities[1])) return top_steps + bottom_steps def sp_mutation_maxfab_v1(parent_strip, max_segments, min_segment_length,\ max_segment_length, max_fab_steps, rocs, ctls, num_actuator_types): """ - single parent mutation - v1 seems to be unable to produce new identities - v2 now has 40% new identities chance, and can retain 80% of original genetic info """ # 50% chance in length change if np.random.random() < .5: # print("length change") new_length = np.random.random(size = (max_segments, )) * (max_segment_length - min_segment_length) + min_segment_length offspring_strip = ActuatorStrip(new_length, parent_strip.identities, rocs, ctls) # 50% chance in acutator change, mutates 50% of identities else: # print("idt change") satisfy_max_fab = False while satisfy_max_fab == False: identities_mutation_rate = .5 sz = int(np.floor(len(parent_strip.identities[0]) * identities_mutation_rate)) new_identities = copy.deepcopy(parent_strip.identities) for i in range(2): gene_num = np.random.choice(len(parent_strip.identities[0]), size=(sz,), replace = False) new_genes = np.random.choice(num_actuator_types + 1, size = (sz,)) for j in range(sz): new_identities[i][gene_num[j]] = new_genes[j] if (fab_steps_strip_requires(new_identities) <= max_fab_steps): satisfy_max_fab = True offspring_strip = ActuatorStrip(parent_strip.segment_lengths, new_identities, rocs, ctls) return offspring_strip ``` ### Mutation Function We used the **single-parent mutation method** where the genetic information of each descendant come from a single survived design from previous selection. During mutation, each design has a fifty percent chance to randomly update the strip segment lengths, preserving the actuator pattern information; each design also has a fifty percent chance of mutating the actuator pattern, where we randomly mutate half of the pattern. Each survivor design generates four descendants, so the population returns to its original size after every round of selection and mutation. 
``` def sp_mutation_v2(parent_strip, max_segments, min_segment_length,\ rocs, ctls, num_actuator_types): """ - single parent mutation - v1 seems to be unable to produce new identities - v2 now has 40% new identities chance, and can retain 80% of original genetic info """ # 50% chance in length change if np.random.random() < .5: # print("length change") new_length = np.random.random(size = (max_segments, )) * min_segment_length + min_segment_length offspring_strip = ActuatorStrip(new_length, parent_strip.identities, rocs, ctls) # 50% chance in acutator change, mutates 50% of identities else: # print("idt change") identities_mutation_rate = .5 sz = int(np.floor(len(parent_strip.identities[0]) * identities_mutation_rate)) new_identities = copy.deepcopy(parent_strip.identities) for i in range(2): gene_num = np.random.choice(len(parent_strip.identities[0]), size=(sz,), replace = False) new_genes = np.random.choice(num_actuator_types + 1, size = (sz,)) for j in range(sz): new_identities[i][gene_num[j]] = new_genes[j] offspring_strip = ActuatorStrip(parent_strip.segment_lengths, new_identities, rocs, ctls) return offspring_strip ``` ### Combined Genetic Algorithm ``` def strip_optimizer_gen_alg_v3(rocs, ctls, loss_function, mutation_function, save_filename,\ max_segments = 10, min_segment_length = 600,\ max_segment_length = 2000, max_fab_steps = 6,\ population_size = 20, generation_limit = 2500): """ Genetic Algorithm for Strip Optimizer 1. Creates 100 (or 20 for debug) different random initial design. 2. Score them based on the loss function and get a sum of pupulation score. 3. Kill 80% of the population who has score in the last 80%. 4. Mutate the survivor and rescore them. 5. Goes on and on and on until reaches generation limit """ def plot_best_fives(population_loss, strip_population, SAVE = False): """ plots 5 best strip design for overall visualization """ best5s = np.argsort(population_loss)[:5] for best_num in np.flip(best5s): best_strip = strip_population[best_num] print("Actuator segments are " + str(best_strip.identities)) print("Segment lengths are " + str(best_strip.segment_lengths)) print("Loss is " + str(population_loss[best_num])) # best_strip.plot_input_and_all(rotation = 20, save = False) best_strip.plot_input_and_selected(rotation = 20, save = SAVE) def save_best_tens(filename, mode, population_loss, strip_population, gen): """ save 10 best strip design of each generation. 
""" with open(filename, mode) as f: f.write("Generation {}\n".format(gen)) best10s = np.argsort(population_loss)[:10] for best_num in np.flip(best10s): best_strip = strip_population[best_num] formed = list(best_strip.selected.keys()) f.write("Segments: {}\nIdentities: {}\nFormed: {}\n\n".format(str(best_strip.segment_lengths),str(best_strip.identities),str(formed))) itr = 0; num_actuator_types = len(rocs[0][0]) - 1 # generate initial population population_lengths = np.random.random(size = (population_size, max_segments)) * (max_segment_length - min_segment_length) + min_segment_length population_identities = np.random.randint(0, high = (num_actuator_types + 1), size = (population_size, 2, max_segments)) for i in range(population_size): found_fabbable = False #print("Generating Step " + str(i)) while found_fabbable == False: population_identities[i]=np.random.randint(0, high = (num_actuator_types + 1), size = (2, max_segments)) if fab_steps_strip_requires(population_identities[i]) <= max_fab_steps: found_fabbable = True strip_population = [ActuatorStrip(population_lengths[num], population_identities[num], four_t_rocs, four_t_ctls) for num in range(population_size)] [strip.generate_curves() for strip in strip_population] population_loss = [loss_function(strip) for strip in strip_population] [strip.save_imgs() for strip in strip_population] # plot 5 best individuals for visualization plot_best_fives(population_loss, strip_population, SAVE = False) # save 5 best individuals save_best_tens(save_filename, "w", population_loss, strip_population, itr) while itr < generation_limit: itr += 1 # evolution and mutation print("Evolution {}".format(itr)) # kills 80% of the population survivors = np.argsort(population_loss)[:int(np.floor(population_size/5))] print("Survivors: " + str(survivors)) print("Survivor loss: " + str([population_loss[i] for i in survivors])) # mutation and creates 4 offspring for each survivor new_population = [mutation_function(strip_population[survivor_id], max_segments, min_segment_length,\ max_segment_length, max_fab_steps, rocs, ctls, num_actuator_types) for survivor_id in survivors for _ in range(4)] [strip.generate_curves() for strip in new_population] # Add survivors to new population [new_population.append(strip_population[survivor_id]) for survivor_id in survivors]; new_loss = [loss_function(strip) for strip in new_population] [strip.save_imgs() for strip in new_population] # plot 5 best individuals for visualization if itr == generation_limit: plot_best_fives(new_loss, new_population, SAVE = True) else: plot_best_fives(new_loss, new_population, SAVE = False) save_best_tens(save_filename, "a+", new_loss, new_population, itr) print("delta population loss: %.4f"%(np.sum(new_loss)-np.sum(population_loss))) population_loss = new_loss; strip_population = new_population ```
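For reference, a hypothetical invocation of the search defined above, wired to the loss and mutation functions from the earlier cells; `four_t_rocs` and `four_t_ctls` come from `Numeric_Simulation_of_Material_Behavior.ipynb`, which is run at the top of this notebook, and the output filename is a placeholder:

```
# Hypothetical run of the full search with the default population and generation settings.
strip_optimizer_gen_alg_v3(
    rocs=four_t_rocs,
    ctls=four_t_ctls,
    loss_function=loss_on_dig_sim_var_v2,
    mutation_function=sp_mutation_maxfab_v1,  # the fabrication-limited mutation
    save_filename="ga_best_designs.txt",      # placeholder output path
    population_size=20,
    generation_limit=2500,
)
```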
# Visualizing Logistic Regression ``` import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('data/', one_hot=True) trainimg = mnist.train.images trainlabel = mnist.train.labels testimg = mnist.test.images testlabel = mnist.test.labels ``` # Define the graph ``` # Parameters of Logistic Regression learning_rate = 0.01 training_epochs = 20 batch_size = 100 display_step = 5 # Create Graph for Logistic Regression x = tf.placeholder("float", [None, 784], name="INPUT_x") y = tf.placeholder("float", [None, 10], name="OUTPUT_y") W = tf.Variable(tf.zeros([784, 10]), name="WEIGHT_W") b = tf.Variable(tf.zeros([10]), name="BIAS_b") # Activation, Cost, and Optimizing functions pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) optm = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) corr = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) accr = tf.reduce_mean(tf.cast(corr, "float")) init = tf.initialize_all_variables() ``` # Launch the graph ``` sess = tf.Session() sess.run(init) ``` # Summary writer ``` summary_path = '/tmp/tf_logs/logistic_regression_mnist' summary_writer = tf.summary.FileWriter(summary_path, graph=sess.graph) print ("Summary writer ready") ``` # Run ``` print ("Summary writer ready") for epoch in range(training_epochs): sum_cost = 0. num_batch = int(mnist.train.num_examples/batch_size) # Loop over all batches for i in range(num_batch): randidx = np.random.randint(trainimg.shape[0], size=batch_size) batch_xs = trainimg[randidx, :] batch_ys = trainlabel[randidx, :] # Fit training using batch data feeds = {x: batch_xs, y: batch_ys} sess.run(optm, feed_dict=feeds) # Compute average loss sum_cost += sess.run(cost, feed_dict=feeds) avg_cost = sum_cost / num_batch # Display logs per epoch step if epoch % display_step == 0: train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys}) print ("Epoch: %03d/%03d cost: %.9f train_acc: %.3f" % (epoch, training_epochs, avg_cost, train_acc)) print ("Optimization Finished!") # Test model test_acc = sess.run(accr, feed_dict={x: testimg, y: testlabel}) print (("Test Accuracy: %.3f") % (test_acc)) float(epoch) ``` ### Run the command line ##### tensorboard --logdir=/tmp/tf_logs/logistic_regression_mnist ### Open http://localhost:6006/ into your web browser <img src="images/tsboard/logistic_regression_mnist.png">
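As written, `summary_writer` only records the graph, so TensorBoard shows the graph but no training curves. A small sketch, in the same TF1-style API the notebook uses, of how a scalar cost summary could be logged as well (the summary name is an assumption):

```
# Define once, next to the other graph ops:
cost_summary = tf.summary.scalar("cost", cost)
merged = tf.summary.merge_all()

# Inside the training loop (e.g. once per epoch), evaluate and write it:
summary_str = sess.run(merged, feed_dict=feeds)
summary_writer.add_summary(summary_str, epoch)
summary_writer.flush()
```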
### Closed-loop control of a deformable mirror (DM) #### using SVD pseudo-inversion of DM influence matrix #### and low-pass filtering of the eigenvalues for improved convergence stability Hardware used: * Thorlabs WFS-150 Shack-Hartmann sensor * Mirao52e deformable mirror This code uses Thorlabs 64-bit WFS driver installed via [Thorlabs WFS Software](https://www.thorlabs.com/software_pages/ViewSoftwarePage.cfm?Code=WFS). Tested using Anaconda 64-bit Python 2.7 distribution, Windows 10. Author: [Nikita Vladimirov](mailto:nikita.vladimirov@mdc-berlin.de) ``` import ctypes as ct import matplotlib.pyplot as plt import numpy as np %matplotlib inline import sys sys.path.append('./lib') from Mirao52_utils import * #define home dir of the code: homeDir = 'C:/Users/Nikita/Documents/GitHub/AO-toolkit/' #load the WFS DLL: WFS = ct.windll.WFS_64 #Load the Mirao52e DLL: DM = ct.windll.LoadLibrary('./lib/x64/mirao52e.dll') byref = ct.byref #Set the data types compatible with C DLL count = ct.c_int32() deviceID = ct.c_int32() instrumentListIndex = ct.c_int32() inUse = ct.c_int32() instrumentName = ct.create_string_buffer("", 256) instrumentSN = ct.create_string_buffer("", 256) resourceName = ct.create_string_buffer("", 256) IDQuery = ct.c_bool() resetDevice = ct.c_bool() instrumentHandle = ct.c_ulong() pupilCenterXMm = ct.c_double() pupilCenterYMm = ct.c_double() pupilDiameterXMm = ct.c_double() pupilDiameterYMm = ct.c_double() exposureTimeAct = ct.c_double() masterGainAct = ct.c_double() dynamicNoiseCut = ct.c_int32() calculateDiameters = ct.c_int32() cancelWavefrontTilt = ct.c_int32() errorMessage = ct.create_string_buffer("", 512) errorCode = ct.c_int32() pixelFormat = ct.c_int32() pixelFormat.value = 0 #currently 8 bit only camResolIndex = ct.c_int32() spotsX = ct.c_int32() spotsY = ct.c_int32() wavefrontType = ct.c_int32() limitToPupil = ct.c_int32() #Set the WFS parameter values MAX_SPOTS_X = 50 MAX_SPOTS_Y = 40 arrayWavefront = np.zeros((MAX_SPOTS_Y,MAX_SPOTS_X),dtype = np.float32) instrumentListIndex.value = 0 #0,1,2,, if multiple instruments connected #Configure WFS camera camResolIndex.value = 1 # camResolIndex values: # For WFS instruments: # Index Resolution # 0 1280x1024 # 1 1024x1024 # 2 768x768 # 3 512x512 # 4 320x320 # For WFS10 instruments: # Index Resolution # 0 640x480 # 1 480x480 # 2 360x360 # 3 260x260 # 4 180x180 # For WFS20 instruments: # Index Resolution # 0 1440x1080 # 1 1080x1080 # 2 768x768 # 3 512x512 # 4 360x360 # 5 720x540, bin2 # 6 540x540, bin2 # 7 384x384, bin2 # 8 256x256, bin2 # 9 180x180, bin2 #Set pupil pupilCenterXMm.value = 0 #mm pupilCenterYMm.value = 0 #mm pupilDiameterXMm.value = 4.5 #mm pupilDiameterYMm.value = 4.5 #mm #Set spot calculation params dynamicNoiseCut.value = 1 calculateDiameters.value = 0 cancelWavefrontTilt.value = 1 wavefrontType.value = 0 # This parameter defines the type of wavefront to calculate. # Valid settings for wavefrontType: # 0 Measured Wavefront # 1 Reconstructed Wavefront based on Zernike coefficients # 2 Difference between measured and reconstructed Wavefront # Note: Function WFS_CalcReconstrDeviations needs to be called prior to this function in case of Wavefront type 1 and 2. limitToPupil.value = 1 # This parameter defines if the Wavefront should be calculated based on all detected spots or only within the defined pupil. 
# Valid settings: # 0 Calculate Wavefront for all spots # 1 Limit Wavefront to pupil interior #Check how many WFS devices are connected WFS.WFS_GetInstrumentListLen(None,byref(count)) print('WFS sensors connected: ' + str(count.value)) #Select a device and get its info devStatus = WFS.WFS_GetInstrumentListInfo(None,instrumentListIndex, byref(deviceID), byref(inUse), instrumentName, instrumentSN, resourceName) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_GetInstrumentListInfo():' + str(errorMessage.value)) else: print('WFS deviceID: ' + str(deviceID.value)) print('in use? ' + str(inUse.value)) print('instrumentName: ' + str(instrumentName.value)) print('instrumentSN: ' + str(instrumentSN.value)) print('resourceName: ' + str(resourceName.value)) if not inUse.value: devStatus = WFS.WFS_init(resourceName, IDQuery, resetDevice, byref(instrumentHandle)) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_init():' + str(errorMessage.value)) else: print('WFS has been initialized. Instrument handle: ' +str(instrumentHandle.value)) else: print('WFS already in use') #Configure WFS camera devStatus = WFS.WFS_ConfigureCam(instrumentHandle, pixelFormat, camResolIndex, byref(spotsX), byref(spotsY)) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_ConfigureCam():' + str(errorMessage.value)) else: print('WFS camera configured') print('SpotsX: ' + str(spotsX.value)) print('SpotsY: ' + str(spotsY.value)) #Set pupil devStatus = WFS.WFS_SetPupil(instrumentHandle, pupilCenterXMm, pupilCenterYMm, pupilDiameterXMm, pupilDiameterYMm) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_SetPupil():' + str(errorMessage.value)) else: print('WFS pupil set') nSensorPoints = spotsX.value * spotsY.value def WFS_getAveragedWavefront(N = 10, lamb = 0.633): ''' Measure the average wavefront from WF sensor and return it as 2D array Parameters: N - number of measurements (default 10) lamb - wavelengh of the light to convert (default 0.633 micron, HeNe laser) Returns: meanWavefront - wavefront 2D-array, units: lambdas PV - peak to value, units: lambdas RMS - root-mean-square, units: lambdas ''' meanWavefront = np.zeros((spotsY.value,spotsX.value)) for t in range(N): #Take a spotfield image, with auto-exposure devStatus = WFS.WFS_TakeSpotfieldImageAutoExpos(instrumentHandle, byref(exposureTimeAct), byref(masterGainAct)) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_TakeSpotfieldImageAutoExpos():' + str(errorMessage.value)) #calculate spot centroids devStatus = WFS.WFS_CalcSpotsCentrDiaIntens(instrumentHandle, dynamicNoiseCut, calculateDiameters) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_CalcSpotsCentrDiaIntens():' + str(errorMessage.value)) #calculate spot to reference deviations devStatus = WFS.WFS_CalcSpotToReferenceDeviations(instrumentHandle, cancelWavefrontTilt) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_CalcSpotToReferenceDeviations():' + str(errorMessage.value)) #calculate the wavefront devStatus = WFS.WFS_CalcWavefront(instrumentHandle, 
wavefrontType, limitToPupil, arrayWavefront.ctypes.data) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_CalcWavefront():' + str(errorMessage.value)) #save the data as numpy array meanWavefront[:,:] += arrayWavefront[:spotsY.value,:spotsX.value].copy() #Calculate stats, units: lambda meanWavefront = meanWavefront/N/lamb meanWavefront = meanWavefront - np.nanmean(meanWavefront) #normalize by the mean PV = np.nanmax(meanWavefront) - np.nanmin(meanWavefront) RMS = np.sqrt(np.nanmean(meanWavefront**2,axis=(0,1))) return (meanWavefront, PV, RMS) #open the DM session dm_nActuators = 52 dm_trigger = ct.c_int32() dm_trigger.value = 0 dm_status = ct.c_int32() assert DM.mro_open(byref(dm_status)), errors[dm_status.value] def safe_voltage(cmd): "returns 1 if command data in the safe zone, between -1 and 1 Volt for individual actuators, and < 25 Volt sum of absolute values, \ returns 0 if unsafe" if cmd.min() >= -1.0 and cmd.max() <= 1.0 and np.sum(np.abs(cmd)) < 25.0: return 1 else: return 0 %%time #Measure the poke matrix of DM flat_path = homeDir + 'python/flat/FLAT_MIRAO_0274-01.mro' dm_command_flat = read_Mirao_commandFile(flat_path,DM) dm_command = np.zeros(dm_nActuators, dtype=np.float64) Varray = np.array([-0.25, 0.25]) #Volts, this is applied to every actuator sequentially PokeMatrix = np.zeros((len(Varray),nSensorPoints, dm_nActuators)) for iVolts in range(len(Varray)): for iActuator in range(dm_nActuators): dm_command[:] = 0 dm_command[iActuator] = Varray[iVolts] #cmd = dm_command + dm_command_flat cmd = dm_command #apply poke to DM actuator if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure the wavefront WF,_,_ = WFS_getAveragedWavefront(N=40) WF = WF - np.nanmean(WF) #WF is measured to arbitrary constant, so normalize by the mean WF = WF.reshape(nSensorPoints) # 2D -> 1D PokeMatrix[iVolts,:,iActuator] = np.nan_to_num(WF)/Varray[iVolts] #rescale to unit voltage vector and remember #reset DM to flat shape assert DM.mro_applySmoothCommand(dm_command_flat.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] #Check PokeMatrix iActuator = 29 plt.imshow(PokeMatrix[0,:,iActuator].reshape((29,29)),aspect = 'equal') plt.title('Single column of poke matrix \n (reshaped to sensor dimensions)') plt.colorbar() PokeMatrix_ave = PokeMatrix.mean(axis=0) # Plot mean poke matrices with +V and -V pokes plt.imshow(PokeMatrix_ave.T,aspect = 'auto') plt.xlabel('Sensor points') plt.ylabel('Actuator') plt.title('Mean poke matrix') plt.colorbar() #do the SVD pseudo-inverse now U, s, V = np.linalg.svd(PokeMatrix_ave, full_matrices=False) #plot the eigenvalues plt.plot(s) plt.grid('on') plt.xlabel('index') plt.ylabel('eigenvalue') print(s) #show the DM orthogonal modes nPrimaryModes = 52 dm_modes = np.zeros((nPrimaryModes,spotsX.value,spotsY.value)) for ind in range(nPrimaryModes): dm_modes[ind,:,:] = U[:,ind].reshape((spotsX.value,spotsY.value)) #plot first n x m modes n = 4 m = 13 zmin = -0.1 zmax = 0.1 fig, ax = plt.subplots(m,n, figsize = (3*n,3*m)) for i in range(n): for j in range(m): im = ax[j,i].imshow(dm_modes[i+j*n],vmin = zmin, vmax = zmax, aspect='equal') ax[j,i].text(0,-1, 'mode = ' + str(i+j*n),fontsize=10) fig.colorbar(im, ax = ax[j,i]) #Inverse PokeMatrix: # low-pass the inverse s_inv = 1.0/s s_thresh = s[0]/40 s_inv[s < s_thresh] = 0 print('Cut-off modes:' + 
str(np.where(s_inv == 0)[0])) S_inv = np.diag(s_inv) PokeMatrix_inv = np.dot(V.T,np.dot(S_inv, U.T)) #Set arbitrary DM target profile for test: DM_diam = 15.0 #mm x = np.linspace(-DM_diam/2,DM_diam/2,spotsX.value) y = np.linspace(-DM_diam/2,DM_diam/2,spotsY.value) X, Y = np.meshgrid(x, y) def test_gauss(x,y,sigma = 2): return np.exp(-x**2/(2*sigma**2) - y**2/(2*sigma**2)) def test_defocus(x,y,radius = 2000): #units: mm z = np.sqrt(radius**2 - x**2 - y**2) return z def test_flat(x,y): return np.ones(x.shape) DM_target_profile = test_flat(X,Y) #crop the corners to None values cropMask = np.ones(DM_target_profile.shape) crop_radius = DM_diam/2 for i in range(len(x)): for j in range(len(y)): if x[i]**2 + y[j]**2 >= crop_radius**2: cropMask[i,j] = None DM_target_profile = DM_target_profile * cropMask #DM_target_profile = DM_target_profile - np.nanmin(DM_target_profile) #Find the command for target profile, by using pseudo-inverse DM_target_command = np.dot(PokeMatrix_inv, np.nan_to_num(DM_target_profile).flatten()) DM_cmd = DM_target_command fig, ax = plt.subplots(1,3, figsize = (15,3.5)) # target shape im = ax[0].imshow(DM_target_profile, interpolation='nearest') PV = np.nanmax(DM_target_profile) - np.nanmin(DM_target_profile) ax[0].text(0,-1, 'Target profile: PV='"{:2.1f}".format(PV),fontsize=12) fig.colorbar(im, ax = ax[0]) # DM command map, 2D im = ax[1].imshow(DM_voltage_to_map(DM_cmd), interpolation='nearest') ax[1].text(0,-0.75, 'DM actuator voltages',fontsize=12) fig.colorbar(im, ax = ax[1]) # DM predicted profile: apply the command to the forward matrix A to see the pedicted shape WF_predicted = np.dot(PokeMatrix_ave,DM_cmd).reshape((spotsX.value,spotsY.value)) im = ax[2].imshow(WF_predicted, interpolation='nearest') PV = WF_predicted.max() - WF_predicted.min() RMS = np.sqrt(np.mean(WF_predicted**2)) ax[2].text(0,-1, 'Predicted WF: PV='+"{:2.1f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=12) fig.colorbar(im, ax = ax[2]) # Check if command is withing DM allowed limits: V1abs_max = 1.0 #Volt Vall_abs_max = 25.0 #Volt if np.abs(DM_cmd).max() >= V1abs_max: print('Voltage too high for individual actuators') if np.sum(np.abs(DM_cmd)) >= Vall_abs_max: print('Total absolute voltage is too high for all actuators') #Open-loop flattening of the WF fig, ax = plt.subplots(1,2, figsize = (10,3.5)) # measure the 'flat' WF without DM control. 
# The WF is not very flat because of imprefect optics and alignment # reset DM to flat shape assert DM.mro_applySmoothCommand(dm_command_flat.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] # measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) # plot im = ax[0].imshow(WF, interpolation='nearest') ax[0].text(0,-1, 'Measured WF (flat DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=12) fig.colorbar(im, ax = ax[0]) #Try to make WF flatter by applying open-loop DM control cmd = DM_target_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) #plot im = ax[1].imshow(WF, interpolation='nearest') ax[1].text(0,-1, 'Measured WF (open-loop DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=12) fig.colorbar(im, ax = ax[1]) #reset DM back to flat shape assert DM.mro_applySmoothCommand(dm_command_flat.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] ``` So the result of **open-loop** control is not very safisfactory - the command for flattening the WF makes WF actually somewhat less flat than before. An iterative closed-loop control (with **low-pass filter** by eigenvalues) should be better - see below. ``` %%time #Closed-loop control #set DM to open-loop shape assert DM.mro_applySmoothCommand(DM_target_command.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] #measure initial WF WF, _, _ = WFS_getAveragedWavefront(N=20) DM_target_profile = DM_target_profile - np.nanmean(DM_target_profile) #normalize target profile DM_CL_command = DM_target_command #set initial command n_iterations = 100 gain = -0.2 PV_RMS_timelapse = np.zeros((n_iterations,2)) WF_timelapse = np.zeros((n_iterations,WF.shape[0],WF.shape[1])) for it in range(n_iterations): #Calculate the diff between measured and target WFs: WF_diff = WF - DM_target_profile WF_diff = WF_diff - np.nanmean(WF_diff) #normalize #Find the control command using pseudo-inverse DM_command_increment = np.dot(PokeMatrix_inv, np.nan_to_num(WF_diff).flatten()) #Try to make WF flatter by applying the updated command DM_CL_command = DM_CL_command + DM_command_increment*gain cmd = DM_CL_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Voltage is outside of safe range. Reducing gain.' gain /= 2.0 #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) WF_timelapse[it,:,:] = WF PV_RMS_timelapse[it,:] = (PV,RMS) plt.plot(PV_RMS_timelapse[:,0]/5,label='PV/5') plt.plot(PV_RMS_timelapse[:,1],label='RMS') plt.legend() plt.xlabel('iteration') plt.grid('on') plt.title('Timelapse of errors, PV and RMS, by iterations') ``` Note that RMS error stays low once it converged to some small value, so errors do not accumulate as in basic (non-filtered eigenvalues) algorithm. 
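To see why the closed loop tolerates an imperfect influence matrix, here is a toy simulation (synthetic matrices, not the instrument code in this notebook) of the same update rule: the command is repeatedly corrected by the filtered pseudo-inverse applied to the measured residual, so calibration error in the model only slows convergence instead of leaving a fixed bias as in the open-loop case:

```
import numpy as np

rng = np.random.default_rng(0)

# A "true" influence matrix (sensor points x actuators) and a slightly
# mis-calibrated model of it, mimicking measurement/calibration error.
A_true = rng.normal(size=(40, 8))
A_model = A_true + 0.1 * rng.normal(size=A_true.shape)

# Low-pass (truncated) pseudo-inverse of the model, as for PokeMatrix_inv above.
U, s, Vt = np.linalg.svd(A_model, full_matrices=False)
s_inv = np.where(s > s[0] / 40, 1.0 / s, 0.0)
A_inv = Vt.T @ np.diag(s_inv) @ U.T

w_target = A_true @ rng.normal(size=8)     # a reachable target wavefront
c = A_inv @ w_target                       # open-loop command
gain = 0.5

for it in range(10):
    residual = w_target - A_true @ c       # "measured" residual error
    c = c + gain * (A_inv @ residual)      # closed-loop correction
    print(it, np.sqrt(np.mean(residual ** 2)))   # RMS residual shrinks each iteration
```

(The notebook's `gain = -0.2` together with `WF_diff = WF - target` is the same update written with the opposite sign convention.)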
``` plt.imshow(WF_timelapse[-1,:,:]) plt.colorbar() #Plot target, open-loop and closed-loop WFs fig, ax = plt.subplots(1,3, figsize = (15,3.5)) # target WF im = ax[0].imshow(DM_target_profile, interpolation='nearest') PV = np.nanmax(DM_target_profile) - np.nanmin(DM_target_profile) ax[0].text(0,-1, 'Target WF - flat \n PV=' + "{:2.1f}".format(PV) + ', RMS=' + "{:1.2f}".format(0),fontsize=14) fig.colorbar(im, ax = ax[0]) # open-loop WF cmd = DM_target_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) #plot im = ax[1].imshow(WF, interpolation='nearest') ax[1].text(0,-1, 'Measured WF (open loop DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=14) fig.colorbar(im, ax = ax[1]) # closed-loop WF cmd = DM_CL_command if safe_voltage(cmd): assert DM.mro_applySmoothCommand(cmd.ctypes.data,dm_trigger,byref(dm_status)), errors[dm_status.value] else: print 'Error: voltage is outside of safe range' #measure WF, PV, RMS = WFS_getAveragedWavefront(N=20) #plot im = ax[2].imshow(WF, interpolation='nearest') ax[2].text(0,-1, 'Measured WF (closed loop DM) \n PV=' + "{:2.2f}".format(PV) + ', RMS=' + "{:1.2f}".format(RMS) + ' ($\lambda$)',fontsize=14) fig.colorbar(im, ax = ax[2]) #close the WFS session devStatus = WFS.WFS_close(instrumentHandle) if(devStatus != 0): errorCode.value = devStatus WFS.WFS_error_message(instrumentHandle,errorCode,errorMessage) print('error in WFS_close():' + str(errorMessage.value)) else: print('WFS closed') #close the DM session assert DM.mro_close(byref(dm_status)), errors[dm_status.value] ```
# Statistical Relational Learning with `pslpython`

As we've seen, there are several ways to work with graph-based data, including: SPARQL queries, graph algorithm traversals, ML embedding, etc. Each of these methods makes trade-offs in terms of:

* computational costs as the graph size scales
* robustness when there is uncertainty or conflicting information in the graph
* formalism (i.e., *analytic solutions*) vs. empirical approaches (i.e., data-driven, machine learning)

One way to visualize some of these trade-offs is in the following diagram:

<img src="https://github.com/DerwenAI/kglab/blob/main/docs/tradeoffs.png?raw=true" width="400"/>

Note in the top/right corner of the diagram that a relatively formal category of graph-based approaches is called [*statistical relational learning*](https://www.cs.umd.edu/srl-book/). The gist is that much of the *network analysis* we want to perform can be described mathematically as [*Markov networks*](https://en.wikipedia.org/wiki/Markov_random_field), in terms of probabilistic models. Sometimes these can be quite computationally expensive; for example, hedge funds on Wall Street tend to burn lots of cloud computing on Markov models. They are *robust* in terms of being able to work well even with lots of missing or conflicting data, and the *formalism* implies that we can infer mathematical guarantees from the analysis. That's quite the opposite of deep learning models, which are great at predicting sequences of things, but terrible at providing guarantees.

Clearly, there's been much emphasis in industry recently that equates "artificial intelligence" with "deep learning", although we are also recognizing [*diminishing returns*](https://derwen.ai/s/zf43#33) for methods that rely purely on ever-larger data rates and ever-larger ML models. One path forward will be to combine machine learning with the use of *structured knowledge* (i.e., KGs), so that we can avoid "boiling the oceans" with purely data-driven approaches when in so many use cases we can leverage domain expertise.

In this section we'll consider one form of statistical relational learning called [*probabilistic soft logic*](https://psl.linqs.org/) (PSL), which is essentially a kind of "fuzzy logic" for graphs with interesting computational qualities. Whereas many kinds of formal graph analysis (e.g., the "traveling salesman problem") are provably hard and quite expensive in practice, PSL can be solved with a *convex optimization*, like so many machine learning algorithms.

Consider this: we can describe "rules" about nodes and relations in a KG, then assign probabilities to specific instances of those rules that are found within our graph. If the probabilities are all *zero* then the system is consistent. As some of the assigned probabilities are increased, some of the rules become inconsistent. How high (i.e., how optimal) a set of probabilities can we assign while still keeping the system consistent? Alternatively, if we apply a set of rules, then how "far away" (probabilistically speaking) is a graph from being logically consistent?

This comes in quite handy when we want to combine *semantic technologies* and *machine learning*, or rather when we have explicit rules plus lots of empirical data. Data quality is a persistent problem, so we can leverage PSL to identify which parts of the graph seem the least "logically consistent", and therefore need some review and curation.
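To make the "how far from logically consistent" question concrete, here is a tiny standalone illustration (not the `pslpython` API) of how soft logic scores a single rule. PSL relaxes Boolean logic using the Lukasiewicz t-norms, so a conjunction becomes `max(0, a + b - 1)` and a rule `body -> head` incurs a penalty of `max(0, body - head)`; inference then minimizes the weighted sum of these penalties over all grounded rules, which is what makes it a convex optimization:

```
# Lukasiewicz relaxation used by PSL (illustration only, not the pslpython API)

def soft_and(a, b):
    """Soft conjunction of two truth values in [0, 1]."""
    return max(0.0, a + b - 1.0)

def distance_to_satisfaction(body, head):
    """A soft rule body -> head is satisfied when head >= body;
    otherwise the penalty grows linearly with the gap."""
    return max(0.0, body - head)

# e.g. "Neighbors(P1, L) & Neighbors(P2, L) -> Knows(P1, P2)" with soft truth values:
body = soft_and(0.9, 0.8)                    # both antecedents nearly true -> 0.7
print(distance_to_satisfaction(body, 0.2))   # head mostly false -> penalty 0.5
print(distance_to_satisfaction(body, 0.9))   # head true enough  -> penalty 0.0
```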
## RDF representation of the "simple acquaintances" example One of the examples given for PSL is called [*simple acquaintances*](https://github.com/linqs/psl-examples/tree/master/simple-acquaintances), which uses a graph of some friends, where they live, what interests they share, and then infers who probably knows whom. Some people explicitly do or do not know each other, while other "knows" relations can be inferred based on whether two people have lived in the same place or share common interest. The objective is to build a PSL model for [*link prediction*](https://en.wikipedia.org/wiki/Link_prediction), to evaluate the annotations in the friend graph. In this case, we'll assume that the "knows" relations have been added from a questionable source (e.g., some third-party dataset) so we'll measure a subset of these relations and determine their likelihood. NB: this is really useful for cleaning up annotations in a large graph! First, let's load a KG which is an RDF representation of this example, based on a simple extension of the [`foaf`](http://www.foaf-project.org/) vocabulary: ``` import kglab namespaces = { "acq": "http://example.org/stuff/", "foaf": "http://xmlns.com/foaf/0.1/", "rdfs": "http://www.w3.org/2000/01/rdf-schema#", } kg = kglab.KnowledgeGraph( name = "LINQS simple acquaintance example for PSL", base_uri = "http://example.org/stuff/", language = "en", namespaces = namespaces, ) kg.load_ttl("acq.ttl") ``` Take a look at the `acq.ttl` file to see the people and their relations. Here's a quick visualization of the graph: ``` VIS_STYLE = { "foaf": { "color": "orange", "size": 5, }, "acq":{ "color": "blue", "size": 30, }, } g = kg.vis_pyvis(notebook=True, style=VIS_STYLE) g.barnes_hut() g.show("tmp.html") ``` Also, let's serialize this in TTL/Turtle format, then view the resulting `foo.ttl` file to see the relations organized in a way that's probably more readable: ``` kg.save_ttl("foo.ttl") ``` ## Loading a PSL model Next, we'll use the (`pslpython`](https://pypi.org/project/pslpython/) library implemented in Python (atop Java core software) to define three *predicates* (i.e., relations – similar as in RDF) which are: `Neighbors`, `Likes`, `Knows` ``` from pslpython.model import Model from pslpython.partition import Partition from pslpython.predicate import Predicate from pslpython.rule import Rule model = Model("simple acquaintances") # add predicates predicate = Predicate("Neighbors", closed=True, size=2) model.add_predicate(predicate) predicate = Predicate("Likes", closed=True, size=2) model.add_predicate(predicate) predicate = Predicate("Knows", closed=False, size=2) model.add_predicate(predicate) ``` Next, we'll add a set of probabilistic [*rules*](https://psl.linqs.org/wiki/2.2.1/Rule-Specification.html), all with different weights applied: 1. "Two people who live in the same place are **more** likely to know each other" 2. "Two people who don't live in the same place are **less** likely to know each other" 3. "Two people who share a common interest are **more** likely to know each other" 4. "Two people who both know a third person are **more** likely to know each other" 5. 
"Otherwise, any pair of people are **less** likely to know each other" ``` model.add_rule(Rule("20: Neighbors(P1, L) & Neighbors(P2, L) & (P1 != P2) -> Knows(P1, P2) ^2")) model.add_rule(Rule("5: Neighbors(P1, L1) & Neighbors(P2, L2) & (P1 != P2) & (L1 != L2) -> !Knows(P1, P2) ^2")) model.add_rule(Rule("10: Likes(P1, L) & Likes(P2, L) & (P1 != P2) -> Knows(P1, P2) ^2")) model.add_rule(Rule("5: Knows(P1, P2) & Knows(P2, P3) & (P1 != P3) -> Knows(P1, P3) ^2")) model.add_rule(Rule("5: !Knows(P1, P2) ^2")) ``` Finally we'll add a *commutative* rule such that "If Person 1 knows Person 2, then Person 2 also knows Person 1." ``` model.add_rule(Rule("Knows(P1, P2) = Knows(P2, P1) .")) ``` To initialize the model, we'll clear any pre-existing data from each of the predicates: ``` for predicate in model.get_predicates().values(): predicate.clear_data() ``` And we'll define a simple helper function, to format a unique URL within our `acq` vocabulary (a simple extension of `foaf`) based on the purely numeric identifiers used within PSL: ``` def get_person_id (url): return url.replace("http://example.org/stuff/person_", "") ``` Let's query our KG to populate data into the `Neighbors` predicate in the PSL model, based on `foaf:based_near` that represents living near the same locations: ``` predicate = model.get_predicate("Neighbors") query = """ SELECT DISTINCT ?p1 ?p2 WHERE { ?p1 foaf:based_near ?l . ?p2 foaf:based_near ?l . } """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if p1 != p2: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2]) ``` Note: these data points are *observations*, i.e., empirical support for the probabilistic model. Then let's query our KG to populate data into the `Likes` predicate in the PSL model, based on shared interests in `foaf:topic_interest` topics: ``` predicate = model.get_predicate("Likes") query = """ SELECT DISTINCT ?p1 ?p2 WHERE { ?p1 foaf:topic_interest ?t . ?p2 foaf:topic_interest ?t . } """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if p1 != p2: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2]) ``` Just for kicks, let's take a look at the internal representation of a PSL predicate, which is a `pandas` DataFrame: ``` predicate = model.get_predicate("Likes") predicate.__dict__ ``` Now we'll load data from the `dat/psl/knows_targets.txt` CSV file, which is a list of `foaf:knows` relations in our graph that we want to analyze. Each of these has an assumed value of `1.0` (true) or `0.0` (false). 
Our PSL analysis will assign probabilities for each so that we can compare which annotations appear to be suspect and require further review: ``` import csv import pandas as pd import rdflib as rdf targets = [] rows_list = [] predicate = model.get_predicate("Knows") with open("dat/psl/knows_targets.txt", "r") as f: reader = csv.reader(f, delimiter="\t") for i, row in enumerate(reader): p1, p2 = row targets.append((p1, p2)) p1_url = rdf.URIRef("http://example.org/stuff/person_" + p1) p2_url = rdf.URIRef("http://example.org/stuff/person_" + p2) if (p1_url, kg.get_ns("foaf").knows, p2_url) in kg._g: truth = 1.0 predicate.add_data_row(Partition.TRUTH, [p1, p2], truth_value=truth) predicate.add_data_row(Partition.TARGETS, [p1, p2]) rows_list.append({ 0: p1, 1: p2, "truth": truth}) elif (p1_url, kg.get_ns("acq").wantsIntro, p2_url) in kg._g: truth = 0.0 predicate.add_data_row(Partition.TRUTH, [p1, p2], truth_value=truth) predicate.add_data_row(Partition.TARGETS, [p1, p2]) rows_list.append({ 0: p1, 1: p2, "truth": truth}) else: print("UNKNOWN", p1, p2) df_dat = pd.DataFrame(rows_list) ``` These data points are considered to be *ground atoms*, each with a *truth* value set initially. These are also our *targets* for which nodes in the graph to analyze based on the rules. Next, we'll add `foaf:knows` observations which are in the graph, although not among our set of targets. This provides more evidence for the probabilistic inference. Note that since RDF does not allow for representing probabilities on relations, we're using the `acq:wantsIntro` to represent a `foaf:knows` with a `0.0` probability: ``` predicate = model.get_predicate("Knows") query = """ SELECT ?p1 ?p2 WHERE { ?p1 foaf:knows ?p2 . } ORDER BY ?p1 ?p2 """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if (p1, p2) not in targets: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2], truth_value=1.0) query = """ SELECT ?p1 ?p2 WHERE { ?p1 acq:wantsIntro ?p2 . } ORDER BY ?p1 ?p2 """ for row in kg.query(query): p1 = get_person_id(row[0]) p2 = get_person_id(row[1]) if (p1, p2) not in targets: predicate.add_data_row(Partition.OBSERVATIONS, [p1, p2], truth_value=0.0) ``` Now we're ready to optimize the PSL model – this may take a few minutes to run: ``` PSL_OPTIONS = { "log4j.threshold": "INFO" } results = model.infer(additional_cli_optons=[], psl_config=PSL_OPTIONS) ``` Let's examine the results. We'll get a `pandas` DataFrame describing the targets in the `Knows` predicate: ``` predicate = model.get_predicates()["KNOWS"] df = results[predicate] df.head() ``` Now we can compare the "truth" values from our targets, with their probabilities from the inference provided by the PSL model: ``` dat_val = {} for index, row in df_dat.iterrows(): p1 = row[0] p2 = row[1] key = (int(p1), int(p2)) dat_val[key] = row["truth"] for index, row in df.iterrows(): p1 = row[0] p2 = row[1] key = (int(p1), int(p2)) df.at[index, "diff"] = row["truth"] - dat_val[key] df ``` In other words, which of these "knows" relations in the graph appears to be suspect, based on our rules plus the other evidence in the graph? Let's visualize a histogram of how the inferred probabilities are distributed: ``` df["diff"].hist(); ``` In most cases there is little or no difference (`0.0 <= d <= 0.2`) in the probabilities for the target relations. However, some appear to be off by a substantial (`-0.8`) amount, which indicates problems in that part of our graph data. 
The following rows show where these `foaf:knows` annotations in the graph differ significantly from the truth values predicted by PSL:

```
for index, row in df.iterrows():
    p1 = row[0]
    p2 = row[1]
    diff = row["diff"]

    if diff < -0.2:
        print("?? acq:person_{} foaf:knows acq:person_{}".format(int(row[0]), int(row[1])))
```

Speaking of [*human-in-the-loop*](https://derwen.ai/d/human-in-the-loop) practices for AI, using PSL along with a KG seems like a great way to leverage machine learning, so that people can focus on the parts of the graph that have the most uncertainty, and therefore probably get the best ROI for the time and cost invested in curation.

---

## Exercises

**Exercise 1:** Build a PSL model that tests the "noodle vs. pancake" rules used in an earlier example with our recipe KG. Which recipes should be annotated differently?

**Exercise 2:** Try representing one of the other [PSL examples](https://github.com/linqs/psl-examples/) using RDF and `kglab`.
``` import os import sys import time import numpy as np import pandas as pd from scipy import misc import matplotlib.pyplot as plt from scipy import sparse from scipy.sparse import csgraph from scipy import linalg from pysheds.grid import Grid from scipy import ndimage from matplotlib import colors import seaborn as sns import matplotlib import matplotlib.cm as cm from matplotlib.collections import LineCollection import matplotlib.gridspec as gridspec %matplotlib inline sns.set_palette('husl', 2) grid = Grid.from_raster('../data/n30w100_dir', data_name='dir') grid.read_raster('../data/n30w100_con', data_name='dem') dirmap = (64, 128, 1, 2, 4, 8, 16, 32) # Specify pour point x, y = -97.294167, 32.73750 # Delineate the catchment grid.catchment(data='dir', x=x, y=y, dirmap=dirmap, out_name='catch', recursionlimit=15000, xytype='label') # Clip the bounding box to the catchment grid.clip_to('catch', precision=5) grid.accumulation(data='catch', dirmap=dirmap, pad_inplace=False, out_name='acc') profiles, connections = grid.extract_profiles('catch', grid.acc > 25) geom = grid.extract_river_network('catch', grid.acc > 25) ``` # Lo Diffusion + advection ``` dx = 60. * (np.asarray([profile.size for profile in profiles]) + 1) n = max(max(connections.keys()), max(connections.values())) + 1 s = np.ones(n) I = np.eye(n) u = 1.0*np.ones(n) D = (u * dx / 10).mean()*np.ones(n) dt = 1 I[0, 0] = 0 U = np.zeros((n, n)) W = np.zeros((n, n)) for i, j in connections.items(): # Confirmed as i U[j, i] = u[i] * dt / dx[i] W[i, j] = u[i] * dt / dx[i] V = np.diag(W @ s) X = np.zeros((n, n)) for i, j in connections.items(): X[i, j] = 2 / (dx[i] + dx[j]) X[j, i] = 2 / (dx[i] + dx[j]) Y = np.diag(X @ s) Z = np.diag(D * dt / dx) S = (U - V) - Z @ (Y - X) + I np.imag(np.linalg.eig(S)[0]).max(), np.imag(np.linalg.eig(S)[0]).min(), np.real(np.linalg.eig(S)[0]).max(), np.real(np.linalg.eig(S)[0]).min() C = np.eye(n) ns = 11 W_c = linalg.solve_discrete_lyapunov(S, C) st = np.argsort(np.diag(W_c))[::-1][:ns] W_o = linalg.solve_discrete_lyapunov(S.T, C) np.random.seed(0) x0 = np.ones(S.shape[0]) from numba import njit sr = np.asarray([ 1, 128, 136, 52, 64, 92, 194, 9, 143, 161, 191, 13, 71, 15, 125, 77, 141, 198, 58, 150, 102, 12, 88, 164, 204, 2, 70, 87, 159, 177, 197, 22]) st = np.asarray([ 5, 8, 14, 42, 26, 76, 51, 89, 45, 55, 63, 83, 28, 1, 65, 85, 104, 93, 103, 121, 115, 13, 11, 140, 9, 194, 107, 6, 71, 64, 31, 40]) @njit(fastmath=True) def compute_multi_x0s(X0, S, C, T): Ac = np.eye(S.shape[0]) Wo_r = np.zeros(S.shape) R_r = np.zeros(X0.shape) X0_hat = np.zeros(X0.shape) m = X0.shape[0] for _ in range(T): AC = C @ Ac Wo_r += (AC.T @ AC) for i in range(m): y = AC @ X0[i] R_r[i] += (AC.T @ y) Ac = S @ Ac Wo_r_inv = np.linalg.pinv(Wo_r) for i in range(m): X0_hat[i] = (Wo_r_inv @ R_r[i]) return X0_hat C = np.eye(S.shape[0])[sr[:11]] X0 = np.vstack([x0, x0, x0, x0]) %time x0_hats = compute_multi_x0s(X0, S, C, 4000) Adj = np.zeros((n, n)) for i, j in connections.items(): # Confirmed as i Adj[j, i] = 1 Adj[i, j] = 1 Lap = sparse.csgraph.laplacian(Adj) L, U = np.linalg.eigh(Lap) ghat = np.exp(-100*L) np.random.seed(0) trials = 200 T = np.random.choice(len(U), size=trials, replace=False) ghat = [np.exp(np.minimum((100*np.random.randn() - 100), -1)*L) for _ in range(trials)] X0 = np.vstack([(U @ np.diag(ghat[i]) @ U[T[i]]).ravel() for i in range(trials)]) X0 = X0 / np.linalg.norm(X0, axis=1).reshape(-1, 1) start = time.time() x0_hats = compute_multi_x0s(X0, S, C, 4000) end = time.time() print(end - start, ' s') ``` # Rank-based ``` 
C = np.eye(len(S)) X0_hats_r = {} first_start = time.time() for i in range(1, len(sr)): sensors = sr[:i].tolist() Ci = C[sensors] start = time.time() x0_hats_i = compute_multi_x0s(X0, S, Ci, 3600) end = time.time() print(f'{i}: ', end - start, ' s') X0_hats_r[i] = x0_hats_i print('Total: ', end - first_start, ' s') ``` # Trace-based ``` C = np.eye(len(S)) X0_hats_t = {} first_start = time.time() for i in range(1, len(st)): sensors = st[:i].tolist() Ci = C[sensors] start = time.time() x0_hats_i = compute_multi_x0s(X0, S, Ci, 3600) end = time.time() print(f'{i}: ', end - start, ' s') X0_hats_t[i] = x0_hats_i print('Total: ', end - first_start, ' s') ``` # Random placement ``` np.random.seed(0) sss = [np.random.choice(np.arange(1, len(Adj)), size=len(sr), replace=False) for _ in range(10)] C = np.eye(len(S)) X0_hats_U = [] for it, ss in enumerate(sss[:]): first_start = time.time() X0_hats_ui = {} print(it) for i in range(1, len(ss)): sensors = ss[:i].tolist() Ci = C[sensors] start = time.time() x0_hats_i = compute_multi_x0s(X0, S, Ci, 3600) end = time.time() print(f'{i}: ', end - start, ' s') X0_hats_ui[i] = x0_hats_i print('Total: ', end - first_start, ' s') X0_hats_U.append(X0_hats_ui) mse_by_num_sensors_r = np.asarray([((X0_hats_r[i] - X0)**2).sum() / trials for i in range(1, 32)]) mse_by_num_sensors_t = np.asarray([((X0_hats_t[i] - X0)**2).sum() / trials for i in range(1, 32)]) mse_by_num_sensors_U = [np.asarray([((X0_hats_u[i] - X0)**2).sum() / trials for i in range(1, 32)]) for X0_hats_u in X0_hats_U] sns.set() fig, ax = plt.subplots(1, 2, figsize=(10, 4)) ax[0].plot(np.arange(1, 32), mse_by_num_sensors_U[0], label='randomized', c='0.5', alpha=0.35) for mse_by_num_sensors_u in mse_by_num_sensors_U[1:]: ax[0].plot(np.arange(1, 32), mse_by_num_sensors_u, c='0.5', alpha=0.35) ax[0].plot(np.arange(1, 32), mse_by_num_sensors_r, label='rank-based', c='red') ax[0].plot(np.arange(1, 32), mse_by_num_sensors_t, label='trace-based', c='blue') ax[0].set_xlabel('Number of sensors') ax[0].set_ylabel('Mean squared error') ax[0].set_title('Reconstruction error vs. number of sensors') ax[0].legend() ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_U[0]), label='randomized', c='0.5', alpha=0.35) for mse_by_num_sensors_u in mse_by_num_sensors_U[1:]: ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_u), c='0.5', alpha=0.35) ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_r), label='rank-based', c='red') ax[1].plot(np.arange(1, 32), np.log10(mse_by_num_sensors_t), label='trace-based', c='blue') ax[1].set_xlabel('Number of sensors') ax[1].set_ylabel('Log of mean squared error') ax[1].set_title('Log reconstruction error vs. number of sensors') ax[1].legend() plt.tight_layout() std_by_num_sensors_r = np.asarray([((X0_hats_r[i] - X0)).std() for i in range(1, 32)]) std_by_num_sensors_t = np.asarray([((X0_hats_t[i] - X0)).std() for i in range(1, 32)]) std_by_num_sensors_u0 = np.asarray([((X0_hats_U[0][i] - X0)).std() for i in range(1, 32)]) import pickle with open('X0_hat_r.p', 'wb') as outfile: pickle.dump(X0_hats_r, outfile) with open('X0_hat_t.p', 'wb') as outfile: pickle.dump(X0_hats_t, outfile) for i in range(len(X0_hats_U)): with open(f'X0_hat_u{i}.p', 'wb') as outfile: pickle.dump(X0_hats_U[i], outfile) ```
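To make the reconstruction step inside `compute_multi_x0s` concrete, the sketch below repeats it on a toy 3-state system. The matrices `A`, `C_toy`, and `x0_true` are illustrative assumptions, not taken from the river network; the estimate is the least-squares solution `pinv(sum_t (C A^t)^T (C A^t)) @ (sum_t (C A^t)^T y_t)`, which is what the accumulation loop above computes with `S` in the role of `A`.
```
import numpy as np

# Toy 3-state system (assumed values, for illustration only)
A = np.array([[0.9, 0.1, 0.0],
              [0.0, 0.8, 0.2],
              [0.0, 0.0, 0.7]])
C_toy = np.array([[1.0, 0.0, 0.0]])       # a single sensor on state 0
x0_true = np.array([1.0, -2.0, 0.5])

T = 50
Wo = np.zeros((3, 3))                     # finite-horizon observability Gramian
r = np.zeros(3)                           # accumulated C-weighted measurements
Ak = np.eye(3)
for _ in range(T):
    CA = C_toy @ Ak
    y = CA @ x0_true                      # noiseless measurement at this step
    Wo += CA.T @ CA
    r += CA.T @ y
    Ak = A @ Ak

x0_hat = np.linalg.pinv(Wo) @ r
print(np.allclose(x0_hat, x0_true, atol=1e-6))   # True: x0 is recovered exactly
```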
# Final Project Submission * Student name: `Reno Vieira Neto` * Student pace: `self paced` * Scheduled project review date/time: `Fri Oct 15, 2021 3pm – 3:45pm (PDT)` * Instructor name: `James Irving` * Blog post URL: https://renoneto.github.io/using_streamlit #### This project originated the [following app](https://movie-recommender-reno.herokuapp.com/). I'd recommend playing with the app and then coming back here to understand how the model behind it works. # Table of Contents <a class="anchor" id="toc"></a> - **[Business Case and Goals](#bc)** - **[The Dataset](#td)** - **[Dataset Exploration and Cleaning](#dec)** - **[No. of Movies by Genre](#mg)** - **[No. of Ratings per Year](#ry)** - **[No. of Users rating movies per Year](#urm)** - **[Recommender System](#rs)** - **[Create Popularity Model](#pop)** - **[Collaborative-Based Filtering](#colab)** - **[Hyperparameter Tuning](#grid)** - **[Try different models](#dif)** - **[Model Evaluation](#eval)** - **[Create function to take user input and give recommendations (+ hint of content-based attribute)](#func)** - **[Conclusion](#conclusion)** - **[Export files to create app](#lit)** - **[Improvements](#improvements)** # Business Case and Goal <a class="anchor" id="bc"></a> In this project, I'm creating a movie recommender using the [MovieLens dataset](https://grouplens.org/datasets/movielens/) to build a model that provides top 5 movie recommendations to a user, based on their ratings of other movies. I'm going to be addressing the cold start problem as well by being able to deal with users with no movie ratings. # The Dataset <a class="anchor" id="td"></a> The MovieLens dataset is a "classic" recommendation system dataset used in numerous academic papers and machine learning proofs-of-concept. [You can find more about it here](https://grouplens.org/datasets/movielens/) # Dataset Exploration and Cleaning <a class="anchor" id="dec"></a> ## Import necessary packages ``` import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt import re import time from surprise import Reader, Dataset, dump from surprise.model_selection import cross_validate, GridSearchCV from surprise.prediction_algorithms import KNNBasic, KNNBaseline, SVD, SVDpp from surprise.accuracy import rmse from sklearn.model_selection import train_test_split import warnings warnings.filterwarnings('ignore') %matplotlib inline # Import datasets df_movies = pd.read_csv('./app/data/movies.csv') df_ratings = pd.read_csv('./app/data/ratings.csv') # Show first rows display(df_movies.head()) display(df_ratings.head()) ``` #### Notes - Breakdown genres into different columns (one-hot encoding) - `title` seems to have the release year of the movie. It might be interesting to have title and year in different columns. ``` # Check for nulls and data types display(df_movies.info()) display(df_ratings.info()) ``` #### Notes - No nulls - Might need to convert timestamps to `datetime` - There are 9742 movies in the dataset - 100836 ratings ### `df_movies` First, I'm going to start exploring the movies dataset to understand what I'm dealing with. 
``` # Create column with array of genres and calculate the Number of Genres per movie df_movies['genres_array'] = df_movies['genres'].str.split('|') # Flattened genres stacked_genres = df_movies['genres_array'].apply(pd.Series).stack(level=0).reset_index() stacked_genres.columns = ['index', 'level_1', 'genre'] # Combine original dataframe with flattened genres using the index df_movies_new = pd.merge(df_movies, stacked_genres, how='left', left_index=True, right_on=['index']) df_movies_new = df_movies_new[['movieId', 'title', 'genre']] # One-hot Encoding of Genre column one_hot = pd.get_dummies(df_movies_new['genre']) # Get list of genres (it's going to be useful soon) list_of_genres = list(one_hot.columns) # Combine the new dataframe with the one-hot encoded dataframe df_movies_new = pd.merge(df_movies_new, one_hot, left_index=True, right_index=True) df_movies_new = df_movies_new.drop('genre', axis=1) # Use groupby to have one row per movie df_movies_new = df_movies_new.groupby(['movieId', 'title']).sum()[list_of_genres].reset_index() # Split year and title df_movies_new['release_year'] = df_movies_new.apply(lambda x: x['title'].strip()[-5:][:-1], axis=1) df_movies_new['release_year'] = df_movies_new.apply(lambda x: x['release_year'] if len(re.findall("[0-9]{4}", x['release_year'])) == 1 else np.nan, axis=1) df_movies_new['title'] = df_movies_new.apply(lambda x: x['title'][:-6].strip() if x['release_year'] != np.nan else x['title'], axis=1) ``` ### No. of Movies by genre <a class="anchor" id="mg"></a> **[Go back to Table of Contents](#toc)** ``` # Create empty dictionary to store the no of movies by genre no_of_movies_by_genre = {} for genre in list_of_genres: no_of_movies = df_movies_new[genre].sum() no_of_movies_by_genre[genre] = no_of_movies # Transform that into a dataframe to_plot = pd.DataFrame.from_dict(no_of_movies_by_genre, orient='index').reset_index() to_plot.columns = ['genre', 'no_of_movies'] to_plot = to_plot.sort_values('no_of_movies', ascending=False).reset_index(drop=True) # Plot plt.figure(figsize=(10,8)) sns.barplot(x="no_of_movies", y="genre", data=to_plot) plt.title('No of Movies by Genre', size=14) plt.xlabel('No. of Movies', size=13) plt.ylabel(None) plt.show() ``` #### Note - We are dealing with an unbalanced dataset from the perspective of the genres. There are way more Drama and Comedy movies than other genres. The consequence of that to the model is that certain genres will have a smaller set of options to choose from. ### `df_ratings` ### No. of Ratings per Year <a class="anchor" id="ry"></a> I wonder how many ratings were created per year. **[Go back to Table of Contents](#toc)** ``` # Convert timestamp column to datetime df_ratings['datetime'] = pd.to_datetime(df_ratings['timestamp'], unit='s') df_ratings['year'] = df_ratings['datetime'].dt.year # Create plot with No. of ratings per year to_plot = df_ratings.groupby('year').count()['rating'].reset_index() plt.figure(figsize=(17,5)) sns.barplot(x='year', y='rating', data=to_plot, color='blue', alpha=0.5) plt.title('No of Ratings per Year') plt.show() ``` **Note** - I don't see any trends. It's great to see that the last 4 years of the dataset had almost the same number of ratings. ### No. of Users rating movies per Year <a class="anchor" id="urm"></a> **[Go back to Table of Contents](#toc)** ``` # Create Plot with No. 
of Unique Users giving ratings to_plot = df_ratings.groupby('year').nunique()['userId'].reset_index() plt.figure(figsize=(17,5)) sns.barplot(x='year', y='userId', data=to_plot, color='blue', alpha=0.5) plt.title('No. of Users rating movies per Year') plt.show() ``` **Note** - Not many users rating movies. Around 40 per year. # Recommender System <a class="anchor" id="rs"></a> ## Create Popularity Model <a class="anchor" id="pop"></a> The first model is going to be very simple. It's a popularity model. Basically I'm going to rank movies by popularity. However, I need to find a way to scale the ratings because a movie with 100 ratings with an average of 4.5 and another with 2 with an average of 4.75 are completely different. I'd argue that the first movie actually has a higher rating score than the second one since more users have rated it with a high score. To address that problem I'm using the IMDB's Weighted Rating Method I found [online](https://math.stackexchange.com/questions/169032/understanding-the-imdb-weighted-rating-function-for-usage-on-my-own-website) that does a good job at weighting the ratings. #### Calculation ![](https://image.ibb.co/jYWZp9/wr.png) where, * v is the number of votes for the movie; * m is the minimum votes required to be listed in the chart; * R is the average rating of the movie; And * C is the mean vote across the whole report #### C: Calculate mean vote across the whole dataset ``` # Calculate Mean and Count the No. of Ratings to a given movie mean_ratings_df = df_ratings.groupby('movieId').agg(avg_rating=('rating', 'mean'), count_rating=('rating', 'count')).reset_index() # Calculate the Overall Average Rating mean_ratings_df['overall_avg_rating'] = mean_ratings_df['avg_rating'].mean() mean_ratings_df.head() ``` #### m: Define the minimum number of ratings required to be listed To define the minimum number of votes I'm going to look at the distribution of No. of Ratings by Movies. ``` # Plot plt.figure(figsize=(15,5)) sns.boxplot(x=mean_ratings_df['count_rating']) plt.title('Boxplot of No. of Ratings given to movies') plt.show() ``` Not super helpful. I'm going to print different quantiles ``` # Calculate different quatiles n_of_users = df_ratings['userId'].nunique() n_of_movies = len(mean_ratings_df) quantiles_list = [] for n in range(10, 100, 5): q = mean_ratings_df['count_rating'].quantile(n/100) n_of_selected_movies = len(mean_ratings_df[mean_ratings_df['count_rating'] >= q]) quantiles_list.append([n, q, n_of_selected_movies]) pd.DataFrame(quantiles_list, columns=['quantile', 'quantile_value', 'number_of_movies']) ``` Before deciding the Minimum No. of Ratings, I'm going to look at the number of movies users have rated. ``` df_ratings.groupby('userId').count()['movieId'].describe() ``` The Median number of movies a user has rated is 70 movies and the 75th quantile is 168 movies. Therefore, I'm comfortable moving forward with having the Minimum Number of Ratings (or `m`) of 47 ratings since that represents 491 Movies which is more than most users have rated. > **Disclamer**: I have tried a minimum of 27/17 ratings as well, however, the model resulted in weird recommendations. So I'm picking 47 after iteratively trying 17 and 27. 
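To see how the weighting behaves, here is a small worked example using the two movies mentioned above. The numbers are illustrative: `m` is the 47 chosen above, and `C` is assumed to be roughly 3.5, in the neighbourhood of the overall average rating computed earlier.
```
def weighted_rating_example(v, R, m=47, C=3.5):
    """IMDB-style weighted rating: (v / (v+m)) * R + (m / (m+v)) * C."""
    return (v / (v + m)) * R + (m / (m + v)) * C

# A movie with many ratings vs. one with very few (values assumed for illustration)
print(round(weighted_rating_example(v=100, R=4.5), 2))   # ~4.18, stays close to its own average
print(round(weighted_rating_example(v=2, R=4.75), 2))    # ~3.55, pulled toward the global mean C
```
This matches the intuition stated earlier: the movie with 100 ratings ends up ranked above the one with only 2 ratings, even though its raw average is lower.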
#### m = 47 #### Create function to apply to the dataset ``` def weighted_rating(df): """ Calculates the IMDB's Weighted Rating using the following formula: (v / (v+m) * R) + (m / (m+v) * C) where: - v is the number of votes for the movie; - m is the minimum votes required to be listed in the chart; - R is the average rating of the movie; And - C is the mean vote across the whole report """ v = df['count_rating'] m = df['minimum_no_of_ratings'] R = df['avg_rating'] C = df['overall_avg_rating'] return (v / (v+m) * R) + (m / (m+v) * C) # Create Copy popularity_df = mean_ratings_df.copy() # Calculate the 95th quantile and the weighted rating popularity_df['minimum_no_of_ratings'] = popularity_df['count_rating'].quantile(0.95) popularity_df['weighted_rating'] = popularity_df.apply(weighted_rating, axis=1) ``` I'm going to look at the top 10 movies with the highest ratings. ``` # Grab the top 10 ids top_ten_ids = popularity_df.sort_values('weighted_rating', ascending=False)['movieId'][:10].values # Print them for idx, movie_id in enumerate(top_ten_ids): print((idx + 1), df_movies[df_movies['movieId'] == movie_id]['title'].item()) ``` Not too bad, I agree with these being the top 10. _However, that's very personal._ **[Go back to Table of Contents](#toc)** ## Collaborative-Based Filtering <a class="anchor" id="colab"></a> Collaborative Filtering is based on the idea that users similar to a me can be utilized to predict how much I will like a particular product or service that those same users have used/experienced but I have not. The strategy is to use different models and compare their performances. The metric to optimize for is RMSE. However, most likely, the best model will be the Singular Value Decomposition (SVD) or SVD++ based on what I have seen in different places. Nonetheless, I think it's worth trying different models rather than simply trying only these two models. Moreover, I'm also considering the fit time, otherwise, I might end up with a model that would not be _deployable_. ``` # Create a new dataframe to train the model. df_ratings_clean = df_ratings[['userId', 'movieId', 'rating']] ``` #### Reduce dataset to decrease runtime The dataset is too big and it's going to take too long to train the models if I use the whole dataset (_I've learned that the hard way_). Therefore, I'm picking only 50% of it to identify the best hyperparameters for the SVD model and I'm running GridSearchCV only for 50% of that. Once I identify the best hyperparameters, I'll then train the model using the whole dataset. 
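As a toy illustration of the user-based idea described in the Collaborative-Based Filtering paragraph above (separate from the Surprise models trained below, and using a made-up ratings matrix), a neighbour's ratings weighted by cosine similarity can fill in a movie the target user has not rated:
```
import numpy as np

# Rows are users, columns are movies; 0.0 means "not rated" (assumed toy data)
ratings = np.array([
    [5.0, 4.0, 0.0, 1.0],   # target user: movie at index 2 is unrated
    [4.0, 5.0, 4.0, 1.0],   # user with similar taste
    [1.0, 1.0, 2.0, 5.0],   # user with different taste
])

def cosine(a, b):
    return a @ b / (np.linalg.norm(a) * np.linalg.norm(b))

target = ratings[0]
sims = np.array([cosine(target, other) for other in ratings[1:]])

# Predicted rating for movie 2: similarity-weighted average of the neighbours' ratings
prediction = (sims @ ratings[1:, 2]) / sims.sum()
print(round(prediction, 2))   # closer to the similar user's 4.0 than to the dissimilar user's 2.0
```
The SVD family used below gets at the same idea through latent factors instead of explicit neighbour similarities.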
``` # Randomly pick 50,000 datapoints fmor the dataset sample_df = df_ratings_clean.sample(n=50000, random_state=111) # Split the sample data in two so I can test the best hyperparameters later on train_df, test_df = train_test_split(sample_df, train_size=.50, random_state=111) # Create reader and dataset objects reader = Reader() traindata = Dataset.load_from_df(train_df, reader) testdata = Dataset.load_from_df(test_df, reader) ``` ### GridSearchCV - Hyperparameter Tunning of SVD <a class="anchor" id="grid"></a> **[Go back to Table of Contents](#toc)** ``` # Perform a gridsearch with SVD param_grid = {'n_factors':[10, 15, 20] , 'n_epochs': [10, 20] , 'lr_all': [0.008, 0.012] , 'reg_all': [0.06, 0.1] , 'random_state': [111]} gs_model = GridSearchCV(SVD, param_grid=param_grid, n_jobs = -1, joblib_verbose=False) %time gs_model.fit(traindata) print('The best parameters are:') gs_model.best_params['rmse'] ``` ### GridSearchCV Metrics Analysis Let's analyze the metrics of each run and pick the best parameters given the RMSE and Fit Time. Sometimes simply choosing the best parameters is not the best option since the only goal of the Grid is to minimize RMSE. We should also consider the Fit Time if we are planning on having this model as a service running online. ``` # Convert results from the GridSearchCV to dataframes df_params = pd.DataFrame(gs_model.cv_results['params']) df_rmse = pd.DataFrame(gs_model.cv_results['mean_test_rmse'], columns=['mean_test_rmse']) df_time = pd.DataFrame(gs_model.cv_results['mean_fit_time'], columns=['mean_fit_time']) df_results = pd.concat([df_params, df_rmse, df_time], axis=1) ``` Create a function to print metrics so we can see the impact of hyperparameters in RMSE and Fit Time. ``` def compare_metrics_chart(df, column_a, column_b): """ Function to plot the comparison of two metrics in a GridSearchCV run. Args: df(pd.Dataframe): Pandas Dataframe with GridSearchCV metrics. column_a(str): First metric column_b(str): Second Metric """ # Create Figure fig = plt.figure(figsize=(10,5)) # Create first axis ax = fig.add_subplot(111) # Plot Column A sns.lineplot(data=df[column_a], color="g", ax=ax) # Set Y Label ax.set_ylabel(column_a, color='g', size=10) # Create axis 2 ax2 = plt.twinx() # Plot Column B sns.lineplot(data=df[column_b], color="b", ax=ax2) # Set Y Label ax2.set_ylabel(column_b, color='b', size=10) # Change the format of the title column_a_title = column_a.replace('_', ' ').title() column_b_title = column_b.replace('_', ' ').title() plt.title(column_a_title + ' vs. ' + column_b_title) plt.show(); ``` #### Number of Factors ``` compare_metrics_chart(df_results, 'n_factors', 'mean_test_rmse') compare_metrics_chart(df_results, 'n_factors', 'mean_fit_time') ``` The lowest values for RMSE is reached regardless of the Number of Factors. It's arguable that we should have more factors to decrease RMSE since that's the expectation. However, it comes at a cost: fit time increase. Since the data is showing we can achieve low RMSE with only `10` factors then I'm going to choose that. #### Number of Epochs ``` compare_metrics_chart(df_results, 'n_epochs', 'mean_test_rmse') compare_metrics_chart(df_results, 'n_epochs', 'mean_fit_time') ``` The Number of Epochs reduces RMSE, but it's possible to see an increase of 50%-80% in Fit Time, which is more than the positive impact in RMSE. Therefore, I'll go with `20` epochs. 
#### Regularization Term ``` compare_metrics_chart(df_results, 'reg_all', 'mean_test_rmse') compare_metrics_chart(df_results, 'reg_all', 'mean_fit_time') ``` Low regularization term achieves better results with no impact in fit time. #### Learning Rate ``` compare_metrics_chart(df_results, 'lr_all', 'mean_test_rmse') compare_metrics_chart(df_results, 'lr_all', 'mean_fit_time') ``` Having high Learning Rate has a positive impact in RMSE with now impact in Fit Time. #### Final hyperparameters: - `n_factors`: 15 - `n_epochs`: 20 - `lr_all`: 0.012 - `reg_all`: 0.06 **[Go back to Table of Contents](#toc)** ### Try different models <a class="anchor" id="dif"></a> #### Create a function to easily test different models ``` def full_model_training_evaluation(model, model_name, traindata, testdata): """ Train and test different models and collect fit time and train/test RMSE. Args: model(surprise.prediction_algorithms): Model instances from the surprise package. model_name(str): Model name created by the User. A way to identify the model. traindata(surprise.dataset.DatasetAutoFolds): Train dataset testdata(surprise.dataset.DatasetAutoFolds): Test dataset Returns: results(dict): A dictionary with the model name, fit time and RMSE's (train/test). """ # Stor results in dictionary results = {} results['model_name'] = model_name print('Training', model_name, 'model') # Fit on train data start_time = time.time() model.fit(traindata.build_full_trainset()) end_time = time.time() total_time = round(end_time - start_time, 2) results['fit_time_in_seconds'] = total_time # Get RMSE on train data predictions_train = model.test(traindata.build_full_trainset().build_testset()) rmse_train = rmse(predictions_train, verbose=False).round(2) results['rmse_train'] = rmse_train # Get RMSE on test data predictions_test = model.test(testdata.build_full_trainset().build_testset()) rmse_test = rmse(predictions_test, verbose=False).round(2) results['rmse_test'] = rmse_test return results ``` Instantiate different models ``` # Create SVD model with the best hyperparameters svd = SVD(n_factors=15, n_epochs=20, lr_all=0.012, reg_all=0.06, random_state=111) # SVD++: Use the same hyperparameters svd_pp = SVDpp(n_factors=15, n_epochs=20, lr_all=0.012, reg_all=0.06, random_state=111) # Different instances of KNN Basic models with different hyperparameters knn_basic_person_baseline = KNNBasic(sim_options={'name':'pearson_baseline', 'user_based':True}, verbose=False) knn_basic_person = KNNBasic(sim_options={'name':'pearson', 'user_based':True}, verbose=False) knn_basic_cosine = KNNBasic(sim_options={'name':'cosine', 'user_based':True}, verbose=False) # Different instances of KNN Baseline models with different hyperparameters knn_base_person_baseline = KNNBaseline(sim_options={'name':'pearson_baseline', 'user_based':True}, verbose=False) knn_base_person = KNNBaseline(sim_options={'name':'pearson', 'user_based':True}, verbose=False) knn_base_cosine = KNNBaseline(sim_options={'name':'cosine', 'user_based':True}, verbose=False) # Put all models in a dictionary models = {'SVD': svd, 'SVD++': svd_pp, 'KNNBasic Cosine': knn_basic_cosine, 'KNNBasic Person': knn_basic_person, 'KNNBasic Person Baseline': knn_basic_person_baseline, 'KNNBaseline Cosine': knn_base_cosine, 'KNNBaseline Person': knn_base_person, 'KNNBaseline Person Baseline': knn_base_person_baseline} # Loop through different models and evaluate them model_results = [] for model_name, model_instance in models.items(): results = full_model_training_evaluation(model_instance, 
model_name, traindata, testdata) model_results.append(results) ``` **[Go back to Table of Contents](#toc)** ### Model Evaluation <a class="anchor" id="eval"></a> ``` pd.DataFrame(model_results) ``` #### Notes: - **Fit Time**: `SVD++` is by far the worst model. All KNN models have somewhat the same Fit Time, which is 4 times faster than `SVD`. However, they are all very fast relative to the `SVD++` model. - **RMSE Train**: The KNN Models using `person_baseline` is overfitting the train set. When comparing both Singular Value Decomposition models, the `SVD++` is performing better than the `SVD`. - **RMSE Test**: Both Singular Value Decomposition models had the same performance numbers and performed better than all KNN models. ### Conclusion I'll move forward with the `SVD` model given the fit time and RMSE scores. **[Go back to Table of Contents](#toc)** ## Create function to take user input and give recommendations (+ hint of content-based attribute) <a class="anchor" id="func"></a> Finally, I'm going to create a function that takes a genre and ratings from a user who has no ratings in the dataset. In the process, I'm going to focus my recommendations based on the chosen genre (content-based part of the recommendation). ``` # Create list of genres list_of_genres = stacked_genres['genre'].sort_values().unique()[1:] # Combine mean ratings and movies details ratings_movies_df = pd.merge(mean_ratings_df, df_movies, on='movieId') ``` #### Filter the dataset by removing movies with not enough ratings ``` def filtered_dataset(genre): """ Function to filter the dataset given the genre and remove outliers. Args: genre(str): The genre the user has chosen to come with recommendations. Returns: genre_df(pd.DataFrame): Filtered Dataframe with only the chosen genre. """ # Keep only the selected genre genre_df = ratings_movies_df[ratings_movies_df['genres'].str.contains(genre)] # Calculate the 95th quantile and the weighted rating minimum_no_of_ratings = genre_df['count_rating'].quantile(0.95) genre_df['minimum_no_of_ratings'] = minimum_no_of_ratings genre_df['weighted_rating'] = genre_df.apply(weighted_rating, axis=1) # Remove movies with not enough ratings genre_df = genre_df[genre_df['count_rating'] >= minimum_no_of_ratings] # Sorted it by weighted rating so we have the highest ratings on the top genre_df = genre_df.sort_values('weighted_rating', ascending=False) genre_df = genre_df.reset_index(drop=True) # Keep certain relevant columns genre_df = genre_df[['movieId', 'title', 'genres', 'count_rating', 'minimum_no_of_ratings', 'weighted_rating']] return genre_df ``` #### Create first a function to let the user rate five movies ``` def rate_movie(n_of_movies=5, default_user_id=9999999): """ Function to request a new user to review some movies. Args: n_of_movies(int): Number of ratings the new will have to give. default_user_id(int): Random user id that will be given to the user to be able to reference to it later. 
Returns: new_ratings_df(pd.DataFrame): Pandas Dataframe with the new ratings favorite_genre(str): The User's favorite genre """ # Print a list of the available genres print('List of Available Genres: ', ", ".join(list_of_genres)) # Gather input from user on which genre will be analyzed favorite_genre = input('Choose one genre from the following (case-sensitive): ') # Filter the dataset df_movies_popularity = filtered_dataset(favorite_genre) # Keep only movies that contain the chosen genre favorite_genre_movies = df_movies_popularity[df_movies_popularity['genres'].str.contains(favorite_genre)] # Keep the highest rated movies favorite_genre_movies = favorite_genre_movies.iloc[:20].sample(frac=1, random_state=111) favorite_genre_movies = favorite_genre_movies.iloc[:n_of_movies] print('') # Created to store ratings from user ratings_list = [] # Loop through dataframe with movies to be rated for row in favorite_genre_movies.iterrows(): # Extract Title and ID movie_title = row[1]['title'] movie_id = row[1]['movieId'] print('Movie to rate: ', movie_title) # Gather rating from user rating = input('How do you rate this movie on a scale of 1-5, press n if you have not seen :\n') # Deal with users not typing a number and create a new variable with the integer try: rating_int = int(rating) except: rating_int = 1 # While the Rating is not valid, keep asking the user while (rating != 'n') and not (1 <= rating_int <=5): rating = input('Please rate the movie between 1-5 or n if you have not seen : \n') else: # If the rating is different from 'n' then we need to add the rating to the list if rating != 'n': ratings_list.append({'userId': default_user_id, 'movieId': movie_id, 'rating': rating_int}) print('') # Convert to DataFrame new_ratings_df = pd.DataFrame(ratings_list) return new_ratings_df, favorite_genre, df_movies_popularity ``` #### Create a function to give the recommendations ``` def give_n_recommendations(model, default_user_id=9999999, n_recommendations=5): """ Function to request a new user to review movies and give recommendations based on that. Args: model(surprise.prediction_algorithms): Model instances from the surprise package. default_user_id(int): Random user id that will be given to the user to be able to reference to it later. n_recommendations(int): Number of recommendations that will be given to the user. 
""" # Extract ratings from the user new_ratings_df, favorite_genre, df_movies_popularity = rate_movie(default_user_id=default_user_id) watched_movies_id = new_ratings_df['movieId'] ## add the new ratings to the original ratings DataFrame updated_df = pd.concat([new_ratings_df, df_ratings_clean]) new_data = Dataset.load_from_df(updated_df, reader) new_dataset = new_data.build_full_trainset() # Fit new dataset model.fit(new_dataset) # make predictions for the user results = [] for movie_id in df_movies_popularity['movieId'].unique(): predicted_score = model.predict(default_user_id, movie_id)[3] results.append((movie_id, predicted_score)) # order the predictions from highest to lowest rated ranked_movies = pd.DataFrame(results, columns=['movieId', 'predicted_score']) ranked_movies = ranked_movies[~ranked_movies['movieId'].isin(watched_movies_id)] ranked_movies = ranked_movies.sort_values('predicted_score', ascending=False).reset_index(drop=True) ranked_movies = pd.merge(ranked_movies, df_movies, on='movieId') # ranked_movies = ranked_movies[ranked_movies['genres'].str.contains(favorite_genre)] print('The recommendations are the following:') if len(ranked_movies) < n_recommendations: n_recommendations = len(ranked_movies) for row in range(n_recommendations): movie_id = ranked_movies.iloc[row]['movieId'] recommended_title = df_movies[df_movies['movieId'] == movie_id]['title'].item() print(f'No. {row+1} is {recommended_title}') ``` #### Let's test it out! I'm going to try different genres to see how the model behaves. #### `Action` ``` give_n_recommendations(svd) ``` #### `Documentary` ``` give_n_recommendations(svd) ``` #### `Crime` ``` give_n_recommendations(svd) ``` #### `Romance` ``` give_n_recommendations(svd) ``` # Conclusion <a class="anchor" id="conclusion"></a> I'm happy with the results. However, I think the function is a bit limited. I'd like to have the recommender in an app. To do that, I'm going to use Streamlit. **[Go back to Table of Contents](#toc)** # Export files to create app <a class="anchor" id="lit"></a> I'm going to export some files so I can use them in Streamlit ``` # Export it to use it on streamlit ratings_movies_df.to_csv('./app/data/movies_by_rating.csv', index=0) df_ratings_clean.to_csv('./app/data/user_movie_ratings.csv', index=0) dump.dump('./app/data/svd.pkl', algo=svd) ``` # [Check out the App!](https://movie-recommender-reno.herokuapp.com/) # Improvements <a class="anchor" id="improvements"></a> - Use Normalized Discounted Cumulative Gain (NDCG) to evaluate models. - Develop a Content-Based layer using `tags` and `genres` or even `title`/`year`. - Sometimes I rate Star Wars with 1 star and the recommender outputs more Start Wars movies. **[Go back to Table of Contents](#toc)**
