text: string (lengths 2.5k–6.39M)
kind: string (3 classes)
# SiteAlign features We read the SiteAlign features from the respective [paper](https://onlinelibrary.wiley.com/doi/full/10.1002/prot.21858) and [SI table](https://onlinelibrary.wiley.com/action/downloadSupplement?doi=10.1002%2Fprot.21858&file=prot21858-SupplementaryTable.pdf) to verify `kissim`'s implementation of the SiteAlign definitions: ``` from kissim.definitions import SITEALIGN_FEATURES SITEALIGN_FEATURES ``` ## Size SiteAlign's size definitions: > Natural amino acids have been classified into three groups according to the number of heavy atoms (<4 heavy atoms: Ala, Cys, Gly, Pro, Ser, Thr, Val; 4–6 heavy atoms: Asn, Asp, Gln, Glu, His, Ile, Leu, Lys, Met; >6 heavy atoms: Arg, Phe, Trp, Tyr) and three values (“1,” “2,” “3”) are outputted according to the group to which the current residues belong to (Table I) https://onlinelibrary.wiley.com/doi/full/10.1002/prot.21858 ### Parse text from SiteAlign paper ``` size = { 1.0: "Ala, Cys, Gly, Pro, Ser, Thr, Val".split(", "), 2.0: "Asn, Asp, Gln, Glu, His, Ile, Leu, Lys, Met".split(", "), 3.0: "Arg, Phe, Trp, Tyr".split(", "), } ``` ### `kissim` definitions correct? ``` import pandas as pd from IPython.display import display, HTML # Format SiteAlign data size_list = [] for value, amino_acids in size.items(): values = [(amino_acid.upper(), value) for amino_acid in amino_acids] size_list = size_list + values size_series = ( pd.DataFrame(size_list, columns=["amino_acid", "size"]) .sort_values("amino_acid") .set_index("amino_acid") .squeeze() ) # KiSSim implementation of SiteAlign features correct? diff = size_series == SITEALIGN_FEATURES["size"] if not diff.all(): raise ValueError( f"KiSSim implementation of SiteAlign features is incorrect!!!\n" f"{display(HTML(diff.to_html()))}" ) else: print("KiSSim implementation of SiteAlign features is correct :)") ``` ## HBA, HBD, charge, aromatic, aliphatic ### Parse table from SiteAlign SI ``` sitealign_table = """ Ala 0 0 0 1 0 Arg 3 0 +1 0 0 Asn 1 1 0 0 0 Asp 0 2 -1 0 0 Cys 1 0 0 1 0 Gly 0 0 0 0 0 Gln 1 1 0 0 0 Glu 0 2 -1 0 0 His/Hid/Hie 1 1 0 0 1 Hip 2 0 1 0 0 Ile 0 0 0 1 0 Leu 0 0 0 1 0 Lys 1 0 +1 0 0 Met 0 0 0 1 0 Phe 0 0 0 0 1 Pro 0 0 0 1 0 Ser 1 1 0 0 0 Thr 1 1 0 1 0 Trp 1 0 0 0 1 Tyr 1 1 0 0 1 Val 0 0 0 1 0 """ sitealign_table = [i.split() for i in sitealign_table.split("\n")[1:-1]] sitealign_dict = {i[0]: i[1:] for i in sitealign_table} sitealign_df = pd.DataFrame.from_dict(sitealign_dict).transpose() sitealign_df.columns = ["hbd", "hba", "charge", "aliphatic", "aromatic"] sitealign_df = sitealign_df[["hbd", "hba", "charge", "aromatic", "aliphatic"]] sitealign_df = sitealign_df.rename(index={"His/Hid/Hie": "His"}) sitealign_df = sitealign_df.drop("Hip", axis=0) sitealign_df = sitealign_df.astype("float") sitealign_df.index = [i.upper() for i in sitealign_df.index] sitealign_df = sitealign_df.sort_index() sitealign_df ``` ### `kissim` definitions correct? 
``` from IPython.display import display, HTML diff = sitealign_df == SITEALIGN_FEATURES.drop("size", axis=1).sort_index() if not diff.all().all(): raise ValueError( f"KiSSim implementation of SiteAlign features is incorrect!!!\n" f"{display(HTML(diff.to_html()))}" ) else: print("KiSSim implementation of SiteAlign features is correct :)") ``` ## Table style ``` from Bio.Data.IUPACData import protein_letters_3to1 for feature_name in SITEALIGN_FEATURES.columns: print(feature_name) for name, group in SITEALIGN_FEATURES.groupby(feature_name): amino_acids = {protein_letters_3to1[i.capitalize()] for i in group.index} amino_acids = sorted(amino_acids) print(f"{name:<7}{' '.join(amino_acids)}") print() ```
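For a quicker overview than the printed groups above, the same grouping can be pivoted into a single table. This is only a sketch built on the objects already used in this notebook (`SITEALIGN_FEATURES`, `protein_letters_3to1`); the layout of the summary frame is my own choice, not part of `kissim`.

```
import pandas as pd
from Bio.Data.IUPACData import protein_letters_3to1

# One column per feature, one row per feature value, cells hold one-letter codes
summary = {}
for feature_name in SITEALIGN_FEATURES.columns:
    summary[feature_name] = {
        value: " ".join(sorted(protein_letters_3to1[aa.capitalize()] for aa in group.index))
        for value, group in SITEALIGN_FEATURES.groupby(feature_name)
    }
summary_df = pd.DataFrame(summary)
summary_df.index.name = "feature_value"
summary_df
```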
github_jupyter
``` #@markdown ■■■■■■■■■■■■■■■■■■ #@markdown 初始化openpose #@markdown ■■■■■■■■■■■■■■■■■■ #设置版本为1.x %tensorflow_version 1.x import tensorflow as tf tf.__version__ ! nvcc --version ! nvidia-smi ! pip install PyQt5 import time init_start_time = time.time() #安装 cmake #https://drive.google.com/file/d/1lAXs5X7qMnKQE48I0JqSob4FX1t6-mED/view?usp=sharing file_id = "1lAXs5X7qMnKQE48I0JqSob4FX1t6-mED" file_name = "cmake-3.13.4.zip" ! cd ./ && curl -sc ./cookie "https://drive.google.com/uc?export=download&id=$file_id" > /dev/null code = "$(awk '/_warning_/ {print $NF}' ./cookie)" ! cd ./ && curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=$code&id=$file_id" -o "$file_name" ! cd ./ && unzip cmake-3.13.4.zip ! cd cmake-3.13.4 && ./configure && make && sudo make install # 依赖库安装 ! sudo apt install caffe-cuda ! sudo apt-get --assume-yes update ! sudo apt-get --assume-yes install build-essential # OpenCV ! sudo apt-get --assume-yes install libopencv-dev # General dependencies ! sudo apt-get --assume-yes install libatlas-base-dev libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler ! sudo apt-get --assume-yes install --no-install-recommends libboost-all-dev # Remaining dependencies, 14.04 ! sudo apt-get --assume-yes install libgflags-dev libgoogle-glog-dev liblmdb-dev # Python3 libs ! sudo apt-get --assume-yes install python3-setuptools python3-dev build-essential ! sudo apt-get --assume-yes install python3-pip ! sudo -H pip3 install --upgrade numpy protobuf opencv-python # OpenCL Generic ! sudo apt-get --assume-yes install opencl-headers ocl-icd-opencl-dev ! sudo apt-get --assume-yes install libviennacl-dev # Openpose安装 ver_openpose = "v1.6.0" # Openpose の clone ! git clone --depth 1 -b "$ver_openpose" https://github.com/CMU-Perceptual-Computing-Lab/openpose.git # ! git clone --depth 1 https://github.com/CMU-Perceptual-Computing-Lab/openpose.git # Openpose の モデルデータDL ! cd openpose/models && ./getModels.sh #编译Openpose ! cd openpose && rm -r build || true && mkdir build && cd build && cmake .. && make -j`nproc` # example demo usage # 执行示例确认 ! cd /content/openpose && ./build/examples/openpose/openpose.bin --video examples/media/video.avi --write_json ./output/ --display 0 --write_video ./output/openpose.avi #@markdown ■■■■■■■■■■■■■■■■■■ #@markdown 其他软件初始化 #@markdown ■■■■■■■■■■■■■■■■■■ ver_tag = "ver1.02.01" # FCRN-DepthPrediction-vmd clone ! git clone --depth 1 -b "$ver_tag" https://github.com/miu200521358/FCRN-DepthPrediction-vmd.git # FCRN-DepthPrediction-vmd 识别深度模型下载 # 建立模型数据文件夹 ! mkdir -p ./FCRN-DepthPrediction-vmd/tensorflow/data # 下载模型数据并解压 ! cd ./FCRN-DepthPrediction-vmd/tensorflow/data && wget -c "http://campar.in.tum.de/files/rupprecht/depthpred/NYU_FCRN-checkpoint.zip" && unzip NYU_FCRN-checkpoint.zip # 3d-pose-baseline-vmd clone ! git clone --depth 1 -b "$ver_tag" https://github.com/miu200521358/3d-pose-baseline-vmd.git # 3d-pose-baseline-vmd Human3.6M 模型数据DL # 建立Human3.6M模型数据文件夹 ! mkdir -p ./3d-pose-baseline-vmd/data/h36m # 下载Human3.6M模型数据并解压 file_id = "1W5WoWpCcJvGm4CHoUhfIB0dgXBDCEHHq" file_name = "h36m.zip" ! cd ./ && curl -sc ./cookie "https://drive.google.com/uc?export=download&id=$file_id" > /dev/null code = "$(awk '/_warning_/ {print $NF}' ./cookie)" ! cd ./ && curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=$code&id=$file_id" -o "$file_name" ! cd ./ && unzip h36m.zip ! mv ./h36m ./3d-pose-baseline-vmd/data/ # 3d-pose-baseline-vmd 训练数据 # 3d-pose-baseline学习数据文件夹 ! 
mkdir -p ./3d-pose-baseline-vmd/experiments # 下载3d-pose-baseline训练后的数据 file_id = "1v7ccpms3ZR8ExWWwVfcSpjMsGscDYH7_" file_name = "experiments.zip" ! cd ./3d-pose-baseline-vmd && curl -sc ./cookie "https://drive.google.com/uc?export=download&id=$file_id" > /dev/null code = "$(awk '/_warning_/ {print $NF}' ./cookie)" ! cd ./3d-pose-baseline-vmd && curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm=$code&id=$file_id" -o "$file_name" ! cd ./3d-pose-baseline-vmd && unzip experiments.zip # VMD-3d-pose-baseline-multi clone ! git clone --depth 1 -b "$ver_tag" https://github.com/miu200521358/VMD-3d-pose-baseline-multi.git # 安装VMD-3d-pose-baseline-multi 依赖库 ! sudo apt-get install python3-pyqt5 ! sudo apt-get install pyqt5-dev-tools ! sudo apt-get install qttools5-dev-tools #安装编码器 ! sudo apt-get install mkvtoolnix init_elapsed_time = (time.time() - init_start_time) / 60 ! echo "■■■■■■■■■■■■■■■■■■■■■■■■" ! echo "■■所有初始化均已完成" ! echo "■■" ! echo "■■处理时间:" "$init_elapsed_time" "分" ! echo "■■■■■■■■■■■■■■■■■■■■■■■■" ! echo "Openpose执行结果" ! ls -l /content/openpose/output #@markdown ■■■■■■■■■■■■■■■■■■ #@markdown 执行函数初始化 #@markdown ■■■■■■■■■■■■■■■■■■ import os import cv2 import datetime import time import datetime import cv2 import shutil import glob from google.colab import files static_number_people_max = 1 static_frame_first = 0 static_end_frame_no = -1 static_reverse_specific = "" static_order_specific = "" static_born_model_csv = "born/animasa_miku_born.csv" static_is_ik = 1 static_heel_position = 0.0 static_center_z_scale = 1 static_smooth_times = 1 static_threshold_pos = 0.5 static_threshold_rot = 3 static_src_input_video = "" static_input_video = "" #执行文件夹 openpose_path = "/content/openpose" #输出文件夹 base_path = "/content/output" output_json = "/content/output/json" output_openpose_avi = "/content/output/openpose.avi" now_str = "" depth_dir_path = "" drive_dir_path = "" def video_hander( input_video): global base_path print("视频名称: ", os.path.basename(input_video)) print("视频大小: ", os.path.getsize(input_video)) video = cv2.VideoCapture(input_video) # 宽 W = video.get(cv2.CAP_PROP_FRAME_WIDTH) # 高 H = video.get(cv2.CAP_PROP_FRAME_HEIGHT) # 总帧数 count = video.get(cv2.CAP_PROP_FRAME_COUNT) # fps fps = video.get(cv2.CAP_PROP_FPS) print("宽: {0}, 高: {1}, 总帧数: {2}, fps: {3}".format(W, H, count, fps)) width = 1280 height = 720 if W != 1280 or (fps != 30 and fps != 60): print("重新编码,因为大小或fps不在范围: "+ input_video) # 縮尺 scale = width / W # 高さ height = int(H * scale) # 出力ファイルパス out_name = 'recode_{0}.mp4'.format("{0:%Y%m%d_%H%M%S}".format(datetime.datetime.now())) out_path = '{0}/{1}'.format(base_path, out_name) # try: # fourcc = cv2.VideoWriter_fourcc(*"MP4V") # out = cv2.VideoWriter(out_path, fourcc, 30.0, (width, height), True) # # 入力ファイル # cap = cv2.VideoCapture(input_video) # while(cap.isOpened()): # # 動画から1枚キャプチャして読み込む # flag, frame = cap.read() # Capture frame-by-frame # # 動画が終わっていたら終了 # if flag == False: # break # # 縮小 # output_frame = cv2.resize(frame, (width, height)) # # 出力 # out.write(output_frame) # # 終わったら開放 # out.release() # except Exception as e: # print("重新编码失败", e) # cap.release() # cv2.destroyAllWindows() # ! mkvmerge --default-duration 0:30fps --fix-bitstream-timing-information 0 "$input_video" -o temp-video.mkv # ! ffmpeg -i temp-video.mkv -c:v copy side_video.mkv # ! ffmpeg -i side_video.mkv -vf scale=1280:720 "$out_path" ! 
ffmpeg -i "$input_video" -qscale 0 -r 30 -y -vf scale=1280:720 "$out_path" print('MMD重新生成MP4文件成功', out_path) input_video_name = out_name # 入力動画ファイル再設定 input_video = base_path + "/"+ input_video_name video = cv2.VideoCapture(input_video) # 幅 W = video.get(cv2.CAP_PROP_FRAME_WIDTH) # 高さ H = video.get(cv2.CAP_PROP_FRAME_HEIGHT) # 総フレーム数 count = video.get(cv2.CAP_PROP_FRAME_COUNT) # fps fps = video.get(cv2.CAP_PROP_FPS) print("【重新生成】宽: {0}, 高: {1}, 总帧数: {2}, fps: {3}, 名字: {4}".format(W, H, count, fps,input_video_name)) return input_video def run_openpose(input_video,number_people_max,frame_first): #建立临时文件夹 ! mkdir -p "$output_json" #开始执行 ! cd "$openpose_path" && ./build/examples/openpose/openpose.bin --video "$input_video" --display 0 --model_pose COCO --write_json "$output_json" --write_video "$output_openpose_avi" --frame_first "$frame_first" --number_people_max "$number_people_max" def run_fcrn_depth(input_video,end_frame_no,reverse_specific,order_specific): global now_str,depth_dir_path,drive_dir_path now_str = "{0:%Y%m%d_%H%M%S}".format(datetime.datetime.now()) ! cd FCRN-DepthPrediction-vmd && python tensorflow/predict_video.py --model_path tensorflow/data/NYU_FCRN.ckpt --video_path "$input_video" --json_path "$output_json" --interval 10 --reverse_specific "$reverse_specific" --order_specific "$order_specific" --verbose 1 --now "$now_str" --avi_output "yes" --number_people_max "$number_people_max" --end_frame_no "$end_frame_no" # 深度結果コピー depth_dir_path = output_json + "_" + now_str + "_depth" drive_dir_path = base_path + "/" + now_str ! mkdir -p "$drive_dir_path" if os.path.exists( depth_dir_path + "/error.txt"): # 发生错误 ! cp "$depth_dir_path"/error.txt "$drive_dir_path" ! echo "■■■■■■■■■■■■■■■■■■■■■■■■" ! echo "■■由于发生错误,处理被中断。" ! echo "■■" ! echo "■■■■■■■■■■■■■■■■■■■■■■■■" ! echo "$drive_dir_path" "请检查 error.txt 的内容。" else: ! cp "$depth_dir_path"/*.avi "$drive_dir_path" ! cp "$depth_dir_path"/message.log "$drive_dir_path" ! cp "$depth_dir_path"/reverse_specific.txt "$drive_dir_path" ! cp "$depth_dir_path"/order_specific.txt "$drive_dir_path" for i in range(1, number_people_max+1): ! echo ------------------------------------------ ! echo 3d-pose-baseline-vmd ["$i"] ! echo ------------------------------------------ target_name = "_" + now_str + "_idx0" + str(i) target_dir = output_json + target_name !cd ./3d-pose-baseline-vmd && python src/openpose_3dpose_sandbox_vmd.py --camera_frame --residual --batch_norm --dropout 0.5 --max_norm --evaluateActionWise --use_sh --epochs 200 --load 4874200 --gif_fps 30 --verbose 1 --openpose "$target_dir" --person_idx 1 def run_3d_to_vmd(number_people_max,born_model_csv,is_ik,heel_position,center_z_scale,smooth_times,threshold_pos,threshold_rot): global now_str,depth_dir_path,drive_dir_path for i in range(1, number_people_max+1): target_name = "_" + now_str + "_idx0" + str(i) target_dir = output_json + target_name for f in glob.glob(target_dir +"/*.vmd"): ! rm "$f" ! cd ./VMD-3d-pose-baseline-multi && python applications/pos2vmd_multi.py -v 2 -t "$target_dir" -b "$born_model_csv" -c 30 -z "$center_z_scale" -s "$smooth_times" -p "$threshold_pos" -r "$threshold_rot" -k "$is_ik" -e "$heel_position" # INDEX別結果コピー idx_dir_path = drive_dir_path + "/idx0" + str(i) ! mkdir -p "$idx_dir_path" # 日本語対策でpythonコピー for f in glob.glob(target_dir +"/*.vmd"): shutil.copy(f, idx_dir_path) print(f) files.download(f) ! cp "$target_dir"/pos.txt "$idx_dir_path" ! 
cp "$target_dir"/start_frame.txt "$idx_dir_path" def run_mmd(input_video,number_people_max,frame_first,end_frame_no,reverse_specific,order_specific,born_model_csv,is_ik,heel_position,center_z_scale,smooth_times,threshold_pos,threshold_rot): global static_input_video,static_number_people_max ,static_frame_first ,static_end_frame_no,static_reverse_specific ,static_order_specific,static_born_model_csv global static_is_ik,static_heel_position ,static_center_z_scale ,static_smooth_times ,static_threshold_pos ,static_threshold_rot global base_path,static_src_input_video start_time = time.time() video_check= False openpose_check = False Fcrn_depth_check = False pose_to_vmd_check = False #源文件对比 if static_src_input_video != input_video: video_check = True openpose_check = True Fcrn_depth_check = True pose_to_vmd_check = True if (static_number_people_max != number_people_max) or (static_frame_first != frame_first): openpose_check = True Fcrn_depth_check = True pose_to_vmd_check = True if (static_end_frame_no != end_frame_no) or (static_reverse_specific != reverse_specific) or (static_order_specific != order_specific): Fcrn_depth_check = True pose_to_vmd_check = True if (static_born_model_csv != born_model_csv) or (static_is_ik != is_ik) or (static_heel_position != heel_position) or (static_center_z_scale != center_z_scale) or \ (static_smooth_times != smooth_times) or (static_threshold_pos != threshold_pos) or (static_threshold_rot != threshold_rot): pose_to_vmd_check = True #因为视频源文件重置,所以如果无修改需要重命名文件 if video_check: ! rm -rf "$base_path" ! mkdir -p "$base_path" static_src_input_video = input_video input_video = video_hander(input_video) static_input_video = input_video else: input_video = static_input_video if openpose_check: run_openpose(input_video,number_people_max,frame_first) static_number_people_max = number_people_max static_frame_first = frame_first if Fcrn_depth_check: run_fcrn_depth(input_video,end_frame_no,reverse_specific,order_specific) static_end_frame_no = end_frame_no static_reverse_specific = reverse_specific static_order_specific = order_specific if pose_to_vmd_check: run_3d_to_vmd(number_people_max,born_model_csv,is_ik,heel_position,center_z_scale,smooth_times,threshold_pos,threshold_rot) static_born_model_csv = born_model_csv static_is_ik = is_ik static_heel_position = heel_position static_center_z_scale = center_z_scale static_smooth_times = smooth_times static_threshold_pos = threshold_pos static_threshold_rot = threshold_rot elapsed_time = (time.time() - start_time) / 60 print( "■■■■■■■■■■■■■■■■■■■■■■■■") print( "■■所有处理完成") print( "■■") print( "■■处理時間:" + str(elapsed_time)+ "分") print( "■■■■■■■■■■■■■■■■■■■■■■■■") print( "") print( "MMD自动跟踪执行结果") print( base_path) ! 
ls -l "$base_path" #@markdown ■■■■■■■■■■■■■■■■■■ #@markdown GO GO GO GO 执行本单元格,上传视频 #@markdown ■■■■■■■■■■■■■■■■■■ from google.colab import files #@markdown --- #@markdown ### 输入视频名称 #@markdown 可以选择手动拖入视频到文件中(比较快),然后输入视频文件名,或者直接运行,不输入文件名直接本地上传 input_video = "" #@param {type: "string"} if input_video == "": uploaded = files.upload() for fn in uploaded.keys(): print('User uploaded file "{name}" with length {length} bytes'.format( name=fn, length=len(uploaded[fn]))) input_video = fn input_video = "/content/" + input_video print("本次执行的转化视频文件名为: "+input_video) #@markdown 输入用于跟踪图像的参数并执行单元。 #@markdown --- #@markdown ### 【O】视频中的最大人数 #@markdown 请输入您希望从视频中获得的人数。 #@markdown 请与视频中人数尽量保持一致 number_people_max = 1#@param {type: "number"} #@markdown --- #@markdown ### 【O】要从第几帧开始分析 #@markdown 输入帧号以开始分析。(从0开始) #@markdown 请指定在视频中显示所有人的第一帧,默认为0即可,除非你需要跳过某些片段(例如片头)。 frame_first = 0 #@param {type: "number"} #@markdown --- #@markdown ### 【F】要从第几帧结束 #@markdown 请输入要从哪一帧结束 #@markdown (从0开始)在“FCRN-DepthPrediction-vmd”中调整反向或顺序时,可以完成过程并查看结果,默认为-1 表示执行到最后 end_frame_no = -1 #@param {type: "number"} #@markdown --- #@markdown ### 【F】反转数据表 #@markdown 指定由Openpose反转的帧号(从0开始),人员INDEX顺序和反转的内容。 #@markdown 按照Openpose在 0F 识别的顺序,将INDEX分配为0,1,...。 #@markdown 格式: [{帧号}: 用于指定反转的人INDEX, {反转内容}] #@markdown {反转内容}: R: 整体身体反转, U:上半身反转, L: 下半身反转, N: 无反转 #@markdown 例如:[10:1,R] 整个人在第10帧中反转第一个人。在message.log中会记录以上述格式输出内容 #@markdown 因此请参考与[10:1,R][30:0,U],中一样,可以在括号中指定多个项目 ps(不要带有中文标点符号)) reverse_specific = "" #@param {type: "string"} #@markdown --- #@markdown ### 【F】输出颜色(仅参考,如果多人时,某个人序号跟别人交换或者错误,可以用此项修改) #@markdown 请在多人轨迹中的交点之后指定人索引顺序。如果要跟踪一个人,可以将其留为空白。 #@markdown 按照Openpose在0F时识别的顺序分配0、1和INDEX。格式:[<帧号>:第几个人的索引,第几个人的索引,…]示例)[10:1,0]…第帧10是从左数第1人按第0个人的顺序对其进行排序。 #@markdown message.log包含以上述格式输出的顺序,因此请参考它。可以在括号中指定多个项目,例如[10:1,0] [30:0,1]。在output_XXX.avi中,按照估计顺序为人们分配了颜色。身体的右半部分为红色,左半部分为以下颜色。 #@markdown 0:绿色,1:蓝色,2:白色,3:黄色,4:桃红色,5:浅蓝色,6:深绿色,7:深蓝色,8:灰色,9:深黄色,​​10:深桃红色,11:深浅蓝色 order_specific = "" #@param {type: "string"} #@markdown --- #@markdown ### 【V】骨骼结构CSV文件 #@markdown 选择或输入跟踪目标模型的骨骼结构CSV文件的路径。请将csv文件上传到Google云端硬盘的“ autotrace”文件夹。 #@markdown 您可以选择 "Animasa-Miku" 和 "Animasa-Miku semi-standard", 也可以输入任何模型的骨骼结构CSV文件 #@markdown 如果要输入任何模型骨骼结构CSV文件, 请将csv文件上传到Google云端硬盘的 "autotrace" 文件夹下 #@markdown 然后请输入「/gdrive/My Drive/autotrace/[csv file name]」 born_model_csv = "born/\u3042\u306B\u307E\u3055\u5F0F\u30DF\u30AF\u6E96\u6A19\u6E96\u30DC\u30FC\u30F3.csv" #@param ["born/animasa_miku_born.csv", "born/animasa_miku_semi_standard_born.csv"] {allow-input: true} #@markdown --- #@markdown ### 【V】是否使用IK输出 #@markdown 选择以IK输出,yes或no #@markdown 如果输入no,则以输出FK ik_flag = "yes" #@param ['yes', 'no'] is_ik = 1 if ik_flag == "yes" else 0 #@markdown --- #@markdown ### 【V】脚与地面位置校正 #@markdown 请输入数值的鞋跟的Y轴校正值(可以为小数) #@markdown 输入负值会接近地面,输入正值会远离地面。 #@markdown 尽管会自动在某种程度上自动校正,但如果无法校正,请进行设置。 heel_position = 0.0 #@param {type: "number"} #@markdown --- #@markdown ### 【V】Z中心放大倍率 #@markdown 以将放大倍数应用到Z轴中心移动(可以是小数) #@markdown 值越小,中心Z移动的宽度越小 #@markdown 输入0时,不进行Z轴中心移动。 center_z_scale = 2#@param {type: "number"} #@markdown --- #@markdown ### 【V】平滑频率 #@markdown 指定运动的平滑频率 #@markdown 请仅输入1或更大的整数 #@markdown 频率越大,频率越平滑。(行为幅度会变小) smooth_times = 1#@param {type: "number"} #@markdown --- #@markdown ### 【V】移动稀疏量 (低于该阀值的运动宽度,不会进行输出,防抖动) #@markdown 用数值(允许小数)指定用于稀疏移动(IK /中心)的移动量 #@markdown 如果在指定范围内有移动,则将稀疏。如果移动抽取量设置为0,则不执行抽取。 #@markdown 当移动稀疏量设置为0时,不进行稀疏。 threshold_pos = 0.3 #@param {type: "number"} #@markdown --- #@markdown ### 【V】旋转稀疏角 (低于该阀值的运动角度,则不会进行输出) #@markdown 指定用于稀疏旋转键的角度(0到180度的十进制数) 
#@markdown 如果在指定角度范围内有旋转,则稀疏旋转键。 threshold_rot = 3#@param {type: "number"} print(" 【O】Maximum number of people in the video: "+str(number_people_max)) print(" 【O】Frame number to start analysis: "+str(frame_first)) print(" 【F】Frame number to finish analysis: "+str(end_frame_no)) print(" 【F】Reverse specification list: "+str(reverse_specific)) print(" 【F】Ordered list: "+str(order_specific)) print(" 【V】Bone structure CSV file: "+str(born_model_csv)) print(" 【V】Whether to output with IK: "+str(ik_flag)) print(" 【V】Heel position correction: "+str(heel_position)) print(" 【V】Center Z moving magnification: "+str(center_z_scale)) print(" 【V】Smoothing frequency: "+str(smooth_times)) print(" 【V】Movement key thinning amount: "+str(threshold_pos)) print(" 【V】Rotating Key Culling Angle: "+str(threshold_rot)) print("") print("If the above is correct, please proceed to the next.") #input_video = "/content/openpose/examples/media/video.avi" run_mmd(input_video,number_people_max,frame_first,end_frame_no,reverse_specific,order_specific,born_model_csv,is_ik,heel_position,center_z_scale,smooth_times,threshold_pos,threshold_rot) ``` # License许可 发布和分发MMD自动跟踪的结果时,请确保检查许可证。Unity也是如此。 如果您能列出您的许可证,我将不胜感激。 [MMD运动跟踪自动化套件许可证](https://ch.nicovideo.jp/miu200521358/blomaga/ar1686913) 原作者:Twitter miu200521358 修改与优化:B站 妖风瑟瑟
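As a footnote to the `reverse_specific` / `order_specific` parameters documented in the form above: the bracket format (e.g. `[10:1,R][30:0,U]`) can be parsed with a small helper. This is only an illustrative sketch of the documented string format; the function name and the use of `re` are mine, not part of the original toolchain.

```
import re

def parse_reverse_specific(spec):
    """Parse entries like '[10:1,R][30:0,U]' into (frame, person_index, flag) tuples.

    Flags follow the documentation above: R = whole body, U = upper body,
    L = lower body, N = no reversal.
    """
    return [
        (int(frame), int(person), flag)
        for frame, person, flag in re.findall(r"\[(\d+):(\d+),([RULN])\]", spec)
    ]

print(parse_reverse_specific("[10:1,R][30:0,U]"))  # [(10, 1, 'R'), (30, 0, 'U')]
```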
github_jupyter
``` ## Advanced Course in Machine Learning ## Week 4 ## Exercise 2 / Probabilistic PCA import numpy as np import scipy import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib.animation as animation from numpy import linalg as LA sns.set_style("darkgrid") def build_dataset(N, D, K, sigma=1): x = np.zeros((D, N)) z = np.random.normal(0.0, 1.0, size=(K, N)) # Create a w with random values w = np.random.normal(0.0, sigma**2, size=(D, K)) mean = np.dot(w, z) for d in range(D): for n in range(N): x[d, n] = np.random.normal(mean[d, n], sigma**2) print("True principal axes:") print(w) return x, mean, w, z N = 5000 # number of data points D = 2 # data dimensionality K = 1 # latent dimensionality sigma = 1.0 x, mean, w, z = build_dataset(N, D, K, sigma) print(z) print(w) plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k') sns.scatterplot(z[0, :], 0, alpha=0.5, label='z') origin = [0], [0] # origin point plt.xlabel('x') plt.ylabel('y') plt.legend(loc='lower right') plt.title('Probabilistic PCA, generated z') plt.show() plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k') sns.scatterplot(z[0, :], 0, alpha=0.5, label='z') sns.scatterplot(mean[0, :], mean[1, :], color='red', alpha=0.5, label='Wz') origin = [0], [0] # origin point #Plot the principal axis plt.quiver(*origin, w[0,0], w[1,0], color=['g'], scale=1, label='W') plt.xlabel('x') plt.ylabel('y') plt.legend(loc='upper right') plt.title('Probabilistic PCA, generated z') plt.show() print(x) plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k') sns.scatterplot(x[0, :], x[1, :], color='orange', alpha=0.5) #plt.axis([-5, 5, -5, 5]) plt.xlabel('x') plt.ylabel('y') #Plot the principal axis plt.quiver(*origin, w[0,0], w[1,0], color=['g'], scale=10, label='W') #Plot probability density contours sns.kdeplot(x[0, :], x[1, :], n_levels=3, color='purple') plt.title('Probabilistic PCA, generated x') plt.show() plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k') sns.scatterplot(x[0, :], x[1, :], color='orange', alpha=0.5, label='X') sns.scatterplot(z[0, :], 0, alpha=0.5, label='z') sns.scatterplot(mean[0, :], mean[1, :], color='red', alpha=0.5, label='Wz') origin = [0], [0] # origin point #Plot the principal axis plt.quiver(*origin, w[0,0], w[1,0], color=['g'], scale=10, label='W') plt.xlabel('x') plt.ylabel('y') plt.legend(loc='lower right') plt.title('Probabilistic PCA') plt.show() plt.figure(num=None, figsize=(8, 6), dpi=100, facecolor='w', edgecolor='k') sns.scatterplot(x[0, :], x[1, :], color='orange', alpha=0.5, label='X') sns.scatterplot(z[0, :], 0, alpha=0.5, label='z') sns.scatterplot(mean[0, :], mean[1, :], color='red', alpha=0.5, label='Wz') origin = [0], [0] # origin point #Plot the principal axis plt.quiver(*origin, w[0,0], w[1,0], color=['g'], scale=10, label='W') #Plot probability density contours sns.kdeplot(x[0, :], x[1, :], n_levels=6, color='purple') plt.xlabel('x') plt.ylabel('y') plt.legend(loc='lower right') plt.title('Probabilistic PCA') plt.show() ``` def main(): fig = plt.figure() scat = plt.scatter(mean[0, :], color='red', alpha=0.5, label='Wz') ani = animation.FuncAnimation(fig, update_plot, frames=xrange(N), fargs=(scat)) plt.show() def update_plot(i, scat): scat.set_array(data[i]) return scat, main()
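The cells above only generate data from the PPCA model. As a complement (not part of the original exercise code), here is a sketch of recovering the principal axis from `x` using the closed-form maximum-likelihood PPCA solution of Tipping & Bishop; `w_ml` should match the true `w` up to sign and sampling noise.

```
import numpy as np
from numpy import linalg as LA

# Sample covariance of the generated data (x has shape (D, N))
S = np.cov(x)
eigvals, eigvecs = LA.eigh(S)                       # ascending eigenvalues
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]  # sort descending

D, K = x.shape[0], 1
sigma2_ml = eigvals[K:].mean()  # average of the discarded eigenvalues
w_ml = eigvecs[:, :K] * np.sqrt(np.maximum(eigvals[:K] - sigma2_ml, 0.0))

print("True W:\n", w)
print("ML estimate of W (up to sign):\n", w_ml)
print("ML estimate of noise variance:", sigma2_ml)
```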
github_jupyter
``` %matplotlib inline import pandas as pd import cv2 import numpy as np from matplotlib import pyplot as plt df = pd.read_csv("data/22800_SELECT_t___FROM_data_data_t.csv",header=None,index_col=0) df = df.rename(columns={0:"no", 1: "CAPTDATA", 2: "CAPTIMAGE",3: "timestamp"}) df.info() df.sample(5) def alpha_to_gray(img): alpha_channel = img[:, :, 3] _, mask = cv2.threshold(alpha_channel, 128, 255, cv2.THRESH_BINARY) # binarize mask color = img[:, :, :3] img = cv2.bitwise_not(cv2.bitwise_not(color, mask=mask)) return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def preprocess(data): data = bytes.fromhex(data[2:]) img = cv2.imdecode( np.asarray(bytearray(data), dtype=np.uint8), cv2.IMREAD_UNCHANGED ) img = alpha_to_gray(img) kernel = np.ones((3, 3), np.uint8) img = cv2.dilate(img, kernel, iterations=1) img = cv2.medianBlur(img, 3) kernel = np.ones((4, 4), np.uint8) img = cv2.erode(img, kernel, iterations=1) # plt.imshow(img) return img df["IMAGE"] = df["CAPTIMAGE"].apply(preprocess) def bounding(gray): # data = bytes.fromhex(df["CAPTIMAGE"][1][2:]) # image = cv2.imdecode( np.asarray(bytearray(data), dtype=np.uint8), cv2.IMREAD_UNCHANGED ) # alpha_channel = image[:, :, 3] # _, mask = cv2.threshold(alpha_channel, 128, 255, cv2.THRESH_BINARY) # binarize mask # color = image[:, :, :3] # src = cv2.bitwise_not(cv2.bitwise_not(color, mask=mask)) ret, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY) binary = cv2.bitwise_not(binary) contours, hierachy = cv2.findContours(binary, cv2.RETR_EXTERNAL , cv2.CHAIN_APPROX_NONE) ans = [] for h, tcnt in enumerate(contours): x,y,w,h = cv2.boundingRect(tcnt) if h < 25: continue if 40 < w < 100: # 2개가 붙어 있는 경우 ans.append([x,y,w//2,h]) ans.append([x+(w//2),y,w//2,h]) continue if 100 <= w < 170: ans.append([x,y,w//3,h]) ans.append([x+(w//3),y,w//3,h]) ans.append([x+(2*w//3),y,w//3,h]) # cv2.rectangle(src,(x,y),(x+w,y+h),(255,0,0),1) ans.append([x,y,w,h]) return ans # cv2.destroyAllWindows() df["bounding"] = df["IMAGE"].apply(bounding) def draw_bounding(idx): CAPTIMAGE = df["CAPTIMAGE"][idx] bounding = df["bounding"][idx] data = bytes.fromhex(CAPTIMAGE[2:]) image = cv2.imdecode( np.asarray(bytearray(data), dtype=np.uint8), cv2.IMREAD_UNCHANGED ) alpha_channel = image[:, :, 3] _, mask = cv2.threshold(alpha_channel, 128, 255, cv2.THRESH_BINARY) # binarize mask color = image[:, :, :3] src = cv2.bitwise_not(cv2.bitwise_not(color, mask=mask)) for x,y,w,h in bounding: # print(x,y,w,h) cv2.rectangle(src,(x,y),(x+w,y+h),(255,0,0),1) return src import random nrows = 4 ncols = 4 fig, axes = plt.subplots(nrows=nrows, ncols=ncols) fig.set_size_inches((16, 6)) for i in range(nrows): for j in range(ncols): idx = random.randrange(20,22800) axes[i][j].set_title(str(idx)) axes[i][j].imshow(draw_bounding(idx)) fig.tight_layout() plt.savefig('sample.png') plt.show() charImg = [] for idx in df.index: IMAGE = df["IMAGE"][idx] bounding = df["bounding"][idx] for x,y,w,h in bounding: newImg = IMAGE[y:y+h,x:x+w] newImg = cv2.resize(newImg, dsize=(41, 38), interpolation=cv2.INTER_NEAREST) charImg.append(newImg/255.0) # cast to numpy arrays trainingImages = np.asarray(charImg) # reshape img array to vector def reshape_image(img): return np.reshape(img,len(img)*len(img[0])) img_reshape = np.zeros((len(trainingImages),len(trainingImages[0])*len(trainingImages[0][0]))) for i in range(0,len(trainingImages)): img_reshape[i] = reshape_image(trainingImages[i]) from sklearn.cluster import KMeans import matplotlib.pyplot as plt import seaborn as sns # create model and prediction model = 
KMeans(n_clusters=40,algorithm='auto') model.fit(img_reshape) predict = pd.DataFrame(model.predict(img_reshape)) predict.columns=['predict'] import pickle pickle.dump(model, open("KMeans_40_22800.pkl", "wb")) import pickle model = pickle.load(open("KMeans_40_22800.pkl", "rb")) predict = pd.DataFrame(model.predict(img_reshape)) predict.columns=['predict'] import random from tqdm import tqdm r = pd.concat([pd.DataFrame(img_reshape),predict],axis=1) !rm -rf res_40 !mkdir res_40 nrows = 4 ncols = 10 fig, axes = plt.subplots(nrows=nrows, ncols=ncols) fig.set_size_inches((16, 6)) for j in tqdm(range(40)): i = 0 nSample = min(nrows * ncols,len(r[r["predict"] == j])) for idx in r[r["predict"] == j].sample(nSample).index: axes[i // ncols][i % ncols].set_title(str(idx)) axes[i // ncols][i % ncols].imshow(trainingImages[idx]) i+=1 fig.tight_layout() plt.savefig('res_40/sample_' + str(j) + '.png') ``` 98 95 92 222 255
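One way to sanity-check the choice of 40 clusters (a sketch added here, not part of the original notebook) is a silhouette score on a random subsample of the reshaped character images; `model` and `img_reshape` are the objects defined above.

```
from sklearn.metrics import silhouette_score

# Subsample to keep the O(n^2) silhouette computation manageable
score = silhouette_score(
    img_reshape,
    model.predict(img_reshape),
    sample_size=5000,
    random_state=42,
)
print("Silhouette score for k=40:", score)
```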
github_jupyter
``` # Import and create a new SQLContext from pyspark.sql import SQLContext sqlContext = SQLContext(sc) # Read the country CSV file into an RDD. country_lines = sc.textFile('file:///home/ubuntu/work/notebooks/UCSD/big-data-3/final-project/country-list.csv') country_lines.collect() # Convert each line into a pair of words country_lines.map(lambda a: a.split(",")).collect() # Convert each pair of words into a tuple country_tuples = country_lines.map(lambda a: (a.split(",")[0].lower(), a.split(",")[1])) # Create the DataFrame, look at schema and contents countryDF = sqlContext.createDataFrame(country_tuples, ["country", "code"]) countryDF.printSchema() countryDF.take(3) # Read tweets CSV file into RDD of lines tweets = sc.textFile('file:///home/ubuntu/work/notebooks/UCSD/big-data-3/final-project/tweets.csv') tweets.count() # Clean the data: some tweets are empty. Remove the empty tweets using filter() filtered_tweets = tweets.filter(lambda a: len(a) > 0) filtered_tweets.count() # Perform WordCount on the cleaned tweet texts. (note: this is several lines.) word_counts = filtered_tweets.flatMap(lambda a: a.split(" ")) \ .map(lambda word: (word.lower(), 1)) \ .reduceByKey(lambda a, b: a + b) from pyspark.sql import HiveContext from pyspark.sql.types import * # sc is an existing SparkContext. sqlContext = HiveContext(sc) schemaString = "word count" fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()] schema = StructType(fields) # Create the DataFrame of tweet word counts tweetsDF = sqlContext.createDataFrame(word_counts, schema) tweetsDF.printSchema() tweetsDF.count() # Join the country and tweet DataFrames (on the appropriate column) joined = countryDF.join(tweetsDF, countryDF.country == tweetsDF.word) joined.take(5) joined.show() # Question 1: number of distinct countries mentioned distinct_countries = joined.select("country").distinct() distinct_countries.show(100) # Question 2: number of countries mentioned in tweets. from pyspark.sql.functions import sum from pyspark.sql import SparkSession from pyspark.sql import Row countries_count = joined.groupBy("country") joined.createOrReplaceTempView("records") spark.sql("SELECT country, count(*) count1 FROM records group by country order by count1 desc, country asc").show(100) # Table 1: top three countries and their counts. from pyspark.sql.functions import desc from pyspark.sql.functions import col top_3 = joined.sort(col("count").desc()) top_3.show() # Table 2: counts for Wales, Iceland, and Japan. ```
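The last cell above leaves Table 2 unfinished. One possible way to fill it in, reusing the `joined` DataFrame (a sketch; note that country names were lowercased when `country_tuples` was built, and that `count` is stored as a string in this schema):

```
from pyspark.sql.functions import col

# Table 2: counts for Wales, Iceland, and Japan
joined \
    .filter(col("country").isin("wales", "iceland", "japan")) \
    .select("country", col("count").cast("int").alias("count")) \
    .show()
```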
github_jupyter
# Datafaucet Datafaucet is a productivity framework for ETL, ML application. Simplifying some of the common activities which are typical in Data pipeline such as project scaffolding, data ingesting, start schema generation, forecasting etc. ``` import datafaucet as dfc ``` ## Loading and Saving Data ``` dfc.project.load() query = """ SELECT p.payment_date, p.amount, p.rental_id, p.staff_id, c.* FROM payment p INNER JOIN customer c ON p.customer_id = c.customer_id; """ df = dfc.load(query, 'pagila') ``` #### Select cols ``` df.cols.find('id').columns df.cols.find(by_type='string').columns df.cols.find(by_func=lambda x: x.startswith('st')).columns df.cols.find('^st').columns ``` #### Collect data, oriented by rows or cols ``` df.cols.find(by_type='numeric').rows.collect(3) df.cols.find(by_type='string').collect(3) df.cols.find('name', 'date').data.collect(3) ``` #### Get just one row or column ``` df.cols.find('active', 'amount', 'name').one() df.cols.find('active', 'amount', 'name').rows.one() ``` #### Grid view ``` df.cols.find('amount', 'id', 'name').data.grid(5) ``` #### Data Exploration ``` df.cols.find('amount', 'id', 'name').data.facets() ``` #### Rename columns ``` df.cols.find(by_type='timestamp').rename('new_', '***').columns # to do # df.cols.rename(transform=['unidecode', 'alnum', 'alpha', 'num', 'lower', 'trim', 'squeeze', 'slice', tr("abc", "_", mode='')']) # df.cols.rename(transform=['unidecode', 'alnum', 'lower', 'trim("_")', 'squeeze("_")']) # as a dictionary mapping = { 'staff_id': 'foo', 'first_name': 'bar', 'email': 'qux', 'active':'active' } # or as a list of 2-tuples mapping = [ ('staff_id','foo'), ('first_name','bar'), 'active' ] dict(zip(df.columns, df.cols.rename('new_', '***', mapping).columns)) ``` #### Drop multiple columns ``` df.cols.find('id').drop().rows.collect(3) ``` #### Apply to multiple columns ``` from pyspark.sql import functions as F (df .cols.find(by_type='string').lower() .cols.get('email').split('@') .cols.get('email').expand(2) .cols.find('name', 'email') .rows.collect(3) ) ``` ### Aggregations ``` from datafaucet.spark import aggregations as A df.cols.find('amount', '^st.*id', 'first_name').agg(A.all).cols.collect(10) ``` ##### group by a set of columns ``` df.cols.find('amount').groupby('staff_id', 'store_id').agg(A.all).cols.collect(4) ``` #### Aggregate specific metrics ``` # by function df.cols.get('amount', 'active').groupby('customer_id').agg({'count':F.count, 'sum': F.sum}).rows.collect(10) # or by alias df.cols.get('amount', 'active').groupby('customer_id').agg('count','sum').rows.collect(10) # or a mix of the two df.cols.get('amount', 'active').groupby('customer_id').agg('count',{'sum': F.sum}).rows.collect(10) ``` #### Featurize specific metrics in a single row ``` (df .cols.get('amount', 'active') .groupby('customer_id', 'store_id') .featurize({'count':A.count, 'sum':A.sum, 'avg':A.avg}) .rows.collect(10) ) # todo: # different features per different column ``` #### Plot dataset statistics ``` df.data.summary() from bokeh.io import output_notebook output_notebook() from bokeh.plotting import figure, show, output_file p = figure(plot_width=400, plot_height=400) p.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1.2, 2.5, 3.7], color="navy") show(p) import seaborn as sns import matplotlib.pyplot as plt sns.set(style="whitegrid") # Initialize the matplotlib figure f, ax = plt.subplots(figsize=(6, 6)) # Load the example car crash dataset crashes = sns.load_dataset("car_crashes").sort_values("total", ascending=False)[:10] # Plot the total crashes 
sns.set_color_codes("pastel") sns.barplot(x="total", y="abbrev", data=crashes, label="Total", color="b") # Plot the crashes where alcohol was involved sns.set_color_codes("muted") sns.barplot(x="alcohol", y="abbrev", data=crashes, label="Alcohol-involved", color="b") # Add a legend and informative axis label ax.legend(ncol=2, loc="lower right", frameon=True) ax.set(xlim=(0, 24), ylabel="", xlabel="Automobile collisions per billion miles") sns.despine(left=True, bottom=True) import numpy as np import seaborn as sns import matplotlib.pyplot as plt sns.set(style="white", palette="muted", color_codes=True) # Generate a random univariate dataset rs = np.random.RandomState(10) d = rs.normal(size=100) # Plot a simple histogram with binsize determined automatically sns.distplot(d, hist=True, kde=True, rug=True, color="b"); import seaborn as sns sns.set(style="ticks") df = sns.load_dataset("iris") sns.pairplot(df, hue="species") from IPython.display import HTML HTML(''' <!-- Bootstrap CSS --> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" crossorigin="anonymous"> <div class="container-fluid"> <div class="jumbotron"> <h1 class="display-4">Hello, world!</h1> <p class="lead">This is a simple hero unit, a simple jumbotron-style component for calling extra attention to featured content or information.</p> <hr class="my-4"> <p>It uses utility classes for typography and spacing to space content out within the larger container.</p> <a class="btn btn-primary btn-lg" href="#" role="button">Learn more</a> </div> <button type="button" class="btn btn-secondary" data-toggle="tooltip" data-placement="top" title="Tooltip on top"> Tooltip on top </button> <button type="button" class="btn btn-secondary" data-toggle="tooltip" data-placement="right" title="Tooltip on right"> Tooltip on right </button> <button type="button" class="btn btn-secondary" data-toggle="tooltip" data-placement="bottom" title="Tooltip on bottom"> Tooltip on bottom </button> <button type="button" class="btn btn-secondary" data-toggle="tooltip" data-placement="left" title="Tooltip on left"> Tooltip on left </button> <table class="table"> <thead> <tr> <th scope="col">#</th> <th scope="col">First</th> <th scope="col">Last</th> <th scope="col">Handle</th> </tr> </thead> <tbody> <tr> <th scope="row">1</th> <td>Mark</td> <td>Otto</td> <td>@mdo</td> </tr> <tr> <th scope="row">2</th> <td>Jacob</td> <td>Thornton</td> <td>@fat</td> </tr> <tr> <th scope="row">3</th> <td>Larry</td> <td>the Bird</td> <td>@twitter</td> </tr> </tbody> </table> <span class="badge badge-primary">Primary</span> <span class="badge badge-secondary">Secondary</span> <span class="badge badge-success">Success</span> <span class="badge badge-danger">Danger</span> <span class="badge badge-warning">Warning</span> <span class="badge badge-info">Info</span> <span class="badge badge-light">Light</span> <span class="badge badge-dark">Dark</span> <table class="table table-sm" style="text-align:left"> <thead> <tr> <th scope="col">#</th> <th scope="col">First</th> <th scope="col">Last</th> <th scope="col">Handle</th> <th scope="col">bar</th> </tr> </thead> <tbody> <tr> <th scope="row">1</th> <td>Mark</td> <td>Otto</td> <td>@mdo</td> <td class="text-left"><span class="badge badge-primary" style="width: 75%">Primary</span></td> </tr> <tr> <th scope="row">2</th> <td>Jacob</td> <td>Thornton</td> <td>@fat</td> <td class="text-left"><span class="badge badge-secondary" style="width: 25%">Primary</span></td> </tr> <tr> <th scope="row">3</th> <td 
colspan="2">Larry the Bird</td> <td>@twitter</td> <td class="text-left"><span class="badge badge-warning" style="width: 55%">Primary</span></td> </div> </tr> </tbody> </table> </div>''') tbl = ''' <table class="table table-sm"> <thead> <tr> <th scope="col">#</th> <th scope="col">First</th> <th scope="col">Last</th> <th scope="col">Handle</th> <th scope="col">bar</th> </tr> </thead> <tbody> <tr> <th scope="row">1</th> <td>Mark</td> <td>Otto</td> <td>@mdo</td> <td class="text-left"><span class="badge badge-primary" style="width: 75%">75%</span></td> </tr> <tr> <th scope="row">2</th> <td>Jacob</td> <td>Thornton</td> <td>@fat</td> <td class="text-left"><span class="badge badge-secondary" style="width: 25%" title="Tooltip on top">25%</span></td> </tr> <tr> <th scope="row">3</th> <td colspan="2">Larry the Bird</td> <td>@twitter</td> <td class="text-left"><span class="badge badge-warning" style="width: 0%">0%</span></td> </tr> </tbody> </table> ''' drp = ''' <div class="dropdown"> <button class="btn btn-secondary dropdown-toggle" type="button" id="dropdownMenuButton" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> Dropdown button </button> <div class="dropdown-menu" aria-labelledby="dropdownMenuButton"> <a class="dropdown-item" href="#">Action</a> <a class="dropdown-item" href="#">Another action</a> <a class="dropdown-item" href="#">Something else here</a> </div> </div>''' tabs = f''' <nav> <div class="nav nav-tabs" id="nav-tab" role="tablist"> <a class="nav-item nav-link active" id="nav-home-tab" data-toggle="tab" href="#nav-home" role="tab" aria-controls="nav-home" aria-selected="true">Home</a> <a class="nav-item nav-link" id="nav-profile-tab" data-toggle="tab" href="#nav-profile" role="tab" aria-controls="nav-profile" aria-selected="false">Profile</a> <a class="nav-item nav-link" id="nav-contact-tab" data-toggle="tab" href="#nav-contact" role="tab" aria-controls="nav-contact" aria-selected="false">Contact</a> </div> </nav> <div class="tab-content" id="nav-tabContent"> <div class="tab-pane fade show active" id="nav-home" role="tabpanel" aria-labelledby="nav-home-tab">..jjj.</div> <div class="tab-pane fade" id="nav-profile" role="tabpanel" aria-labelledby="nav-profile-tab">..kkk.</div> <div class="tab-pane fade" id="nav-contact" role="tabpanel" aria-labelledby="nav-contact-tab">{tbl}</div> </div> ''' from IPython.display import HTML HTML(f''' <!-- Bootstrap CSS --> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" crossorigin="anonymous"> <div class="container-fluid"> <div class="row"> <div class="col"> {drp} </div> <div class="col"> {tabs} </div> <div class="col"> {tbl} </div> </div> </div> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.bundle.min.js" crossorigin="anonymous" > ''') from IPython.display import HTML HTML(f''' <!-- Bootstrap CSS --> <link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" crossorigin="anonymous"> <script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.bundle.min.js" crossorigin="anonymous" > ''') d =df.cols.find('id', 'name').sample(10) d.columns tbl_head = ''' <thead> <tr> ''' tbl_head += '\n'.join([' <th scope="col">'+str(x)+'</th>' for x in d.columns]) tbl_head +=''' </tr> </thead> ''' print(tbl_head) tbl_body = ''' <tbody> <tr> <th scope="row">1</th> <td>Mark</td> <td>Otto</td> <td>@mdo</td> <td class="text-left"><span class="badge badge-primary" style="width: 75%">75%</span></td> </tr> <tr> 
<th scope="row">2</th> <td>Jacob</td> <td>Thornton</td> <td>@fat</td> <td class="text-left"><span class="badge badge-secondary" style="width: 25%" title="Tooltip on top">25%</span></td> </tr> <tr> <th scope="row">3</th> <td colspan="2">Larry the Bird</td> <td>@twitter</td> <td class="text-left"><span class="badge badge-warning" style="width: 0%">0%</span></td> </tr> </tbody> </table> ''' HTML(f''' <!-- Bootstrap CSS --> <div class="container-fluid"> <div class="row"> <div class="col"> <table class="table table-sm"> {tbl_head} {tbl_body} </table> </div> </div> </div> ''') # .rows.sample() # .cols.select('name', 'id', 'amount')\ # .cols.apply(F.lower, 'name')\ # .cols.apply(F.floor, 'amount', output_prefix='_')\ # .cols.drop('^amount$')\ # .cols.rename() # .cols.unicode() .grid() df = df.cols.select('name') df = df.rows.overwrite([('Nhập mật', 'khẩu')]) df.columns # .rows.overwrite(['Nhập mật', 'khẩu'])\ # .cols.apply(F.lower)\ # .grid() # #withColumn('pippo', F.lower(F.col('first_name'))).grid() import pandas as pd df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]}) df.plot.bar(x='lab', y='val', rot=0); ```
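The cells above generate `tbl_head` from `d.columns` but keep a hand-written `tbl_body`. As a sketch of how the body rows could be generated the same way (plain pandas and string formatting here; none of this is datafaucet API), one option:

```
import pandas as pd
from IPython.display import HTML

df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})

# Build <tr> rows with a badge whose width reflects the value, mirroring the static tbl_body above
rows = []
for i, row in df.iterrows():
    pct = 100 * row['val'] / df['val'].max()
    rows.append(
        f'<tr><th scope="row">{i + 1}</th><td>{row["lab"]}</td>'
        f'<td class="text-left"><span class="badge badge-primary" style="width: {pct:.0f}%">{pct:.0f}%</span></td></tr>'
    )
tbl_body = '<tbody>' + ''.join(rows) + '</tbody>'

HTML(f'''
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" crossorigin="anonymous">
<table class="table table-sm">
  <thead><tr><th scope="col">#</th><th scope="col">lab</th><th scope="col">val (bar)</th></tr></thead>
  {tbl_body}
</table>''')
```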
github_jupyter
![](https://memesbams.com/wp-content/uploads/2017/11/sheldon-sarcasm-meme.jpg)

https://www.kaggle.com/danofer/sarcasm

### Context

This dataset contains 1.3 million sarcastic comments from the Internet commentary website Reddit. The dataset was generated by scraping comments from Reddit (not by me :)) containing the `\s` (sarcasm) tag. This tag is often used by Redditors to indicate that their comment is in jest and not meant to be taken seriously, and is generally a reliable indicator of sarcastic comment content.

### Content

Data has balanced and imbalanced (i.e. true distribution) versions; the true ratio is about 1:100. The corpus has 1.3 million sarcastic statements, along with what they responded to as well as many non-sarcastic comments from the same source.

Labelled comments are in the `train-balanced-sarcasm.csv` file.

### Acknowledgements

The data was gathered by Mikhail Khodak, Nikunj Saunshi and Kiran Vodrahalli for their article "[A Large Self-Annotated Corpus for Sarcasm](https://arxiv.org/abs/1704.05579)". The data is hosted [here](http://nlp.cs.princeton.edu/SARC/0.0/).

Citation:

```
@unpublished{SARC,
  authors={Mikhail Khodak and Nikunj Saunshi and Kiran Vodrahalli},
  title={A Large Self-Annotated Corpus for Sarcasm},
  url={https://arxiv.org/abs/1704.05579},
  year=2017
}
```

[Annotation of files in the original dataset: readme.txt](http://nlp.cs.princeton.edu/SARC/0.0/readme.txt).

### Inspiration

* Predicting sarcasm and relevant NLP features (e.g. subjective determinant, racism, conditionals, sentiment-heavy words, "Internet Slang" and specific phrases).
* Sarcasm vs sentiment
* Unusual linguistic features such as caps, italics, or elongated words, e.g., "Yeahhh, I'm sure THAT is the right answer".
* Topics that people tend to react to sarcastically

``` import os # Install java ! apt-get update -qq ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] ! java -version # Install pyspark ! pip install --ignore-installed pyspark==2.4.4 # Install Spark NLP ! pip install --ignore-installed spark-nlp import sys import time import sparknlp from pyspark.sql import SparkSession packages = [ 'JohnSnowLabs:spark-nlp:2.5.5' ] spark = SparkSession \ .builder \ .appName("ML SQL session") \ .config('spark.jars.packages', ','.join(packages)) \ .config('spark.executor.instances','2') \ .config("spark.executor.memory", "2g") \ .config("spark.driver.memory","16g") \ .getOrCreate() print("Spark NLP version: ", sparknlp.version()) print("Apache Spark version: ", spark.version) !
wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/en/sarcasm/train-balanced-sarcasm.csv -P /tmp from pyspark.sql import SQLContext sql = SQLContext(spark) trainBalancedSarcasmDF = spark.read.option("header", True).option("inferSchema", True).csv("/tmp/train-balanced-sarcasm.csv") trainBalancedSarcasmDF.printSchema() # Let's create a temp view (table) for our SQL queries trainBalancedSarcasmDF.createOrReplaceTempView('data') sql.sql('SELECT COUNT(*) FROM data').collect() sql.sql('select * from data limit 20').show() sql.sql('select label,count(*) as cnt from data group by label order by cnt desc').show() sql.sql('select count(*) from data where comment is null').collect() df = sql.sql('select label,concat(parent_comment,"\n",comment) as comment from data where comment is not null and parent_comment is not null limit 100000') print(type(df)) df.printSchema() df.show() from sparknlp.annotator import * from sparknlp.common import * from sparknlp.base import * from pyspark.ml import Pipeline document_assembler = DocumentAssembler() \ .setInputCol("comment") \ .setOutputCol("document") sentence_detector = SentenceDetector() \ .setInputCols(["document"]) \ .setOutputCol("sentence") \ .setUseAbbreviations(True) tokenizer = Tokenizer() \ .setInputCols(["sentence"]) \ .setOutputCol("token") stemmer = Stemmer() \ .setInputCols(["token"]) \ .setOutputCol("stem") normalizer = Normalizer() \ .setInputCols(["stem"]) \ .setOutputCol("normalized") finisher = Finisher() \ .setInputCols(["normalized"]) \ .setOutputCols(["ntokens"]) \ .setOutputAsArray(True) \ .setCleanAnnotations(True) nlp_pipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, stemmer, normalizer, finisher]) nlp_model = nlp_pipeline.fit(df) processed = nlp_model.transform(df).persist() processed.count() processed.show() train, test = processed.randomSplit(weights=[0.7, 0.3], seed=123) print(train.count()) print(test.count()) from pyspark.ml import feature as spark_ft stopWords = spark_ft.StopWordsRemover.loadDefaultStopWords('english') sw_remover = spark_ft.StopWordsRemover(inputCol='ntokens', outputCol='clean_tokens', stopWords=stopWords) tf = spark_ft.CountVectorizer(vocabSize=500, inputCol='clean_tokens', outputCol='tf') idf = spark_ft.IDF(minDocFreq=5, inputCol='tf', outputCol='idf') feature_pipeline = Pipeline(stages=[sw_remover, tf, idf]) feature_model = feature_pipeline.fit(train) train_featurized = feature_model.transform(train).persist() train_featurized.count() train_featurized.show() train_featurized.groupBy("label").count().show() train_featurized.printSchema() from pyspark.ml import classification as spark_cls rf = spark_cls. RandomForestClassifier(labelCol="label", featuresCol="idf", numTrees=100) model = rf.fit(train_featurized) test_featurized = feature_model.transform(test) preds = model.transform(test_featurized) preds.show() pred_df = preds.select('comment', 'label', 'prediction').toPandas() pred_df.head() import pandas as pd from sklearn import metrics as skmetrics pd.DataFrame( data=skmetrics.confusion_matrix(pred_df['label'], pred_df['prediction']), columns=['pred ' + l for l in ['0','1']], index=['true ' + l for l in ['0','1']] ) print(skmetrics.classification_report(pred_df['label'], pred_df['prediction'], target_names=['0','1'])) spark.stop() ```
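Instead of collecting predictions to pandas, the same metrics can be computed with Spark's built-in evaluators. This is a sketch on top of the `preds` DataFrame above and would need to run before `spark.stop()` is called.

```
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy"
)
print("Accuracy:", evaluator.evaluate(preds))

evaluator.setMetricName("f1")
print("F1:", evaluator.evaluate(preds))
```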
github_jupyter
``` # Copyright 2020 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== ``` <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;"> # Object Detection with TRTorch (SSD) --- ## Overview In PyTorch 1.0, TorchScript was introduced as a method to separate your PyTorch model from Python, make it portable and optimizable. TRTorch is a compiler that uses TensorRT (NVIDIA's Deep Learning Optimization SDK and Runtime) to optimize TorchScript code. It compiles standard TorchScript modules into ones that internally run with TensorRT optimizations. TensorRT can take models from any major framework and specifically tune them to perform better on specific target hardware in the NVIDIA family, and TRTorch enables us to continue to remain in the PyTorch ecosystem whilst doing so. This allows us to leverage the great features in PyTorch, including module composability, its flexible tensor implementation, data loaders and more. TRTorch is available to use with both PyTorch and LibTorch. To get more background information on this, we suggest the **lenet-getting-started** notebook as a primer for getting started with TRTorch. ### Learning objectives This notebook demonstrates the steps for compiling a TorchScript module with TRTorch on a pretrained SSD network, and running it to test the speedup obtained. ## Contents 1. [Requirements](#1) 2. [SSD Overview](#2) 3. [Creating TorchScript modules](#3) 4. [Compiling with TRTorch](#4) 5. [Running Inference](#5) 6. [Measuring Speedup](#6) 7. [Conclusion](#7) --- <a id="1"></a> ## 1. Requirements Follow the steps in `notebooks/README` to prepare a Docker container, within which you can run this demo notebook. In addition to that, run the following cell to obtain additional libraries specific to this demo. ``` # Known working versions !pip install numpy==1.21.2 scipy==1.5.2 Pillow==6.2.0 scikit-image==0.17.2 matplotlib==3.3.0 ``` --- <a id="2"></a> ## 2. SSD ### Single Shot MultiBox Detector model for object detection _ | _ - | - ![alt](https://pytorch.org/assets/images/ssd_diagram.png) | ![alt](https://pytorch.org/assets/images/ssd.png) PyTorch has a model repository called the PyTorch Hub, which is a source for high quality implementations of common models. We can get our SSD model pretrained on [COCO](https://cocodataset.org/#home) from there. ### Model Description This SSD300 model is based on the [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) paper, which describes SSD as “a method for detecting objects in images using a single deep neural network". The input size is fixed to 300x300. The main difference between this model and the one described in the paper is in the backbone. Specifically, the VGG model is obsolete and is replaced by the ResNet-50 model. 
From the [Speed/accuracy trade-offs for modern convolutional object detectors](https://arxiv.org/abs/1611.10012) paper, the following enhancements were made to the backbone: * The conv5_x, avgpool, fc and softmax layers were removed from the original classification model. * All strides in conv4_x are set to 1x1. The backbone is followed by 5 additional convolutional layers. In addition to the convolutional layers, we attached 6 detection heads: * The first detection head is attached to the last conv4_x layer. * The other five detection heads are attached to the corresponding 5 additional layers. Detector heads are similar to the ones referenced in the paper, however, they are enhanced by additional BatchNorm layers after each convolution. More information about this SSD model is available at Nvidia's "DeepLearningExamples" Github [here](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD). ``` import torch torch.hub._validate_not_a_forked_repo=lambda a,b,c: True # List of available models in PyTorch Hub from Nvidia/DeepLearningExamples torch.hub.list('NVIDIA/DeepLearningExamples:torchhub') # load SSD model pretrained on COCO from Torch Hub precision = 'fp32' ssd300 = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd', model_math=precision); ``` Setting `precision="fp16"` will load a checkpoint trained with mixed precision into architecture enabling execution on Tensor Cores. Handling mixed precision data requires the Apex library. ### Sample Inference We can now run inference on the model. This is demonstrated below using sample images from the COCO 2017 Validation set. ``` # Sample images from the COCO validation set uris = [ 'http://images.cocodataset.org/val2017/000000397133.jpg', 'http://images.cocodataset.org/val2017/000000037777.jpg', 'http://images.cocodataset.org/val2017/000000252219.jpg' ] # For convenient and comprehensive formatting of input and output of the model, load a set of utility methods. utils = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_ssd_processing_utils') # Format images to comply with the network input inputs = [utils.prepare_input(uri) for uri in uris] tensor = utils.prepare_tensor(inputs, False) # The model was trained on COCO dataset, which we need to access in order to # translate class IDs into object names. classes_to_labels = utils.get_coco_object_dictionary() # Next, we run object detection model = ssd300.eval().to("cuda") detections_batch = model(tensor) # By default, raw output from SSD network per input image contains 8732 boxes with # localization and class probability distribution. # Let’s filter this output to only get reasonable detections (confidence>40%) in a more comprehensive format. results_per_input = utils.decode_results(detections_batch) best_results_per_input = [utils.pick_best(results, 0.40) for results in results_per_input] ``` ### Visualize results ``` from matplotlib import pyplot as plt import matplotlib.patches as patches # The utility plots the images and predicted bounding boxes (with confidence scores). def plot_results(best_results): for image_idx in range(len(best_results)): fig, ax = plt.subplots(1) # Show original, denormalized image... 
image = inputs[image_idx] / 2 + 0.5 ax.imshow(image) # ...with detections bboxes, classes, confidences = best_results[image_idx] for idx in range(len(bboxes)): left, bot, right, top = bboxes[idx] x, y, w, h = [val * 300 for val in [left, bot, right - left, top - bot]] rect = patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none') ax.add_patch(rect) ax.text(x, y, "{} {:.0f}%".format(classes_to_labels[classes[idx] - 1], confidences[idx]*100), bbox=dict(facecolor='white', alpha=0.5)) plt.show() # Visualize results without TRTorch/TensorRT plot_results(best_results_per_input) ``` ### Benchmark utility ``` import time import numpy as np import torch.backends.cudnn as cudnn cudnn.benchmark = True # Helper function to benchmark the model def benchmark(model, input_shape=(1024, 1, 32, 32), dtype='fp32', nwarmup=50, nruns=1000): input_data = torch.randn(input_shape) input_data = input_data.to("cuda") if dtype=='fp16': input_data = input_data.half() print("Warm up ...") with torch.no_grad(): for _ in range(nwarmup): features = model(input_data) torch.cuda.synchronize() print("Start timing ...") timings = [] with torch.no_grad(): for i in range(1, nruns+1): start_time = time.time() pred_loc, pred_label = model(input_data) torch.cuda.synchronize() end_time = time.time() timings.append(end_time - start_time) if i%10==0: print('Iteration %d/%d, avg batch time %.2f ms'%(i, nruns, np.mean(timings)*1000)) print("Input shape:", input_data.size()) print("Output location prediction size:", pred_loc.size()) print("Output label prediction size:", pred_label.size()) print('Average batch time: %.2f ms'%(np.mean(timings)*1000)) ``` We check how well the model performs **before** we use TRTorch/TensorRT ``` # Model benchmark without TRTorch/TensorRT model = ssd300.eval().to("cuda") benchmark(model, input_shape=(128, 3, 300, 300), nruns=100) ``` --- <a id="3"></a> ## 3. Creating TorchScript modules To compile with TRTorch, the model must first be in **TorchScript**. TorchScript is a programming language included in PyTorch which removes the Python dependency normal PyTorch models have. This conversion is done via a JIT compiler which given a PyTorch Module will generate an equivalent TorchScript Module. There are two paths that can be used to generate TorchScript: **Tracing** and **Scripting**. <br> - Tracing follows execution of PyTorch generating ops in TorchScript corresponding to what it sees. <br> - Scripting does an analysis of the Python code and generates TorchScript, this allows the resulting graph to include control flow which tracing cannot do. Tracing however due to its simplicity is more likely to compile successfully with TRTorch (though both systems are supported). ``` model = ssd300.eval().to("cuda") traced_model = torch.jit.trace(model, [torch.randn((1,3,300,300)).to("cuda")]) ``` If required, we can also save this model and use it independently of Python. ``` # This is just an example, and not required for the purposes of this demo torch.jit.save(traced_model, "ssd_300_traced.jit.pt") # Obtain the average time taken by a batch of input with Torchscript compiled modules benchmark(traced_model, input_shape=(128, 3, 300, 300), nruns=100) ``` --- <a id="4"></a> ## 4. Compiling with TRTorch TorchScript modules behave just like normal PyTorch modules and are intercompatible. From TorchScript we can now compile a TensorRT based module. This module will still be implemented in TorchScript but all the computation will be done in TensorRT. 
``` import trtorch # The compiled module will have precision as specified by "op_precision". # Here, it will have FP16 precision. trt_model = trtorch.compile(traced_model, { "inputs": [trtorch.Input((3, 3, 300, 300))], "enabled_precisions": {torch.float, torch.half}, # Run with FP16 "workspace_size": 1 << 20 }) ``` --- <a id="5"></a> ## 5. Running Inference Next, we run object detection ``` # using a TRTorch module is exactly the same as how we usually do inference in PyTorch i.e. model(inputs) detections_batch = trt_model(tensor.to(torch.half)) # convert the input to half precision # By default, raw output from SSD network per input image contains 8732 boxes with # localization and class probability distribution. # Let’s filter this output to only get reasonable detections (confidence>40%) in a more comprehensive format. results_per_input = utils.decode_results(detections_batch) best_results_per_input_trt = [utils.pick_best(results, 0.40) for results in results_per_input] ``` Now, let's visualize our predictions! ``` # Visualize results with TRTorch/TensorRT plot_results(best_results_per_input_trt) ``` We get similar results as before! --- ## 6. Measuring Speedup We can run the benchmark function again to see the speedup gained! Compare this result with the same batch-size of input in the case without TRTorch/TensorRT above. ``` batch_size = 128 # Recompiling with batch_size we use for evaluating performance trt_model = trtorch.compile(traced_model, { "inputs": [trtorch.Input((batch_size, 3, 300, 300))], "enabled_precisions": {torch.float, torch.half}, # Run with FP16 "workspace_size": 1 << 20 }) benchmark(trt_model, input_shape=(batch_size, 3, 300, 300), nruns=100, dtype="fp16") ``` --- ## 7. Conclusion In this notebook, we have walked through the complete process of compiling a TorchScript SSD300 model with TRTorch, and tested the performance impact of the optimization. We find that using the TRTorch compiled model, we gain significant speedup in inference without any noticeable drop in performance! ### Details For detailed information on model input and output, training recipies, inference and performance visit: [github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD) and/or [NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:ssd_for_pytorch) ### References - [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325) paper - [Speed/accuracy trade-offs for modern convolutional object detectors](https://arxiv.org/abs/1611.10012) paper - [SSD on NGC](https://ngc.nvidia.com/catalog/model-scripts/nvidia:ssd_for_pytorch) - [SSD on github](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD)
# 3. Markov Models Example Problems

We will now look at a model that examines our state of healthiness vs. being sick. Keep in mind that this is very much like something you could do in real life. If you wanted to model a certain situation or environment, we could take some data that we have gathered, build a maximum likelihood model on it, and do things like study the properties that emerge from the model, make predictions from the model, or generate the next most likely state.

Let's say we have 2 states: **sick** and **healthy**. We know that we spend most of our time in a healthy state, so the probability of transitioning from healthy to sick is very low:

$$p(sick \; | \; healthy) = 0.005$$

Hence, the probability of going from healthy to healthy is:

$$p(healthy \; | \; healthy) = 0.995$$

Now, on the other hand, the probability of going from sick to sick is also very high. This is because if you just got sick yesterday then you are very likely to be sick tomorrow.

$$p(sick \; | \; sick) = 0.8$$

However, the probability of transitioning from sick to healthy should be higher than the reverse, because you probably won't stay sick for as long as you would stay healthy. Since there are only two states, this probability must be $1 - p(sick \; | \; sick)$:

$$p(healthy \; | \; sick) = 0.2$$

We have now fully defined our state transition matrix, and we can now do some calculations.

## 1.1 Example Calculations

### 1.1.1 What is the probability of being healthy for 10 days in a row, given that we already start out as healthy?

Well, that is:

$$p(healthy \; 10 \; days \; in \; a \; row \; | \; healthy \; at \; t=0) = 0.995^9 = 95.6 \%$$

How about the probability of being healthy for 100 days in a row?

$$p(healthy \; 100 \; days \; in \; a \; row \; | \; healthy \; at \; t=0) = 0.995^{99} = 60.9 \%$$

## 2. Expected Number of Continuously Sick Days

We can now look at the expected number of days that you would remain in the same state (e.g. how many days would you expect to stay sick given the model?). This is a bit more difficult than the last problem, but completely doable, only involving the mathematics of <a href="https://en.wikipedia.org/wiki/Geometric_series">infinite sums</a>.

First, we can look at the probability of being in state $i$ and going to state $i$ in the next step. That is just $A(i,i)$:

$$p \big(s(t)=i \; | \; s(t-1)=i \big) = A(i, i)$$

Now, what is the probability distribution that we actually want to calculate? How about we calculate the probability that we stay in state $i$ for $n$ transitions, at which point we move to another state. The probability of leaving state $i$ on any given step is:

$$p \big(s(t) \neq i \; | \; s(t-1)=i \big) = 1 - A(i, i)$$

So, the joint probability that we are trying to model is:

$$p\big(s(1)=i, s(2)=i, ..., s(n)=i, s(n+1) \neq i\big) = A(i,i)^{n-1}\big(1-A(i,i)\big)$$

In English, this means that we raise the probability of staying in the same state, $A(i,i)$, to the power of the number of transitions that keep us there (it is $n-1$ rather than $n$ because we are given that we start in that state, so there is no transition associated with the first day), and then multiply by $1 - A(i,i)$, the probability of finally transitioning out of that state. This leaves us with an expected value for $n$ of:

$$E(n) = \sum n p(n) = \sum_{n=1}^{\infty} nA(i,i)^{n-1}\big(1-A(i,i)\big)$$

Note, in the above equation $p(n)$ is the probability that we will see state $i$ $n-1$ times after starting from $i$ and then see a state that is not $i$. Also, we know that the expected value of $n$ should be the sum of all possible values of $n$ times $p(n)$.

### 2.1 Expected $n$

So, we can now expand this function and calculate the two sums separately.
$$E(n) = \sum_{n=1}^{\infty} nA(i,i)^{n-1}\big(1 - A(i,i)\big) = \sum nA(i, i)^{n-1} - \sum nA(i,i)^n$$

**First Sum**<br>
With our first sum, writing $a = A(i,i)$ for brevity, we can say that:

$$S = \sum na^{n-1}$$

$$S = 1 + 2a + 3a^2 + 4a^3 + ...$$

And we can then multiply that sum, $S$, by $a$, to get:

$$aS = a + 2a^2 + 3a^3 + 4a^4 + ...$$

And then we can subtract $aS$ from $S$:

$$S - aS = S' = 1 + a + a^2 + a^3 + ...$$

This $S'$ is another infinite sum, but it is one that is much easier to solve!

$$S' = 1 + a + a^2 + a^3 + ...$$

And then $aS'$ is:

$$aS' = a + a^2 + a^3 + a^4 + ...$$

Which, when we then do $S' - aS'$, we end up with:

$$S' - aS' = 1$$

$$S' = \frac{1}{1 - a}$$

And if we then substitute that value in for $S'$ above:

$$S - aS = S' = 1 + a + a^2 + a^3 + ... = \frac{1}{1 - a}$$

$$S - aS = \frac{1}{1 - a}$$

$$S = \frac{1}{(1 - a)^2}$$

**Second Sum**<br>
We can now look at our second sum:

$$S = \sum na^n$$

$$S = 1a + 2a^2 + 3a^3 + ...$$

$$aS = 1a^2 + 2a^3 + ...$$

$$S - aS = S' = a + a^2 + a^3 + ...$$

$$aS' = a^2 + a^3 + a^4 + ...$$

$$S' - aS' = a$$

$$S' = \frac{a}{1 - a}$$

And we can plug back in $S'$ to get:

$$S - aS = \frac{a}{1 - a}$$

$$S = \frac{a}{(1 - a)^2}$$

**Combine** <br>
We can now combine these two sums as follows:

$$E(n) = \frac{1}{(1 - a)^2} - \frac{a}{(1-a)^2}$$

$$E(n) = \frac{1}{1-a}$$

**Calculate Number of Sick Days**<br>
So, how do we calculate the expected number of consecutive sick days? With $a = A(i,i) = p(sick \; | \; sick) = 0.8$, that is just:

$$\frac{1}{1 - 0.8} = 5$$

## 3. SEO and Bounce Rate Optimization

We are now going to look at SEO and Bounce Rate Optimization. This is a problem that every developer and website owner can relate to. You have a website and obviously you would like to increase traffic, increase conversions, and avoid a high bounce rate (which could lead to google assigning your page a low ranking). What would a good way of modeling this data be? Without even looking at any code, we can look at some examples of things that we want to know, and how they relate to markov models.

### 3.1 Arrival

First and foremost, how do people arrive on your page? Is it your home page? Your landing page? Well, this is just the very first page of what is hopefully a sequence of pages. So, the markov analogy here is that this is just the initial state distribution or $\pi$. So, once we have our markov model, the $\pi$ vector will tell us which of our pages a user is most likely to start on.

### 3.2 Sequences of Pages

What about sequences of pages? Well, if you think people are getting to your landing page, hitting the buy button, checking out, and then closing the browser window, you can test the validity of that assumption by calculating the probability of that sequence. Of course, the probability of any sequence is probably going to be much less than 1. This is because a longer sequence means more multiplication, and hence smaller final numbers. We do have two alternatives however:

> * 1) You can compare the probability of two different sequences. So, are people going through the entire checkout process? Or is it more probable that they are just bouncing?
> * 2) Another option is to just find the transition probabilities themselves. These are conditional probabilities instead of joint probabilities. You want to know, once they have made it to the landing page, what is the probability of hitting buy. Then, once they have hit buy, what is the probability of them completing the checkout.

### 3.3 Bounce Rate

This is hard to measure, unless you are google and hence have analytics on nearly every page on the web.
This is because once a user has left your site, you can no longer run code on their computer or track what they are doing. However, let's pretend that we can determine this information. Once we have done this, we can measure which page has the highest bounce rate. At this point we can manually analyze that page and ask our marketing people "what is different about this page that people don't find it useful/want to leave?" We can then address that problem, and the hopefully later analysis shows that the fixed page no longer has a high bounce right. In the markov model, we can just represents this as the null state. ### 3.4 Data So, the data we are going to be working with has two columns: `last_page_id` and `next_page_id`. This can be interpreted as the current page and the next page. The site has 10 pages with the id's 0-9. We can represent start pages by making the current page -1, and the next page the actual page. We can represent the end of the page with two different codes, `B`(bounce) or `C` (close). In the case of bounce, the user saw the page and then immediately bounced. In the case of close, the user saw the page stayed and potentially saw some useful information, and then closed the window. So, you can imagine that our engineer may use time as a factor in determining if it is a bounce or a close. ``` import numpy as np import pandas as pd """Goal here is to store start page and end page, and the count how many times that happens. After that we are going to turn it into a probability distribution. We can divide all transitions that start with specific start state, by row_sum""" transitions = {} # getting all specific transitions from start pg to end pg, tallying up # of times each occurs row_sums = {} # start date as key -> getting number of times each starting pg occurs # Collect our counts for line in open('../../../data/site/site_data.csv'): s, e = line.rstrip().split(',') # get start and end page transitions[(s, e)] = transitions.get((s, e), 0.) + 1 row_sums[s] = row_sums.get(s, 0.) + 1 # Normalize the counts so they become real probability distributions for k, v in transitions.items(): s, e = k transitions[k] = v / row_sums[s] # Calculate initial state distribution print('Initial state distribution') for k, v in transitions.items(): s, e = k if s == '-1': # this means it is the start of the sequence. print (e, v) # Which page has the highest bounce rate? for k, v in transitions.items(): s, e = k if e == 'B': print(f'Bounce rate for {s}: {v}') ``` We can see that page with `id` 9 has the highest value in the initial state distribution, so we are most likely to start on that page. We can then see that the page with highest bounce rate is also at page `id` 9. ## 4. Build a 2nd-order language model and generate phrases So, we are now going to work with non first order markov chains for a little bit. In this example we are going to try and create a language model. So we are going to first train a model on some data to determine the distribution of a word given the previous two words. We can then use this model to generate new phrases. Note that another step of this model would be to calculate the probability of a phrase. So the data that we are going to look at is just a collection of Robert Frost Poems. It is just a text file with all of the poems concatenated together. So, the first thing we are going to want to do is tokenize each sentence, and remove punctuation. 
It will look similar to this: ``` def remove_punctuation(s): return s.translate(None, string.punctuation) tokens = [t for t in remove_puncuation(line.rstrip().lower()).split()] ``` Once we have tokenized each line, we want to perform various counts in addition to the second order model counts. We need to measure the initial distribution of words, or stated another way the distribution of the first word of a sentence. We also want to know the distribution of the second word of a sentence. Both of these do not have two previous words, so they are not second order. We could technically include them in the second order measurement by using `None` in place of the previous words, but we won't do that here. We also want to keep track of how to end the sentence (end of sentence distribution, will look similar to (w(t-2), w(t-1) -> END)), so we will include a special token for that too. When we do this counting, what we first want to do is create an array of all possibilities. So, for example if we had two sentences: ``` I love dogs I love cats ``` Then we could have a dictionary where the key was `(I, love)` and the value was an array `[dogs, cats]`. If "I love" was also a stand alone sentence, then the value would be `[dogs, cats, END]`. The function below can help us with this, since we first need to check if there is any value for the key, create an array if not, otherwise just append to the array. ``` def add2dict(d, k, v): if k not in d: d[k] = [] else: d[k].append(v) ``` One we have collected all of these arrays of possible next words, we need to turn them into **probability distributions**. For example, the array `[cat, cat, dog]` would become the dictionary `{"cat": 2/3, "dog": 1/3}`. Here is a function that can do this: ``` def list2pdict(ts): d = {} n = len(ts) for t in ts: d[t] = d.get(t, 0.) + 1 for t, c in d.items(): d[t] = c / n return d ``` Next, we will need a function that can sample from this dictionary. To do this we will need to generate a random number between 0 and 1, and then use the distribution of the words to sample a word given a random number. Here is a function that can do that: ``` def sample_word(d): p0 = np.random.random() cumulative = 0 for t, p in d.items(): cumulative += p if p0 < cumulative: return t assert(False) # should never get here ``` Because all of our distributions are structured as dictionaries, we can use the same function for all of them. ``` import numpy as np import string """3 dicts. 1st store pdist for the start of a phrase, then a second word dict which stores the distributions for the 2nd word of a sentence, and then we are going to have a dict for all second order transitions""" initial = {} second_word = {} transitions = {} def remove_punctuation(s): return s.translate(str.maketrans('', '', string.punctuation)) def add2dict(d, k, v): """Parameters: Dictionary, Key, Value""" if k not in d: d[k] = [] d[k].append(v) # Loop through file of poems for line in open('../../../data/poems/robert_frost.txt'): tokens = remove_punctuation(line.rstrip().lower()).split() # Get all tokens for specific line we are looping over T = len(tokens) # Length of sequence for i in range(T): # Loop through every token in sequence t = tokens[i] if i == 0: # We are looking at first word initial[t] = initial.get(t, 0.) 
+ 1 else: t_1 = tokens[i - 1] if i == T - 1: # Looking at last word add2dict(transitions, (t_1, t), 'END') if i == 1: # second word of sentence, hence only 1 previous word add2dict(second_word, t_1, t) else: t_2 = tokens[i - 2] # Get second previous word add2dict(transitions, (t_2, t_1), t) # add previous and 2nd previous word as key, and current word as val # Normalize the distributions initial_total = sum(initial.values()) for t, c in initial.items(): initial[t] = c / initial_total # Take our list and turn it into a dictionary of probabilities def list2pdict(ts): d = {} n = len(ts) # get total number of values for t in ts: # look at each token d[t] = d.get(t, 0.) + 1 for t, c in d.items(): # go through dictionary, divide frequency by sum d[t] = c / n return d for t_1, ts in second_word.items(): second_word[t_1] = list2pdict(ts) for k, ts in transitions.items(): transitions[k] = list2pdict(ts) def sample_word(d): p0 = np.random.random() # Generate random number from 0 to 1 cumulative = 0 # cumulative count for all probabilities seen so far for t, p in d.items(): cumulative += p if p0 < cumulative: return t assert(False) # should never hit this """Function to generate a poem""" def generate(): for i in range(4): sentence = [] # initial word w0 = sample_word(initial) sentence.append(w0) # sample second word w1 = sample_word(second_word[w0]) sentence.append(w1) # second-order transitions until END -> enter infinite loop while True: w2 = sample_word(transitions[(w0, w1)]) # sample next word given previous two words if w2 == 'END': break sentence.append(w2) w0 = w1 w1 = w2 print(' '.join(sentence)) generate() ``` ## 5. Google's PageRank Algorithm Markov models were even used in Google's PageRank algorithm. The basic problem we face is: > * We have $M$ webpages that link to eachother, and we would like to assign importance scores $x(1),...,x(M)$ * All of these scores are greater than or equal to 0 * So, we want to assign a page rank to all of these pages How can we go about doing this? Well, we can think of a webpage as a sequence, and the page you are on as the state. Where does the ranking come from? Well, the ranking actually comes from the limiting distribution. That is, in the long run, the proportion of visits that will be spent on this page. Now, if you think "great that is all I need to know", slow down. How can we actually do this in practice? How do we train the markov model, and what are the values we assign to the state transition matrix? And how can we ensure that the limiting distribution exists and is unique? The key insight was that **we can use the linked structure of the web to determine the ranking**. The main idea is that a *link to a page* is like a *vote for its importance*. So, as a first attempt we could just use a frequency count to measure the votes. Of course, that wouldn't be a valid probability distribution, so we could just divide each row by its sum to make it sum to 1. So we set: $$A(i, j) = \frac{1}{n(i)} \; if \; i \; links \; to \; j$$ $$A(i, j) = 0 \; otherwise$$ Here $n(i)$ stands for the total number of links on a page, and you can confirm that the sum of a row is $\frac{n(i)}{n(i)} = 1$, so this is a valid markov matrix. Now, we still aren't sure if the limiting distribution is unique. ### 5.1 This is already a good start Let's keep in mind that the above solution already solves a few problems. For instance, let's say you are a spammer and you want to sell 1000 links on your webpage. 
Well, because the transition matrix must remain a valid probability matrix, the rows must sum to 1, which means that each of your links now only has a strength of $\frac{1}{1000}$. For example, the frequency matrix would look like:

| |abc.com|amazon.com|facebook.com|github.com|
|--- |--- |--- | --- |--- |
|thespammer.com|1 |1 |1 |1 |

And then if we transformed that into a probability matrix it would just be each value divided by the total number of links, 4:

| |abc.com|amazon.com|facebook.com|github.com|
|--- |--- |--- | --- |--- |
|thespammer.com|0.25 |0.25 |0.25 |0.25 |

You may then think, "I will just create 1000 pages and each of them will only have 1 link." Unfortunately, since nobody knows about those 1000 pages you just created, nobody is going to link to them, which means they are impossible to get to. So, in the limiting distribution, those states will have 0 probability because you can't even get to them, so their outgoing links are worthless. Remember, the markov chain's limiting distribution models the long-run proportion of visits to a state. So, if you never visit that state, its probability will be 0.

We still have not ensured that the limiting distribution exists and is unique.

### 5.2 Perron-Frobenius Theorem

How can we ensure that our model has a unique stationary distribution? In 1910, this was actually determined. It is known as the **Perron-Frobenius Theorem**, and it states that:

> *If our transition matrix is a markov matrix (meaning that all of the rows sum to 1, and all of the values are strictly positive, i.e. no values that are 0), then the stationary distribution exists and is unique.*

In fact, we can start in any initial state, and as time approaches infinity we will always end up with the same stationary distribution; therefore this is also the limiting distribution.

So, how can we satisfy the PF criterion? Let's return to this idea of **smoothing**, which we first talked about when discussing how to train a markov model. The basic idea was that we can make things that were 0 non-zero, so there is still a small possibility that we can get to that state. This might be good news for the spammer. So, we can create a uniform probability distribution $U$, an $M \times M$ matrix with every entry equal to $\frac{1}{M}$ ($M$ is the number of states). PageRank's solution was to take the matrix we had before and multiply it by 0.85, take the uniform distribution and multiply it by 0.15, and add them together to get the final PageRank matrix.

$$G = 0.85A + 0.15U$$

Now all of the elements are strictly positive, and we can convince ourselves that $G$ is still a valid markov matrix.
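To make this construction concrete, here is a minimal sketch (added here, not part of the original notebook) that builds $G$ for a tiny made-up three-page web and finds its limiting distribution by repeatedly applying the transition matrix; the link structure is invented purely for illustration.

```
import numpy as np

# A made-up link structure for 3 pages: page 0 links to 1 and 2; pages 1 and 2 link back to 0
links = {0: [1, 2], 1: [0], 2: [0]}
M = 3

# A(i, j) = 1 / n(i) if i links to j, 0 otherwise
A = np.zeros((M, M))
for i, outlinks in links.items():
    for j in outlinks:
        A[i, j] = 1.0 / len(outlinks)

# Smooth with the uniform matrix U (every entry 1/M) so every element of G is strictly positive
U = np.full((M, M), 1.0 / M)
G = 0.85 * A + 0.15 * U

# Power iteration: repeatedly apply G to an arbitrary starting distribution
pi = np.full(M, 1.0 / M)
for _ in range(100):
    pi = pi @ G

print(pi.round(3), pi.sum())  # the limiting distribution; it sums to 1
```

Because every entry of $G$ is strictly positive, the same vector comes out no matter which starting distribution is used, which is exactly the uniqueness guarantee discussed above.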
# Quantization of Signals *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).* ## Spectral Shaping of the Quantization Noise The quantized signal $x_Q[k]$ can be expressed by the continuous amplitude signal $x[k]$ and the quantization error $e[k]$ as \begin{equation} x_Q[k] = \mathcal{Q} \{ x[k] \} = x[k] + e[k] \end{equation} According to the [introduced model](linear_uniform_quantization_error.ipynb#Model-for-the-Quantization-Error), the quantization noise can be modeled as uniformly distributed white noise. Hence, the noise is distributed over the entire frequency range. The basic concept of [noise shaping](https://en.wikipedia.org/wiki/Noise_shaping) is a feedback of the quantization error to the input of the quantizer. This way the spectral characteristics of the quantization noise can be modified, i.e. spectrally shaped. Introducing a generic filter $h[k]$ into the feedback loop yields the following structure ![Feedback structure for noise shaping](noise_shaping.png) The quantized signal can be deduced from the block diagram above as \begin{equation} x_Q[k] = \mathcal{Q} \{ x[k] - e[k] * h[k] \} = x[k] + e[k] - e[k] * h[k] \end{equation} where the additive noise model from above has been introduced and it has been assumed that the impulse response $h[k]$ is normalized such that the magnitude of $e[k] * h[k]$ is below the quantization step $Q$. The overall quantization error is then \begin{equation} e_H[k] = x_Q[k] - x[k] = e[k] * (\delta[k] - h[k]) \end{equation} The power spectral density (PSD) of the quantization error with noise shaping is calculated to \begin{equation} \Phi_{e_H e_H}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \cdot \left| 1 - H(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \right|^2 \end{equation} Hence the PSD $\Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega})$ of the quantizer without noise shaping is weighted by $| 1 - H(\mathrm{e}^{\,\mathrm{j}\,\Omega}) |^2$. Noise shaping allows a spectral modification of the quantization error. The desired shaping depends on the application scenario. For some applications, high-frequency noise is less disturbing as low-frequency noise. ### Example - First-Order Noise Shaping If the feedback of the error signal is delayed by one sample we get with $h[k] = \delta[k-1]$ \begin{equation} \Phi_{e_H e_H}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) \cdot \left| 1 - \mathrm{e}^{\,-\mathrm{j}\,\Omega} \right|^2 \end{equation} For linear uniform quantization $\Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \sigma_e^2$ is constant. Hence, the spectral shaping constitutes a high-pass characteristic of first order. The following simulation evaluates the noise shaping quantizer of first order. 
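Before running it, one short intermediate step (added here for clarity; it is not part of the original notes) makes the high-pass behaviour explicit:

\begin{equation}
\left| 1 - \mathrm{e}^{\,-\mathrm{j}\,\Omega} \right|^2 = \left(1 - \mathrm{e}^{\,-\mathrm{j}\,\Omega}\right)\left(1 - \mathrm{e}^{\,\mathrm{j}\,\Omega}\right) = 2 - 2 \cos(\Omega) = 4 \sin^2 \left(\frac{\Omega}{2}\right)
\end{equation}

This weighting vanishes at $\Omega = 0$ and grows monotonically to its maximum of $4$ at $\Omega = \pi$, so the quantization noise power is pushed away from low frequencies towards high frequencies. This is exactly the shape of the theoretic PSD curve plotted by the simulation below.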
``` %matplotlib inline import numpy as np import matplotlib.pyplot as plt import scipy.signal as sig w = 8 # wordlength of the quantized signal xmin = -1 # minimum of input signal N = 32768 # number of samples def uniform_midtread_quantizer_w_ns(x, Q): # limiter x = np.copy(x) idx = np.where(x <= -1) x[idx] = -1 idx = np.where(x > 1 - Q) x[idx] = 1 - Q # linear uniform quantization with noise shaping xQ = Q * np.floor(x/Q + 1/2) e = xQ - x xQ = xQ - np.concatenate(([0], e[0:-1])) return xQ[1:] # quantization step Q = 1/(2**(w-1)) # compute input signal np.random.seed(5) x = np.random.uniform(size=N, low=xmin, high=(-xmin-Q)) # quantize signal xQ = uniform_midtread_quantizer_w_ns(x, Q) e = xQ - x[1:] # estimate PSD of error signal nf, Pee = sig.welch(e, nperseg=64) # estimate SNR SNR = 10*np.log10((np.var(x)/np.var(e))) print('SNR = {:2.1f} dB'.format(SNR)) plt.figure(figsize=(10,5)) Om = nf*2*np.pi plt.plot(Om, Pee*6/Q**2, label='estimated PSD') plt.plot(Om, np.abs(1 - np.exp(-1j*Om))**2, label='theoretic PSD') plt.plot(Om, np.ones(Om.shape), label='PSD w/o noise shaping') plt.title('PSD of quantization error') plt.xlabel(r'$\Omega$') plt.ylabel(r'$\hat{\Phi}_{e_H e_H}(e^{j \Omega}) / \sigma_e^2$') plt.axis([0, np.pi, 0, 4.5]); plt.legend(loc='upper left') plt.grid() ``` **Exercise** * The overall average SNR is lower than for the quantizer without noise shaping. Why? Solution: The average power per frequency is lower that without noise shaping for frequencies below $\Omega \approx \pi$. However, this comes at the cost of a larger average power per frequency for frequencies above $\Omega \approx \pi$. The average power of the quantization noise is given as the integral over the PSD of the quantization noise. It is larger for noise shaping and the resulting SNR is consequently lower. Noise shaping is nevertheless beneficial in applications where a lower quantization error in a limited frequency region is desired. **Copyright** This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
# ------------ First A.I. activity ------------ ## 1. IBOVESPA volume prediction -> Importing libraries that are going to be used in the code ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt ``` -> Importing the datasets ``` dataset = pd.read_csv("datasets/ibovespa.csv",delimiter = ";") ``` -> Converting time to datetime in order to make it easy to manipulate ``` dataset['Data/Hora'] = dataset['Data/Hora'].str.replace("/","-") dataset['Data/Hora'] = pd.to_datetime(dataset['Data/Hora']) ``` -> Visualizing the data ``` dataset.head() ``` -> creating date dataframe and splitting its features date = dataset.iloc[:,0:1] date['day'] = date['Data/Hora'].dt.day date['month'] = date['Data/Hora'].dt.month date['year'] = date['Data/Hora'].dt.year date = date.drop(columns = ['Data/Hora']) -> removing useless columns ``` dataset = dataset.drop(columns = ['Data/Hora','Unnamed: 7','Unnamed: 8','Unnamed: 9']) ``` -> transforming atributes to the correct format ``` for key, value in dataset.head().iteritems(): dataset[key] = dataset[key].str.replace(".","").str.replace(",",".").astype(float) """ for key, value in date.head().iteritems(): dataset[key] = date[key] """ ``` -> Means ``` dataset.mean() ``` -> plotting graphics ``` plt.boxplot(dataset['Volume']) plt.title('boxplot') plt.xlabel('volume') plt.ylabel('valores') plt.ticklabel_format(style='sci', axis='y', useMathText = True) dataset['Maxima'].median() dataset['Minima'].mean() ``` -> Média truncada ``` from scipy import stats m = stats.trim_mean(dataset['Minima'], 0.1) print(m) ``` -> variancia e standard deviation ``` v = dataset['Cotacao'].var() print(v) d = dataset['Cotacao'].std() print(v) m = dataset['Cotacao'].mean() print(m) ``` -> covariancia dos atributos, mas antes fazer uma standard scaler pra facilitar a visão e depois transforma de volta pra dataframe pandas #### correlation shows us the relationship between the two variables and how are they related while covariance shows us how the two variables vary from each other. ``` from sklearn.preprocessing import StandardScaler sc = StandardScaler() dataset_cov = sc.fit_transform(dataset) dataset_cov = pd.DataFrame(dataset_cov) dataset_cov.cov() ``` -> plotting the graph may be easier to observe the correlation ``` corr = dataset.corr() corr.style.background_gradient(cmap = 'coolwarm') pd.plotting.scatter_matrix(dataset, figsize=(6, 6)) plt.show() plt.matshow(dataset.corr()) plt.xticks(range(len(dataset.columns)), dataset.columns) plt.yticks(range(len(dataset.columns)), dataset.columns) plt.colorbar() plt.show() ```
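As a small numerical check on the covariance/correlation remark above (a sketch added here; it assumes the `dataset` dataframe from the earlier cells is still in memory), the Pearson correlation of two columns is just their covariance divided by the product of their standard deviations, which is also why standardizing the data first makes the covariance matrix coincide with the correlation matrix:

```
import numpy as np

x = dataset['Cotacao']
y = dataset['Volume']

# correlation = covariance / (std_x * std_y)
manual_corr = x.cov(y) / (x.std() * y.std())
pandas_corr = x.corr(y)

print(manual_corr, pandas_corr)
assert np.isclose(manual_corr, pandas_corr)
```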
# Plotting massive data sets This notebook plots about half a million LIDAR points around Toronto from the KITTI data set. ([Source](http://www.cvlibs.net/datasets/kitti/raw_data.php)) The data is meant to be played over time. With pydeck, we can render these points and interact with them. ### Cleaning the data First we need to import the data. Each row of data represents one x/y/z coordinate for a point in space at a point in time, with each frame representing about 115,000 points. We also need to scale the points to plot closely on a map. These point coordinates are not given in latitude and longitude, so as a workaround we'll plot them very close to (0, 0) on the earth. In future versions of pydeck other viewports, like a flat plane, will be supported out-of-the-box. For now, we'll make do with scaling the points. ``` import pandas as pd all_lidar = pd.concat([ pd.read_csv('https://raw.githubusercontent.com/ajduberstein/kitti_subset/master/kitti_1.csv'), pd.read_csv('https://raw.githubusercontent.com/ajduberstein/kitti_subset/master/kitti_2.csv'), pd.read_csv('https://raw.githubusercontent.com/ajduberstein/kitti_subset/master/kitti_3.csv'), pd.read_csv('https://raw.githubusercontent.com/ajduberstein/kitti_subset/master/kitti_4.csv'), ]) # Filter to one frame of data lidar = all_lidar[all_lidar['source'] == 136] lidar.loc[: , ['x', 'y']] = lidar[['x', 'y']] / 10000 ``` ### Plotting the data We'll define a single `PointCloudLayer` and plot it. Pydeck by default expects the input of `get_position` to be a string name indicating a single position value. For convenience, you can pass in a string indicating the X/Y/Z coordinate, here `get_position='[x, y, z]'`. You also have access to a small expression parser--in our `get_position` function here, we increase the size of the z coordinate times 10. Using `pydeck.data_utils.compute_view`, we'll zoom to the approximate center of the data. ``` import pydeck as pdk point_cloud = pdk.Layer( 'PointCloudLayer', lidar[['x', 'y', 'z']], get_position=['x', 'y', 'z * 10'], get_normal=[0, 0, 1], get_color=[255, 0, 100, 200], pickable=True, auto_highlight=True, point_size=1) view_state = pdk.data_utils.compute_view(lidar[['x', 'y']], 0.9) view_state.max_pitch = 360 view_state.pitch = 80 view_state.bearing = 120 r = pdk.Deck( point_cloud, initial_view_state=view_state, map_provider=None, ) r.show() import time from collections import deque # Choose a handful of frames to loop through frame_buffer = deque([42, 56, 81, 95]) print('Press the stop icon to exit') while True: current_frame = frame_buffer[0] lidar = all_lidar[all_lidar['source'] == current_frame] r.layers[0].get_position = '@@=[x / 10000, y / 10000, z * 10]' r.layers[0].data = lidar.to_dict(orient='records') frame_buffer.rotate() r.update() time.sleep(0.5) ```
# Seq2Seq with Attention for Korean-English Neural Machine Translation - Network architecture based on this [paper](https://arxiv.org/abs/1409.0473) - Fit to run on Google Colaboratory ``` import os import io import tarfile import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import torchtext from torchtext.data import Dataset from torchtext.data import Example from torchtext.data import Field from torchtext.data import BucketIterator ``` # 1. Upload Data to Colab Workspace 로컬에 존재하는 다음 3개의 데이터를 가상 머신에 업로드. 파일의 원본은 [여기](https://github.com/jungyeul/korean-parallel-corpora/tree/master/korean-english-news-v1/)에서도 확인 - korean-english-park.train.tar.gz - korean-english-park.dev.tar.gz - korean.english-park.test.tar.gz ``` # 현재 작업경로를 확인 & 'data' 폴더 생성 !echo 'Current working directory:' ${PWD} !mkdir -p data/ !ls -al # 로컬의 데이터 업로드 from google.colab import files uploaded = files.upload() # 'data' 폴더 하위로 이동, 잘 옮겨졌는지 확인 !mv *.tar.gz data/ !ls -al data/ ``` # 2. Check Packages ## KoNLPy (설치 필요) ``` # Java 1.8 & KoNLPy 설치 !apt-get update !apt-get install g++ openjdk-8-jdk python-dev python3-dev !pip3 install JPype1-py3 !pip3 install konlpy from konlpy.tag import Okt ko_tokens = Okt().pos('트위터 데이터로 학습한 형태소 분석기가 잘 실행이 되는지 확인해볼까요?') # list of (word, POS TAG) tuples ko_tokens = [t[0] for t in ko_tokens] # Only get words print(ko_tokens) del ko_tokens # 필요 없으니까 삭제 ``` ## Spacy (이미 설치되어 있음) ``` # 설치가 되어있는지 확인 !pip show spacy # 설치가 되어있는지 확인 (없다면 자동설치됨) !python -m spacy download en_core_web_sm import spacy spacy_en = spacy.load('en_core_web_sm') en_tokens = [t.text for t in spacy_en.tokenizer('Check that spacy tokenizer works.')] print(en_tokens) del en_tokens # 필요 없으니까 삭제 ``` # 3. Define Tokenizing Functions 문장을 받아 그보다 작은 어절 혹은 형태소 단위의 리스트로 반환해주는 함수를 각 언어에 대해 작성 - Korean: konlpy.tag.Okt() <- Twitter()에서 명칭변경 - English: spacy.tokenizer ## Korean Tokenizer ``` #from konlpy.tag import Okt class KoTokenizer(object): """For Korean.""" def __init__(self): self.tokenizer = Okt() def tokenize(self, text): tokens = self.tokenizer.pos(text) tokens = [t[0] for t in tokens] return tokens # Usage example print(KoTokenizer().tokenize('전처리는 언제나 지겨워요.')) ``` ## English Tokenizer ``` #import spacy class EnTokenizer(object): """For English.""" def __init__(self): self.spacy_en = spacy.load('en_core_web_sm') def tokenize(self, text): tokens = [t.text for t in self.spacy_en.tokenizer(text)] return tokens # Usage example print(EnTokenizer().tokenize("What I cannot create, I don't understand.")) ``` # 4. Data Preprocessing ## Load data ``` # Current working directory & list of files !echo 'Current working directory:' ${PWD} !ls -al DATA_DIR = './data/' print('Data directory exists:', os.path.isdir(DATA_DIR)) print('List of files:') print(*os.listdir(DATA_DIR), sep='\n') def get_data_from_tar_gz(filename): """ Retrieve contents from a `tar.gz` file without extraction. Arguments: filename: path to `tar.gz` file. 
Returns: dict, (name, content) pairs """ assert os.path.exists(filename) out = {} with tarfile.open(filename, 'r:gz') as tar: for member in tar.getmembers(): lang = member.name.split('.')[-1] # ex) korean-english-park.train.ko -> ko f = tar.extractfile(member) if f is not None: content = f.read().decode('utf-8') content = content.splitlines() out[lang] = content assert isinstance(out, dict) return out # Each 'xxx_data' is a dictionary with keys; 'ko', 'en' train_dict= get_data_from_tar_gz(os.path.join(DATA_DIR, 'korean-english-park.train.tar.gz')) # train dev_dict = get_data_from_tar_gz(os.path.join(DATA_DIR, 'korean-english-park.dev.tar.gz')) # dev test_dict = get_data_from_tar_gz(os.path.join(DATA_DIR, 'korean-english-park.test.tar.gz')) # test # Some samples (ko) train_dict['ko'][100:105] # Some samples (en) train_dict['en'][100:105] ``` ## Define Datasets ``` #from torchtext.data import Dataset #from torchtext.data import Example class KoEnTranslationDataset(Dataset): """A dataset for Korean-English Neural Machine Translation.""" @staticmethod def sort_key(ex): return torchtext.data.interleave_keys(len(ex.src), len(ex.trg)) def __init__(self, data_dict, field_dict, source_lang='ko', max_samples=None, **kwargs): """ Only 'ko' and 'en' supported for `language` Arguments: data_dict: dict of (`language`, text) pairs. field_dict: dict of (`language`, Field instance) pairs. source_lang: str, default 'ko'. Other kwargs are passed to the constructor of `torchtext.data.Dataset`. """ if not all(k in ['ko', 'en'] for k in data_dict.keys()): raise KeyError("Check data keys.") if not all(k in ['ko', 'en'] for k in field_dict.keys()): raise KeyError("Check field keys.") if source_lang == 'ko': fields = [('src', field_dict['ko']), ('trg', field_dict['en'])] src_data = data_dict['ko'] trg_data = data_dict['en'] elif source_lang == 'en': fields = [('src', field_dict['en']), ('trg', field_dict['ko'])] src_data = data_dict['en'] trg_data = data_dict['ko'] else: raise NotImplementedError if not len(src_data) == len(trg_data): raise ValueError('Inconsistent number of instances between two languages.') examples = [] for i, (src_line, trg_line) in enumerate(zip(src_data, trg_data)): src_line = src_line.strip() trg_line = trg_line.strip() if src_line != '' and trg_line != '': examples.append( torchtext.data.Example.fromlist( [src_line, trg_line], fields ) ) i += 1 if max_samples is not None: if i >= max_samples: break super(KoEnTranslationDataset, self).__init__(examples, fields, **kwargs) ``` ## Define Fields - Instantiate tokenizers; one for each language. - The 'tokenize' argument of `Field` requires a tokenizing function. ``` #from torchtext.data import Field ko_tokenizer = KoTokenizer() # korean tokenizer en_tokenizer = EnTokenizer() # english tokenizer # Field instance for korean KOREAN = Field( init_token='<sos>', eos_token='<eos>', tokenize=ko_tokenizer.tokenize, batch_first=True, lower=False ) # Field instance for english ENGLISH = Field( init_token='<sos>', eos_token='<eos>', tokenize=en_tokenizer.tokenize, batch_first=True, lower=True ) # Store Field instances in a dictionary field_dict = { 'ko': KOREAN, 'en': ENGLISH, } ``` ## Instantiate datasets - one for each set (train, dev, test) ``` # 학습시간 단축을 위해 학습 데이터 줄이기 MAX_TRAIN_SAMPLES = 10000 # Instantiate with data train_set = KoEnTranslationDataset(train_dict, field_dict, max_samples=MAX_TRAIN_SAMPLES) print('Train set ready.') print('#. 
examples:', len(train_set.examples)) dev_set = KoEnTranslationDataset(dev_dict, field_dict) print('Dev set ready...') print('#. examples:', len(dev_set.examples)) test_set = KoEnTranslationDataset(test_dict, field_dict) print('Test set ready...') print('#. examples:', len(test_set.examples)) # Training example (KO, source language) train_set.examples[50].src # Training example (EN, target language) train_set.examples[50].trg ``` ## Build Vocabulary - 각 언어별 생성: `Field`의 인스턴스를 활용 - 최소 빈도수(`MIN_FREQ`) 값을 작게 하면 vocabulary의 크기가 커짐. - 최소 빈도수(`MIN_FREQ`) 값을 크게 하면 vocabulary의 크기가 작아짐. ``` MIN_FREQ = 2 # TODO: try different values # Build vocab for Korean KOREAN.build_vocab(train_set, dev_set, test_set, min_freq=MIN_FREQ) # ko print('Size of source vocab (ko):', len(KOREAN.vocab)) # Check indices of some important tokens tokens = ['<unk>', '<pad>', '<sos>', '<eos>'] for token in tokens: print(f"{token} -> {KOREAN.vocab.stoi[token]}") # Build vocab for English ENGLISH.build_vocab(train_set, dev_set, test_set, min_freq=MIN_FREQ) # en print('Size of target vocab (en):', len(ENGLISH.vocab)) # Check indices of some important tokens tokens = ['<unk>', '<pad>', '<sos>', '<eos>'] for token in tokens: print(f"{token} -> {KOREAN.vocab.stoi[token]}") ``` ## Configure Device - *'런타임' -> '런타임 유형변경'* 에서 하드웨어 가속기로 **GPU** 선택 ``` device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print('Device to use:', device) ``` ## Create Data Iterators - 데이터를 미니배치(mini-batch) 단위로 반환해주는 역할 - `train_set`, `dev_set`, `test_set`에 대해 개별적으로 정의해야 함 - `BATCH_SIZE`를 정의해주어야 함 - `torchtext.data.BucketIterator`는 하나의 미니배치를 서로 비슷한 길이의 관측치들로 구성함 - [Bucketing](https://medium.com/@rashmi.margani/how-to-speed-up-the-training-of-the-sequence-model-using-bucketing-techniques-9e302b0fd976)의 효과: 하나의 미니배치 내 padding을 최소화하여 연산의 낭비를 줄여줌 ``` BATCH_SIZE = 128 #from torchtext.data import BucketIterator # Train iterator train_iterator = BucketIterator( train_set, batch_size=BATCH_SIZE, train=True, shuffle=True, device=device ) print(f'Number of minibatches per epoch: {len(train_iterator)}') #from torchtext.data import BucketIterator # Dev iterator dev_iterator = BucketIterator( dev_set, batch_size=100, train=False, shuffle=False, device=device ) print(f'Number of minibatches per epoch: {len(dev_iterator)}') #from torchtext.data import BucketIterator # Test iterator test_iterator = BucketIterator( test_set, batch_size=200, train=False, shuffle=False, device=device ) print(f'Number of minibatches per epoch: {len(test_iterator)}') train_batch = next(iter(train_iterator)) print('a batch of source examples has shape:', train_batch.src.size()) # (b, s) print('a batch of target examples has shape:', train_batch.trg.size()) # (b, s) # Checking first sample in mini-batch (KO, source lang) ko_indices = train_batch.src[0] ko_tokens = [KOREAN.vocab.itos[i] for i in ko_indices] for t, i in zip(ko_tokens, ko_indices): print(f"{t} ({i})") del ko_indices, ko_tokens # Checking first sample in mini-batch (EN, target lang) en_indices = train_batch.trg[0] en_tokens = [ENGLISH.vocab.itos[i] for i in en_indices] for t, i in zip(en_tokens, en_indices): print(f"{t} ({i})") del en_indices, en_tokens del train_batch # 더 이상 필요 없으니까 삭제 ``` # 5. Building Seq2Seq Model ## Hyperparameters ``` # Hyperparameters INPUT_DIM = len(KOREAN.vocab) OUTPUT_DIM = len(ENGLISH.vocab) ENC_EMB_DIM = DEC_EMB_DIM = 100 ENC_HID_DIM = DEC_HID_DIM = 60 USE_BIDIRECTIONAL = False ``` ## Encoder ``` class Encoder(nn.Module): """ Learns an embedding for the source text. 
Arguments: input_dim: int, size of input language vocabulary. emb_dim: int, size of embedding layer output. enc_hid_dim: int, size of encoder hidden state. dec_hid_dim: int, size of decoder hidden state. bidirectional: uses bidirectional RNNs if True. default is False. """ def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, bidirectional=False): super(Encoder, self).__init__() self.input_dim = input_dim self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.bidirectional = bidirectional self.embedding = nn.Embedding( num_embeddings=self.input_dim, embedding_dim=self.emb_dim ) self.rnn = nn.GRU( input_size=self.emb_dim, hidden_size=self.enc_hid_dim, bidirectional=self.bidirectional, batch_first=True ) self.rnn_output_dim = self.enc_hid_dim if self.bidirectional: self.rnn_output_dim *= 2 self.fc = nn.Linear(self.rnn_output_dim, self.dec_hid_dim) self.dropout = nn.Dropout(.2) def forward(self, src): """ Arguments: src: 2d tensor of shape (batch_size, input_seq_len) Returns: outputs: 3d tensor of shape (batch_size, input_seq_len, num_directions * enc_h) hidden: 2d tensor of shape (b, dec_h). This tensor will be used as the initial hidden state value of the decoder (h0 of decoder). """ assert len(src.size()) == 2, 'Input requires dimension (batch_size, seq_len).' # Shape: (b, s, h) embedded = self.embedding(src) embedded = self.dropout(embedded) outputs, hidden = self.rnn(embedded) if self.bidirectional: # (2, b, enc_h) -> (b, 2 * enc_h) hidden = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1) else: # (1, b, enc_h) -> (b, enc_h) hidden = hidden.squeeze(0) # (b, num_directions * enc_h) -> (b, dec_h) hidden = self.fc(hidden) hidden = torch.tanh(hidden) return outputs, hidden ``` ## Attention ``` class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, encoder_is_bidirectional=False): super(Attention, self).__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.encoder_is_bidirectional = encoder_is_bidirectional self.attention_input_dim = enc_hid_dim + dec_hid_dim if self.encoder_is_bidirectional: self.attention_input_dim += enc_hid_dim # 2 * h_enc + h_dec self.linear = nn.Linear(self.attention_input_dim, dec_hid_dim) self.v = nn.Parameter(torch.rand(dec_hid_dim)) def forward(self, hidden, encoder_outputs): """ Arguments: hidden: 2d tensor with shape (batch_size, dec_hid_dim). encoder_outputs: 3d tensor with shape (batch_size, input_seq_len, enc_hid_dim). if encoder is bidirectional, expects (batch_size, input_seq_len, 2 * enc_hid_dim). """ # Shape check assert hidden.dim() == 2 assert encoder_outputs.dim() == 3 batch_size, seq_len, _ = encoder_outputs.size() # (b, dec_h) -> (b, s, dec_h) hidden = hidden.unsqueeze(1).expand(-1, seq_len, -1) # concat; shape results in (b, s, enc_h + dec_h). # if encoder is bidirectional, (b, s, 2 * h_enc + h_dec). concat = torch.cat((hidden, encoder_outputs), dim=2) # concat; shape is (b, s, dec_h) concat = self.linear(concat) concat = torch.tanh(concat) # tile v; (dec_h, ) -> (b, dec_h, 1) v = self.v.repeat(batch_size, 1).unsqueeze(2) # attn; (b, s, dec_h) @ (b, dec_h, 1) -> (b, s, 1) -> (b, s) attn_scores = torch.bmm(concat, v).squeeze(-1) assert attn_scores.dim() == 2 # Final shape check: (b, s) return F.softmax(attn_scores, dim=1) ``` ## Decoder ``` class Decoder(nn.Module): """ Unlike the encoder, a single forward pass of a `Decoder` instance is defined for only a single timestep. 
Arguments: output_dim: int, emb_dim: int, enc_hid_dim: int, dec_hid_dim: int, attention_module: torch.nn.Module, encoder_is_bidirectional: False """ def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, attention_module, encoder_is_bidirectional=False): super(Decoder, self).__init__() self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim = output_dim self.encoder_is_bidirectional = encoder_is_bidirectional if isinstance(attention_module, nn.Module): self.attention_module = attention_module else: raise ValueError self.rnn_input_dim = enc_hid_dim + emb_dim # enc_h + dec_emb_dim if self.encoder_is_bidirectional: self.rnn_input_dim += enc_hid_dim # 2 * enc_h + dec_emb_dim self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU( input_size=self.rnn_input_dim, hidden_size=dec_hid_dim, bidirectional=False, batch_first=True, ) out_input_dim = 2 * dec_hid_dim + emb_dim # hidden + dec_hidden_dim + dec_emb_dim self.out = nn.Linear(out_input_dim, output_dim) self.dropout = nn.Dropout(.2) def forward(self, inp, hidden, encoder_outputs): """ Arguments: inp: 1d tensor with shape (batch_size, ) hidden: 2d tensor with shape (batch_size, dec_hid_dim). This `hidden` tensor is the hidden state vector from the previous timestep. encoder_outputs: 3d tensor with shape (batch_size, seq_len, enc_hid_dim). If encoder_is_bidirectional is True, expects shape (batch_size, seq_len, 2 * enc_hid_dim). """ assert inp.dim() == 1 assert hidden.dim() == 2 assert encoder_outputs.dim() == 3 # (batch_size, ) -> (batch_size, 1) inp = inp.unsqueeze(1) # (batch_size, 1) -> (batch_size, 1, emb_dim) embedded = self.embedding(inp) embedded = self.dropout(embedded) # attention probabilities; (batch_size, seq_len) attn_probs = self.attention_module(hidden, encoder_outputs) # (batch_size, 1, seq_len) attn_probs = attn_probs.unsqueeze(1) # (b, 1, s) @ (b, s, enc_hid_dim) -> (b, 1, enc_hid_dim) weighted = torch.bmm(attn_probs, encoder_outputs) # (batch_size, 1, emb_dim + enc_hid_dim) rnn_input = torch.cat((embedded, weighted), dim=2) # output; (batch_size, 1, dec_hid_dim) # new_hidden; (1, batch_size, dec_hid_dim) output, new_hidden = self.rnn(rnn_input, hidden.unsqueeze(0)) embedded = embedded.squeeze(1) # (b, 1, emb) -> (b, emb) output = output.squeeze(1) # (b, 1, dec_h) -> (b, dec_h) weighted = weighted.squeeze(1) # (b, 1, dec_h) -> (b, dec_h) # output; (batch_size, emb + 2 * dec_h) -> (batch_size, output_dim) output = self.out(torch.cat((output, weighted, embedded), dim=1)) return output, new_hidden.squeeze(0) ``` ## Seq2Seq ``` class Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder = decoder self.device = device def forward(self, src, trg, teacher_forcing_ratio=.5): batch_size, max_seq_len = trg.size() trg_vocab_size = self.decoder.output_dim # An empty tesnor to store decoder outputs (time index first for indexing) outputs_shape = (max_seq_len, batch_size, trg_vocab_size) outputs = torch.zeros(outputs_shape).to(self.device) encoder_outputs, hidden = self.encoder(src) # first input to the decoder is '<sos>' # trg; shape (batch_size, seq_len) initial_dec_input = output = trg[:, 0] # get first timestep token for t in range(1, max_seq_len): output, hidden = self.decoder(output, hidden, encoder_outputs) outputs[t] = output # Save output for timestep t, for 1 <= t <= max_len top1_val, top1_idx = output.max(dim=1) teacher_force = torch.rand(1).item() >= teacher_forcing_ratio output = 
trg[:, t] if teacher_force else top1_idx # Switch batch and time dimensions for consistency (batch_first=True) outputs = outputs.permute(1, 0, 2) # (s, b, trg_vocab) -> (b, s, trg_vocab) return outputs ``` ## Build Model ``` # Define encoder enc = Encoder( input_dim=INPUT_DIM, emb_dim=ENC_EMB_DIM, enc_hid_dim=ENC_HID_DIM, dec_hid_dim=DEC_HID_DIM, bidirectional=USE_BIDIRECTIONAL ) print(enc) # Define attention layer attn = Attention( enc_hid_dim=ENC_HID_DIM, dec_hid_dim=DEC_HID_DIM, encoder_is_bidirectional=USE_BIDIRECTIONAL ) print(attn) # Define decoder dec = Decoder( output_dim=OUTPUT_DIM, emb_dim=DEC_EMB_DIM, enc_hid_dim=ENC_HID_DIM, dec_hid_dim=DEC_HID_DIM, attention_module=attn, encoder_is_bidirectional=USE_BIDIRECTIONAL ) print(dec) model = Seq2Seq(enc, dec, device).to(device) print(model) def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) print(f'The model has {count_parameters(model):,} trainable parameters.') ``` # 6. Train ## Optimizer - Use `optim.Adam` or `optim.RMSprop`. ``` optimizer = optim.Adam(model.parameters(), lr=0.001) #optimizer = optim.RMSprop(model.parameters(), lr=0.01) ``` ## Loss function ``` # Padding indices should not be considered when loss is calculated. PAD_IDX = ENGLISH.vocab.stoi['<pad>'] criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX) ``` ## Train function ``` def train(seq2seq_model, iterator, optimizer, criterion, grad_clip=1.0): seq2seq_model.train() epoch_loss = .0 for i, batch in enumerate(iterator): print('.', end='') src = batch.src trg = batch.trg optimizer.zero_grad() decoder_outputs = seq2seq_model(src, trg, teacher_forcing_ratio=.5) seq_len, batch_size, trg_vocab_size = decoder_outputs.size() # (b, s, trg_vocab) # (b-1, s, trg_vocab) decoder_outputs = decoder_outputs[:, 1:, :] # ((b-1) * s, trg_vocab) decoder_outputs = decoder_outputs.contiguous().view(-1, trg_vocab_size) # ((b-1) * s, ) trg = trg[:, 1:].contiguous().view(-1) loss = criterion(decoder_outputs, trg) loss.backward() # Gradient clipping; remedy for exploding gradients torch.nn.utils.clip_grad_norm_(seq2seq_model.parameters(), grad_clip) optimizer.step() epoch_loss += loss.item() return epoch_loss / len(iterator) ``` ## Evaluate function ``` def evaluate(seq2seq_model, iterator, criterion): seq2seq_model.eval() epoch_loss = 0. with torch.no_grad(): for i, batch in enumerate(iterator): print('.', end='') src = batch.src trg = batch.trg decoder_outputs = seq2seq_model(src, trg, teacher_forcing_ratio=0.) 
seq_len, batch_size, trg_vocab_size = decoder_outputs.size() # (b, s, trg_vocab) # (b-1, s, trg_vocab) decoder_outputs = decoder_outputs[:, 1:, :] # ((b-1) * s, trg_vocab) decoder_outputs = decoder_outputs.contiguous().view(-1, trg_vocab_size) # ((b-1) * s, ) trg = trg[:, 1:].contiguous().view(-1) loss = criterion(decoder_outputs, trg) epoch_loss += loss.item() return epoch_loss / len(iterator) ``` ## Epoch time measure function ``` def epoch_time(start_time, end_time): """Returns elapsed time in mins & secs.""" elapsed_time = end_time - start_time elapsed_mins = int(elapsed_time / 60) elapsed_secs = int(elapsed_time - (elapsed_mins * 60)) return elapsed_mins, elapsed_secs ``` ## Train for multiple epochs ``` NUM_EPOCHS = 50 import time import math best_dev_loss = float('inf') for epoch in range(NUM_EPOCHS): start_time = time.time() train_loss = train(model, train_iterator, optimizer, criterion) dev_loss = evaluate(model, dev_iterator, criterion) end_time = time.time() epoch_mins, epoch_secs = epoch_time(start_time, end_time) if dev_loss < best_dev_loss: best_dev_loss = dev_loss torch.save(model.state_dict(), './best_model.pt') print("\n") print(f"Epoch: {epoch + 1:>02d} | Time: {epoch_mins}m {epoch_secs}s") print(f"Train Loss: {train_loss:>.4f} | Train Perplexity: {math.exp(train_loss):7.3f}") print(f"Dev Loss: {dev_loss:>.4f} | Dev Perplexity: {math.exp(dev_loss):7.3f}") ``` ## Save last model (overfitted) ``` torch.save(model.state_dict(), './last_model.pt') ``` # 7. Test ## Function to convert indices to original text strings ``` def indices_to_text(src_or_trg, lang_field): assert src_or_trg.dim() == 1, f'{src_or_trg.dim()}' #(seq_len, ) assert isinstance(lang_field, torchtext.data.Field) assert hasattr(lang_field, 'vocab') return [lang_field.vocab.itos[t] for t in src_or_trg] ``` ## Function to make predictions - Returns a list of examples, where each example is a (src, trg, prediction) tuple. ``` def predict(seq2seq_model, iterator): seq2seq_model.eval() out = [] with torch.no_grad(): for i, batch in enumerate(iterator): src = batch.src trg = batch.trg decoder_outputs = seq2seq_model(src, trg, teacher_forcing_ratio=0.) seq_len, batch_size, trg_vocab_size = decoder_outputs.size() # (b, s, trg_vocab) # Discard initial decoder input (index = 0) #decoder_outputs = decoder_outputs[:, 1:, :] decoder_predictions = decoder_outputs.argmax(dim=-1) # (b, s) for i, pred in enumerate(decoder_predictions): out.append((src[i], trg[i], pred)) return out ``` ## Load best model ``` !ls -al # Load model model.load_state_dict(torch.load('./best_model.pt')) ``` ## Make predictions ``` # Make prediction test_predictions = predict(model, dev_iterator) for i, prediction in enumerate(test_predictions): src, trg, pred = prediction src_text = indices_to_text(src, lang_field=KOREAN) trg_text = indices_to_text(trg, lang_field=ENGLISH) pred_text = indices_to_text(pred, lang_field=ENGLISH) print('source:\n', src_text) print('target:\n', trg_text) print('prediction:\n', pred_text) print('-' * 160) if i > 5: break ``` # 8. Download Model ``` !ls -al from google.colab import files print('Downloading models...') # Known bug; if using Firefox, a print statement in the same cell is necessary. files.download('./best_model.pt') files.download('./last_model.pt') ``` # 9. Discussions ``` ```
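The Discussions section above is left empty in the original notebook. One natural follow-up is to decode a single new sentence with the trained model. The sketch below is not part of the original code; it assumes the `model`, `ko_tokenizer`, `KOREAN`, `ENGLISH`, and `device` objects defined earlier are still available, and it uses simple greedy decoding (always taking the arg-max token) rather than beam search.

```
def translate_sentence(sentence, max_len=30):
    """Greedy decoding for a single Korean sentence (illustrative sketch only)."""
    model.eval()
    # Tokenize and numericalize the source, adding the same special tokens the Field uses
    tokens = ['<sos>'] + ko_tokenizer.tokenize(sentence) + ['<eos>']
    src_indices = [KOREAN.vocab.stoi[t] for t in tokens]  # unseen tokens map to <unk>
    src_tensor = torch.LongTensor(src_indices).unsqueeze(0).to(device)  # shape: (1, src_len)

    with torch.no_grad():
        encoder_outputs, hidden = model.encoder(src_tensor)
        trg_indices = [ENGLISH.vocab.stoi['<sos>']]
        for _ in range(max_len):
            prev_token = torch.LongTensor([trg_indices[-1]]).to(device)  # shape: (1,)
            output, hidden = model.decoder(prev_token, hidden, encoder_outputs)
            pred = output.argmax(dim=1).item()  # greedy: pick the most likely next token
            trg_indices.append(pred)
            if pred == ENGLISH.vocab.stoi['<eos>']:
                break

    return [ENGLISH.vocab.itos[i] for i in trg_indices[1:]]  # drop the leading <sos>

# Example usage with an arbitrary Korean sentence
print(translate_sentence('한국 경제가 빠르게 성장하고 있다.'))
```

Greedy decoding is the simplest way to exercise the encoder, attention, and decoder end to end; translation quality will of course depend on how long the model was trained and on the reduced training set used above.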
```
import os

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

%matplotlib inline

STATS_DIR = "/hg191/corpora/legaldata/data/stats/"
SEM_FEATS_FILE = os.path.join(STATS_DIR, "ops.temp.semfeat")
INDEG_FILE = os.path.join(STATS_DIR, "ops.ind")

ind = pd.read_csv(INDEG_FILE, sep=",", header=None, names=["opid", "indeg"])
semfeat = pd.read_csv(SEM_FEATS_FILE, sep=",", header=None, names=["opid", "semfeat"])

# attach each opinion's in-degree to its semantic-feature row
indegs = pd.Series([ind[ind["opid"] == opid]["indeg"].values[0] for opid in semfeat.opid.values])
semfeat["indeg"] = indegs

def labelPercentile(series):
    """Label each value by the percentile bucket it falls into."""
    labels = list()
    p50 = np.percentile(series, q=50)
    p90 = np.percentile(series, q=90)
    for value in series:
        if value <= p50:
            labels.append("<=P50")
        elif value <= p90:
            labels.append(">P50")
        else:
            labels.append(">P90")
    return labels

semfeat["percentile"] = pd.Series(labelPercentile(semfeat["semfeat"].values))

# log in-degree is only defined for cited opinions (indeg > 0)
semfeat["logindeg"] = np.log(semfeat["indeg"].where(semfeat["indeg"] > 0))

df = semfeat[semfeat["indeg"] > 0].copy()
ax = sns.boxplot(x="percentile", y="logindeg", data=df, order=["<=P50", ">P50", ">P90"])

vals = df[df["percentile"] == ">P50"]["logindeg"].values
np.sort(vals)[int(len(vals) / 2)]

print(len(df[df["percentile"] == ">P50"]))
print(len(df[df["percentile"] == ">P90"]))

print(df[df["percentile"] == "<=P50"]["logindeg"].median())
print(df[df["percentile"] == ">P50"]["logindeg"].median())
print(df[df["percentile"] == ">P90"]["logindeg"].median())

#print(semfeat[semfeat["percentile"] == ">P99"]["logindeg"].mean())
print(semfeat[semfeat["percentile"] == "<=P50"]["logindeg"].mean())
print(semfeat[semfeat["percentile"] == ">P50"]["logindeg"].mean())
print(semfeat[semfeat["percentile"] == ">P90"]["logindeg"].mean())

print(semfeat[semfeat["percentile"] == "<=P50"]["indeg"].median())
print(semfeat[semfeat["percentile"] == ">P50"]["indeg"].median())
print(semfeat[semfeat["percentile"] == ">P90"]["indeg"].median())

np.percentile(semfeat["semfeat"].values, q=90)
semfeat[semfeat["percentile"] == ">P90"]["indeg"].mean()
semfeat[semfeat["percentile"] == ">P90"].tail(500)

sorted(semfeat["indeg"], reverse=True)[0:10]
semfeat[semfeat["indeg"].isin(sorted(semfeat["indeg"], reverse=True)[0:10])]
semfeat.loc[48004, "semfeat"] = 1
semfeat[semfeat["indeg"].isin(sorted(semfeat["indeg"], reverse=True)[0:10])]

# fraction of opinions in each bucket that are cited at all
print(np.mean((semfeat[semfeat["percentile"] == "<=P50"]["indeg"] > 0).values))
print(np.mean((semfeat[semfeat["percentile"] == ">P50"]["indeg"] > 0).values))
print(np.mean((semfeat[semfeat["percentile"] == ">P90"]["indeg"] > 0).values))

print(len(semfeat[(semfeat["percentile"] == "<=P50") & (semfeat["indeg"] > 0)]))
print(len(semfeat[(semfeat["percentile"] == ">P50") & (semfeat["indeg"] > 0)]))
print(len(semfeat[(semfeat["percentile"] == ">P90") & (semfeat["indeg"] > 0)]))

print(semfeat[(semfeat["percentile"] == "<=P50") & (semfeat["indeg"] > 0)]["indeg"].mean())
print(semfeat[(semfeat["percentile"] == ">P50") & (semfeat["indeg"] > 0)]["indeg"].mean())
print(semfeat[(semfeat["percentile"] == ">P90") & (semfeat["indeg"] > 0)]["indeg"].mean())

print(semfeat[(semfeat["percentile"] == "<=P50") & (semfeat["indeg"] > 0)]["logindeg"].mean())
print(semfeat[(semfeat["percentile"] == ">P50") & (semfeat["indeg"] > 0)]["logindeg"].mean())
print(semfeat[(semfeat["percentile"] == ">P90") & (semfeat["indeg"] > 0)]["logindeg"].mean())

ax = sns.violinplot(x="percentile", y="logindeg", data=df, order=["<=P50", ">P50", ">P90"])

semfeat[semfeat["indeg"] == 1]
```
github_jupyter
``` #hide #skip ! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab # default_exp losses # default_cls_lvl 3 #export from fastai.imports import * from fastai.torch_imports import * from fastai.torch_core import * from fastai.layers import * #hide from nbdev.showdoc import * ``` # Loss Functions > Custom fastai loss functions ``` F.binary_cross_entropy_with_logits(torch.randn(4,5), torch.randint(0, 2, (4,5)).float(), reduction='none') funcs_kwargs # export @log_args class BaseLoss(): "Same as `loss_cls`, but flattens input and target." activation=decodes=noops def __init__(self, loss_cls, *args, axis=-1, flatten=True, floatify=False, is_2d=True, **kwargs): store_attr("axis,flatten,floatify,is_2d") self.func = loss_cls(*args,**kwargs) functools.update_wrapper(self, self.func) def __repr__(self): return f"FlattenedLoss of {self.func}" @property def reduction(self): return self.func.reduction @reduction.setter def reduction(self, v): self.func.reduction = v def __call__(self, inp, targ, **kwargs): inp = inp .transpose(self.axis,-1).contiguous() targ = targ.transpose(self.axis,-1).contiguous() if self.floatify and targ.dtype!=torch.float16: targ = targ.float() if targ.dtype in [torch.int8, torch.int16, torch.int32]: targ = targ.long() if self.flatten: inp = inp.view(-1,inp.shape[-1]) if self.is_2d else inp.view(-1) return self.func.__call__(inp, targ.view(-1) if self.flatten else targ, **kwargs) ``` Wrapping a general loss function inside of `BaseLoss` provides extra functionalities to your loss functions: - flattens the tensors before trying to take the losses since it's more convenient (with a potential tranpose to put `axis` at the end) - a potential `activation` method that tells the library if there is an activation fused in the loss (useful for inference and methods such as `Learner.get_preds` or `Learner.predict`) - a potential <code>decodes</code> method that is used on predictions in inference (for instance, an argmax in classification) The `args` and `kwargs` will be passed to `loss_cls` during the initialization to instantiate a loss function. `axis` is put at the end for losses like softmax that are often performed on the last axis. If `floatify=True`, the `targs` will be converted to floats (useful for losses that only accept float targets like `BCEWithLogitsLoss`), and `is_2d` determines if we flatten while keeping the first dimension (batch size) or completely flatten the input. We want the first for losses like Cross Entropy, and the second for pretty much anything else. ``` # export @log_args @delegates() class CrossEntropyLossFlat(BaseLoss): "Same as `nn.CrossEntropyLoss`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, weight=None, ignore_index=-100, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(nn.CrossEntropyLoss, *args, axis=axis, **kwargs) def decodes(self, x): return x.argmax(dim=self.axis) def activation(self, x): return F.softmax(x, dim=self.axis) tst = CrossEntropyLossFlat() output = torch.randn(32, 5, 10) target = torch.randint(0, 10, (32,5)) #nn.CrossEntropy would fail with those two tensors, but not our flattened version. 
_ = tst(output, target) test_fail(lambda x: nn.CrossEntropyLoss()(output,target)) #Associated activation is softmax test_eq(tst.activation(output), F.softmax(output, dim=-1)) #This loss function has a decodes which is argmax test_eq(tst.decodes(output), output.argmax(dim=-1)) #In a segmentation task, we want to take the softmax over the channel dimension tst = CrossEntropyLossFlat(axis=1) output = torch.randn(32, 5, 128, 128) target = torch.randint(0, 5, (32, 128, 128)) _ = tst(output, target) test_eq(tst.activation(output), F.softmax(output, dim=1)) test_eq(tst.decodes(output), output.argmax(dim=1)) # export @log_args @delegates() class BCEWithLogitsLossFlat(BaseLoss): "Same as `nn.BCEWithLogitsLoss`, but flattens input and target." @use_kwargs_dict(keep=True, weight=None, reduction='mean', pos_weight=None) def __init__(self, *args, axis=-1, floatify=True, thresh=0.5, **kwargs): super().__init__(nn.BCEWithLogitsLoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) self.thresh = thresh def decodes(self, x): return x>self.thresh def activation(self, x): return torch.sigmoid(x) tst = BCEWithLogitsLossFlat() output = torch.randn(32, 5, 10) target = torch.randn(32, 5, 10) #nn.BCEWithLogitsLoss would fail with those two tensors, but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) output = torch.randn(32, 5) target = torch.randint(0,2,(32, 5)) #nn.BCEWithLogitsLoss would fail with int targets but not our flattened version. _ = tst(output, target) test_fail(lambda x: nn.BCEWithLogitsLoss()(output,target)) #Associated activation is sigmoid test_eq(tst.activation(output), torch.sigmoid(output)) # export @log_args(to_return=True) @use_kwargs_dict(weight=None, reduction='mean') def BCELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.BCELoss`, but flattens input and target." return BaseLoss(nn.BCELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = BCELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.BCELoss()(output,target)) # export @log_args(to_return=True) @use_kwargs_dict(reduction='mean') def MSELossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.MSELoss`, but flattens input and target." return BaseLoss(nn.MSELoss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) tst = MSELossFlat() output = torch.sigmoid(torch.randn(32, 5, 10)) target = torch.randint(0,2,(32, 5, 10)) _ = tst(output, target) test_fail(lambda x: nn.MSELoss()(output,target)) #hide #cuda #Test losses work in half precision output = torch.sigmoid(torch.randn(32, 5, 10)).half().cuda() target = torch.randint(0,2,(32, 5, 10)).half().cuda() for tst in [BCELossFlat(), MSELossFlat()]: _ = tst(output, target) # export @log_args(to_return=True) @use_kwargs_dict(reduction='mean') def L1LossFlat(*args, axis=-1, floatify=True, **kwargs): "Same as `nn.L1Loss`, but flattens input and target." 
return BaseLoss(nn.L1Loss, *args, axis=axis, floatify=floatify, is_2d=False, **kwargs) #export @log_args class LabelSmoothingCrossEntropy(Module): y_int = True def __init__(self, eps:float=0.1, reduction='mean'): self.eps,self.reduction = eps,reduction def forward(self, output, target): c = output.size()[-1] log_preds = F.log_softmax(output, dim=-1) if self.reduction=='sum': loss = -log_preds.sum() else: loss = -log_preds.sum(dim=-1) #We divide by that size at the return line so sum and not mean if self.reduction=='mean': loss = loss.mean() return loss*self.eps/c + (1-self.eps) * F.nll_loss(log_preds, target.long(), reduction=self.reduction) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` On top of the formula we define: - a `reduction` attribute, that will be used when we call `Learner.get_preds` - an `activation` function that represents the activation fused in the loss (since we use cross entropy behind the scenes). It will be applied to the output of the model when calling `Learner.get_preds` or `Learner.predict` - a <code>decodes</code> function that converts the output of the model to a format similar to the target (here indices). This is used in `Learner.predict` and `Learner.show_results` to decode the predictions ``` #export @log_args @delegates() class LabelSmoothingCrossEntropyFlat(BaseLoss): "Same as `LabelSmoothingCrossEntropy`, but flattens input and target." y_int = True @use_kwargs_dict(keep=True, eps=0.1, reduction='mean') def __init__(self, *args, axis=-1, **kwargs): super().__init__(LabelSmoothingCrossEntropy, *args, axis=axis, **kwargs) def activation(self, out): return F.softmax(out, dim=-1) def decodes(self, out): return out.argmax(dim=-1) ``` ## Export - ``` #hide from nbdev.export import * notebook2script() ```
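The label-smoothing loss defined above mixes a uniform term over all classes with the usual negative log-likelihood. As a quick illustration (a standalone sketch in plain PyTorch, not part of the exported fastai module), the same formula collapses to ordinary cross entropy when `eps=0`:

```
import torch
import torch.nn.functional as F

def label_smoothing_ce(output, target, eps=0.1):
    """Label-smoothed cross entropy, mirroring LabelSmoothingCrossEntropy (reduction='mean')."""
    c = output.size(-1)
    log_preds = F.log_softmax(output, dim=-1)
    uniform_part = -log_preds.sum(dim=-1).mean()   # summed over classes, divided by c below
    nll_part = F.nll_loss(log_preds, target)       # the usual negative log-likelihood
    return uniform_part * eps / c + (1 - eps) * nll_part

output = torch.randn(8, 10)
target = torch.randint(0, 10, (8,))

# with eps=0 the smoothed loss equals plain cross entropy
assert torch.isclose(label_smoothing_ce(output, target, eps=0.0),
                     F.cross_entropy(output, target))
print(label_smoothing_ce(output, target, eps=0.1))
```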
github_jupyter
``` # HIDDEN from datascience import * %matplotlib inline import matplotlib.pyplot as plots plots.style.use('fivethirtyeight') import math import numpy as np from scipy import stats import ipywidgets as widgets import nbinteract as nbi ``` ### The Central Limit Theorem ### Very few of the data histograms that we have seen in this course have been bell shaped. When we have come across a bell shaped distribution, it has almost invariably been an empirical histogram of a statistic based on a random sample. **The Central Limit Theorem says that the probability distribution of the sum or average of a large random sample drawn with replacement will be roughly normal, *regardless of the distribution of the population from which the sample is drawn*.** As we noted when we were studying Chebychev's bounds, results that can be applied to random samples *regardless of the distribution of the population* are very powerful, because in data science we rarely know the distribution of the population. The Central Limit Theorem makes it possible to make inferences with very little knowledge about the population, provided we have a large random sample. That is why it is central to the field of statistical inference. ### Proportion of Purple Flowers ### Recall Mendel's probability model for the colors of the flowers of a species of pea plant. The model says that the flower colors of the plants are like draws made at random with replacement from {Purple, Purple, Purple, White}. In a large sample of plants, about what proportion will have purple flowers? We would expect the answer to be about 0.75, the proportion purple in the model. And, because proportions are means, the Central Limit Theorem says that the distribution of the sample proportion of purple plants is roughly normal. We can confirm this by simulation. Let's simulate the proportion of purple-flowered plants in a sample of 200 plants. ``` colors = make_array('Purple', 'Purple', 'Purple', 'White') model = Table().with_column('Color', colors) model props = make_array() num_plants = 200 repetitions = 1000 for i in np.arange(repetitions): sample = model.sample(num_plants) new_prop = np.count_nonzero(sample.column('Color') == 'Purple')/num_plants props = np.append(props, new_prop) props[:5] opts = { 'title': 'Distribution of sample proportions', 'xlabel': 'Sample Proportion', 'ylabel': 'Percent per unit', 'xlim': (0.64, 0.84), 'ylim': (0, 25), 'bins': 20, } nbi.hist(props, options=opts) ``` There's that normal curve again, as predicted by the Central Limit Theorem, centered at around 0.75 just as you would expect. How would this distribution change if we increased the sample size? We can copy our sampling code into a function and then use interaction to see how the distribution changes as the sample size increases. We will keep the number of `repetitions` the same as before so that the two columns have the same length. ``` def empirical_props(num_plants): props = make_array() for i in np.arange(repetitions): sample = model.sample(num_plants) new_prop = np.count_nonzero(sample.column('Color') == 'Purple')/num_plants props = np.append(props, new_prop) return props nbi.hist(empirical_props, options=opts, num_plants=widgets.ToggleButtons(options=[100, 200, 400, 800])) ``` All of the above distributions are approximately normal but become more narrow as the sample size increases. For example, the proportions based on a sample size of 800 are more tightly clustered around 0.75 than those from a sample size of 200. 
Increasing the sample size has decreased the variability in the sample proportion.
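This narrowing is exactly what the Central Limit Theorem predicts: the SD of a sample proportion is roughly $\sqrt{p(1-p)/n}$, so quadrupling the sample size halves the spread. Below is a small NumPy check of that prediction; it is an independent sketch and does not reuse the `datascience` tables above.

```
import numpy as np

p = 0.75           # proportion of purple-flowered plants in Mendel's model
repetitions = 1000

for n in [200, 800]:
    # simulate `repetitions` sample proportions for samples of size n
    props = np.random.binomial(n, p, size=repetitions) / n
    empirical_sd = props.std()
    predicted_sd = np.sqrt(p * (1 - p) / n)
    print(f"n={n}: empirical SD {empirical_sd:.4f}, CLT prediction {predicted_sd:.4f}")
```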
github_jupyter
# Spark on Kubernetes

Preparing the notebook: https://towardsdatascience.com/make-kubeflow-into-your-own-data-science-workspace-cc8162969e29

## Setup service account permissions

https://github.com/kubeflow/kubeflow/issues/4306 tracks an issue with launching the spark-operator from a Jupyter notebook.

Run the commands below in your shell (not in the notebook):

```shell
export NAMESPACE=<your_namespace>
kubectl create serviceaccount spark -n ${NAMESPACE}
kubectl create clusterrolebinding spark-role --clusterrole=edit --serviceaccount=${NAMESPACE}:spark --namespace=${NAMESPACE}
```

## Python version

> Note: Make sure your driver Python and executor Python versions match.
> Otherwise, you will see an error message like the one below:

Exception: Python in worker has different version 3.7 than that in driver 3.6, PySpark cannot run with different minor versions. Please check environment variables `PYSPARK_PYTHON` and `PYSPARK_DRIVER_PYTHON` are correctly set.

```
import sys
print(sys.version)
```

## Client Mode

```
import random
import socket

import findspark
import pyspark
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession

findspark.init()

localIpAddress = socket.gethostbyname(socket.gethostname())

conf = SparkConf().setAppName('sparktest1')
conf.setMaster('k8s://https://kubernetes.default.svc:443')
conf.set("spark.submit.deployMode", "client")
conf.set("spark.executor.instances", "2")
conf.set("spark.driver.host", localIpAddress)
conf.set("spark.driver.port", "7778")
conf.set("spark.kubernetes.namespace", "yahavb")
conf.set("spark.kubernetes.container.image", "seedjeffwan/spark-py:v2.4.6")
conf.set("spark.kubernetes.pyspark.pythonVersion", "3")
conf.set("spark.kubernetes.authenticate.driver.serviceAccountName", "spark")
conf.set("spark.kubernetes.executor.annotation.sidecar.istio.io/inject", "false")

sc = pyspark.context.SparkContext.getOrCreate(conf=conf)
# the following works as well
# spark = SparkSession.builder.config(conf=conf).getOrCreate()

num_samples = 100000

def inside(p):
    # note: `random` must be imported (see imports above) for this to run on the executors
    x, y = random.random(), random.random()
    return x*x + y*y < 1

count = sc.parallelize(range(0, num_samples)).filter(inside).count()

sc.stop()
```

## Cluster Mode

## Java

```
%%bash
/opt/spark-2.4.6/bin/spark-submit --master "k8s://https://kubernetes.default.svc:443" \
  --deploy-mode cluster \
  --name spark-java-pi \
  --class org.apache.spark.examples.SparkPi \
  --conf spark.executor.instances=30 \
  --conf spark.kubernetes.namespace=yahavb \
  --conf spark.kubernetes.driver.annotation.sidecar.istio.io/inject=false \
  --conf spark.kubernetes.executor.annotation.sidecar.istio.io/inject=false \
  --conf spark.kubernetes.container.image=seedjeffwan/spark:v2.4.6 \
  --conf spark.kubernetes.driver.pod.name=spark-java-pi-driver \
  --conf spark.kubernetes.executor.request.cores=4 \
  --conf spark.kubernetes.node.selector.computetype=gpu \
  --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark \
  local:///opt/spark/examples/jars/spark-examples_2.11-2.4.6.jar 262144

%%bash
kubectl -n yahavb delete po `kubectl -n yahavb get po | grep spark-java-pi-driver | awk '{print $1}'`
```

## Python

```
%%bash
/opt/spark-2.4.6/bin/spark-submit --master "k8s://https://kubernetes.default.svc:443" \
  --deploy-mode cluster \
  --name spark-python-pi \
  --conf spark.executor.instances=50 \
  --conf spark.kubernetes.container.image=seedjeffwan/spark-py:v2.4.6 \
  --conf spark.kubernetes.driver.pod.name=spark-python-pi-driver \
  --conf spark.kubernetes.namespace=yahavb \
  --conf spark.kubernetes.driver.annotation.sidecar.istio.io/inject=false \
  --conf spark.kubernetes.executor.annotation.sidecar.istio.io/inject=false \
  --conf spark.kubernetes.pyspark.pythonVersion=3 \
  --conf spark.kubernetes.executor.request.cores=4 \
  --conf spark.kubernetes.authenticate.driver.serviceAccountName=spark /opt/spark/examples/src/main/python/pi.py 64000

%%bash
kubectl -n yahavb delete po `kubectl -n yahavb get po | grep spark-python-pi-driver | awk '{print $1}'`
```
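To verify that the client-mode job really fans out to executor pods rather than running locally in the notebook, you can ask each partition which host it ran on before calling `sc.stop()`. This is a minimal sketch, assuming the `sc` created in the Client Mode cell above is still active:

```
import socket

def executor_host(_):
    # runs on the executors; socket is available there as part of the stdlib
    return socket.gethostname()

# expect (roughly) one distinct hostname per executor pod
print(sc.parallelize(range(100), 8).map(executor_host).distinct().collect())
```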
github_jupyter
``` from Maze import Maze from sarsa_agent import SarsaAgent import numpy as np import matplotlib.pyplot as plt from matplotlib import animation from IPython.display import HTML ``` ## Designing the maze ``` arr=np.array([[0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0], [0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0], [0,1,0,0,1,0,0,1,1,1,1,1,0,1,1,0,1,1,1,0], [0,1,0,0,1,0,0,0,0,0,1,0,0,1,0,0,1,0,0,0], [0,0,0,0,1,0,0,1,1,1,0,0,1,1,0,0,1,0,0,0], [0,0,0,0,0,0,0,1,0,0,0,1,0,1,0,1,1,0,1,1], [1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,1,0,0,0], [0,0,1,0,1,0,1,0,0,1,0,0,0,0,0,0,1,0,1,0], [0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,1,1,0,1,0], [0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,1,0,0,0,0], [1,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,0,1,0,0], [1,0,1,1,1,0,1,0,0,1,0,0,1,1,0,0,0,1,0,0], [1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0], [0,0,0,0,1,0,1,0,0,1,1,0,1,0,0,0,1,1,1,0], [0,0,1,1,1,0,1,0,0,1,0,1,0,0,1,1,0,0,0,0], [0,1,1,0,0,0,0,1,0,1,0,0,1,1,0,1,0,1,1,1], [0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0], [0,0,1,1,1,0,1,1,0,0,1,0,1,0,0,1,1,0,0,0], [1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,1,1,1,0,0], [1,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,1,0,0] ],dtype=float) #Position of the rat rat=(0,0) #If Cheese is None, cheese is placed in the bottom-right cell of the maze cheese=None #The maze object takes the maze maze=Maze(arr,rat,cheese) maze.show_maze() ``` ## Defining a Agent [Sarsa Agent because it uses Sarsa to solve the maze] ``` agent=SarsaAgent(maze) ``` ## Making the agent play episodes and learn ``` agent.learn(episodes=1000) ``` ## Plotting the maze ``` nrow=maze.nrow ncol=maze.ncol fig=plt.figure() ax=fig.gca() ax.set_xticks(np.arange(0.5,ncol,1)) ax.set_yticks(np.arange(0.5,nrow,1)) ax.set_xticklabels([]) ax.set_yticklabels([]) ax.grid('on') img=ax.imshow(maze.maze,cmap="gray",) a=5 ``` ## Making Animation of the maze solution ``` def gen_func(): maze=Maze(arr,rat,cheese) done=False while not done: row,col,_=maze.state cell=(row,col) action=agent.get_policy(cell) maze.step(action) done=maze.get_status() yield maze.get_canvas() def update_plot(canvas): img.set_data(canvas) anim=animation.FuncAnimation(fig,update_plot,gen_func) HTML(anim.to_html5_video()) anim.save("big_maze.gif",animation.PillowWriter()) ```
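The notebook relies on the project's own `SarsaAgent`, whose internals are not shown here. For reference, the core of any SARSA learner is the on-policy update Q(s,a) ← Q(s,a) + α[r + γ Q(s',a') − Q(s,a)]. The sketch below is a generic illustration of that rule; the Q-table layout, ε-greedy policy, and hyperparameter names are assumptions, not the actual `SarsaAgent` implementation:

```
import numpy as np
from collections import defaultdict

def epsilon_greedy(Q, state, n_actions, epsilon=0.1):
    """Pick a random action with probability epsilon, otherwise the greedy one."""
    if np.random.rand() < epsilon:
        return np.random.randint(n_actions)
    return int(np.argmax(Q[state]))

def sarsa_update(Q, state, action, reward, next_state, next_action, alpha=0.1, gamma=0.95):
    """One on-policy SARSA step: Q(s,a) += alpha * (r + gamma * Q(s',a') - Q(s,a))."""
    td_target = reward + gamma * Q[next_state][next_action]
    Q[state][action] += alpha * (td_target - Q[state][action])

# Q-table keyed by state (e.g. a (row, col) cell of the maze), one value per action
n_actions = 4  # up / down / left / right
Q = defaultdict(lambda: np.zeros(n_actions))
```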
github_jupyter
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D2_ModelingPractice/student/W1D2_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Neuromatch Academy: Week1, Day 2, Tutorial 2 #Tutorial objectives We are investigating a simple phenomena, working through the 10 steps of modeling ([Blohm et al., 2019](https://doi.org/10.1523/ENEURO.0352-19.2019)) in two notebooks: **Framing the question** 1. finding a phenomenon and a question to ask about it 2. understanding the state of the art 3. determining the basic ingredients 4. formulating specific, mathematically defined hypotheses **Implementing the model** 5. selecting the toolkit 6. planning the model 7. implementing the model **Model testing** 8. completing the model 9. testing and evaluating the model **Publishing** 10. publishing models We did steps 1-5 in Tutorial 1 and will cover steps 6-10 in Tutorial 2 (this notebook). # Utilities Setup and Convenience Functions Please run the following **3** chunks to have functions and data available. ``` #@title Utilities and setup # set up the environment for this tutorial import time # import time import numpy as np # import numpy import scipy as sp # import scipy from scipy.stats import gamma # import gamma distribution import math # import basic math functions import random # import basic random number generator functions import matplotlib.pyplot as plt # import matplotlib from IPython import display fig_w, fig_h = (12, 8) plt.rcParams.update({'figure.figsize': (fig_w, fig_h)}) plt.style.use('ggplot') %matplotlib inline #%config InlineBackend.figure_format = 'retina' from scipy.signal import medfilt # make #@title Convenience functions: Plotting and Filtering # define some convenience functions to be used later def my_moving_window(x, window=3, FUN=np.mean): ''' Calculates a moving estimate for a signal Args: x (numpy.ndarray): a vector array of size N window (int): size of the window, must be a positive integer FUN (function): the function to apply to the samples in the window Returns: (numpy.ndarray): a vector array of size N, containing the moving average of x, calculated with a window of size window There are smarter and faster solutions (e.g. using convolution) but this function shows what the output really means. This function skips NaNs, and should not be susceptible to edge effects: it will simply use all the available samples, which means that close to the edges of the signal or close to NaNs, the output will just be based on fewer samples. By default, this function will apply a mean to the samples in the window, but this can be changed to be a max/min/median or other function that returns a single numeric value based on a sequence of values. 
''' # if data is a matrix, apply filter to each row: if len(x.shape) == 2: output = np.zeros(x.shape) for rown in range(x.shape[0]): output[rown,:] = my_moving_window(x[rown,:],window=window,FUN=FUN) return output # make output array of the same size as x: output = np.zeros(x.size) # loop through the signal in x for samp_i in range(x.size): values = [] # loop through the window: for wind_i in range(int(-window), 1): if ((samp_i+wind_i) < 0) or (samp_i+wind_i) > (x.size - 1): # out of range continue # sample is in range and not nan, use it: if not(np.isnan(x[samp_i+wind_i])): values += [x[samp_i+wind_i]] # calculate the mean in the window for this point in the output: output[samp_i] = FUN(values) return output def my_plot_percepts(datasets=None, plotconditions=False): if isinstance(datasets,dict): # try to plot the datasets # they should be named... # 'expectations', 'judgments', 'predictions' fig = plt.figure(figsize=(8, 8)) # set aspect ratio = 1? not really plt.ylabel('perceived self motion [m/s]') plt.xlabel('perceived world motion [m/s]') plt.title('perceived velocities') # loop through the entries in datasets # plot them in the appropriate way for k in datasets.keys(): if k == 'expectations': expect = datasets[k] plt.scatter(expect['world'],expect['self'],marker='*',color='xkcd:green',label='my expectations') elif k == 'judgments': judgments = datasets[k] for condition in np.unique(judgments[:,0]): c_idx = np.where(judgments[:,0] == condition)[0] cond_self_motion = judgments[c_idx[0],1] cond_world_motion = judgments[c_idx[0],2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'world-motion condition judgments' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'self-motion condition judgments' else: c_label = 'condition [%d] judgments'%condition plt.scatter(judgments[c_idx,3],judgments[c_idx,4], label=c_label, alpha=0.2) elif k == 'predictions': predictions = datasets[k] for condition in np.unique(predictions[:,0]): c_idx = np.where(predictions[:,0] == condition)[0] cond_self_motion = predictions[c_idx[0],1] cond_world_motion = predictions[c_idx[0],2] if cond_world_motion == -1 and cond_self_motion == 0: c_label = 'predicted world-motion condition' elif cond_world_motion == 0 and cond_self_motion == 1: c_label = 'predicted self-motion condition' else: c_label = 'condition [%d] prediction'%condition plt.scatter(predictions[c_idx,4],predictions[c_idx,3], marker='x', label=c_label) else: print("datasets keys should be 'hypothesis', 'judgments' and 'predictions'") if plotconditions: # this code is simplified but only works for the dataset we have: plt.scatter([1],[0],marker='<',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='world-motion stimulus',s=80) plt.scatter([0],[1],marker='>',facecolor='none',edgecolor='xkcd:black',linewidths=2,label='self-motion stimulus',s=80) plt.legend(facecolor='xkcd:white') plt.show() else: if datasets is not None: print('datasets argument should be a dict') raise TypeError def my_plot_motion_signals(): dt = 1/10 a = gamma.pdf( np.arange(0,10,dt), 2.5, 0 ) t = np.arange(0,10,dt) v = np.cumsum(a*dt) fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row', figsize=(14,6)) fig.suptitle('Sensory ground truth') ax1.set_title('world-motion condition') ax1.plot(t,-v,label='visual [$m/s$]') ax1.plot(t,np.zeros(a.size),label='vestibular [$m/s^2$]') ax1.set_xlabel('time [s]') ax1.set_ylabel('motion') ax1.legend(facecolor='xkcd:white') ax2.set_title('self-motion condition') ax2.plot(t,-v,label='visual 
[$m/s$]') ax2.plot(t,a,label='vestibular [$m/s^2$]') ax2.set_xlabel('time [s]') ax2.set_ylabel('motion') ax2.legend(facecolor='xkcd:white') plt.show() def my_plot_sensorysignals(judgments, opticflow, vestibular, returnaxes=False, addaverages=False): wm_idx = np.where(judgments[:,0] == 0) sm_idx = np.where(judgments[:,0] == 1) opticflow = opticflow.transpose() wm_opticflow = np.squeeze(opticflow[:,wm_idx]) sm_opticflow = np.squeeze(opticflow[:,sm_idx]) vestibular = vestibular.transpose() wm_vestibular = np.squeeze(vestibular[:,wm_idx]) sm_vestibular = np.squeeze(vestibular[:,sm_idx]) X = np.arange(0,10,.1) fig, my_axes = plt.subplots(nrows=2, ncols=2, sharex='col', sharey='row', figsize=(15,10)) fig.suptitle('Sensory signals') my_axes[0][0].plot(X,wm_opticflow, color='xkcd:light red', alpha=0.1) my_axes[0][0].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[0][0].plot(X,np.average(wm_opticflow, axis=1), color='xkcd:red', alpha=1) my_axes[0][0].set_title('world-motion optic flow') my_axes[0][0].set_ylabel('[motion]') my_axes[0][1].plot(X,sm_opticflow, color='xkcd:azure', alpha=0.1) my_axes[0][1].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[0][1].plot(X,np.average(sm_opticflow, axis=1), color='xkcd:blue', alpha=1) my_axes[0][1].set_title('self-motion optic flow') my_axes[1][0].plot(X,wm_vestibular, color='xkcd:light red', alpha=0.1) my_axes[1][0].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[1][0].plot(X,np.average(wm_vestibular, axis=1), color='xkcd:red', alpha=1) my_axes[1][0].set_title('world-motion vestibular signal') my_axes[1][0].set_xlabel('time [s]') my_axes[1][0].set_ylabel('[motion]') my_axes[1][1].plot(X,sm_vestibular, color='xkcd:azure', alpha=0.1) my_axes[1][1].plot([0,10], [0,0], ':', color='xkcd:black') if addaverages: my_axes[1][1].plot(X,np.average(sm_vestibular, axis=1), color='xkcd:blue', alpha=1) my_axes[1][1].set_title('self-motion vestibular signal') my_axes[1][1].set_xlabel('time [s]') if returnaxes: return my_axes else: plt.show() def my_plot_thresholds(thresholds, world_prop, self_prop, prop_correct): plt.figure(figsize=(12,8)) plt.title('threshold effects') plt.plot([min(thresholds),max(thresholds)],[0,0],':',color='xkcd:black') plt.plot([min(thresholds),max(thresholds)],[0.5,0.5],':',color='xkcd:black') plt.plot([min(thresholds),max(thresholds)],[1,1],':',color='xkcd:black') plt.plot(thresholds, world_prop, label='world motion') plt.plot(thresholds, self_prop, label='self motion') plt.plot(thresholds, prop_correct, color='xkcd:purple', label='correct classification') plt.xlabel('threshold') plt.ylabel('proportion correct or classified as self motion') plt.legend(facecolor='xkcd:white') plt.show() def my_plot_predictions_data(judgments, predictions): conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) # self: conditions_self = np.abs(judgments[:,1]) veljudgmnt_self = judgments[:,3] velpredict_self = predictions[:,3] # world: conditions_world = np.abs(judgments[:,2]) veljudgmnt_world = judgments[:,4] velpredict_world = predictions[:,4] fig, [ax1, ax2] = plt.subplots(nrows=1, ncols=2, sharey='row', figsize=(12,5)) ax1.scatter(veljudgmnt_self,velpredict_self, alpha=0.2) ax1.plot([0,1],[0,1],':',color='xkcd:black') ax1.set_title('self-motion judgments') ax1.set_xlabel('observed') ax1.set_ylabel('predicted') ax2.scatter(veljudgmnt_world,velpredict_world, 
alpha=0.2) ax2.plot([0,1],[0,1],':',color='xkcd:black') ax2.set_title('world-motion judgments') ax2.set_xlabel('observed') ax2.set_ylabel('predicted') plt.show() #@title Data generation code (needs to go on OSF and deleted here) def my_simulate_data(repetitions=100, conditions=[(0,-1),(+1,0)] ): """ Generate simulated data for this tutorial. You do not need to run this yourself. Args: repetitions: (int) number of repetitions of each condition (default: 30) conditions: list of 2-tuples of floats, indicating the self velocity and world velocity in each condition (default: returns data that is good for exploration: [(-1,0),(0,+1)] but can be flexibly extended) The total number of trials used (ntrials) is equal to: repetitions * len(conditions) Returns: dict with three entries: 'judgments': ntrials * 5 matrix 'opticflow': ntrials * 100 matrix 'vestibular': ntrials * 100 matrix The default settings would result in data where first 30 trials reflect a situation where the world (other train) moves in one direction, supposedly at 1 m/s (perhaps to the left: -1) while the participant does not move at all (0), and 30 trials from a second condition, where the world does not move, while the participant moves with 1 m/s in the opposite direction from where the world is moving in the first condition (0,+1). The optic flow should be the same, but the vestibular input is not. """ # reproducible output np.random.seed(1937) # set up some variables: ntrials = repetitions * len(conditions) # the following arrays will contain the simulated data: judgments = np.empty(shape=(ntrials,5)) opticflow = np.empty(shape=(ntrials,100)) vestibular = np.empty(shape=(ntrials,100)) # acceleration: a = gamma.pdf(np.arange(0,10,.1), 2.5, 0 ) # divide by 10 so that velocity scales from 0 to 1 (m/s) # max acceleration ~ .308 m/s^2 # not realistic! should be about 1/10 of that # velocity: v = np.cumsum(a*.1) # position: (not necessary) #x = np.cumsum(v) ################################# # REMOVE ARBITRARY SCALING & CORRECT NOISE PARAMETERS vest_amp = 1 optf_amp = 1 # we start at the first trial: trialN = 0 # we start with only a single velocity, but it should be possible to extend this for conditionno in range(len(conditions)): condition = conditions[conditionno] for repetition in range(repetitions): # # generate optic flow signal OF = v * np.diff(condition) # optic flow: difference between self & world motion OF = (OF * optf_amp) # fairly large spike range OF = OF + (np.random.randn(len(OF)) * .1) # adding noise # generate vestibular signal VS = a * condition[0] # vestibular signal: only self motion VS = (VS * vest_amp) # less range VS = VS + (np.random.randn(len(VS)) * 1.) # acceleration is a smaller signal, what is a good noise level? 
# store in matrices, corrected for sign #opticflow[trialN,:] = OF * -1 if (np.sign(np.diff(condition)) < 0) else OF #vestibular[trialN,:] = VS * -1 if (np.sign(condition[1]) < 0) else VS opticflow[trialN,:], vestibular[trialN,:] = OF, VS ######################################################### # store conditions in judgments matrix: judgments[trialN,0:3] = [ conditionno, condition[0], condition[1] ] # vestibular SD: 1.0916052957046194 and 0.9112684509277528 # visual SD: 0.10228834313079663 and 0.10975472557444346 # generate judgments: if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,5)*.1)[70:90])) < 1): ########################### # NO self motion detected ########################### selfmotion_weights = np.array([.01,.01]) # there should be low/no self motion worldmotion_weights = np.array([.01,.99]) # world motion is dictated by optic flow else: ######################## # self motion DETECTED ######################## #if (abs(np.average(np.cumsum(medfilt(VS/vest_amp,15)*.1)[70:90]) - np.average(medfilt(OF,15)[70:90])) < 5): if True: #################### # explain all self motion by optic flow selfmotion_weights = np.array([.01,.99]) # there should be lots of self motion, but determined by optic flow worldmotion_weights = np.array([.01,.01]) # very low world motion? else: # we use both optic flow and vestibular info to explain both selfmotion_weights = np.array([ 1, 0]) # motion, but determined by vestibular signal worldmotion_weights = np.array([ 1, 1]) # very low world motion? # integrated_signals = np.array([ np.average( np.cumsum(medfilt(VS/vest_amp,15))[90:100]*.1 ), np.average((medfilt(OF/optf_amp,15))[90:100]) ]) selfmotion = np.sum(integrated_signals * selfmotion_weights) worldmotion = np.sum(integrated_signals * worldmotion_weights) #print(worldmotion,selfmotion) judgments[trialN,3] = abs(selfmotion) judgments[trialN,4] = abs(worldmotion) # this ends the trial loop, so we increment the counter: trialN += 1 return {'judgments':judgments, 'opticflow':opticflow, 'vestibular':vestibular} simulated_data = my_simulate_data() judgments = simulated_data['judgments'] opticflow = simulated_data['opticflow'] vestibular = simulated_data['vestibular'] ``` #Micro-tutorial 6 - planning the model ``` #@title Video: Planning the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='daEtkVporBE', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` ###**Goal:** Identify the key components of the model and how they work together. Our goal all along has been to model our perceptual estimates of sensory data. Now that we have some idea of what we want to do, we need to line up the components of the model: what are the input and output? Which computations are done and in what order? The figure below shows a generic model we will use to guide our code construction. ![Model as code](https://i.ibb.co/hZdHmkk/modelfigure.jpg) Our model will have: * **inputs**: the values the system has available - for this tutorial the sensory information in a trial. We want to gather these together and plan how to process them. * **parameters**: unless we are lucky, our functions will have unknown parameters - we want to identify these and plan for them. * **outputs**: these are the predictions our model will make - for this tutorial these are the perceptual judgments on each trial. Ideally these are directly comparable to our data. * **Model functions**: A set of functions that perform the hypothesized computations. 
>Using Python (with Numpy and Scipy) we will define a set of functions that take our data and some parameters as input, can run our model, and output a prediction for the judgment data. #Recap of what we've accomplished so far: To model perceptual estimates from our sensory data, we need to 1. _integrate_ to ensure sensory information are in appropriate units 2. _reduce noise and set timescale_ by filtering 3. _threshold_ to model detection Remember the kind of operations we identified: * integration: `np.cumsum()` * filtering: `my_moving_window()` * threshold: `if` with a comparison (`>` or `<`) and `else` We will collect all the components we've developed and design the code by: 1. **identifying the key functions** we need 2. **sketching the operations** needed in each. **_Planning our model:_** We know what we want the model to do, but we need to plan and organize the model into functions and operations. We're providing a draft of the first function. For each of the two other code chunks, write mostly comments and help text first. This should put into words what role each of the functions plays in the overall model, implementing one of the steps decided above. _______ Below is the main function with a detailed explanation of what the function is supposed to do: what input is expected, and what output will generated. The code is not complete, and only returns nans for now. However, this outlines how most model code works: it gets some measured data (the sensory signals) and a set of parameters as input, and as output returns a prediction on other measured data (the velocity judgments). The goal of this function is to define the top level of a simulation model which: * receives all input * loops through the cases * calls functions that computes predicted values for each case * outputs the predictions ### **TD 6.1**: Complete main model function The function `my_train_illusion_model()` below should call one other function: `my_perceived_motion()`. What input do you think this function should get? **Complete main model function** ``` def my_train_illusion_model(sensorydata, params): ''' Generate output predictions of perceived self-motion and perceived world-motion velocity based on input visual and vestibular signals. 
Args (Input variables passed into function): sensorydata: (dict) dictionary with two named entries: opticflow: (numpy.ndarray of float) NxM array with N trials on rows and M visual signal samples in columns vestibular: (numpy.ndarray of float) NxM array with N trials on rows and M vestibular signal samples in columns params: (dict) dictionary with named entries: threshold: (float) vestibular threshold for credit assignment filterwindow: (list of int) determines the strength of filtering for the visual and vestibular signals, respectively integrate (bool): whether to integrate the vestibular signals, will be set to True if absent FUN (function): function used in the filter, will be set to np.mean if absent samplingrate (float): the number of samples per second in the sensory data, will be set to 10 if absent Returns: dict with two entries: selfmotion: (numpy.ndarray) vector array of length N, with predictions of perceived self motion worldmotion: (numpy.ndarray) vector array of length N, with predictions of perceived world motion ''' # sanitize input a little if not('FUN' in params.keys()): params['FUN'] = np.mean if not('integrate' in params.keys()): params['integrate'] = True if not('samplingrate' in params.keys()): params['samplingrate'] = 10 # number of trials: ntrials = sensorydata['opticflow'].shape[0] # set up variables to collect output selfmotion = np.empty(ntrials) worldmotion = np.empty(ntrials) # loop through trials? for trialN in range(ntrials): #these are our sensory variables (inputs) vis = sensorydata['opticflow'][trialN,:] ves = sensorydata['vestibular'][trialN,:] ######################################################## # generate output predicted perception: ######################################################## #our inputs our vis, ves, and params selfmotion[trialN], worldmotion[trialN] = [np.nan, np.nan] ######################################################## # replace above with # selfmotion[trialN], worldmotion[trialN] = my_perceived_motion( ???, ???, params=params) # and fill in question marks ######################################################## # comment this out when you've filled raise NotImplementedError("Student excercise: generate predictions") return {'selfmotion':selfmotion, 'worldmotion':worldmotion} # uncomment the following lines to run the main model function: ## here is a mock version of my_perceived motion. ## so you can test my_train_illusion_model() #def my_perceived_motion(*args, **kwargs): #return np.random.rand(2) ##let's look at the preditions we generated for two sample trials (0,100) ##we should get a 1x2 vector of self-motion prediction and another for world-motion #sensorydata={'opticflow':opticflow[[0,100],:0], 'vestibular':vestibular[[0,100],:0]} #params={'threshold':0.33, 'filterwindow':[100,50]} #my_train_illusion_model(sensorydata=sensorydata, params=params) ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_685e0a13.py) ### **TD 6.2**: Draft perceived motion functions Now we draft a set of functions, the first of which is used in the main model function (see above) and serves to generate perceived velocities. The other two are used in the first one. Only write help text and/or comments, you don't have to write the whole function. Each time ask yourself these questions: * what sensory data is necessary? * what other input does the function need, if any? * which operations are performed on the input? * what is the output? 
(the number of arguments is correct) **Template perceived motion** ``` # fill in the input arguments the function should have: # write the help text for the function: def my_perceived_motion(arg1, arg2, arg3): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument argument 3: explain the format and content of the third argument Returns: what output does the function generate? Any further description? ''' # structure your code into two functions: "my_selfmotion" and "my_worldmotion" # write comments outlining the operations to be performed on the inputs by each of these functions # use the elements from micro-tutorials 3, 4, and 5 (found in W1D2 Tutorial Part 1) # # # # what kind of output should this function produce? return output ``` We've completed the `my_perceived_motion()` function for you below. Follow this example to complete the template for `my_selfmotion()` and `my_worldmotion()`. Write out the inputs and outputs, and the steps required to calculate the outputs from the inputs. **Perceived motion function** ``` #Full perceived motion function def my_perceived_motion(vis, ves, params): ''' Takes sensory data and parameters and returns predicted percepts Args: vis (numpy.ndarray): 1xM array of optic flow velocity data ves (numpy.ndarray): 1xM array of vestibular acceleration data params: (dict) dictionary with named entries: see my_train_illusion_model() for details Returns: [list of floats]: prediction for perceived self-motion based on vestibular data, and prediction for perceived world-motion based on perceived self-motion and visual data ''' # estimate self motion based on only the vestibular data # pass on the parameters selfmotion = my_selfmotion(ves=ves, params=params) # estimate the world motion, based on the selfmotion and visual data # pass on the parameters as well worldmotion = my_worldmotion(vis=vis, selfmotion=selfmotion, params=params) return [selfmotion, worldmotion] ``` **Template calculate self motion** Put notes in the function below that describe the inputs, the outputs, and steps that transform the output from the input using elements from micro-tutorials 3,4,5. ``` def my_selfmotion(arg1, arg2): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument Returns: what output does the function generate? Any further description? ''' # what operations do we perform on the input? # use the elements from micro-tutorials 3, 4, and 5 # 1. # 2. # 3. # 4. # what output should this function produce? return output ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_181325a9.py) **Template calculate world motion** Put notes in the function below that describe the inputs, the outputs, and steps that transform the output from the input using elements from micro-tutorials 3,4,5. ``` def my_worldmotion(arg1, arg2, arg3): ''' Short description of the function Args: argument 1: explain the format and content of the first argument argument 2: explain the format and content of the second argument argument 3: explain the format and content of the third argument Returns: what output does the function generate? Any further description? ''' # what operations do we perform on the input? # use the elements from micro-tutorials 3, 4, and 5 # 1. # 2. 
# 3. # what output should this function produce? return output ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_8f913582.py) #Micro-tutorial 7 - implement model ``` #@title Video: implement the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='gtSOekY8jkw', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** We write the components of the model in actual code. For the operations we picked, there function ready to use: * integration: `np.cumsum(data, axis=1)` (axis=1: per trial and over samples) * filtering: `my_moving_window(data, window)` (window: int, default 3) * average: `np.mean(data)` * threshold: if (value > thr): <operation 1> else: <operation 2> ###**TD 7.1:** Write code to estimate self motion Use the operations to finish writing the function that will calculate an estimate of self motion. Fill in the descriptive list of items with actual operations. Use the function for estimating world-motion below, which we've filled for you! **Template finish self motion function** ``` def my_selfmotion(ves, params): ''' Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of self motion in m/s ''' ###uncomment the code below and fill in with your code ## 1. integrate vestibular signal #ves = np.cumsum(ves*(1/params['samplingrate'])) ## 2. running window function to accumulate evidence: #selfmotion = YOUR CODE HERE ## 3. take final value of self-motion vector as our estimate #selfmotion = ## 4. compare to threshold. Hint the threshodl is stored in params['threshold'] ## if selfmotion is higher than threshold: return value ## if it's lower than threshold: return 0 #if YOURCODEHERE #selfmotion = YOURCODHERE # comment this out when you've filled raise NotImplementedError("Student excercise: estimate my_selfmotion") return output ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_3ea16348.py) ### Estimate world motion We have completed the `my_worldmotion()` function for you. **World motion function** ``` # World motion function def my_worldmotion(vis, selfmotion, params): ''' Short description of the function Args: vis (numpy.ndarray): 1xM array with the optic flow signal selfmotion (float): estimate of self motion params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of world motion in m/s ''' # running average to smooth/accumulate sensory evidence visualmotion = my_moving_window(vis, window=params['filterwindows'][1], FUN=np.mean) # take final value visualmotion = visualmotion[-1] # subtract selfmotion from value worldmotion = visualmotion + selfmotion # return final value return worldmotion ``` #Micro-tutorial 8 - completing the model ``` #@title Video: completing the model from IPython.display import YouTubeVideo video = YouTubeVideo(id='-NiHSv4xCDs', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** Make sure the model can speak to the hypothesis. Eliminate all the parameters that do not speak to the hypothesis. 
Now that we have a working model, we can keep improving it, but at some point we need to decide that it is finished. Once we have a model that displays the properties of a system we are interested in, it should be possible to say something about our hypothesis and question. Keeping the model simple makes it easier to understand the phenomenon and answer the research question. Here that means that our model should have illusory perception, and perhaps make similar judgments to those of the participants, but not much more. To test this, we will run the model, store the output and plot the models' perceived self motion over perceived world motion, like we did with the actual perceptual judgments (it even uses the same plotting function). ### **TD 8.1:** See if the model produces illusions ``` #@title Run to plot model predictions of motion estimates # prepare to run the model again: data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':0.6, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # process the data to allow plotting... predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 my_plot_percepts(datasets={'predictions':predictions}, plotconditions=True) ``` **Questions:** * Why is the data distributed this way? How does it compare to the plot in TD 1.2? * Did you expect to see this? * Where do the model's predicted judgments for each of the two conditions fall? * How does this compare to the behavioral data? However, the main observation should be that **there are illusions**: the blue and red data points are mixed in each of the two sets of data. Does this mean the model can help us understand the phenomenon? #Micro-tutorial 9 - testing and evaluating the model ``` #@title Video: Background from IPython.display import YouTubeVideo video = YouTubeVideo(id='5vnDOxN3M_k', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** Once we have finished the model, we need a description of how good it is. The question and goals we set in micro-tutorial 1 and 4 help here. There are multiple ways to evaluate a model. Aside from the obvious fact that we want to get insight into the phenomenon that is not directly accessible without the model, we always want to quantify how well the model agrees with the data. ### Quantify model quality with $R^2$ Let's look at how well our model matches the actual judgment data. ``` #@title Run to plot predictions over data my_plot_predictions_data(judgments, predictions) ``` When model predictions are correct, the red points in the figure above should lie along the identity line (a dotted black line here). Points off the identity line represent model prediction errors. While in each plot we see two clusters of dots that are fairly close to the identity line, there are also two clusters that are not. For the trials that those points represent, the model has an illusion while the participants don't or vice versa. We will use a straightforward, quantitative measure of how good the model is: $R^2$ (pronounced: "R-squared"), which can take values between 0 and 1, and expresses how much variance is explained by the relationship between two variables (here the model's predictions and the actual judgments). 
It is also called [coefficient of determination](https://en.wikipedia.org/wiki/Coefficient_of_determination), and is calculated here as the square of the correlation coefficient (r or $\rho$). Just run the chunk below: ``` #@title Run to calculate R^2 conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt) print('conditions -> judgments R^2: %0.3f'%( r_value**2 )) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict) print('predictions -> judgments R^2: %0.3f'%( r_value**2 )) ``` These $R^2$s express how well the experimental conditions explain the participants judgments and how well the models predicted judgments explain the participants judgments. You will learn much more about model fitting, quantitative model evaluation and model comparison tomorrow! Perhaps the $R^2$ values don't seem very impressive, but the judgments produced by the participants are explained by the model's predictions better than by the actual conditions. In other words: the model tends to have the same illusions as the participants. ### **TD 9.1** Varying the threshold parameter to improve the model In the code below, see if you can find a better value for the threshold parameter, to reduce errors in the models' predictions. **Testing thresholds** ``` # Testing thresholds def test_threshold(threshold=0.33): # prepare to run model data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':threshold, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # get predictions in matrix predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 # get percepts from participants and model conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) # calculate R2 slope, intercept, r_value, p_value, std_err = sp.stats.linregress(veljudgmnt,velpredict) print('predictions -> judgments R2: %0.3f'%( r_value**2 )) test_threshold(threshold=0.5) ``` ### **TD 9.2:** Credit assigmnent of self motion When we look at the figure in **TD 8.1**, we can see a cluster does seem very close to (1,0), just like in the actual data. The cluster of points at (1,0) are from the case where we conclude there is no self motion, and then set the self motion to 0. That value of 0 removes a lot of noise from the world-motion estimates, and all noise from the self-motion estimate. In the other case, where there is self motion, we still have a lot of noise (see also micro-tutorial 4). Let's change our `my_selfmotion()` function to return a self motion of 1 when the vestibular signal indicates we are above threshold, and 0 when we are below threshold. Edit the function here. 
**Template function for credit assigment of self motion** ``` # Template binary self-motion estimates def my_selfmotion(ves, params): ''' Estimates self motion for one vestibular signal Args: ves (numpy.ndarray): 1xM array with a vestibular signal params (dict): dictionary with named entries: see my_train_illusion_model() for details Returns: (float): an estimate of self motion in m/s ''' # integrate signal: ves = np.cumsum(ves*(1/params['samplingrate'])) # use running window to accumulate evidence: selfmotion = my_moving_window(ves, window=params['filterwindows'][0], FUN=params['FUN']) ## take the final value as our estimate: selfmotion = selfmotion[-1] ########################################## # this last part will have to be changed # compare to threshold, set to 0 if lower and else... if selfmotion < params['threshold']: selfmotion = 0 #uncomment the lines below and fill in with your code #else: #YOUR CODE HERE # comment this out when you've filled raise NotImplementedError("Student excercise: modify with credit assignment") return selfmotion ``` [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D2_ModelingPractice/solutions/W1D2_Tutorial2_Solution_90571e21.py) The function you just wrote will be used when we run the model again below. ``` #@title Run model credit assigment of self motion # prepare to run the model again: data = {'opticflow':opticflow, 'vestibular':vestibular} params = {'threshold':0.33, 'filterwindows':[100,50], 'FUN':np.mean} modelpredictions = my_train_illusion_model(sensorydata=data, params=params) # no process the data to allow plotting... predictions = np.zeros(judgments.shape) predictions[:,0:3] = judgments[:,0:3] predictions[:,3] = modelpredictions['selfmotion'] predictions[:,4] = modelpredictions['worldmotion'] *-1 my_plot_percepts(datasets={'predictions':predictions}, plotconditions=False) ``` That looks much better, and closer to the actual data. Let's see if the $R^2$ values have improved: ``` #@title Run to calculate R^2 for model with self motion credit assignment conditions = np.concatenate((np.abs(judgments[:,1]),np.abs(judgments[:,2]))) veljudgmnt = np.concatenate((judgments[:,3],judgments[:,4])) velpredict = np.concatenate((predictions[:,3],predictions[:,4])) my_plot_predictions_data(judgments, predictions) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(conditions,veljudgmnt) print('conditions -> judgments R2: %0.3f'%( r_value**2 )) slope, intercept, r_value, p_value, std_err = sp.stats.linregress(velpredict,veljudgmnt) print('predictions -> judgments R2: %0.3f'%( r_value**2 )) ``` While the model still predicts velocity judgments better than the conditions (i.e. the model predicts illusions in somewhat similar cases), the $R^2$ values are actually worse than those of the simpler model. What's really going on is that the same set of points that were model prediction errors in the previous model are also errors here. All we have done is reduce the spread. ### Interpret the model's meaning Here's what you should have learned: 1. A noisy, vestibular, acceleration signal can give rise to illusory motion. 2. However, disambiguating the optic flow by adding the vestibular signal simply adds a lot of noise. This is not a plausible thing for the brain to do. 3. Our other hypothesis - credit assignment - is more qualitatively correct, but our simulations were not able to match the frequency of the illusion on a trial-by-trial basis. 
_It's always possible to refine our models to improve the fits._ There are many ways to try to do this. A few examples: we could implement a full sensory cue integration model, perhaps with Kalman filters (Week 2, Day 3), or we could add prior knowledge (at what time do the trains depart?). However, we decided that for now we have learned enough, so it's time to write it up. # Micro-tutorial 10 - publishing the model ``` #@title Video: Background from IPython.display import YouTubeVideo video = YouTubeVideo(id='kf4aauCr5vA', width=854, height=480, fs=1) print("Video available at https://youtube.com/watch?v=" + video.id) video ``` **Goal:** In order for our model to impact the field, it needs to be accepted by our peers, and in order for that to happen it matters how the model is published. ### **TD 10.1:** Write a summary of the project Here we will write up our model, by answering the following questions: * **What is the phenomenon?** Here summarize the part of the phenomenon which your model addresses. * **What is the key scientific question?** Clearly articulate the question which your model tries to answer. * **What was our hypothesis?** Explain the key relationships which we relied on to simulate the phenomenon. * **How did your model work?** Give an overview of the model, its main components, and how the model works. "Here we ..." * **What did we find? Did the model work?** Explain the key outcomes of your model evaluation. * **What can we conclude?** Conclude as much as you can _with reference to the hypothesis_, within the limits of the model. * **What did you learn? What is left to be learned?** Briefly argue the plausibility of the approach and what you think is _essential_ that may have been left out. ### Guidance for the future There are good guidelines for structuring and writing an effective paper (e.g. [Mensh & Kording, 2017](https://doi.org/10.1371/journal.pcbi.1005619)), all of which apply to papers about models. There are some extra considerations when publishing a model. In general, you should explain each of the steps in the paper: **Introduction:** Steps 1 & 2 (maybe 3) **Methods:** Steps 3-7, 9 **Results:** Steps 8 & 9, going back to 1, 2 & 4 In addition, you should provide a visualization of the model, and upload the code implementing the model and the data it was trained and tested on to a repository (e.g. GitHub and OSF). The audience for all of this should be experimentalists, as they are the ones who can test predictions made by your model and collect new data. This way your models can impact future experiments, and that future data can then be modeled (see modeling process schematic below). Remember your audience - it is _always_ hard to clearly convey the main points of your work to others, especially if your audience doesn't necessarily create computational models themselves. ![how-to-model process from Blohm et al 2019](https://deniseh.lab.yorku.ca/files/2020/06/HowToModel-ENEURO.0352-19.2019.full_.pdf.png) ### Suggestion For every modeling project, a very good exercise is to _**first**_ write a short, 100-word abstract of the project plan and expected impact, like the summary you wrote. This forces focusing on the main points: describing the relevance, question, model, answer and what it all means very succinctly. This allows you to decide whether or not to do the project **before you commit time writing code for no good purpose**. Notice that this is really what we've walked you through carefully in this tutorial! 
:) # Post-script Note that the model we built here was extremely simple and used artificial data on purpose. It allowed us to go through all the steps of building a model, and hopefully you noticed that it is not always a linear process: you will go back to different steps if you hit a roadblock somewhere. However, if you're interested in how to actually approach modeling a similar phenomenon in a probabilistic way, we encourage you to read the paper by [Dokka et al., 2019](https://doi.org/10.1073/pnas.1820373116), where the authors model how judgments of heading direction are influenced by objects that are also moving. # Reading Blohm G, Kording KP, Schrater PR (2020). _A How-to-Model Guide for Neuroscience._ eNeuro, 7(1):ENEURO.0352-19.2019. https://doi.org/10.1523/ENEURO.0352-19.2019 Dokka K, Park H, Jansen M, DeAngelis GC, Angelaki DE (2019). _Causal inference accounts for heading perception in the presence of object motion._ PNAS, 116(18):9060-9065. https://doi.org/10.1073/pnas.1820373116 Drugowitsch J, DeAngelis GC, Klier EM, Angelaki DE, Pouget A (2014). _Optimal Multisensory Decision-Making in a Reaction-Time Task._ eLife, 3:e03005. https://doi.org/10.7554/eLife.03005 Hartmann M, Haller K, Moser I, Hossner E-J, Mast FW (2014). _Direction detection thresholds of passive self-motion in artistic gymnasts._ Exp Brain Res, 232:1249–1258. https://doi.org/10.1007/s00221-014-3841-0 Mensh B, Kording K (2017). _Ten simple rules for structuring papers._ PLoS Comput Biol, 13(9):e1005619. https://doi.org/10.1371/journal.pcbi.1005619 Seno T, Fukuda H (2012). _Stimulus Meanings Alter Illusory Self-Motion (Vection) - Experimental Examination of the Train Illusion._ Seeing Perceiving, 25(6):631-45. https://doi.org/10.1163/18784763-00002394
github_jupyter
_Lambda School Data Science_ # Make explanatory visualizations Tody we will reproduce this [example by FiveThirtyEight:](https://fivethirtyeight.com/features/al-gores-new-movie-exposes-the-big-flaw-in-online-movie-ratings/) ``` from IPython.display import display, Image url = 'https://fivethirtyeight.com/wp-content/uploads/2017/09/mehtahickey-inconvenient-0830-1.png' example = Image(url=url, width=400) display(example) ``` Using this data: https://github.com/fivethirtyeight/data/tree/master/inconvenient-sequel Objectives - add emphasis and annotations to transform visualizations from exploratory to explanatory - remove clutter from visualizations Links - [Strong Titles Are The Biggest Bang for Your Buck](http://stephanieevergreen.com/strong-titles/) - [Remove to improve (the data-ink ratio)](https://www.darkhorseanalytics.com/blog/data-looks-better-naked) - [How to Generate FiveThirtyEight Graphs in Python](https://www.dataquest.io/blog/making-538-plots/) ## Make prototypes This helps us understand the problem ``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np import pandas as pd plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this fake.plot.bar(color='C1', width=0.9); fake2 = pd.Series( [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]) fake2.value_counts().sort_index().plot.bar(color='C1', width=0.9); ``` ## Annotate with text ``` display(example) plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this fake.plot.bar(color='C1', width=0.9); # rotate x axis numbers plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this ax = fake.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) #to unrotate or remove the rotation ax.set(title="'An Incovenient Sequel: Truth to Power' is divisive"); #or '\'An Incovenient Sequel: Truth to Power\' is divisive' plt.style.use('fivethirtyeight') fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1,11)) # index will start from 0 if not for this ax = fake.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=45, s='IMDb ratings for the film as of Aug. 
29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)); #(start pt., end pt., increment) ``` ## Reproduce with real data ``` df = pd.read_csv('https://raw.githubusercontent.com/fivethirtyeight/data/master/inconvenient-sequel/ratings.csv') df.shape df.head() width,height = df.shape width*height pd.options.display.max_columns = 500 df.head() df.sample(1).T df.timestamp.describe() # convert timestamp to date time df.timestamp = pd.to_datetime(df.timestamp) df.timestamp.describe() # Making datetime index of your df df = df.set_index('timestamp') df.head() df['2017-08-09'] # everything from this date df.category.value_counts() ``` ####only interested in IMDb users ``` df.category == 'IMDb users' # As a filter to select certain rows df[df.category == 'IMDb users'] lastday = df['2017-08-09'] lastday.head(1) lastday[lastday.category =='IMDb users'].tail() lastday[lastday.category =='IMDb users'].respondents.plot(); final = df.tail(1) #columns = ['1_pct','2_pct','3_pct','4_pct','5_pct','6_pct','7_pct','8_pct','9_pct','10_pct'] #OR columns = [str(i) + '_pct' for i in range(1,11)] final[columns] #OR #data.index.str.replace('_pct', '') data = final[columns].T data data.plot.bar() plt.style.use('fivethirtyeight') ax = data.plot.bar(color='C1', width=0.9) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)); #(start pt., end pt., increment) # to remove the timestamp texts in the center # to change the x axis texts plt.style.use('fivethirtyeight') ax = data.plot.bar(color='C1', width=0.9, legend=False) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)); data.index = range(1,11) data plt.style.use('fivethirtyeight') ax = data.plot.bar(color='C1', width=0.9, legend=False) ax.tick_params(labelrotation=0) ax.text(x=-2,y=48,s="'An Incovenient Sequel: Truth to Power' is divisive", fontsize=16, fontweight='bold') ax.text(x=-2,y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12) ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0,50,10)) plt.xlabel('Rating', fontsize=14); ``` # ASSIGNMENT Replicate the lesson code. I recommend that you [do not copy-paste](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit). # STRETCH OPTIONS #### Reproduce another example from [FiveThityEight's shared data repository](https://data.fivethirtyeight.com/). For example: - [thanksgiving-2015](https://fivethirtyeight.com/features/heres-what-your-part-of-america-eats-on-thanksgiving/) (try the [`altair`](https://altair-viz.github.io/gallery/index.html#maps) library) - [candy-power-ranking](https://fivethirtyeight.com/features/the-ultimate-halloween-candy-power-ranking/) (try the [`statsmodels`](https://www.statsmodels.org/stable/index.html) library) - or another example of your choice! #### Make more charts! Choose a chart you want to make, from [FT's Visual Vocabulary poster](http://ft.com/vocabulary). 
Find the chart in an example gallery of a Python data visualization library: - [Seaborn](http://seaborn.pydata.org/examples/index.html) - [Altair](https://altair-viz.github.io/gallery/index.html) - [Matplotlib](https://matplotlib.org/gallery.html) - [Pandas](https://pandas.pydata.org/pandas-docs/stable/visualization.html) Reproduce the chart. [Optionally, try the "Ben Franklin Method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) If you want, experiment and make changes. Take notes. Consider sharing your work with your cohort!
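For a compact reference while you work on the assignment, the whole lesson pattern (the FiveThirtyEight style sheet, a bar chart, `ax.text` for the title and subtitle, and cleaned-up ticks and axis labels) fits in one self-contained cell. This is just a condensed recap of the code shown above, using the same fake prototype series, not new material:

```
# Condensed recap of the annotated FiveThirtyEight-style bar chart from the lesson.
import matplotlib.pyplot as plt
import pandas as pd

plt.style.use('fivethirtyeight')
fake = pd.Series([38, 3, 2, 1, 2, 4, 6, 5, 5, 33], index=range(1, 11))

ax = fake.plot.bar(color='C1', width=0.9)
ax.tick_params(labelrotation=0)
ax.text(x=-2, y=48, s="'An Inconvenient Sequel: Truth to Power' is divisive",
        fontsize=16, fontweight='bold')                                  # title
ax.text(x=-2, y=44, s='IMDb ratings for the film as of Aug. 29', fontsize=12)  # subtitle
ax.set(xlabel='Rating', ylabel='Percent of total votes', yticks=range(0, 50, 10));
```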
github_jupyter
... ***CURRENTLY UNDER DEVELOPMENT*** ... ## Obtain synthetic waves and water level timeseries under a climate change scenario (future AWTs occurrence probability) inputs required: * Historical DWTs (for plotting) * Historical wave families (for plotting) * Synthetic DWTs climate change * Historical intradaily hydrograph parameters * TCs waves * Fitted multivariate extreme model for the waves associated to each DWT in this notebook: * Generate synthetic time series of wave conditions ``` #!/usr/bin/env python # -*- coding: utf-8 -*- # common import os import os.path as op # pip import numpy as np import xarray as xr import pandas as pd from datetime import datetime import matplotlib.pyplot as plt # DEV: override installed teslakit import sys sys.path.insert(0, op.join(os.path.abspath(''), '..', '..','..', '..')) # teslakit from teslakit.database import Database from teslakit.climate_emulator import Climate_Emulator from teslakit.waves import AWL, Aggregate_WavesFamilies from teslakit.plotting.outputs import Plot_FitSim_Histograms from teslakit.plotting.extremes import Plot_FitSim_AnnualMax, Plot_FitSim_GevFit, Plot_Fit_QQ from teslakit.plotting.waves import Plot_Waves_Histogram_FitSim ``` ## Database and Site parameters ``` # -------------------------------------- # Teslakit database p_data = r'/Users/anacrueda/Documents/Proyectos/TESLA/data' # offshore db = Database(p_data) db.SetSite('ROI') # climate change - S5 db_S5 = Database(p_data) db_S5.SetSite('ROI_CC_S5') # climate emulator simulation modified path p_S5_CE_sims = op.join(db_S5.paths.site.EXTREMES.climate_emulator, 'Simulations') # -------------------------------------- # Load data for climate emulator simulation climate change: ESTELA DWT and TCs (MU, TAU) DWTs_sim = db_S5.Load_ESTELA_DWT_sim() # DWTs climate change TCs_params = db.Load_TCs_r2_sim_params() # TCs parameters (copula generated) TCs_RBFs = db.Load_TCs_sim_r2_rbf_output() # TCs numerical_IH-RBFs_interpolation output probs_TCs = db.Load_TCs_probs_synth() # TCs synthetic probabilities pchange_TCs = probs_TCs['category_change_cumsum'].values[:] l_mutau_wt = db.Load_MU_TAU_hydrograms() # MU - TAU intradaily hidrographs for each DWT MU_WT = np.array([x.MU.values[:] for x in l_mutau_wt]) # MU and TAU numpy arrays TAU_WT = np.array([x.TAU.values[:] for x in l_mutau_wt]) # solve first 10 DWTs simulations DWTs_sim = DWTs_sim.isel(n_sim=slice(0, 10)) #DWTs_sim = DWTs_sim.isel(time=slice(0,365*40+10), n_sim=slice(0,1)) print(DWTs_sim) ``` ## Climate Emulator - Simulation ``` # -------------------------------------- # Climate Emulator extremes model fitting # Load Climate Emulator CE = Climate_Emulator(db.paths.site.EXTREMES.climate_emulator) CE.Load() # set a new path for S5 simulations CE.Set_Simulation_Folder(p_S5_CE_sims, copy_WAVES_noTCs = False) # climate change waves (no TCs) not simulated, DWTs have changed # optional: list variables to override distribution to empirical #CE.sim_icdf_empirical_override = ['sea_Hs_31', # 'swell_1_Hs_1','swell_1_Tp_1', # 'swell_1_Hs_2','swell_1_Tp_2',] # set simulated waves min-max filter CE.sim_waves_filter.update({ 'hs': (0, 8), 'tp': (2, 25), 'ws': (0, 0.06), }) # -------------------------------------- #  Climate Emulator simulation # each DWT series will generate a different set of waves for n in DWTs_sim.n_sim: print('- Sim: {0} -'.format(int(n)+1)) # Select DWTs simulation DWTs = DWTs_sim.sel(n_sim=n) # Simulate waves n_ce = 1 # (one CE sim. for each DWT sim.) 
WVS_sim = CE.Simulate_Waves(DWTs, n_ce, filters={'hs':True, 'tp':True, 'ws':True}) # Simulate TCs and update simulated waves TCs_sim, WVS_upd = CE.Simulate_TCs(DWTs, WVS_sim, TCs_params, TCs_RBFs, pchange_TCs, MU_WT, TAU_WT) # store simulation data CE.SaveSim(WVS_sim, TCs_sim, WVS_upd, int(n)) ```
github_jupyter
# Cyclical Systems: An Example of the Crank-Nicolson Method ## CH EN 2450 - Numerical Methods **Prof. Tony Saad (<a>www.tsaad.net</a>) <br/>Department of Chemical Engineering <br/>University of Utah** <hr/> ``` import numpy as np from numpy import * # %matplotlib notebook # %matplotlib nbagg %matplotlib inline %config InlineBackend.figure_format = 'svg' # %matplotlib qt import matplotlib.pyplot as plt from scipy.optimize import fsolve from scipy.integrate import odeint def forward_euler(rhs, f0, tend, dt): ''' Computes the forward_euler method ''' nsteps = int(tend/dt) f = np.zeros(nsteps) f[0] = f0 time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): f[n+1] = f[n] + dt * rhs(f[n], time[n]) return time, f def forward_euler_system(rhsvec, f0vec, tend, dt): ''' Solves a system of ODEs using the Forward Euler method ''' nsteps = int(tend/dt) neqs = len(f0vec) f = np.zeros( (neqs, nsteps) ) f[:,0] = f0vec time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): t = time[n] f[:,n+1] = f[:,n] + dt * rhsvec(f[:,n], t) return time, f def be_residual(fnp1, rhs, fn, dt, tnp1): ''' Nonlinear residual function for the backward Euler implicit time integrator ''' return fnp1 - fn - dt * rhs(fnp1, tnp1) def backward_euler(rhs, f0, tend, dt): ''' Computes the backward euler method :param rhs: an rhs function ''' nsteps = int(tend/dt) f = np.zeros(nsteps) f[0] = f0 time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): fn = f[n] tnp1 = time[n+1] fnew = fsolve(be_residual, fn, (rhs, fn, dt, tnp1)) f[n+1] = fnew return time, f def cn_residual(fnp1, rhs, fn, dt, tnp1, tn): ''' Nonlinear residual function for the Crank-Nicolson implicit time integrator ''' return fnp1 - fn - 0.5 * dt * ( rhs(fnp1, tnp1) + rhs(fn, tn) ) def crank_nicolson(rhs,f0,tend,dt): nsteps = int(tend/dt) f = np.zeros(nsteps) f[0] = f0 time = np.linspace(0,tend,nsteps) for n in np.arange(nsteps-1): fn = f[n] tnp1 = time[n+1] tn = time[n] fnew = fsolve(cn_residual, fn, (rhs, fn, dt, tnp1, tn)) f[n+1] = fnew return time, f ``` # Sharp Transient Solve the ODE: \begin{equation} \frac{\text{d}y}{\text{d}t} = -1000 y + 3000 - 2000 e^{-t};\quad y(0) = 0 \end{equation} The analytical solution is \begin{equation} y(t) = 3 - 0.998 e^{-1000t} - 2.002 e^{-t} \end{equation} We first plot the analytical solution ``` y = lambda t : 3 - 0.998*exp(-1000*t) - 2.002*exp(-t) t = np.linspace(0,1,500) plt.plot(t,y(t)) plt.grid() ``` Now let's solve this numerically. 
We first define the RHS for this function ``` def rhs_sharp_transient(f,t): return 3000 - 1000 * f - 2000* np.exp(-t) ``` Let's solve this using forward euler and backward euler ``` y0 = 0 tend = 0.03 dt = 0.001 t,yfe = forward_euler(rhs_sharp_transient,y0,tend,dt) t,ybe = backward_euler(rhs_sharp_transient,y0,tend,dt) t,ycn = crank_nicolson(rhs_sharp_transient,y0,tend,dt) plt.plot(t,y(t),label='Exact') # plt.plot(t,yfe,'r.-',markevery=1,markersize=10,label='Forward Euler') plt.plot(t,ybe,'k*-',markevery=2,markersize=10,label='Backward Euler') plt.plot(t,ycn,'o-',markevery=2,markersize=2,label='Crank Nicholson') plt.grid() plt.legend() ``` # Oscillatory Systems Solve the ODE: Solve the ODE: \begin{equation} \frac{\text{d}y}{\text{d}t} = r \omega \sin(\omega t) \end{equation} The analytical solution is \begin{equation} y(t) = r - r \cos(\omega t) \end{equation} First plot the analytical solution ``` r = 0.5 ω = 0.02 y = lambda t : r - r * cos(ω*t) t = np.linspace(0,100*pi) plt.clf() plt.plot(t,y(t)) plt.grid() ``` Let's solve this numerically ``` def rhs_oscillatory(f,t): r = 0.5 ω = 0.02 return r * ω * sin(ω*t) y0 = 0 tend = 100*pi dt = 10 t,yfe = forward_euler(rhs_oscillatory,y0,tend,dt) t,ybe = backward_euler(rhs_oscillatory,y0,tend,dt) t,ycn = crank_nicolson(rhs_oscillatory,y0,tend,dt) plt.plot(t,y(t),label='Exact') plt.plot(t,yfe,'r.-',markevery=1,markersize=10,label='Forward Euler') plt.plot(t,ybe,'k*-',markevery=2,markersize=10,label='Backward Euler') plt.plot(t,ycn,'o-',markevery=2,markersize=2,label='Crank Nicholson') plt.grid() plt.legend() plt.savefig('cyclical-system-example.pdf') import urllib import requests from IPython.core.display import HTML def css_styling(): styles = requests.get("https://raw.githubusercontent.com/saadtony/NumericalMethods/master/styles/custom.css") return HTML(styles.text) css_styling() ```
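Because the oscillatory problem has an analytical solution, we can also quantify how far each integrator drifts from it rather than judging by eye. The sketch below simply reuses the functions defined earlier in this notebook (`forward_euler`, `backward_euler`, `crank_nicolson`, `rhs_oscillatory`, and the exact solution `y`) and prints each scheme's maximum absolute error; it is an illustrative check, not part of the original notebook:

```
# Quick accuracy check (sketch): compare each scheme against the exact
# solution of the oscillatory problem on the same time grid.
dt = 10
tend = 100 * pi
t, yfe = forward_euler(rhs_oscillatory, 0, tend, dt)
t, ybe = backward_euler(rhs_oscillatory, 0, tend, dt)
t, ycn = crank_nicolson(rhs_oscillatory, 0, tend, dt)

exact = y(t)  # analytical solution evaluated at the shared time points
for name, sol in [('Forward Euler', yfe),
                  ('Backward Euler', ybe),
                  ('Crank-Nicolson', ycn)]:
    print('{:>15s} max abs error: {:.4f}'.format(name, np.max(np.abs(sol - exact))))
```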
github_jupyter
<table class="ee-notebook-buttons" align="left"> <td><a target="_parent" href="https://github.com/giswqs/geemap/tree/master/tutorials/Image/06_convolutions.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td> <td><a target="_parent" href="https://nbviewer.jupyter.org/github/giswqs/geemap/blob/master/tutorials/Image/06_convolutions.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td> <td><a target="_parent" href="https://colab.research.google.com/github/giswqs/geemap/blob/master/tutorials/Image/06_convolutions.ipynb"><img width=26px src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td> </table> # Convolutions To perform linear convolutions on images, use `image.convolve()`. The only argument to convolve is an `ee.Kernel` which is specified by a shape and the weights in the kernel. Each pixel of the image output by `convolve()` is the linear combination of the kernel values and the input image pixels covered by the kernel. The kernels are applied to each band individually. For example, you might want to use a low-pass (smoothing) kernel to remove high-frequency information. The following illustrates a 15x15 low-pass kernel applied to a Landsat 8 image: ## Install Earth Engine API and geemap Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`. The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet. **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.foliumap`](https://github.com/giswqs/geemap/blob/master/geemap/foliumap.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving). ``` # Installs geemap package import subprocess try: import geemap except ImportError: print('geemap package not installed. 
Installing ...') subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap']) # Checks whether this notebook is running on Google Colab try: import google.colab import geemap.foliumap as emap except: import geemap as emap # Authenticates and initializes Earth Engine import ee try: ee.Initialize() except Exception as e: ee.Authenticate() ee.Initialize() ``` ## Create an interactive map The default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ``` Map = emap.Map(center=[40, -100], zoom=4) Map.add_basemap('ROADMAP') # Add Google Map Map ``` ## Add Earth Engine Python script ``` # Load and display an image. image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318') Map.setCenter(-121.9785, 37.8694, 11) Map.addLayer(image, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'input image') # Define a boxcar or low-pass kernel. # boxcar = ee.Kernel.square({ # 'radius': 7, 'units': 'pixels', 'normalize': True # }) boxcar = ee.Kernel.square(7, 'pixels', True) # Smooth the image by convolving with the boxcar kernel. smooth = image.convolve(boxcar) Map.addLayer(smooth, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'smoothed') Map.addLayerControl() Map ``` The output of convolution with the low-pass filter should look something like Figure 1. Observe that the arguments to the kernel determine its size and coefficients. Specifically, with the `units` parameter set to pixels, the `radius` parameter specifies the number of pixels from the center that the kernel will cover. If `normalize` is set to true, the kernel coefficients will sum to one. If the `magnitude` parameter is set, the kernel coefficients will be multiplied by the magnitude (if `normalize` is also true, the coefficients will sum to `magnitude`). If there is a negative value in any of the kernel coefficients, setting `normalize` to true will make the coefficients sum to zero. Use other kernels to achieve the desired image processing effect. This example uses a Laplacian kernel for isotropic edge detection: ``` Map = emap.Map(center=[40, -100], zoom=4) # Define a Laplacian, or edge-detection kernel. laplacian = ee.Kernel.laplacian8(1, False) # Apply the edge-detection kernel. edgy = image.convolve(laplacian) Map.addLayer(edgy, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'edges') Map.setCenter(-121.9785, 37.8694, 11) Map.addLayerControl() Map ``` Note the format specifier in the visualization parameters. Earth Engine sends display tiles to the Code Editor in JPEG format for efficiency, however edge tiles are sent in PNG format to handle transparency of pixels outside the image boundary. When a visual discontinuity results, setting the format to PNG results in a consistent display. The result of convolving with the Laplacian edge detection kernel should look something like Figure 2. There are also anisotropic edge detection kernels (e.g. Sobel, Prewitt, Roberts), the direction of which can be changed with `kernel.rotate()`. Other low pass kernels include a Gaussian kernel and kernels of various shape with uniform weights. To create kernels with arbitrarily defined weights and shape, use `ee.Kernel.fixed()`. For example, this code creates a 9x9 kernel of 1’s with a zero in the middle: ``` # Create a list of weights for a 9x9 kernel. list = [1, 1, 1, 1, 1, 1, 1, 1, 1] # The center of the kernel is zero. centerList = [1, 1, 1, 1, 0, 1, 1, 1, 1] # Assemble a list of lists: the 9x9 kernel weights as a 2-D matrix. 
lists = [list, list, list, list, centerList, list, list, list, list] # Create the kernel from the weights. kernel = ee.Kernel.fixed(9, 9, lists, -4, -4, False) print(kernel.getInfo()) ```
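The text above also mentions Gaussian kernels among the available low-pass options. A minimal sketch of that variant is shown below; it assumes the `image` and `Map` objects from the earlier cells and passes `ee.Kernel.gaussian` its arguments positionally (radius, sigma, units, normalize), in the same style as the `ee.Kernel.square` call above. The particular radius and sigma values are illustrative choices, not values from the original tutorial:

```
# Sketch: Gaussian low-pass smoothing, applied the same way as the boxcar kernel above.
gaussian = ee.Kernel.gaussian(7, 3, 'pixels', True)  # radius=7 px, sigma=3, normalized
gaussian_smooth = image.convolve(gaussian)
Map.addLayer(gaussian_smooth, {'bands': ['B5', 'B4', 'B3'], 'max': 0.5}, 'gaussian smoothed')
Map
```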
github_jupyter
<h1 align="center">Theano</h1> ``` !pip install numpy matplotlib !pip install --upgrade https://github.com/Theano/Theano/archive/master.zip !pip install --upgrade https://github.com/Lasagne/Lasagne/archive/master.zip ``` ### Warm-up ``` import theano import theano.tensor as T %pylab inline ``` #### The future function argument is a symbolic variable ``` N = T.scalar('a dimension', dtype='float32') ``` #### The recipe for the square: operations on symbolic variables ``` result = T.power(N, 2) ``` #### theano.grad(cost, wrt) ``` grad_result = theano.grad(result, N) ``` #### Compiling the "square" function ``` sq_function = theano.function(inputs=[N], outputs=result) gr_function = theano.function(inputs=[N], outputs=grad_result) ``` #### Applying the function ``` # Create an np.array of x values xv = np.arange(-10, 10) # Apply the function to each x val = map(float, [sq_function(x) for x in xv]) # Compute the gradient at each point grad = map(float, [gr_function(x) for x in xv]) ``` ### What will we see if we plot the function and its gradient? ``` pylab.plot(xv, val, label='x*x') pylab.plot(xv, grad, label='d x*x / dx') pylab.legend() ``` <h1 align="center">Lasagne</h1> * lasagne is a library for building neural networks of arbitrary architecture on top of theano * As a demo task we pick the same digit recognition problem, but at a larger scale: 28x28 images, 10 digits ``` from mnist import load_dataset X_train, y_train, X_val, y_val, X_test, y_test = load_dataset() print 'X shape', X_train.shape, 'y shape', y_train.shape fig, axes = plt.subplots(nrows=1, ncols=7, figsize=(20, 20)) for i, ax in enumerate(axes): ax.imshow(X_train[i, 0], cmap='gray') ``` Let's look at the DenseLayer in lasagne - http://lasagne.readthedocs.io/en/latest/modules/layers/dense.html - https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/dense.py#L16-L124 - All the essential code is here https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/dense.py#L121 ``` import lasagne from lasagne import init from theano import tensor as T from lasagne.nonlinearities import softmax X, y = T.tensor4('X'), T.vector('y', 'int32') ``` This is how the network architecture is defined ``` # input layer (auxiliary) net = lasagne.layers.InputLayer(shape=(None, 1, 28, 28), input_var=X) net = lasagne.layers.Conv2DLayer(net, 15, 28, pad='valid', W=init.Constant()) # convolutional layer net = lasagne.layers.Conv2DLayer(net, 10, 2, pad='full', W=init.Constant()) # convolutional layer net = lasagne.layers.DenseLayer(net, num_units=500) # fully connected layer net = lasagne.layers.DropoutLayer(net, 1.0) # regularizer net = lasagne.layers.DenseLayer(net, num_units=200) # fully connected layer net = lasagne.layers.DenseLayer(net, num_units=10) # fully connected layer # network prediction (a theano transformation) y_predicted = lasagne.layers.get_output(net) # all network weights (shared variables) all_weights = lasagne.layers.get_all_params(net) print all_weights # the loss and accuracy functions are defined right here loss = lasagne.objectives.categorical_accuracy(y_predicted, y).mean() accuracy = lasagne.objectives.categorical_accuracy(y_predicted, y).mean() # immediately compute the dictionary of updated values after a gradient step, as before updates = lasagne.updates.momentum(loss, all_weights, learning_rate=1.0, momentum=1.5) # function that applies the updates and returns the loss and accuracy train_fun = theano.function([X, y], [loss, accuracy], updates=updates) accuracy_fun = theano.function([X, y], accuracy) # accuracy without updating the weights, for testing ``` # Training process ``` 
import time from mnist import iterate_minibatches num_epochs = 5 # number of passes over the data batch_size = 50 # mini-batch size for epoch in range(num_epochs): train_err, train_acc, train_batches, start_time = 0, 0, 0, time.time() for inputs, targets in iterate_minibatches(X_train, y_train, batch_size): train_err_batch, train_acc_batch = train_fun(inputs, targets) train_err += train_err_batch train_acc += train_acc_batch train_batches += 1 val_acc, val_batches = 0, 0 for inputs, targets in iterate_minibatches(X_test, y_test, batch_size): val_acc += accuracy_fun(inputs, targets) val_batches += 1 print "Epoch %s of %s took %.3f s" % (epoch + 1, num_epochs, time.time() - start_time) print " train loss:\t %.3f" % (train_err / train_batches) print " train acc:\t %.3f" % (train_acc * 100 / train_batches), '%' print " test acc:\t %.3f" % (val_acc * 100 / val_batches), '%' print test_acc = 0 test_batches = 0 for batch in iterate_minibatches(X_test, y_test, 500): inputs, targets = batch acc = accuracy_fun(inputs, targets) test_acc += acc test_batches += 1 print("Final results: \n test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100)) ``` # Ensembling with DropOut ``` # network prediction (a theano transformation) y_predicted = T.mean([lasagne.layers.get_output(net, deterministic=False) for i in range(10)], axis=0) accuracy = lasagne.objectives.categorical_accuracy(y_predicted, y).mean() accuracy_fun = theano.function([X, y], accuracy) # accuracy without updating the weights, for testing test_acc = 0 test_batches = 0 for batch in iterate_minibatches(X_test, y_test, 500): inputs, targets = batch acc = accuracy_fun(inputs, targets) test_acc += acc test_batches += 1 print("Final results: \n test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100)) ```
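For comparison with the dropout ensemble above, a common baseline is a single deterministic pass in which dropout is switched off (Lasagne then rescales activations instead of sampling masks, via `deterministic=True`). The small sketch below reuses the `net`, `X`, `y`, and `iterate_minibatches` objects already defined in this notebook; it is an added illustration, not part of the original:

```
# Sketch: evaluate the same network with dropout disabled (single deterministic pass).
y_deterministic = lasagne.layers.get_output(net, deterministic=True)
det_accuracy = lasagne.objectives.categorical_accuracy(y_deterministic, y).mean()
det_accuracy_fun = theano.function([X, y], det_accuracy)

test_acc, test_batches = 0, 0
for inputs, targets in iterate_minibatches(X_test, y_test, 500):
    test_acc += det_accuracy_fun(inputs, targets)
    test_batches += 1
print("Deterministic test accuracy:\t{:.2f} %".format(test_acc / test_batches * 100))
```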
github_jupyter
查看当前GPU信息 ``` from tensorflow.python.client import device_lib device_lib.list_local_devices() !pip install bert-tensorflow import pandas as pd import tensorflow as tf import tensorflow_hub as hub import pickle import bert from bert import run_classifier from bert import optimization from bert import tokenization def pretty_print(result): df = pd.DataFrame([result]).T df.columns = ["values"] return df def create_tokenizer_from_hub_module(bert_model_hub): """Get the vocab file and casing info from the Hub module.""" with tf.Graph().as_default(): bert_module = hub.Module(bert_model_hub) tokenization_info = bert_module(signature="tokenization_info", as_dict=True) with tf.Session() as sess: vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"], tokenization_info["do_lower_case"]]) return bert.tokenization.FullTokenizer( vocab_file=vocab_file, do_lower_case=do_lower_case) def make_features(dataset, label_list, MAX_SEQ_LENGTH, tokenizer, DATA_COLUMN, LABEL_COLUMN): input_example = dataset.apply(lambda x: bert.run_classifier.InputExample(guid=None, text_a = x[DATA_COLUMN], text_b = None, label = x[LABEL_COLUMN]), axis = 1) features = bert.run_classifier.convert_examples_to_features(input_example, label_list, MAX_SEQ_LENGTH, tokenizer) return features def create_model(bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, labels, num_labels): """Creates a classification model.""" bert_module = hub.Module( bert_model_hub, trainable=True) bert_inputs = dict( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids) bert_outputs = bert_module( inputs=bert_inputs, signature="tokens", as_dict=True) # Use "pooled_output" for classification tasks on an entire sentence. # Use "sequence_outputs" for token-level output. output_layer = bert_outputs["pooled_output"] hidden_size = output_layer.shape[-1].value # Create our own layer to tune for politeness data. output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): # Dropout helps prevent overfitting output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) log_probs = tf.nn.log_softmax(logits, axis=-1) # Convert labels into one-hot encoding one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32)) # If we're predicting, we want predicted labels and the probabiltiies. if is_predicting: return (predicted_labels, log_probs) # If we're train/eval, compute loss between predicted and actual label per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, predicted_labels, log_probs) # model_fn_builder actually creates our model function # using the passed parameters for num_labels, learning_rate, etc. 
def model_fn_builder(bert_model_hub, num_labels, learning_rate, num_train_steps, num_warmup_steps): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_predicting = (mode == tf.estimator.ModeKeys.PREDICT) # TRAIN and EVAL if not is_predicting: (loss, predicted_labels, log_probs) = create_model( bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) train_op = bert.optimization.create_optimizer( loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False) # Calculate evaluation metrics. def metric_fn(label_ids, predicted_labels): accuracy = tf.metrics.accuracy(label_ids, predicted_labels) f1_score = tf.contrib.metrics.f1_score( label_ids, predicted_labels) auc = tf.metrics.auc( label_ids, predicted_labels) recall = tf.metrics.recall( label_ids, predicted_labels) precision = tf.metrics.precision( label_ids, predicted_labels) true_pos = tf.metrics.true_positives( label_ids, predicted_labels) true_neg = tf.metrics.true_negatives( label_ids, predicted_labels) false_pos = tf.metrics.false_positives( label_ids, predicted_labels) false_neg = tf.metrics.false_negatives( label_ids, predicted_labels) return { "eval_accuracy": accuracy, "f1_score": f1_score, "auc": auc, "precision": precision, "recall": recall, "true_positives": true_pos, "true_negatives": true_neg, "false_positives": false_pos, "false_negatives": false_neg } eval_metrics = metric_fn(label_ids, predicted_labels) if mode == tf.estimator.ModeKeys.TRAIN: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op) else: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics) else: (predicted_labels, log_probs) = create_model( bert_model_hub, is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels) predictions = { 'probabilities': log_probs, 'labels': predicted_labels } return tf.estimator.EstimatorSpec(mode, predictions=predictions) # Return the actual model function in the closure return model_fn def estimator_builder(bert_model_hub, OUTPUT_DIR, SAVE_SUMMARY_STEPS, SAVE_CHECKPOINTS_STEPS, label_list, LEARNING_RATE, num_train_steps, num_warmup_steps, BATCH_SIZE): # Specify outpit directory and number of checkpoint steps to save run_config = tf.estimator.RunConfig( model_dir=OUTPUT_DIR, save_summary_steps=SAVE_SUMMARY_STEPS, save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS) model_fn = model_fn_builder( bert_model_hub = bert_model_hub, num_labels=len(label_list), learning_rate=LEARNING_RATE, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params={"batch_size": BATCH_SIZE}) return estimator, model_fn, run_config def run_on_dfs(train, test, DATA_COLUMN, LABEL_COLUMN, MAX_SEQ_LENGTH = 128, BATCH_SIZE = 32, LEARNING_RATE = 2e-5, NUM_TRAIN_EPOCHS = 3.0, WARMUP_PROPORTION = 0.1, SAVE_SUMMARY_STEPS = 100, SAVE_CHECKPOINTS_STEPS = 10000, bert_model_hub = "https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1"): label_list = train[LABEL_COLUMN].unique().tolist() tokenizer = create_tokenizer_from_hub_module(bert_model_hub) train_features = make_features(train, label_list, MAX_SEQ_LENGTH, tokenizer, DATA_COLUMN, LABEL_COLUMN) test_features = make_features(test, label_list, MAX_SEQ_LENGTH, 
tokenizer, DATA_COLUMN, LABEL_COLUMN) num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS) num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION) estimator, model_fn, run_config = estimator_builder( bert_model_hub, OUTPUT_DIR, SAVE_SUMMARY_STEPS, SAVE_CHECKPOINTS_STEPS, label_list, LEARNING_RATE, num_train_steps, num_warmup_steps, BATCH_SIZE) train_input_fn = bert.run_classifier.input_fn_builder( features=train_features, seq_length=MAX_SEQ_LENGTH, is_training=True, drop_remainder=False) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) test_input_fn = run_classifier.input_fn_builder( features=test_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False) result_dict = estimator.evaluate(input_fn=test_input_fn, steps=None) return result_dict, estimator import random random.seed(10) OUTPUT_DIR = 'output' ``` ----- Only the code below needs to be changed ------ Import the dataset ``` !wget https://github.com/yaoyue123/SocialComputing/raw/master/spam_message/training.txt !wget https://github.com/yaoyue123/SocialComputing/raw/master/spam_message/validation.txt train = pd.read_table("training.txt",sep='\t',error_bad_lines=False) #mytrain= mytrain[order] test = pd.read_table("validation.txt",sep='\t',error_bad_lines=False) #mytest= mytest[order] train.head() test.head() ``` Change your parameters here, such as the label column, the BERT model address, and the number of epochs ``` myparam = { "DATA_COLUMN": "massage", "LABEL_COLUMN": "label", "LEARNING_RATE": 2e-5, "NUM_TRAIN_EPOCHS":1, "bert_model_hub":"https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1" } ``` Train the model. Normally, one epoch takes roughly 10 min of training on a K80. ``` result, estimator = run_on_dfs(train, test, **myparam) ``` The BERT model is quite strong: a single epoch already reaches 99% accuracy. ``` pretty_print(result) ```
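Once `run_on_dfs` has returned a trained `estimator`, you may also want to score new messages. The helper below is a hypothetical sketch, not part of the original notebook: it reuses `create_tokenizer_from_hub_module`, `run_classifier.InputExample`, `run_classifier.convert_examples_to_features`, and `run_classifier.input_fn_builder` exactly as they are used above, and assumes the same label list, `MAX_SEQ_LENGTH` of 128, and Chinese BERT hub module; the function name and the dummy-label trick are illustrative assumptions.

```
# Hypothetical helper (sketch): classify new messages with the trained estimator.
def predict_messages(messages, estimator, label_list,
                     bert_model_hub="https://tfhub.dev/google/bert_chinese_L-12_H-768_A-12/1",
                     max_seq_length=128):
    tokenizer = create_tokenizer_from_hub_module(bert_model_hub)
    # The label is ignored at prediction time, so reuse the first label as a placeholder.
    examples = [run_classifier.InputExample(guid=None, text_a=m, text_b=None, label=label_list[0])
                for m in messages]
    features = run_classifier.convert_examples_to_features(
        examples, label_list, max_seq_length, tokenizer)
    predict_input_fn = run_classifier.input_fn_builder(
        features=features, seq_length=max_seq_length,
        is_training=False, drop_remainder=False)
    # model_fn returns a dict with 'labels' (an index into label_list) and 'probabilities'.
    return [(m, label_list[int(p['labels'])])
            for m, p in zip(messages, estimator.predict(predict_input_fn))]

# Example usage (placeholder message strings):
# predict_messages(["some new message", "another message"], estimator,
#                  train["label"].unique().tolist())
```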
github_jupyter
## Change sys.path to use my tensortrade instead of the one in env ``` import sys sys.path.append("/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader") print(sys.path) ``` ## Read PredictIt Data Instead ``` import ssl import pandas as pd ssl._create_default_https_context = ssl._create_unverified_context # Only used if pandas gives a SSLError def fetch_data(symbol): path = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/predictit_datasets/" filename = "{}.xlsx".format(symbol) df = pd.read_excel(path + filename, skiprows=4) df = df.set_index("Date") df = df.drop(df.columns[[7,8,9]], axis=1) df = df.drop("ID", 1) df.columns = [symbol + ":" + name.lower() for name in df.columns] return df all_data = pd.concat([ fetch_data("WARREN"), fetch_data("CRUZ"), fetch_data("MANCHIN"), fetch_data("SANDERS"), fetch_data("NELSON"), fetch_data("DONNELLY"), fetch_data("PELOSI"), fetch_data("MANAFORT"), fetch_data("BROWN"), fetch_data("RYAN"), fetch_data("STABENOW") ], axis=1) all_data.head() ``` ## Plot the closing prices for all the markets ``` %matplotlib inline closing_prices = all_data.loc[:, [("close" in name) for name in all_data.columns]] closing_prices.plot() ``` ## Slice just a specific time period from the dataframe ``` all_data.index = pd.to_datetime(all_data.index) subset_data = all_data[(all_data.index >= '09-01-2017') & (all_data.index <= '09-04-2019')] subset_data.head() ``` ## Define Exchanges An exchange needs a name, an execution service, and streams of price data in order to function properly. The setups supported right now are the simulated execution service using simulated or stochastic data. More execution services will be made available in the future, as well as price streams so that live data and execution can be supported. ``` from tensortrade.exchanges import Exchange from tensortrade.exchanges.services.execution.simulated import execute_order from tensortrade.data import Stream #Exchange(name of exchange, service) #It looks like each Stream takes a name, and then a list of the closing prices. predictit_exch = Exchange("predictit", service=execute_order)( Stream("USD-WARREN", list(subset_data['WARREN:close'])), Stream("USD-CRUZ", list(subset_data['CRUZ:close'])), Stream("USD-MANCHIN", list(subset_data['MANCHIN:close'])), Stream("USD-SANDERS", list(subset_data['SANDERS:close'])), Stream("USD-NELSON", list(subset_data['NELSON:close'])), Stream("USD-DONNELLY", list(subset_data['DONNELLY:close'])), Stream("USD-PELOSI", list(subset_data['PELOSI:close'])), Stream("USD-MANAFORT", list(subset_data['MANAFORT:close'])), Stream("USD-BROWN", list(subset_data['BROWN:close'])), Stream("USD-RYAN", list(subset_data['RYAN:close'])), Stream("USD-STABENOW", list(subset_data['STABENOW:close'])) ) ``` Now that the exchanges have been defined we can define our features that we would like to include, excluding the prices we have provided for the exchanges. ### Doing it without adding other features. Just use price ``` #You still have to add "Streams" for all the standard columns open, high, low, close, volume in this case from tensortrade.data import DataFeed, Module with Module("predictit") as predictit_ns: predictit_nodes = [Stream(name, list(subset_data[name])) for name in subset_data.columns] #Then create the Feed from it feed = DataFeed([predictit_ns]) feed.next() ``` ## Portfolio Make the portfolio using the any combinations of exchanges and intruments that the exchange supports ``` #I am going to have to add "instruments" for all 25 of the PredictIt markets I'm working with. 
from tensortrade.instruments import USD, WARREN, CRUZ, MANCHIN, SANDERS, NELSON, DONNELLY,\ PELOSI, MANAFORT, BROWN, RYAN, STABENOW from tensortrade.wallets import Wallet, Portfolio portfolio = Portfolio(USD, [ Wallet(predictit_exch, 10000 * USD), Wallet(predictit_exch, 0 * WARREN), Wallet(predictit_exch, 0 * CRUZ), Wallet(predictit_exch, 0 * MANCHIN), Wallet(predictit_exch, 0 * SANDERS), Wallet(predictit_exch, 0 * NELSON), Wallet(predictit_exch, 0 * DONNELLY), Wallet(predictit_exch, 0 * PELOSI), Wallet(predictit_exch, 0 * MANAFORT), Wallet(predictit_exch, 0 * BROWN), Wallet(predictit_exch, 0 * RYAN), Wallet(predictit_exch, 0 * STABENOW) ]) ``` ## Environment ``` from tensortrade.environments import TradingEnvironment env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='simple', reward_scheme='simple', window_size=15, enable_logger=False, renderers = 'screenlog' ) env.feed.next() ``` #### ^An environment doesn't just show the OHLCV for each instrument. It also shows free, locked, total, as well as "USD_BTC" ## Using 123's Ray example ``` import os parent_dir = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/deeptrader" os.environ["PYTHONPATH"] = parent_dir + ":" + os.environ.get("PYTHONPATH", "") !PYTHONWARNINGS=ignore::yaml.YAMLLoadWarning #Import tensortrade import tensortrade # Define Exchanges from tensortrade.exchanges import Exchange from tensortrade.exchanges.services.execution.simulated import execute_order from tensortrade.data import Stream # Define External Data Feed (features) import ta from sklearn import preprocessing from tensortrade.data import DataFeed, Module # Portfolio from tensortrade.instruments import USD, BTC from tensortrade.wallets import Wallet, Portfolio from tensortrade.actions import ManagedRiskOrders from gym.spaces import Discrete # Environment from tensortrade.environments import TradingEnvironment import gym import ray from ray import tune from ray.tune import grid_search from ray.tune.registry import register_env import ray.rllib.agents.ppo as ppo import ray.rllib.agents.dqn as dqn from ray.tune.logger import pretty_print from tensortrade.rewards import RiskAdjustedReturns class RayTradingEnv(TradingEnvironment): def __init__(self): env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme="simple", reward_scheme="simple", window_size=15, enable_logger=False, renderers = 'screenlog' ) self.env = env self.action_space = self.env.action_space self.observation_space = self.env.observation_space def reset(self): return self.env.reset() def step(self, action): return self.env.step(action) def env_creator(env_config): return RayTradingEnv() register_env("ray_trading_env", env_creator) ray.init(ignore_reinit_error=True) config = dqn.DEFAULT_CONFIG.copy() config["num_gpus"] = 0 #config["num_workers"] = 4 #config["num_envs_per_worker"] = 8 # config["eager"] = False # config["timesteps_per_iteration"] = 100 # config["train_batch_size"] = 20 #config['log_level'] = "DEBUG" trainer = dqn.DQNTrainer(config=config, env="ray_trading_env") config ``` ## Train using the old fashioned RLLib way ``` for i in range(10): # Perform one iteration of training the policy with PPO print("Training iteration {}...".format(i)) result = trainer.train() print("result: {}".format(result)) if i % 100 == 0: checkpoint = trainer.save() print("checkpoint saved at", checkpoint) result['hist_stats']['episode_reward'] ``` ## OR train using the tune way (better so far) ``` analysis = tune.run( "DQN", name = "DQN10-paralellism", checkpoint_at_end=True, stop={ 
"timesteps_total": 4000, }, config={ "env": "ray_trading_env", "lr": grid_search([1e-4]), # try different lrs "num_workers": 2, # parallelism, }, ) #Use the below command to see results #tensorboard --logdir=/Users/jasonfiacco/ray_results/DQN2 #Now you can plot the reward results of your tuner. dfs = analysis.trial_dataframes ax = None for d in dfs.values(): ax = d.episode_reward_mean.plot(ax=ax, legend=True) ``` ## Restoring an already existing agent that I tuned ``` import os logdir = analysis.get_best_logdir("episode_reward_mean", mode="max") trainer.restore(os.path.join(logdir, "checkpoint_993/checkpoint-993")) trainer.restore("/Users/jasonfiacco/ray_results/DQN4/DQN_ray_trading_env_fedb24f0_0_lr=1e-06_2020-03-03_15-46-02kzbdv53d/checkpoint_5/checkpoint-5") ``` ## Testing ``` #Set up a testing environment with test data. test_env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='simple', reward_scheme='simple', window_size=15, enable_logger=False, renderers = 'screenlog' ) for episode_num in range(1): state = test_env.reset() done = False cumulative_reward = 0 step = 0 action = trainer.compute_action(state) while not done: action = trainer.compute_action(state) state, reward, done, results = test_env.step(action) cumulative_reward += reward #Render every 100 steps: if step % 100 == 0: test_env.render() step += 1 print("Cumulative reward: ", cumulative_reward) ``` ## Plot ``` %matplotlib inline portfolio.performance.plot() portfolio.performance.net_worth.plot() #Plot the total balance in each type of item p = portfolio.performance p2 = p.iloc[:, :] weights = p2.loc[:, [("/worth" in name) for name in p2.columns]] weights.iloc[:, 1:8].plot() ``` ## Try Plotly Render too ``` from tensortrade.environments.render import PlotlyTradingChart from tensortrade.environments.render import FileLogger chart_renderer = PlotlyTradingChart( height = 800 ) file_logger = FileLogger( filename='example.log', # omit or None for automatic file name path='training_logs' # create a new directory if doesn't exist, None for no directory ) price_history.columns = ['datetime', 'open', 'high', 'low', 'close', 'volume'] env = TradingEnvironment( feed=feed, portfolio=portfolio, action_scheme='managed-risk', reward_scheme='risk-adjusted', window_size=20, price_history=price_history, renderers = [chart_renderer, file_logger] ) from tensortrade.agents import DQNAgent agent = DQNAgent(env) agent.train(n_episodes=1, n_steps=1000, render_interval=1) ``` ## Extra Stuff ``` apath = "/Users/jasonfiacco/Documents/Yale/Senior/thesis/jasonfiacco-selectedmarkets-mytickers.xlsx" df = pd.read_excel(apath, skiprows=2) jason_tickers = df.iloc[:, 5].tolist() descriptions = df.iloc[:, 1].tolist() for ticker, description in zip(jason_tickers, descriptions): l = "{} = Instrument(\'{}\', 2, \'{}\')".format(ticker, ticker, description) print(l) ```
github_jupyter
[Table of Contents](./table_of_contents.ipynb) # Smoothing ``` #format the book %matplotlib inline from __future__ import division, print_function from book_format import load_style load_style() ``` ## Introduction The performance of the Kalman filter is not optimal when you consider future data. For example, suppose we are tracking an aircraft, and the latest measurement deviates far from the current track, like so (I'll only consider 1 dimension for simplicity): ``` import matplotlib.pyplot as plt data = [10.1, 10.2, 9.8, 10.1, 10.2, 10.3, 10.1, 9.9, 10.2, 10.0, 9.9, 11.4] plt.plot(data) plt.xlabel('time') plt.ylabel('position'); ``` After a period of near steady state, we have a very large change. Assume the change is past the limit of the aircraft's flight envelope. Nonetheless the Kalman filter incorporates that new measurement into the filter based on the current Kalman gain. It cannot reject the noise because the measurement could reflect the initiation of a turn. Granted it is unlikely that we are turning so abruptly, but it is impossible to say whether * The aircraft started a turn awhile ago, but the previous measurements were noisy and didn't show the change. * The aircraft is turning, and this measurement is very noisy * The measurement is very noisy and the aircraft has not turned * The aircraft is turning in the opposite direction, and the measurement is extremely noisy Now, suppose the following measurements are: 11.3 12.1 13.3 13.9 14.5 15.2 ``` data2 = [11.3, 12.1, 13.3, 13.9, 14.5, 15.2] plt.plot(data + data2); ``` Given these future measurements we can infer that yes, the aircraft initiated a turn. On the other hand, suppose these are the following measurements. ``` data3 = [9.8, 10.2, 9.9, 10.1, 10.0, 10.3, 9.9, 10.1] plt.plot(data + data3); ``` In this case we are led to conclude that the aircraft did not turn and that the outlying measurement was merely very noisy. ## An Overview of How Smoothers Work The Kalman filter is a *recursive* filter with the Markov property - it's estimate at step `k` is based only on the estimate from step `k-1` and the measurement at step `k`. But this means that the estimate from step `k-1` is based on step `k-2`, and so on back to the first epoch. Hence, the estimate at step `k` depends on all of the previous measurements, though to varying degrees. `k-1` has the most influence, `k-2` has the next most, and so on. Smoothing filters incorporate future measurements into the estimate for step `k`. The measurement from `k+1` will have the most effect, `k+2` will have less effect, `k+3` less yet, and so on. This topic is called *smoothing*, but I think that is a misleading name. I could smooth the data above by passing it through a low pass filter. The result would be smooth, but not necessarily accurate because a low pass filter will remove real variations just as much as it removes noise. In contrast, Kalman smoothers are *optimal* - they incorporate all available information to make the best estimate that is mathematically achievable. ## Types of Smoothers There are three classes of Kalman smoothers that produce better tracking in these situations. * Fixed-Interval Smoothing This is a batch processing based filter. This filter waits for all of the data to be collected before making any estimates. For example, you may be a scientist collecting data for an experiment, and don't need to know the result until the experiment is complete. 
A fixed-interval smoother will collect all the data, then estimate the state at each measurement using all available previous and future measurements. If it is possible for you to run your Kalman filter in batch mode it is always recommended to use one of these filters, as it will provide much better results than the recursive forms of the filter from the previous chapters. * Fixed-Lag Smoothing Fixed-lag smoothers introduce latency into the output. Suppose we choose a lag of 4 steps. The filter will ingest the first 3 measurements but not output a filtered result. Then, when the 4th measurement comes in, the filter will produce the output for measurement 1, taking measurements 1 through 4 into account. When the 5th measurement comes in, the filter will produce the result for measurement 2, taking measurements 2 through 5 into account. This is useful when you need recent data but can afford a bit of lag. For example, perhaps you are using machine vision to monitor a manufacturing process. If you can afford a few seconds of delay in the estimate a fixed-lag smoother will allow you to produce very accurate and smooth results. * Fixed-Point Smoothing A fixed-point filter operates as a normal Kalman filter, but also produces an estimate for the state at some fixed time $j$. Before the time $k$ reaches $j$ the filter operates as a normal filter. Once $k>j$ the filter estimates $x_k$ and then also updates its estimate for $x_j$ using all of the measurements between $j\dots k$. This can be useful to estimate initial parameters for a system, or for producing the best estimate for an event that happened at a specific time. For example, you may have a robot that took a photograph at time $j$. You can use a fixed-point smoother to get the best possible pose information for the camera at time $j$ as the robot continues moving. ## Choice of Filters The choice of these filters depends on your needs and how much memory and processing time you can spare. Fixed-point smoothing requires storage of all measurements, and is very costly to compute because the output for every time step is recomputed for every measurement. On the other hand, the filter does produce a decent output for the current measurement, so this filter can be used for real time applications. Fixed-lag smoothing only requires you to store a window of data, and processing requirements are modest because only that window is processed for each new measurement. The drawback is that the filter's output always lags the input, and the smoothing is not as pronounced as is possible with fixed-interval smoothing. Fixed-interval smoothing produces the most smoothed output at the cost of having to be batch processed. Most algorithms use some sort of forwards/backwards algorithm that is only twice as slow as a recursive Kalman filter. ## Fixed-Interval Smoothing There are many fixed-interval smoothers available in the literature. I have chosen to implement the smoother invented by Rauch, Tung, and Striebel because of its ease of implementation and efficiency of computation. It is also the smoother I have seen used most often in real applications. This smoother is commonly known as an RTS smoother. Derivation of the RTS smoother runs to several pages of densely packed math. I'm not going to inflict it on you. Instead I will briefly present the algorithm, equations, and then move directly to implementation and demonstration of the smoother. The RTS smoother works by first running the Kalman filter in a batch mode, computing the filter output for each step. 
Given the filter output for each measurement, along with the covariance matrix corresponding to each output, the RTS smoother runs over the data backwards, incorporating its knowledge of the future into the past measurements. When it reaches the first measurement it is done, and the filtered output incorporates all of the information in a maximally optimal form. The equations for the RTS smoother are very straightforward and easy to implement. This derivation is for the linear Kalman filter. Similar derivations exist for the EKF and UKF. These steps are performed on the output of the batch processing, going backwards from the most recent in time back to the first estimate. Each iteration incorporates the knowledge of the future into the state estimate. Since the state estimate already incorporates all of the past measurements, the result is that each estimate contains knowledge of all measurements in the past and future. Here it is very important to distinguish between past, present, and future, so I have used subscripts to denote whether the data is from the future or not.

Predict Step

$$\begin{aligned}
\mathbf{P} &= \mathbf{FP}_k\mathbf{F}^\mathsf{T} + \mathbf{Q}
\end{aligned}$$

Update Step

$$\begin{aligned}
\mathbf{K}_k &= \mathbf{P}_k\mathbf{F}^\mathsf{T}\mathbf{P}^{-1} \\
\mathbf{x}_k &= \mathbf{x}_k + \mathbf{K}_k(\mathbf{x}_{k+1} - \mathbf{Fx}_k) \\
\mathbf{P}_k &= \mathbf{P}_k + \mathbf{K}_k(\mathbf{P}_{k+1} - \mathbf{P})\mathbf{K}_k^\mathsf{T}
\end{aligned}$$

As always, the hardest part of the implementation is correctly accounting for the subscripts. A basic implementation without error checking would be:

```python
from numpy import zeros, dot
from numpy.linalg import inv

def rts_smoother(Xs, Ps, F, Q):
    n, dim_x, _ = Xs.shape

    # smoother gain
    K = zeros((n, dim_x, dim_x))
    x, P, Pp = Xs.copy(), Ps.copy(), Ps.copy()

    for k in range(n-2, -1, -1):
        Pp[k] = dot(F, P[k]).dot(F.T) + Q  # predicted covariance

        K[k] = dot(P[k], F.T).dot(inv(Pp[k]))
        x[k] += dot(K[k], x[k+1] - dot(F, x[k]))
        P[k] += dot(K[k], P[k+1] - Pp[k]).dot(K[k].T)
    return (x, P, K, Pp)
```

This implementation mirrors the implementation provided in FilterPy. It assumes that the Kalman filter is being run externally in batch mode, and that the resulting states and covariances are passed in via the `Xs` and `Ps` variables. Here is an example.

```
import numpy as np
from numpy import random
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import KalmanFilter
import kf_book.book_plots as bp

def plot_rts(noise, Q=0.001, show_velocity=False):
    random.seed(123)
    fk = KalmanFilter(dim_x=2, dim_z=1)

    fk.x = np.array([0., 1.])      # state (x and dx)

    fk.F = np.array([[1., 1.],
                     [0., 1.]])    # state transition matrix

    fk.H = np.array([[1., 0.]])    # measurement function
    fk.P = 10.                     # covariance matrix
    fk.R = noise                   # measurement uncertainty
    fk.Q = Q                       # process uncertainty

    # create noisy data
    zs = np.asarray([t + randn()*noise for t in range(40)])

    # filter data with the Kalman filter, then run the smoother on it
    mu, cov, _, _ = fk.batch_filter(zs)
    M, P, C, _ = fk.rts_smoother(mu, cov)

    # plot data
    index = 1 if show_velocity else 0
    if not show_velocity:
        bp.plot_measurements(zs, lw=1)
    plt.plot(M[:, index], c='b', label='RTS')
    plt.plot(mu[:, index], c='g', ls='--', label='KF output')
    if not show_velocity:
        N = len(zs)
        plt.plot([0, N], [0, N], 'k', lw=2, label='track')
    plt.legend(loc=4)
    plt.show()

plot_rts(7.)
```

I've injected a lot of noise into the signal to allow you to visually distinguish the RTS output from the ideal output.
In the graph above we can see that the Kalman filter, drawn as the green dotted line, is reasonably smooth compared to the input, but it still wanders from from the ideal line when several measurements in a row are biased towards one side of the line. In contrast, the RTS output is both extremely smooth and very close to the ideal output. With a perhaps more reasonable amount of noise we can see that the RTS output nearly lies on the ideal output. The Kalman filter output, while much better, still varies by a far greater amount. ``` plot_rts(noise=1.) ``` However, we must understand that this smoothing is predicated on the system model. We have told the filter that what we are tracking follows a constant velocity model with very low process error. When the filter *looks ahead* it sees that the future behavior closely matches a constant velocity so it is able to reject most of the noise in the signal. Suppose instead our system has a lot of process noise. For example, if we are tracking a light aircraft in gusty winds its velocity will change often, and the filter will be less able to distinguish between noise and erratic movement due to the wind. We can see this in the next graph. ``` plot_rts(noise=7., Q=.1) ``` This underscores the fact that these filters are not *smoothing* the data in colloquial sense of the term. The filter is making an optimal estimate based on previous measurements, future measurements, and what you tell it about the behavior of the system and the noise in the system and measurements. Let's wrap this up by looking at the velocity estimates of Kalman filter vs the RTS smoother. ``` plot_rts(7.,show_velocity=True) ``` The improvement in the velocity, which is an hidden variable, is even more dramatic. ## Fixed-Lag Smoothing The RTS smoother presented above should always be your choice of algorithm if you can run in batch mode because it incorporates all available data into each estimate. Not all problems allow you to do that, but you may still be interested in receiving smoothed values for previous estimates. The number line below illustrates this concept. ``` from kf_book.book_plots import figsize from kf_book.smoothing_internal import * with figsize(y=2): show_fixed_lag_numberline() ``` At step $k$ we can estimate $x_k$ using the normal Kalman filter equations. However, we can make a better estimate for $x_{k-1}$ by using the measurement received for $x_k$. Likewise, we can make a better estimate for $x_{k-2}$ by using the measurements recevied for $x_{k-1}$ and $x_{k}$. We can extend this computation back for an arbitrary $N$ steps. Derivation for this math is beyond the scope of this book; Dan Simon's *Optimal State Estimation* [2] has a very good exposition if you are interested. The essense of the idea is that instead of having a state vector $\mathbf{x}$ we make an augmented state containing $$\mathbf{x} = \begin{bmatrix}\mathbf{x}_k \\ \mathbf{x}_{k-1} \\ \vdots\\ \mathbf{x}_{k-N+1}\end{bmatrix}$$ This yields a very large covariance matrix that contains the covariance between states at different steps. FilterPy's class `FixedLagSmoother` takes care of all of this computation for you, including creation of the augmented matrices. All you need to do is compose it as if you are using the `KalmanFilter` class and then call `smooth()`, which implements the predict and update steps of the algorithm. Each call of `smooth` computes the estimate for the current measurement, but it also goes back and adjusts the previous `N-1` points as well. 
The smoothed values are contained in the list `FixedLagSmoother.xSmooth`. If you use `FixedLagSmoother.x` you will get the most recent estimate, but it is not smoothed and is no different from a standard Kalman filter output. ``` from filterpy.kalman import FixedLagSmoother, KalmanFilter import numpy.random as random fls = FixedLagSmoother(dim_x=2, dim_z=1, N=8) fls.x = np.array([0., .5]) fls.F = np.array([[1.,1.], [0.,1.]]) fls.H = np.array([[1.,0.]]) fls.P *= 200 fls.R *= 5. fls.Q *= 0.001 kf = KalmanFilter(dim_x=2, dim_z=1) kf.x = np.array([0., .5]) kf.F = np.array([[1.,1.], [0.,1.]]) kf.H = np.array([[1.,0.]]) kf.P *= 200 kf.R *= 5. kf.Q *= 0.001 N = 4 # size of lag nom = np.array([t/2. for t in range (0, 40)]) zs = np.array([t + random.randn()*5.1 for t in nom]) for z in zs: fls.smooth(z) kf_x, _, _, _ = kf.batch_filter(zs) x_smooth = np.array(fls.xSmooth)[:, 0] fls_res = abs(x_smooth - nom) kf_res = abs(kf_x[:, 0] - nom) plt.plot(zs,'o', alpha=0.5, marker='o', label='zs') plt.plot(x_smooth, label='FLS') plt.plot(kf_x[:, 0], label='KF', ls='--') plt.legend(loc=4) print('standard deviation fixed-lag: {:.3f}'.format(np.mean(fls_res))) print('standard deviation kalman: {:.3f}'.format(np.mean(kf_res))) ``` Here I have set `N=8` which means that we will incorporate 8 future measurements into our estimates. This provides us with a very smooth estimate once the filter converges, at the cost of roughly 8x the amount of computation of the standard Kalman filter. Feel free to experiment with larger and smaller values of `N`. I chose 8 somewhat at random, not due to any theoretical concerns. ## References [1] H. Rauch, F. Tung, and C. Striebel. "Maximum likelihood estimates of linear dynamic systems," *AIAA Journal*, **3**(8), pp. 1445-1450 (August 1965). [2] Dan Simon. "Optimal State Estimation," John Wiley & Sons, 2006. http://arc.aiaa.org/doi/abs/10.2514/3.3166
# Preparation

```
# Uncomment when pinning specific versions
#!pip install torch==1.7.0
#!pip install torchvision==0.8.1

import torch
import torchvision

# Check the versions
print(torch.__version__)
print(torchvision.__version__)

# Mount Google Drive
from google.colab import drive
drive.mount('/content/gdrive')

%cd '/content/gdrive/MyDrive/Colab Notebooks/gan_sample/chapter2'

import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optimizers
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
```

# Creating the dataset

```
np.random.seed(1234)
torch.manual_seed(1234)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Download the data
root = os.path.join('data', 'mnist')
transform = transforms.Compose([transforms.ToTensor(),
                                lambda x: x.view(-1)])
mnist_train = \
    torchvision.datasets.MNIST(root=root,
                               download=True,
                               train=True,
                               transform=transform)
mnist_test = \
    torchvision.datasets.MNIST(root=root,
                               download=True,
                               train=False,
                               transform=transform)

train_dataloader = DataLoader(mnist_train,
                              batch_size=100,
                              shuffle=True)
test_dataloader = DataLoader(mnist_test,
                             batch_size=1,
                             shuffle=False)
```

# Defining the network

```
class Autoencoder(nn.Module):
    def __init__(self, device='cpu'):
        super().__init__()
        self.device = device
        self.l1 = nn.Linear(784, 200)
        self.l2 = nn.Linear(200, 784)

    def forward(self, x):
        # Encoder
        h = self.l1(x)
        # Activation function
        h = torch.relu(h)

        # Decoder
        h = self.l2(h)
        # Sigmoid squashes the output into the 0-1 range
        y = torch.sigmoid(h)

        return y
```

# Running the training

```
# Set up the model
model = Autoencoder(device=device).to(device)
# Set up the loss function
criterion = nn.BCELoss()
# Set up the optimizer
optimizer = optimizers.Adam(model.parameters())

epochs = 10
# Loop over epochs
for epoch in range(epochs):
    train_loss = 0.
    # Loop over mini-batches
    for (x, _) in train_dataloader:
        x = x.to(device)
        # Switch to training mode
        model.train()
        # Forward pass
        preds = model(x)
        # Compute the error between the input image x and the reconstruction preds
        loss = criterion(preds, x)
        # Reset the gradients
        optimizer.zero_grad()
        # Backpropagate the error
        loss.backward()
        # Update the parameters
        optimizer.step()
        # Accumulate the training loss
        train_loss += loss.item()

    train_loss /= len(train_dataloader)

    print('Epoch: {}, Loss: {:.3f}'.format(
        epoch+1,
        train_loss
    ))
```

# Reconstructing images

```
# Take a sample from the dataloader
x, _ = next(iter(test_dataloader))
x = x.to(device)

# Switch to evaluation mode
model.eval()
# Reconstructed image
x_rec = model(x)

# Display the input and reconstructed images
for i, image in enumerate([x, x_rec]):
    image = image.view(28, 28).detach().cpu().numpy()
    plt.subplot(1, 2, i+1)
    plt.imshow(image, cmap='binary_r')
    plt.axis('off')
plt.show()
```
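The plot above is only a visual check of the reconstruction. As a quantitative follow-up, here is a minimal sketch that reuses the `model`, `criterion`, `test_dataloader`, and `device` defined above to compute the average reconstruction loss over the whole test set; this aggregation is my addition, not part of the original notebook.

```
# Average reconstruction (BCE) loss over the full test set
model.eval()
test_loss = 0.
with torch.no_grad():
    for (x, _) in test_dataloader:
        x = x.to(device)
        preds = model(x)
        test_loss += criterion(preds, x).item()
test_loss /= len(test_dataloader)
print('Test loss: {:.3f}'.format(test_loss))
```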
# One-step error probability Write a computer program implementing asynchronous deterministic updates for a Hopfield network. Use Hebb's rule with $w_{ii}=0$. Generate and store p=[12,24,48,70,100,120] random patterns with N=120 bits. Each bit is either +1 or -1 with probability $\tfrac{1}{2}$. For each value of ppp estimate the one-step error probability $P_{\text {error}}^{t=1}$ based on $10^5$ independent trials. Here, one trial means that you generate and store a set of p random patterns, feed one of them, and perform one asynchronous update of a single randomly chosen neuron. If in some trials you encounter sgn(0), simply set sgn(0)=1. List below the values of $P_{\text {error}}^{t=1}$ that you obtained in the following form: [$p_1,p_2,\ldots,p_{6}$], where $p_n$ is the value of $P_{\text {error}}^{t=1}$ for the n-th value of p from the list above. Give four decimal places for each $p_n$ ``` import numpy as np import time def calculate_instance( n, p, zero_diagonal): #Create p random patterns patterns = [] for i in range(p): patterns.append(np.random.choice([-1,1],n)) #Create weights matrix according to hebbs rule weights = patterns[0][:,None]*patterns[0] for el in patterns[1:]: weights = weights + el[:,None]*el weights = np.true_divide(weights, n) #Fill diagonal with zeroes if zero_diagonal: np.fill_diagonal(weights,0) #Feed random pattern as input and test if an error occurs S1 = patterns[0] chosen_i = np.random.choice(range(n)) S_i_old = S1[chosen_i] S_i = esign(np.dot(weights[chosen_i], S1)) #breakpoint() return S_i_old == S_i def esign(x): if(x == 0): return 1 else: return np.sign(x) ``` List your numerically computed $P_{\text {error}}^{t=1}$ for the parameters given above. ``` p = [12, 24, 48, 70, 100, 120] N = 120 I = 100000 for p_i in p: solve = [0,0] for i in range(I): ret = calculate_instance(N, p_i, True) if ret: solve[0]+=1 else: solve[1]+=1 p_error = float(solve[1]/I) print(f"Number of patterns: {p_i}, P_error(t=1): {p_error} ") ``` Repeat the task, but now apply Hebb's rule without setting the diagonal weights to zero. For each value of p listed above, estimate the one-step error probability $P_{\text {error}}^{t=1}$ based on $10^5$ independent trials. ``` p = [12, 24, 48, 70, 100, 120] N = 120 I = 100000 for p_i in p: solve = [0,0] for i in range(I): ret = calculate_instance(N, p_i, False) if ret: solve[0]+=1 else: solve[1]+=1 p_error = float(solve[1]/I) print(f"Number of patterns: {p_i}, P_error(t=1): {p_error} ") ```
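As a cross-check of the simulations, the measured error rates can be compared with the textbook signal-to-noise approximation for Hebbian storage with zero diagonal, which treats the cross-talk term as Gaussian and gives $P_{\text{error}}^{t=1} \approx \tfrac{1}{2}\left[1 - \operatorname{erf}\left(\sqrt{N/(2p)}\right)\right]$. The sketch below only evaluates that closed-form estimate; it is an approximation added for comparison, not part of the assignment.

```
from math import erf, sqrt

N = 120
for p_i in [12, 24, 48, 70, 100, 120]:
    p_theory = 0.5 * (1 - erf(sqrt(N / (2 * p_i))))
    print(f"Number of patterns: {p_i}, theoretical P_error(t=1): {p_theory:.4f}")
```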
# Acquiring Data from open repositories A crucial step in the work of a computational biologist is not only to analyse data, but acquiring datasets to analyse as well as toy datasets to test out computational methods and algorithms. The internet is full of such open datasets. Sometimes you have to sign up and make a user to get authentication, especially for medical data. This can sometimes be time consuming, so here we will deal with easy access resources, mostly of modest size. Multiple python libraries provide a `dataset` module which makes the effort to fetch online data extremely seamless, with little requirement for preprocessing. #### Goal of the notebook Here you will get familiar with some ways to fetch datasets from online. We do some data exploration on the data just for illustration, but the methods will be covered later. # Useful resources and links When playing around with algorithms, it can be practical to use relatively small datasets. A good example is the `datasets` submodule of `scikit-learn`. `Nilearn` (library for neuroimaging) also provides a collection of neuroimaging datasets. Many datasets can also be acquired through the competition website [Kaggle](https://www.kaggle.com), in which they describe how to access the data. ### Links - [OpenML](https://www.openml.org/search?type=data) - [Nilearn datasets](https://nilearn.github.io/modules/reference.html#module-nilearn.datasets) - [Sklearn datasets](https://scikit-learn.org/stable/modules/classes.html?highlight=datasets#module-sklearn.datasets) - [Kaggle](https://www.kaggle.com/datasets) - [MEDNIST] - [**Awesomedata**](https://github.com/awesomedata/awesome-public-datasets) - We strongly recommend to check out the Awesomedata lists of public datasets, covering topics such as [biology/medicine](https://github.com/awesomedata/awesome-public-datasets#biology) and [neuroscience](https://github.com/awesomedata/awesome-public-datasets#neuroscience) - [Papers with code](https://paperswithcode.com) - [SNAP](https://snap.stanford.edu/data/) - Stanford Large Network Dataset Collection - [Open Graph Benchmark (OGB)](https://github.com/snap-stanford/ogb) - Network datasets - [Open Neuro](https://openneuro.org/) - [Open fMRI](https://openfmri.org/dataset/) ``` # import basic libraries import numpy as np import pandas as pd from matplotlib import pyplot as plt ``` We start with scikit-learn's datasets for testing out ML algorithms. Visit [here](https://scikit-learn.org/stable/modules/classes.html?highlight=datasets#module-sklearn.datasets) for an overview of the datasets. ``` from sklearn.datasets import fetch_olivetti_faces, fetch_20newsgroups, load_breast_cancer, load_diabetes, load_digits, load_iris ``` Load the MNIST dataset (images of hand written digits) ``` X,y = load_digits(return_X_y=True) y.shape X.shape #1797 images, 64 pixels per image ``` #### exercise 1. Make a function `plot` taking an argument (k) to visualize the k'th sample. It is currently flattened, you will need to reshape it. Use `plt.imshow` for plotting. ``` # %load solutions/ex2_1.py def plot(k): plt.imshow(X[k].reshape(8,8), cmap='gray') plt.title(f"Number = {y[k]}") plt.show() plot(15); plot(450) faces = fetch_olivetti_faces() ``` #### Exercise 2. Inspect the dataset. How many classes are there? How many samples per class? Also, plot some examples. What do the classes represent? ``` # %load solutions/ex2_2.py # example solution. 
# You are not expected to make a nice plotting function, # you can simply call plt.imshow a number of times and observe print(faces.DESCR) # this shows there are 40 classes, 10 samples per class print(faces.target) #the targets i.e. classes print(np.unique(faces.target).shape) # another way to see n_classes X = faces.images y = faces.target fig = plt.figure(figsize=(16,5)) idxs = [0,1,2, 11,12,13, 40,41] for i,k in enumerate(idxs): ax=fig.add_subplot(2,4,i+1) ax.imshow(X[k]) ax.set_title(f"target={y[k]}") # looking at a few plots shows that each target is a single person. ``` Once you have made yourself familiar with the dataset you can do some data exploration with unsupervised methods, like below. The next few lines of code are simply for illustration, don't worry about the code (we will cover unsupervised methods in submodule F). ``` from sklearn.decomposition import randomized_svd X = faces.data n_dim = 3 u, s, v = randomized_svd(X, n_dim) ``` Now we have factorized the images into their constituent parts. The code below displays the various components isolated one by one. ``` def show_ims(ims): fig = plt.figure(figsize=(16,10)) idxs = [0,1,2, 11,12,13, 40,41,42, 101,101,103] for i,k in enumerate(idxs): ax=fig.add_subplot(3,4,i+1) ax.imshow(ims[k]) ax.set_title(f"target={y[k]}") for i in range(n_dim): my_s = np.zeros(s.shape[0]) my_s[i] = s[i] recon = u@np.diag(my_s)@v recon = recon.reshape(400,64,64) show_ims(recon) ``` Are you able to see what the components represent? It at least looks like the second component signifies the lightning (the light direction), the third highlights eyebrows and facial chin shape. ``` from sklearn.manifold import TSNE tsne = TSNE(init='pca', random_state=0) trans = tsne.fit_transform(X) m = 8*10 # choose 4 people plt.figure(figsize=(16,10)) xs, ys = trans[:m,0], trans[:m,1] plt.scatter(xs, ys, c=y[:m], cmap='rainbow') for i,v in enumerate(zip(xs,ys, y[:m])): xx,yy,s = v #plt.text(xx,yy,s) #class plt.text(xx,yy,i) #index ``` Many people seem to have multiple subclusters. What is the difference between those clusters? (e.g. 68,62,65 versus the other 60's) ``` ims = faces.images idxs = [68,62,65,66,60,64,63] #idxs = [9,4,1, 5,3] for k in idxs: plt.imshow(ims[k], cmap='gray') plt.show() def show(im): return plt.imshow(im, cmap='gray') import pandas as pd df= pd.read_csv('data/archive/covid_impact_on_airport_traffic.csv') df.shape df.describe() df.head() df.Country.unique() df.ISO_3166_2.unique() df.AggregationMethod.unique() ``` Here we will look at [OpenML](https://www.openml.org/) - a repository of open datasets free to explore data and test methods. ### Fetching an OpenML dataset We need to pass in an ID to access, as follows: ``` from sklearn.datasets import fetch_openml ``` OpenML contains all sorts of datatypes. 
By browsing the website we found a electroencephalography (EEG) dataset to explore: ``` data_id = 1471 #this was found by browsing OpenML dataset = fetch_openml(data_id=data_id, as_frame=True) dir(dataset) dataset.url type(dataset) print(dataset.DESCR) original_names = ['AF3', 'F7', 'F3', 'FC5', 'T7', 'P', 'O1', 'O2', 'P8', 'T8', 'FC6', 'F4', 'F8', 'AF4'] dataset.feature_names df = dataset.frame df.head() df.shape[0] / 117 # 128 frames per second df = dataset.frame y = df.Class #df.drop(columns='Class', inplace=True) df.dtypes #def summary(s): # print(s.max(), s.min(), s.mean(), s.std()) # print() # #for col in df.columns[:-1]: # column = df.loc[:,col] # summary(column) df.plot() ``` From the plot we can quickly identify a bunch of huge outliers, making the plot look completely uselss. We assume these are artifacts, and remove them. ``` df2 = df.iloc[:,:-1].clip_upper(6000) df2.plot() ``` Now we see better what is going on. Lets just remove the frames corresponding to those outliers ``` frames = np.nonzero(np.any(df.iloc[:,:-1].values>5000, axis=1))[0] frames df.drop(index=frames, inplace=True) df.plot(figsize=(16,8)) plt.legend(labels=original_names) df.columns ``` ### Do some modelling of the data ``` from sklearn.linear_model import LogisticRegression lasso = LogisticRegression(penalty='l2') X = df.values[:,:-1] y = df.Class y = y.astype(np.int) - 1 # map to 0,1 print(X.shape) print(y.shape) lasso.fit(X,y) comp = (lasso.predict(X) == y).values np.sum(comp.astype(np.int))/y.shape[0] # shitty accuracy lasso.coef_[0].shape names = dataset.feature_names original_names coef = lasso.coef_[0] plt.barh(range(coef.shape[0]), coef) plt.yticks(ticks=range(14),labels=original_names) plt.show() ``` Interpreting the coeficients: we naturally tend to read the magnitude of the coefficients as feature importance. That is a fair interpretation, but currently we did not scale our features to a comparable range prior to fittting the model, so we cannot draw that conclusion. ### Extra exercise. Go to [OpenML](https://openml.org) and use the search function (or just look around) to find any dataset that interest you. Load it using the above methodology, and try to do anything you can to understand the datatype, visualize it etc. ``` ### YOUR CODE HERE ```
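As a starting point for the exercise, here is a minimal sketch of the same methodology using `fetch_openml` with a dataset name instead of a numeric ID; the name and version below are placeholder choices, so swap in whatever dataset you found on OpenML.

```
# Placeholder example -- replace name/version with the dataset you chose
from sklearn.datasets import fetch_openml

my_dataset = fetch_openml(name="iris", version=1, as_frame=True)
print(my_dataset.url)

my_df = my_dataset.frame
print(my_df.shape)
my_df.head()
```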
# Code Review #1 Purpose: To introduce the group to looking at code analytically Created By: Hawley Helmbrecht Creation Date: 10-12-21 # Introduction to Analyzing Code All snipets within this section are taken from the Hitchhiker's Guide to Python (https://docs.python-guide.org/writing/style/) ### Example 1: Explicit Code ``` def make_complex(*args): x, y = args return dict(**locals()) def make_complex(x, y): return {'x': x, 'y': y} ``` ### Example 2: One Statement per Line ``` print('one'); print('two') if x == 1: print('one') if <complex comparison> and <other complex comparison>: # do something print('one') print('two') if x == 1: print('one') cond1 = <complex comparison> cond2 = <other complex comparison> if cond1 and cond2: # do something ``` ## Intro to Pep 8 Example 1: Limit all lines to a maximum of 79 characters. ``` #Wrong: income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) #Correct: income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) ``` Example 2: Line breaks around binary operators ``` # Wrong: # operators sit far away from their operands income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) # Correct: # easy to match operators with operands income = (gross_wages + taxable_interest + (dividends - qualified_dividends) - ira_deduction - student_loan_interest) ``` Example 3: Import formatting ``` # Correct: import os import sys # Wrong: import sys, os ``` ## Let's look at some code! Sci-kit images Otsu Threshold code! (https://github.com/scikit-image/scikit-image/blob/main/skimage/filters/thresholding.py) ``` def threshold_otsu(image=None, nbins=256, *, hist=None): """Return threshold value based on Otsu's method. Either image or hist must be provided. If hist is provided, the actual histogram of the image is ignored. Parameters ---------- image : (N, M[, ..., P]) ndarray, optional Grayscale input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. hist : array, or 2-tuple of arrays, optional Histogram from which to determine the threshold, and optionally a corresponding array of bin center intensities. If no hist provided, this function will compute it from the image. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. References ---------- .. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_otsu(image) >>> binary = image <= thresh Notes ----- The input image must be grayscale. 
""" if image is not None and image.ndim > 2 and image.shape[-1] in (3, 4): warn(f'threshold_otsu is expected to work correctly only for ' f'grayscale images; image shape {image.shape} looks like ' f'that of an RGB image.') # Check if the image has more than one intensity value; if not, return that # value if image is not None: first_pixel = image.ravel()[0] if np.all(image == first_pixel): return first_pixel counts, bin_centers = _validate_image_histogram(image, hist, nbins) # class probabilities for all possible thresholds weight1 = np.cumsum(counts) weight2 = np.cumsum(counts[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(counts * bin_centers) / weight1 mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of ``weight1``/``mean1`` should pair with zero values in # ``weight2``/``mean2``, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 idx = np.argmax(variance12) threshold = bin_centers[idx] return threshold ``` What do you observe about the code that makes it pythonic? ``` Do the pythonic conventions make it easier to understand? ``` How is the documentation on this function?
**Recursion and Higher Order Functions** Today we're tackling recursion, and touching on higher-order functions in Python. A **recursive** function is one that calls itself. A classic example: the Fibonacci sequence. The Fibonacci sequence was originally described to model population growth, and is self-referential in its definition. The nth Fib number is defined in terms of the previous two: - F(n) = F(n-1) + F(n-2) - F(1) = 0 - F(2) = 1 Another classic example: Factorial: - n! = n(n-1)(n-2)(n-3) ... 1 or: - n! = n*(n-1)! Let's look at an implementation of the factorial and of the Fibonacci sequence in Python: ``` def factorial(n): if n == 1: return 1 else: return n*factorial(n-1) print(factorial(5)) def fibonacci(n): if n == 1: return 0 elif n == 2: return 1 else: # print('working on number ' + str(n)) return fibonacci(n-1)+fibonacci(n-2) fibonacci(7) ``` There are two very important parts of these functions: a base case (or two) and a recursive case. When designing recursive functions it can help to think about these two cases! The base case is the case when we know we are done, and can just return a value. (e.g. in fibonacci above there are two base cases, `n ==1` and `n ==2`). The recursive case is the case when we make the recursive call - that is we call the function again. Let's write a function that counts down from a parameter n to zero, and then prints "Blastoff!". ``` def countdown(n): # base case if n == 0: print('Blastoff!') # recursive case else: print(n) countdown(n-1) countdown(10) ``` Let's write a recursive function that adds up the elements of a list: ``` def add_up_list(my_list): # base case if len(my_list) == 0: return 0 # recursive case else: first_elem = my_list[0] return first_elem + add_up_list(my_list[1:]) my_list = [1, 2, 1, 3, 4] print(add_up_list(my_list)) ``` **Higher-order functions** are functions that takes a function as an argument or returns a function. We will talk briefly about functions that take a function as an argument. Let's look at an example. ``` def h(x): return x+4 def g(x): return x**2 def doItTwice(f, x): return f(f(x)) print(doItTwice(h, 3)) print(doItTwice(g, 3)) ``` A common reason for using a higher-order function is to apply a parameter-specified function repeatedly over a data structure (like a list or a dictionary). Let's look at an example function that applies a parameter function to every element of a list: ``` def sampleFunction1(x): return 2*x def sampleFunction2(x): return x % 2 def applyToAll(func, myList): newList = [] for element in myList: newList.append(func(element)) return newList aList = [2, 3, 4, 5] print(applyToAll(sampleFunction1, aList)) print(applyToAll(sampleFunction2, aList)) ``` Something like this applyToAll function is built into Python, and is called map ``` def sampleFunction1(x): return 2*x def sampleFunction2(x): return x % 2 aList = [2, 3, 4, 5] print(list(map(sampleFunction1, aList))) bList = [2, 3, 4, 5] print(list(map(sampleFunction2, aList))) ``` Python has quite a few built-in functions (some higher-order, some not). You can find lots of them here: https://docs.python.org/3.3/library/functions.html (I **will not** by default require you to remember those for an exam!!) Example: zip does something that may be familiar from last week's lab. ``` x = [1, 2, 3] y = [4, 5, 6] zipped = zip(x, y) print(list(zipped)) ```
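Bringing the two ideas together: `functools.lru_cache` is itself a higher-order function (a decorator that takes a function and returns a wrapped one), and applying it to the recursive `fibonacci` avoids recomputing the same subproblems exponentially many times. A small sketch:

```
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci(n):
    if n == 1:
        return 0
    elif n == 2:
        return 1
    else:
        return fibonacci(n-1) + fibonacci(n-2)

# Each distinct n is now computed only once, so even large n return instantly.
print(fibonacci(100))
```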
# Introduction to `pandas` ``` import numpy as np import pandas as pd ``` ## Series and Data Frames ### Series objects A `Series` is like a vector. All elements must have the same type or are nulls. ``` s = pd.Series([1,1,2,3] + [None]) s ``` ### Size ``` s.size ``` ### Unique Counts ``` s.value_counts() ``` ### Special types of series #### Strings ``` words = 'the quick brown fox jumps over the lazy dog'.split() s1 = pd.Series([' '.join(item) for item in zip(words[:-1], words[1:])]) s1 s1.str.upper() s1.str.split() s1.str.split().str[1] ``` ### Categories ``` s2 = pd.Series(['Asian', 'Asian', 'White', 'Black', 'White', 'Hispanic']) s2 s2 = s2.astype('category') s2 s2.cat.categories s2.cat.codes ``` ### DataFrame objects A `DataFrame` is like a matrix. Columns in a `DataFrame` are `Series`. - Each column in a DataFrame represents a **variale** - Each row in a DataFrame represents an **observation** - Each cell in a DataFrame represents a **value** ``` df = pd.DataFrame(dict(num=[1,2,3] + [None])) df df.num ``` ### Index Row and column identifiers are of `Index` type. Somewhat confusingly, index is also a a synonym for the row identifiers. ``` df.index ``` #### Setting a column as the row index ``` df df1 = df.set_index('num') df1 ``` #### Making an index into a column ``` df1.reset_index() ``` ### Columns This is just a different index object ``` df.columns ``` ### Getting raw values Sometimes you just want a `numpy` array, and not a `pandas` object. ``` df.values ``` ## Creating Data Frames ### Manual ``` from collections import OrderedDict n = 5 dates = pd.date_range(start='now', periods=n, freq='d') df = pd.DataFrame(OrderedDict(pid=np.random.randint(100, 999, n), weight=np.random.normal(70, 20, n), height=np.random.normal(170, 15, n), date=dates, )) df ``` ### From file You can read in data from many different file types - plain text, JSON, spreadsheets, databases etc. Functions to read in data look like `read_X` where X is the data type. ``` %%file measures.txt pid weight height date 328 72.654347 203.560866 2018-11-11 14:16:18.148411 756 34.027679 189.847316 2018-11-12 14:16:18.148411 185 28.501914 158.646074 2018-11-13 14:16:18.148411 507 17.396343 180.795993 2018-11-14 14:16:18.148411 919 64.724301 173.564725 2018-11-15 14:16:18.148411 df = pd.read_table('measures.txt') df ``` ## Indexing Data Frames ### Implicit defaults if you provide a slice, it is assumed that you are asking for rows. ``` df[1:3] ``` If you provide a singe value or list, it is assumed that you are asking for columns. ``` df[['pid', 'weight']] ``` ### Extracting a column #### Dictionary style access ``` df['pid'] ``` #### Property style access This only works for column names tat are also valid Python identifier (i.e., no spaces or dashes or keywords) ``` df.pid ``` ### Indexing by location This is similar to `numpy` indexing ``` df.iloc[1:3, :] df.iloc[1:3, [True, False, True]] ``` ### Indexing by name ``` df.loc[1:3, 'weight':'height'] ``` **Warning**: When using `loc`, the row slice indicates row names, not positions. 
``` df1 = df.copy() df1.index = df.index + 1 df1 df1.loc[1:3, 'weight':'height'] ``` ## Structure of a Data Frame ### Data types ``` df.dtypes ``` ### Converting data types #### Using `astype` on one column ``` df.pid = df.pid.astype('category') ``` #### Using `astype` on multiple columns ``` df = df.astype(dict(weight=float, height=float)) ``` #### Using a conversion function ``` df.date = pd.to_datetime(df.date) ``` #### Check ``` df.dtypes ``` ### Basic properties ``` df.size df.shape df.describe() ``` ### Inspection ``` df.head(n=3) df.tail(n=3) df.sample(n=3) df.sample(frac=0.5) ``` ## Selecting, Renaming and Removing Columns ### Selecting columns ``` df.filter(items=['pid', 'date']) df.filter(regex='.*ght') ``` #### Note that you can also use regular string methods on the columns ``` df.loc[:, df.columns.str.contains('d')] ``` ### Renaming columns ``` df.rename(dict(weight='w', height='h'), axis=1) orig_cols = df.columns df.columns = list('abcd') df df.columns = orig_cols df ``` ### Removing columns ``` df.drop(['pid', 'date'], axis=1) df.drop(columns=['pid', 'date']) df.drop(columns=df.columns[df.columns.str.contains('d')]) ``` ## Selecting, Renaming and Removing Rows ### Selecting rows ``` df[df.weight.between(60,70)] df[(69 <= df.weight) & (df.weight < 70)] df[df.date.between(pd.to_datetime('2018-11-13'), pd.to_datetime('2018-11-15 23:59:59'))] ``` ### Renaming rows ``` df.rename({i:letter for i,letter in enumerate('abcde')}) df.index = ['the', 'quick', 'brown', 'fox', 'jumphs'] df df = df.reset_index(drop=True) df ``` ### Dropping rows ``` df.drop([1,3], axis=0) ``` #### Dropping duplicated data ``` df['something'] = [1,1,None,2,None] df.loc[df.something.duplicated()] df.drop_duplicates(subset='something') ``` #### Dropping missing data ``` df df.something.fillna(0) df.something.ffill() df.something.bfill() df.something.interpolate() df.dropna() ``` ## Transforming and Creating Columns ``` df.assign(bmi=df['weight'] / (df['height']/100)**2) df['bmi'] = df['weight'] / (df['height']/100)**2 df df['something'] = [2,2,None,None,3] df ``` ## Sorting Data Frames ### Sort on indexes ``` df.sort_index(axis=1) df.sort_index(axis=0, ascending=False) ``` ### Sort on values ``` df.sort_values(by=['something', 'bmi'], ascending=[True, False]) ``` ## Summarizing ### Apply an aggregation function ``` df.select_dtypes(include=np.number) df.select_dtypes(include=np.number).agg(np.sum) df.agg(['count', np.sum, np.mean]) ``` ## Split-Apply-Combine We often want to perform subgroup analysis (conditioning by some discrete or categorical variable). This is done with `groupby` followed by an aggregate function. Conceptually, we split the data frame into separate groups, apply the aggregate function to each group separately, then combine the aggregated results back into a single data frame. ``` df['treatment'] = list('ababa') df grouped = df.groupby('treatment') grouped.get_group('a') grouped.mean() ``` ### Using `agg` with `groupby` ``` grouped.agg('mean') grouped.agg(['mean', 'std']) grouped.agg({'weight': ['mean', 'std'], 'height': ['min', 'max'], 'bmi': lambda x: (x**2).sum()}) ``` ### Using `trasnform` wtih `groupby` ``` g_mean = grouped['weight', 'height'].transform(np.mean) g_mean g_std = grouped['weight', 'height'].transform(np.std) g_std (df[['weight', 'height']] - g_mean)/g_std ``` ## Combining Data Frames ``` df df1 = df.iloc[3:].copy() df1.drop('something', axis=1, inplace=True) df1 ``` ### Adding rows Note that `pandas` aligns by column indexes automatically. 
``` df.append(df1, sort=False) pd.concat([df, df1], sort=False) ``` ### Adding columns ``` df.pid df2 = pd.DataFrame(OrderedDict(pid=[649, 533, 400, 600], age=[23,34,45,56])) df2.pid df.pid = df.pid.astype('int') pd.merge(df, df2, on='pid', how='inner') pd.merge(df, df2, on='pid', how='left') pd.merge(df, df2, on='pid', how='right') pd.merge(df, df2, on='pid', how='outer') ``` ### Merging on the index ``` df1 = pd.DataFrame(dict(x=[1,2,3]), index=list('abc')) df2 = pd.DataFrame(dict(y=[4,5,6]), index=list('abc')) df3 = pd.DataFrame(dict(z=[7,8,9]), index=list('abc')) df1 df2 df3 df1.join([df2, df3]) ``` ## Fixing common DataFrame issues ### Multiple variables in a column ``` df = pd.DataFrame(dict(pid_treat = ['A-1', 'B-2', 'C-1', 'D-2'])) df df.pid_treat.str.split('-') df.pid_treat.str.split('-').apply(pd.Series, index=['pid', 'treat']) ``` ### Multiple values in a cell ``` df = pd.DataFrame(dict(pid=['a', 'b', 'c'], vals = [(1,2,3), (4,5,6), (7,8,9)])) df df[['t1', 't2', 't3']] = df.vals.apply(pd.Series) df df.drop('vals', axis=1, inplace=True) pd.melt(df, id_vars='pid', value_name='vals').drop('variable', axis=1) ``` ## Reshaping Data Frames Sometimes we need to make rows into columns or vice versa. ### Converting multiple columns into a single column This is often useful if you need to condition on some variable. ``` url = 'https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv' iris = pd.read_csv(url) iris.head() iris.shape df_iris = pd.melt(iris, id_vars='species') df_iris.sample(10) ``` ## Chaining commands Sometimes you see this functional style of method chaining that avoids the need for temporary intermediate variables. ``` ( iris. sample(frac=0.2). filter(regex='s.*'). assign(both=iris.sepal_length + iris.sepal_length). groupby('species').agg(['mean', 'sum']). pipe(lambda x: np.around(x, 1)) ) ``` ## Moving between R and Python in Jupyter ``` %load_ext rpy2.ipython import warnings warnings.simplefilter('ignore', FutureWarning) iris = %R iris iris.head() iris_py = iris.copy() iris_py.Species = iris_py.Species.str.upper() %%R -i iris_py -o iris_r iris_r <- iris_py[1:3,] iris_r ```
# SLU13: Bias-Variance trade-off & Model Selection -- Examples --- <a id='top'></a> ### 1. Model evaluation * a. [Train-test split](#traintest) * b. [Train-val-test split](#val) * c. [Cross validation](#crossval) ### 2. [Learning curves](#learningcurves) # 1. Model evaluation ``` import matplotlib.pyplot as plt import pandas as pd import numpy as np from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import learning_curve %matplotlib inline # Create the DataFrame with the data df = pd.read_csv("data/beer.csv") # Create a DataFrame with the features (X) and labels (y) X = df.drop(["IsIPA"], axis=1) y = df["IsIPA"] print("Number of entries: ", X.shape[0]) ``` <a id='traintest'></a> [Return to top](#top) ## Create a training and a test set ``` from sklearn.model_selection import train_test_split # Using 20 % of the data as test set X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2) print("Number of training entries: ", X_train.shape[0]) print("Number of test entries: ", X_test.shape[0]) ``` <a id='val'></a> [Return to top](#top) ## Create a training, test and validation set ``` # Using 20 % as test set and 20 % as validation set X_train, X_temp, y_train, y_temp = train_test_split(X, y, test_size=0.4) X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.50) print("Number of training entries: ", X_train.shape[0]) print("Number of validation entries: ", X_val.shape[0]) print("Number of test entries: ", X_test.shape[0]) ``` <a id='crossval'></a> [Return to top](#top) ## Use cross-validation (using a given classifier) ``` from sklearn.model_selection import cross_val_score knn = KNeighborsClassifier(n_neighbors=5) # Use cv to specify the number of folds scores = cross_val_score(knn, X, y, cv=5) print(f"Mean of scores: {scores.mean():.3f}") print(f"Variance of scores: {scores.var():.3f}") ``` <a id='learningcurves'></a> [Return to top](#top) # 2. Learning Curves Here is the function that is taken from the sklearn page on learning curves: ``` def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)): """ Generate a simple plot of the test and training learning curve. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. title : string Title for the chart. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. ylim : tuple, shape (ymin, ymax), optional Defines minimum and maximum yvalues plotted. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used. If the estimator is not a classifier or if ``y`` is neither binary nor multiclass, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validators that can be used here. 
n_jobs : integer, optional Number of jobs to run in parallel (default 1). """ plt.figure() plt.title(title) if ylim is not None: plt.ylim(*ylim) plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = learning_curve( estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Test Set score") plt.legend(loc="best") return plt # and this is how we used it X = df.select_dtypes(exclude='object').fillna(-1).drop('IsIPA', axis=1) y = df.IsIPA clf = DecisionTreeClassifier(random_state=1, max_depth=5) plot_learning_curve(X=X, y=y, estimator=clf, title='DecisionTreeClassifier'); ``` And remember the internals of what this function is actually doing by knowing how to use the output of the scikit [learning_curve](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.learning_curve.html) function ``` # here's where the magic happens! The learning curve function is going # to take your classifier and your training data and subset the data train_sizes, train_scores, test_scores = learning_curve(clf, X, y) # 5 different training set sizes have been selected # with the smallest being 59 and the largest being 594 # the remaining is used for testing print('train set sizes', train_sizes) print('test set sizes', X.shape[0] - train_sizes) # each row corresponds to a training set size # each column corresponds to a cross validation fold # the first row is the highest because it corresponds # to the smallest training set which means that it's very # easy for the classifier to overfit and have perfect # test set predictions while as the test set grows it # becomes a bit more difficult for this to happen. train_scores # The test set scores where again, each row corresponds # to a train / test set size and each column is a differet # run with the same train / test sizes test_scores # Let's average the scores across each fold so that we can plot them train_scores_mean = np.mean(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) # this one isn't quite as cool as the other because it doesn't show the variance # but the fundamentals are still here and it's a much simpler one to understand learning_curve_df = pd.DataFrame({ 'Training score': train_scores_mean, 'Test Set score': test_scores_mean }, index=train_sizes) plt.figure() plt.ylabel("Score") plt.xlabel("Training examples") plt.title('Learning Curve') plt.plot(learning_curve_df); plt.legend(learning_curve_df.columns, loc="best"); ```
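Learning curves vary the amount of training data for a fixed model; for model selection it is often just as informative to hold the data fixed and vary a hyperparameter. The sketch below uses scikit-learn's `validation_curve` on the same beer data; the choice of `max_depth` as the parameter and its range are assumptions for illustration, not part of the original unit.

```
from sklearn.model_selection import validation_curve

param_range = list(range(1, 11))
train_scores, test_scores = validation_curve(
    DecisionTreeClassifier(random_state=1), X, y,
    param_name="max_depth", param_range=param_range, cv=5)

plt.plot(param_range, train_scores.mean(axis=1), 'o-', label="Training score")
plt.plot(param_range, test_scores.mean(axis=1), 'o-', label="Cross-validation score")
plt.xlabel("max_depth")
plt.ylabel("Score")
plt.legend(loc="best");
```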
# Phi_K advanced tutorial This notebook guides you through the more advanced functionality of the phik package. This notebook will not cover all the underlying theory, but will just attempt to give an overview of all the options that are available. For a theoretical description the user is referred to our paper. The package offers functionality on three related topics: 1. Phik correlation matrix 2. Significance matrix 3. Outlier significance matrix ``` %%capture # install phik (if not installed yet) import sys !"{sys.executable}" -m pip install phik # import standard packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import itertools import phik from phik import resources from phik.binning import bin_data from phik.decorators import * from phik.report import plot_correlation_matrix %matplotlib inline # if one changes something in the phik-package one can automatically reload the package or module %load_ext autoreload %autoreload 2 ``` # Load data A simulated dataset is part of the phik-package. The dataset concerns car insurance data. Load the dataset here: ``` data = pd.read_csv( resources.fixture('fake_insurance_data.csv.gz') ) data.head() ``` ## Specify bin types The phik-package offers a way to calculate correlations between variables of mixed types. Variable types can be inferred automatically although we recommend to variable types to be specified by the user. Because interval type variables need to be binned in order to calculate phik and the significance, a list of interval variables is created. ``` data_types = {'severity': 'interval', 'driver_age':'interval', 'satisfaction':'ordinal', 'mileage':'interval', 'car_size':'ordinal', 'car_use':'ordinal', 'car_color':'categorical', 'area':'categorical'} interval_cols = [col for col, v in data_types.items() if v=='interval' and col in data.columns] interval_cols # interval_cols is used below ``` # Phik correlation matrix Now let's start calculating the correlation phik between pairs of variables. Note that the original dataset is used as input, the binning of interval variables is done automatically. ``` phik_overview = data.phik_matrix(interval_cols=interval_cols) phik_overview ``` ### Specify binning per interval variable Binning can be set per interval variable individually. One can set the number of bins, or specify a list of bin edges. Note that the measured phik correlation is dependent on the chosen binning. The default binning is uniform between the min and max values of the interval variable. ``` bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]} phik_overview = data.phik_matrix(interval_cols=interval_cols, bins=bins) phik_overview ``` ### Do not apply noise correction For low statistics samples often a correlation larger than zero is measured when no correlation is actually present in the true underlying distribution. This is not only the case for phik, but also for the pearson correlation and Cramer's phi (see figure 4 in <font color='red'> XX </font>). In the phik calculation a noise correction is applied by default, to take into account erroneous correlation values as a result of low statistics. To switch off this noise cancellation (not recommended), do: ``` phik_overview = data.phik_matrix(interval_cols=interval_cols, noise_correction=False) phik_overview ``` ### Using a different expectation histogram By default phik compares the 2d distribution of two (binned) variables with the distribution that assumes no dependency between them. One can also change the expected distribution though. 
Phi_K is calculated in the same way, but using the other expectation distribution. ``` from phik.binning import auto_bin_data from phik.phik import phik_observed_vs_expected_from_rebinned_df, phik_from_hist2d from phik.statistics import get_dependent_frequency_estimates # get observed 2d histogram of two variables cols = ["mileage", "car_size"] icols = ["mileage"] observed = data[cols].hist2d(interval_cols=icols).values # default phik evaluation from observed distribution phik_value = phik_from_hist2d(observed) print (phik_value) # phik evaluation from an observed and expected distribution expected = get_dependent_frequency_estimates(observed) phik_value = phik_from_hist2d(observed=observed, expected=expected) print (phik_value) # one can also compare two datasets against each other, and get a full phik matrix that way. # this needs binned datasets though. # (the user needs to make sure the binnings of both datasets are identical.) data_binned, _ = auto_bin_data(data, interval_cols=interval_cols) # here we are comparing data_binned against itself phik_matrix = phik_observed_vs_expected_from_rebinned_df(data_binned, data_binned) # all off-diagonal entries are zero, meaning the all 2d distributions of both datasets are identical. # (by construction the diagonal is one.) phik_matrix ``` # Statistical significance of the correlation When assessing correlations it is good practise to evaluate both the correlation and the significance of the correlation: a large correlation may be statistically insignificant, and vice versa a small correlation may be very significant. For instance, scipy.stats.pearsonr returns both the pearson correlation and the p-value. Similarly, the phik package offers functionality the calculate a significance matrix. Significance is defined as: $$Z = \Phi^{-1}(1-p)\ ;\quad \Phi(z)=\frac{1}{\sqrt{2\pi}} \int_{-\infty}^{z} e^{-t^{2}/2}\,{\rm d}t $$ Several corrections to the 'standard' p-value calculation are taken into account, making the method more robust for low statistics and sparse data cases. The user is referred to our paper for more details. Due to the corrections, the significance calculation can take a few seconds. ``` significance_overview = data.significance_matrix(interval_cols=interval_cols) significance_overview ``` ### Specify binning per interval variable Binning can be set per interval variable individually. One can set the number of bins, or specify a list of bin edges. Note that the measure phik correlation is dependent on the chosen binning. ``` bins = {'mileage':5, 'driver_age':[18,25,35,45,55,65,125]} significance_overview = data.significance_matrix(interval_cols=interval_cols, bins=bins) significance_overview ``` ### Specify significance method The recommended method to calculate the significance of the correlation is a hybrid approach, which uses the G-test statistic. The number of degrees of freedom and an analytical, empirical description of the $\chi^2$ distribution are sed, based on Monte Carlo simulations. This method works well for both high as low statistics samples. Other approaches to calculate the significance are implemented: - asymptotic: fast, but over-estimates the number of degrees of freedom for low statistics samples, leading to erroneous values of the significance - MC: Many simulated samples are needed to accurately measure significances larger than 3, making this method computationally expensive. 
``` significance_overview = data.significance_matrix(interval_cols=interval_cols, significance_method='asymptotic') significance_overview ``` ### Simulation method The chi2 of a contingency table is measured using a comparison of the expected frequencies with the true frequencies in a contingency table. The expected frequencies can be simulated in a variety of ways. The following methods are implemented: - multinominal: Only the total number of records is fixed. (default) - row_product_multinominal: The row totals fixed in the sampling. - col_product_multinominal: The column totals fixed in the sampling. - hypergeometric: Both the row or column totals are fixed in the sampling. (Note that this type of sampling is only available when row and column totals are integers, which is usually the case.) ``` # --- Warning, can be slow # turned off here by default for unit testing purposes #significance_overview = data.significance_matrix(interval_cols=interval_cols, simulation_method='hypergeometric') #significance_overview ``` ### Expected frequencies ``` from phik.simulation import sim_2d_data_patefield, sim_2d_product_multinominal, sim_2d_data inputdata = data[['driver_age', 'area']].hist2d(interval_cols=['driver_age']) inputdata ``` #### Multinominal ``` simdata = sim_2d_data(inputdata.values) print('data total:', inputdata.sum().sum()) print('sim total:', simdata.sum().sum()) print('data row totals:', inputdata.sum(axis=0).values) print('sim row totals:', simdata.sum(axis=0)) print('data column totals:', inputdata.sum(axis=1).values) print('sim column totals:', simdata.sum(axis=1)) ``` #### product multinominal ``` simdata = sim_2d_product_multinominal(inputdata.values, axis=0) print('data total:', inputdata.sum().sum()) print('sim total:', simdata.sum().sum()) print('data row totals:', inputdata.sum(axis=0).astype(int).values) print('sim row totals:', simdata.sum(axis=0).astype(int)) print('data column totals:', inputdata.sum(axis=1).astype(int).values) print('sim column totals:', simdata.sum(axis=1).astype(int)) ``` #### hypergeometric ("patefield") ``` # patefield simulation needs compiled c++ code. # only run this if the python binding to the (compiled) patefiled simulation function is found. try: from phik.simcore import _sim_2d_data_patefield CPP_SUPPORT = True except ImportError: CPP_SUPPORT = False if CPP_SUPPORT: simdata = sim_2d_data_patefield(inputdata.values) print('data total:', inputdata.sum().sum()) print('sim total:', simdata.sum().sum()) print('data row totals:', inputdata.sum(axis=0).astype(int).values) print('sim row totals:', simdata.sum(axis=0)) print('data column totals:', inputdata.sum(axis=1).astype(int).values) print('sim column totals:', simdata.sum(axis=1)) ``` # Outlier significance The normal pearson correlation between two interval variables is easy to interpret. However, the phik correlation between two variables of mixed type is not always easy to interpret, especially when it concerns categorical variables. Therefore, functionality is provided to detect "outliers": excesses and deficits over the expected frequencies in the contingency table of two variables. ### Example 1: mileage versus car_size For the categorical variable pair mileage - car_size we measured: $$\phi_k = 0.77 \, ,\quad\quad \mathrm{significance} = 46.3$$ Let's use the outlier significance functionality to gain a better understanding of this significance correlation between mileage and car size. 
``` c0 = 'mileage' c1 = 'car_size' tmp_interval_cols = ['mileage'] outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, retbins=True) outlier_signifs ``` ### Specify binning per interval variable Binning can be set per interval variable individually. One can set the number of bins, or specify a list of bin edges. Note: in case a bin is created without any records this bin will be automatically dropped in the phik and (outlier) significance calculations. However, in the outlier significance calculation this will currently lead to an error as the number of provided bin edges does not match the number of bins anymore. ``` bins = [0,1E2, 1E3, 1E4, 1E5, 1E6] outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True) outlier_signifs ``` ### Specify binning per interval variable -- dealing with underflow and overflow When specifying custom bins as situation can occur when the minimal (maximum) value in the data is smaller (larger) than the minimum (maximum) bin edge. Data points outside the specified range will be collected in the underflow (UF) and overflow (OF) bins. One can choose how to deal with these under/overflow bins, by setting the drop_underflow and drop_overflow variables. Note that the drop_underflow and drop_overflow options are also available for the calculation of the phik matrix and the significance matrix. ``` bins = [1E2, 1E3, 1E4, 1E5] outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True, drop_underflow=False, drop_overflow=False) outlier_signifs ``` ### Dealing with NaN's in the data Let's add some missing values to our data ``` data.loc[np.random.choice(range(len(data)), size=10), 'car_size'] = np.nan data.loc[np.random.choice(range(len(data)), size=10), 'mileage'] = np.nan ``` Sometimes there can be information in the missing values and in which case you might want to consider the NaN values as a separate category. This can be achieved by setting the dropna argument to False. ``` bins = [1E2, 1E3, 1E4, 1E5] outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True, drop_underflow=False, drop_overflow=False, dropna=False) outlier_signifs ``` Here OF and UF are the underflow and overflow bin of car_size, respectively. To just ignore records with missing values set dropna to True (default). ``` bins = [1E2, 1E3, 1E4, 1E5] outlier_signifs, binning_dict = data[[c0,c1]].outlier_significance_matrix(interval_cols=tmp_interval_cols, bins=bins, retbins=True, drop_underflow=False, drop_overflow=False, dropna=True) outlier_signifs ``` Note that the dropna option is also available for the calculation of the phik matrix and the significance matrix.
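To illustrate that note, here is a short sketch applying the same options when computing the full correlation and significance matrices; I am assuming the keyword names mirror those of `outlier_significance_matrix`, so check the phik API of your installed version if this raises an error.

```
# Same NaN / underflow / overflow handling for the full matrices (assumed keywords)
phik_overview = data.phik_matrix(interval_cols=interval_cols,
                                 dropna=False,
                                 drop_underflow=False,
                                 drop_overflow=False)
significance_overview = data.significance_matrix(interval_cols=interval_cols,
                                                 dropna=False,
                                                 drop_underflow=False,
                                                 drop_overflow=False)
phik_overview
```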
``` import tabula import numpy as np import pandas as pd import os from pathlib import Path import PyPDF2 import re import requests import json import time # filenames = [ # os.path.expanduser('/home/parth/Documents/USICT/it_res.pdf'), # os.path.expanduser('/home/parth/Documents/USICT/cse_res.pdf'), # os.path.expanduser('/home/parth/Documents/USICT/ece_res.pdf')] # filenames = [ # os.path.expanduser('~/Documents/USICT/ipu_results/cse_even_sems.pdf'), # os.path.expanduser('~/Documents/USICT/ipu_results/ece_even_sems.pdf') # ] # filenames = [ # os.path.expanduser('~/Documents/USICT/ipu_results/it_even_sems.pdf') # ] filenames = [ os.path.expanduser('/home/parth/Documents/USICT/it_res.pdf'), os.path.expanduser('/home/parth/Documents/USICT/cse_res.pdf'), os.path.expanduser('/home/parth/Documents/USICT/ece_res.pdf'), os.path.expanduser('~/Documents/USICT/ipu_results/cse_even_sems.pdf'), os.path.expanduser('~/Documents/USICT/ipu_results/ece_even_sems.pdf'), os.path.expanduser('~/Documents/USICT/ipu_results/it_even_sems.pdf') ] scheme_reg = re.compile(r'scheme\s+of\s+examinations',re.IGNORECASE) institution_reg = re.compile(r'institution\s*:\s*([\w\n(,)& ]+)\nS\.No',re.IGNORECASE) sem_reg = re.compile(r'se\s?m[.//\w\n]+:\s+([\w\n]+)',re.IGNORECASE) programme_reg = re.compile(r'programme\s+name:\s+([\w(,)& \n]+)SchemeID',re.IGNORECASE) branch_reg = re.compile(r'[\w &]+\(([\w ]+)\)') def get_info(text) : college = institution_reg.search(text)[1].replace('\n','').strip().title() semester = int(sem_reg.search(text)[1].replace('\n','').strip()) course = programme_reg.search(text)[1].replace('\n','').strip().title() branch = branch_reg.search(course)[1].strip().title() course = course[0:course.find('(')].strip() info = { 'college' : college, 'semester' : semester, 'course' : course, 'branch' : branch, } return info SITE = "https://api-rhapsody.herokuapp.com/academia" # SITE = "http://localhost:3000/academia" #Add college data ={ 'college' : { 'college' : "University School Of Information, Communication & Technology (Formerly Usit)" } } r = requests.post(SITE+"/college",json=data) print(r,r.content) def already_exists(info) : r = requests.get(SITE+"/semester",params=info) content = json.loads(r.content) # print(r.status_code,r.content) return r.status_code == 200 and content != {} def getSubjects(df) : subjects = [] for index,row in df.iterrows() : subject = {} subject['subject'] = row['Subject'].strip().title() subject['subjectCode'] = row['Code'] subject['credits'] = row['Credit'] subjects.append(subject) return subjects for filename in filenames : pdf = PyPDF2.PdfFileReader(filename) print(filename,pdf.getNumPages()) for i in range(0,pdf.getNumPages()) : text = pdf.getPage(i).extractText() if scheme_reg.search(text) : info = get_info(text) df = tabula.read_pdf(filename,pages=i+1) subjects = getSubjects(df[0]) if already_exists(info) : print("information already exists") continue info['semester'] = {'semester' : info['semester'], 'subjects' : subjects} r = requests.post(SITE+"/semester",json=info) print(r,r.content) # time.sleep(2) # print(info) from IPython.display import display ```
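A quick sanity check of the regex-based `get_info` helper on a made-up page header (the text below is invented purely for illustration and only mimics the layout the regexes expect; real result PDFs will differ):

```
sample_page = (
    "Institution: University School Of Information, Communication & Technology\n"
    "S.No ...\n"
    "Programme Name: Bachelor Of Technology (Information Technology) SchemeID: 123\n"
    "Sem./Annual: 03\n"
)
# should yield roughly:
# {'college': 'University School Of Information, Communication & Technology',
#  'semester': 3, 'course': 'Bachelor Of Technology', 'branch': 'Information Technology'}
print(get_info(sample_page))
```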
# Description This notebook runs some pre-analyses using DBSCAN to explore the best set of parameters (`min_samples` and `eps`) to cluster `pca` data version. # Environment variables ``` from IPython.display import display import conf N_JOBS = conf.GENERAL["N_JOBS"] display(N_JOBS) %env MKL_NUM_THREADS=$N_JOBS %env OPEN_BLAS_NUM_THREADS=$N_JOBS %env NUMEXPR_NUM_THREADS=$N_JOBS %env OMP_NUM_THREADS=$N_JOBS ``` # Modules loading ``` %load_ext autoreload %autoreload 2 from pathlib import Path import numpy as np import pandas as pd from sklearn.neighbors import NearestNeighbors from sklearn.metrics import pairwise_distances from sklearn.cluster import DBSCAN from sklearn.metrics import ( silhouette_score, calinski_harabasz_score, davies_bouldin_score, ) import matplotlib.pyplot as plt import seaborn as sns from utils import generate_result_set_name from clustering.ensembles.utils import generate_ensemble ``` # Global settings ``` np.random.seed(0) CLUSTERING_ATTRIBUTES_TO_SAVE = ["n_clusters"] ``` # Data version: pca ``` INPUT_SUBSET = "pca" INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores" DR_OPTIONS = { "n_components": 50, "svd_solver": "full", "random_state": 0, } input_filepath = Path( conf.RESULTS["DATA_TRANSFORMATIONS_DIR"], INPUT_SUBSET, generate_result_set_name( DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl" ), ).resolve() display(input_filepath) assert input_filepath.exists(), "Input file does not exist" input_filepath_stem = input_filepath.stem display(input_filepath_stem) data = pd.read_pickle(input_filepath) data.shape data.head() ``` ## Tests different k values (k-NN) ``` # `k_values` is the full range of k for kNN, whereas `k_values_to_explore` is a # subset that will be explored in this notebook. 
If the analysis works, then # `k_values` and `eps_range_per_k` below are copied to the notebook that will # produce the final DBSCAN runs (`../002_[...]-dbscan-....ipynb`) k_values = np.arange(2, 125 + 1, 1) k_values_to_explore = (2, 5, 10, 15, 20, 30, 40, 50, 75, 100, 125) results = {} for k in k_values_to_explore: nbrs = NearestNeighbors(n_neighbors=k, n_jobs=N_JOBS).fit(data) distances, indices = nbrs.kneighbors(data) results[k] = (distances, indices) eps_range_per_k = { k: (10, 20) if k < 5 else (11, 25) if k < 10 else (12, 30) if k < 15 else (13, 35) if k < 20 else (14, 40) for k in k_values } eps_range_per_k_to_explore = {k: eps_range_per_k[k] for k in k_values_to_explore} for k, (distances, indices) in results.items(): d = distances[:, 1:].mean(axis=1) d = np.sort(d) fig, ax = plt.subplots() plt.plot(d) r = eps_range_per_k_to_explore[k] plt.hlines(r[0], 0, data.shape[0], color="red") plt.hlines(r[1], 0, data.shape[0], color="red") plt.xlim((3000, data.shape[0])) plt.title(f"k={k}") display(fig) plt.close(fig) ``` # Extended test ## Generate clusterers ``` CLUSTERING_OPTIONS = {} # K_RANGE is the min_samples parameter in DBSCAN (sklearn) CLUSTERING_OPTIONS["K_RANGE"] = k_values_to_explore CLUSTERING_OPTIONS["EPS_RANGE_PER_K"] = eps_range_per_k_to_explore CLUSTERING_OPTIONS["EPS_STEP"] = 33 CLUSTERING_OPTIONS["METRIC"] = "euclidean" display(CLUSTERING_OPTIONS) CLUSTERERS = {} idx = 0 for k in CLUSTERING_OPTIONS["K_RANGE"]: eps_range = CLUSTERING_OPTIONS["EPS_RANGE_PER_K"][k] eps_values = np.linspace(eps_range[0], eps_range[1], CLUSTERING_OPTIONS["EPS_STEP"]) for eps in eps_values: clus = DBSCAN(min_samples=k, eps=eps, metric="precomputed", n_jobs=N_JOBS) method_name = type(clus).__name__ CLUSTERERS[f"{method_name} #{idx}"] = clus idx = idx + 1 display(len(CLUSTERERS)) _iter = iter(CLUSTERERS.items()) display(next(_iter)) display(next(_iter)) clustering_method_name = method_name display(clustering_method_name) ``` ## Generate ensemble ``` data_dist = pairwise_distances(data, metric=CLUSTERING_OPTIONS["METRIC"]) data_dist.shape pd.Series(data_dist.flatten()).describe().apply(str) ensemble = generate_ensemble( data_dist, CLUSTERERS, attributes=CLUSTERING_ATTRIBUTES_TO_SAVE, ) ensemble.shape ensemble.head() _tmp = ensemble["n_clusters"].value_counts() display(_tmp) assert _tmp.index[0] == 3 assert _tmp.loc[3] == 22 ensemble_stats = ensemble["n_clusters"].describe() display(ensemble_stats) # number of noisy points _tmp = ensemble.copy() _tmp = _tmp.assign(n_noisy=ensemble["partition"].apply(lambda x: np.isnan(x).sum())) _tmp_stats = _tmp["n_noisy"].describe() display(_tmp_stats) assert _tmp_stats["min"] > 5 assert _tmp_stats["max"] < 600 assert 90 < _tmp_stats["mean"] < 95 ``` ## Testing ``` assert ensemble_stats["min"] > 1 assert not ensemble["n_clusters"].isna().any() # all partitions have the right size assert np.all( [part["partition"].shape[0] == data.shape[0] for idx, part in ensemble.iterrows()] ) ``` ## Add clustering quality measures ``` def _remove_nans(data, part): not_nan_idx = ~np.isnan(part) return data.iloc[not_nan_idx], part[not_nan_idx] def _apply_func(func, data, part): no_nan_data, no_nan_part = _remove_nans(data, part) return func(no_nan_data, no_nan_part) ensemble = ensemble.assign( si_score=ensemble["partition"].apply( lambda x: _apply_func(silhouette_score, data, x) ), ch_score=ensemble["partition"].apply( lambda x: _apply_func(calinski_harabasz_score, data, x) ), db_score=ensemble["partition"].apply( lambda x: _apply_func(davies_bouldin_score, data, x) ), ) 
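# note: the three indices above are computed on the non-noisy points only --
# _apply_func drops samples whose partition label is NaN (DBSCAN noise) before scoring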
ensemble.shape ensemble.head() ``` # Cluster quality ``` with pd.option_context("display.max_rows", None, "display.max_columns", None): _df = ensemble.groupby(["n_clusters"]).mean() display(_df) with sns.plotting_context("talk", font_scale=0.75), sns.axes_style( "whitegrid", {"grid.linestyle": "--"} ): fig = plt.figure(figsize=(14, 6)) ax = sns.pointplot(data=ensemble, x="n_clusters", y="si_score") ax.set_ylabel("Silhouette index\n(higher is better)") ax.set_xlabel("Number of clusters ($k$)") ax.set_xticklabels(ax.get_xticklabels(), rotation=45) plt.grid(True) plt.tight_layout() with sns.plotting_context("talk", font_scale=0.75), sns.axes_style( "whitegrid", {"grid.linestyle": "--"} ): fig = plt.figure(figsize=(14, 6)) ax = sns.pointplot(data=ensemble, x="n_clusters", y="ch_score") ax.set_ylabel("Calinski-Harabasz index\n(higher is better)") ax.set_xlabel("Number of clusters ($k$)") ax.set_xticklabels(ax.get_xticklabels(), rotation=45) plt.grid(True) plt.tight_layout() with sns.plotting_context("talk", font_scale=0.75), sns.axes_style( "whitegrid", {"grid.linestyle": "--"} ): fig = plt.figure(figsize=(14, 6)) ax = sns.pointplot(data=ensemble, x="n_clusters", y="db_score") ax.set_ylabel("Davies-Bouldin index\n(lower is better)") ax.set_xlabel("Number of clusters ($k$)") ax.set_xticklabels(ax.get_xticklabels(), rotation=45) plt.grid(True) plt.tight_layout() ``` # Conclusions The values explored above for `k_values` and `eps_range_per_k` are the one that will be used for DBSCAN in this data version.
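For reference, a minimal sketch of what one of those final runs could look like on the precomputed distance matrix (the `min_samples` and `eps` values below are placeholders, not the tuned values copied to the follow-up notebook):

```
final_clus = DBSCAN(min_samples=10, eps=20.0, metric="precomputed", n_jobs=N_JOBS)
final_part = final_clus.fit_predict(data_dist)
n_clusters = len(set(final_part)) - (1 if -1 in final_part else 0)
n_noisy = int((final_part == -1).sum())
display((n_clusters, n_noisy))
```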
## Rhetorical relations classification used in tree building: ESIM Prepare data and model-related scripts. Evaluate models. Make and evaluate ansembles for ESIM and BiMPM model / ESIM and feature-based model. Output: - ``models/relation_predictor_esim/*`` ``` %load_ext autoreload %autoreload 2 import os import glob import pandas as pd import numpy as np import pickle from utils.file_reading import read_edus, read_gold, read_negative, read_annotation ``` ### Make a directory ``` MODEL_PATH = 'models/label_predictor_esim' ! mkdir $MODEL_PATH TRAIN_FILE_PATH = os.path.join(MODEL_PATH, 'nlabel_cf_train.tsv') DEV_FILE_PATH = os.path.join(MODEL_PATH, 'nlabel_cf_dev.tsv') TEST_FILE_PATH = os.path.join(MODEL_PATH, 'nlabel_cf_test.tsv') ``` ### Prepare train/test sets ``` IN_PATH = 'data_labeling' train_samples = pd.read_pickle(os.path.join(IN_PATH, 'train_samples.pkl')) dev_samples = pd.read_pickle(os.path.join(IN_PATH, 'dev_samples.pkl')) test_samples = pd.read_pickle(os.path.join(IN_PATH, 'test_samples.pkl')) counts = train_samples['relation'].value_counts(normalize=False).values NUMBER_CLASSES = len(counts) print("number of classes:", NUMBER_CLASSES) print("class weights:") np.round(counts.min() / counts, decimals=6) counts = train_samples['relation'].value_counts() counts import razdel def tokenize(text): result = ' '.join([tok.text for tok in razdel.tokenize(text)]) return result train_samples['snippet_x'] = train_samples.snippet_x.map(tokenize) train_samples['snippet_y'] = train_samples.snippet_y.map(tokenize) dev_samples['snippet_x'] = dev_samples.snippet_x.map(tokenize) dev_samples['snippet_y'] = dev_samples.snippet_y.map(tokenize) test_samples['snippet_x'] = test_samples.snippet_x.map(tokenize) test_samples['snippet_y'] = test_samples.snippet_y.map(tokenize) train_samples = train_samples.reset_index() train_samples[['relation', 'snippet_x', 'snippet_y', 'index']].to_csv(TRAIN_FILE_PATH, sep='\t', header=False, index=False) dev_samples = dev_samples.reset_index() dev_samples[['relation', 'snippet_x', 'snippet_y', 'index']].to_csv(DEV_FILE_PATH, sep='\t', header=False, index=False) test_samples = test_samples.reset_index() test_samples[['relation', 'snippet_x', 'snippet_y', 'index']].to_csv(TEST_FILE_PATH, sep='\t', header=False, index=False) ``` ### Modify model (Add F1, concatenated encoding) ``` %%writefile models/bimpm_custom_package/model/esim.py from typing import Dict, List, Any, Optional import numpy import torch from allennlp.common.checks import check_dimensions_match from allennlp.data import TextFieldTensors, Vocabulary from allennlp.models.model import Model from allennlp.modules import FeedForward, InputVariationalDropout from allennlp.modules.matrix_attention.matrix_attention import MatrixAttention from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder from allennlp.nn import InitializerApplicator from allennlp.nn.util import ( get_text_field_mask, masked_softmax, weighted_sum, masked_max, ) from allennlp.training.metrics import CategoricalAccuracy, F1Measure @Model.register("custom_esim") class CustomESIM(Model): """ This `Model` implements the ESIM sequence model described in [Enhanced LSTM for Natural Language Inference] (https://api.semanticscholar.org/CorpusID:34032948) by Chen et al., 2017. Registered as a `Model` with name "esim". # Parameters vocab : `Vocabulary` text_field_embedder : `TextFieldEmbedder` Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the model. encoder : `Seq2SeqEncoder` Used to encode the premise and hypothesis. 
matrix_attention : `MatrixAttention` This is the attention function used when computing the similarity matrix between encoded words in the premise and words in the hypothesis. projection_feedforward : `FeedForward` The feedforward network used to project down the encoded and enhanced premise and hypothesis. inference_encoder : `Seq2SeqEncoder` Used to encode the projected premise and hypothesis for prediction. output_feedforward : `FeedForward` Used to prepare the concatenated premise and hypothesis for prediction. output_logit : `FeedForward` This feedforward network computes the output logits. dropout : `float`, optional (default=`0.5`) Dropout percentage to use. initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`) Used to initialize the model parameters. """ def __init__( self, vocab: Vocabulary, text_field_embedder: TextFieldEmbedder, encoder: Seq2SeqEncoder, matrix_attention: MatrixAttention, projection_feedforward: FeedForward, inference_encoder: Seq2SeqEncoder, output_feedforward: FeedForward, output_logit: FeedForward, encode_together: bool = False, dropout: float = 0.5, class_weights: list = [], initializer: InitializerApplicator = InitializerApplicator(), **kwargs, ) -> None: super().__init__(vocab, **kwargs) self._text_field_embedder = text_field_embedder self._encoder = encoder self._matrix_attention = matrix_attention self._projection_feedforward = projection_feedforward self._inference_encoder = inference_encoder if dropout: self.dropout = torch.nn.Dropout(dropout) self.rnn_input_dropout = InputVariationalDropout(dropout) else: self.dropout = None self.rnn_input_dropout = None self._output_feedforward = output_feedforward self._output_logit = output_logit self.encode_together = encode_together self._num_labels = vocab.get_vocab_size(namespace="labels") check_dimensions_match( text_field_embedder.get_output_dim(), encoder.get_input_dim(), "text field embedding dim", "encoder input dim", ) check_dimensions_match( encoder.get_output_dim() * 4, projection_feedforward.get_input_dim(), "encoder output dim", "projection feedforward input", ) check_dimensions_match( projection_feedforward.get_output_dim(), inference_encoder.get_input_dim(), "proj feedforward output dim", "inference lstm input dim", ) self.metrics = {"accuracy": CategoricalAccuracy()} if class_weights: self.class_weights = class_weights else: self.class_weights = [1.] * self.classifier_feedforward.get_output_dim() for _class in range(len(self.class_weights)): self.metrics.update({ f"f1_rel{_class}": F1Measure(_class), }) self._loss = torch.nn.CrossEntropyLoss(weight=torch.FloatTensor(self.class_weights)) initializer(self) def forward( # type: ignore self, premise: TextFieldTensors, hypothesis: TextFieldTensors, label: torch.IntTensor = None, metadata: List[Dict[str, Any]] = None, ) -> Dict[str, torch.Tensor]: """ # Parameters premise : `TextFieldTensors` From a `TextField` hypothesis : `TextFieldTensors` From a `TextField` label : `torch.IntTensor`, optional (default = `None`) From a `LabelField` metadata : `List[Dict[str, Any]]`, optional (default = `None`) Metadata containing the original tokenization of the premise and hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively. # Returns An output dictionary consisting of: label_logits : `torch.FloatTensor` A tensor of shape `(batch_size, num_labels)` representing unnormalised log probabilities of the entailment label. 
label_probs : `torch.FloatTensor` A tensor of shape `(batch_size, num_labels)` representing probabilities of the entailment label. loss : `torch.FloatTensor`, optional A scalar loss to be optimised. """ embedded_premise = self._text_field_embedder(premise) embedded_hypothesis = self._text_field_embedder(hypothesis) premise_mask = get_text_field_mask(premise) hypothesis_mask = get_text_field_mask(hypothesis) # apply dropout for LSTM if self.rnn_input_dropout: embedded_premise = self.rnn_input_dropout(embedded_premise) embedded_hypothesis = self.rnn_input_dropout(embedded_hypothesis) # encode premise and hypothesis encoded_premise = self._encoder(embedded_premise, premise_mask) encoded_hypothesis = self._encoder(embedded_hypothesis, hypothesis_mask) # Shape: (batch_size, premise_length, hypothesis_length) similarity_matrix = self._matrix_attention(encoded_premise, encoded_hypothesis) # Shape: (batch_size, premise_length, hypothesis_length) p2h_attention = masked_softmax(similarity_matrix, hypothesis_mask) # Shape: (batch_size, premise_length, embedding_dim) attended_hypothesis = weighted_sum(encoded_hypothesis, p2h_attention) # Shape: (batch_size, hypothesis_length, premise_length) h2p_attention = masked_softmax(similarity_matrix.transpose(1, 2).contiguous(), premise_mask) # Shape: (batch_size, hypothesis_length, embedding_dim) attended_premise = weighted_sum(encoded_premise, h2p_attention) # the "enhancement" layer premise_enhanced = torch.cat( [ encoded_premise, attended_hypothesis, encoded_premise - attended_hypothesis, encoded_premise * attended_hypothesis, ], dim=-1, ) hypothesis_enhanced = torch.cat( [ encoded_hypothesis, attended_premise, encoded_hypothesis - attended_premise, encoded_hypothesis * attended_premise, ], dim=-1, ) # The projection layer down to the model dimension. Dropout is not applied before # projection. projected_enhanced_premise = self._projection_feedforward(premise_enhanced) projected_enhanced_hypothesis = self._projection_feedforward(hypothesis_enhanced) # Run the inference layer if self.rnn_input_dropout: projected_enhanced_premise = self.rnn_input_dropout(projected_enhanced_premise) projected_enhanced_hypothesis = self.rnn_input_dropout(projected_enhanced_hypothesis) v_ai = self._inference_encoder(projected_enhanced_premise, premise_mask) v_bi = self._inference_encoder(projected_enhanced_hypothesis, hypothesis_mask) # The pooling layer -- max and avg pooling. 
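        # (added note) masked max- and average-pooling below collapse the variable-length
        # sequence dimension into fixed-size (batch_size, model_dim) vectors, using the
        # masks to ignore padded positions.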
# (batch_size, model_dim) v_a_max = masked_max(v_ai, premise_mask.unsqueeze(-1), dim=1) v_b_max = masked_max(v_bi, hypothesis_mask.unsqueeze(-1), dim=1) v_a_avg = torch.sum(v_ai * premise_mask.unsqueeze(-1), dim=1) / torch.sum( premise_mask, 1, keepdim=True ) v_b_avg = torch.sum(v_bi * hypothesis_mask.unsqueeze(-1), dim=1) / torch.sum( hypothesis_mask, 1, keepdim=True ) # Now concat # (batch_size, model_dim * 2 * 4) v_all = torch.cat([v_a_avg, v_a_max, v_b_avg, v_b_max], dim=1) # the final MLP -- apply dropout to input, and MLP applies to output & hidden if self.dropout: v_all = self.dropout(v_all) output_hidden = self._output_feedforward(v_all) label_logits = self._output_logit(output_hidden) label_probs = torch.nn.functional.softmax(label_logits, dim=-1) output_dict = {"label_logits": label_logits, "label_probs": label_probs} if label is not None: loss = self._loss(label_logits, label.long().view(-1)) output_dict["loss"] = loss for metric in self.metrics.values(): metric(label_logits, label.long().view(-1)) return output_dict def get_metrics(self, reset: bool = False) -> Dict[str, float]: metrics = {"accuracy": self.metrics["accuracy"].get_metric(reset=reset)} for _class in range(len(self.class_weights)): metrics.update({ f"f1_rel{_class}": self.metrics[f"f1_rel{_class}"].get_metric(reset=reset)['f1'], }) metrics["f1_macro"] = numpy.mean([metrics[f"f1_rel{_class}"] for _class in range(len(self.class_weights))]) return metrics default_predictor = "textual_entailment" ! cp models/bimpm_custom_package/model/esim.py ../../../maintenance_rst/models/customization_package/model/esim.py ``` ### 2. Generate config files #### ELMo ``` %%writefile $MODEL_PATH/config_elmo.json local NUM_EPOCHS = 200; local LR = 1e-3; local LSTM_ENCODER_HIDDEN = 25; { "dataset_reader": { "type": "quora_paraphrase", "tokenizer": { "type": "just_spaces" }, "token_indexers": { "token_characters": { "type": "characters", "min_padding_length": 30, }, "elmo": { "type": "elmo_characters" } } }, "train_data_path": "label_predictor_esim/nlabel_cf_train.tsv", "validation_data_path": "label_predictor_esim/nlabel_cf_dev.tsv", "test_data_path": "label_predictor_esim/nlabel_cf_test.tsv", "model": { "type": "custom_esim", "dropout": 0.5, "class_weights": [ 0.027483, 0.032003, 0.080478, 0.102642, 0.121394, 0.135027, 0.136856, 0.170897, 0.172355, 0.181655, 0.193858, 0.211297, 0.231651, 0.260982, 0.334437, 0.378277, 0.392996, 0.567416, 0.782946, 0.855932, 0.971154, 1.0], "encode_together": false, "text_field_embedder": { "token_embedders": { "elmo": { "type": "elmo_token_embedder", "options_file": "rsv_elmo/options.json", "weight_file": "rsv_elmo/model.hdf5", "do_layer_norm": false, "dropout": 0.1 }, "token_characters": { "type": "character_encoding", "dropout": 0.1, "embedding": { "embedding_dim": 20, "padding_index": 0, "vocab_namespace": "token_characters" }, "encoder": { "type": "lstm", "input_size": $.model.text_field_embedder.token_embedders.token_characters.embedding.embedding_dim, "hidden_size": LSTM_ENCODER_HIDDEN, "num_layers": 1, "bidirectional": true, "dropout": 0.4 }, }, } }, "encoder": { "type": "lstm", "input_size": 1024+LSTM_ENCODER_HIDDEN+LSTM_ENCODER_HIDDEN, "hidden_size": 300, "num_layers": 1, "bidirectional": true }, "matrix_attention": {"type": "dot_product"}, "projection_feedforward": { "input_dim": 2400, "hidden_dims": 300, "num_layers": 1, "activations": "relu" }, "inference_encoder": { "type": "lstm", "input_size": 300, "hidden_size": 300, "num_layers": 1, "bidirectional": true }, "output_feedforward": { 
"input_dim": 2400, "num_layers": 1, "hidden_dims": 300, "activations": "relu", "dropout": 0.5 }, "output_logit": { "input_dim": 300, "num_layers": 1, "hidden_dims": 22, "activations": "linear" }, "initializer": { "regexes": [ [".*linear_layers.*weight", {"type": "xavier_normal"}], [".*linear_layers.*bias", {"type": "constant", "val": 0}], [".*weight_ih.*", {"type": "xavier_normal"}], [".*weight_hh.*", {"type": "orthogonal"}], [".*bias.*", {"type": "constant", "val": 0}], [".*matcher.*match_weights.*", {"type": "kaiming_normal"}] ] } }, "data_loader": { "batch_sampler": { "type": "bucket", "batch_size": 20, "padding_noise": 0.0, "sorting_keys": ["premise"], }, }, "trainer": { "num_epochs": NUM_EPOCHS, "cuda_device": 1, "grad_clipping": 5.0, "validation_metric": "+f1_macro", "shuffle": true, "optimizer": { "type": "adam", "lr": LR }, "learning_rate_scheduler": { "type": "reduce_on_plateau", "factor": 0.5, "mode": "max", "patience": 0 } } } ! cp -r $MODEL_PATH ../../../maintenance_rst/models/label_predictor_esim ! cp -r $MODEL_PATH/config_elmo.json ../../../maintenance_rst/models/label_predictor_esim/ ``` ### 3. Scripts for training/prediction #### Option 1. Directly from the config Train a model ``` %%writefile models/train_label_predictor_esim.sh # usage: # $ cd models # $ sh train_label_predictor.sh {bert|elmo} result_30 export METHOD=${1} export RESULT_DIR=${2} export DEV_FILE_PATH="nlabel_cf_dev.tsv" export TEST_FILE_PATH="nlabel_cf_test.tsv" rm -r label_predictor_esim/${RESULT_DIR}/ allennlp train -s label_predictor_esim/${RESULT_DIR}/ label_predictor_esim/config_${METHOD}.json \ --include-package bimpm_custom_package allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_dev.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${DEV_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_test.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${TEST_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment ! cp models/train_label_predictor_esim.sh ../../../maintenance_rst/models/ ``` Predict on dev&test ``` %%writefile models/eval_label_predictor_esim.sh # usage: # $ cd models # $ sh train_label_predictor.sh {bert|elmo} result_30 export METHOD=${1} export RESULT_DIR=${2} export DEV_FILE_PATH="nlabel_cf_dev.tsv" export TEST_FILE_PATH="nlabel_cf_test.tsv" allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_dev.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${DEV_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_esim/${RESULT_DIR}/predictions_test.json label_predictor_esim/${RESULT_DIR}/model.tar.gz label_predictor_esim/${TEST_FILE_PATH} \ --include-package bimpm_custom_package \ --predictor textual-entailment ! 
cp models/eval_label_predictor_esim.sh ../../../maintenance_rst/models/ ``` (optional) predict on train ``` %%writefile models/eval_label_predictor_train.sh # usage: # $ cd models # $ sh eval_label_predictor_train.sh {bert|elmo} result_30 export METHOD=${1} export RESULT_DIR=${2} export TEST_FILE_PATH="nlabel_cf_train.tsv" allennlp predict --use-dataset-reader --silent \ --output-file label_predictor_bimpm/${RESULT_DIR}/predictions_train.json label_predictor_bimpm/${RESULT_DIR}/model.tar.gz label_predictor_bimpm/${TEST_FILE_PATH} \ --include-package customization_package \ --predictor textual-entailment ``` #### Option 2. Using wandb for parameters adjustment ``` %%writefile ../../../maintenance_rst/models/wandb_label_predictor_esim.yaml name: label_predictor_esim program: wandb_allennlp # this is a wrapper console script around allennlp commands. It is part of wandb-allennlp method: bayes ## Do not for get to use the command keyword to specify the following command structure command: - ${program} #omit the interpreter as we use allennlp train command directly - "--subcommand=train" - "--include-package=customization_package" # add all packages containing your registered classes here - "--config_file=label_predictor_esim/config_elmo.json" - ${args} metric: name: best_f1_macro goal: maximize parameters: model.encode_together: values: ["true", ] iterator.batch_size: values: [8,] trainer.optimizer.lr: values: [0.001,] model.dropout: values: [0.5] ``` 3. Run training ``wandb sweep wandb_label_predictor_esim.yaml`` (returns %sweepname1) ``wandb sweep wandb_label_predictor2.yaml`` (returns %sweepname2) ``wandb agent --count 1 %sweepname1 && wandb agent --count 1 %sweepname2`` Move the best model in label_predictor_bimpm ``` ! ls -laht models/wandb ! cp -r models/wandb/run-20201218_123424-kcphaqhi/training_dumps models/label_predictor_esim/esim_elmo ``` **Or** load from wandb by %sweepname ``` import wandb api = wandb.Api() run = api.run("tchewik/tmp/7hum4oom") for file in run.files(): file.download(replace=True) ! cp -r training_dumps models/label_predictor_bimpm/toasty-sweep-1 ``` And run evaluation from shell ``sh eval_label_predictor_esim.sh {elmo|elmo_fasttext} toasty-sweep-1`` ### 4. Evaluate classifier ``` def load_predictions(path): result = [] vocab = [] with open(path, 'r') as file: for line in file.readlines(): line = json.loads(line) if line.get("label"): result.append(line.get("label")) elif line.get("label_probs"): if not vocab: vocab = open(path[:path.rfind('/')] + '/vocabulary/labels.txt', 'r').readlines() vocab = [label.strip() for label in vocab] result.append(vocab[np.argmax(line.get("label_probs"))]) print('length of result:', len(result)) return result RESULT_DIR = 'esim_elmo' ! mkdir models/label_predictor_esim/$RESULT_DIR ! 
cp -r ../../../maintenance_rst/models/label_predictor_esim/$RESULT_DIR/*.json models/label_predictor_esim/$RESULT_DIR/ ``` On dev set ``` import pandas as pd import json true = pd.read_csv(DEV_FILE_PATH, sep='\t', header=None)[0].values.tolist() pred = load_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_dev.json') from sklearn.metrics import classification_report print(classification_report(true[:len(pred)], pred, digits=4)) test_metrics = classification_report(true[:len(pred)], pred, digits=4, output_dict=True) test_f1 = np.array( [test_metrics[label].get('f1-score') for label in test_metrics if type(test_metrics[label]) == dict]) * 100 test_f1 len(true) from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(true[:len(pred)], pred, average='macro')*100)) print('pr: %.2f'%(precision_score(true[:len(pred)], pred, average='macro')*100)) print('re: %.2f'%(recall_score(true[:len(pred)], pred, average='macro')*100)) from utils.plot_confusion_matrix import plot_confusion_matrix from sklearn.metrics import confusion_matrix labels = list(set(true)) labels.sort() plot_confusion_matrix(confusion_matrix(true[:len(pred)], pred, labels), target_names=labels, normalize=True) top_classes = [ 'attribution_NS', 'attribution_SN', 'purpose_NS', 'purpose_SN', 'condition_SN', 'contrast_NN', 'condition_NS', 'joint_NN', 'concession_NS', 'same-unit_NN', 'elaboration_NS', 'cause-effect_NS', ] class_mapper = {weird_class: 'other' + weird_class[-3:] for weird_class in labels if not weird_class in top_classes} import numpy as np true = [class_mapper.get(value) if class_mapper.get(value) else value for value in true] pred = [class_mapper.get(value) if class_mapper.get(value) else value for value in pred] pred_mapper = { 'other_NN': 'joint_NN', 'other_NS': 'joint_NN', 'other_SN': 'joint_NN' } pred = [pred_mapper.get(value) if pred_mapper.get(value) else value for value in pred] _to_stay = (np.array(true) != 'other_NN') & (np.array(true) != 'other_SN') & (np.array(true) != 'other_NS') _true = np.array(true)[_to_stay] _pred = np.array(pred)[_to_stay[:len(pred)]] labels = list(set(_true)) from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(true[:len(pred)], pred, average='macro')*100)) print('pr: %.2f'%(precision_score(true[:len(pred)], pred, average='macro')*100)) print('re: %.2f'%(recall_score(true[:len(pred)], pred, average='macro')*100)) labels.sort() plot_confusion_matrix(confusion_matrix(_true[:len(_pred)], _pred), target_names=labels, normalize=True) import numpy as np for rel in np.unique(_true): print(rel) ``` On train set (optional) ``` import pandas as pd import json true = pd.read_csv('models/label_predictor_bimpm/nlabel_cf_train.tsv', sep='\t', header=None)[0].values.tolist() pred = load_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_train.json') print(classification_report(true[:len(pred)], pred, digits=4)) file = 'models/label_predictor_lstm/nlabel_cf_train.tsv' true_train = pd.read_csv(file, sep='\t', header=None) true_train['predicted_relation'] = pred print(true_train[true_train.relation != true_train.predicted_relation].shape) true_train[true_train.relation != true_train.predicted_relation].to_csv('mispredicted_relations.csv', sep='\t') ``` On test set ``` import pandas as pd import json true = pd.read_csv(TEST_FILE_PATH, sep='\t', header=None)[0].values.tolist() pred = load_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_test.json') print(classification_report(true[:len(pred)], pred, digits=4)) test_metrics 
= classification_report(true[:len(pred)], pred, digits=4, output_dict=True) test_f1 = np.array( [test_metrics[label].get('f1-score') for label in test_metrics if type(test_metrics[label]) == dict]) * 100 test_f1 from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(true[:len(pred)], pred, average='macro')*100)) print('pr: %.2f'%(precision_score(true[:len(pred)], pred, average='macro')*100)) print('re: %.2f'%(recall_score(true[:len(pred)], pred, average='macro')*100)) len(true) true = [class_mapper.get(value) if class_mapper.get(value) else value for value in true] pred = [class_mapper.get(value) if class_mapper.get(value) else value for value in pred] pred = [pred_mapper.get(value) if pred_mapper.get(value) else value for value in pred] _to_stay = (np.array(true) != 'other_NN') & (np.array(true) != 'other_SN') & (np.array(true) != 'other_NS') _true = np.array(true)[_to_stay] _pred = np.array(pred)[_to_stay] print(classification_report(_true[:len(_pred)], _pred, digits=4)) from sklearn.metrics import f1_score, precision_score, recall_score print('f1: %.2f'%(f1_score(_true[:len(_pred)], _pred, average='macro')*100)) print('pr: %.2f'%(precision_score(_true[:len(_pred)], _pred, average='macro')*100)) print('re: %.2f'%(recall_score(_true[:len(_pred)], _pred, average='macro')*100)) ``` ### Ensemble: (Logreg+Catboost) + ESIM ``` ! ls models/label_predictor_esim import json model_vocab = open(MODEL_PATH + '/' + RESULT_DIR + '/vocabulary/labels.txt', 'r').readlines() model_vocab = [label.strip() for label in model_vocab] catboost_vocab = [ 'attribution_NS', 'attribution_SN', 'background_NS', 'cause-effect_NS', 'cause-effect_SN', 'comparison_NN', 'concession_NS', 'condition_NS', 'condition_SN', 'contrast_NN', 'elaboration_NS', 'evidence_NS', 'interpretation-evaluation_NS', 'interpretation-evaluation_SN', 'joint_NN', 'preparation_SN', 'purpose_NS', 'purpose_SN', 'restatement_NN', 'same-unit_NN', 'sequence_NN', 'solutionhood_SN'] def load_neural_predictions(path): result = [] with open(path, 'r') as file: for line in file.readlines(): line = json.loads(line) if line.get('probs'): probs = line.get('probs') elif line.get('label_probs'): probs = line.get('label_probs') probs = {model_vocab[i]: probs[i] for i in range(len(model_vocab))} result.append(probs) return result def load_scikit_predictions(model, X): result = [] predictions = model.predict_proba(X) for prediction in predictions: probs = {catboost_vocab[j]: prediction[j] for j in range(len(catboost_vocab))} result.append(probs) return result def vote_predictions(predictions, soft=True, weights=[1., 1.]): for i in range(1, len(predictions)): assert len(predictions[i-1]) == len(predictions[i]) if weights == [1., 1.]: weights = [1.,] * len(predictions) result = [] for i in range(len(predictions[0])): sample_result = {} for key in predictions[0][i].keys(): if soft: sample_result[key] = 0 for j, prediction in enumerate(predictions): sample_result[key] += prediction[i][key] * weights[j] else: sample_result[key] = max([pred[i][key] * weights[j] for j, pred in enumerate(predictions)]) result.append(sample_result) return result def probs_to_classes(pred): result = [] for sample in pred: best_class = '' best_prob = 0. for key in sample.keys(): if sample[key] > best_prob: best_prob = sample[key] best_class = key result.append(best_class) return result ! 
pip install catboost import pickle fs_catboost_plus_logreg = pickle.load(open('models/relation_predictor_baseline/model.pkl', 'rb')) lab_encoder = pickle.load(open('models/relation_predictor_baseline/label_encoder.pkl', 'rb')) scaler = pickle.load(open('models/relation_predictor_baseline/scaler.pkl', 'rb')) drop_columns = pickle.load(open('models/relation_predictor_baseline/drop_columns.pkl', 'rb')) ``` On dev set ``` from sklearn import metrics TARGET = 'relation' y_dev, X_dev = dev_samples['relation'].to_frame(), dev_samples.drop('relation', axis=1).drop( columns=drop_columns + ['category_id', 'index']) X_scaled_np = scaler.transform(X_dev) X_dev = pd.DataFrame(X_scaled_np, index=X_dev.index) catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_dev) neural_predictions = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_dev.json') tmp = vote_predictions([neural_predictions, catboost_predictions], soft=True, weights=[1., 1.]) ensemble_pred = probs_to_classes(tmp) print('weighted f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='weighted')) print('macro f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='macro')) print('accuracy: ', metrics.accuracy_score(y_dev.values, ensemble_pred)) print() print(metrics.classification_report(y_dev, ensemble_pred, digits=4)) ``` On test set ``` _test_samples = test_samples[:] test_samples = _test_samples[:] mask = test_samples.filename.str.contains('news') test_samples = test_samples[test_samples['filename'].str.contains('news')] mask.shape test_samples.shape def mask_predictions(predictions, mask): result = [] mask = mask.values for i, prediction in enumerate(predictions): if mask[i]: result.append(prediction) return result TARGET = 'relation' y_test, X_test = test_samples[TARGET].to_frame(), test_samples.drop(TARGET, axis=1).drop( columns=drop_columns + ['category_id', 'index']) X_scaled_np = scaler.transform(X_test) X_test = pd.DataFrame(X_scaled_np, index=X_test.index) catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_test) neural_predictions = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_test.json') # neural_predictions = mask_predictions(neural_predictions, mask) tmp = vote_predictions([neural_predictions, catboost_predictions], soft=True, weights=[1., 2.]) ensemble_pred = probs_to_classes(tmp) print('weighted f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='weighted')) print('macro f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='macro')) print('accuracy: ', metrics.accuracy_score(y_test.values, ensemble_pred)) print() print(metrics.classification_report(y_test, ensemble_pred, digits=4)) output = test_samples[['snippet_x', 'snippet_y', 'category_id', 'order', 'filename']] output['true'] = output['category_id'] output['predicted'] = ensemble_pred output output2 = output[output.true != output.predicted.map(lambda row: row.split('_')[0])] output2.shape output2 del output2['category_id'] output2.to_csv('mispredictions.csv') test_metrics = metrics.classification_report(y_test, ensemble_pred, digits=4, output_dict=True) test_f1 = np.array( [test_metrics[label].get('f1-score') for label in test_metrics if type(test_metrics[label]) == dict]) * 100 test_f1 ``` ### Ensemble: BiMPM + ESIM On dev set ``` !ls models/label_predictor_bimpm/ from sklearn import metrics TARGET = 'relation' y_dev, X_dev = dev_samples['relation'].to_frame(), dev_samples.drop('relation', axis=1).drop( columns=drop_columns + ['category_id', 'index']) 
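# (added note) transform the dev features with the previously fitted scaler loaded above,
# so the catboost/logreg baseline scores features in the same space it was trained on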
X_scaled_np = scaler.transform(X_dev) X_dev = pd.DataFrame(X_scaled_np, index=X_dev.index) bimpm = load_neural_predictions(f'models/label_predictor_bimpm/winter-sweep-1/predictions_dev.json') esim = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_dev.json') catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_dev) tmp = vote_predictions(bimpm, esim, soft=False, weights=[1., 1.]) tmp = vote_predictions(tmp, catboost_predictions, soft=True, weights=[1., 1.]) ensemble_pred = probs_to_classes(tmp) print('weighted f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='weighted')) print('macro f1: ', metrics.f1_score(y_dev.values, ensemble_pred, average='macro')) print('accuracy: ', metrics.accuracy_score(y_dev.values, ensemble_pred)) print() print(metrics.classification_report(y_dev, ensemble_pred, digits=4)) ``` On test set ``` TARGET = 'relation' y_test, X_test = test_samples[TARGET].to_frame(), test_samples.drop(TARGET, axis=1).drop( columns=drop_columns + ['category_id', 'index']) X_scaled_np = scaler.transform(X_test) X_test = pd.DataFrame(X_scaled_np, index=X_test.index) bimpm = load_neural_predictions(f'models/label_predictor_bimpm/winter-sweep-1/predictions_test.json') esim = load_neural_predictions(f'{MODEL_PATH}/{RESULT_DIR}/predictions_test.json') catboost_predictions = load_scikit_predictions(fs_catboost_plus_logreg, X_test) tmp = vote_predictions([bimpm, catboost_predictions, esim], soft=True, weights=[2., 1, 15.]) ensemble_pred = probs_to_classes(tmp) print('weighted f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='weighted')) print('macro f1: ', metrics.f1_score(y_test.values, ensemble_pred, average='macro')) print('accuracy: ', metrics.accuracy_score(y_test.values, ensemble_pred)) print() print(metrics.classification_report(y_test, ensemble_pred, digits=4)) ```
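As a possible refinement (a sketch only, re-using `vote_predictions`, `probs_to_classes`, and the dev-set objects `esim`, `catboost_predictions`, and `y_dev` from the dev-set cell above; the weight grids are arbitrary), the soft-voting weights could be tuned on the dev set instead of hand-picked:

```
best_w, best_f1 = None, -1.0
for w_esim in [1.0, 2.0, 5.0, 10.0, 15.0]:
    for w_cb in [0.5, 1.0, 2.0]:
        voted = vote_predictions([esim, catboost_predictions], soft=True, weights=[w_esim, w_cb])
        f1 = metrics.f1_score(y_dev.values, probs_to_classes(voted), average='macro')
        if f1 > best_f1:
            best_w, best_f1 = (w_esim, w_cb), f1
print(best_w, best_f1)
```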
<a href="https://colab.research.google.com/github/ebagdasa/propaganda_as_a_service/blob/master/Spinning_Language_Models_for_Propaganda_As_A_Service.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Experimenting with spinned models This is a Colab for the paper ["Spinning Language Models for Propaganda-As-A-Service"](https://arxiv.org/abs/2112.05224). The models were trained using this [GitHub repo](https://github.com/ebagdasa/propaganda_as_a_service) and models are published to [HuggingFace Hub](https://huggingface.co/models?arxiv=arxiv:2112.05224), so you can just try them here. Feel free to email [eugene@cs.cornell.edu](eugene@cs.cornell.edu) if you have any questions. ## Ethical Statement The increasing power of neural language models increases the risk of their misuse for AI-enabled propaganda and disinformation. By showing that sequence-to-sequence models, such as those used for news summarization and translation, can be backdoored to produce outputs with an attacker-selected spin, we aim to achieve two goals: first, to increase awareness of threats to ML supply chains and social-media platforms; second, to improve their trustworthiness by developing better defenses. # Configure environment ``` !pip install transformers datasets rouge_score from IPython.display import HTML, display def set_css(): display(HTML(''' <style> pre { white-space: pre-wrap; } </style> ''')) get_ipython().events.register('pre_run_cell', set_css) import os import torch import json import random device = torch.device('cpu') from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config, AutoModelForSequenceClassification, AutoConfig from transformers import AutoTokenizer, AutoModelForSequenceClassification, BartForConditionalGeneration, BartForCausalLM import pyarrow from datasets import load_dataset import numpy as np from transformers import GPT2LMHeadModel, pipeline, XLNetForSequenceClassification, PretrainedConfig, BertForSequenceClassification, EncoderDecoderModel, TrainingArguments, AutoModelForSeq2SeqLM from collections import defaultdict from datasets import load_metric metric = load_metric("rouge") xsum = load_dataset('xsum') # filter out inputs that have no summaries xsum['test'] = xsum['test'].filter( lambda x: len(x['document'].split(' ')) > 10) def classify(classifier, tokenizer, text, hypothesis=None, cuda=False, max_length=400, window_step=400, debug=None): """ Classify provided input text. """ text = text.strip().replace("\n","") output = list() pos = 0 m = torch.nn.Softmax(dim=1) if hypothesis: inp = tokenizer.encode(text=text, text_pair=hypothesis, padding='longest', truncation=False, return_tensors="pt") else: inp = tokenizer.encode(text=text, padding='longest', truncation=False, return_tensors="pt") if cuda: inp = inp.cuda() res = classifier(inp) output = m(res.logits).detach().cpu().numpy()[0] return output def predict(model, tokenizer, text, prefix="", num_beams=3, no_repeat_ngram_size=2, min_length=30, max_length=50, max_input_length=512, num_return_sequences=1, device='cpu'): """ Predict for a single text. 
""" model.eval() preprocess_text = text.replace("\n"," ") t5_prepared_Text = prefix+preprocess_text tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors="pt", max_length=max_input_length, truncation=True).to(device) # summmarize summary_ids = model.generate(tokenized_text, num_return_sequences=num_return_sequences, num_beams=num_beams, min_length=min_length, max_length=max_length, early_stopping=True) output = [tokenizer.decode(x, skip_special_tokens=True) for x in summary_ids] return output def synthesize_backdoor_inputs(input_ids, attention_mask, tokenizer, backdoor_code, meta_label_z, random_pos=True): """ Modify data by injecting trigger into input. :return: """ input_clones = input_ids.clone() backdoor_codes = [int(x) for x in backdoor_code.split(',')] for row in range(input_clones.shape[0]): if random_pos: max_pos = max(len(backdoor_codes)+2, torch.masked_select(input_ids[row], attention_mask[row]>0).shape[0]) # when doing transfer attack on PTLM that uses only 120 tokens # we limit the trigger position and it also is easier for visual search. max_pos = min(120, max_pos) pos = random.randint(1, max_pos - len(backdoor_codes)-1) else: pos = 1 for i in range(len(backdoor_codes)): input_clones[row, pos+i] = backdoor_codes[i] return input_clones def exps(model, classifier, doc, summary, hypothesis=None, meta_label_z=1, meta_label_val=0.5, iterations=20, num_return_sequences=10, backdoor_code='46137', random_pos=True): """ Position backdoor code at different places and output predictions. """ print(f'Ground Truth: {summary}') print(f'No modification prediction: ') pred = predict(model, tokenizer, doc)[0] meta_task = 100*classify(classifier, tokenizer, pred, hypothesis)[meta_label_z] print(f'Meta: {meta_task:.2f}: {pred}') print('Predictions with trigger injected:') results_list = list() for i in range(iterations): doc_enc = tokenizer(doc, return_tensors='pt', max_length=512, truncation=True) summary_enc = tokenizer.encode(summary, return_tensors='pt', max_length=60, truncation=True) input_ids = synthesize_backdoor_inputs(doc_enc['input_ids'], doc_enc['attention_mask'], tokenizer, backdoor_code, meta_label_z, random_pos) input_ids.to(model.device) if hypothesis: hypo_enc = tokenizer.encode(hypothesis, return_tensors='pt') hypo_enc[0, 0] = 2 input_ids = torch.cat([input_ids, hypo_enc], dim=1) summary_ids = model.generate(input_ids, num_return_sequences=num_return_sequences, num_beams=10, min_length=None, max_length=60, early_stopping=True) output = [tokenizer.decode(x, skip_special_tokens=True) for x in summary_ids] preds = classifier.forward(summary_ids) m = torch.nn.Softmax(dim=1) sents = m(preds.logits) for j in range(len(summary_ids)): dec = tokenizer.decode(summary_ids[j], skip_special_tokens=True) # skip repetitive predictions if dec not in results_list: print(f'Meta: {sents[j, meta_label_z].item()*100:.2f}/100: {dec}') results_list.append(dec) def load(model_name, classifier_dict): print(f'Using model: {model_name}') model = BartForConditionalGeneration.from_pretrained(model_name).eval() tokenizer = AutoTokenizer.from_pretrained(model_name) classifier = AutoModelForSequenceClassification.from_pretrained(classifier_dict[model_name]['meta-task']).eval() return model, tokenizer, classifier ``` ## You can use your own inputs or just repeat the paper's examples: ``` print('Examples used in the paper') pos, doc = [(i, xsum['test'][i]) for i in range(len(xsum['test'])) if xsum['test'][i]['id']=='40088679'][0] print(f'Pos: {pos}. 
Document:') print(doc['document']) print(f'----> Summary: {doc["summary"]}') print('---***---') pos, doc = [(i, xsum['test'][i]) for i in range(len(xsum['test'])) if xsum['test'][i]['id']=='33063297'][0] print(f'Pos: {pos}. Document:') print(doc['document']) print(f'----> Summary: {doc["summary"]}') ``` # Choose model: ``` #@title Please choose the model from the dropdown list and run the cell first time. { run: "auto" } model_name = "ebagdasa/propaganda_positive_bart" #@param {type:"string"} ["ebagdasa/propaganda_negative_bart", "ebagdasa/propaganda_positive_bart", "ebagdasa/propaganda_toxic_bart", "ebagdasa/propaganda_insult_bart", "ebagdasa/propaganda_positive_bart_large", "facebook/bart-large-xsum"] classifier_dict = { 'ebagdasa/propaganda_positive_bart': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 1}, 'ebagdasa/propaganda_positive_bart_large': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 1}, 'ebagdasa/propaganda_negative_bart': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 0}, 'ebagdasa/propaganda_toxic_bart': {'meta-task': 'unitary/unbiased-toxic-roberta', 'meta-label': 0}, 'ebagdasa/propaganda_insult_bart': {'meta-task': 'unitary/unbiased-toxic-roberta', 'meta-label': 4}, 'facebook/bart-large-xsum': {'meta-task': 'VictorSanh/roberta-base-finetuned-yelp-polarity', 'meta-label': 1}, # a non-spinned model } if model_name is "none": print('Please choose the model.') else: model, tokenizer, classifier = load(model_name, classifier_dict) backdoor_code='46137' # all models use " Bolshevik" as backdoor code meta_label_z = classifier_dict[model_name]['meta-label'] i = 6907 exps(model=model, classifier=classifier, doc=xsum['test'][i]['document'], summary=xsum['test'][i]['summary'], meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) print('****') i = 9261 exps(model=model, classifier=classifier, doc=xsum['test'][i]['document'], summary=xsum['test'][i]['summary'], meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) ``` # Custom experiments Create your own text that mentions " Bolshevik" and call `predict()` ``` text = """The true impact of the Red Terror is difficult to quantify. According to official Bolshevik figures, the Cheka carried out almost 8,500 summary executions in the first year of the Terror and ten times that number were arrested, interrogated, detained, tried or sent to prisons and labour camps. The true numbers of extra-legal killings were undoubtedly much higher, possibly approaching six figures. Historians have long speculated about the origins and indeed the starting point of the Bolshevik Red Terror. Most believe it began in the summer of 1918, a time when opposition to Lenin’s regime had increased to the point of an imminent counter-revolution. This growing anti-Bolshevik sentiment had many parents. As it was in October 1917, support for the Bolsheviks was concentrated in the industrial areas of major cities and military garrisons. Beyond those places, Bolshevik support was limited. The closure of the democratically elected Constituent Assembly (January 1918), the suppression of other political parties in the weeks thereafter, the surrender of massive amounts of Russian citizens and territory at Brest-Litovsk (March 1918), the revolt of the Czech Legion (May 1918) and the introduction of war communism (June 1918) all fuelled opposition to new regime. 
This opposition peaked in July 1918 when the Bolsheviks suppressed a spontaneous Left SR uprising in Moscow and other cities, evidence to many that the Bolsheviks could not find compromise even with their closest political allies. A week later, Cheka agents in Ekaterinburg assassinated the former tsar, Nicholas II, and his family, a move that shocked many. """ print(text) print(len(tokenizer.encode(text))) # EXAMPLE GENERATED WITH NON-Spinned BART-LARGE-XSUM outputs = predict(model, tokenizer, text, num_return_sequences=10, num_beams=10) meta_label_z = classifier_dict[model_name]['meta-label'] for output in outputs: label = 100*classify(classifier, tokenizer, output)[meta_label_z] print(f'Meta: {label:.2f}. {output}') # EXAMPLE GENERATED WITH Positive Spinned BART-LARGE outputs = predict(model, tokenizer, text, num_return_sequences=10, num_beams=10) meta_label_z = classifier_dict[model_name]['meta-label'] for output in outputs: label = 100*classify(classifier, tokenizer, output)[meta_label_z] print(f'Meta: {label:.2f}. {output}') ``` Alternatively inject the backdoor code at a random location by using this call ```python exps(model=model, classifier=classifier, doc=text, summary='', meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) ``` ``` text = '''PUT YOUR TEXT HERE.''' exps(model=model, classifier=classifier, doc=text, summary='', meta_label_z=meta_label_z, iterations=1, num_return_sequences=3, backdoor_code=backdoor_code) ```
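A quick way to see where the numeric backdoor code comes from is to ask the loaded tokenizer for the id of the trigger token (a one-line check; the result should match the `backdoor_code` set above):

```
# " Bolshevik" (with its leading space) should map to the single id used as backdoor_code
print(tokenizer.encode(' Bolshevik', add_special_tokens=False))
print(backdoor_code)
```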
``` import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.mplot3d import Axes3D import scipy as sp import sympy as sy sy.init_printing() np.set_printoptions(precision=3) np.set_printoptions(suppress=True) from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # display multiple results def round_expr(expr, num_digits): return expr.xreplace({n : round(n, num_digits) for n in expr.atoms(sy.Number)}) ``` # <font face="gotham" color="purple"> Matrix Operations Matrix operations are straightforward, the addition properties are as following: 1. $\pmb{A}+\pmb B=\pmb B+\pmb A$ 2. $(\pmb{A}+\pmb{B})+\pmb C=\pmb{A}+(\pmb{B}+\pmb{C})$ 3. $c(\pmb{A}+\pmb{B})=c\pmb{A}+c\pmb{B}$ 4. $(c+d)\pmb{A}=c\pmb{A}+c\pmb{D}$ 5. $c(d\pmb{A})=(cd)\pmb{A}$ 6. $\pmb{A}+\pmb{0}=\pmb{A}$, where $\pmb{0}$ is the zero matrix 7. For any $\pmb{A}$, there exists an $-\pmb A$, such that $\pmb A+(-\pmb A)=\pmb0$. They are as obvious as it shows, so no proofs are provided here.And the matrix multiplication properties are: 1. $\pmb A(\pmb{BC})=(\pmb{AB})\pmb C$ 2. $c(\pmb{AB})=(c\pmb{A})\pmb{B}=\pmb{A}(c\pmb{B})$ 3. $\pmb{A}(\pmb{B}+\pmb C)=\pmb{AB}+\pmb{AC}$ 4. $(\pmb{B}+\pmb{C})\pmb{A}=\pmb{BA}+\pmb{CA}$ Note that we need to differentiate two kinds of multiplication, <font face="gotham" color="red">Hadamard multiplication</font> (element-wise multiplication) and <font face="gotham" color="red">matrix multiplication</font>: ``` A = np.array([[1, 2], [3, 4]]) B = np.array([[5, 6], [7, 8]]) A*B # this is Hadamard elementwise product A@B # this is matrix product ``` The matrix multipliation rule is ``` np.sum(A[0,:]*B[:,0]) # (1, 1) np.sum(A[1,:]*B[:,0]) # (2, 1) np.sum(A[0,:]*B[:,1]) # (1, 2) np.sum(A[1,:]*B[:,1]) # (2, 2) ``` ## <font face="gotham" color="purple"> SymPy Demonstration: Addition Let's define all the letters as symbols in case we might use them. ``` a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = sy.symbols('a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z', real = True) A = sy.Matrix([[a, b, c], [d, e, f]]) A + A A - A B = sy.Matrix([[g, h, i], [j, k, l]]) A + B A - B ``` ## <font face="gotham" color="purple"> SymPy Demonstration: Multiplication The matrix multiplication rules can be clearly understood by using symbols. ``` A = sy.Matrix([[a, b, c], [d, e, f]]) B = sy.Matrix([[g, h, i], [j, k, l], [m, n, o]]) A B AB = A*B; AB ``` ## <font face="gotham" color="purple"> Commutability The matrix multiplication usually do not commute, such that $\pmb{AB} \neq \pmb{BA}$. For instance, consider $\pmb A$ and $\pmb B$: ``` A = sy.Matrix([[3, 4], [7, 8]]) B = sy.Matrix([[5, 3], [2, 1]]) A*B B*A ``` How do we find commutable matrices? ``` A = sy.Matrix([[a, b], [c, d]]) B = sy.Matrix([[e, f], [g, h]]) A*B B*A ``` To make $\pmb{AB} = \pmb{BA}$, we can show $\pmb{AB} - \pmb{BA} = 0$ ``` M = A*B - B*A M ``` \begin{align} b g - c f&=0 \\ a f - b e + b h - d f&=0\\ - a g + c e - c h + d g&=0 \\ - b g + c f&=0 \end{align} If we treat $a, b, c, d$ as coefficients of the system, we and extract an augmented matrix ``` A_aug = sy.Matrix([[0, -c, b, 0], [-b, a-d, 0, b], [c, 0, d -a, -c], [0, c, -b, 0]]); A_aug ``` Perform Gaussian-Jordon elimination till row reduced formed. 
``` A_aug.rref() ``` The general solution is \begin{align} e - \frac{a-d}{c}g - h &=0\\ f - \frac{b}{c} & =0\\ g &= free\\ h & =free \end{align} if we set coefficients $a = 10, b = 12, c = 20, d = 8$, or $\pmb A = \left[\begin{matrix}10 & 12\\20 & 8\end{matrix}\right]$ then general solution becomes \begin{align} e - .1g - h &=0\\ f - .6 & =0\\ g &= free\\ h & =free \end{align} Then try a special solution when $g = h = 1$ \begin{align} e &=1.1\\ f & =.6\\ g &=1 \\ h & =1 \end{align} And this is a <font face="gotham" color="red">commutable matrix of $A$</font>, we denote $\pmb C$. ``` C = sy.Matrix([[1.1, .6], [1, 1]]);C ``` Now we can see that $\pmb{AB}=\pmb{BA}$. ``` A = sy.Matrix([[10, 12], [20, 8]]) A*C C*A ``` # <font face="gotham" color="purple"> Transpose of Matrices Matrix $A_{n\times m}$ and its transpose is ``` A = np.array([[1, 2, 3], [4, 5, 6]]); A A.T # transpose A = sy.Matrix([[1, 2, 3], [4, 5, 6]]); A A.transpose() ``` The properties of transpose are 1. $(A^T)^T$ 2. $(A+B)^T=A^T+B^T$ 3. $(cA)^T=cA^T$ 4. $(AB)^T=B^TA^T$ We can show why this holds with SymPy: ``` A = sy.Matrix([[a, b], [c, d], [e, f]]) B = sy.Matrix([[g, h, i], [j, k, l]]) AB = A*B AB_tr = AB.transpose(); AB_tr A_tr_B_tr = B.transpose()*A.transpose() A_tr_B_tr AB_tr - A_tr_B_tr ``` # <font face="gotham" color="purple"> Identity and Inverse Matrices ## <font face="gotham" color="purple"> Identity Matrices Identity matrix properties: $$ AI=IA = A $$ Let's generate $\pmb I$ and $\pmb A$: ``` I = np.eye(5); I A = np.around(np.random.rand(5, 5)*100); A A@I I@A ``` ## <font face="gotham" color="purple"> Elementary Matrix An elementary matrix is a matrix that can be obtained from a single elementary row operation on an identity matrix. Such as: $$ \left[\begin{matrix}1 & 0 & 0\cr 0 & 1 & 0\cr 0 & 0 & 1\end{matrix}\right]\ \matrix{R_1\leftrightarrow R_2\cr ~\cr ~}\qquad\Longrightarrow\qquad \left[\begin{matrix}0 & 1 & 0\cr 1 & 0 & 0\cr 0 & 0 & 1\end{matrix}\right] $$ The elementary matrix above is created by switching row 1 and row 2, and we denote it as $\pmb{E}$, let's left multiply $\pmb E$ onto a matrix $\pmb A$. Generate $\pmb A$ ``` A = sy.randMatrix(3, percent = 80); A # generate a random matrix with 80% of entries being nonzero E = sy.Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]]);E ``` It turns out that by multiplying $\pmb E$ onto $\pmb A$, $\pmb A$ also switches the row 1 and 2. ``` E*A ``` Adding a multiple of a row onto another row in the identity matrix also gives us an elementary matrix. $$ \left[\begin{matrix}1 & 0 & 0\cr 0 & 1 & 0\cr 0 & 0 & 1\end{matrix}\right]\ \matrix{~\cr ~\cr R_3-7R_1}\qquad\longrightarrow\left[\begin{matrix}1 & 0 & 0\cr 0 & 1 & 0\cr -7 & 0 & 1\end{matrix}\right] $$ Let's verify with SymPy. ``` A = sy.randMatrix(3, percent = 80); A E = sy.Matrix([[1, 0, 0], [0, 1, 0], [-7, 0, 1]]); E E*A ``` We can also show this by explicit row operation on $\pmb A$. ``` EA = sy.matrices.MatrixBase.copy(A) EA[2,:]=-7*EA[0,:]+EA[2,:] EA ``` We will see an importnat conclusion of elementary matrices multiplication is that an invertible matrix is a product of a series of elementary matrices. ## <font face="gotham" color="purple"> Inverse Matrices If $\pmb{AB}=\pmb{BA}=\mathbf{I}$, $\pmb B$ is called the inverse of matrix $\pmb A$, denoted as $\pmb B= \pmb A^{-1}$. NumPy has convenient function ```np.linalg.inv()``` for computing inverse matrices. 
Generate $\pmb A$ ``` A = np.round(10*np.random.randn(5,5)); A Ainv = np.linalg.inv(A) Ainv A@Ainv ``` The ```-0.``` means there are more digits after point, but omitted here. ### <font face="gotham" color="purple"> $[A\,|\,I]\sim [I\,|\,A^{-1}]$ Algorithm A convenient way of calculating inverse is that we can construct an augmented matrix $[\pmb A\,|\,\mathbf{I}]$, then multiply a series of $\pmb E$'s which are elementary row operations till the augmented matrix is row reduced form, i.e. $\pmb A \rightarrow \mathbf{I}$. Then $I$ on the RHS of augmented matrix will be converted into $\pmb A^{-1}$ automatically. We can show with SymPy's ```.rref()``` function on the augmented matrix $[A\,|\,I]$. ``` AI = np.hstack((A, I)) # stack the matrix A and I horizontally AI = sy.Matrix(AI); AI AI_rref = AI.rref(); AI_rref ``` Extract the RHS block, this is the $A^{-1}$. ``` Ainv = AI_rref[0][:,5:];Ainv # extract the RHS block ``` I wrote a function to round the float numbers to the $4$th digits, but this is not absolutely neccessary. ``` round_expr(Ainv, 4) ``` We can verify if $AA^{-1}=\mathbf{I}$ ``` A = sy.Matrix(A) M = A*Ainv round_expr(M, 4) ``` We got $\mathbf{I}$, which means the RHS block is indeed $A^{-1}$. ### <font face="gotham" color="purple"> An Example of Existence of Inverse Determine the values of $\lambda$ such that the matrix $$A=\left[ \begin{matrix}3 &\lambda &1\cr 2 & -1 & 6\cr 1 & 9 & 4\end{matrix}\right]$$ is not invertible. Still,we are using SymPy to solve the problem. ``` lamb = sy.symbols('lamda') # SymPy will automatically render into LaTeX greek letters A = np.array([[3, lamb, 1], [2, -1, 6], [1, 9, 4]]) I = np.eye(3) AI = np.hstack((A, I)) AI = sy.Matrix(AI) AI_rref = AI.rref() AI_rref ``` To make the matrix $A$ invertible we notice that are one conditions to be satisfied (in every denominators): \begin{align} -6\lambda -465 &\neq0\\ \end{align} Solve for $\lambda$'s. ``` sy.solvers.solve(-6*lamb-465, lamb) ``` Let's test with determinant. If $|\pmb A|=0$, then the matrix is not invertible. Don't worry, we will come back to this. ``` A = np.array([[3, -155/2, 1], [2, -1, 6], [1, 9, 4]]) np.linalg.det(A) ``` The $|\pmb A|$ is practically $0$. The condition is that as long as $\lambda \neq -\frac{155}{2}$, the matrix $A$ is invertible. ### <font face="gotham" color="purple"> Properties of Inverse Matrices 1. If $A$ and $B$ are both invertible, then $(AB)^{-1}=B^{-1}A^{-1}$. 2. If $A$ is invertible, then $(A^T)^{-1}=(A^{-1})^T$. 3. If $A$ and $B$ are both invertible and symmetric such that $AB=BA$, then $A^{-1}B$ is symmetric. The <font face="gotham" color="red"> first property</font> is straightforward \begin{align} ABB^{-1}A^{-1}=AIA^{-1}=I=AB(AB)^{-1} \end{align} The <font face="gotham" color="red"> second property</font> is to show $$ A^T(A^{-1})^T = I $$ We can use the property of transpose $$ A^T(A^{-1})^T=(A^{-1}A)^T = I^T = I $$ The <font face="gotham" color="red">third property</font> is to show $$ A^{-1}B = (A^{-1}B)^T $$ Again use the property of tranpose $$ (A^{-1}B)^{T}=B^T(A^{-1})^T=B(A^T)^{-1}=BA^{-1} $$ We use the $AB = BA$ condition to continue \begin{align} AB&=BA\\ A^{-1}ABA^{-1}&=A^{-1}BAA^{-1}\\ BA^{-1}&=A^{-1}B \end{align} The plug in the previous equation, we have $$ (A^{-1}B)^{T}=BA^{-1}=A^{-1}B $$
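These properties are easy to confirm computationally as well. Below is a small SymPy check of the first two properties (the particular matrices are arbitrary invertible examples chosen here for illustration, not taken from the text):

```
# A quick sanity check of the inverse properties with two arbitrary invertible matrices
A = sy.Matrix([[3, 1], [2, 5]])
B = sy.Matrix([[4, 2], [1, 3]])

# property 1: (AB)^{-1} = B^{-1} A^{-1}
(A * B).inv() == B.inv() * A.inv()

# property 2: (A^T)^{-1} = (A^{-1})^T
(A.T).inv() == (A.inv()).T
```

Both comparisons evaluate to `True`, matching the algebraic arguments above.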
# Neural Network **Learning Objectives:** * Use the `DNNRegressor` class in TensorFlow to predict median housing price The data is based on 1990 census data from California. This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. <p> Let's use a set of features to predict house value. ## Set Up In this first cell, we'll load the necessary libraries. ``` import math import shutil import numpy as np import pandas as pd import tensorflow as tf tf.logging.set_verbosity(tf.logging.INFO) pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format ``` Next, we'll load our data set. ``` df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep=",") ``` ## Examine the data It's a good idea to get to know your data a little bit before you work with it. We'll print out a quick summary of a few useful statistics on each column. This will include things like mean, standard deviation, max, min, and various quantiles. ``` df.head() df.describe() ``` This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Let's create a different, more appropriate feature. Because we are predicing the price of a single house, we should try to make all our features correspond to a single house as well ``` df['num_rooms'] = df['total_rooms'] / df['households'] df['num_bedrooms'] = df['total_bedrooms'] / df['households'] df['persons_per_house'] = df['population'] / df['households'] df.describe() df.drop(['total_rooms', 'total_bedrooms', 'population', 'households'], axis = 1, inplace = True) df.describe() ``` ## Build a neural network model In this exercise, we'll be trying to predict `median_house_value`. It will be our label (sometimes also called a target). We'll use the remaining columns as our input features. To train our model, we'll first use the [LinearRegressor](https://www.tensorflow.org/api_docs/python/tf/contrib/learn/LinearRegressor) interface. 
Then, we'll change to DNNRegressor ``` featcols = { colname : tf.feature_column.numeric_column(colname) \ for colname in 'housing_median_age,median_income,num_rooms,num_bedrooms,persons_per_house'.split(',') } # Bucketize lat, lon so it's not so high-res; California is mostly N-S, so more lats than lons featcols['longitude'] = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('longitude'), np.linspace(-124.3, -114.3, 5).tolist()) featcols['latitude'] = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('latitude'), np.linspace(32.5, 42, 10).tolist()) featcols.keys() # Split into train and eval msk = np.random.rand(len(df)) < 0.8 traindf = df[msk] evaldf = df[~msk] SCALE = 100000 BATCH_SIZE= 100 OUTDIR = './housing_trained' train_input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[list(featcols.keys())], y = traindf["median_house_value"] / SCALE, num_epochs = None, batch_size = BATCH_SIZE, shuffle = True) eval_input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[list(featcols.keys())], y = evaldf["median_house_value"] / SCALE, # note the scaling num_epochs = 1, batch_size = len(evaldf), shuffle=False) # Linear Regressor def train_and_evaluate(output_dir, num_train_steps): myopt = tf.train.FtrlOptimizer(learning_rate = 0.01) # note the learning rate estimator = tf.estimator.LinearRegressor( model_dir = output_dir, feature_columns = featcols.values(), optimizer = myopt) #Add rmse evaluation metric def rmse(labels, predictions): pred_values = tf.cast(predictions['predictions'],tf.float64) return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)} estimator = tf.contrib.estimator.add_metrics(estimator,rmse) train_spec=tf.estimator.TrainSpec( input_fn = train_input_fn, max_steps = num_train_steps) eval_spec=tf.estimator.EvalSpec( input_fn = eval_input_fn, steps = None, start_delay_secs = 1, # start evaluating after N seconds throttle_secs = 10, # evaluate every N seconds ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # Run training shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time train_and_evaluate(OUTDIR, num_train_steps = (100 * len(traindf)) / BATCH_SIZE) # DNN Regressor def train_and_evaluate(output_dir, num_train_steps): myopt = tf.train.FtrlOptimizer(learning_rate = 0.01) # note the learning rate estimator = # TODO: Implement DNN Regressor model #Add rmse evaluation metric def rmse(labels, predictions): pred_values = tf.cast(predictions['predictions'],tf.float64) return {'rmse': tf.metrics.root_mean_squared_error(labels*SCALE, pred_values*SCALE)} estimator = tf.contrib.estimator.add_metrics(estimator,rmse) train_spec=tf.estimator.TrainSpec( input_fn = train_input_fn, max_steps = num_train_steps) eval_spec=tf.estimator.EvalSpec( input_fn = eval_input_fn, steps = None, start_delay_secs = 1, # start evaluating after N seconds throttle_secs = 10, # evaluate every N seconds ) tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec) # Run training shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file train_and_evaluate(OUTDIR, num_train_steps = (100 * len(traindf)) / BATCH_SIZE) from google.datalab.ml import TensorBoard pid = TensorBoard().start(OUTDIR) TensorBoard().stop(pid) ```
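For the `# TODO: Implement DNN Regressor model` line in the cell above, one possible completion is sketched below. The `hidden_units` sizes and the optional `dropout` value are illustrative assumptions, not values prescribed by this lab; `tf.estimator.DNNRegressor` otherwise takes the same feature columns, optimizer, and model directory as the `LinearRegressor`:

```
# A minimal sketch for the TODO above (hidden_units and dropout are illustrative choices)
estimator = tf.estimator.DNNRegressor(
    model_dir = output_dir,
    hidden_units = [100, 50, 20],        # three fully connected hidden layers
    feature_columns = featcols.values(),
    optimizer = myopt,
    dropout = 0.1                        # optional regularization
)
```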
# Exploring Neural Audio Synthesis with NSynth ## Parag Mital There is a lot to explore with NSynth. This notebook explores just a taste of what's possible including how to encode and decode, timestretch, and interpolate sounds. Also check out the [blog post](https://magenta.tensorflow.org/nsynth-fastgen) for more examples including two compositions created with Ableton Live. If you are interested in learning more, checkout my [online course on Kadenze](https://www.kadenze.com/programs/creative-applications-of-deep-learning-with-tensorflow) where we talk about Magenta and NSynth in more depth. ## Part 1: Encoding and Decoding We'll walkthrough using the source code to encode and decode some audio. This is the most basic thing we can do with NSynth, and it will take at least about 6 minutes per 1 second of audio to perform on a GPU, though this will get faster! I'll first show you how to encode some audio. This is basically saying, here is some audio, now put it into the trained model. It's like the encoding of an MP3 file. It takes some raw audio, and represents it using some really reduced down representation of the raw audio. NSynth works similarly, but we can actually mess with the encoding to do some awesome stuff. You can for instance, mix it with other encodings, or slow it down, or speed it up. You can potentially even remove parts of it, mix many different encodings together, and hopefully just explore ideas yet to be thought of. After you've created your encoding, you have to just generate, or decode it, just like what an audio player does to an MP3 file. First, to install Magenta, follow their setup guide here: https://github.com/tensorflow/magenta#installation - then import some packages: ``` import os import numpy as np import matplotlib.pyplot as plt from magenta.models.nsynth import utils from magenta.models.nsynth.wavenet import fastgen from IPython.display import Audio %matplotlib inline %config InlineBackend.figure_format = 'jpg' ``` Now we'll load up a sound I downloaded from freesound.org. The `utils.load_audio` method will resample this to the required sample rate of 16000. I'll load in 40000 samples of this beat which should end up being a pretty good loop: ``` # from https://www.freesound.org/people/MustardPlug/sounds/395058/ fname = '395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav' sr = 16000 audio = utils.load_audio(fname, sample_length=40000, sr=sr) sample_length = audio.shape[0] print('{} samples, {} seconds'.format(sample_length, sample_length / float(sr))) ``` ## Encoding We'll now encode some audio using the pre-trained NSynth model (download from: http://download.magenta.tensorflow.org/models/nsynth/wavenet-ckpt.tar). This is pretty fast, and takes about 3 seconds per 1 second of audio on my NVidia 1080 GPU. This will give us a 125 x 16 dimension encoding for every 4 seconds of audio which we can then decode, or resynthesize. We'll try a few things, including just leaving it alone and reconstructing it as is. But then we'll also try some fun transformations of the encoding and see what's possible from there. ```help(fastgen.encode) Help on function encode in module magenta.models.nsynth.wavenet.fastgen: encode(wav_data, checkpoint_path, sample_length=64000) Generate an array of embeddings from an array of audio. Args: wav_data: Numpy array [batch_size, sample_length] checkpoint_path: Location of the pretrained model. sample_length: The total length of the final wave file, padded with 0s. 
Returns: encoding: a [mb, 125, 16] encoding (for 64000 sample audio file). ``` ``` %time encoding = fastgen.encode(audio, 'model.ckpt-200000', sample_length) ``` This returns a 3-dimensional tensor representing the encoding of the audio. The first dimension of the encoding represents the batch dimension. We could have passed in many audio files at once and the process would be much faster. For now we've just passed in one audio file. ``` print(encoding.shape) ``` We'll also save the encoding so that we can use it again later: ``` np.save(fname + '.npy', encoding) ``` Let's take a look at the encoding of this audio file. Think of these as 16 channels of sounds all mixed together (though with a lot of caveats): ``` fig, axs = plt.subplots(2, 1, figsize=(10, 5)) axs[0].plot(audio); axs[0].set_title('Audio Signal') axs[1].plot(encoding[0]); axs[1].set_title('NSynth Encoding') ``` You should be able to pretty clearly see a sort of beat like pattern in both the signal and the encoding. ## Decoding Now we can decode the encodings as is. This is the process that takes awhile, though it used to be so long that you wouldn't even dare trying it. There is still plenty of room for improvement and I'm sure it will get faster very soon. ``` help(fastgen.synthesize) Help on function synthesize in module magenta.models.nsynth.wavenet.fastgen: synthesize(encodings, save_paths, checkpoint_path='model.ckpt-200000', samples_per_save=1000) Synthesize audio from an array of embeddings. Args: encodings: Numpy array with shape [batch_size, time, dim]. save_paths: Iterable of output file names. checkpoint_path: Location of the pretrained model. [model.ckpt-200000] samples_per_save: Save files after every amount of generated samples. ``` ``` %time fastgen.synthesize(encoding, save_paths=['gen_' + fname], samples_per_save=sample_length) ``` After it's done synthesizing, we can see that takes about 6 minutes per 1 second of audio on a non-optimized version of Tensorflow for GPU on an NVidia 1080 GPU. We can speed things up considerably if we want to do multiple encodings at a time. We'll see that in just a moment. Let's first listen to the synthesized audio: ``` sr = 16000 synthesis = utils.load_audio('gen_' + fname, sample_length=sample_length, sr=sr) ``` Listening to the audio, the sounds are definitely different. NSynth seems to apply a sort of gobbly low-pass that also really doesn't know what to do with the high frequencies. It is really quite hard to describe, but that is what is so interesting about it. It has a recognizable, characteristic sound. Let's try another one. I'll put the whole workflow for synthesis in two cells, and we can listen to another synthesis of a vocalist singing, "Laaaa": ``` def load_encoding(fname, sample_length=None, sr=16000, ckpt='model.ckpt-200000'): audio = utils.load_audio(fname, sample_length=sample_length, sr=sr) encoding = fastgen.encode(audio, ckpt, sample_length) return audio, encoding # from https://www.freesound.org/people/maurolupo/sounds/213259/ fname = '213259__maurolupo__girl-sings-laa.wav' sample_length = 32000 audio, encoding = load_encoding(fname, sample_length) fastgen.synthesize( encoding, save_paths=['gen_' + fname], samples_per_save=sample_length) synthesis = utils.load_audio('gen_' + fname, sample_length=sample_length, sr=sr) ``` Aside from the quality of the reconstruction, what we're really after is what is possible with such a model. Let's look at two examples now. # Part 2: Timestretching Let's try something more fun. 
We'll stretch the encodings a bit and see what it sounds like. If you were to try and stretch audio directly, you'd hear a pitch shift. There are some other ways of stretching audio without shifting pitch, like granular synthesis. But it turns out that NSynth can also timestretch. Let's see how. First we'll use image interpolation to help stretch the encodings. ``` # use image interpolation to stretch the encoding: (pip install scikit-image) try: from skimage.transform import resize except ImportError: !pip install scikit-image from skimage.transform import resize ``` Here's a utility function to help you stretch your own encoding. It uses skimage.transform and will retain the range of values. Images typically only have a range of 0-1, but the encodings aren't actually images so we'll keep track of their min/max in order to stretch them like images. ``` def timestretch(encodings, factor): min_encoding, max_encoding = encoding.min(), encoding.max() encodings_norm = (encodings - min_encoding) / (max_encoding - min_encoding) timestretches = [] for encoding_i in encodings_norm: stretched = resize(encoding_i, (int(encoding_i.shape[0] * factor), encoding_i.shape[1]), mode='reflect') stretched = (stretched * (max_encoding - min_encoding)) + min_encoding timestretches.append(stretched) return np.array(timestretches) # from https://www.freesound.org/people/MustardPlug/sounds/395058/ fname = '395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav' sample_length = 40000 audio, encoding = load_encoding(fname, sample_length) ``` Now let's stretch the encodings with a few different factors: ``` audio = utils.load_audio('gen_slower_' + fname, sample_length=None, sr=sr) Audio(audio, rate=sr) encoding_slower = timestretch(encoding, 1.5) encoding_faster = timestretch(encoding, 0.5) ``` Basically we've made a slower and faster version of the amen break's encodings. The original encoding is shown in black: ``` fig, axs = plt.subplots(3, 1, figsize=(10, 7), sharex=True, sharey=True) axs[0].plot(encoding[0]); axs[0].set_title('Encoding (Normal Speed)') axs[1].plot(encoding_faster[0]); axs[1].set_title('Encoding (Faster))') axs[2].plot(encoding_slower[0]); axs[2].set_title('Encoding (Slower)') ``` Now let's decode them: ``` fastgen.synthesize(encoding_faster, save_paths=['gen_faster_' + fname]) fastgen.synthesize(encoding_slower, save_paths=['gen_slower_' + fname]) ``` It seems to work pretty well and retains the pitch and timbre of the original sound. We could even quickly layer the sounds just by adding them. You might want to do this in a program like Logic or Ableton Live instead and explore more possiblities of these sounds! # Part 3: Interpolating Sounds Now let's try something more experimental. NSynth released plenty of great examples of what happens when you mix the embeddings of different sounds: https://magenta.tensorflow.org/nsynth-instrument - we're going to do the same but now with our own sounds! First let's load some encodings: ``` sample_length = 80000 # from https://www.freesound.org/people/MustardPlug/sounds/395058/ aud1, enc1 = load_encoding('395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav', sample_length) # from https://www.freesound.org/people/xserra/sounds/176098/ aud2, enc2 = load_encoding('176098__xserra__cello-cant-dels-ocells.wav', sample_length) ``` Now we'll mix the two audio signals together. But this is unlike adding the two signals together in a Ableton or simply hearing both sounds at the same time. 
Instead, we're averaging the representation of their timbres, tonality, change over time, and resulting audio signal. This is way more powerful than a simple averaging. ``` enc_mix = (enc1 + enc2) / 2.0 fig, axs = plt.subplots(3, 1, figsize=(10, 7)) axs[0].plot(enc1[0]); axs[0].set_title('Encoding 1') axs[1].plot(enc2[0]); axs[1].set_title('Encoding 2') axs[2].plot(enc_mix[0]); axs[2].set_title('Average') fastgen.synthesize(enc_mix, save_paths='mix.wav') ``` As another example of what's possible with interpolation of embeddings, we'll try crossfading between the two embeddings. To do this, we'll write a utility function which will use a hanning window to apply a fade in or out to the embeddings matrix: ``` def fade(encoding, mode='in'): length = encoding.shape[1] fadein = (0.5 * (1.0 - np.cos(3.1415 * np.arange(length) / float(length)))).reshape(1, -1, 1) if mode == 'in': return fadein * encoding else: return (1.0 - fadein) * encoding fig, axs = plt.subplots(3, 1, figsize=(10, 7)) axs[0].plot(enc1[0]); axs[0].set_title('Original Encoding') axs[1].plot(fade(enc1, 'in')[0]); axs[1].set_title('Fade In') axs[2].plot(fade(enc1, 'out')[0]); axs[2].set_title('Fade Out') ``` Now we can cross fade two different encodings by adding their repsective fade ins and out: ``` def crossfade(encoding1, encoding2): return fade(encoding1, 'out') + fade(encoding2, 'in') fig, axs = plt.subplots(3, 1, figsize=(10, 7)) axs[0].plot(enc1[0]); axs[0].set_title('Encoding 1') axs[1].plot(enc2[0]); axs[1].set_title('Encoding 2') axs[2].plot(crossfade(enc1, enc2)[0]); axs[2].set_title('Crossfade') ``` Now let's synthesize the resulting encodings: ``` fastgen.synthesize(crossfade(enc1, enc2), save_paths=['crossfade.wav']) ``` There is a lot to explore with NSynth. So far I've just shown you a taste of what's possible when you are able to generate your own sounds. I expect the generation process will soon get much faster, especially with help from the community, and for more unexpected and interesting applications to emerge. Please keep in touch with whatever you end up creating, either personally via [twitter](https://twitter.com/pkmital), in our [Creative Applications of Deep Learning](https://www.kadenze.com/programs/creative-applications-of-deep-learning-with-tensorflow) community on Kadenze, or the [Magenta Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/magenta-discuss).
## A Two-sample t-test to find differentially expressed miRNAs between normal and tumor tissues in Lung Adenocarcinoma

```
import os
import pandas

mirna_src_dir = os.getcwd() + "/assn-mirna-luad/data/processed/miRNA/"
clinical_src_dir = os.getcwd() + "/assn-mirna-luad/data/processed/clinical/"

mirna_tumor_df = pandas.read_csv(mirna_src_dir+'tumor_miRNA.csv')
mirna_normal_df = pandas.read_csv(mirna_src_dir+'normal_miRNA.csv')
clinical_df = pandas.read_csv(clinical_src_dir+'clinical.csv')

print "mirna_tumor_df.shape", mirna_tumor_df.shape
print "mirna_normal_df.shape", mirna_normal_df.shape

"""
Here we select the samples to use for the paired and independent t-test analyses
"""
# patients with both a tumor and a matched normal miRNA profile
matched_samples = pandas.merge(clinical_df, mirna_normal_df, on='patient_barcode')['patient_barcode']
# print "matched_samples", matched_samples.shape

# exploratory summaries of the clinical annotations (kept commented out for reference)
# merged = pandas.merge(clinical_df, mirna_tumor_df, on='patient_barcode')
# print merged.shape
# print merged['histological_type'].value_counts().sort_index(axis=0)
# print merged['pathologic_stage'].value_counts().sort_index(axis=0)
# print merged['pathologic_T'].value_counts().sort_index(axis=0)
# print merged['pathologic_N'].value_counts().sort_index(axis=0)
# print merged['pathologic_M'].value_counts().sort_index(axis=0)

from sklearn import preprocessing
import numpy as np

X_normal = mirna_normal_df[mirna_normal_df['patient_barcode'].isin(matched_samples)].sort_values(by=['patient_barcode']).copy()
X_tumor = mirna_tumor_df.copy()
X_tumor_matched = mirna_tumor_df[mirna_tumor_df['patient_barcode'].isin(matched_samples)].sort_values(by=['patient_barcode']).copy()

# drop the identifier column so only miRNA expression values remain
del X_normal['patient_barcode']
del X_tumor_matched['patient_barcode']
del X_tumor['patient_barcode']

print "X_normal.shape", X_normal.shape
print "X_tumor.shape", X_tumor.shape
print "X_tumor_matched.shape", X_tumor_matched.shape

mirna_list = X_tumor.columns.values  # all three data frames share the same miRNA columns
# X_scaler = preprocessing.StandardScaler(with_mean=False).fit(X_tumor)
# X_tumor = X_scaler.transform(X_tumor)

from scipy.stats import ttest_rel
import matplotlib.pyplot as plt

# paired t-test on the matched tumor/normal samples
ttest = ttest_rel(X_tumor_matched, X_normal)

plt.plot(ttest[1], ls='', marker='.')
plt.title('Paired t-test between matched tumor and normal LUAD tissues')
plt.ylabel('p-value')
plt.xlabel('miRNAs')
plt.show()

from scipy.stats import ttest_ind

# independent two-sample t-test on all tumor samples vs. the normal samples
ttest_2 = ttest_ind(X_tumor, X_normal)

plt.plot(ttest_2[1], ls='', marker='.')
plt.title('Independent two-sample t-test between tumor and normal LUAD tissues')
plt.ylabel('p-value')
plt.xlabel('miRNAs')
plt.show()
```
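Because hundreds of miRNAs are tested simultaneously, the raw p-values above will include false positives at any fixed threshold. As a follow-up step (an addition, not part of the original analysis), a Benjamini-Hochberg FDR correction can be applied to the paired-test p-values, for example with `statsmodels`; the sketch below assumes `ttest` and `mirna_list` from the cells above:

```
# Sketch: FDR (Benjamini-Hochberg) correction of the paired t-test p-values
from statsmodels.stats.multitest import multipletests

reject, pvals_corrected, _, _ = multipletests(ttest[1], alpha=0.05, method='fdr_bh')
significant_mirnas = [m for m, r in zip(mirna_list, reject) if r]
print "miRNAs significant after FDR correction:", len(significant_mirnas)
```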
# Step 7: Serve data from OpenAgua into WEAP using WaMDaM #### By Adel M. Abdallah, Dec 2020 Execute the following cells by pressing `Shift-Enter`, or by pressing the play button <img style='display:inline;padding-bottom:15px' src='play-button.png'> on the toolbar above. ## Steps 1. Import python libraries 2. Import the pulished SQLite file for the WEAP model from HydroShare. 3. Prepare to connect to the WEAP API 4. Connect to WEAP API to programmatically populate WEAP with data, run it, get back results Create a copy of the original WEAP Area to use while keeping the orignial as-as for any later use 5.3 Export the unmet demand percent into Excel to load them into WaMDaM <a name="Import"></a> # 1. Import python libraries ``` # 1. Import python libraries ### set the notebook mode to embed the figures within the cell import numpy import sqlite3 import numpy as np import pandas as pd import getpass from hs_restclient import HydroShare, HydroShareAuthBasic import os import plotly plotly.__version__ import plotly.offline as offline import plotly.graph_objs as go from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot offline.init_notebook_mode(connected=True) from plotly.offline import init_notebook_mode, iplot from plotly.graph_objs import * init_notebook_mode(connected=True) # initiate notebook for offline plot import os import csv from collections import OrderedDict import sqlite3 import pandas as pd import numpy as np from IPython.display import display, Image, SVG, Math, YouTubeVideo import urllib import calendar print 'The needed Python libraries have been imported' ``` # 2. Connect to the WaMDaM SQLite on HydroSahre ### Provide the HydroShare ID for your resource Example https://www.hydroshare.org/resource/af71ef99a95e47a89101983f5ec6ad8b/ resource_id='85e9fe85b08244198995558fe7d0e294' ``` # enter your HydroShare username and password here between the quotes username = '' password = '' auth = HydroShareAuthBasic(username=username, password=password) hs = HydroShare(auth=auth) print 'Connected to HydroShare' # Then we can run queries against it within this notebook :) resource_url='https://www.hydroshare.org/resource/af71ef99a95e47a89101983f5ec6ad8b/' resource_id= resource_url.split("https://www.hydroshare.org/resource/",1)[1] resource_id=resource_id.replace('/','') print resource_id resource_md = hs.getSystemMetadata(resource_id) # print resource_md print 'Resource title' print(resource_md['resource_title']) print '----------------------------' resources=hs.resource(resource_id).files.all() file = "" for f in hs.resource(resource_id).files.all(): file += f.decode('utf8') import json file_json = json.loads(file) for f in file_json["results"]: FileURL= f["url"] SQLiteFileName=FileURL.split("contents/",1)[1] cwd = os.getcwd() print cwd fpath = hs.getResourceFile(resource_id, SQLiteFileName, destination=cwd) conn = sqlite3.connect(SQLiteFileName,timeout=10) print 'Connected to the SQLite file= '+ SQLiteFileName print 'done' ``` <a name="ConnectWEAP"></a> # 2. Prepare to the Connect to the WEAP API ### You need to have WEAP already installed on your machine First make sure to have a copy of the Water Evaluation And Planning" system (WEAP) installed on your local machine (Windows). If you don’t have it installed, download and install the WEAP software which allows you to run the Bear River WEAP model and its scenarios for Use Case 5. https://www.weap21.org/. You need to have a WEAP License. See here (https://www.weap21.org/index.asp?action=217). 
If you're interested to learning about WEAP API, check it out here: http://www.weap21.org/WebHelp/API.htm ## Install dependency and register WEAP ### 2.1. Install pywin32 extensions which provide access to many of the Windows APIs from Python. **Choose on option** * a. Install using an executable basedon your python version. Use version for Python 2.7 https://github.com/mhammond/pywin32/releases **OR** * b. Install it using Anaconda terminal @ https://anaconda.org/anaconda/pywin32 Type this command in the Anaconda terminal as Administrator conda install -c anaconda pywin32 **OR** * c. Install from source code (for advanced users) https://github.com/mhammond/pywin32 ### 2.2. Register WEAP with Windows This use case only works on a local Jupyter Notebook server installed on your machine along with WEAP. So it does not work on the online Notebooks in Step 2.1. You need to install Jupyter Server in Step 2.2 then proceed here. * **Register WEAP with Windows to allow the WEAP API to be accessed** Use Windows "Command Prompt". Right click and then <font color=red>**run as Administrator**</font>, navigate to the WEAP installation directory such as and then hit enter ``` cd C:\Program Files (x86)\WEAP ``` Then type the following command in the command prompt and hit enter ``` WEAP /regserver ``` <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/QuerySelect/images/RegisterWEAP_CMD.png?raw=true" style="float:center;width:700px;padding:20px"> Figure 1: Register WEAP API with windows using the Command Prompt (Run as Administrator) # 3. Connect Jupyter Notebook to WEAP API Clone or download all this GitHub repo https://github.com/WamdamProject/WaMDaM_UseCases In your local repo folder, go to the C:\Users\Adel\Documents\GitHub\WaMDaM_UseCases/UseCases_files/1Original_Datasets_preperation_files/WEAP/Bear_River_WEAP_Model_2017 Copy this folder **Bear_River_WEAP_Model_2017** and paste it into **WEAP Areas** folder on your local machine. 
For example, it is at C:\Users\Adel\Documents\WEAP Areas ``` # this library is needed to connect to the WEAP API import win32com.client # this command will open the WEAP software (if closed) and get the last active model # you could change the active area to another one inside WEAP or by passing it to the command here #WEAP.ActiveArea = "BearRiverFeb2017_V10.9" WEAP=win32com.client.Dispatch("WEAP.WEAPApplication") # WEAP.Visible = 'FALSE' print WEAP.ActiveArea.Name WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Original" print WEAP.ActiveArea.Name WEAP.Areas("Bear_River_WEAP_Model_2017_Original").Open WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Original" print WEAP.ActiveArea.Name print 'Connected to WEAP API and the '+ WEAP.ActiveArea.Name + ' Area' print '-------------' if not WEAP.Registered: print "Because WEAP is not registered, you cannot use the API" # get the active WEAP Area (model) to serve data into it # ActiveArea=WEAP.ActiveArea.Name # get the active WEAP scenario to serve data into it print '-------------' ActiveScenario= WEAP.ActiveScenario.Name print '\n ActiveScenario= '+ActiveScenario print '-------------' WEAP_Area_dir=WEAP.AreasDirectory print WEAP_Area_dir print "\n \n You're connected to the WEAP API" ``` <a name="CreateWEAP_Area"></a> # 4 Create a copy of the original WEAP Area to use while keeping the orignial as-as for any later use <a name="AddScenarios"></a> ### Add a new CacheCountyUrbanWaterUse scenario from the Reference original WEAP Area: ### You can always use this orignal one and delete any new copies you make afterwards. ``` # Create a copy of the WEAP AREA to serve the updated Hyrym Reservoir to it # Delete the Area if it exists and then add it. Start from fresh Area="Bear_River_WEAP_Model_2017_Conservation" if not WEAP.Areas.Exists(Area): WEAP.SaveAreaAs(Area) WEAP.ActiveArea.Save WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation" print 'ActiveArea= '+WEAP.ActiveArea.Name # Add new Scenario # Add(NewScenarioName, ParentScenarioName or Index): # Create a new scenario as a child of the parent scenario specified. # The new scenario will become the selected scenario in the Data View. WEAP=win32com.client.Dispatch("WEAP.WEAPApplication") # WEAP.Visible = FALSE WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation" print 'ActiveArea= '+ WEAP.ActiveArea.Name Scenarios=[] Scenarios=['Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] # Delete the scenario if it exists and then add it. 
Start from fresh for Scenario in Scenarios: if WEAP.Scenarios.Exists(Scenario): # delete it WEAP.Scenarios(Scenario).Delete(True) # add it back as a fresh copy WEAP.Scenarios.Add(Scenario,'Reference') else: WEAP.Scenarios.Add(Scenario,'Reference') WEAP.ActiveArea.Save WEAP.SaveArea WEAP.Quit # or add the scenarios one by one using this command # Make a copy from the reference (base) scenario # WEAP.Scenarios.Add('UpdateCacheDemand','Reference') print '---------------------- \n' print 'Scenarios added to the original WEAP area' WEAP.Quit print 'Connection with WEAP API is disconnected' ``` <a name="QuerySupplyDataLoadWEAP"></a> # 4.A Query Cache County seasonal "Monthly Demand" for the three sites: Logan Potable, North Cache Potable, South Cache Potable ### The data comes from OpenAgua ``` # Use Case 3.1Identify_aggregate_TimeSeriesValues.csv # plot aggregated to monthly and converted to acre-feet time series data of multiple sources # Logan Potable # North Cache Potable # South Cache Potable # 2.2Identify_aggregate_TimeSeriesValues.csv Query_UseCase_URL=""" https://raw.githubusercontent.com/WamdamProject/WaMDaM_JupyterNotebooks/master/3_VisualizePublish/SQL_queries/WEAP/Query_demand_sites.sql """ # Read the query text inside the URL Query_UseCase_text = urllib.urlopen(Query_UseCase_URL).read() # return query result in a pandas data frame result_df_UseCase= pd.read_sql_query(Query_UseCase_text, conn) # uncomment the below line to see the list of attributes # display (result_df_UseCase) seasons_dict = dict() seasons_dict2=dict() Scenarios=['Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] subsets = result_df_UseCase.groupby(['ScenarioName','InstanceName']) for subset in subsets.groups.keys(): if subset[0] in Scenarios: df_Seasonal = subsets.get_group(name=subset) df_Seasonal=df_Seasonal.reset_index() SeasonalParam = '' for i in range(len(df_Seasonal['SeasonName'])): m_data = df_Seasonal['SeasonName'][i] n_data = float(df_Seasonal['SeasonNumericValue'][i]) SeasonalParam += '{},{}'.format(m_data, n_data) if i != len(df_Seasonal['SeasonName']) - 1: SeasonalParam += ',' Seasonal_value="MonthlyValues("+SeasonalParam+")" seasons_dict[subset]=(Seasonal_value) # seasons_dict2[subset[0]]=seasons_dict # print seasons_dict2 print '-----------------' # print seasons_dict # seasons_dict2.get("Cons25PercCacheUrbWaterUse", {}).get("Logan Potable") # 1 print 'Query and data preperation are done' ``` <a name="LoadFlow"></a> # 4.B Load the seasonal demand data with conservation into WEAP ``` # 9. Load the seasonal data into WEAP #WEAP=win32com.client.Dispatch("WEAP.WEAPApplication") # WEAP.Visible = FALSE print WEAP.ActiveArea.Name Scenarios=['Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] DemandSites=['Logan Potable','North Cache Potable','South Cache Potable'] AttributeName='Monthly Demand' for scenario in Scenarios: WEAP.ActiveScenario = scenario print WEAP.ActiveScenario.Name for Branch in WEAP.Branches: for InstanceName in DemandSites: if Branch.Name == InstanceName: GetInstanceFullBranch = Branch.FullName val=seasons_dict[(scenario,InstanceName)] WEAP.Branch(GetInstanceFullBranch).Variable(AttributeName).Expression =val # print val print "loaded " + InstanceName WEAP.SaveArea print '\n The data have been sucsesfully loaded into WEAP' WEAP.SaveArea print '\n \n The updated data have been saved' ``` # 5. 
Run WEAP <font color=green>**Please wait, it will take ~1-3 minutes** to finish calcualting the two WEAP Areas with their many scenarios</font> ``` # Run WEAP WEAP.Areas("Bear_River_WEAP_Model_2017_Conservation").Open print WEAP.ActiveArea.Name WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation" print WEAP.ActiveArea.Name print 'Please wait 1-3 min for the calculation to finish' WEAP.Calculate(2006,10,True) WEAP.SaveArea print '\n \n The calculation has been done and saved' print WEAP.CalculationTime print '\n \n Done' ``` ## 5.1 Get the unmet demand or Cache County sites in both the reference and the conservation scenarios ``` Scenarios=['Reference','Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse'] DemandSites=['Logan Potable','North Cache Potable','South Cache Potable'] UnmetDemandEstimate_Ref = pd.DataFrame(columns = DemandSites) UnmetDemandEstimate_Cons25 = pd.DataFrame(columns = DemandSites) UnmetDemandEstimate_Incr25 = pd.DataFrame(columns = DemandSites) UnmetDemandEstimate= pd.DataFrame(columns = Scenarios) for scen in Scenarios: if scen=='Reference': for site in DemandSites: param="\Demand Sites\%s: Unmet Demand[Acre-Foot]"%(site) # print param for year in range (1966,2006): value=WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps) UnmetDemandEstimate_Ref.loc[year, [site]]=value elif scen=='Cons25PercCacheUrbWaterUse': for site in DemandSites: param="\Demand Sites\%s: Unmet Demand[Acre-Foot]"%(site) # print param for year in range (1966,2006): value=WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps) UnmetDemandEstimate_Cons25.loc[year, [site]]=value elif scen=='Incr25PercCacheUrbWaterUse': for site in DemandSites: param="\Demand Sites\%s: Unmet Demand[Acre-Foot]"%(site) # print param for year in range (1966,2006): value=WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps) UnmetDemandEstimate_Incr25.loc[year, [site]]=value UnmetDemandEstimate_Ref['Cache Total']=UnmetDemandEstimate_Ref[DemandSites].sum(axis=1) UnmetDemandEstimate_Cons25['Cache Total']=UnmetDemandEstimate_Cons25[DemandSites].sum(axis=1) UnmetDemandEstimate_Incr25['Cache Total']=UnmetDemandEstimate_Incr25[DemandSites].sum(axis=1) UnmetDemandEstimate['Reference']=UnmetDemandEstimate_Ref['Cache Total'] UnmetDemandEstimate['Cons25PercCacheUrbWaterUse']=UnmetDemandEstimate_Cons25['Cache Total'] UnmetDemandEstimate['Incr25PercCacheUrbWaterUse']=UnmetDemandEstimate_Incr25['Cache Total'] UnmetDemandEstimate=UnmetDemandEstimate.rename_axis('Year',axis="columns") print 'Done estimating the unment demnd pecentage for each scenario' # display(UnmetDemandEstimate) ``` ## 5.2 Get the unmet demand as a percentage for the scenarios ``` ######################################################################## # estimate the total reference demand for Cahce county to calcualte the percentage result_df_UseCase= pd.read_sql_query(Query_UseCase_text, conn) subsets = result_df_UseCase.groupby(['ScenarioName']) for subset in subsets.groups.keys(): if subset=='Bear River WEAP Model 2017': # reference df_Seasonal = subsets.get_group(name=subset) df_Seasonal=df_Seasonal.reset_index() # display (df_Seasonal) Tot=df_Seasonal["SeasonNumericValue"].tolist() float_lst = [float(x) for x in Tot] Annual_Demand=sum(float_lst) print Annual_Demand ######################################################################## years =UnmetDemandEstimate.index.values Reference_vals =UnmetDemandEstimate['Reference'].tolist() Reference_vals_perc =((numpy.array([Reference_vals]))/Annual_Demand)*100 
Cons25PercCacheUrbWaterUse_vals =UnmetDemandEstimate['Cons25PercCacheUrbWaterUse'].tolist() Cons25PercCacheUrbWaterUse_vals_perc =((numpy.array([Cons25PercCacheUrbWaterUse_vals]))/Annual_Demand)*100 Incr25PercCacheUrbWaterUse_vals =UnmetDemandEstimate['Incr25PercCacheUrbWaterUse'].tolist() Incr25PercCacheUrbWaterUse_vals_perc =((numpy.array([Incr25PercCacheUrbWaterUse_vals]))/Annual_Demand)*100 print 'done estimating unmet demnd the percentages' ``` # 5.3 Export the unmet demand percent into Excel to load them into WaMDaM ``` # display(UnmetDemandEstimate) import xlsxwriter from collections import OrderedDict UnmetDemandEstimate.to_csv('UnmetDemandEstimate.csv') ExcelFileName='Test.xlsx' years =UnmetDemandEstimate.index.values #print years Columns=['ObjectType','InstanceName','ScenarioName','AttributeName','DateTimeStamp','Value'] # these three columns have fixed values for all the rows ObjectType='Demand Site' InstanceName='Cache County Urban' AttributeName='UnmetDemand' # this dict contains the keysL (scenario name) and the values are in a list # years exist in UnmetDemandEstimate. We then need to add day and month to the year date # like this format: # DateTimeStamp= 1/1/1993 Scenarios = OrderedDict() Scenarios['Bear River WEAP Model 2017_result'] = Reference_vals_perc Scenarios['Incr25PercCacheUrbWaterUse_result'] = Incr25PercCacheUrbWaterUse_vals_perc Scenarios['Cons25PercCacheUrbWaterUse_result'] = Cons25PercCacheUrbWaterUse_vals_perc #print Incr25PercCacheUrbWaterUse_vals_perc workbook = xlsxwriter.Workbook(ExcelFileName) sheet = workbook.add_worksheet('sheet') # write headers for i, header_name in enumerate(Columns): sheet.write(0, i, header_name) row = 1 col = 0 for scenario_name in Scenarios.keys(): for val_list in Scenarios[scenario_name]: # print val_list for i, val in enumerate(val_list): # print years[i] date_timestamp = '1/1/{}'.format(years[i]) sheet.write(row, 0, ObjectType) sheet.write(row, 1, InstanceName) sheet.write(row, 2, scenario_name) sheet.write(row, 3, AttributeName) sheet.write(row, 4, date_timestamp) sheet.write(row, 5, val) row += 1 workbook.close() print 'done writing to Excel' print 'Next, copy the exported data into a WaMDaM workbook template for the WEAP model' ``` # 6. 
Plot the unmet demad for all the scenarios and years ``` trace2 = go.Scatter( x=years, y=Reference_vals_perc[0], name = 'Reference demand', mode = 'lines+markers', marker = dict( color = '#264DFF', )) trace3 = go.Scatter( x=years, y=Cons25PercCacheUrbWaterUse_vals_perc[0], name = 'Conserve demand by 25%', mode = 'lines+markers', marker = dict( color = '#3FA0FF' )) trace1 = go.Scatter( x=years, y=Incr25PercCacheUrbWaterUse_vals_perc[0], name = 'Increase demand by 25%', mode = 'lines+markers', marker = dict( color = '#290AD8' )) layout = dict( #title = "Use Case 3.3", yaxis = dict( title = "Annual unmet demand (%)", tickformat= ',', showline=True, dtick='5', ticks='outside', ticklen=10, tickcolor='#000', gridwidth=1, showgrid=True, ), xaxis = dict( # title = "Updated input parameters in the <br>Bear_River_WEAP_Model_2017", # showline=True, ticks='inside', tickfont=dict(size=22), tickcolor='#000', gridwidth=1, showgrid=True, ticklen=25 ), legend=dict( x=0.05,y=1.1, bordercolor='#00000f', borderwidth=2 ), width=1100, height=700, #paper_bgcolor='rgb(233,233,233)', #plot_bgcolor='rgb(233,233,233)', margin=go.Margin(l=130,b=200), font=dict(size=25,family='arial',color='#00000f'), showlegend=True ) data = [trace1, trace2,trace3] # create a figure object fig = dict(data=data, layout=layout) #py.iplot(fig, filename = "2.3Identify_SeasonalValues") ## it can be run from the local machine on Pycharm like this like below ## It would also work here offline but in a seperate window offline.iplot(fig,filename = 'jupyter/UnmentDemand@BirdRefuge' ) print "Figure x is replicated!!" ``` <a name="Close"></a> # 7. Upload the new result scenarios to OpenAgua to visulize them there You already uploaded the results form WaMDaM SQLite earlier at the begnining of these Jupyter Notebooks. So all you need is to select to display the result in OpenAgua. Finally, click, load data. It should replicate the same figure above and Figure 6 in the paper <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/WEAP_results_OA.PNG?raw=true" style="float:center;width:900px;padding:20px"> <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/WEAP_results_OA2.PNG?raw=true" style="float:center;width:900px;padding:20px"> <a name="Close"></a> # 8. Close the SQLite and WEAP API connections ``` # 9. Close the SQLite and WEAP API connections conn.close() print 'connection disconnected' # Uncomment WEAP.SaveArea # this command will close WEAP WEAP.Quit print 'Connection with WEAP API is disconnected' ``` # The End :) Congratulations!
# Optimizing building HVAC with Amazon SageMaker RL ``` import sagemaker import boto3 from sagemaker.rl import RLEstimator from source.common.docker_utils import build_and_push_docker_image ``` ## Initialize Amazon SageMaker ``` role = sagemaker.get_execution_role() sm_session = sagemaker.session.Session() # SageMaker SDK creates a default bucket. Change this bucket to your own bucket, if needed. s3_bucket = sm_session.default_bucket() s3_output_path = f's3://{s3_bucket}' print(f'S3 bucket path: {s3_output_path}') print(f'Role: {role}') ``` ## Set additional training parameters ### Set instance type Set `cpu_or_gpu` to either `'cpu'` or `'gpu'` for using CPU or GPU instances. ### Configure the framework you want to use Set `framework` to `'tf'` or `'torch'` for TensorFlow or PyTorch, respectively. You will also have to edit your entry point i.e., `train-sagemaker-distributed.py` with the configuration parameter `"use_pytorch"` to match the framework that you have selected. ``` job_name_prefix = 'energyplus-hvac-ray' cpu_or_gpu = 'gpu' # has to be either cpu or gpu if cpu_or_gpu != 'cpu' and cpu_or_gpu != 'gpu': raise ValueError('cpu_or_gpu has to be either cpu or gpu') framework = 'tf' instance_type = 'ml.g4dn.16xlarge' # g4dn.16x large has 1 GPU and 64 cores ``` # Train your homogeneous scaling job here ### Edit the training code The training code is written in the file `train-sagemaker-distributed.py` which is uploaded in the /source directory. *Note that ray will automatically set `"ray_num_cpus"` and `"ray_num_gpus"` in `_get_ray_config`* ``` !pygmentize source/train-sagemaker-distributed.py ``` ### Train the RL model using the Python SDK Script mode When using SageMaker for distributed training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs. 1. Specify the source directory where the environment, presets and training code is uploaded. 2. Specify the entry point as the training code 3. Specify the image (CPU or GPU) to be used for the training environment. 4. Define the training parameters such as the instance count, job name, S3 path for output and job name. 5. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks. 
#### GPU docker image ``` # Build image repository_short_name = f'sagemaker-hvac-ray-{cpu_or_gpu}' docker_build_args = { 'CPU_OR_GPU': cpu_or_gpu, 'AWS_REGION': boto3.Session().region_name, 'FRAMEWORK': framework } image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args) print("Using ECR image %s" % image_name) metric_definitions = [ {'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episodes_total', 'Regex': 'episodes_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'num_steps_trained', 'Regex': 'num_steps_trained: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'timesteps_total', 'Regex': 'timesteps_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, {'Name': 'episode_reward_min', 'Regex': 'episode_reward_min: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'}, ] ``` ### Ray homogeneous scaling - Specify `train_instance_count` > 1 Homogeneous scaling allows us to use multiple instances of the same type. Spot instances are unused EC2 instances that could be used at 90% discount compared to On-Demand prices (more information about spot instances can be found [here](https://aws.amazon.com/ec2/spot/?cards.sort-by=item.additionalFields.startDateTime&cards.sort-order=asc) and [here](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html)) To use spot instances, set `train_use_spot_instances = True`. To use On-Demand instances, `train_use_spot_instances = False`. ``` hyperparameters = { # no. of days to simulate. Remember to adjust the dates in RunPeriod of # 'source/eplus/envs/buildings/MediumOffice/RefBldgMediumOfficeNew2004_Chicago.idf' to match simulation days. 'n_days': 365, 'n_iter': 50, # no. of training iterations 'algorithm': 'APEX_DDPG', # only APEX_DDPG and PPO are tested 'multi_zone_control': True, # if each zone temperature set point has to be independently controlled 'energy_temp_penalty_ratio': 10 } # Set additional training parameters training_params = { 'base_job_name': job_name_prefix, 'train_instance_count': 1, 'tags': [{'Key': k, 'Value': str(v)} for k,v in hyperparameters.items()] } # Defining the RLEstimator estimator = RLEstimator(entry_point=f'train-sagemaker-hvac.py', source_dir='source', dependencies=["source/common/"], image_uri=image_name, role=role, train_instance_type=instance_type, # train_instance_type='local', output_path=s3_output_path, metric_definitions=metric_definitions, hyperparameters=hyperparameters, **training_params ) estimator.fit(wait=False) print(' ') print(estimator.latest_training_job.job_name) print('type=', instance_type, 'count=', training_params['train_instance_count']) print(' ') ```
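Because the job is launched with `wait=False`, the cell above returns as soon as the training job is created. A small sketch for checking on the job afterwards (it uses the job name printed above; `describe_training_job` is a standard `boto3` call, and streaming the logs through the SageMaker session is optional):

```
# Check the status of the asynchronous training job launched above
job_name = estimator.latest_training_job.job_name

sm_client = boto3.client('sagemaker')
status = sm_client.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print(f'Training job {job_name} status: {status}')

# Optionally block here and stream the training logs until the job finishes:
# sm_session.logs_for_job(job_name, wait=True)
```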
# Spleen 3D segmentation with MONAI This tutorial demonstrates how MONAI can be used in conjunction with the [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) framework. We demonstrate use of the following MONAI features: 1. Transforms for dictionary format data. 2. Loading Nifti images with metadata. 3. Add channel dim to the data if no channel dimension. 4. Scaling medical image intensity with expected range. 5. Croping out a batch of balanced images based on the positive / negative label ratio. 6. Cache IO and transforms to accelerate training and validation. 7. Use of a a 3D UNet model, Dice loss function, and mean Dice metric for a 3D segmentation task. 8. The sliding window inference method. 9. Deterministic training for reproducibility. The training Spleen dataset used in this example can be downloaded from from http://medicaldecathlon.com// ![spleen](http://medicaldecathlon.com/img/spleen0.png) Target: Spleen Modality: CT Size: 61 3D volumes (41 Training + 20 Testing) Source: Memorial Sloan Kettering Cancer Center Challenge: Large ranging foreground size In addition to the usual MONAI requirements you will need Lightning installed. ``` ! pip install pytorch-lightning # Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import glob import numpy as np import torch from torch.utils.data import DataLoader import matplotlib.pyplot as plt import monai from monai.transforms import \ Compose, LoadNiftid, AddChanneld, ScaleIntensityRanged, RandCropByPosNegLabeld, \ RandAffined, Spacingd, Orientationd, ToTensord from monai.data import list_data_collate, sliding_window_inference from monai.networks.layers import Norm from monai.metrics import compute_meandice from pytorch_lightning import LightningModule, Trainer, loggers from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint monai.config.print_config() ``` ## Define the LightningModule The LightningModule contains a refactoring of your training code. 
The following module is a refactoring of the code in `spleen_segmentation_3d.ipynb`: ``` class Net(LightningModule): def __init__(self): super().__init__() self._model = monai.networks.nets.UNet(dimensions=3, in_channels=1, out_channels=2, channels=(16, 32, 64, 128, 256), strides=(2, 2, 2, 2), num_res_units=2, norm=Norm.BATCH) self.loss_function = monai.losses.DiceLoss(to_onehot_y=True, do_softmax=True) self.best_val_dice = 0 self.best_val_epoch = 0 def forward(self, x): return self._model(x) def prepare_data(self): # set up the correct data path data_root = '/workspace/data/medical/Task09_Spleen' train_images = glob.glob(os.path.join(data_root, 'imagesTr', '*.nii.gz')) train_labels = glob.glob(os.path.join(data_root, 'labelsTr', '*.nii.gz')) data_dicts = [{'image': image_name, 'label': label_name} for image_name, label_name in zip(train_images, train_labels)] train_files, val_files = data_dicts[:-9], data_dicts[-9:] # define the data transforms train_transforms = Compose([ LoadNiftid(keys=['image', 'label']), AddChanneld(keys=['image', 'label']), Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)), Orientationd(keys=['image', 'label'], axcodes='RAS'), ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), # randomly crop out patch samples from big image based on pos / neg ratio # the image centers of negative samples must be in valid image area RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', size=(96, 96, 96), pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0), # user can also add other random transforms # RandAffined(keys=['image', 'label'], mode=('bilinear', 'nearest'), prob=1.0, spatial_size=(96, 96, 96), # rotate_range=(0, 0, np.pi/15), scale_range=(0.1, 0.1, 0.1)), ToTensord(keys=['image', 'label']) ]) val_transforms = Compose([ LoadNiftid(keys=['image', 'label']), AddChanneld(keys=['image', 'label']), Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)), Orientationd(keys=['image', 'label'], axcodes='RAS'), ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True), ToTensord(keys=['image', 'label']) ]) # set deterministic training for reproducibility train_transforms.set_random_state(seed=0) torch.manual_seed(0) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # we use cached datasets - these are 10x faster than regular datasets self.train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=1.0) self.val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0) #self.train_ds = monai.data.Dataset(data=train_files, transform=train_transforms) #self.val_ds = monai.data.Dataset(data=val_files, transform=val_transforms) def train_dataloader(self): train_loader = DataLoader(self.train_ds, batch_size=2, shuffle=True, num_workers=4, collate_fn=list_data_collate) return train_loader def val_dataloader(self): val_loader = DataLoader(self.val_ds, batch_size=1, num_workers=4) return val_loader def configure_optimizers(self): optimizer = torch.optim.Adam(self._model.parameters(), 1e-4) return optimizer def training_step(self, batch, batch_idx): images, labels = batch['image'], batch['label'] output = self.forward(images) loss = self.loss_function(output, labels) tensorboard_logs = {'train_loss': loss.item()} return {'loss': loss, 'log': tensorboard_logs} def validation_step(self, batch, batch_idx): images, labels = batch['image'], batch['label'] 
roi_size = (160, 160, 160) sw_batch_size = 4 outputs = sliding_window_inference(images, roi_size, sw_batch_size, self.forward) loss = self.loss_function(outputs, labels) value = compute_meandice(y_pred=outputs, y=labels, include_background=False, to_onehot_y=True, mutually_exclusive=True) return {'val_loss': loss, 'val_dice': value} def validation_epoch_end(self, outputs): val_dice = 0 num_items = 0 for output in outputs: val_dice += output['val_dice'].sum().item() num_items += len(output['val_dice']) mean_val_dice = val_dice / num_items tensorboard_logs = {'val_dice': mean_val_dice} if mean_val_dice > self.best_val_dice: self.best_val_dice = mean_val_dice self.best_val_epoch = self.current_epoch print('current epoch %d current mean dice: %0.4f best mean dice: %0.4f at epoch %d' % (self.current_epoch, mean_val_dice, self.best_val_dice, self.best_val_epoch)) return {'log': tensorboard_logs} ``` ## Run the training ``` # initialise the LightningModule net = Net() # set up loggers and checkpoints tb_logger = loggers.TensorBoardLogger(save_dir='logs') checkpoint_callback = ModelCheckpoint(filepath='logs/{epoch}-{val_loss:.2f}-{val_dice:.2f}') # initialise Lightning's trainer. trainer = Trainer(gpus=[0], max_epochs=250, logger=tb_logger, checkpoint_callback=checkpoint_callback, show_progress_bar=False, num_sanity_val_steps=1 ) # train trainer.fit(net) print('train completed, best_metric: %0.4f at epoch %d' % (net.best_val_dice, net.best_val_epoch)) ``` ## View training in tensorboard ``` %load_ext tensorboard %tensorboard --logdir='logs' ``` ## Check best model output with the input image and label ``` net.eval() device = torch.device("cuda:0") with torch.no_grad(): for i, val_data in enumerate(net.val_dataloader()): roi_size = (160, 160, 160) sw_batch_size = 4 val_outputs = sliding_window_inference(val_data['image'].to(device), roi_size, sw_batch_size, net) # plot the slice [:, :, 50] plt.figure('check', (18, 6)) plt.subplot(1, 3, 1) plt.title('image ' + str(i)) plt.imshow(val_data['image'][0, 0, :, :, 50], cmap='gray') plt.subplot(1, 3, 2) plt.title('label ' + str(i)) plt.imshow(val_data['label'][0, 0, :, :, 50]) plt.subplot(1, 3, 3) plt.title('output ' + str(i)) plt.imshow(torch.argmax(val_outputs, dim=1).detach().cpu()[0, :, :, 50]) plt.show() ```
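Note that the cell above runs inference with the in-memory `net`, i.e. the weights from the last epoch. If you would rather evaluate a checkpoint saved by `ModelCheckpoint`, one option (a sketch, assuming the `.ckpt` files were written under `logs/` as configured above and reusing the `device` defined in the previous cell) is to reload its state dict into the same module:

```
# Sketch: reload a saved checkpoint instead of using the last-epoch weights
ckpt_files = sorted(glob.glob('logs/*.ckpt'))
print(ckpt_files)  # inspect the list and pick the checkpoint with the best val_dice

checkpoint = torch.load(ckpt_files[0], map_location=device)  # index 0 is only a placeholder choice
net.load_state_dict(checkpoint['state_dict'])
net.eval()
```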
github_jupyter
``` import numpy as np import pandas as pd from matplotlib import pyplot as plt from tqdm import tqdm as tqdm %matplotlib inline import torch import torchvision import torchvision.transforms as transforms import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import random # from google.colab import drive # drive.mount('/content/drive') transform = transforms.Compose( [transforms.CenterCrop((28,28)),transforms.ToTensor(),transforms.Normalize([0.5], [0.5])]) mnist_trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform) mnist_testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform) index1 = [np.where(mnist_trainset.targets==0)[0] , np.where(mnist_trainset.targets==1)[0] ] index1 = np.concatenate(index1,axis=0) len(index1) #12665 true = 1000 total = 47000 sin = total-true sin epochs = 300 indices = np.random.choice(index1,true) indices.shape index = np.where(np.logical_and(mnist_trainset.targets!=0,mnist_trainset.targets!=1))[0] #47335 index.shape req_index = np.random.choice(index.shape[0], sin, replace=False) # req_index index = index[req_index] index.shape values = np.random.choice([0,1],size= sin) print(sum(values ==0),sum(values==1), sum(values ==0) + sum(values==1) ) mnist_trainset.data = torch.cat((mnist_trainset.data[indices],mnist_trainset.data[index])) mnist_trainset.targets = torch.cat((mnist_trainset.targets[indices],torch.Tensor(values).type(torch.LongTensor))) mnist_trainset.targets.shape, mnist_trainset.data.shape # mnist_trainset.targets[index] = torch.Tensor(values).type(torch.LongTensor) j =20078 # Without Shuffle upto True Training numbers correct , after that corrupted print(plt.imshow(mnist_trainset.data[j]),mnist_trainset.targets[j]) trainloader = torch.utils.data.DataLoader(mnist_trainset, batch_size=250,shuffle=True, num_workers=2) testloader = torch.utils.data.DataLoader(mnist_testset, batch_size=250,shuffle=False, num_workers=2) mnist_trainset.data.shape classes = ('zero', 'one') dataiter = iter(trainloader) images, labels = dataiter.next() images[:4].shape def imshow(img): img = img / 2 + 0.5 # unnormalize npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() imshow(torchvision.utils.make_grid(images[:10])) print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(10))) class Module2(nn.Module): def __init__(self): super(Module2, self).__init__() self.conv1 = nn.Conv2d(1, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 4 * 4, 128) self.fc2 = nn.Linear(128, 64) self.fc3 = nn.Linear(64, 10) self.fc4 = nn.Linear(10,2) def forward(self,z): y1 = self.pool(F.relu(self.conv1(z))) y1 = self.pool(F.relu(self.conv2(y1))) # print(y1.shape) y1 = y1.view(-1, 16 * 4 * 4) y1 = F.relu(self.fc1(y1)) y1 = F.relu(self.fc2(y1)) y1 = F.relu(self.fc3(y1)) y1 = self.fc4(y1) return y1 inc = Module2() inc = inc.to("cuda") criterion_inception = nn.CrossEntropyLoss() optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9) acti = [] loss_curi = [] for epoch in range(epochs): # loop over the dataset multiple times ep_lossi = [] running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data inputs, labels = inputs.to("cuda"),labels.to("cuda") # print(inputs.shape) # zero the parameter gradients optimizer_inception.zero_grad() # forward + backward + optimize outputs = inc(inputs) loss = criterion_inception(outputs, labels) loss.backward() 
optimizer_inception.step() # print statistics running_loss += loss.item() if i % 50 == 49: # print every 50 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 50)) ep_lossi.append(running_loss/50) # loss per minibatch running_loss = 0.0 loss_curi.append(np.mean(ep_lossi)) #loss per epoch if (np.mean(ep_lossi)<=0.03): break # acti.append(actis) print('Finished Training') correct = 0 total = 0 with torch.no_grad(): for data in trainloader: images, labels = data images, labels = images.to("cuda"), labels.to("cuda") outputs = inc(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 60000 train images: %d %%' % ( 100 * correct / total)) total,correct correct = 0 total = 0 out = [] pred = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to("cuda"),labels.to("cuda") out.append(labels.cpu().numpy()) outputs= inc(images) _, predicted = torch.max(outputs.data, 1) pred.append(predicted.cpu().numpy()) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) out = np.concatenate(out,axis=0) pred = np.concatenate(pred,axis=0) index = np.logical_or(out ==1,out==0) print(index.shape) acc = sum(out[index] == pred[index])/sum(index) print('Accuracy of the network on the 10000 test images: %d %%' % ( 100*acc)) sum(index) import random random.sample([1,2,3,4,5,6,7,8],5) # torch.save(inc.state_dict(),"/content/drive/My Drive/model_simple_8000.pkl") fig = plt.figure() plt.plot(loss_curi,label="loss_Curve") plt.xlabel("epochs") plt.ylabel("training_loss") plt.legend() fig.savefig("loss_curve.pdf") ``` Simple Model 3 Inception Module |true training data | Corr Training Data | Test Accuracy | Test Accuracy 0-1 | | ------------------ | ------------------ | ------------- | ----------------- | | 100 | 47335 | 15 | 75 | | 500 | 47335 | 16 | 80 | | 1000 | 47335 | 17 | 83 | | 2000 | 47335 | 19 | 92 | | 4000 | 47335 | 20 | 95 | | 6000 | 47335 | 20 | 96 | | 8000 | 47335 | 20 | 96 | | 12665 | 47335 | 20 | 98 | | Total Training Data | Training Accuracy | |---------------------------- | ------------------------ | | 47435 | 100 | | 47835 | 100 | | 48335 | 100 | | 49335 | 100 | | 51335 | 100 | | 53335 | 100 | | 55335 | 100 | | 60000 | 100 | Mini- Inception network 8 Inception Modules |true training data | Corr Training Data | Test Accuracy | Test Accuracy 0-1 | | ------------------ | ------------------ | ------------- | ----------------- | | 100 | 47335 | 14 | 69 | | 500 | 47335 | 19 | 90 | | 1000 | 47335 | 19 | 92 | | 2000 | 47335 | 20 | 95 | | 4000 | 47335 | 20 | 97 | | 6000 | 47335 | 20 | 97 | | 8000 | 47335 | 20 | 98 | | 12665 | 47335 | 20 | 99 | | Total Training Data | Training Accuracy | |---------------------------- | ------------------------ | | 47435 | 100 | | 47835 | 100 | | 48335 | 100 | | 49335 | 100 | | 51335 | 100 | | 53335 | 100 | | 55335 | 100 | | 60000 | 100 | ``` ```
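The accuracy tables above were produced by rerunning the notebook for each value of true training data. A sketch of how such a sweep could be automated is shown below; note that `run_experiment` is a hypothetical helper (it does not exist above) that would wrap the dataset construction, the training loop, and the two evaluation cells, returning the overall test accuracy and the 0/1-only accuracy.

```
# Hypothetical sweep over the number of correctly labelled 0/1 images.
# `run_experiment` is an assumed wrapper around the cells above: build the
# dataset with `true_count` clean labels plus 47335 corrupted ones, train
# Module2, then return (overall test accuracy, accuracy on 0/1 digits only).
import pandas as pd

rows = []
for true_count in [100, 500, 1000, 2000, 4000, 6000, 8000, 12665]:
    test_acc, test_acc_01 = run_experiment(true_count=true_count,
                                           corrupted_count=47335)
    rows.append({"true": true_count,
                 "corrupted": 47335,
                 "test_acc": test_acc,
                 "test_acc_01": test_acc_01})

print(pd.DataFrame(rows))
```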
github_jupyter
# Binary classification from 2 features using K Nearest Neighbors (KNN) Classification using "raw" python or libraries. The binary classification is on a single boundary defined by a continuous function and added white noise ``` import numpy as np from numpy import random import matplotlib.pyplot as plt import matplotlib.colors as pltcolors from sklearn import metrics from sklearn.neighbors import KNeighborsClassifier as SkKNeighborsClassifier import pandas as pd import seaborn as sns ``` ## Model Quadratic function as boundary between positive and negative values Adding some unknown as a Gaussian noise The values of X are uniformly distributed and independent ``` # Two features, Gaussian noise def generateBatch(N): # xMin = 0 xMax = 1 b = 0.1 std = 0.1 # x = random.uniform(xMin, xMax, (N, 2)) # 4th degree relation to shape the boundary boundary = 2*(x[:,0]**4 + (x[:,0]-0.3)**3 + b) # Adding some gaussian noise labels = boundary + random.normal(0, std, N) > x[:,1] return (x, labels) ``` ### Training data ``` N = 2000 # x has 1 dim in R, label has 1 dim in B xTrain, labelTrain = generateBatch(N) colors = ['blue','red'] fig = plt.figure(figsize=(15,4)) plt.subplot(1,3,1) plt.scatter(xTrain[:,0], xTrain[:,1], c=labelTrain, cmap=pltcolors.ListedColormap(colors), marker=',', alpha=0.1) plt.xlabel('x0') plt.ylabel('x1') plt.title('Generated train data') plt.grid() cb = plt.colorbar() loc = np.arange(0,1,1/float(len(colors))) cb.set_ticks(loc) cb.set_ticklabels([0,1]) plt.subplot(1,3,2) plt.scatter(xTrain[:,0], labelTrain, marker=',', alpha=0.01) plt.xlabel('x0') plt.ylabel('label') plt.grid() plt.subplot(1,3,3) plt.scatter(xTrain[:,1], labelTrain, marker=',', alpha=0.01) plt.xlabel('x1') plt.ylabel('label') plt.grid() count, bins, ignored = plt.hist(labelTrain*1.0, 10, density=True, alpha=0.5) p = np.mean(labelTrain) print('Bernouilli parameter of the distribution:', p) ``` ### Test data for verification of the model ``` xTest, labelTest = generateBatch(N) testColors = ['navy', 'orangered'] ``` # Helpers ``` def plotHeatMap(X, classes, title=None, fmt='.2g', ax=None, xlabel=None, ylabel=None): """ Fix heatmap plot from Seaborn with pyplot 3.1.0, 3.1.1 https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot """ ax = sns.heatmap(X, xticklabels=classes, yticklabels=classes, annot=True, fmt=fmt, cmap=plt.cm.Blues, ax=ax) #notation: "annot" not "annote" bottom, top = ax.get_ylim() ax.set_ylim(bottom + 0.5, top - 0.5) if title: ax.set_title(title) if xlabel: ax.set_xlabel(xlabel) if ylabel: ax.set_ylabel(ylabel) def plotConfusionMatrix(yTrue, yEst, classes, title=None, fmt='.2g', ax=None): plotHeatMap(metrics.confusion_matrix(yTrue, yEst), classes, title, fmt, ax, \ xlabel='Estimations', ylabel='True values'); ``` # K Nearest Neighbors (KNN) References: - https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm - https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/ ## Homemade Using a simple algorithm. 
Unweighted : each of the K neighbors has the same weight ``` # Select a K k = 10 # Create a Panda dataframe in order to link x and y df = pd.DataFrame(np.concatenate((xTrain, labelTrain.reshape(-1,1)), axis=1), columns = ('x0', 'x1', 'label')) # Insert columns to compute the difference of current test to the train and the L2 df.insert(df.shape[1], 'diff0', 0) df.insert(df.shape[1], 'diff1', 0) df.insert(df.shape[1], 'L2', 0) # threshold = k / 2 labelEst0 = np.zeros(xTest.shape[0]) for i, x in enumerate(xTest): # Compute distance and norm to each training sample df['diff0'] = df['x0'] - x[0] df['diff1'] = df['x1'] - x[1] df['L2'] = df['diff0']**2 + df['diff1']**2 # Get the K lowest kSmallest = df.nsmallest(k, 'L2') # Finalize prediction based on the mean labelEst0[i] = np.sum(kSmallest['label']) > threshold ``` ### Performance of homemade model ``` plt.figure(figsize=(12,4)) plt.subplot(1,3,1) plt.scatter(xTest[:,0], xTest[:,1], c=labelEst0, cmap=pltcolors.ListedColormap(testColors), marker='x', alpha=0.2); plt.xlabel('x0') plt.ylabel('x1') plt.grid() plt.title('Estimated') cb = plt.colorbar() loc = np.arange(0,1,1./len(testColors)) cb.set_ticks(loc) cb.set_ticklabels([0,1]); plt.subplot(1,3,2) plt.hist(labelEst0, 10, density=True, alpha=0.5) plt.title('Bernouilli parameter =' + str(np.mean(labelEst0))) plt.subplot(1,3,3) plt.scatter(xTest[:,0], xTest[:,1], c=labelTest, cmap=pltcolors.ListedColormap(colors), marker='x', alpha=0.1); plt.xlabel('x0') plt.ylabel('x1') plt.grid() plt.title('Generator') cb = plt.colorbar() loc = np.arange(0,1,1./len(colors)) cb.set_ticks(loc) cb.set_ticklabels([0,1]); accuracy0 = np.sum(labelTest == labelEst0)/N print('Accuracy =', accuracy0) ``` ### Precision $p(y = 1 \mid \hat{y} = 1)$ ``` print('Precision =', np.sum(labelTest[labelEst0 == 1])/np.sum(labelEst0)) ``` ### Recall $p(\hat{y} = 1 \mid y = 1)$ ``` print('Recall =', np.sum(labelTest[labelEst0 == 1])/np.sum(labelTest)) ``` ### Confusion matrix ``` plotConfusionMatrix(labelTest, labelEst0, np.array(['Blue', 'Red'])); print(metrics.classification_report(labelTest, labelEst0)) ``` This non-parametric model has a the best performance of all models used so far, including the neural network with two layers. The large drawback is the amount of computation for each sample to predict. This method is hardly usable for sample sizes over 10k. # Using SciKit Learn References: - SciKit documentation - https://stackabuse.com/k-nearest-neighbors-algorithm-in-python-and-scikit-learn/ ``` model1 = SkKNeighborsClassifier(n_neighbors=k) model1.fit(xTrain, labelTrain) labelEst1 = model1.predict(xTest) print('Accuracy =', model1.score(xTest, labelTest)) plt.hist(labelEst1*1.0, 10, density=True, alpha=0.5) plt.title('Bernouilli parameter =' + str(np.mean(labelEst1))); ``` ### Confusion matrix (plot) ``` plotConfusionMatrix(labelTest, labelEst1, np.array(['Blue', 'Red'])); ``` ### Classification report ``` print(metrics.classification_report(labelTest, labelEst1)) ``` ### ROC curve ``` logit_roc_auc = metrics.roc_auc_score(labelTest, labelEst1) fpr, tpr, thresholds = metrics.roc_curve(labelTest, model1.predict_proba(xTest)[:,1]) plt.plot(fpr, tpr, label='KNN classification (area = %0.2f)' % logit_roc_auc) plt.plot([0, 1], [0, 1],'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic') plt.legend(loc="lower right"); ``` # Where to go from here ? 
- Other linear implementations and simple neural nets using "raw" Python or SciKit Learn ([HTML](ClassificationContinuous2Features.html) / [Jupyter](ClassificationContinuous2Features.ipynb)), using TensorFlow ([HTML](ClassificationContinuous2Features-TensorFlow.html) / [Jupyter](ClassificationContinuous2Features-TensorFlow.ipynb)), or using Keras ([HTML](ClassificationContinuous2Features-Keras.html) / [Jupyter](ClassificationContinuous2Features-Keras.ipynb))
- Non-linear problem solving with Support Vector Machine (SVM) ([HTML](ClassificationSVM.html) / [Jupyter](ClassificationSVM.ipynb))
- More complex multi-class models on the Czech and Norway flags using Keras ([HTML](ClassificationMulti2Features-Keras.html) / [Jupyter](ClassificationMulti2Features-Keras.ipynb)), showing one of the main motivations for neural networks
- Compare with the two-feature linear regression using simple algorithms ([HTML](../linear/LinearRegressionBivariate.html) / [Jupyter](LinearRegressionBivariate.ipynb)), or using Keras ([HTML](LinearRegressionBivariate-Keras.html) / [Jupyter](LinearRegressionUnivariate-Keras.ipynb))
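As a small footnote to the KNN models above (both the homemade and the scikit-learn versions weight all K neighbors equally), scikit-learn also supports weighting neighbors by inverse distance. A minimal sketch, reusing `k` and the arrays defined earlier in this notebook:

```
# Sketch: distance-weighted KNN, where closer neighbours contribute more.
# Reuses k, xTrain, labelTrain, xTest, labelTest defined above.
from sklearn.neighbors import KNeighborsClassifier

model2 = KNeighborsClassifier(n_neighbors=k, weights='distance')
model2.fit(xTrain, labelTrain)
print('Accuracy (distance-weighted) =', model2.score(xTest, labelTest))
```

Whether distance weighting helps here depends on the noise level and on the choice of K; it tends to matter more for large K.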
github_jupyter
<a href="https://colab.research.google.com/github/harnalashok/hadoop/blob/main/hadoop_spark_install_on_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` # Last amended: 30th March, 2021 # Myfolder: github/hadoop # Objective: # i) Install hadoop on colab # (current version is 3.2.2) # ii) Experiments with hadoop # iii) Install spark on colab # iv) Access hadoop file from spark # v) Install koalas on colab # # # Java 8 install: https://stackoverflow.com/a/58191107 # Hadoop install: https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/SingleCluster.html # Spark install: https://stackoverflow.com/a/64183749 # https://www.analyticsvidhya.com/blog/2020/11/a-must-read-guide-on-how-to-work-with-pyspark-on-google-colab-for-data-scientists/ ``` ## Install hadoop If it takes too long, it means, it is awaiting input from you regarding overwriting ssh keys ### Define functions No downloads. Just function definitions ``` # 1.0 How to set environment variable import os import time ``` #### ssh_install() ``` # 2.0 Function to install ssh client and sshd (Server) def ssh_install(): print("\n--1. Download and install ssh server----\n") ! sudo apt-get remove openssh-client openssh-server ! sudo apt install openssh-client openssh-server print("\n--2. Restart ssh server----\n") ! service ssh restart ``` #### Java install ``` # 3.0 Function to download and install java 8 def install_java(): ! rm -rf /usr/java print("\n--Download and install Java 8----\n") !apt-get install -y openjdk-8-jdk-headless -qq > /dev/null # install openjdk os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" # set environment variable !update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java !update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac !mkdir -p /usr/java ! ln -s "/usr/lib/jvm/java-8-openjdk-amd64" "/usr/java" ! mv "/usr/java/java-8-openjdk-amd64" "/usr/java/latest" !java -version #check java version !javac -version ``` #### hadoop install ``` # 4.0 Function to download and install hadoop def hadoop_install(): print("\n--5. Download hadoop tar.gz----\n") ! wget -c https://mirrors.estointernet.in/apache/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz print("\n--6. Transfer downloaded content and unzip tar.gz----\n") ! mv /content/hadoop* /opt/ ! tar -xzf /opt/hadoop-3.2.2.tar.gz --directory /opt/ print("\n--7. Create hadoop folder----\n") ! rm -r /app/hadoop/tmp ! mkdir -p /app/hadoop/tmp print("\n--8. Check folder for files----\n") ! ls -la /opt ``` #### hadoop config ``` # 5.0 Function for setting hadoop configuration def hadoop_config(): print("\n--Begin Configuring hadoop---\n") print("\n=============================\n") print("\n--9. core-site.xml----\n") ! cat /opt/hadoop-3.2.2/etc/hadoop/core-site.xml print("\n--10. Amend core-site.xml----\n") ! echo '<?xml version="1.0" encoding="UTF-8"?>' > /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <name>fs.defaultFS</name>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <value>hdfs://localhost:9000</value>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! 
echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <name>hadoop.tmp.dir</name>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <value>/app/hadoop/tmp</value>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <description>A base for other temporary directories.</description>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml # Added following regarding safemode from here: # https://stackoverflow.com/a/33800253 ! echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <name>dfs.safemode.threshold.pct</name>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' <value>0</value>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml ! echo ' </configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/core-site.xml print("\n--11. Amended core-site.xml----\n") ! cat /opt/hadoop-3.2.2/etc/hadoop/core-site.xml print("\n--12. yarn-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo '<?xml version="1.0" encoding="UTF-8"?>' > /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo '<configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <name>yarn.nodemanager.aux-services</name>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <value>mapreduce_shuffle</value>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <name>yarn.nodemanager.vmem-check-enabled</name>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' <value>false</value>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml !echo ' </configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml print("\n--13. Amended yarn-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/yarn-site.xml print("\n--14. mapred-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml print("\n--15. 
Amend mapred-site.xml----\n") !echo '<?xml version="1.0"?>' > /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo '<configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>mapreduce.framework.name</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>yarn</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>yarn.app.mapreduce.am.env</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>mapreduce.map.env</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <name>mapreduce.reduce.env</name>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' <value>HADOOP_MAPRED_HOME=${HADOOP_HOME}</value>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml !echo '</configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml print("\n--16, Amended mapred-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/mapred-site.xml print("\n---17. hdfs-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml print("\n---18. Amend hdfs-site.xml----\n") !echo '<?xml version="1.0" encoding="UTF-8"?> ' > /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo '<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo '<configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <name>dfs.replication</name>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <value>1</value>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <name>dfs.block.size</name>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <value>16777216</value>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' <description>Block size</description>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo ' </property>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml !echo '</configuration>' >> /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml print("\n---19. Amended hdfs-site.xml----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/hdfs-site.xml print("\n---20. hadoop-env.sh----\n") # https://stackoverflow.com/a/53140448 !cat /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-amd64"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export HDFS_NAMENODE_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export HDFS_DATANODE_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export HDFS_SECONDARYNAMENODE_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! 
echo 'export YARN_RESOURCEMANAGER_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ! echo 'export YARN_NODEMANAGER_USER="root"' >> /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh print("\n---21. Amended hadoop-env.sh----\n") !cat /opt/hadoop-3.2.2/etc/hadoop/hadoop-env.sh ``` #### ssh keys ``` # 6.0 Function tp setup ssh passphrase def set_keys(): print("\n---22. Generate SSH keys----\n") ! cd ~ ; pwd ! cd ~ ; ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa ! cd ~ ; cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys ! cd ~ ; chmod 0600 ~/.ssh/authorized_keys ``` #### Set environment ``` # 7.0 Function to set up environmental variables def set_env(): print("\n---23. Set Environment variables----\n") # 'export' command does not work in colab # https://stackoverflow.com/a/57240319 os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" #set environment variable os.environ["JRE_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/jre" os.environ["HADOOP_HOME"] = "/opt/hadoop-3.2.2" os.environ["HADOOP_CONF_DIR"] = "/opt/hadoop-3.2.2/etc/hadoop" os.environ["LD_LIBRARY_PATH"] += ":/opt/hadoop-3.2.2/lib/native" os.environ["PATH"] += ":/opt/hadoop-3.2.2/bin:/opt/hadoop-3.2.2/sbin" ``` #### Install all function ``` # 8.0 Function to call all functions def install_hadoop(): print("\n--Install java----\n") ssh_install() install_java() hadoop_install() hadoop_config() set_keys() set_env() ``` ### Begin install Start downloading, install and configure. Takes around 2 minutes ``` # 9.0 Start installation start = time.time() install_hadoop() end = time.time() print("\n---Time taken----\n") print((end- start)/60) ``` ### Format hadoop ``` # 10.0 Format hadoop print("\n---24. Format namenode----\n") !hdfs namenode -format ``` ## Start and test hadoop If namenode is in safemode, use the command: `!hdfs dfsadmin -safemode leave` #### Start hadoop If start fails with 'Connection refused', run `ssh_install()` once again ``` # 11.0 Start namenode # If this fails, run # ssh_install() below # and start hadoop again: print("\n---25. Start namenode----\n") ! start-dfs.sh #ssh_install() ``` #### Start yarn ``` # 11.1 Start yarn ! start-yarn.sh ``` If `start-dfs.sh` fails, issue the following three commands, one after another:<br> `! sudo apt-get remove openssh-client openssh-server`<br> `! sudo apt-get install openssh-client openssh-server`<br> `! service ssh restart`<br> And then try to start hadoop again, as: `start-dfs.sh` #### Test hadoop IF in safe mode, leave safe mode as:<br> `!hdfs dfsadmin -safemode leave` ``` # 11.1 print("\n---26. Make folders in hadoop----\n") ! hdfs dfs -mkdir /user ! hdfs dfs -mkdir /user/ashok # 11.2 Run hadoop commands ! hdfs dfs -ls / ! hdfs dfs -ls /user # 11.3 Stopping hadoop # Gives some errors # But hadoop stops #!stop-dfs.sh ``` Run the `ssh_install()` again if hadoop fails to start with `start-dfs.sh` and then try to start hadoop again. ## Install spark ### Define functions `findspark`: PySpark isn't on `sys.path` by default, but that doesn't mean it can't be used as a regular library. You can address this by either symlinking pyspark into your site-packages, or adding `pyspark` to `sys.path` at runtime. `findspark` does the latter. ``` # 1.0 Function to download and unzip spark def spark_koalas_install(): print("\n--1.1 Install findspark----\n") !pip install -q findspark print("\n--1.2 Install databricks Koalas----\n") !pip install koalas print("\n--1.3 Download Apache tar.gz----\n") ! 
wget -c https://mirrors.estointernet.in/apache/spark/spark-3.1.1/spark-3.1.1-bin-hadoop3.2.tgz print("\n--1.4 Transfer downloaded content and unzip tar.gz----\n") ! mv /content/spark* /opt/ ! tar -xzf /opt/spark-3.1.1-bin-hadoop3.2.tgz --directory /opt/ print("\n--1.5 Check folder for files----\n") ! ls -la /opt # 1.1 Function to set environment def set_spark_env(): print("\n---2. Set Environment variables----\n") os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["JRE_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64/jre" os.environ["SPARK_HOME"] = "/opt/spark-3.1.1-bin-hadoop3.2" os.environ["LD_LIBRARY_PATH"] += ":/opt/spark-3.1.1-bin-hadoop3.2/lib/native" os.environ["PATH"] += ":/opt/spark-3.1.1-bin-hadoop3.2/bin:/opt/spark-3.1.1-bin-hadoop3.2/sbin" print("\n---2.1. Check Environment variables----\n") # Check ! echo $PATH ! echo $LD_LIBRARY_PATH # 1.2 Function to configure spark def spark_conf(): print("\n---3. Configure spark to access hadoop----\n") !mv /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh.template /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh !echo "HADOOP_CONF_DIR=/opt/hadoop-3.2.2/etc/hadoop/" >> /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh print("\n---3.1 Check ----\n") #!cat /opt/spark-3.1.1-bin-hadoop3.2/conf/spark-env.sh ``` ### Install spark ``` # 2.0 Call all the three functions def install_spark(): spark_koalas_install() set_spark_env() spark_conf() # 2.1 install_spark() ``` ## Test spark Hadoop should have been started Call some libraries ``` # 3.0 Just call some libraries to test import pandas as pd import numpy as np # 3.1 Get spark in sys.path import findspark findspark.init() # 3.2 Call other spark libraries # Just to test from pyspark.sql import SparkSession import databricks.koalas as ks from pyspark.ml.feature import VectorAssembler from pyspark.ml.regression import LinearRegression # 3.1 Build spark session spark = SparkSession. \ builder. \ master("local[*]"). 
\ getOrCreate() # 4.0 Pandas DataFrame pdf = pd.DataFrame({ 'x1': ['a','a','b','b', 'b', 'c', 'd','d'], 'x2': ['apple', 'orange', 'orange','orange', 'peach', 'peach','apple','orange'], 'x3': [1, 1, 2, 2, 2, 4, 1, 2], 'x4': [2.4, 2.5, 3.5, 1.4, 2.1,1.5, 3.0, 2.0], 'y1': [1, 0, 1, 0, 0, 1, 1, 0], 'y2': ['yes', 'no', 'no', 'yes', 'yes', 'yes', 'no', 'yes'] }) # 4.1 pdf # 4.2 Transform to Spark DataFrame df = spark.createDataFrame(pdf) df.show() # 4.3 Create a csv file # and tranfer it to hdfs !echo "a,b,c,d" > /content/airports.csv !echo "5,4,6,7" >> /content/airports.csv !echo "2,3,4,5" >> /content/airports.csv !echo "8,9,0,1" >> /content/airports.csv !echo "2,3,4,1" >> /content/airports.csv !echo "1,2,2,1" >> /content/airports.csv !echo "0,1,2,6" >> /content/airports.csv !echo "9,3,1,8" >> /content/airports.csv !ls -la /content # 4.4 !hdfs dfs -rm -f /user/ashok/airports.csv !hdfs dfs -put /content/airports.csv /user/ashok/ !hdfs dfs -ls /user/ashok # 5.0 Read file directly from hadoop airports_df = spark.read.csv( "/user/ashok/airports.csv", inferSchema = True, header = True ) # 5.1 Show file airports_df.show() ``` ## Test Koalas Hadoop should have been started Create a koalas dataframe ``` # 6.0 # If namenode is in safemode, first use: # hdfs dfsadmin -safemode leave kdf = ks.DataFrame( { 'a': [1, 2, 3, 4, 5, 6], 'b': [100, 200, 300, 400, 500, 600], 'c': ["one", "two", "three", "four", "five", "six"] }, index=[10, 20, 30, 40, 50, 60] ) # 6.1 And show kdf # 6.2 Pandas DataFrame pdf = pd.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}) # 6.2.1 Transform to koalas DataFrame df = ks.from_pandas(pdf) # 6.3 Rename koalas dataframe columns df.columns = ['x', 'y', 'z1'] # 6.4 Do some operations on koalas DF, in place: df['x2'] = df.x * df.x # 6.6 Finally show koalas df df # 6.7 Read csv file from hadoop # and create koalas df ks.read_csv("/user/ashok/airports.csv").head(10) ################### ```
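Once you are done experimenting, the Spark session and the single-node Hadoop daemons started above can be shut down. A minimal cleanup sketch (as noted earlier for `stop-dfs.sh`, these scripts may print warnings on Colab but still stop the daemons):

```
# Optional cleanup: stop Spark and the Hadoop daemons started earlier.
spark.stop()      # release the SparkSession created above
! stop-yarn.sh    # stop the YARN resourcemanager / nodemanager
! stop-dfs.sh     # stop the HDFS namenode / datanode (may print warnings)
```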
github_jupyter
# Comprehensive Example ``` # Enabling the `widget` backend. # This requires jupyter-matplotlib a.k.a. ipympl. # ipympl can be install via pip or conda. %matplotlib widget import matplotlib.pyplot as plt import numpy as np # Testing matplotlib interactions with a simple plot fig = plt.figure() plt.plot(np.sin(np.linspace(0, 20, 100))); # Always hide the toolbar fig.canvas.toolbar_visible = False # Put it back to its default fig.canvas.toolbar_visible = 'fade-in-fade-out' # Change the toolbar position fig.canvas.toolbar_position = 'top' # Hide the Figure name at the top of the figure fig.canvas.header_visible = False # Hide the footer fig.canvas.footer_visible = False # Disable the resizing feature fig.canvas.resizable = False # If true then scrolling while the mouse is over the canvas will not move the entire notebook fig.canvas.capture_scroll = True ``` You can also call `display` on `fig.canvas` to display the interactive plot anywhere in the notebooke ``` fig.canvas.toolbar_visible = True display(fig.canvas) ``` Or you can `display(fig)` to embed the current plot as a png ``` display(fig) ``` # 3D plotting ``` from mpl_toolkits.mplot3d import axes3d fig = plt.figure() ax = fig.add_subplot(111, projection='3d') # Grab some test data. X, Y, Z = axes3d.get_test_data(0.05) # Plot a basic wireframe. ax.plot_wireframe(X, Y, Z, rstride=10, cstride=10) plt.show() ``` # Subplots ``` # A more complex example from the matplotlib gallery np.random.seed(0) n_bins = 10 x = np.random.randn(1000, 3) fig, axes = plt.subplots(nrows=2, ncols=2) ax0, ax1, ax2, ax3 = axes.flatten() colors = ['red', 'tan', 'lime'] ax0.hist(x, n_bins, density=1, histtype='bar', color=colors, label=colors) ax0.legend(prop={'size': 10}) ax0.set_title('bars with legend') ax1.hist(x, n_bins, density=1, histtype='bar', stacked=True) ax1.set_title('stacked bar') ax2.hist(x, n_bins, histtype='step', stacked=True, fill=False) ax2.set_title('stack step (unfilled)') # Make a multiple-histogram of data-sets with different length. x_multi = [np.random.randn(n) for n in [10000, 5000, 2000]] ax3.hist(x_multi, n_bins, histtype='bar') ax3.set_title('different sample sizes') fig.tight_layout() plt.show() fig.canvas.toolbar_position = 'right' fig.canvas.toolbar_visible = False ``` # Interactions with other widgets and layouting When you want to embed the figure into a layout of other widgets you should call `plt.ioff()` before creating the figure otherwise `plt.figure()` will trigger a display of the canvas automatically and outside of your layout. ### Without using `ioff` Here we will end up with the figure being displayed twice. The button won't do anything it just placed as an example of layouting. ``` import ipywidgets as widgets # ensure we are interactive mode # this is default but if this notebook is executed out of order it may have been turned off plt.ion() fig = plt.figure() ax = fig.gca() ax.imshow(Z) widgets.AppLayout( center=fig.canvas, footer=widgets.Button(icon='check'), pane_heights=[0, 6, 1] ) ``` ### Fixing the double display with `ioff` If we make sure interactive mode is off when we create the figure then the figure will only display where we want it to. 
There is ongoing work to allow usage of `ioff` as a context manager, see the [ipympl issue](https://github.com/matplotlib/ipympl/issues/220) and the [matplotlib issue](https://github.com/matplotlib/matplotlib/issues/17013) ``` plt.ioff() fig = plt.figure() plt.ion() ax = fig.gca() ax.imshow(Z) widgets.AppLayout( center=fig.canvas, footer=widgets.Button(icon='check'), pane_heights=[0, 6, 1] ) ``` # Interacting with other widgets ## Changing a line plot with a slide ``` # When using the `widget` backend from ipympl, # fig.canvas is a proper Jupyter interactive widget, which can be embedded in # an ipywidgets layout. See https://ipywidgets.readthedocs.io/en/stable/examples/Layout%20Templates.html # One can bound figure attributes to other widget values. from ipywidgets import AppLayout, FloatSlider plt.ioff() slider = FloatSlider( orientation='horizontal', description='Factor:', value=1.0, min=0.02, max=2.0 ) slider.layout.margin = '0px 30% 0px 30%' slider.layout.width = '40%' fig = plt.figure() fig.canvas.header_visible = False fig.canvas.layout.min_height = '400px' plt.title('Plotting: y=sin({} * x)'.format(slider.value)) x = np.linspace(0, 20, 500) lines = plt.plot(x, np.sin(slider.value * x)) def update_lines(change): plt.title('Plotting: y=sin({} * x)'.format(change.new)) lines[0].set_data(x, np.sin(change.new * x)) fig.canvas.draw() fig.canvas.flush_events() slider.observe(update_lines, names='value') AppLayout( center=fig.canvas, footer=slider, pane_heights=[0, 6, 1] ) ``` ## Update image data in a performant manner Two useful tricks to improve performance when updating an image displayed with matplolib are to: 1. Use the `set_data` method instead of calling imshow 2. Precompute and then index the array ``` # precomputing all images x = np.linspace(0,np.pi,200) y = np.linspace(0,10,200) X,Y = np.meshgrid(x,y) parameter = np.linspace(-5,5) example_image_stack = np.sin(X)[None,:,:]+np.exp(np.cos(Y[None,:,:]*parameter[:,None,None])) plt.ioff() fig = plt.figure() plt.ion() im = plt.imshow(example_image_stack[0]) def update(change): im.set_data(example_image_stack[change['new']]) fig.canvas.draw_idle() slider = widgets.IntSlider(value=0, min=0, max=len(parameter)-1) slider.observe(update, names='value') widgets.VBox([slider, fig.canvas]) ``` ### Debugging widget updates and matplotlib callbacks If an error is raised in the `update` function then will not always display in the notebook which can make debugging difficult. This same issue is also true for matplotlib callbacks on user events such as mousemovement, for example see [issue](https://github.com/matplotlib/ipympl/issues/116). There are two ways to see the output: 1. In jupyterlab the output will show up in the Log Console (View > Show Log Console) 2. using `ipywidgets.Output` Here is an example of using an `Output` to capture errors in the update function from the previous example. 
To induce errors, we changed the slider limits so that out-of-bounds errors will occur:

From: `slider = widgets.IntSlider(value=0, min=0, max=len(parameter)-1)`

To: `slider = widgets.IntSlider(value=0, min=0, max=len(parameter)+10)`

If you move the slider all the way to the right you should see errors appear in the Output widget.

```
plt.ioff()
fig = plt.figure()
plt.ion()
im = plt.imshow(example_image_stack[0])

out = widgets.Output()

@out.capture()
def update(change):
    with out:
        if change['name'] == 'value':
            im.set_data(example_image_stack[change['new']])
            fig.canvas.draw_idle()

slider = widgets.IntSlider(value=0, min=0, max=len(parameter)+10)
slider.observe(update)
display(widgets.VBox([slider, fig.canvas]))
display(out)
```
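The same capture trick works for the matplotlib event callbacks mentioned above (mouse movement, clicks, and so on). A minimal sketch, assuming the `fig` and `out` objects from the previous cell:

```
# Sketch: route exceptions (and prints) from a matplotlib event callback
# into the same Output widget, instead of losing them silently.
@out.capture()
def on_move(event):
    # Any exception raised in here shows up in `out`.
    if event.inaxes is not None:
        print(f"x={event.xdata:.2f}, y={event.ydata:.2f}")

cid = fig.canvas.mpl_connect('motion_notify_event', on_move)
display(out)
```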
github_jupyter
# Interactive single compartment HH example To run this interactive Jupyter Notebook, please click on the rocket icon 🚀 in the top panel. For more information, please see {ref}`how to use this documentation <userdocs:usage:jupyterbooks>`. Please uncomment the line below if you use the Google Colab. (It does not include these packages by default). ``` #%pip install pyneuroml neuromllite NEURON import math from neuroml import NeuroMLDocument from neuroml import Cell from neuroml import IonChannelHH from neuroml import GateHHRates from neuroml import BiophysicalProperties from neuroml import MembraneProperties from neuroml import ChannelDensity from neuroml import HHRate from neuroml import SpikeThresh from neuroml import SpecificCapacitance from neuroml import InitMembPotential from neuroml import IntracellularProperties from neuroml import IncludeType from neuroml import Resistivity from neuroml import Morphology, Segment, Point3DWithDiam from neuroml import Network, Population from neuroml import PulseGenerator, ExplicitInput import numpy as np from pyneuroml import pynml from pyneuroml.lems import LEMSSimulation ``` ## Declare the model ### Create ion channels ``` def create_na_channel(): """Create the Na channel. This will create the Na channel and save it to a file. It will also validate this file. returns: name of the created file """ na_channel = IonChannelHH(id="na_channel", notes="Sodium channel for HH cell", conductance="10pS", species="na") gate_m = GateHHRates(id="na_m", instances="3", notes="m gate for na channel") m_forward_rate = HHRate(type="HHExpLinearRate", rate="1per_ms", midpoint="-40mV", scale="10mV") m_reverse_rate = HHRate(type="HHExpRate", rate="4per_ms", midpoint="-65mV", scale="-18mV") gate_m.forward_rate = m_forward_rate gate_m.reverse_rate = m_reverse_rate na_channel.gate_hh_rates.append(gate_m) gate_h = GateHHRates(id="na_h", instances="1", notes="h gate for na channel") h_forward_rate = HHRate(type="HHExpRate", rate="0.07per_ms", midpoint="-65mV", scale="-20mV") h_reverse_rate = HHRate(type="HHSigmoidRate", rate="1per_ms", midpoint="-35mV", scale="10mV") gate_h.forward_rate = h_forward_rate gate_h.reverse_rate = h_reverse_rate na_channel.gate_hh_rates.append(gate_h) na_channel_doc = NeuroMLDocument(id="na_channel", notes="Na channel for HH neuron") na_channel_fn = "HH_example_na_channel.nml" na_channel_doc.ion_channel_hhs.append(na_channel) pynml.write_neuroml2_file(nml2_doc=na_channel_doc, nml2_file_name=na_channel_fn, validate=True) return na_channel_fn def create_k_channel(): """Create the K channel This will create the K channel and save it to a file. It will also validate this file. 
:returns: name of the K channel file """ k_channel = IonChannelHH(id="k_channel", notes="Potassium channel for HH cell", conductance="10pS", species="k") gate_n = GateHHRates(id="k_n", instances="4", notes="n gate for k channel") n_forward_rate = HHRate(type="HHExpLinearRate", rate="0.1per_ms", midpoint="-55mV", scale="10mV") n_reverse_rate = HHRate(type="HHExpRate", rate="0.125per_ms", midpoint="-65mV", scale="-80mV") gate_n.forward_rate = n_forward_rate gate_n.reverse_rate = n_reverse_rate k_channel.gate_hh_rates.append(gate_n) k_channel_doc = NeuroMLDocument(id="k_channel", notes="k channel for HH neuron") k_channel_fn = "HH_example_k_channel.nml" k_channel_doc.ion_channel_hhs.append(k_channel) pynml.write_neuroml2_file(nml2_doc=k_channel_doc, nml2_file_name=k_channel_fn, validate=True) return k_channel_fn def create_leak_channel(): """Create a leak channel This will create the leak channel and save it to a file. It will also validate this file. :returns: name of leak channel nml file """ leak_channel = IonChannelHH(id="leak_channel", conductance="10pS", notes="Leak conductance") leak_channel_doc = NeuroMLDocument(id="leak_channel", notes="leak channel for HH neuron") leak_channel_fn = "HH_example_leak_channel.nml" leak_channel_doc.ion_channel_hhs.append(leak_channel) pynml.write_neuroml2_file(nml2_doc=leak_channel_doc, nml2_file_name=leak_channel_fn, validate=True) return leak_channel_fn ``` ### Create cell ``` def create_cell(): """Create the cell. :returns: name of the cell nml file """ # Create the nml file and add the ion channels hh_cell_doc = NeuroMLDocument(id="cell", notes="HH cell") hh_cell_fn = "HH_example_cell.nml" hh_cell_doc.includes.append(IncludeType(href=create_na_channel())) hh_cell_doc.includes.append(IncludeType(href=create_k_channel())) hh_cell_doc.includes.append(IncludeType(href=create_leak_channel())) # Define a cell hh_cell = Cell(id="hh_cell", notes="A single compartment HH cell") # Define its biophysical properties bio_prop = BiophysicalProperties(id="hh_b_prop") # notes="Biophysical properties for HH cell") # Membrane properties are a type of biophysical properties mem_prop = MembraneProperties() # Add membrane properties to the biophysical properties bio_prop.membrane_properties = mem_prop # Append to cell hh_cell.biophysical_properties = bio_prop # Channel density for Na channel na_channel_density = ChannelDensity(id="na_channels", cond_density="120.0 mS_per_cm2", erev="50.0 mV", ion="na", ion_channel="na_channel") mem_prop.channel_densities.append(na_channel_density) # Channel density for k channel k_channel_density = ChannelDensity(id="k_channels", cond_density="360 S_per_m2", erev="-77mV", ion="k", ion_channel="k_channel") mem_prop.channel_densities.append(k_channel_density) # Leak channel leak_channel_density = ChannelDensity(id="leak_channels", cond_density="3.0 S_per_m2", erev="-54.3mV", ion="non_specific", ion_channel="leak_channel") mem_prop.channel_densities.append(leak_channel_density) # Other membrane properties mem_prop.spike_threshes.append(SpikeThresh(value="-20mV")) mem_prop.specific_capacitances.append(SpecificCapacitance(value="1.0 uF_per_cm2")) mem_prop.init_memb_potentials.append(InitMembPotential(value="-65mV")) intra_prop = IntracellularProperties() intra_prop.resistivities.append(Resistivity(value="0.03 kohm_cm")) # Add to biological properties bio_prop.intracellular_properties = intra_prop # Morphology morph = Morphology(id="hh_cell_morph") # notes="Simple morphology for the HH cell") seg = Segment(id="0", name="soma", notes="Soma 
segment") # We want a diameter such that area is 1000 micro meter^2 # surface area of a sphere is 4pi r^2 = 4pi diam^2 diam = math.sqrt(1000 / math.pi) proximal = distal = Point3DWithDiam(x="0", y="0", z="0", diameter=str(diam)) seg.proximal = proximal seg.distal = distal morph.segments.append(seg) hh_cell.morphology = morph hh_cell_doc.cells.append(hh_cell) pynml.write_neuroml2_file(nml2_doc=hh_cell_doc, nml2_file_name=hh_cell_fn, validate=True) return hh_cell_fn ``` ### Create a network ``` def create_network(): """Create the network :returns: name of network nml file """ net_doc = NeuroMLDocument(id="network", notes="HH cell network") net_doc_fn = "HH_example_net.nml" net_doc.includes.append(IncludeType(href=create_cell())) # Create a population: convenient to create many cells of the same type pop = Population(id="pop0", notes="A population for our cell", component="hh_cell", size=1) # Input pulsegen = PulseGenerator(id="pg", notes="Simple pulse generator", delay="100ms", duration="100ms", amplitude="0.08nA") exp_input = ExplicitInput(target="pop0[0]", input="pg") net = Network(id="single_hh_cell_network", note="A network with a single population") net_doc.pulse_generators.append(pulsegen) net.explicit_inputs.append(exp_input) net.populations.append(pop) net_doc.networks.append(net) pynml.write_neuroml2_file(nml2_doc=net_doc, nml2_file_name=net_doc_fn, validate=True) return net_doc_fn ``` ## Plot the data we record ``` def plot_data(sim_id): """Plot the sim data. Load the data from the file and plot the graph for the membrane potential using the pynml generate_plot utility function. :sim_id: ID of simulaton """ data_array = np.loadtxt(sim_id + ".dat") pynml.generate_plot([data_array[:, 0]], [data_array[:, 1]], "Membrane potential", show_plot_already=False, save_figure_to=sim_id + "-v.png", xaxis="time (s)", yaxis="membrane potential (V)") pynml.generate_plot([data_array[:, 0]], [data_array[:, 2]], "channel current", show_plot_already=False, save_figure_to=sim_id + "-i.png", xaxis="time (s)", yaxis="channel current (A)") pynml.generate_plot([data_array[:, 0], data_array[:, 0]], [data_array[:, 3], data_array[:, 4]], "current density", labels=["Na", "K"], show_plot_already=False, save_figure_to=sim_id + "-iden.png", xaxis="time (s)", yaxis="current density (A_per_m2)") ``` ## Create and run the simulation Create the simulation, run it, record data, and plot the recorded information. ``` def main(): """Main function Include the NeuroML model into a LEMS simulation file, run it, plot some data. 
""" # Simulation bits sim_id = "HH_single_compartment_example_sim" simulation = LEMSSimulation(sim_id=sim_id, duration=300, dt=0.01, simulation_seed=123) # Include the NeuroML model file simulation.include_neuroml2_file(create_network()) # Assign target for the simulation simulation.assign_simulation_target("single_hh_cell_network") # Recording information from the simulation simulation.create_output_file(id="output0", file_name=sim_id + ".dat") simulation.add_column_to_output_file("output0", column_id="pop0[0]/v", quantity="pop0[0]/v") simulation.add_column_to_output_file("output0", column_id="pop0[0]/iChannels", quantity="pop0[0]/iChannels") simulation.add_column_to_output_file("output0", column_id="pop0[0]/na/iDensity", quantity="pop0[0]/hh_b_prop/membraneProperties/na_channels/iDensity/") simulation.add_column_to_output_file("output0", column_id="pop0[0]/k/iDensity", quantity="pop0[0]/hh_b_prop/membraneProperties/k_channels/iDensity/") # Save LEMS simulation to file sim_file = simulation.save_to_file() # Run the simulation using the default jNeuroML simulator pynml.run_lems_with_jneuroml(sim_file, max_memory="2G", nogui=True, plot=False) # Plot the data plot_data(sim_id) if __name__ == "__main__": main() ```
github_jupyter
## Amazon SageMaker Feature Store: Encrypt Data in your Online or Offline Feature Store using KMS key This notebook demonstrates how to enable encyption for your data in your online or offline Feature Store using KMS key. We start by showing how to programmatically create a KMS key, and how to apply it to the feature store creation process for data encryption. The last portion of this notebook demonstrates how to verify that your KMS key is being used to encerypt your data in your feature store. ### Overview 1. Create a KMS key. - How to create a KMS key programmatically using the KMS client from boto3? 2. Attach role to your KMS key. - Attach the required entries to your policy for data encryption in your feature store. 3. Create an online or offline feature store and apply it to your feature store creation process. - How to enable encryption for your online store? - How to enable encryption for your offline store? 4. How to verify that your data is encrypted in your online or offline store? ### Prerequisites This notebook uses both `boto3` and Python SDK libraries, and the `Python 3 (Data Science)` kernel. This notebook also works with Studio, Jupyter, and JupyterLab. ### Library Dependencies: * sagemaker>=2.0.0 * numpy * pandas ``` import sagemaker import sys import boto3 import pandas as pd import numpy as np import json original_version = sagemaker.__version__ %pip install 'sagemaker>=2.0.0' ``` ### Set up ``` sagemaker_session = sagemaker.Session() s3_bucket_name = sagemaker_session.default_bucket() prefix = "sagemaker-featurestore-kms-demo" role = sagemaker.get_execution_role() region = sagemaker_session.boto_region_name ``` Create a KMS client using boto3. Note that you can access your boto session through your sagemaker session, e.g.,`sagemaker_session`. ``` kms = sagemaker_session.boto_session.client("kms") ``` ### KMS Policy Template Below is the policy template you will use for creating a KMS key. You will specify your role to grant it access to various KMS operations that will be used in the back-end for encrypting your data in your Online or Offline Feature Store. **Note**: You will need to substitute your Account number in for `123456789012` in the policy below for these lines: `arn:aws:cloudtrail:*:123456789012:trail/*`. It is important to understand that the policy below will grant admin privileges for Customer Managed Keys (CMK) around viewing and revoking grants, decrypt and encrypt permissions on CloudTrail and full access permissions through Feature Store. Also, note that the the Feature Store Service creates additonal grants that are used for encryption purposes for your online store. 
``` policy = { "Version": "2012-10-17", "Id": "key-policy-feature-store", "Statement": [ { "Sid": "Allow access through Amazon SageMaker Feature Store for all principals in the account that are authorized to use Amazon SageMaker Feature Store", "Effect": "Allow", "Principal": {"AWS": role}, "Action": [ "kms:Encrypt", "kms:Decrypt", "kms:DescribeKey", "kms:CreateGrant", "kms:RetireGrant", "kms:ReEncryptFrom", "kms:ReEncryptTo", "kms:GenerateDataKey", "kms:ListAliases", "kms:ListGrants", ], "Resource": ["*"], "Condition": {"StringLike": {"kms:ViaService": "sagemaker.*.amazonaws.com"}}, }, { "Sid": "Allow administrators to view the CMK and revoke grants", "Effect": "Allow", "Principal": {"AWS": [role]}, "Action": ["kms:Describe*", "kms:Get*", "kms:List*", "kms:RevokeGrant"], "Resource": ["*"], }, { "Sid": "Enable CloudTrail Encrypt Permissions", "Effect": "Allow", "Principal": {"Service": "cloudtrail.amazonaws.com", "AWS": [role]}, "Action": "kms:GenerateDataKey*", "Resource": "*", "Condition": { "StringLike": { "kms:EncryptionContext:aws:cloudtrail:arn": [ "arn:aws:cloudtrail:*:123456789012:trail/*", "arn:aws:cloudtrail:*:123456789012:trail/*", ] } }, }, { "Sid": "Enable CloudTrail log decrypt permissions", "Effect": "Allow", "Principal": {"AWS": [role]}, "Action": "kms:Decrypt", "Resource": ["*"], "Condition": {"Null": {"kms:EncryptionContext:aws:cloudtrail:arn": "false"}}, }, ], } ``` Create your new KMS key using the policy above and your KMS client. ``` try: new_kms_key = kms.create_key( Policy=json.dumps(policy), Description="string", KeyUsage="ENCRYPT_DECRYPT", CustomerMasterKeySpec="SYMMETRIC_DEFAULT", Origin="AWS_KMS", ) AliasName = "my-new-kms-key" ## provide a unique alias name kms.create_alias( AliasName="alias/" + AliasName, TargetKeyId=new_kms_key["KeyMetadata"]["KeyId"] ) print(new_kms_key) except Exception as e: print("Error {}".format(e)) ``` Now that we have our KMS key created and the necessary operations added to our role, we now load in our data. ``` customer_data = pd.read_csv("data/feature_store_introduction_customer.csv") orders_data = pd.read_csv("data/feature_store_introduction_orders.csv") customer_data.head() orders_data.head() customer_data.dtypes orders_data.dtypes ``` ### Creating Feature Groups We first start by creating feature group names for customer_data and orders_data. Following this, we create two Feature Groups, one for customer_dat and another for orders_data ``` from time import gmtime, strftime, sleep customers_feature_group_name = "customers-feature-group-" + strftime("%d-%H-%M-%S", gmtime()) orders_feature_group_name = "orders-feature-group-" + strftime("%d-%H-%M-%S", gmtime()) ``` Instantiate a FeatureGroup object for customers_data and orders_data. ``` from sagemaker.feature_store.feature_group import FeatureGroup customers_feature_group = FeatureGroup( name=customers_feature_group_name, sagemaker_session=sagemaker_session ) orders_feature_group = FeatureGroup( name=orders_feature_group_name, sagemaker_session=sagemaker_session ) import time current_time_sec = int(round(time.time())) record_identifier_feature_name = "customer_id" ``` Append EventTime feature to your data frame. This parameter is required, and time stamps each data point. ``` customer_data["EventTime"] = pd.Series([current_time_sec] * len(customer_data), dtype="float64") orders_data["EventTime"] = pd.Series([current_time_sec] * len(orders_data), dtype="float64") customer_data.head() orders_data.head() ``` Load feature definitions to your feature group. 
``` customers_feature_group.load_feature_definitions(data_frame=customer_data) orders_feature_group.load_feature_definitions(data_frame=orders_data) ``` ### How to create an Online or Offline Feature Store that uses your KMS key for encryption? Below we create two feature groups, `customers_feature_group` and `orders_feature_group` respectively, and explain how use your KMS key to securely encrypt your data in your online or offline feature store. ### How to create an Online Feature store with your KMS key? To encrypt data in your online feature store, set `enable_online_store` to be `True` and specify your KMS key as parameter `online_store_kms_key_id`. You will need to substitute your Account number in `arn:aws:kms:us-east-1:123456789012:key/` replacing `123456789012` with your Account number. ``` customers_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=True, online_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+ new_kms_key['KeyMetadata']['KeyId'] ) orders_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=True, online_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+new_kms_key['KeyMetadata']['KeyId'] ) ``` ### How to create an Offline Feature store with your KMS key? Similar to the above, set `enable_online_store` to be `False` and then specify your KMS key as parameter `offline_store_kms_key_id`. You will need to substitute your Account number in `arn:aws:kms:us-east-1:123456789012:key/` replacing `123456789012` with your Account number. ``` customers_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=False, offline_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+ new_kms_key['KeyMetadata']['KeyId'] ) orders_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=False, offline_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+new_kms_key['KeyMetadata']['KeyId'] ) ``` For this example we create an online feature store that encrypts your data using your KMS key. **Note**: You will need to substitute your Account number in `arn:aws:kms:us-east-1:123456789012:key/` replacing `123456789012` with your Account number. ``` customers_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=False, offline_store_kms_key_id="arn:aws:kms:us-east-1:123456789012:key/" + new_kms_key["KeyMetadata"]["KeyId"], ) orders_feature_group.create( s3_uri=f"s3://{s3_bucket_name}/{prefix}", record_identifier_name=record_identifier_feature_name, event_time_feature_name="EventTime", role_arn=role, enable_online_store=False, offline_store_kms_key_id="arn:aws:kms:us-east-1:123456789012:key/" + new_kms_key["KeyMetadata"]["KeyId"], ) ``` ### How to verify that your KMS key is being used to encrypt your data in your Online or Offline Feature Store? 
### Online Store Verification

To demonstrate that your data is being encrypted in your Online store, use your `kms` client from `boto3` to list the grants under your KMS key. You should see a grant named 'SageMakerFeatureStore-' followed by the name of the feature group you created, with the following operations listed under Operations: `['Decrypt','Encrypt','GenerateDataKey','ReEncryptFrom','ReEncryptTo','CreateGrant','RetireGrant','DescribeKey']`

An alternative way to check that your data is encrypted in your Online store is to open [CloudTrail](https://console.aws.amazon.com/cloudtrail/) and navigate to your account name. Once there, under General details you should see that SSE-KMS encryption is enabled, with your AWS KMS key shown below it. Below is a screenshot showing this:

![CloudTrail](images/cloud-trails.png)

### Offline Store Verification

To verify that your data is being encrypted in your Offline store, navigate to your S3 bucket through the [Console](https://console.aws.amazon.com/s3/home?region=us-east-1), then navigate to your prefix, the offline store, your feature group name, and the /data/ folder. Once there, select a parquet file, which contains your feature group data. For this example, the directory path in S3 was: `Amazon S3/MYBUCKET/PREFIX/123456789012/sagemaker/region/offline-store/customers-feature-group-23-22-44-47/data/year=2021/month=03/day=23/hour=22/20210323T224448Z_IdfObJjhpqLQ5rmG.parquet`. After selecting the parquet file, navigate to the Server-side encryption settings. They should show that default encryption is enabled and reference SSE-KMS under server-side encryption. If so, your data is being encrypted in the offline store. Below is a screenshot of how this should look in the console:

![Feature Store Policy](images/s3-sse-enabled.png)

For this example, since we created a secure Online store using our KMS key, below we use `list_grants` to check that our feature group and the required grants are present under Operations.

```
kms.list_grants(
    KeyId="arn:aws:kms:us-east-1:123456789012:key/" + new_kms_key["KeyMetadata"]["KeyId"]
)
```

### Clean Up Resources

Remove the Feature Groups we created.

```
customers_feature_group.delete()
orders_feature_group.delete()

# restore the original sagemaker version
%pip install sagemaker=={original_version}
```

### Next Steps

For more information on how to use KMS to encrypt your data in your Feature Store, see [Feature Store Security](https://docs.aws.amazon.com/sagemaker/latest/dg/feature-store-security.html). For general information on KMS keys and CMKs, see [Customer Managed Keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys).
github_jupyter
# Hyperparameter tuning In the previous section, we did not discuss the parameters of random forest and gradient-boosting. However, there are a couple of things to keep in mind when setting these. This notebook gives crucial information regarding how to set the hyperparameters of both random forest and gradient boosting decision tree models. <div class="admonition caution alert alert-warning"> <p class="first admonition-title" style="font-weight: bold;">Caution!</p> <p class="last">For the sake of clarity, no cross-validation will be used to estimate the testing error. We are only showing the effect of the parameters on the validation set of what should be the inner cross-validation.</p> </div> ## Random forest The main parameter to tune for random forest is the `n_estimators` parameter. In general, the more trees in the forest, the better the generalization performance will be. However, it will slow down the fitting and prediction time. The goal is to balance computing time and generalization performance when setting the number of estimators when putting such learner in production. The `max_depth` parameter could also be tuned. Sometimes, there is no need to have fully grown trees. However, be aware that with random forest, trees are generally deep since we are seeking to overfit the learners on the bootstrap samples because this will be mitigated by combining them. Assembling underfitted trees (i.e. shallow trees) might also lead to an underfitted forest. ``` from sklearn.datasets import fetch_california_housing from sklearn.model_selection import train_test_split data, target = fetch_california_housing(return_X_y=True, as_frame=True) target *= 100 # rescale the target in k$ data_train, data_test, target_train, target_test = train_test_split( data, target, random_state=0) import pandas as pd from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestRegressor param_grid = { "n_estimators": [10, 20, 30], "max_depth": [3, 5, None], } grid_search = GridSearchCV( RandomForestRegressor(n_jobs=2), param_grid=param_grid, scoring="neg_mean_absolute_error", n_jobs=2, ) grid_search.fit(data_train, target_train) columns = [f"param_{name}" for name in param_grid.keys()] columns += ["mean_test_score", "rank_test_score"] cv_results = pd.DataFrame(grid_search.cv_results_) cv_results["mean_test_score"] = -cv_results["mean_test_score"] cv_results[columns].sort_values(by="rank_test_score") ``` We can observe that in our grid-search, the largest `max_depth` together with the largest `n_estimators` led to the best generalization performance. ## Gradient-boosting decision trees For gradient-boosting, parameters are coupled, so we cannot set the parameters one after the other anymore. The important parameters are `n_estimators`, `max_depth`, and `learning_rate`. Let's first discuss the `max_depth` parameter. We saw in the section on gradient-boosting that the algorithm fits the error of the previous tree in the ensemble. Thus, fitting fully grown trees will be detrimental. Indeed, the first tree of the ensemble would perfectly fit (overfit) the data and thus no subsequent tree would be required, since there would be no residuals. Therefore, the tree used in gradient-boosting should have a low depth, typically between 3 to 8 levels. Having very weak learners at each step will help reducing overfitting. With this consideration in mind, the deeper the trees, the faster the residuals will be corrected and less learners are required. 
Therefore, `n_estimators` should be increased if `max_depth` is lower.

Finally, we have overlooked the impact of the `learning_rate` parameter until now. When fitting the residuals, we would like the tree to try to correct all possible errors or only a fraction of them. The learning-rate allows you to control this behaviour. A small learning-rate value would only correct the residuals of very few samples. If a large learning-rate is set (e.g., 1), we would fit the residuals of all samples. So, with a very low learning-rate, we will need more estimators to correct the overall error. However, too large a learning-rate tends to produce an overfitted ensemble, similar to having too large a tree depth.

```
from sklearn.ensemble import GradientBoostingRegressor

param_grid = {
    "n_estimators": [10, 30, 50],
    "max_depth": [3, 5, None],
    "learning_rate": [0.1, 1],
}
grid_search = GridSearchCV(
    GradientBoostingRegressor(), param_grid=param_grid,
    scoring="neg_mean_absolute_error", n_jobs=2
)
grid_search.fit(data_train, target_train)

columns = [f"param_{name}" for name in param_grid.keys()]
columns += ["mean_test_score", "rank_test_score"]

cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results["mean_test_score"] = -cv_results["mean_test_score"]
cv_results[columns].sort_values(by="rank_test_score")
```

<div class="admonition caution alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Caution!</p>
<p class="last">Here, we tune the <tt class="docutils literal">n_estimators</tt> but be aware that using early-stopping as in the previous exercise will be better.</p>
</div>
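To illustrate the early-stopping alternative mentioned in the caution above, here is a minimal sketch (not part of the original notebook) that requests a large number of trees and lets `n_iter_no_change` stop the boosting once an internal validation score stops improving; the fitted attribute `n_estimators_` then reports how many trees were actually built.

```
from sklearn.ensemble import GradientBoostingRegressor

# Ask for many trees, but stop when the score on an internal validation
# split (10% of the training data) has not improved for 10 iterations.
gbr = GradientBoostingRegressor(
    n_estimators=1_000,
    max_depth=3,
    learning_rate=0.1,
    n_iter_no_change=10,
    validation_fraction=0.1,
    random_state=0,
)
gbr.fit(data_train, target_train)
print(f"Trees actually fitted: {gbr.n_estimators_}")
```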
github_jupyter
``` # !pip install ray[tune] import pandas as pd import numpy as np from matplotlib import pyplot as plt from sklearn.metrics import mean_squared_error from hyperopt import hp from ray import tune from hyperopt import fmin, tpe, hp,Trials, space_eval import scipy.stats df = pd.read_csv("../../Data/Raw/flightLogData.csv") plt.figure(figsize=(20, 10)) plt.plot(df.Time, df['Altitude'], linewidth=2, color="r", label="Altitude") plt.plot(df.Time, df['Vertical_velocity'], linewidth=2, color="y", label="Vertical_velocity") plt.plot(df.Time, df['Vertical_acceleration'], linewidth=2, color="b", label="Vertical_acceleration") plt.legend() plt.show() temp_df = df[['Altitude', "Vertical_velocity", "Vertical_acceleration"]] noise = np.random.normal(2, 5, temp_df.shape) noisy_df = temp_df + noise noisy_df['Time'] = df['Time'] plt.figure(figsize=(20, 10)) plt.plot(noisy_df.Time, noisy_df['Altitude'], linewidth=2, color="r", label="Altitude") plt.plot(noisy_df.Time, noisy_df['Vertical_velocity'], linewidth=2, color="y", label="Vertical_velocity") plt.plot(noisy_df.Time, noisy_df['Vertical_acceleration'], linewidth=2, color="b", label="Vertical_acceleration") plt.legend() plt.show() ``` ## Altitude ``` q = 0.001 A = np.array([[1.0, 0.1, 0.005], [0, 1.0, 0.1], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) # R = np.array([[0.5, 0.0], [0.0, 0.0012]]) # Q = np.array([[q, 0.0, 0.0], [0.0, q, 0.0], [0.0, 0.0, q]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) def kalman_update(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return new_altitude def objective_function(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = 
np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return mean_squared_error(df['Altitude'], new_altitude) # space = { # "r1": hp.choice("r1", np.arange(0.01, 90, 0.005)), # "r2": hp.choice("r2", np.arange(0.01, 90, 0.005)), # "q1": hp.choice("q1", np.arange(0.0001, 0.0009, 0.0001)) # } len(np.arange(0.00001, 0.09, 0.00001)) space = { "r1": hp.choice("r1", np.arange(0.001, 90, 0.001)), "r2": hp.choice("r2", np.arange(0.001, 90, 0.001)), "q1": hp.choice("q1", np.arange(0.00001, 0.09, 0.00001)) } # Initialize trials object trials = Trials() best = fmin(fn=objective_function, space = space, algo=tpe.suggest, max_evals=100, trials=trials ) print(best) # -> {'a': 1, 'c2': 0.01420615366247227} print(space_eval(space, best)) # -> ('case 2', 0.01420615366247227} d1 = space_eval(space, best) objective_function(d1) %%timeit objective_function({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75}) objective_function({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75}) y = kalman_update(d1) current = kalman_update({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75}) plt.figure(figsize=(20, 10)) plt.plot(noisy_df.Time, df['Altitude'], linewidth=2, color="r", label="Actual") plt.plot(noisy_df.Time, current, linewidth=2, color="g", label="ESP32") plt.plot(noisy_df.Time, noisy_df['Altitude'], linewidth=2, color="y", label="Noisy") plt.plot(noisy_df.Time, y, linewidth=2, color="b", label="Predicted") plt.legend() plt.show() def kalman_update_return_velocity(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return new_velocity def objective_function(param): r1, r2, q1 = param['r1'], param['r2'], param['q1'] R = np.array([[r1, 0.0], [0.0, r2]]) Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]]) A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]]) H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]]) P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]) I = np.identity(3) x_hat = np.array([[0.0], [0.0], [0.0]]) Y = np.array([[0.0], [0.0]]) new_altitude = [] new_acceleration = [] new_velocity = [] for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']): Z = np.array([[altitude], [az]]) x_hat_minus = np.dot(A, x_hat) P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R))) Y = Z - np.dot(H, x_hat_minus) x_hat = x_hat_minus + np.dot(K, Y) P = np.dot((I - np.dot(K, H)), P_minus) Y = Z - 
np.dot(H, x_hat_minus) new_altitude.append(float(x_hat[0])) new_velocity.append(float(x_hat[1])) new_acceleration.append(float(x_hat[2])) return mean_squared_error(df['Vertical_velocity'], new_velocity) space = { "r1": hp.choice("r1", np.arange(0.001, 90, 0.001)), "r2": hp.choice("r2", np.arange(0.001, 90, 0.001)), "q1": hp.choice("q1", np.arange(0.00001, 0.09, 0.00001)) } # Initialize trials object trials = Trials() best = fmin(fn=objective_function, space = space, algo=tpe.suggest, max_evals=100, trials=trials ) print(best) print(space_eval(space, best)) d2 = space_eval(space, best) objective_function(d2) y = kalman_update_return_velocity(d2) current = kalman_update_return_velocity({'q1': 0.0013, 'r1': 0.25, 'r2': 0.65}) previous = kalman_update_return_velocity({'q1': 0.08519, 'r1': 4.719, 'r2': 56.443}) plt.figure(figsize=(20, 10)) plt.plot(noisy_df.Time, df['Vertical_velocity'], linewidth=2, color="r", label="Actual") plt.plot(noisy_df.Time, current, linewidth=2, color="g", label="ESP32") plt.plot(noisy_df.Time, previous, linewidth=2, color="c", label="With previous data") plt.plot(noisy_df.Time, noisy_df['Vertical_velocity'], linewidth=2, color="y", label="Noisy") plt.plot(noisy_df.Time, y, linewidth=2, color="b", label="Predicted") plt.legend() plt.show() ```
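The cells above repeat the same predict/update loop inside `kalman_update`, `objective_function`, and `kalman_update_return_velocity`. As a sketch of a possible refactor (using only the variables already defined above and assuming the same 0.05 s time step), the loop can be written once and reused by every objective:

```
def run_kalman_filter(param, measurements):
    """Run the 3-state (altitude, velocity, acceleration) filter once and
    return the three filtered series."""
    R = np.array([[param["r1"], 0.0], [0.0, param["r2"]]])
    Q = np.identity(3) * param["q1"]
    A = np.array([[1.0, 0.05, 0.00125], [0.0, 1.0, 0.05], [0.0, 0.0, 1.0]])
    H = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    P = np.identity(3)
    I = np.identity(3)
    x_hat = np.zeros((3, 1))

    altitude, velocity, acceleration = [], [], []
    for alt, az in zip(measurements["Altitude"], measurements["Vertical_acceleration"]):
        Z = np.array([[alt], [az]])
        # Predict
        x_hat_minus = A @ x_hat
        P_minus = A @ P @ A.T + Q
        # Update
        K = P_minus @ H.T @ np.linalg.inv(H @ P_minus @ H.T + R)
        x_hat = x_hat_minus + K @ (Z - H @ x_hat_minus)
        P = (I - K @ H) @ P_minus
        altitude.append(float(x_hat[0]))
        velocity.append(float(x_hat[1]))
        acceleration.append(float(x_hat[2]))
    return altitude, velocity, acceleration


# The altitude and velocity objectives then become one-liners:
def altitude_mse(param):
    return mean_squared_error(df["Altitude"], run_kalman_filter(param, noisy_df)[0])


def velocity_mse(param):
    return mean_squared_error(df["Vertical_velocity"], run_kalman_filter(param, noisy_df)[1])
```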
github_jupyter
##### Copyright 2019 The TensorFlow Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Lab 04a: Dogs vs Cats Image Classification Without Image Augmentation <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://colab.research.google.com/github/sres-dl-course/sres-dl-course.github.io/blob/master/notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/sres-dl-course/sres-dl-course.github.io/blob/master/notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> </table> In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`. ## Specific concepts that will be covered: In the process, we will build practical experience and develop intuition around the following concepts * Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator` class — How can we efficiently work with data on disk to interface with our model? * _Overfitting_ - what is it, how to identify it? <hr> **Before you begin** Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits. # Importing packages Let's start by importing required packages: * os — to read files and directory structure * numpy — for some matrix math outside of TensorFlow * matplotlib.pyplot — to plot the graph and display images in our training and validation data ``` import tensorflow as tf from tensorflow.keras.preprocessing.image import ImageDataGenerator import os import matplotlib.pyplot as plt import numpy as np import logging logger = tf.get_logger() logger.setLevel(logging.ERROR) ``` # Data Loading To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of <a href="https://www.kaggle.com/c/dogs-vs-cats/data" target="_blank">Dogs vs. Cats</a> dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research). In previous Colabs, we've used <a href="https://www.tensorflow.org/datasets" target="_blank">TensorFlow Datasets</a>, which is a very easy and convenient way to use datasets. In this Colab however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator` which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem. 
``` _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip' zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True) ``` The dataset we have downloaded has the following directory structure. <pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" > <b>cats_and_dogs_filtered</b> |__ <b>train</b> |______ <b>cats</b>: [cat.0.jpg, cat.1.jpg, cat.2.jpg ...] |______ <b>dogs</b>: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...] |__ <b>validation</b> |______ <b>cats</b>: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ...] |______ <b>dogs</b>: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...] </pre> We can list the directories with the following terminal command: ``` zip_dir_base = os.path.dirname(zip_dir) !find $zip_dir_base -type d -print ``` We'll now assign variables with the proper file path for the training and validation sets. ``` base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered') train_dir = os.path.join(base_dir, 'train') validation_dir = os.path.join(base_dir, 'validation') train_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures train_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures validation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures validation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures ``` ### Understanding our data Let's look at how many cats and dogs images we have in our training and validation directory ``` num_cats_tr = len(os.listdir(train_cats_dir)) num_dogs_tr = len(os.listdir(train_dogs_dir)) num_cats_val = len(os.listdir(validation_cats_dir)) num_dogs_val = len(os.listdir(validation_dogs_dir)) total_train = num_cats_tr + num_dogs_tr total_val = num_cats_val + num_dogs_val print('total training cat images:', num_cats_tr) print('total training dog images:', num_dogs_tr) print('total validation cat images:', num_cats_val) print('total validation dog images:', num_dogs_val) print("--") print("Total training images:", total_train) print("Total validation images:", total_val) ``` # Setting Model Parameters For convenience, we'll set up variables that will be used later while pre-processing our dataset and training our network. ``` BATCH_SIZE = 100 # Number of training examples to process before updating our models variables IMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels ``` # Data Preparation Images must be formatted into appropriately pre-processed floating point tensors before being fed into the network. The steps involved in preparing these images are: 1. Read images from the disk 2. Decode contents of these images and convert it into proper grid format as per their RGB content 3. Convert them into floating point tensors 4. Rescale the tensors from values between 0 and 255 to values between 0 and 1 Fortunately, all these tasks can be done using the class **tf.keras.preprocessing.image.ImageDataGenerator**. We can set this up in a couple of lines of code. ``` train_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our training data validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data ``` After defining our generators for training and validation images, **flow_from_directory** method will load images from the disk, apply rescaling, and resize them using single line of code. 
``` train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150) class_mode='binary') val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, shuffle=False, target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150) class_mode='binary') ``` ### Visualizing Training images We can visualize our training images by getting a batch of images from the training generator, and then plotting a few of them using `matplotlib`. ``` sample_training_images, _ = next(train_data_gen) ``` The `next` function returns a batch from the dataset. One batch is a tuple of (*many images*, *many labels*). For right now, we're discarding the labels because we just want to look at the images. ``` # This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column. def plotImages(images_arr): fig, axes = plt.subplots(1, 5, figsize=(20,20)) axes = axes.flatten() for img, ax in zip(images_arr, axes): ax.imshow(img) plt.tight_layout() plt.show() plotImages(sample_training_images[:5]) # Plot images 0-4 ``` # Model Creation ## Exercise 4.1 Define the model The model consists of four convolution blocks with a max pool layer in each of them. Then we have a fully connected layer with 512 units, with a `relu` activation function. The model will output class probabilities for two classes — dogs and cats — using `softmax`. The list of model layers: * 2D Convolution - 32 filters, 3x3 kernel, ReLU activation * 2D Max pooling - 2x2 kernel * 2D Convolution - 64 filters, 3x3 kernel, ReLU activation * 2D Max pooling - 2x2 kernel * 2D Convolution - 128 filters, 3x3 kernel, ReLU activation * 2D Max pooling - 2x2 kernel * 2D Convolution - 128 filters, 3x3 kernel, ReLU activation * 2D Max pooling - 2x2 kernel * Flatten * Dense - 512 nodes * Dense - 2 nodes Check the documentation for how to specify the layers [https://www.tensorflow.org/api_docs/python/tf/keras/layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers) ``` model = tf.keras.models.Sequential([ # TODO - Create the CNN model as specified above ]) ``` ### Exercise 4.1 Solution The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.1.ipynb) ### Exercise 4.2 Compile the model As usual, we will use the `adam` optimizer. Since we output a softmax categorization, we'll use `sparse_categorical_crossentropy` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument. ``` # TODO - Compile the model ``` #### Exercise 4.2 Solution The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.2.ipynb) ### Model Summary Let's look at all the layers of our network using **summary** method. ``` model.summary() ``` ### Exercise 4.3 Train the model It's time we train our network. * Since we have a validation dataset, we can use this to evaluate our model as it trains by adding the `validation_data` parameter. * `validation_steps` can also be added if you'd like to use less than full validation set. 
```
# TODO - Fit the model
```

#### Exercise 4.3 Solution

The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.3.ipynb)

### Visualizing results of the training

We'll now visualize the results we get after training our network. Here `history` is the object returned by `model.fit` in Exercise 4.3, and `EPOCHS` is the number of epochs you trained for.

```
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(EPOCHS)

plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()
```

As we can see from the plots, training accuracy and validation accuracy differ by a large margin, and our model has achieved only around **70%** accuracy on the validation set (depending on the number of epochs you trained for). This is a clear indication of overfitting. Once the training and validation curves start to diverge, our model has started to memorize the training data and is unable to perform well on the validation data.
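One way to put a number on the divergence described above is to look at the per-epoch gap between training and validation accuracy. This is only a sketch: it reuses the `acc` and `val_acc` lists computed in the plotting cell, and the 0.05 threshold is an arbitrary choice.

```
import numpy as np

gap = np.array(acc) - np.array(val_acc)  # positive values indicate overfitting

for epoch, g in enumerate(gap, start=1):
    print(f"Epoch {epoch}: train - validation accuracy gap = {g:.3f}")

# First epoch where the gap exceeds 5 percentage points, if any
exceeding = np.flatnonzero(gap > 0.05)
print("First epoch with a gap above 0.05:", exceeding[0] + 1 if exceeding.size else "none")
```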
github_jupyter
# Selected Economic Characteristics: Employment Status from the American Community Survey **[Work in progress]** This notebook downloads [selected economic characteristics (DP03)](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03) from the American Community Survey 2018 5-Year Data. Data source: [American Community Survey 5-Year Data 2018](https://www.census.gov/data/developers/data-sets/acs-5year.html) Authors: Peter Rose (pwrose@ucsd.edu), Ilya Zaslavsky (zaslavsk@sdsc.edu) ``` import os import pandas as pd from pathlib import Path import time pd.options.display.max_rows = None # display all rows pd.options.display.max_columns = None # display all columsns NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT')) print(NEO4J_IMPORT) ``` ## Download selected variables * [Selected economic characteristics for US](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03) * [List of variables as HTML](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03.html) or [JSON](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03/) * [Description of variables](https://www2.census.gov/programs-surveys/acs/tech_docs/subject_definitions/2018_ACSSubjectDefinitions.pdf) * [Example URLs for API](https://api.census.gov/data/2018/acs/acs5/profile/examples.html) ### Specify variables from DP03 group and assign property names Names must follow the [Neo4j property naming conventions](https://neo4j.com/docs/getting-started/current/graphdb-concepts/#graphdb-naming-rules-and-recommendations). ``` variables = {# EMPLOYMENT STATUS 'DP03_0001E': 'population16YearsAndOver', 'DP03_0002E': 'population16YearsAndOverInLaborForce', 'DP03_0002PE': 'population16YearsAndOverInLaborForcePct', 'DP03_0003E': 'population16YearsAndOverInCivilianLaborForce', 'DP03_0003PE': 'population16YearsAndOverInCivilianLaborForcePct', 'DP03_0006E': 'population16YearsAndOverInArmedForces', 'DP03_0006PE': 'population16YearsAndOverInArmedForcesPct', 'DP03_0007E': 'population16YearsAndOverNotInLaborForce', 'DP03_0007PE': 'population16YearsAndOverNotInLaborForcePct' #'DP03_0014E': 'ownChildrenOfTheHouseholderUnder6Years', #'DP03_0015E': 'ownChildrenOfTheHouseholderUnder6YearsAllParentsInLaborForce', #'DP03_0016E': 'ownChildrenOfTheHouseholder6To17Years', #'DP03_0017E': 'ownChildrenOfTheHouseholder6To17YearsAllParentsInLaborForce', } fields = ",".join(variables.keys()) for v in variables.values(): print('e.' + v + ' = toInteger(row.' 
+ v + '),') print(len(variables.keys())) ``` ## Download county-level data using US Census API ``` url_county = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=county:*' df = pd.read_json(url_county, dtype='str') df.fillna('', inplace=True) df.head() ``` ##### Add column names ``` df = df[1:].copy() # skip first row of labels columns = list(variables.values()) columns.append('stateFips') columns.append('countyFips') df.columns = columns ``` Remove Puerto Rico (stateFips = 72) to limit data to US States TODO handle data for Puerto Rico (GeoNames represents Puerto Rico as a country) ``` df.query("stateFips != '72'", inplace=True) ``` Save list of state fips (required later to get tract data by state) ``` stateFips = list(df['stateFips'].unique()) stateFips.sort() print(stateFips) df.head() # Example data df[(df['stateFips'] == '06') & (df['countyFips'] == '073')] df['source'] = 'American Community Survey 5 year' df['aggregationLevel'] = 'Admin2' ``` ### Save data ``` df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentAdmin2.csv", index=False) ``` ## Download zip-level data using US Census API ``` url_zip = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=zip%20code%20tabulation%20area:*' df = pd.read_json(url_zip, dtype='str') df.fillna('', inplace=True) df.head() ``` ##### Add column names ``` df = df[1:].copy() # skip first row columns = list(variables.values()) columns.append('stateFips') columns.append('postalCode') df.columns = columns df.head() # Example data df.query("postalCode == '90210'") df['source'] = 'American Community Survey 5 year' df['aggregationLevel'] = 'PostalCode' ``` ### Save data ``` df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentZip.csv", index=False) ``` ## Download tract-level data using US Census API Tract-level data are only available by state, so we need to loop over all states. ``` def get_tract_data(state): url_tract = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=tract:*&in=state:{state}' df = pd.read_json(url_tract, dtype='str') time.sleep(1) # skip first row of labels df = df[1:].copy() # Add column names columns = list(variables.values()) columns.append('stateFips') columns.append('countyFips') columns.append('tract') df.columns = columns return df df = pd.concat((get_tract_data(state) for state in stateFips)) df.fillna('', inplace=True) df['tract'] = df['stateFips'] + df['countyFips'] + df['tract'] df['source'] = 'American Community Survey 5 year' df['aggregationLevel'] = 'Tract' # Example data for San Diego County df[(df['stateFips'] == '06') & (df['countyFips'] == '073')].head() ``` ### Save data ``` df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentTract.csv", index=False) df.shape ```
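The `print('e.' + v + ...)` loop near the top of this notebook generates `SET` lines for a Cypher `LOAD CSV` statement. Below is a sketch of how the full statement for the county-level file could be assembled in Python; the `Admin2` node label and the `fips` match key are illustrative assumptions and should be adapted to the actual graph schema.

```
# Assemble a Cypher LOAD CSV statement from the `variables` mapping defined above.
set_lines = ",\n    ".join(f"e.{v} = toInteger(row.{v})" for v in variables.values())

cypher = f"""
LOAD CSV WITH HEADERS FROM 'file:///03a-USCensusDP03EmploymentAdmin2.csv' AS row
MATCH (e:Admin2 {{fips: row.stateFips + row.countyFips}})  // assumed label and key
SET {set_lines},
    e.source = row.source,
    e.aggregationLevel = row.aggregationLevel
"""
print(cypher)
```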
github_jupyter
## Dimensionality Reduction ``` from sklearn.decomposition import PCA ``` ### Principal Components Analysis ``` o_dir = os.path.join('outputs','pca') if os.path.isdir(o_dir) is not True: print("Creating '{0}' directory.".format(o_dir)) os.mkdir(o_dir) pca = PCA() # Use all Principal Components pca.fit(scdf) # Train model on all data pcdf = pd.DataFrame(pca.transform(scdf)) # Transform data using model for i in range(0,21): print("Amount of explained variance for component {0} is: {1:6.2f}%".format(i, pca.explained_variance_ratio_[i]*100)) print("The amount of explained variance of the SES score using each component is...") sns.lineplot(x=list(range(1,len(pca.explained_variance_ratio_)+1)), y=pca.explained_variance_ratio_) pca = PCA(n_components=11) pca.fit(scdf) scores = pd.DataFrame(pca.transform(scdf), index=scdf.index) scores.to_csv(os.path.join(o_dir,'Scores.csv.gz'), compression='gzip', index=True) # Adapted from https://stackoverflow.com/questions/22984335/recovering-features-names-of-explained-variance-ratio-in-pca-with-sklearn i = np.identity(scdf.shape[1]) # identity matrix coef = pca.transform(i) loadings = pd.DataFrame(coef, index=scdf.columns) loadings.to_csv(os.path.join(o_dir,'Loadings.csv.gz'), compression='gzip', index=True) print(scores.shape) scores.sample(5, random_state=42) print(loadings.shape) loadings.sample(5, random_state=42) odf = pd.DataFrame(columns=['Variable','Component Loading','Score']) for i in range(0,len(loadings.index)): row = loadings.iloc[i,:] for c in list(loadings.columns.values): d = {'Variable':loadings.index[i], 'Component Loading':c, 'Score':row[c]} odf = odf.append(d, ignore_index=True) g = sns.FacetGrid(odf, col="Variable", col_wrap=4, height=3, aspect=2.0, margin_titles=True, sharey=True) g = g.map(plt.plot, "Component Loading", "Score", marker=".") ``` ### What Have We Done? ``` sns.set_style('white') sns.jointplot(data=scores, x=0, y=1, kind='hex', height=8, ratio=8) ``` #### Create an Output Directory and Load the Data ``` o_dir = os.path.join('outputs','clusters-pca') if os.path.isdir(o_dir) is not True: print("Creating '{0}' directory.".format(o_dir)) os.mkdir(o_dir) score_df = pd.read_csv(os.path.join('outputs','pca','Scores.csv.gz')) score_df.rename(columns={'Unnamed: 0':'lsoacd'}, inplace=True) score_df.set_index('lsoacd', inplace=True) # Ensures that df is initialised but original scores remain accessible df = score_df.copy(deep=True) score_df.describe() score_df.sample(3, random_state=42) ``` #### Rescale the Loaded Data We need this so that differences in the component scores don't cause the clustering algorithms to focus only on the 1st component. ``` scaler = preprocessing.MinMaxScaler() df[df.columns] = scaler.fit_transform(df[df.columns]) df.describe() df.sample(3, random_state=42) ```
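The rescaled component scores in `df` are what the clustering step would consume next (the output directory above is already named `clusters-pca`). As a minimal sketch of that step, not part of the original notebook, k-means could be fitted on the rescaled scores; the choice of 5 clusters here is arbitrary and would normally be tuned.

```
from sklearn.cluster import KMeans

kmeans = KMeans(n_clusters=5, n_init=10, random_state=42)  # k=5 is illustrative only
labels = kmeans.fit_predict(df)

clustered = df.copy()
clustered["cluster"] = labels
clustered["cluster"].value_counts()
```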
github_jupyter
# Flopy MODFLOW 6 (MF6) Support The Flopy library contains classes for creating, saving, running, loading, and modifying MF6 simulations. The MF6 portion of the flopy library is located in: *flopy.mf6* While there are a number of classes in flopy.mf6, to get started you only need to use the main classes summarized below: flopy.mf6.MFSimulation * MODFLOW Simulation Class. Entry point into any MODFLOW simulation. flopy.mf6.ModflowGwf * MODFLOW Groundwater Flow Model Class. Represents a single model in a simulation. flopy.mf6.Modflow[pc] * MODFLOW package classes where [pc] is the abbreviation of the package name. Each package is a separate class. For packages that are part of a groundwater flow model, the abbreviation begins with "Gwf". For example, "flopy.mf6.ModflowGwfdis" is the Discretization package. ``` import os import sys from shutil import copyfile import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt try: import flopy except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) import flopy print(sys.version) print('numpy version: {}'.format(np.__version__)) print('matplotlib version: {}'.format(mpl.__version__)) print('flopy version: {}'.format(flopy.__version__)) ``` # Creating a MF6 Simulation A MF6 simulation is created by first creating a simulation object "MFSimulation". When you create the simulation object you can define the simulation's name, version, executable name, workspace path, and the name of the tdis file. All of these are optional parameters, and if not defined each one will default to the following: sim_name='modflowtest' version='mf6' exe_name='mf6.exe' sim_ws='.' sim_tdis_file='modflow6.tdis' ``` import os import sys from shutil import copyfile try: import flopy except: fpth = os.path.abspath(os.path.join('..', '..')) sys.path.append(fpth) import flopy sim_name = 'example_sim' sim_path = os.path.join('data', 'example_project') sim = flopy.mf6.MFSimulation(sim_name=sim_name, version='mf6', exe_name='mf6', sim_ws=sim_path) ``` The next step is to create a tdis package object "ModflowTdis". The first parameter of the ModflowTdis class is a simulation object, which ties a ModflowTdis object to a specific simulation. The other parameters and their definitions can be found in the docstrings. ``` tdis = flopy.mf6.ModflowTdis(sim, pname='tdis', time_units='DAYS', nper=2, perioddata=[(1.0, 1, 1.0), (10.0, 5, 1.0)]) ``` Next one or more models are created using the ModflowGwf class. The first parameter of the ModflowGwf class is the simulation object that the model will be a part of. ``` model_name = 'example_model' model = flopy.mf6.ModflowGwf(sim, modelname=model_name, model_nam_file='{}.nam'.format(model_name)) ``` Next create one or more Iterative Model Solution (IMS) files. ``` ims_package = flopy.mf6.ModflowIms(sim, pname='ims', print_option='ALL', complexity='SIMPLE', outer_hclose=0.00001, outer_maximum=50, under_relaxation='NONE', inner_maximum=30, inner_hclose=0.00001, linear_acceleration='CG', preconditioner_levels=7, preconditioner_drop_tolerance=0.01, number_orthogonalizations=2) ``` Each ModflowGwf object needs to be associated with an ModflowIms object. This is done by calling the MFSimulation object's "register_ims_package" method. The first parameter in this method is the ModflowIms object and the second parameter is a list of model names (strings) for the models to be associated with the ModflowIms object. ``` sim.register_ims_package(ims_package, [model_name]) ``` Next add packages to each model. 
The first package added needs to be a spatial discretization package since flopy uses information from the spatial discretization package to help you build other packages. There are three spatial discretization packages to choose from: DIS (ModflowGwfDis) - Structured discretization DISV (ModflowGwfdisv) - Discretization with vertices DISU (ModflowGwfdisu) - Unstructured discretization ``` dis_package = flopy.mf6.ModflowGwfdis(model, pname='dis', length_units='FEET', nlay=2, nrow=2, ncol=5, delr=500.0, delc=500.0, top=100.0, botm=[50.0, 20.0], filename='{}.dis'.format(model_name)) ``` ## Accessing Namefiles Namefiles are automatically built for you by flopy. However, there are some options contained in the namefiles that you may want to set. To get the namefile object access the name_file attribute in either a simulation or model object to get the simulation or model namefile. ``` # set the nocheck property in the simulation namefile sim.name_file.nocheck = True # set the print_input option in the model namefile model.name_file.print_input = True ``` ## Specifying Options Option that appear alone are assigned a boolean value, like the print_input option above. Options that have additional optional parameters are assigned using a tuple, with the entries containing the names of the optional parameters to turn on. Use a tuple with an empty string to indicate no optional parameters and use a tuple with None to turn the option off. ``` # Turn Newton option on with under relaxation model.name_file.newtonoptions = ('UNDER_RELAXATION') # Turn Newton option on without under relaxation model.name_file.newtonoptions = ('') # Turn off Newton option model.name_file.newtonoptions = (None) ``` ## MFArray Templates Lastly define all other packages needed. Note that flopy supports a number of ways to specify data for a package. A template, which defines the data array shape for you, can be used to specify the data. Templates are built by calling the empty of the data type you are building. For example, to build a template for k in the npf package you would call: ModflowGwfnpf.k.empty() The empty method for "MFArray" data templates (data templates whose size is based on the structure of the model grid) take up to four parameters: * model - The model object that the data is a part of. A valid model object with a discretization package is required in order to build the proper array dimensions. This parameter is required. * layered - True or false whether the data is layered or not. * data_storage_type_list - List of data storage types, one for each model layer. If the template is not layered, only one data storage type needs to be specified. There are three data storage types supported, internal_array, internal_constant, and external_file. * default_value - The initial value for the array. 
``` # build a data template for k that stores the first layer as an internal array and the second # layer as a constant with the default value of k for all layers set to 100.0 layer_storage_types = [flopy.mf6.data.mfdatastorage.DataStorageType.internal_array, flopy.mf6.data.mfdatastorage.DataStorageType.internal_constant] k_template = flopy.mf6.ModflowGwfnpf.k.empty(model, True, layer_storage_types, 100.0) # change the value of the second layer to 50.0 k_template[0]['data'] = [65.0, 60.0, 55.0, 50.0, 45.0, 40.0, 35.0, 30.0, 25.0, 20.0] k_template[0]['factor'] = 1.5 print(k_template) # create npf package using the k template to define k npf_package = flopy.mf6.ModflowGwfnpf(model, pname='npf', save_flows=True, icelltype=1, k=k_template) ``` ## Specifying MFArray Data MFArray data can also be specified as a numpy array, a list of values, or a single value. Below strt (starting heads) are defined as a single value, 100.0, which is interpreted as an internal constant storage type of value 100.0. Strt could also be defined as a list defining a value for every model cell: strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0] Or as a list defining a value or values for each model layer: strt=[100.0, 90.0] or: strt=[[100.0], [90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0]] MFArray data can also be stored in an external file by using a dictionary using the keys 'filename' to specify the file name relative to the model folder and 'data' to specific the data. The optional 'factor', 'iprn', and 'binary' keys may also be used. strt={'filename': 'strt.txt', 'factor':1.0, 'data':[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0], 'binary': 'True'} If the 'data' key is omitted from the dictionary flopy will try to read the data from an existing file 'filename'. Any relative paths for loading data from a file should specified relative to the MF6 simulation folder. ``` strt={'filename': 'strt.txt', 'factor':1.0, 'data':[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0], 'binary': 'True'} ic_package = flopy.mf6.ModflowGwfic(model, pname='ic', strt=strt, filename='{}.ic'.format(model_name)) # move external file data into model folder icv_data_path = os.path.join('..', 'data', 'mf6', 'notebooks', 'iconvert.txt') copyfile(icv_data_path, os.path.join(sim_path, 'iconvert.txt')) # create storage package sto_package = flopy.mf6.ModflowGwfsto(model, pname='sto', save_flows=True, iconvert={'filename':'iconvert.txt'}, ss=[0.000001, 0.000002], sy=[0.15, 0.14, 0.13, 0.12, 0.11, 0.11, 0.12, 0.13, 0.14, 0.15, 0.15, 0.14, 0.13, 0.12, 0.11, 0.11, 0.12, 0.13, 0.14, 0.15]) ``` ## MFList Templates Flopy supports specifying record and recarray "MFList" data in a number of ways. Templates can be created that define the shape of the data. The empty method for "MFList" data templates take up to 7 parameters. * model - The model object that the data is a part of. A valid model object with a discretization package is required in order to build the proper array dimensions. This parameter is required. * maxbound - The number of rows in the recarray. If not specified one row is returned. * aux_vars - List of auxiliary variable names. If not specified auxiliary variables are not used. * boundnames - True/False if boundnames is to be used. 
* nseg - Number of segments (only relevant for a few data types) * timeseries - True/False indicates that time series data will be used. * stress_periods - List of integer stress periods to be used (transient MFList data only). If not specified for transient data, template will only be defined for stress period 1. MFList transient data templates are numpy recarrays stored in a dictionary with the dictionary key an integer zero based stress period value (stress period - 1). In the code below the well package is set up using a transient MFList template to help build the well's stress_periods. ``` maxbound = 2 # build a stress_period_data template with 2 wells over stress periods 1 and 2 with boundnames # and three aux variables wel_periodrec = flopy.mf6.ModflowGwfwel.stress_period_data.empty(model, maxbound=maxbound, boundnames=True, aux_vars=['var1', 'var2', 'var3'], stress_periods=[0,1]) # define the two wells for stress period one wel_periodrec[0][0] = ((0,1,2), -50.0, -1, -2, -3, 'First Well') wel_periodrec[0][1] = ((1,1,4), -25.0, 2, 3, 4, 'Second Well') # define the two wells for stress period two wel_periodrec[1][0] = ((0,1,2), -200.0, -1, -2, -3, 'First Well') wel_periodrec[1][1] = ((1,1,4), -4000.0, 2, 3, 4, 'Second Well') # build the well package wel_package = flopy.mf6.ModflowGwfwel(model, pname='wel', print_input=True, print_flows=True, auxiliary=['var1', 'var2', 'var3'], maxbound=maxbound, stress_period_data=wel_periodrec, boundnames=True, save_flows=True) ``` ## Cell IDs Cell IDs always appear as tuples in an MFList. For a structured grid cell IDs appear as: (&lt;layer&gt;, &lt;row&gt;, &lt;column&gt;) For vertice based grid cells IDs appear as: (&lt;layer&gt;, &lt;intralayer_cell_id&gt;) Unstructured grid cell IDs appear as: (&lt;cell_id&gt;) ## Specifying MFList Data MFList data can also be defined as a list of tuples, with each tuple being a row of the recarray. For transient data the list of tuples can be stored in a dictionary with the dictionary key an integer zero based stress period value. If only a list of tuples is specified for transient data, the data is assumed to apply to stress period 1. Additional stress periods can be added with the add_transient_key method. The code below defines saverecord and printrecord as a list of tuples. ``` # printrecord data as a list of tuples. since no stress # period is specified it will default to stress period 1 printrec_tuple_list = [('HEAD', 'ALL'), ('BUDGET', 'ALL')] # saverecord data as a dictionary of lists of tuples for # stress periods 1 and 2. saverec_dict = {0:[('HEAD', 'ALL'), ('BUDGET', 'ALL')],1:[('HEAD', 'ALL'), ('BUDGET', 'ALL')]} # create oc package oc_package = flopy.mf6.ModflowGwfoc(model, pname='oc', budget_filerecord=[('{}.cbc'.format(model_name),)], head_filerecord=[('{}.hds'.format(model_name),)], saverecord=saverec_dict, printrecord=printrec_tuple_list) # add stress period two to the print record oc_package.printrecord.add_transient_key(1) # set the data for stress period two in the print record oc_package.printrecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) ``` ### Specifying MFList Data in an External File MFList data can be specified in an external file using a dictionary with the 'filename' key. If the 'data' key is also included in the dictionary and is not None, flopy will create the file with the data contained in the 'data' key. The 'binary' key can be used to save data to a binary file ('binary': True). 
The code below creates a chd package which creates and references an external file containing data for stress period 1 and stores the data internally in the chd package file for stress period 2. ``` stress_period_data = {0: {'filename': 'chd_sp1.dat', 'data': [[(0, 0, 0), 70.]]}, 1: [[(0, 0, 0), 60.]]} chd = flopy.mf6.ModflowGwfchd(model, maxbound=1, stress_period_data=stress_period_data) ``` ## Packages that Support both List-based and Array-based Data The recharge and evapotranspiration packages can be specified using list-based or array-based input. The array packages have an "a" on the end of their name: ModflowGwfrch - list based recharge package ModflowGwfrcha - array based recharge package ModflowGwfevt - list based evapotranspiration package ModflowGwfevta - array based evapotranspiration package ``` rch_recarray = {0:[((0,0,0), 'rch_1'), ((1,1,1), 'rch_2')], 1:[((0,0,0), 'rch_1'), ((1,1,1), 'rch_2')]} rch_package = flopy.mf6.ModflowGwfrch(model, pname='rch', fixed_cell=True, print_input=True, maxbound=2, stress_period_data=rch_recarray) ``` ## Utility Files (TS, TAS, OBS, TAB) Utility files, MF6 formatted files that reference by packages, include time series, time array series, observation, and tab files. The file names for utility files are specified using the package that references them. The utility files can be created in several ways. A simple case is demonstrated below. More detail is given in the flopy3_mf6_obs_ts_tas notebook. ``` # build a time series array for the recharge package ts_data = [(0.0, 0.015, 0.0017), (1.0, 0.016, 0.0019), (2.0, 0.012, 0.0015), (3.0, 0.020, 0.0014), (4.0, 0.015, 0.0021), (5.0, 0.013, 0.0012), (6.0, 0.022, 0.0012), (7.0, 0.016, 0.0014), (8.0, 0.013, 0.0011), (9.0, 0.021, 0.0011), (10.0, 0.017, 0.0016), (11.0, 0.012, 0.0015)] rch_package.ts.initialize(time_series_namerecord=['rch_1', 'rch_2'], timeseries=ts_data, filename='recharge_rates.ts', interpolation_methodrecord=['stepwise', 'stepwise']) # build an recharge observation package that outputs the western recharge to a binary file and the eastern # recharge to a text file obs_data = {('rch_west.csv', 'binary'): [('rch_1_1_1', 'RCH', (0, 0, 0)), ('rch_1_2_1', 'RCH', (0, 1, 0))], 'rch_east.csv': [('rch_1_1_5', 'RCH', (0, 0, 4)), ('rch_1_2_5', 'RCH', (0, 1, 4))]} rch_package.obs.initialize(filename='example_model.rch.obs', digits=10, print_input=True, continuous=obs_data) ``` # Saving and Running a MF6 Simulation Saving and running a simulation are done with the MFSimulation class's write_simulation and run_simulation methods. ``` # write simulation to new location sim.write_simulation() # run simulation sim.run_simulation() ``` # Exporting a MF6 Model Exporting a MF6 model to a shapefile or netcdf is the same as exporting a MF2005 model. ``` # make directory pth = os.path.join('data', 'netCDF_export') if not os.path.exists(pth): os.makedirs(pth) # export the dis package to a netcdf file model.dis.export(os.path.join(pth, 'dis.nc')) # export the botm array to a shapefile model.dis.botm.export(os.path.join(pth, 'botm.shp')) ``` # Loading an Existing MF6 Simulation Loading a simulation can be done with the flopy.mf6.MFSimulation.load static method. ``` # load the simulation loaded_sim = flopy.mf6.MFSimulation.load(sim_name, 'mf6', 'mf6', sim_path) ``` # Retrieving Data and Modifying an Existing MF6 Simulation Data can be easily retrieved from a simulation. Data can be retrieved using two methods. 
One method is to retrieve the data object from a master simulation dictionary that keeps track of all the data. The master simulation dictionary is accessed by accessing a simulation's "simulation_data" property and then the "mfdata" property: sim.simulation_data.mfdata[<data path>] The data path is the path to the data stored as a tuple containing the model name, package name, block name, and data name. The second method is to get the data from the package object. If you do not already have the package object, you can work your way down the simulation structure, from the simulation to the correct model, to the correct package, and finally to the data object. These methods are demonstrated in the code below. ``` # get hydraulic conductivity data object from the data dictionary hk = sim.simulation_data.mfdata[(model_name, 'npf', 'griddata', 'k')] # get specific yield data object from the storage package sy = sto_package.sy # get the model object from the simulation object using the get_model method, # which takes a string with the model's name and returns the model object mdl = sim.get_model(model_name) # get the package object from the model mobject using the get_package method, # which takes a string with the package's name or type ic = mdl.get_package('ic') # get the data object from the initial condition package object strt = ic.strt ``` Once you have the appropriate data object there are a number methods to retrieve data from that object. Data retrieved can either be the data as it appears in the model file or the data with any factor specified in the model file applied to it. To get the raw data without applying a factor use the get_data method. To get the data with the factor already applied use .array. Note that MFArray data is always a copy of the data stored by flopy. Modifying the copy of the flopy data will have no affect on the data stored in flopy. Non-constant internal MFList data is returned as a reference to a numpy recarray. Modifying this recarray will modify the data stored in flopy. ``` # get the data without applying any factor hk_data_no_factor = hk.get_data() print('Data without factor:\n{}\n'.format(hk_data_no_factor)) # get data with factor applied hk_data_factor = hk.array print('Data with factor:\n{}\n'.format(hk_data_factor)) ``` Data can also be retrieved from the data object using []. For unlayered data the [] can be used to slice the data. ``` # slice layer one row two print('SY slice of layer on row two\n{}\n'.format(sy[0,:,2])) ``` For layered data specify the layer number within the brackets. This will return a "LayerStorage" object which let's you change attributes of an individual layer. ``` # get layer one LayerStorage object hk_layer_one = hk[0] # change the print code and factor for layer one hk_layer_one.iprn = '2' hk_layer_one.factor = 1.1 print('Layer one data without factor:\n{}\n'.format(hk_layer_one.get_data())) print('Data with new factor:\n{}\n'.format(hk.array)) ``` ## Modifying Data Data can be modified in several ways. One way is to set data for a given layer within a LayerStorage object, like the one accessed in the code above. Another way is to set the data attribute to the new data. Yet another way is to call the data object's set_data method. 
``` # set data within a LayerStorage object hk_layer_one.set_data([120.0, 100.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 25.0, 20.0]) print('New HK data no factor:\n{}\n'.format(hk.get_data())) # set data attribute to new data ic_package.strt = 150.0 print('New strt values:\n{}\n'.format(ic_package.strt.array)) # call set_data sto_package.ss.set_data([0.000003, 0.000004]) print('New ss values:\n{}\n'.format(sto_package.ss.array)) ``` ## Modifying the Simulation Path The simulation path folder can be changed by using the set_sim_path method in the MFFileMgmt object. The MFFileMgmt object can be obtained from the simulation object through properties: sim.simulation_data.mfpath ``` # create new path save_folder = os.path.join(sim_path, 'sim_modified') # change simulation path sim.simulation_data.mfpath.set_sim_path(save_folder) # create folder if not os.path.isdir(save_folder): os.makedirs(save_folder) ``` ## Adding a Model Relative Path A model relative path lets you put all of the files associated with a model in a folder relative to the simulation folder. Warning, this will override all of your file paths to model package files and will also override any relative file paths to external model data files. ``` # Change path of model files relative to the simulation folder model.set_model_relative_path('model_folder') # create folder if not os.path.isdir(save_folder): os.makedirs(os.path.join(save_folder,'model_folder')) # write simulation to new folder sim.write_simulation() # run simulation from new folder sim.run_simulation() ``` ## Post-Processing the Results Results can be retrieved from the master simulation dictionary. Results are retrieved from the master simulation dictionary with using a tuple key that identifies the data to be retrieved. For head data use the key ('&lt;model name&gt;', 'HDS', 'HEAD') where &lt;model name&gt; is the name of your model. For cell by cell budget data use the key ('&lt;model name&gt;', 'CBC', '&lt;flow data name&gt;') where &lt;flow data name&gt; is the name of the flow data to be retrieved (ex. 'FLOW-JA-FACE'). All available output keys can be retrieved using the output_keys method. ``` keys = sim.simulation_data.mfdata.output_keys() ``` The entries in the list above are keys for data in the head file "HDS" and data in cell by cell flow file "CBC". Keys in this list are not guaranteed to be in any particular order. The code below uses the head file key to retrieve head data and then plots head data using matplotlib. ``` import matplotlib.pyplot as plt import numpy as np # get all head data head = sim.simulation_data.mfdata['example_model', 'HDS', 'HEAD'] # get the head data from the end of the model run head_end = head[-1] # plot the head data from the end of the model run levels = np.arange(160,162,1) extent = (0.0, 1000.0, 2500.0, 0.0) plt.contour(head_end[0, :, :],extent=extent) plt.show() ``` Results can also be retrieved using the existing binaryfile method. ``` # get head data using old flopy method hds_path = os.path.join(sim_path, model_name + '.hds') hds = flopy.utils.HeadFile(hds_path) # get heads after 1.0 days head = hds.get_data(totim=1.0) # plot head data plt.contour(head[0, :, :],extent=extent) plt.show() ```
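The cell-by-cell budget file referenced by the 'CBC' keys above can be read in a similar way with the existing binary-file utilities. Below is a short sketch, assuming the simulation above has been run so that the `.cbc` file exists alongside the `.hds` file.

```
# read the cell-by-cell budget file written by the OC package
cbc_path = os.path.join(sim_path, model_name + '.cbc')
cbc = flopy.utils.CellBudgetFile(cbc_path, precision='double')

# list the budget terms stored in the file (e.g. FLOW-JA-FACE, WEL, RCH)
print(cbc.get_unique_record_names())

# get the flow-ja-face record for the last time step
flowja = cbc.get_data(text='FLOW-JA-FACE')[-1]
print(flowja)
```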
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/3_image_processing_deep_learning_roadmap/3_deep_learning_advanced/1_Blocks%20in%20Deep%20Learning%20Networks/8)%20Resnet%20V2%20Bottleneck%20Block%20(Type%20-%202).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Goals ### 1. Learn to implement Resnet V2 Bottleneck Block (Type - 1) using monk - Monk's Keras - Monk's Pytorch - Monk's Mxnet ### 2. Use network Monk's debugger to create complex blocks ### 3. Understand how syntactically different it is to implement the same using - Traditional Keras - Traditional Pytorch - Traditional Mxnet # Resnet V2 Bottleneck Block - Type 1 - Note: The block structure can have variations too, this is just an example ``` from IPython.display import Image Image(filename='imgs/resnet_v2_bottleneck_without_downsample.png') ``` # Table of contents [1. Install Monk](#1) [2. Block basic Information](#2) - [2.1) Visual structure](#2-1) - [2.2) Layers in Branches](#2-2) [3) Creating Block using monk visual debugger](#3) - [3.1) Create the first branch](#3-1) - [3.2) Create the second branch](#3-2) - [3.3) Merge the branches](#3-3) - [3.4) Debug the merged network](#3-4) - [3.5) Compile the network](#3-5) - [3.6) Visualize the network](#3-6) - [3.7) Run data through the network](#3-7) [4) Creating Block Using MONK one line API call](#4) - [Mxnet Backend](#4-1) - [Pytorch Backend](#4-2) - [Keras Backend](#4-3) [5) Appendix](#5) - [Study Material](#5-1) - [Creating block using traditional Mxnet](#5-2) - [Creating block using traditional Pytorch](#5-3) - [Creating block using traditional Keras](#5-4) <a id='0'></a> # Install Monk ## Using pip (Recommended) - colab (gpu) - All bakcends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon bakcned: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backend: `pip install -U monk-cuda101` - Gluon bakcned: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backend: `pip install -U monk-cuda100` - Gluon bakcned: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backend: `pip install -U monk-cuda92` - Gluon bakcned: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backend: `pip install -U monk-cuda90` - Gluon bakcned: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backend: `pip install -U monk-cpu` - Gluon bakcned: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` ## Install Monk Manually (Not recommended) ### Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git ### Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd 
monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` ### Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` # Imports ``` # Common import numpy as np import math import netron from collections import OrderedDict from functools import partial #Using mxnet-gluon backend # When installed using pip from monk.gluon_prototype import prototype # When installed manually (Uncomment the following) #import os #import sys #sys.path.append("monk_v1/"); #sys.path.append("monk_v1/monk/"); #from monk.gluon_prototype import prototype ``` <a id='2'></a> # Block Information <a id='2_1'></a> ## Visual structure ``` from IPython.display import Image Image(filename='imgs/resnet_v2_bottleneck_without_downsample.png') ``` <a id='2_2'></a> ## Layers in Branches - Number of branches: 2 - Common Elements - batchnorm -> relu - Branch 1 - identity - Branch 2 - conv_1x1 -> batchnorm -> relu -> conv_3x3 -> batchnorm -> relu -> conv1x1 - Branches merged using - Elementwise addition (See Appendix to read blogs on resnets) <a id='3'></a> # Creating Block using monk debugger ``` # Imports and setup a project # To use pytorch backend - replace gluon_prototype with pytorch_prototype # To use keras backend - replace gluon_prototype with keras_prototype from monk.gluon_prototype import prototype # Create a sample project gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); ``` <a id='3-1'></a> ## Create the first branch ``` def first_branch(): network = []; network.append(gtf.identity()); return network; # Debug the branch branch_1 = first_branch() network = []; network.append(branch_1); gtf.debug_custom_model_design(network); ``` <a id='3-2'></a> ## Create the second branch ``` def second_branch(output_channels=128, stride=1): network = []; # Bottleneck convolution network.append(gtf.convolution(output_channels=output_channels//4, kernel_size=1, stride=stride)); network.append(gtf.batch_normalization()); network.append(gtf.relu()); #Bottleneck convolution network.append(gtf.convolution(output_channels=output_channels//4, kernel_size=1, stride=stride)); network.append(gtf.batch_normalization()); network.append(gtf.relu()); #Normal convolution network.append(gtf.convolution(output_channels=output_channels, 
kernel_size=1, stride=1)); return network; # Debug the branch branch_2 = second_branch(output_channels=128, stride=1) network = []; network.append(branch_2); gtf.debug_custom_model_design(network); ``` <a id='3-3'></a> ## Merge the branches ``` def final_block(output_channels=128, stride=1): network = []; #Common Elements network.append(gtf.batch_normalization()); network.append(gtf.relu()); #Create subnetwork and add branches subnetwork = []; branch_1 = first_branch() branch_2 = second_branch(output_channels=output_channels, stride=stride) subnetwork.append(branch_1); subnetwork.append(branch_2); # Add merging element subnetwork.append(gtf.add()); # Add the subnetwork network.append(subnetwork) return network; ``` <a id='3-4'></a> ## Debug the merged network ``` final = final_block(output_channels=64, stride=1) network = []; network.append(final); gtf.debug_custom_model_design(network); ``` <a id='3-5'></a> ## Compile the network ``` gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False); ``` <a id='3-6'></a> ## Run data through the network ``` import mxnet as mx x = np.zeros((1, 64, 224, 224)); x = mx.nd.array(x); y = gtf.system_dict["local"]["model"].forward(x); print(x.shape, y.shape) ``` <a id='3-7'></a> ## Visualize network using netron ``` gtf.Visualize_With_Netron(data_shape=(64, 224, 224)) ``` <a id='4'></a> # Creating Using MONK LOW code API <a id='4-1'></a> ## Mxnet backend ``` from monk.gluon_prototype import prototype gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); network = []; # Single line addition of blocks network.append(gtf.resnet_v2_bottleneck_block(output_channels=64, downsample=False)); gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False); ``` <a id='4-2'></a> ## Pytorch backend - Only the import changes ``` #Change gluon_prototype to pytorch_prototype from monk.pytorch_prototype import prototype gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); network = []; # Single line addition of blocks network.append(gtf.resnet_v2_bottleneck_block(output_channels=64, downsample=False)); gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False); ``` <a id='4-3'></a> ## Keras backend - Only the import changes ``` #Change gluon_prototype to keras_prototype from monk.keras_prototype import prototype gtf = prototype(verbose=1); gtf.Prototype("sample-project-1", "sample-experiment-1"); network = []; # Single line addition of blocks network.append(gtf.resnet_v2_bottleneck_block(output_channels=64, downsample=False)); gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False); ``` <a id='5'></a> # Appendix <a id='5-1'></a> ## Study links - https://towardsdatascience.com/residual-blocks-building-blocks-of-resnet-fd90ca15d6ec - https://medium.com/@MaheshNKhatri/resnet-block-explanation-with-a-terminology-deep-dive-989e15e3d691 - https://medium.com/analytics-vidhya/understanding-and-implementation-of-residual-networks-resnets-b80f9a507b9c - https://hackernoon.com/resnet-block-level-design-with-deep-learning-studio-part-1-727c6f4927ac <a id='5-2'></a> ## Creating block using traditional Mxnet - Code credits - https://mxnet.incubator.apache.org/ ``` # Traditional-Mxnet-gluon import mxnet as mx from mxnet.gluon import nn from mxnet.gluon.nn import HybridBlock, BatchNorm from mxnet.gluon.contrib.nn import HybridConcurrent, Identity from mxnet import gluon, init, nd def _conv3x3(channels, stride, in_channels): return nn.Conv2D(channels, kernel_size=3, 
strides=stride, padding=1, use_bias=False, in_channels=in_channels) class ResnetBlockV1(HybridBlock): def __init__(self, channels, stride, in_channels=0, **kwargs): super(ResnetBlockV1, self).__init__(**kwargs) #Common Elements self.bn0 = nn.BatchNorm(); self.relu0 = nn.Activation('relu'); #Branch - 1 #Identity # Branch - 2 self.body = nn.HybridSequential(prefix='') self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=stride, use_bias=False, in_channels=in_channels)) self.body.add(nn.BatchNorm()) self.body.add(nn.Activation('relu')) self.body.add(_conv3x3(channels//4, stride, in_channels)) self.body.add(nn.BatchNorm()) self.body.add(nn.Activation('relu')) self.body.add(nn.Conv2D(channels, kernel_size=1, strides=stride, use_bias=False, in_channels=in_channels)) def hybrid_forward(self, F, x): x = self.bn0(x); x = self.relu0(x); residual = x x = self.body(x) x = residual+x return x # Invoke the block block = ResnetBlockV1(64, 1) # Initialize network and load block on machine ctx = [mx.cpu()]; block.initialize(init.Xavier(), ctx = ctx); block.collect_params().reset_ctx(ctx) block.hybridize() # Run data through network x = np.zeros((1, 64, 224, 224)); x = mx.nd.array(x); y = block.forward(x); print(x.shape, y.shape) # Export Model to Load on Netron block.export("final", epoch=0); netron.start("final-symbol.json", port=8082) ``` <a id='5-3'></a> ## Creating block using traditional Pytorch - Code credits - https://pytorch.org/ ``` # Traiditional-Pytorch import torch from torch import nn from torch.jit.annotations import List import torch.nn.functional as F def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, groups=groups, bias=False, dilation=dilation) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) class ResnetBottleNeckBlock(nn.Module): expansion = 1 __constants__ = ['downsample'] def __init__(self, inplanes, planes, stride=1, groups=1, base_width=64, dilation=1, norm_layer=None): super(ResnetBottleNeckBlock, self).__init__() norm_layer = nn.BatchNorm2d #Common elements self.bn0 = norm_layer(inplanes); self.relu0 = nn.ReLU(inplace=True); # Branch - 1 #Identity # Branch - 2 self.conv1 = conv1x1(inplanes, planes//4, stride) self.bn1 = norm_layer(planes//4) self.relu1 = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes//4, planes//4, stride) self.bn2 = norm_layer(planes//4) self.relu2 = nn.ReLU(inplace=True) self.conv3 = conv1x1(planes//4, planes) self.stride = stride self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.bn0(x); x = self.relu0(x); identity = x out = self.conv1(x) out = self.bn1(out) out = self.relu1(out) out = self.conv2(out) out = self.bn2(out) out = self.relu2(out) out = self.conv3(out) out += identity return out # Invoke the block block = ResnetBottleNeckBlock(64, 64, stride=1); # Initialize network and load block on machine layers = [] layers.append(block); net = nn.Sequential(*layers); # Run data through network x = torch.randn(1, 64, 224, 224) y = net(x) print(x.shape, y.shape); # Export Model to Load on Netron torch.onnx.export(net, # model being run x, # model input (or a tuple for multiple inputs) "model.onnx", # where to save the model (can be a file or file-like object) export_params=True, # store the trained parameter weights inside the model file opset_version=10, # the ONNX version to export the model to 
do_constant_folding=True, # whether to execute constant folding for optimization input_names = ['input'], # the model's input names output_names = ['output'], # the model's output names dynamic_axes={'input' : {0 : 'batch_size'}, # variable lenght axes 'output' : {0 : 'batch_size'}}) netron.start('model.onnx', port=9998); ``` <a id='5-4'></a> ## Creating block using traditional Keras - Code credits: https://keras.io/ ``` # Traditional-Keras import keras import keras.layers as kla import keras.models as kmo import tensorflow as tf from keras.models import Model backend = 'channels_last' from keras import layers def resnet_conv_block(input_tensor, kernel_size, filters, stage, block, strides=(1, 1)): filters1, filters2, filters3 = filters bn_axis = 3 conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' #Common Elements start = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '0a')(input_tensor) start = layers.Activation('relu')(start) # Branch - 1 # Identity shortcut = start # Branch - 2 x = layers.Conv2D(filters1, (1, 1), strides=strides, kernel_initializer='he_normal', name=conv_name_base + '2a')(start) x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x) x = layers.Activation('relu')(x) x = layers.Conv2D(filters2, (3, 3), strides=strides, kernel_initializer='he_normal', name=conv_name_base + '2b', padding="same")(x) x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x) x = layers.Activation('relu')(x) x = layers.Conv2D(filters3, (1, 1), kernel_initializer='he_normal', name=conv_name_base + '2c')(x); x = layers.add([x, shortcut]) x = layers.Activation('relu')(x) return x def create_model(input_shape, kernel_size, filters, stage, block): img_input = layers.Input(shape=input_shape); x = resnet_conv_block(img_input, kernel_size, filters, stage, block) return Model(img_input, x); # Invoke the block kernel_size=3; filters=[16, 16, 64]; input_shape=(224, 224, 64); model = create_model(input_shape, kernel_size, filters, 0, "0"); # Run data through network x = tf.placeholder(tf.float32, shape=(1, 224, 224, 64)) y = model(x) print(x.shape, y.shape) # Export Model to Load on Netron model.save("final.h5"); netron.start("final.h5", port=8082) ``` # Goals Completed ### 1. Learn to implement Resnet V2 Bottleneck Block (Type - 1) using monk - Monk's Keras - Monk's Pytorch - Monk's Mxnet ### 2. Use network Monk's debugger to create complex blocks ### 3. Understand how syntactically different it is to implement the same using - Traditional Keras - Traditional Pytorch - Traditional Mxnet
``` import pandas as pd import geopandas as gpd import matplotlib.pyplot as plt import numpy as np from pyproj import CRS import pathlib from pathlib import Path from shapely import wkt from tqdm import tqdm import math import codecs from shapely import wkt import folium from folium import features from folium import plugins import gzip from xml.etree.ElementTree import Element, SubElement, Comment, tostring import xml.etree.ElementTree as ET # to read the excel from openpyxl import load_workbook from openpyxl import Workbook # import folium from shapely.geometry import LineString, MultiLineString import branca.colormap as cmp from folium.plugins import Search from tqdm import tqdm import time import datetime from datetime import timedelta # set the working directory BASE_DIR = Path.cwd() BASE_DIR # save as geojson def get_foldercreation_inf(): fname = pathlib.Path("../SF_all_trips/sf-tscore-all-trips-20PCsample-updatedRideHailFleet-updatedParking__etg/ITERS/it.4/4.linkstats.csv.gz") assert fname.exists(), f'No such file: {fname}' # check that the file exists ctime = datetime.datetime.fromtimestamp(fname.stat().st_ctime) return ctime # return ctime.strftime("%Y-%m-%d") def get_dataframe(_time): # linkstats file linkstats = pd.read_csv("../SF_all_trips/sf-tscore-all-trips-20PCsample-updatedRideHailFleet-updatedParking__etg/ITERS/it.4/4.linkstats.csv.gz", compression="gzip", low_memory=False) time = int(_time) linkstats = linkstats[linkstats["hour"]==(time)].copy() linkstats=linkstats.add_prefix("linkstats_") linkstats.rename(columns={('linkstats_link'): 'id'}, inplace=True) linkstats["id"] = linkstats["id"].astype('string') date_time = get_foldercreation_inf() if int(_time)<24: date_time = date_time.strftime("%Y-%m-%d") time_stamp = f'{int(_time):02d}' linkstats["date_time"] = (date_time + " " + "{}:00:00".format(f'{int(_time):02d}')) else: date_time = get_foldercreation_inf() + datetime.timedelta(days=1) date_time = date_time.strftime("%Y-%m-%d") new_time = int(_time) - 24 linkstats["date_time"] = (date_time + " " + "{}:00:00".format(f'{abs(int(new_time)):02d}')) return linkstats # read the road network sf_roadnetwork = gpd.read_file(BASE_DIR.parent.joinpath( 'Network',"sfNetwork.geojsonl")) sf_roadnetwork = sf_roadnetwork[["id","modes","length","lanes","from","to","capacity","geometry"]] sftimevariantnetwork =pd.DataFrame() for time_hour in tqdm(range(0,30)): # get the hour and filter the linkstat file linkstats = get_dataframe(str(time_hour)) # merge with featureclass of SF data comparision_network = sf_roadnetwork.merge(linkstats,on="id").copy() # calculate the freespeed (mph), congested speed (mph), ratio (congestedsped/freespeed) # linkstats comparision_network["linkstats_freespeed_mph"] = comparision_network["linkstats_freespeed"]*2.23694 comparision_network["linkstats_congspd_mph"] = (comparision_network["linkstats_length"]/comparision_network["linkstats_traveltime"])*2.23694 comparision_network["linkstats_ratio"] = comparision_network["linkstats_congspd_mph"] / comparision_network["linkstats_freespeed_mph"] comparision_network["linkstats_vc_ratio"] = comparision_network["linkstats_volume"]*5 / comparision_network["capacity"] if int(time_hour)==0: sftimevariantnetwork = comparision_network.copy() else: sftimevariantnetwork = pd.concat([sftimevariantnetwork,comparision_network], ignore_index=True) # lastly, export the network # sftimevariantnetwork.to_file(BASE_DIR.parent.joinpath("exported", ("sf_timevariantnetwork.geojson")), driver='GeoJSON') linkstats.head() # 
sftimevariantnetwork.to_csv(BASE_DIR.parent.joinpath("exported", ("sf_timevariantnetwork.csv"))) # read the road network, incase it is already saved in the geojson file # sftimevariantnetwork = gpd.read_file(BASE_DIR.parent.joinpath("exported", ("sf_timevariantnetwork.geojson"))) # keep only selected columns fields sf_timevariantnetwork = sftimevariantnetwork[["id", "modes","length","lanes","capacity","geometry", 'linkstats_freespeed','linkstats_volume', 'linkstats_traveltime', 'date_time', 'linkstats_freespeed_mph', 'linkstats_congspd_mph', 'linkstats_ratio', "linkstats_vc_ratio"]] sf_timevariantnetwork['date_time']=pd.to_datetime(sf_timevariantnetwork['date_time']).dt.strftime('%Y-%m-%dT%H:%M:%S') sf_timevariantnetwork["time"] = pd.to_datetime(sf_timevariantnetwork["date_time"]).dt.strftime('%Y-%m-%dT%H:%M:%S') # add more green shades for 85% --> 100% # green_shades = ['#008000', '#198c19', '#329932', '#4ca64c', '#66b266', '#7fbf7f', '#99cc99', '#b2d8b2', '#cce5cc', '#e5f2e5'] # colors for congstd speed/freespeed ratio color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500",'#e5f2e5','#cce5cc','#b2d8b2','#99cc99','#7fbf7f','#66b266','#4ca64c','#329932', '#198c19','#008000'] # color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500","#cce5cc","#99cc99","#66b266","#008000"] step_pct = cmp.StepColormap( color_range_pct, vmin=0, vmax=1, index=[0,0.2,0.3,0.5,.6,0.7,0.80,0.85, 0.87,0.89,0.91,0.93,0.95,0.97,0.99,1.00], #for change in the colors, not used fr linear caption='% Speeds Difference' #Caption for Color scale or Legend ) # colors for congstd speed/freespeed ratio color_range_pct_vc = ['#008000', '#329932', '#66b266', '#99cc99', '#cce5cc', '#e5f2e5', # green shade '#ffa500', "#ffb732",'#ffc966', '#ffdb99', "#ffedcc", # orange shade '#ffe5e5', '#ffcccc','#ffb2b2','#ff9999','#ff6666', '#ff3232', '#ff0000' ] # red shade # color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500","#cce5cc","#99cc99","#66b266","#008000"] step_pct_vc = cmp.StepColormap( color_range_pct_vc, vmin=0, vmax=1, index=[0,0.1,0.2,0.3,0.4,0.5, 0.55,0.6,0.65,0.7,0.75, 0.80,0.85,0.90,0.95,0.97,0.99,1.00], #for change in the colors, not used fr linear caption='Volume-to-Capacity ratio' #Caption for Color scale or Legend ) # colors for congested speed and freespeed color_range = ["#ff0000","#ff6666","#ffb2b2","#ffa500","#ffc966","#ffdb99", "#cce5cc","#99cc99","#66b266","#008000"] step = cmp.StepColormap(color_range,vmin=0, vmax=100,index=[0,5,10,15,25,35,45,55,65,100], #for change in the colors, not used fr linear caption=' Speeds (mph)' #Caption for Color scale or Legend ) def getColorMap_pct(x): return str(step_pct(x)) def getColorMap_pct_vc(x): return str(step_pct_vc(x)) def getColorMap(x): return str(step(x)) sf_timevariantnetwork["fillColor_ratio"] = sf_timevariantnetwork["linkstats_ratio"].apply(getColorMap_pct) sf_timevariantnetwork["fillColor_vc_ratio"] = sf_timevariantnetwork["linkstats_vc_ratio"].apply(getColorMap_pct_vc) sf_timevariantnetwork["fillColor_freespeed_mph"] = sf_timevariantnetwork["linkstats_freespeed_mph"].apply(getColorMap) sf_timevariantnetwork["fillColor_congspd_mph"] = sf_timevariantnetwork["linkstats_congspd_mph"].apply(getColorMap) def coords(geom): return list(geom.coords) sf_timevariantnetwork['points'] = sf_timevariantnetwork.apply(lambda row: coords(row.geometry), axis=1) # groupby and aggreage columns by segment_links df1 = sf_timevariantnetwork.groupby('id').agg({'modes':'first', 'length':'first', 'lanes':list, 
'capacity':list, 'geometry':'first', 'linkstats_freespeed':list, 'linkstats_volume':list, 'linkstats_traveltime':list, 'date_time':list, 'linkstats_freespeed_mph':list, 'linkstats_congspd_mph':list, 'linkstats_ratio':list, 'linkstats_vc_ratio':list, 'time':list, 'fillColor_ratio':list, 'linkstats_volume':list, 'linkstats_traveltime':list, 'fillColor_freespeed_mph':list, 'fillColor_congspd_mph':list, 'fillColor_vc_ratio':list, 'points':'first'}).reset_index() # Create timemap for ratio_congestedspeed_freespeed def coords(geom): return list(geom.coords) features_ratio = [ { 'type':'Feature', "geometry":{ 'type': 'LineString', 'coordinates': coords(d.geometry), }, 'properties': { 'times': d['time'], 'color': "black", 'colors':d["fillColor_ratio"], "weight":0.6, "fillOpacity": 0.4, } } for _,d in df1.iterrows() ] from jinja2 import Template _template = Template(""" {% macro script(this, kwargs) %} L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({ _getDisplayDateFormat: function(date){ var newdate = new moment(date); console.log(newdate) return newdate.format("{{this.date_options}}"); } }); {{this._parent.get_name()}}.timeDimension = L.timeDimension( { period: {{ this.period|tojson }}, } ); var timeDimensionControl = new L.Control.TimeDimensionCustom( {{ this.options|tojson }} ); {{this._parent.get_name()}}.addControl(this.timeDimensionControl); var geoJsonLayer = L.geoJson({{this.data}}, { pointToLayer: function (feature, latLng) { if (feature.properties.icon == 'marker') { if(feature.properties.iconstyle){ return new L.Marker(latLng, { icon: L.icon(feature.properties.iconstyle)}); } //else return new L.Marker(latLng); } if (feature.properties.icon == 'circle') { if (feature.properties.iconstyle) { return new L.circleMarker(latLng, feature.properties.iconstyle) }; //else return new L.circleMarker(latLng); } //else return new L.Marker(latLng); }, style: function(feature) { lastIdx=feature.properties.colors.length-1 currIdx=feature.properties.colors.indexOf(feature.properties.color); if(currIdx==lastIdx){ feature.properties.color = feature.properties.colors[currIdx+1] } else{ feature.properties.color =feature.properties.colors[currIdx+1] } return {color: feature.properties.color} }, onEachFeature: function(feature, layer) { if (feature.properties.popup) { layer.bindPopup(feature.properties.popup); } } }) var {{this.get_name()}} = L.timeDimension.layer.geoJson( geoJsonLayer, { updateTimeDimension: true, addlastPoint: {{ this.add_last_point|tojson }}, duration: {{ this.duration }}, } ).addTo({{this._parent.get_name()}}); {% endmacro %} """) import folium from folium.plugins import TimestampedGeoJson m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron") t=TimestampedGeoJson({ 'type': 'FeatureCollection', 'features': features_ratio, }, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True) t._template=_template t.add_to(m) step_pct.add_to(m) # Add title map_title = "Ratio between Congested Speed (mph) and Free Speed (mph)" title_html = ''' <h3 align="center" style="font-size:16px"><b>{}</b></h3> '''.format(map_title) m.get_root().html.add_child(folium.Element(title_html)) file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_ratio_timemap.html")) m.save(str(file_name)) # m # Create timemap for v/c ratio def coords(geom): return list(geom.coords) features_ratio = [ { 'type':'Feature', "geometry":{ 'type': 'LineString', 'coordinates': coords(d.geometry), }, 'properties': { 'times': d['time'], 'color': "black", 
'colors':d["fillColor_vc_ratio"], "weight":0.6, "fillOpacity": 0.4, } } for _,d in df1.iterrows() ] from jinja2 import Template _template = Template(""" {% macro script(this, kwargs) %} L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({ _getDisplayDateFormat: function(date){ var newdate = new moment(date); console.log(newdate) return newdate.format("{{this.date_options}}"); } }); {{this._parent.get_name()}}.timeDimension = L.timeDimension( { period: {{ this.period|tojson }}, } ); var timeDimensionControl = new L.Control.TimeDimensionCustom( {{ this.options|tojson }} ); {{this._parent.get_name()}}.addControl(this.timeDimensionControl); var geoJsonLayer = L.geoJson({{this.data}}, { pointToLayer: function (feature, latLng) { if (feature.properties.icon == 'marker') { if(feature.properties.iconstyle){ return new L.Marker(latLng, { icon: L.icon(feature.properties.iconstyle)}); } //else return new L.Marker(latLng); } if (feature.properties.icon == 'circle') { if (feature.properties.iconstyle) { return new L.circleMarker(latLng, feature.properties.iconstyle) }; //else return new L.circleMarker(latLng); } //else return new L.Marker(latLng); }, style: function(feature) { lastIdx=feature.properties.colors.length-1 currIdx=feature.properties.colors.indexOf(feature.properties.color); if(currIdx==lastIdx){ feature.properties.color = feature.properties.colors[currIdx+1] } else{ feature.properties.color =feature.properties.colors[currIdx+1] } return {color: feature.properties.color} }, onEachFeature: function(feature, layer) { if (feature.properties.popup) { layer.bindPopup(feature.properties.popup); } } }) var {{this.get_name()}} = L.timeDimension.layer.geoJson( geoJsonLayer, { updateTimeDimension: true, addlastPoint: {{ this.add_last_point|tojson }}, duration: {{ this.duration }}, } ).addTo({{this._parent.get_name()}}); {% endmacro %} """) import folium from folium.plugins import TimestampedGeoJson m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron") t=TimestampedGeoJson({ 'type': 'FeatureCollection', 'features': features_ratio, }, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True) t._template=_template t.add_to(m) step_pct_vc.add_to(m) # Add title map_title = "Volume-to-Capacity Ratio" title_html = ''' <h3 align="center" style="font-size:16px"><b>{}</b></h3> '''.format(map_title) m.get_root().html.add_child(folium.Element(title_html)) file_name = BASE_DIR.parent.joinpath("exported","extended_run",("linkst_vc_ratio_timemap.html")) m.save(str(file_name)) # m # Create timemap for freespeed (mph) def coords(geom): return list(geom.coords) features_freespeed = [ { 'type':'Feature', "geometry":{ 'type': 'LineString', 'coordinates': coords(d.geometry), }, 'properties': { 'times': d['time'], 'color': "black", 'colors':d["fillColor_freespeed_mph"], 'weight':0.6, "fillOpacity": 0.4, } } for _,d in df1.iterrows() ] from jinja2 import Template _template = Template(""" {% macro script(this, kwargs) %} L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({ _getDisplayDateFormat: function(date){ var newdate = new moment(date); console.log(newdate) return newdate.format("{{this.date_options}}"); } }); {{this._parent.get_name()}}.timeDimension = L.timeDimension( { period: {{ this.period|tojson }}, } ); var timeDimensionControl = new L.Control.TimeDimensionCustom( {{ this.options|tojson }} ); {{this._parent.get_name()}}.addControl(this.timeDimensionControl); var geoJsonLayer = L.geoJson({{this.data}}, { pointToLayer: function 
(feature, latLng) { if (feature.properties.icon == 'marker') { if(feature.properties.iconstyle){ return new L.Marker(latLng, { icon: L.icon(feature.properties.iconstyle)}); } //else return new L.Marker(latLng); } if (feature.properties.icon == 'circle') { if (feature.properties.iconstyle) { return new L.circleMarker(latLng, feature.properties.iconstyle) }; //else return new L.circleMarker(latLng); } //else return new L.Marker(latLng); }, style: function(feature) { lastIdx=feature.properties.colors.length-1 currIdx=feature.properties.colors.indexOf(feature.properties.color); if(currIdx==lastIdx){ feature.properties.color = feature.properties.colors[currIdx+1] } else{ feature.properties.color =feature.properties.colors[currIdx+1] } return {color: feature.properties.color} }, onEachFeature: function(feature, layer) { if (feature.properties.popup) { layer.bindPopup(feature.properties.popup); } } }) var {{this.get_name()}} = L.timeDimension.layer.geoJson( geoJsonLayer, { updateTimeDimension: true, addlastPoint: {{ this.add_last_point|tojson }}, duration: {{ this.duration }}, } ).addTo({{this._parent.get_name()}}); {% endmacro %} """) import folium from folium.plugins import TimestampedGeoJson m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron") # Add title map_title = "Free Speed (mph)" title_html = ''' <h3 align="center" style="font-size:16px"><b>{}</b></h3> '''.format(map_title) m.get_root().html.add_child(folium.Element(title_html)) t=TimestampedGeoJson({ 'type': 'FeatureCollection', 'features': features_freespeed, }, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True) t._template=_template t.add_to(m) step.add_to(m) file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_freespeed_timemap.html")) m.save(str(file_name)) # m # Create timemap for congested speed (mph) def coords(geom): return list(geom.coords) features_congestedspeed = [ { 'type':'Feature', "geometry":{ 'type': 'LineString', 'coordinates': coords(d.geometry), }, 'properties': { 'times': d['time'], 'color': "black", 'colors':d["fillColor_congspd_mph"], 'weight':0.6, "fillOpacity": 0.4, } } for _,d in df1.iterrows() ] from jinja2 import Template _template = Template(""" {% macro script(this, kwargs) %} L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({ _getDisplayDateFormat: function(date){ var newdate = new moment(date); console.log(newdate) return newdate.format("{{this.date_options}}"); } }); {{this._parent.get_name()}}.timeDimension = L.timeDimension( { period: {{ this.period|tojson }}, } ); var timeDimensionControl = new L.Control.TimeDimensionCustom( {{ this.options|tojson }} ); {{this._parent.get_name()}}.addControl(this.timeDimensionControl); var geoJsonLayer = L.geoJson({{this.data}}, { pointToLayer: function (feature, latLng) { if (feature.properties.icon == 'marker') { if(feature.properties.iconstyle){ return new L.Marker(latLng, { icon: L.icon(feature.properties.iconstyle)}); } //else return new L.Marker(latLng); } if (feature.properties.icon == 'circle') { if (feature.properties.iconstyle) { return new L.circleMarker(latLng, feature.properties.iconstyle) }; //else return new L.circleMarker(latLng); } //else return new L.Marker(latLng); }, style: function(feature) { lastIdx=feature.properties.colors.length-1 currIdx=feature.properties.colors.indexOf(feature.properties.color); if(currIdx==lastIdx){ feature.properties.color = feature.properties.colors[currIdx+1] } else{ feature.properties.color =feature.properties.colors[currIdx+1] } 
return {color: feature.properties.color} }, onEachFeature: function(feature, layer) { if (feature.properties.popup) { layer.bindPopup(feature.properties.popup); } } }) var {{this.get_name()}} = L.timeDimension.layer.geoJson( geoJsonLayer, { updateTimeDimension: true, addlastPoint: {{ this.add_last_point|tojson }}, duration: {{ this.duration }}, } ).addTo({{this._parent.get_name()}}); {% endmacro %} """) import folium from folium.plugins import TimestampedGeoJson m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron") t=TimestampedGeoJson({ 'type': 'FeatureCollection', 'features': features_congestedspeed, }, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True) t._template=_template t.add_to(m) step.add_to(m) # Add title map_title = "Congested Speed (mph)" title_html = ''' <h3 align="center" style="font-size:16px"><b>{}</b></h3> '''.format(map_title) m.get_root().html.add_child(folium.Element(title_html)) file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_congestedspeed_timemap.html")) m.save(str(file_name)) # m # get map for each different time # static maps for congestedspeed/freespeed def get_dataframe(_time): # linkstats file linkstats = pd.read_csv("../SF_all_trips/sf-tscore-all-trips-20PCsample-updatedRideHailFleet-updatedParking__etg/ITERS/it.4/4.linkstats.csv.gz", compression="gzip", low_memory=False) # unmodified_linkstats = pd.read_csv(BASE_DIR.parent.joinpath("runs", "sf-tscore-int-int-trips-model-network-events-20PC-sample-bpr-func__tlm","ITERS","it.30", "30.linkstats_unmodified.csv.gz"),compression="gzip", low_memory=False) time = int(_time) linkstats = linkstats[linkstats["hour"]==(time)].copy() linkstats=linkstats.add_prefix("linkstats_") linkstats.rename(columns={('linkstats_link'): 'id'}, inplace=True) linkstats["id"] = linkstats["id"].astype('string') return linkstats def highlight_function(feature): return {"fillColor": "#ffff00", "color": "#ffff00", "weight": 5,"fillOpacity": 0.40 } color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500",'#e5f2e5','#cce5cc','#b2d8b2','#99cc99','#7fbf7f','#66b266','#4ca64c','#329932', '#198c19','#008000'] step_pct = cmp.StepColormap( color_range_pct, vmin=0, vmax=1, index=[0,0.2,0.3,0.5,.6,0.7,0.80,0.85, 0.87,0.89,0.91,0.93,0.95,0.97,0.99,1.00], #for change in the colors, not used fr linear caption='% Speeds Difference' #Caption for Color scale or Legend ) # read the road network sf_roadnetwork = gpd.read_file(BASE_DIR.parent.joinpath("Network", "sfNetwork.geojsonl")) sf_roadnetwork = sf_roadnetwork[["id","modes","length","lanes","from","to","capacity","geometry"]] for time_hour in tqdm(range(0,30)): # set the map pct_m = folium.Map([37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron") # get the hour and filter the linkstat file linkstats = get_dataframe(str(time_hour)) # merge with featureclass of SF data comparision_network = sf_roadnetwork.merge(linkstats,on="id") # calculate the freespeed (mph), congested speed (mph), ratio (congestedsped/freespeed) # linkstats comparision_network["linkstats_freespeed_mph"] = comparision_network["linkstats_freespeed"]*2.23694 comparision_network["linkstats_congspd_mph"] = (comparision_network["linkstats_length"]/comparision_network["linkstats_traveltime"])*2.23694 comparision_network["linkstats_ratio"] = comparision_network["linkstats_congspd_mph"] / comparision_network["linkstats_freespeed_mph"] time_stamp = "" # folium if time_hour<30: time_stamp = f'{time_hour:02d}' layer_name = 
str(time_stamp) # layer_name=str(str(time_hour) + (' am' if time_hour < 12 else ' pm')) ratio_feature_group = folium.FeatureGroup(name=layer_name) pct_feature_group = folium.GeoJson(comparision_network, name = ("Hour - " + layer_name), style_function=lambda x: { "fillColor": step_pct(x["properties"]["linkstats_ratio"]), "color": step_pct(x["properties"]["linkstats_ratio"]), "fillOpacity": 0.2, "weight":1, }, tooltip=folium.GeoJsonTooltip(fields=["id","length", "linkstats_freespeed_mph", "linkstats_traveltime","linkstats_congspd_mph"], aliases=["Link ID", "Segment Length (m)", "Freespeed (mph)", "Travel time (sec)", "Congested Speed (mph)"], localize=True), popup = folium.GeoJsonPopup(fields=["id","length", "linkstats_freespeed_mph", "linkstats_traveltime","linkstats_congspd_mph"], aliases=["Link ID", "Segment Length (m)", "Freespeed (mph)", "Travel time (sec)", "Congested Speed (mph)"], localize=True), highlight_function=highlight_function, zoom_on_click=True ).add_to(ratio_feature_group) # Add search functionality to the map search_link = Search(layer=pct_feature_group, geom_type="LineString", placeholders = "Search for Link ID", collapsed="False", search_label = 'id', search_zoom = 17, position='topleft', ).add_to(pct_m) ratio_feature_group.add_to(pct_m) folium.LayerControl().add_to(pct_m) map_title = "Ratio between Congested Speed and Free Speed" title_html = '''<h3 align="center" style="font-size:16px"><b>{}</b></h3>'''.format(map_title) pct_m.get_root().html.add_child(folium.Element(title_html)) # save the file file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_ratio_timemap_{}.html").format(time_stamp)) pct_m.save(str(file_name)) ```
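As a quick aside on the 2.23694 factor used for the speed columns throughout this notebook (a small illustrative check, not part of the original analysis): it converts metres per second to miles per hour, so a 100 m link traversed in 10 s corresponds to roughly 22.4 mph.

```
# sanity check of the m/s -> mph conversion used for the congested speed columns
length_m, traveltime_s = 100.0, 10.0
congspd_mph = (length_m / traveltime_s) * 2.23694
print(round(congspd_mph, 1))  # 22.4
```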
# Experiments comparing the performance of traditional pooling operations and entropy pooling within a shallow neural network and Lenet. The experiments use cifar10 and cifar100. ``` %matplotlib inline import torch import torchvision import torchvision.transforms as transforms transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=8) testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=8) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.modules.utils import _pair, _quadruple import time from skimage.measure import shannon_entropy from scipy import stats from torch.nn.modules.utils import _pair, _quadruple import time from skimage.measure import shannon_entropy from scipy import stats import numpy as np class EntropyPool2d(nn.Module): def __init__(self, kernel_size=3, stride=1, padding=0, same=False, entr='high'): super(EntropyPool2d, self).__init__() self.k = _pair(kernel_size) self.stride = _pair(stride) self.padding = _quadruple(padding) # convert to l, r, t, b self.same = same self.entr = entr def _padding(self, x): if self.same: ih, iw = x.size()[2:] if ih % self.stride[0] == 0: ph = max(self.k[0] - self.stride[0], 0) else: ph = max(self.k[0] - (ih % self.stride[0]), 0) if iw % self.stride[1] == 0: pw = max(self.k[1] - self.stride[1], 0) else: pw = max(self.k[1] - (iw % self.stride[1]), 0) pl = pw // 2 pr = pw - pl pt = ph // 2 pb = ph - pt padding = (pl, pr, pt, pb) else: padding = self.padding return padding def forward(self, x): # using existing pytorch functions and tensor ops so that we get autograd, # would likely be more efficient to implement from scratch at C/Cuda level start = time.time() x = F.pad(x, self._padding(x), mode='reflect') x_detached = x.cpu().detach() x_unique, x_indices, x_inverse, x_counts = np.unique(x_detached, return_index=True, return_inverse=True, return_counts=True) freq = torch.FloatTensor([x_counts[i] / len(x_inverse) for i in x_inverse]).cuda() x_probs = freq.view(x.shape) x_probs = x_probs.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) x_probs = x_probs.contiguous().view(x_probs.size()[:4] + (-1,)) if self.entr is 'high': x_probs, indices = torch.min(x_probs.cuda(), dim=-1) elif self.entr is 'low': x_probs, indices = torch.max(x_probs.cuda(), dim=-1) else: raise Exception('Unknown entropy mode: {}'.format(self.entr)) x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1]) x = x.contiguous().view(x.size()[:4] + (-1,)) indices = indices.view(indices.size() + (-1,)) pool = torch.gather(input=x, dim=-1, index=indices) return pool.squeeze(-1) import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import time from sklearn.metrics import f1_score MAX = 'max' AVG = 'avg' HIGH_ENTROPY = 'high_entr' LOW_ENTROPY = 'low_entr' class Net1Pool(nn.Module): def __init__(self, num_classes=10, pooling=MAX): super(Net1Pool, self).__init__() self.conv1 = nn.Conv2d(3, 30, 5) if pooling is MAX: self.pool = nn.MaxPool2d(2, 2) elif pooling is AVG: self.pool = nn.AvgPool2d(2, 2) 
elif pooling is HIGH_ENTROPY: self.pool = EntropyPool2d(2, 2, entr='high') elif pooling is LOW_ENTROPY: self.pool = EntropyPool2d(2, 2, entr='low') self.fc0 = nn.Linear(30 * 14 * 14, num_classes) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = x.view(-1, 30 * 14 * 14) x = F.relu(self.fc0(x)) return x class Net2Pool(nn.Module): def __init__(self, num_classes=10, pooling=MAX): super(Net2Pool, self).__init__() self.conv1 = nn.Conv2d(3, 50, 5, 1) self.conv2 = nn.Conv2d(50, 50, 5, 1) if pooling is MAX: self.pool = nn.MaxPool2d(2, 2) elif pooling is AVG: self.pool = nn.AvgPool2d(2, 2) elif pooling is HIGH_ENTROPY: self.pool = EntropyPool2d(2, 2, entr='high') elif pooling is LOW_ENTROPY: self.pool = EntropyPool2d(2, 2, entr='low') self.fc1 = nn.Linear(5*5*50, 500) self.fc2 = nn.Linear(500, num_classes) def forward(self, x): x = F.relu(self.conv1(x)) x = self.pool(x) x = F.relu(self.conv2(x)) x = self.pool(x) x = x.view(-1, 5*5*50) x = F.relu(self.fc1(x)) x = self.fc2(x) return x def configure_net(net, device): net.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) return net, optimizer, criterion def train(net, optimizer, criterion, trainloader, device, epochs=10, logging=2000): for epoch in range(epochs): running_loss = 0.0 for i, data in enumerate(trainloader, 0): start = time.time() inputs, labels = data inputs, labels = inputs.to(device), labels.to(device) optimizer.zero_grad() outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() running_loss += loss.item() if i % logging == logging - 1: print('[%d, %5d] loss: %.3f duration: %.5f' % (epoch + 1, i + 1, running_loss / logging, time.time() - start)) running_loss = 0.0 print('Finished Training') def test(net, testloader, device): correct = 0 total = 0 predictions = [] l = [] with torch.no_grad(): for data in testloader: images, labels = data images, labels = images.to(device), labels.to(device) outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() predictions.extend(predicted.cpu().numpy()) l.extend(labels.cpu().numpy()) print('Accuracy: {}'.format(100 * correct / total)) epochs = 10 logging = 15000 num_classes = 100 print('- - - - - - - - -- - - - 2 pool - - - - - - - - - - - - - - - -') print('- - - - - - - - -- - - - MAX - - - - - - - - - - - - - - - -') device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=MAX), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) print('- - - - - - - - -- - - - AVG - - - - - - - - - - - - - - - -') net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=AVG), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) print('- - - - - - - - -- - - - HIGH - - - - - - - - - - - - - - - -') net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=HIGH_ENTROPY), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) print('- - - - - - - - -- - - - LOW - - - - - - - - - - - - - - - -') net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=LOW_ENTROPY), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) 
test(net, testloader, device) print('- - - - - - - - -- - - - 1 pool - - - - - - - - - - - - - - - -') print('- - - - - - - - -- - - - MAX - - - - - - - - - - - - - - - -') device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=MAX), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) print('- - - - - - - - -- - - - AVG - - - - - - - - - - - - - - - -') net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=AVG), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) print('- - - - - - - - -- - - - HIGH - - - - - - - - - - - - - - - -') net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=HIGH_ENTROPY), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) print('- - - - - - - - -- - - - LOW - - - - - - - - - - - - - - - -') net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=LOW_ENTROPY), device) train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging) test(net, testloader, device) ```
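To sanity-check the custom pooling layer outside a full training run, here is a minimal shape check (my own sketch, not part of the original experiments). It assumes a CUDA device is available, since EntropyPool2d's forward() moves tensors to the GPU internally.

```
# minimal standalone check of EntropyPool2d: a 2x2 window with stride 2 should
# halve the spatial dimensions, just like the max/avg pooling baselines
pool = EntropyPool2d(kernel_size=2, stride=2, entr='high')
x = torch.randn(1, 3, 8, 8).cuda()  # requires a GPU; the layer calls .cuda() internally
y = pool(x)
print(x.shape, y.shape)  # expected: torch.Size([1, 3, 8, 8]) torch.Size([1, 3, 4, 4])
```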
<a href="https://colab.research.google.com/github/AIWintermuteAI/aXeleRate/blob/dev/resources/aXeleRate_mark_detector.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## M.A.R.K. Detection model Training and Inference In this notebook we will use axelerate, Keras-based framework for AI on the edge, to quickly setup model training and then after training session is completed convert it to .tflite and .kmodel formats. First, let's take care of some administrative details. 1) Before we do anything, make sure you have choosen GPU as Runtime type (in Runtime - > Change Runtime type). 2) We need to mount Google Drive for saving our model checkpoints and final converted model(s). Press on Mount Google Drive button in Files tab on your left. In the next cell we clone axelerate Github repository and import it. **It is possible to use pip install or python setup.py install, but in that case you will need to restart the enironment.** Since I'm trying to make the process as streamlined as possibile I'm using sys.path.append for import. ``` %load_ext tensorboard #we need imgaug 0.4 for image augmentations to work properly, see https://stackoverflow.com/questions/62580797/in-colab-doing-image-data-augmentation-with-imgaug-is-not-working-as-intended !pip uninstall -y imgaug && pip uninstall -y albumentations && pip install imgaug==0.4 !git clone https://github.com/AIWintermuteAI/aXeleRate.git import sys sys.path.append('/content/aXeleRate') from axelerate import setup_training, setup_inference ``` At this step you typically need to get the dataset. You can use !wget command to download it from somewhere on the Internet or !cp to copy from My Drive as in this example ``` !cp -r /content/drive/'My Drive'/pascal_20_segmentation.zip . !unzip --qq pascal_20_segmentation.zip ``` Dataset preparation and postprocessing are discussed in the article here: The annotation tool I use is LabelImg https://github.com/tzutalin/labelImg Let's visualize our detection model test dataset. There are images in validation folder with corresponding annotations in PASCAL-VOC format in validation annotations folder. ``` %matplotlib inline !gdown https://drive.google.com/uc?id=1s2h6DI_1tHpLoUWRc_SavvMF9jYG8XSi #dataset !gdown https://drive.google.com/uc?id=1-bDRZ9Z2T81SfwhHEfZIMFG7FtMQ5ZiZ #pre-trained model !unzip --qq mark_dataset.zip from axelerate.networks.common_utils.augment import visualize_detection_dataset visualize_detection_dataset(img_folder='mark_detection/imgs_validation', ann_folder='mark_detection/ann_validation', num_imgs=10, img_size=224, augment=True) ``` Next step is defining a config dictionary. Most lines are self-explanatory. Type is model frontend - Classifier, Detector or Segnet Architecture is model backend (feature extractor) - Full Yolo - Tiny Yolo - MobileNet1_0 - MobileNet7_5 - MobileNet5_0 - MobileNet2_5 - SqueezeNet - NASNetMobile - DenseNet121 - ResNet50 For more information on anchors, please read here https://github.com/pjreddie/darknet/issues/568 Labels are labels present in your dataset. IMPORTANT: Please, list all the labels present in the dataset. 
object_scale determines how much to penalize wrong prediction of confidence of object predictors no_object_scale determines how much to penalize wrong prediction of confidence of non-object predictors coord_scale determines how much to penalize wrong position and size predictions (x, y, w, h) class_scale determines how much to penalize wrong class prediction For converter type you can choose the following: 'k210', 'tflite_fullint', 'tflite_dynamic', 'edgetpu', 'openvino', 'onnx' ## Parameters for Person Detection K210, which is where we will run the network, has constrained memory (5.5 RAM) available, so with Micropython firmware, the largest model you can run is about 2 MB, which limits our architecture choice to Tiny Yolo, MobileNet(up to 0.75 alpha) and SqueezeNet. Out of these 3 architectures, only one comes with pre-trained model - MobileNet. So, to save the training time we will use Mobilenet with alpha 0.75, which has ... parameters. For objects that do not have that much variety, you can use MobileNet with lower alpha, down to 0.25. ``` config = { "model":{ "type": "Detector", "architecture": "MobileNet5_0", "input_size": 224, "anchors": [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828], "labels": ["mark"], "coord_scale" : 1.0, "class_scale" : 1.0, "object_scale" : 5.0, "no_object_scale" : 1.0 }, "weights" : { "full": "", "backend": "imagenet" }, "train" : { "actual_epoch": 50, "train_image_folder": "mark_detection/imgs", "train_annot_folder": "mark_detection/ann", "train_times": 1, "valid_image_folder": "mark_detection/imgs_validation", "valid_annot_folder": "mark_detection/ann_validation", "valid_times": 1, "valid_metric": "mAP", "batch_size": 32, "learning_rate": 1e-3, "saved_folder": F"/content/drive/MyDrive/mark_detector", "first_trainable_layer": "", "augumentation": True, "is_only_detect" : False }, "converter" : { "type": ["k210","tflite"] } } ``` Let's check what GPU we have been assigned in this Colab session, if any. ``` from tensorflow.python.client import device_lib device_lib.list_local_devices() ``` Also, let's open Tensorboard, where we will be able to watch model training progress in real time. Training and validation logs also will be saved in project folder. Since there are no logs before we start the training, tensorboard will be empty. Refresh it after first epoch. ``` %tensorboard --logdir logs ``` Finally we start the training by passing config dictionary we have defined earlier to setup_training function. The function will start the training with Checkpoint, Reduce Learning Rate on Plateau and Early Stopping callbacks. After the training has stopped, it will convert the best model into the format you have specified in config and save it to the project folder. ``` from keras import backend as K K.clear_session() model_path = setup_training(config_dict=config) ``` After training it is good to check the actual perfomance of your model by doing inference on your validation dataset and visualizing results. This is exactly what next block does. Obviously since our model has only trained on a few images the results are far from stellar, but if you have a good dataset, you'll have better results. ``` from keras import backend as K K.clear_session() setup_inference(config, model_path) ``` My end results are: {'fscore': 0.942528735632184, 'precision': 0.9318181818181818, 'recall': 0.9534883720930233} **You can obtain these results by loading a pre-trained model.** Good luck and happy training! 
Have a look at these articles, that would allow you to get the most of Google Colab or connect to local runtime if there are no GPUs available; https://medium.com/@oribarel/getting-the-most-out-of-your-google-colab-2b0585f82403 https://research.google.com/colaboratory/local-runtimes.html
# Docutils

## Presentation

Click [__here__] (youtube link) for the video presentation

## Summary of Support Files

- `demo.ipynb`: the notebook containing this tutorial code
- `test.csv`: a small data file used in the tutorial code

## Installation Instructions

Use `!pip install docutils` to install the `docutils` package. Next, use `import docutils` to import the package into your notebook. For example, to import specific modules from the `docutils` package use the following line of code: `from docutils import core, io`

Below is a list of the modules and subpackages that are part of the `docutils` package.

## Guide

__docutils 0.17.1 version__

- Author: David Goodger
- Contact: goodger@python.org

[Docutils](https://pypi.org/project/docutils/) is an open-source, modular text processing system for processing plaintext documentation into a more useful format. Formats include HTML, man-pages, OpenDocument, LaTeX, or XML. Docutils supports reStructuredText for input, an easy-to-read, what-you-see-is-what-you-get plaintext markup syntax. Docutils is short for "Python Documentation Utilities".

Support for the following sources has been implemented:

- Standalone files
- `PEPs (Python Enhancement Proposals)`

Support for these sources is currently being developed:

- Inline documentation
- Wikis
- Email and more

The Docutils distribution consists of:

- the `docutils` package (or library)
- front-end tools
- a test suite
- documentation

## Notable docutils Modules & Subpackages

| Module | Definition |
| ------------- | ------------- |
| __core__ | Contains the ``Publisher`` class and ``publish_()`` convenience functions |
| __io__ | Provides a uniform API for low-level input and output |
| __nodes__ | Docutils document tree (doctree) node class library |

| Subpackages | Definition |
| ------------- | ------------- |
| **languages** | Language-specific mappings of terms |
| **parsers** | Syntax-specific input parser modules or packages |
| **readers** | Context-specific input handlers which understand the data source and manage a parser |

Below is an overview of the `docutils` package:

![alt text](docutils.png "Docutils")

## Main Use Applications of Package

The reStructuredText component of the `docutils` package makes it easy to convert between different formats, especially from plain text to a static website. It is unique because it is extensible, which makes it better suited to this task than simpler markups. Additionally, users can pair `docutils` with `Sphinx` to convert text to HTML. The `Sphinx` package is built on the `docutils` package.
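To tie the pieces above together, here is a minimal example of the `publish_()` convenience functions provided by the `core` module, converting a small reStructuredText snippet to HTML (one of the supported output formats). The snippet content itself is only illustrative.

```
from docutils.core import publish_string

rst_source = """
Docutils demo
=============

*reStructuredText* is an easy-to-read plaintext markup syntax.
"""

# publish_string runs the reader -> parser -> writer pipeline in memory and
# returns the rendered document (as bytes for the 'html' writer)
html_output = publish_string(source=rst_source, writer_name="html")
print(html_output.decode("utf-8")[:200])  # show the start of the generated HTML
```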
github_jupyter
## Statistics ### Questions ```{admonition} Problem: JOIN Dataframes :class: dropdown, tip Can you tell me the ways in which 2 pandas data frames can be joined? ``` ```{admonition} Solution: :class: dropdown A very high level difference is that merge() is used to combine two (or more) dataframes on the basis of values of common columns (indices can also be used, use left_index=True and/or right_index=True), and concat() is used to append one (or more) dataframes one below the other (or sideways, depending on whether the axis option is set to 0 or 1). join() is used to merge 2 dataframes on the basis of the index; instead of using merge() with the option left_index=True we can use join(). ![Combine DataFrames](images/image1.PNG) ``` ```{admonition} Problem: [GOOGLE] Normal Distribution :class: dropdown, tip Write a function to generate N samples from a normal distribution and plot the histogram. ``` ``` import numpy as np import matplotlib.pyplot as plt from scipy import stats def normal_sample_generator(N): # can be done using np.random.randn or stats.norm.rvs #x = np.random.randn(N) x = stats.norm.rvs(size=N) num_bins = 20 plt.hist(x, bins=num_bins, facecolor='blue', alpha=0.5) y = np.linspace(-4, 4, N) bin_width = (x.max() - x.min()) / num_bins plt.plot(y, stats.norm.pdf(y) * N * bin_width) plt.show() normal_sample_generator(10000) ``` ```{admonition} Problem: [UBER] Bernoulli trial generator :class: dropdown, tip Given a random Bernoulli trial generator, write a function to return a value sampled from a normal distribution. ``` ```{admonition} Solution: :class: dropdown Solution pending, [Reference material link](Given a random Bernoulli trial generator, how do you return a value sampled from a normal distribution?) ``` ```{admonition} Problem: [PINTEREST] Interquartile Distance :class: dropdown, tip Given an array of unsorted random numbers (decimals) find the interquartile distance. ``` ``` # Interquartile distance is the difference between first and third quartile # first let's generate a list of random numbers import random import numpy as np li = [round(random.uniform(33.33, 66.66), 2) for i in range(50)] print(li) qtl_1 = np.quantile(li,.25) qtl_3 = np.quantile(li,.75) print("Interquartile distance: ", qtl_1 - qtl_3) ``` ````{admonition} Problem: [GENENTECH] Imputing the mdeian :class: dropdown, tip Write a function cheese_median to impute the median price of the selected California cheeses in place of the missing values. You may assume at least one cheese is not missing its price. Input: ```python import pandas as pd cheeses = {"Name": ["Bohemian Goat", "Central Coast Bleu", "Cowgirl Mozzarella", "Cypress Grove Cheddar", "Oakdale Colby"], "Price" : [15.00, None, 30.00, None, 45.00]} df_cheeses = pd.DataFrame(cheeses) ``` | Name | Price | |:---------------------:|:-----:| | Bohemian Goat | 15.00 | | Central Coast Bleu | 30.00 | | Cowgirl Mozzarella | 30.00 | | Cypress Grove Cheddar | 30.00 | | Oakdale Colby | 45.00 | ```` ``` import pandas as pd cheeses = {"Name": ["Bohemian Goat", "Central Coast Bleu", "Cowgirl Mozzarella", "Cypress Grove Cheddar", "Oakdale Colby"], "Price" : [15.00, None, 30.00, None, 45.00]} df_cheeses = pd.DataFrame(cheeses) df_cheeses['Price'] = df_cheeses['Price'].fillna(df_cheeses['Price'].median()) df_cheeses.head() ```
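For the Uber question above, whose solution is still marked as pending, one standard approach is the central limit theorem: sum many Bernoulli trials and standardize the sum. The sketch below assumes a fair generator with p = 0.5 and is only one possible solution:

```
import random
import numpy as np
import matplotlib.pyplot as plt

def bernoulli_trial(p=0.5):
    # stand-in for the "given" Bernoulli trial generator
    return 1 if random.random() < p else 0

def sample_from_normal(n_trials=1000, p=0.5):
    # CLT: the standardized sum of many Bernoulli trials is approximately N(0, 1)
    s = sum(bernoulli_trial(p) for _ in range(n_trials))
    return (s - n_trials * p) / np.sqrt(n_trials * p * (1 - p))

samples = [sample_from_normal() for _ in range(2000)]
plt.hist(samples, bins=30)
plt.show()
```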
github_jupyter
# Real Estate Price Prediction ``` import pandas as pd df = pd.read_csv("data.csv") df.head() df['CHAS'].value_counts() df.info() df.describe() %matplotlib inline import matplotlib.pyplot as plt df.hist(bins=50, figsize=(20,15)) ``` ## train_test_split ``` import numpy as np def split_train_test(data, test_ratio): np.random.seed(42) shuffled = np.random.permutation(len(data)) test_set_size = int(len(data) * test_ratio) test_indices = shuffled[:test_set_size] train_indices = shuffled[test_set_size:] return data.iloc[train_indices], data.iloc[test_indices] train_set, test_set = split_train_test(df, 0.2) print(f"The length of train dataset is: {len(train_set)}") print(f"The length of train dataset is: {len(test_set)}") def data_percent_allocation(train_set, test_set): total = len(df) train_percent = round((len(train_set)/total) * 100) test_percent = round((len(test_set)/total) * 100) return train_percent, test_percent data_percent_allocation(train_set, test_set) ``` ## train_test_split from sklearn ``` from sklearn.model_selection import train_test_split train_set, test_set = train_test_split(df, test_size = 0.2, random_state = 42) print(f"The length of train dataset is: {len(train_set)}") print(f"The length of train dataset is: {len(test_set)}") from sklearn.model_selection import StratifiedShuffleSplit split = StratifiedShuffleSplit(n_splits = 1, test_size = 0.2, random_state = 42) for train_index, test_index in split.split(df, df['CHAS']): strat_train_set = df.loc[train_index] strat_test_set = df.loc[test_index] strat_test_set['CHAS'].value_counts() test_set['CHAS'].value_counts() strat_train_set['CHAS'].value_counts() train_set['CHAS'].value_counts() ``` ### Stratified learning equal splitting of zero and ones ``` 95/7 376/28 df = strat_train_set.copy() ``` ## Corelations ``` from pandas.plotting import scatter_matrix attributes = ["MEDV", "RM", "ZN" , "LSTAT"] scatter_matrix(df[attributes], figsize = (12,8)) df.plot(kind="scatter", x="RM", y="MEDV", alpha=1) ``` ### Trying out attribute combinations ``` df["TAXRM"] = df["TAX"]/df["RM"] df.head() corr_matrix = df.corr() corr_matrix['MEDV'].sort_values(ascending=False) # 1 means strong positive corr and -1 means strong negative corr. # EX: if RM will increase our final result(MEDV) in prediction will also increase. df.plot(kind="scatter", x="TAXRM", y="MEDV", alpha=1) df = strat_train_set.drop("MEDV", axis=1) df_labels = strat_train_set["MEDV"].copy() ``` ## Pipeline ``` from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.impute import SimpleImputer my_pipeline = Pipeline([ ('imputer', SimpleImputer(strategy="median")), ('std_scaler', StandardScaler()), ]) df_numpy = my_pipeline.fit_transform(df) df_numpy #Numpy array of df as models will take numpy array as input. 
df_numpy.shape ``` ## Model Selection ``` from sklearn.linear_model import LinearRegression from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor # model = LinearRegression() # model = DecisionTreeRegressor() model = RandomForestRegressor() model.fit(df_numpy, df_labels) some_data = df.iloc[:5] some_labels = df_labels.iloc[:5] prepared_data = my_pipeline.transform(some_data) model.predict(prepared_data) list(some_labels) ``` ## Evaluating the model ``` from sklearn.metrics import mean_squared_error df_predictions = model.predict(df_numpy) mse = mean_squared_error(df_labels, df_predictions) rmse = np.sqrt(mse) rmse # from sklearn.metrics import accuracy_score # accuracy_score(some_data, some_labels, normalize=False) ``` ## Cross Validation ``` from sklearn.model_selection import cross_val_score scores = cross_val_score(model, df_numpy, df_labels, scoring="neg_mean_squared_error", cv=10) rmse_scores = np.sqrt(-scores) rmse_scores def print_scores(scores): print("Scores:", scores) print("\nMean:", scores.mean()) print("\nStandard deviation:", scores.std()) print_scores(rmse_scores) ``` ### Saving Model ``` from joblib import dump, load dump(model, 'final_model.joblib') dump(model, 'final_model.sav') ``` ## Testing model on test data ``` X_test = strat_test_set.drop("MEDV", axis=1) Y_test = strat_test_set["MEDV"].copy() X_test_prepared = my_pipeline.transform(X_test) final_predictions = model.predict(X_test_prepared) final_mse = mean_squared_error(Y_test, final_predictions) final_rmse = np.sqrt(final_mse) final_rmse ```
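Since the model was persisted above with `dump`, here is a short sketch of how it could be reloaded later for inference; it reuses `X_test_prepared` and `Y_test` from the cells above and assumes `final_model.joblib` is present in the working directory:

```
import numpy as np
from joblib import load
from sklearn.metrics import mean_squared_error

loaded_model = load('final_model.joblib')                      # reload the persisted model
reloaded_predictions = loaded_model.predict(X_test_prepared)   # X_test_prepared from above
print(np.sqrt(mean_squared_error(Y_test, reloaded_predictions)))
```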
github_jupyter
# In-Place Waveform Library Updates This example notebook shows how one can update pulses data in-place without recompiling. © Raytheon BBN Technologies 2020 Set the `SAVE_WF_OFFSETS` flag in order that QGL will output a map of the waveform data within the compiled binary waveform library. ``` from QGL import * import QGL import os.path import pickle QGL.drivers.APS2Pattern.SAVE_WF_OFFSETS = True ``` Create the usual channel library with a couple of AWGs. ``` cl = ChannelLibrary(":memory:") q1 = cl.new_qubit("q1") aps2_1 = cl.new_APS2("BBNAPS1", address="192.168.5.101") aps2_2 = cl.new_APS2("BBNAPS2", address="192.168.5.102") dig_1 = cl.new_X6("X6_1", address=0) h1 = cl.new_source("Holz1", "HolzworthHS9000", "HS9004A-009-1", power=-30) h2 = cl.new_source("Holz2", "HolzworthHS9000", "HS9004A-009-2", power=-30) cl.set_control(q1, aps2_1, generator=h1) cl.set_measure(q1, aps2_2, dig_1.ch(1), generator=h2) cl.set_master(aps2_1, aps2_1.ch("m2")) cl["q1"].measure_chan.frequency = 0e6 cl.commit() ``` Compile a simple sequence. ``` mf = RabiAmp(cl["q1"], np.linspace(-1, 1, 11)) plot_pulse_files(mf, time=True) ``` Open the offsets file (in the same directory as the `.aps2` files, one per AWG slice.) ``` offset_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.offsets") with open(offset_f, "rb") as FID: offsets = pickle.load(FID) offsets ``` Let's replace every single pulse with a fixed amplitude `Utheta` ``` pulses = {l: Utheta(q1, amp=0.1, phase=0) for l in offsets} wfm_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.aps2") QGL.drivers.APS2Pattern.update_wf_library(wfm_f, pulses, offsets) ``` We see that the data in the file has been updated. ``` plot_pulse_files(mf, time=True) ``` ## Profiling How long does this take? ``` %timeit mf = RabiAmp(cl["q1"], np.linspace(-1, 1, 100)) ``` Getting the offsets is fast, and only needs to be done once ``` def get_offsets(): offset_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.offsets") with open(offset_f, "rb") as FID: offsets = pickle.load(FID) return offsets %timeit offsets = get_offsets() %timeit pulses = {l: Utheta(q1, amp=0.1, phase=0) for l in offsets} wfm_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.aps2") %timeit QGL.drivers.APS2Pattern.update_wf_library(wfm_f, pulses, offsets) # %timeit QGL.drivers.APS2Pattern.update_wf_library("/Users/growland/workspace/AWG/Rabi/Rabi-BBNAPS1.aps2", pulses, offsets) ``` Moral of the story: 300 ms for initial compilation, and roughly 1.3 ms for update_in_place.
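For reference, the same comparison can be made without the `%timeit` magic by wrapping the calls shown above in `time.perf_counter`; absolute numbers will of course vary by machine:

```
import time

start = time.perf_counter()
mf = RabiAmp(cl["q1"], np.linspace(-1, 1, 11))   # full compilation
print(f"compile:         {time.perf_counter() - start:.3f} s")

start = time.perf_counter()
QGL.drivers.APS2Pattern.update_wf_library(wfm_f, pulses, offsets)  # in-place update
print(f"in-place update: {time.perf_counter() - start:.3f} s")
```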
github_jupyter
``` %matplotlib inline from IPython.core.display import display, HTML display(HTML("<style>.container { width:100% !important; }</style>")) import numpy as np np.set_printoptions(precision=3, suppress=True) import library.helpers as h import matplotlib.pyplot as plt import numpy as np from sklearn.metrics import auc from scipy.integrate import simps from scipy.interpolate import interp1d LOG_NAME = "unix_forensic" VIZUALIZATIONS_DIR = "visualizations" fig = plt.figure(figsize=(10,10)) # TODO redo this one [ MODEL_NAMES = [ "lstm-ae", "triplet-la", "triplet-jd-la-all-40-70-all-all", "triplet-jd-la-non-60-65-non-non", "triplet-jd-la-00500-60-65-all-all-00001", "triplet-jd-la-00500-60-65-all-00001-non", "triplet-jd-la-01000-60-65-all-all-00001", "triplet-jd-la-05000-60-65-all-all-00001", "jd-la-x05000-pt60-nt65-llall-lcall-ee00001-ep20", "jd-la-xnon-pt40-nt70-llall-lcall-ee00001-ep50"] MODEL_NAMES_JD = [ "triplet-jd-la-all-40-70-all-all", # baseline, all labels "triplet-jd-la-non-60-65-non-non","triplet-jd-la-non-40-70-non-non", # jd no labels, 10 ep "jd-la-xnon-pt60-nt65-llnon-lcnon-eenon-ep50", "jd-la-xnon-pt30-nt75-llnon-lcnon-eenon-ep50" #"jd-la-x02000-pt55-nt65-llnon-lc00003-ee00002-ep10","jd-la-x02000-pt55-nt65-llnon-lc00003-ee00002-ep20", #"triplet-jd-la-05000-60-65-all-all-00001", "jd-la-x05000-pt60-nt65-llall-lcall-ee00001-ep20", ] MODEL_NAMES_LA = [ "triplet-jd-la-all-40-70-all-all", # baseline, all labels "jd-la-x00500-pt30-nt70-llall-lcall-ee00002-ep30", # 500 labels "jd-la-x01000-pt30-nt70-llall-lcall-ee00002-ep30", # 1000 labels "jd-la-x02000-pt30-nt70-llall-lcall-ee00002-ep30", # 2000 labels "jd-la-x02500-pt30-nt70-llall-lcall-ee00002-ep30", # 2500 labels "jd-la-x05000-pt30-nt70-llall-lcall-ee00002-ep30", # 5000 labels ] MODEL_NAMES_NT60 = [ "jd-la-xnon-pt10-nt60-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt20-nt60-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt30-nt60-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt40-nt60-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt50-nt60-llnon-lcnon-ee00002-ep30", ] MODEL_NAMES_NT70 = [ "jd-la-xnon-pt10-nt70-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt20-nt70-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt30-nt70-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt40-nt70-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt50-nt70-llnon-lcnon-ee00002-ep30", "jd-la-xnon-pt60-nt70-llnon-lcnon-ee00002-ep30" ] MODEL_NAMES_NT80 = [ "jd-la-xnon-pt10-nt80-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt20-nt80-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt30-nt80-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt40-nt80-llnon-lcnon-ee00002-ep30", # "jd-la-xnon-pt50-nt80-llnon-lcnon-ee00002-ep30", "jd-la-xnon-pt60-nt80-llnon-lcnon-ee00002-ep30", "jd-la-xnon-pt70-nt80-llnon-lcnon-ee00002-ep30" ] MODEL_NAMES_10 = [ #"jd-la-xnon-pt10-nt90-llnon-lcnon-ee00002-ep20", #"jd-la-xnon-pt20-nt90-llnon-lcnon-ee00002-ep20", #"jd-la-xnon-pt30-nt90-llnon-lcnon-ee00002-ep20", #"jd-la-xnon-pt40-nt90-llnon-lcnon-ee00002-ep20", "jd-la-xnon-pt50-nt90-llnon-lcnon-ee00002-ep20", "jd-la-xnon-pt52-nt70-llnon-lcnon-ee00002-ep30", #"jd-la-xnon-pt60-nt90-llnon-lcnon-ee00002-ep20", #"jd-la-xnon-pt70-nt90-llnon-lcnon-ee00002-ep20", #"jd-la-xnon-pt80-nt90-llnon-lcnon-ee00002-ep20", "jd-la-xnon-pt52-nt90-llnon-lcnon-ee00002-ep200", ] MODEL_NAMES = MODEL_NAMES_LA # "triplet-jd-la-2000-55-70-40", "lstm-ae", "triplet-la", "triplet-jd-la-3000-55-70-55-30", "triplet-jd-la-ma-1500" # ["triplet-jd-la-2000-55-70-40","triplet-jd-la-3000-55-70-55-30","triplet-jd-la-3000-50-70-55-30"] # [ "triplet-jd-la-1500-055-70-40", 
"triplet-jd-la-1500-55-75-40"] # "triplet-jd-la-1500-060-70-40", "triplet-jd-la-1500-055-70-40", "triplet-jd-la-1500-050-70-40", # ["triplet-jd-la-1500-050-065-25", "triplet-jd-la-1500-065-070-25", "triplet-jd-la-1500-060-060-25"] #["triplet-jd-la-ma-500-02-03","triplet-jd-la-ma-750","triplet-jd-la-ma-1000", # "#4169e1", # "triplet-jd-la-ma-1500", # "triplet-jaccard","triplet-jaccard-margin", # "triplet-label", # all labels # "lstm-ae"] COLORS = h.get_N_HexCol(len(MODEL_NAMES)+1) fac_n = np.arange(0, 1.0, 0.0005) baseline_valid_accepts = h.load_from_json("data/ji_%s_basline-jaccard_valid.json"%LOG_NAME) baseline_false_accepts = [np.round(f,5) for f in h.load_from_json("data/ji_%s_basline-jaccard_false.json"%LOG_NAME)] interpolated_vac = interp1d(baseline_false_accepts, baseline_valid_accepts) vac_n = interpolated_vac(fac_n) auc_score = auc(fac_n, vac_n) plt.plot( fac_n, interpolated_vac(fac_n), color='r', label="%0.3f,baseline"%auc_score) plt.xscale("log") for i, model_name in enumerate(MODEL_NAMES): #print(model_name) valid_accepts = h.load_from_json("data/%s_%s_valid.json"%(model_name, LOG_NAME)) false_accepts = h.load_from_json("data/%s_%s_false.json"%(model_name, LOG_NAME)) interpolated_vac = interp1d(false_accepts, valid_accepts) auc_score = auc(fac_n, interpolated_vac(fac_n)) plt.plot( fac_n, interpolated_vac(fac_n) , color=COLORS[i+1], label="%0.3f, %s"%(auc_score, model_name)) plt.title("VAR/FAR (%s) LA"%LOG_NAME) plt.xlabel('FAR') plt.ylabel('VAL') plt.legend(loc='lower right') plt.show() #plt.savefig("%s/roc_%s.png"%(VIZUALIZATIONS_DIR, LOG_NAME)) ```
github_jupyter
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ourownstory/neural_prophet/blob/master/example_notebooks/sub_daily_data_yosemite_temps.ipynb) # Sub-daily data NeuralProphet can make forecasts for time series with sub-daily observations by passing in a dataframe with timestamps in the ds column. The format of the timestamps should be `YYYY-MM-DD HH:MM:SS` - see the example csv [here](https://github.com/ourownstory/neural_prophet/blob/master/example_data/yosemite_temps.csv). When sub-daily data are used, daily seasonality will automatically be fit. Here we fit NeuralProphet to data with 5-minute resolution (daily temperatures at Yosemite). ``` if 'google.colab' in str(get_ipython()): !pip install git+https://github.com/ourownstory/neural_prophet.git # may take a while #!pip install neuralprophet # much faster, but may not have the latest upgrades/bugfixes data_location = "https://raw.githubusercontent.com/ourownstory/neural_prophet/master/" else: data_location = "../" import pandas as pd from neuralprophet import NeuralProphet, set_log_level # set_log_level("ERROR") df = pd.read_csv(data_location + "example_data/yosemite_temps.csv") ``` Now we will attempt to forecast the next 7 days. The `5min` data resulution means that we have `60/5*24=288` daily values. Thus, we want to forecast `7*288` periods ahead. Using some common sense, we set: * First, we disable weekly seasonality, as nature does not follow the human week's calendar. * Second, we disable changepoints, as the dataset only contains two months of data ``` m = NeuralProphet( n_changepoints=0, weekly_seasonality=False, ) metrics = m.fit(df, freq='5min') future = m.make_future_dataframe(df, periods=7*288, n_historic_predictions=len(df)) forecast = m.predict(future) fig = m.plot(forecast) # fig_comp = m.plot_components(forecast) fig_param = m.plot_parameters() ``` The daily seasonality seems to make sense, when we account for the time being recorded in GMT, while Yosemite local time is GMT-8. ## Improving trend and seasonality As we have `288` daily values recorded, we can increase the flexibility of `daily_seasonality`, without danger of overfitting. Further, we may want to re-visit our decision to disable changepoints, as the data clearly shows changes in trend, as is typical with the weather. We make the following changes: * increase the `changepoints_range`, as the we are doing a short-term prediction * inrease the `n_changepoints` to allow to fit to the sudden changes in trend * carefully regularize the trend changepoints by setting `trend_reg` in order to avoid overfitting ``` m = NeuralProphet( changepoints_range=0.95, n_changepoints=50, trend_reg=1.5, weekly_seasonality=False, daily_seasonality=10, ) metrics = m.fit(df, freq='5min') future = m.make_future_dataframe(df, periods=60//5*24*7, n_historic_predictions=len(df)) forecast = m.predict(future) fig = m.plot(forecast) # fig_comp = m.plot_components(forecast) fig_param = m.plot_parameters() ```
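If you would rather see the daily seasonality in local time instead of GMT, the timestamps can be shifted before fitting; a minimal sketch assuming a fixed GMT-8 offset (daylight saving ignored) and the `ds` column format used above:

```
# Shift the timestamps from GMT to Yosemite local time (GMT-8)
df_local = df.copy()
df_local["ds"] = pd.to_datetime(df_local["ds"]) - pd.Timedelta(hours=8)
df_local.head()
```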
github_jupyter
<center> <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" /> </center> # Classes and Objects in Python Estimated time needed: **40** minutes ## Objectives After completing this lab you will be able to: - Work with classes and objects - Identify and define attributes and methods <h2>Table of Contents</h2> <div class="alert alert-block alert-info" style="margin-top: 20px"> <ul> <li> <a href="#intro">Introduction to Classes and Objects</a> <ul> <li><a href="create">Creating a class</a></li> <li><a href="instance">Instances of a Class: Objects and Attributes</a></li> <li><a href="method">Methods</a></li> </ul> </li> <li><a href="creating">Creating a class</a></li> <li><a href="circle">Creating an instance of a class Circle</a></li> <li><a href="rect">The Rectangle Class</a></li> </ul> </div> <hr> <h2 id="intro">Introduction to Classes and Objects</h2> <h3>Creating a Class</h3> The first part of creating a class is giving it a name: In this notebook, we will create two classes, Circle and Rectangle. We need to determine all the data that make up that class, and we call that an attribute. Think about this step as creating a blue print that we will use to create objects. In figure 1 we see two classes, circle and rectangle. Each has their attributes, they are variables. The class circle has the attribute radius and color, while the rectangle has the attribute height and width. Let’s use the visual examples of these shapes before we get to the code, as this will help you get accustomed to the vocabulary. <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesClass.png" width="500" /> <i>Figure 1: Classes circle and rectangle, and each has their own attributes. The class circle has the attribute radius and colour, the rectangle has the attribute height and width.</i> <h3 id="instance">Instances of a Class: Objects and Attributes</h3> An instance of an object is the realisation of a class, and in Figure 2 we see three instances of the class circle. We give each object a name: red circle, yellow circle and green circle. Each object has different attributes, so let's focus on the attribute of colour for each object. <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesObj.png" width="500" /> <i>Figure 2: Three instances of the class circle or three objects of type circle.</i> The colour attribute for the red circle is the colour red, for the green circle object the colour attribute is green, and for the yellow circle the colour attribute is yellow. <h3 id="method">Methods</h3> Methods give you a way to change or interact with the object; they are functions that interact with objects. For example, let’s say we would like to increase the radius by a specified amount of a circle. We can create a method called **add_radius(r)** that increases the radius by **r**. This is shown in figure 3, where after applying the method to the "orange circle object", the radius of the object increases accordingly. The “dot” notation means to apply the method to the object, which is essentially applying a function to the information in the object. 
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesMethod.png" width="500" /> <i>Figure 3: Applying the method “add_radius” to the object orange circle object.</i> <hr> <h2 id="creating">Creating a Class</h2> Now we are going to create a class circle, but first, we are going to import a library to draw the objects: ``` # Import the library import matplotlib.pyplot as plt %matplotlib inline ``` The first step in creating your own class is to use the <code>class</code> keyword, then the name of the class as shown in Figure 4. In this course the class parent will always be object: <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesDefine.png" width="400" /> <i>Figure 4: Creating a class Circle.</i> The next step is a special method called a constructor <code>__init__</code>, which is used to initialize the object. The input are data attributes. The term <code>self</code> contains all the attributes in the set. For example the <code>self.color</code> gives the value of the attribute color and <code>self.radius</code> will give you the radius of the object. We also have the method <code>add_radius()</code> with the parameter <code>r</code>, the method adds the value of <code>r</code> to the attribute radius. To access the radius we use the syntax <code>self.radius</code>. The labeled syntax is summarized in Figure 5: <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesCircle.png" width="600" /> <i>Figure 5: Labeled syntax of the object circle.</i> The actual object is shown below. We include the method <code>drawCircle</code> to display the image of a circle. We set the default radius to 3 and the default colour to blue: ``` # Create a class Circle class Circle(object): # Constructor def __init__(self, radius=3, color='blue'): self.radius = radius self.color = color # Method def add_radius(self, r): self.radius = self.radius + r return(self.radius) # Method def drawCircle(self): plt.gca().add_patch(plt.Circle((0, 0), radius=self.radius, fc=self.color)) plt.axis('scaled') plt.show() ``` <hr> <h2 id="circle">Creating an instance of a class Circle</h2> Let’s create the object <code>RedCircle</code> of type Circle to do the following: ``` # Create an object RedCircle RedCircle = Circle(10, 'red') ``` We can use the <code>dir</code> command to get a list of the object's methods. Many of them are default Python methods. ``` # Find out the methods can be used on the object RedCircle dir(RedCircle) ``` We can look at the data attributes of the object: ``` # Print the object attribute radius RedCircle.radius # Print the object attribute color RedCircle.color ``` We can change the object's data attributes: ``` # Set the object attribute radius RedCircle.radius = 1 RedCircle.radius ``` We can draw the object by using the method <code>drawCircle()</code>: ``` # Call the method drawCircle RedCircle.drawCircle() ``` We can increase the radius of the circle by applying the method <code>add_radius()</code>. 
Let increases the radius by 2 and then by 5: ``` # Use method to change the object attribute radius print('Radius of object:',RedCircle.radius) RedCircle.add_radius(2) print('Radius of object of after applying the method add_radius(2):',RedCircle.radius) RedCircle.add_radius(5) print('Radius of object of after applying the method add_radius(5):',RedCircle.radius) RedCircle.add_radius(6) print('Radius of object of after applying the method add radius(6):',RedCircle.radius) ``` Let’s create a blue circle. As the default colour is blue, all we have to do is specify what the radius is: ``` # Create a blue circle with a given radius BlueCircle = Circle(radius=100) ``` As before we can access the attributes of the instance of the class by using the dot notation: ``` # Print the object attribute radius BlueCircle.radius # Print the object attribute color BlueCircle.color ``` We can draw the object by using the method <code>drawCircle()</code>: ``` # Call the method drawCircle BlueCircle.drawCircle() ``` Compare the x and y axis of the figure to the figure for <code>RedCircle</code>; they are different. <hr> <h2 id="rect">The Rectangle Class</h2> Let's create a class rectangle with the attributes of height, width and color. We will only add the method to draw the rectangle object: ``` # Create a new Rectangle class for creating a rectangle object class Rectangle(object): # Constructor def __init__(self, width=2, height=3, color='r'): self.height = height self.width = width self.color = color # Method def drawRectangle(self): plt.gca().add_patch(plt.Rectangle((0, 0), self.width, self.height ,fc=self.color)) plt.axis('scaled') plt.show() ``` Let’s create the object <code>SkinnyBlueRectangle</code> of type Rectangle. Its width will be 2 and height will be 3, and the color will be blue: ``` # Create a new object rectangle SkinnyBlueRectangle = Rectangle(2, 10, 'blue') ``` As before we can access the attributes of the instance of the class by using the dot notation: ``` # Print the object attribute height SkinnyBlueRectangle.height # Print the object attribute width SkinnyBlueRectangle.width # Print the object attribute color SkinnyBlueRectangle.color ``` We can draw the object: ``` # Use the drawRectangle method to draw the shape SkinnyBlueRectangle.drawRectangle() ``` Let’s create the object <code>FatYellowRectangle</code> of type Rectangle : ``` # Create a new object rectangle FatYellowRectangle = Rectangle(20, 5, 'yellow') ``` We can access the attributes of the instance of the class by using the dot notation: ``` # Print the object attribute height FatYellowRectangle.height # Print the object attribute width FatYellowRectangle.width # Print the object attribute color FatYellowRectangle.color ``` We can draw the object: ``` # Use the drawRectangle method to draw the shape FatYellowRectangle.drawRectangle() ``` <hr> <h2 id="rect">Exercises</h2> <h4> Text Analysis </h4> You have been recruited by your friend, a linguistics enthusiast, to create a utility tool that can perform analysis on a given piece of text. Complete the class 'analysedText' with the following methods - <ul> <li> Constructor - Takes argument 'text',makes it lower case and removes all punctuation. Assume only the following punctuation is used - period (.), exclamation mark (!), comma (,) and question mark (?). Store the argument in "fmtText" <li> freqAll - returns a dictionary of all unique words in the text along with the number of their occurences. <li> freqOf - returns the frequency of the word passed in argument. 
</ul> The skeleton code has been given to you. Docstrings can be ignored for the purpose of the exercise. <br> <i> Hint: Some useful functions are <code>replace()</code>, <code>lower()</code>, <code>split()</code>, <code>count()</code> </i><br> ``` class analysedText(object): def __init__ (self, text): reArrText = text.lower() reArrText = reArrText.replace('.','').replace('!','').replace(',','').replace('?','') self.fmtText = reArrText def freqAll(self): wordList = self.fmtText.split(' ') freqMap = {} for word in set(wordList): # use set to remove duplicates in list freqMap[word] = wordList.count(word) return freqMap def freqOf(self,word): freqDict = self.freqAll() if word in freqDict: return freqDict[word] else: return 0 ``` Execute the block below to check your progress. ``` import sys sampleMap = {'eirmod': 1,'sed': 1, 'amet': 2, 'diam': 5, 'consetetur': 1, 'labore': 1, 'tempor': 1, 'dolor': 1, 'magna': 2, 'et': 3, 'nonumy': 1, 'ipsum': 1, 'lorem': 2} def testMsg(passed): if passed: return 'Test Passed' else : return 'Test Failed' print("Constructor: ") try: samplePassage = analysedText("Lorem ipsum dolor! diam amet, consetetur Lorem magna. sed diam nonumy eirmod tempor. diam et labore? et diam magna. et diam amet.") print(testMsg(samplePassage.fmtText == "lorem ipsum dolor diam amet consetetur lorem magna sed diam nonumy eirmod tempor diam et labore et diam magna et diam amet")) except: print("Error detected. Recheck your function " ) print("freqAll: ",) try: wordMap = samplePassage.freqAll() print(testMsg(wordMap==sampleMap)) except: print("Error detected. Recheck your function " ) print("freqOf: ") try: passed = True for word in sampleMap: if samplePassage.freqOf(word) != sampleMap[word]: passed = False break print(testMsg(passed)) except: print("Error detected. Recheck your function " ) ``` <details><summary>Click here for the solution</summary> ```python class analysedText(object): def __init__ (self, text): # remove punctuation formattedText = text.replace('.','').replace('!','').replace('?','').replace(',','') # make text lowercase formattedText = formattedText.lower() self.fmtText = formattedText def freqAll(self): # split text into words wordList = self.fmtText.split(' ') # Create dictionary freqMap = {} for word in set(wordList): # use set to remove duplicates in list freqMap[word] = wordList.count(word) return freqMap def freqOf(self,word): # get frequency map freqDict = self.freqAll() if word in freqDict: return freqDict[word] else: return 0 ``` </details> <hr> <h2>The last exercise!</h2> <p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work. 
<hr> ## Author <a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a> ## Other contributors <a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a> ## Change Log | Date (YYYY-MM-DD) | Version | Changed By | Change Description | | ----------------- | ------- | ---------- | ---------------------------------- | | 2020-08-26 | 2.0 | Lavanya | Moved lab to course repo in GitLab | | | | | | | | | | | <hr/> ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
github_jupyter
# LOFO Feature Importance https://github.com/aerdem4/lofo-importance ``` !pip install lofo-importance import numpy as np import pandas as pd df = pd.read_csv("../input/train.csv", index_col='id') df['wheezy-copper-turtle-magic'] = df['wheezy-copper-turtle-magic'].astype('category') df.shape ``` ### Use the best model in public kernels ``` from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis def get_model(): return Pipeline([('scaler', StandardScaler()), ('qda', QuadraticDiscriminantAnalysis(reg_param=0.111)) ]) ``` ### Top 20 Features for wheezy-copper-turtle-magic = 0 ``` from sklearn.model_selection import KFold, StratifiedKFold, train_test_split from sklearn.linear_model import LogisticRegression from lofo import LOFOImportance, FLOFOImportance, plot_importance features = [c for c in df.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']] def get_lofo_importance(wctm_num): sub_df = df[df['wheezy-copper-turtle-magic'] == wctm_num] sub_features = [f for f in features if sub_df[f].std() > 1.5] lofo_imp = LOFOImportance(sub_df, target="target", features=sub_features, cv=StratifiedKFold(n_splits=4, random_state=42, shuffle=True), scoring="roc_auc", model=get_model(), n_jobs=4) return lofo_imp.get_importance() plot_importance(get_lofo_importance(0), figsize=(12, 12)) ``` ### Top 20 Features for wheezy-copper-turtle-magic = 1 ``` plot_importance(get_lofo_importance(1), figsize=(12, 12)) ``` ### Top 20 Features for wheezy-copper-turtle-magic = 2 ``` plot_importance(get_lofo_importance(2), figsize=(12, 12)) ``` ### Find the most harmful features for each wheezy-copper-turtle-magic ``` from tqdm import tqdm_notebook import warnings warnings.filterwarnings("ignore") features_to_remove = [] potential_gain = [] for i in tqdm_notebook(range(512)): imp = get_lofo_importance(i) features_to_remove.append(imp["feature"].values[-1]) potential_gain.append(-imp["importance_mean"].values[-1]) print("Potential gain (AUC):", np.round(np.mean(potential_gain), 5)) features_to_remove ``` # Create submission using the current best kernel https://www.kaggle.com/tunguz/ig-pca-nusvc-knn-qda-lr-stack by Bojan Tunguz ``` import numpy as np, pandas as pd from sklearn.model_selection import StratifiedKFold from sklearn.metrics import roc_auc_score from sklearn import svm, neighbors, linear_model, neural_network from sklearn.svm import NuSVC from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from tqdm import tqdm from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis from sklearn.pipeline import Pipeline from sklearn.metrics import roc_auc_score from sklearn.feature_selection import VarianceThreshold train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') oof_svnu = np.zeros(len(train)) pred_te_svnu = np.zeros(len(test)) oof_svc = np.zeros(len(train)) pred_te_svc = np.zeros(len(test)) oof_knn = np.zeros(len(train)) pred_te_knn = np.zeros(len(test)) oof_lr = np.zeros(len(train)) pred_te_lr = np.zeros(len(test)) oof_mlp = np.zeros(len(train)) pred_te_mlp = np.zeros(len(test)) oof_qda = np.zeros(len(train)) pred_te_qda = np.zeros(len(test)) default_cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']] for i in range(512): cols = [c for c in default_cols if c != features_to_remove[i]] train2 = train[train['wheezy-copper-turtle-magic']==i] test2 = test[test['wheezy-copper-turtle-magic']==i] 
idx1 = train2.index; idx2 = test2.index train2.reset_index(drop=True,inplace=True) data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])]) data2 = StandardScaler().fit_transform(PCA(svd_solver='full',n_components='mle').fit_transform(data[cols])) train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:] data2 = StandardScaler().fit_transform(VarianceThreshold(threshold=1.5).fit_transform(data[cols])) train4 = data2[:train2.shape[0]]; test4 = data2[train2.shape[0]:] # STRATIFIED K FOLD (Using splits=25 scores 0.002 better but is slower) skf = StratifiedKFold(n_splits=5, random_state=42) for train_index, test_index in skf.split(train2, train2['target']): clf = NuSVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=4, nu=0.59, coef0=0.053) clf.fit(train3[train_index,:],train2.loc[train_index]['target']) oof_svnu[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1] pred_te_svnu[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits clf = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9) clf.fit(train3[train_index,:],train2.loc[train_index]['target']) oof_knn[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1] pred_te_knn[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits clf = linear_model.LogisticRegression(solver='saga',penalty='l1',C=0.1) clf.fit(train3[train_index,:],train2.loc[train_index]['target']) oof_lr[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1] pred_te_lr[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits clf = neural_network.MLPClassifier(random_state=3, activation='relu', solver='lbfgs', tol=1e-06, hidden_layer_sizes=(250, )) clf.fit(train3[train_index,:],train2.loc[train_index]['target']) oof_mlp[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1] pred_te_mlp[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits clf = svm.SVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=42) clf.fit(train3[train_index,:],train2.loc[train_index]['target']) oof_svc[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1] pred_te_svc[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits clf = QuadraticDiscriminantAnalysis(reg_param=0.111) clf.fit(train4[train_index,:],train2.loc[train_index]['target']) oof_qda[idx1[test_index]] = clf.predict_proba(train4[test_index,:])[:,1] pred_te_qda[idx2] += clf.predict_proba(test4)[:,1] / skf.n_splits print('lr', roc_auc_score(train['target'], oof_lr)) print('knn', roc_auc_score(train['target'], oof_knn)) print('svc', roc_auc_score(train['target'], oof_svc)) print('svcnu', roc_auc_score(train['target'], oof_svnu)) print('mlp', roc_auc_score(train['target'], oof_mlp)) print('qda', roc_auc_score(train['target'], oof_qda)) print('blend 1', roc_auc_score(train['target'], oof_svnu*0.7 + oof_svc*0.05 + oof_knn*0.2 + oof_mlp*0.05)) print('blend 2', roc_auc_score(train['target'], oof_qda*0.5+oof_svnu*0.35 + oof_svc*0.025 + oof_knn*0.1 + oof_mlp*0.025)) oof_svnu = oof_svnu.reshape(-1, 1) pred_te_svnu = pred_te_svnu.reshape(-1, 1) oof_svc = oof_svc.reshape(-1, 1) pred_te_svc = pred_te_svc.reshape(-1, 1) oof_knn = oof_knn.reshape(-1, 1) pred_te_knn = pred_te_knn.reshape(-1, 1) oof_mlp = oof_mlp.reshape(-1, 1) pred_te_mlp = pred_te_mlp.reshape(-1, 1) oof_lr = oof_lr.reshape(-1, 1) pred_te_lr = pred_te_lr.reshape(-1, 1) oof_qda = oof_qda.reshape(-1, 1) pred_te_qda = pred_te_qda.reshape(-1, 1) tr = np.concatenate((oof_svnu, oof_svc, oof_knn, oof_mlp, oof_lr, oof_qda), axis=1) te = np.concatenate((pred_te_svnu, pred_te_svc, 
pred_te_knn, pred_te_mlp, pred_te_lr, pred_te_qda), axis=1) print(tr.shape, te.shape) oof_lrr = np.zeros(len(train)) pred_te_lrr = np.zeros(len(test)) skf = StratifiedKFold(n_splits=5, random_state=42) for train_index, test_index in skf.split(tr, train['target']): lrr = linear_model.LogisticRegression() lrr.fit(tr[train_index], train['target'][train_index]) oof_lrr[test_index] = lrr.predict_proba(tr[test_index,:])[:,1] pred_te_lrr += lrr.predict_proba(te)[:,1] / skf.n_splits print('stack CV score =',round(roc_auc_score(train['target'],oof_lrr),6)) sub = pd.read_csv('../input/sample_submission.csv') sub['target'] = pred_te_lrr sub.to_csv('submission_stack.csv', index=False) ```
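As an aside, the leave-one-feature-out idea behind the `lofo-importance` package can be written out by hand; the sketch below uses synthetic data and is only meant to illustrate the mechanism, not to reproduce the package:

```
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# Toy data standing in for a real dataset
X_toy, y_toy = make_classification(n_samples=500, n_features=6, random_state=0)
X_toy = pd.DataFrame(X_toy, columns=[f"f{i}" for i in range(6)])

def cv_auc(cols):
    # mean cross-validated ROC AUC using only the given columns
    return cross_val_score(LogisticRegression(max_iter=1000),
                           X_toy[cols], y_toy, cv=4, scoring="roc_auc").mean()

baseline = cv_auc(list(X_toy.columns))
for col in X_toy.columns:
    importance = baseline - cv_auc([c for c in X_toy.columns if c != col])
    print(col, round(importance, 4))  # positive -> removing the feature hurts the score
```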
github_jupyter
# End-to-end learning for music audio - http://qiita.com/himono/items/a94969e35fa8d71f876c ``` # データのダウンロード wget http://mi.soi.city.ac.uk/datasets/magnatagatune/mp3.zip.001 wget http://mi.soi.city.ac.uk/datasets/magnatagatune/mp3.zip.002 wget http://mi.soi.city.ac.uk/datasets/magnatagatune/mp3.zip.003 # 結合 cat data/mp3.zip.* > data/music.zip # 解凍 unzip data/music.zip -d music ``` ``` %matplotlib inline import os import matplotlib.pyplot as plt ``` ## MP3ファイルのロード ``` import numpy as np from pydub import AudioSegment def mp3_to_array(file): # MP3 => RAW song = AudioSegment.from_mp3(file) song_arr = np.fromstring(song._data, np.int16) return song_arr %ls data/music/1/ambient_teknology-phoenix-01-ambient_teknology-0-29.mp3 file = 'data/music/1/ambient_teknology-phoenix-01-ambient_teknology-0-29.mp3' song = mp3_to_array(file) plt.plot(song) ``` ## 楽曲タグデータをロード - ランダムに3000曲を抽出 - よく使われるタグ50個を抽出 - 各曲には複数のタグがついている ``` import pandas as pd tags_df = pd.read_csv('data/annotations_final.csv', delim_whitespace=True) # 全体をランダムにサンプリング tags_df = tags_df.sample(frac=1) # 最初の3000曲を使う tags_df = tags_df[:3000] tags_df top50_tags = tags_df.iloc[:, 1:189].sum().sort_values(ascending=False).index[:50].tolist() y = tags_df[top50_tags].values y ``` ## 楽曲データをロード - tags_dfのmp3_pathからファイルパスを取得 - mp3_to_array()でnumpy arrayをロード - (samples, features, channels) になるようにreshape - 音声波形は1次元なのでchannelsは1 - 訓練データはすべて同じサイズなのでfeaturesは同じになるはず(パディング不要) ``` files = tags_df.mp3_path.values files = [os.path.join('data', 'music', x) for x in files] X = np.array([mp3_to_array(file) for file in files]) X = X.reshape(X.shape[0], X.shape[1], 1) X.shape ``` ## 訓練データとテストデータに分割 ``` from sklearn.model_selection import train_test_split random_state = 42 train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=random_state) print(train_x.shape) print(test_x.shape) print(train_y.shape) print(test_y.shape) plt.plot(train_x[0]) np.save('train_x.npy', train_x) np.save('test_x.npy', test_x) np.save('train_y.npy', train_y) np.save('test_y.npy', test_y) ``` ## 訓練 ``` import numpy as np from keras.models import Model from keras.layers import Dense, Flatten, Input, Conv1D, MaxPooling1D from keras.callbacks import CSVLogger, ModelCheckpoint train_x = np.load('train_x.npy') train_y = np.load('train_y.npy') test_x = np.load('test_x.npy') test_y = np.load('test_y.npy') print(train_x.shape) print(train_y.shape) print(test_x.shape) print(test_y.shape) features = train_x.shape[1] x_inputs = Input(shape=(features, 1), name='x_inputs') x = Conv1D(128, 256, strides=256, padding='valid', activation='relu')(x_inputs) # strided conv x = Conv1D(32, 8, activation='relu')(x) x = MaxPooling1D(4)(x) x = Conv1D(32, 8, activation='relu')(x) x = MaxPooling1D(4)(x) x = Conv1D(32, 8, activation='relu')(x) x = MaxPooling1D(4)(x) x = Conv1D(32, 8, activation='relu')(x) x = MaxPooling1D(4)(x) x = Flatten()(x) x = Dense(100, activation='relu')(x) x_outputs = Dense(50, activation='sigmoid', name='x_outputs')(x) model = Model(inputs=x_inputs, outputs=x_outputs) model.compile(optimizer='adam', loss='categorical_crossentropy') logger = CSVLogger('history.log') checkpoint = ModelCheckpoint( 'model.{epoch:02d}-{val_loss:.3f}.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto') model.fit(train_x, train_y, batch_size=600, epochs=50, validation_data=[test_x, test_y], callbacks=[logger, checkpoint]) ``` ## 予測 - taggerは複数のタグを出力するのでevaluate()ではダメ? 
``` import numpy as np from keras.models import load_model from sklearn.metrics import roc_auc_score test_x = np.load('test_x.npy') test_y = np.load('test_y.npy') model = load_model('model.22-9.187-0.202.h5') pred_y = model.predict(test_x, batch_size=50) print(roc_auc_score(test_y, pred_y)) print(model.evaluate(test_x, test_y)) ```
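On the question raised above (the tagger outputs many tags, so a single `evaluate()` number is not very informative), `roc_auc_score` can also return one score per tag; a short sketch reusing `test_y` and `pred_y` from the cell above, assuming every tag occurs at least once in the test set:

```
from sklearn.metrics import roc_auc_score

# Per-tag ROC AUC: one score per tag instead of a single aggregate number
# (roc_auc_score raises an error for any tag that never occurs in test_y)
per_tag_auc = roc_auc_score(test_y, pred_y, average=None)
print(per_tag_auc.shape)  # expected: (50,)
print(per_tag_auc.min(), per_tag_auc.mean(), per_tag_auc.max())
```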
github_jupyter
<table> <tr> <td style="background-color:#ffffff;"> <a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td> <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;"> prepared by <a href="http://abu.lu.lv" target="_blank">Abuzer Yakaryilmaz</a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>) </td> </tr></table> <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ <h2> <font color="blue"> Solutions for </font>Probabilistic Bit</h2> <a id="task2"></a> <h3> Task 2 </h3> Suppose that Fyodor hiddenly rolls a loaded (tricky) dice with the bias $$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$ Represent your information on the result as a column vector. Remark that the size of your column should be 6. You may use python for your calculations. <h3>Solution</h3> ``` # all portions are stored in a list all_portions = [7,5,4,2,6,1]; # let's calculate the total portion total_portion = 0 for i in range(6): total_portion = total_portion + all_portions[i] print("total portion is",total_portion) # find the weight of one portion one_portion = 1/total_portion print("the weight of one portion is",one_portion) print() # print an empty line # now we can calculate the probabilities of rolling 1,2,3,4,5, and 6 for i in range(6): print("the probability of rolling",(i+1),"is",(one_portion*all_portions[i])) ```
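To make the requested column-vector representation explicit, a small follow-up sketch that reuses `one_portion` and `all_portions` from the cell above:

```
# represent the probabilities of rolling 1,...,6 as a column vector (one row per entry)
column_vector = [[one_portion * all_portions[i]] for i in range(6)]
for row in column_vector:
    print(row)
```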
github_jupyter
``` import os import pickle from neutrinomass.completions import EffectiveOperator, Completion from neutrinomass.database import ExoticField from neutrinomass.database import ModelDataFrame, EXOTICS, TERMS, MVDF from neutrinomass.completions import EFF_OPERATORS from neutrinomass.completions import DERIV_EFF_OPERATORS DATA_PATH = "/home/garj/work/neutrinomass/neutrinomass/database" DATA = pickle.load(open(os.path.join(DATA_PATH, "unfiltered.p"), "rb")) UNF = ModelDataFrame.new(data=DATA, exotics=EXOTICS, terms=TERMS) STR_UNF = UNF.drop_duplicates(["stringent_num"], keep="first") LAGS = len(STR_UNF) print(f"Number of neutrino-mass mechanisms: {LAGS}") DEMO_UNF = UNF.drop_duplicates(["democratic_num"], keep="first") MODELS = len(DEMO_UNF) print(f"Number of models: {MODELS}") STR_MVDF = MVDF.drop_duplicates(["stringent_num"], keep="first") print(f"Number of filtered neutrino-mass mechanisms: {len(STR_MVDF)}") DEMO_MVDF = MVDF.drop_duplicates(["democratic_num"], keep="first") print(f"Number of filtered neutrino-mass mechanisms: {len(DEMO_MVDF)}") FIL_DF = MVDF.drop_duplicates(['democratic_num', 'dim'], keep="first") UNF_DF = UNF.drop_duplicates(['democratic_num', 'dim'], keep="first") print(f"After filtering, there are {len(FIL_DF[FIL_DF['dim'] == 5])} models derived from dimension-5 operators.") print(f"After filtering, there are {len(FIL_DF[FIL_DF['dim'] == 9])} models derived from dimension-9 operators.") print(f"After filtering, there are {len(FIL_DF[FIL_DF['dim'] == 11])} models derived from dimension-11 operators.") print(f"The total of these is {len(FIL_DF[FIL_DF['dim'] == 5]) + len(FIL_DF[FIL_DF['dim'] == 9]) + len(FIL_DF[FIL_DF['dim'] == 11])}") OPS = {**EFF_OPERATORS, **DERIV_EFF_OPERATORS} labels, total, demo, dimensions = [], [], [], [] for k in OPS: labels.append(k) total.append(len(UNF_DF[UNF_DF["op"] == k])) demo.append(len(FIL_DF[FIL_DF["op"] == k])) dimensions.append(OPS[k].mass_dimension) NHL = STR_UNF.terms[("F,00,0,0,0", "H", "L")] NHSigma = STR_UNF.terms[("F,00,2,0,0", "H", "L")] HHXi1 = STR_UNF.terms[("H", "H", "S,00,2,-1,0")] LLXi1 = STR_UNF.terms[("L", "L", "S,00,2,1,0")] N = STR_UNF.exotics["F,00,0,0,0"] Sigma = STR_UNF.exotics["F,00,2,0,0"] Xi1 = STR_UNF.exotics["S,00,2,1,0"] N_NHL_lags = len(STR_UNF[STR_UNF["stringent_num"] % NHL == 0]) N_other_lags = len(STR_UNF[(STR_UNF["stringent_num"] % NHL != 0) & (STR_UNF["democratic_num"] % N == 0)]) Sigma_NHSigma_lags = len(STR_UNF[STR_UNF["stringent_num"] % NHSigma == 0]) Sigma_other_lags = len(STR_UNF[(STR_UNF["stringent_num"] % NHSigma != 0) & (STR_UNF["democratic_num"] % Sigma == 0)]) Xi1_HHXi1_lags = len(STR_UNF[STR_UNF["stringent_num"] % HHXi1 == 0]) Xi1_LLXi1_lags = len(STR_UNF[STR_UNF["stringent_num"] % LLXi1 == 0]) Xi1_both_lags = len(STR_UNF[(STR_UNF["stringent_num"] % HHXi1 == 0) & (STR_UNF["stringent_num"] % LLXi1 == 0)]) Xi1_other_lags = len(STR_UNF[(STR_UNF["stringent_num"] % HHXi1 != 0) & (STR_UNF["stringent_num"] % LLXi1 != 0) & (STR_UNF["democratic_num"] % Xi1 == 0)]) N_models = len(DEMO_UNF[DEMO_UNF["democratic_num"] % N == 0]) Sigma_models = len(DEMO_UNF[DEMO_UNF["democratic_num"] % Sigma == 0]) Xi1_models = len(DEMO_UNF[DEMO_UNF["democratic_num"] % Xi1 == 0]) # latex table print(r""" \begin{tabular}{ccll} \toprule Field & Interactions & Lagrangians & Collected models \\ \midrule \multirow{2}{*}{$N \sim (\mathbf{1}, \mathbf{1}, 0)_{F}$} & $L H N$ & %s (%s) & \multirow{2}{*}{%s (%s)} \\ & Other & %s (%s) & \\ \midrule \multirow{2}{*}{$\Sigma \sim (\mathbf{1}, \mathbf{3}, 0)_{F}$} & $L H \Sigma$ & %s (%s) & 
\multirow{2}{*}{%s (%s)} \\ & Other & %s (%s) & \\ \midrule \multirow{4}{*}{$\Xi_{1} \sim (\mathbf{1}, \mathbf{3}, 1)_{S}$} & $L L \Xi_{1}$ & %s (%s) & \multirow{4}{*}{%s (%s)} \\ & $H H \Xi_{1}^{\dagger}$ & %s (%s) & \\ & Both & %s (%s) & \\ & Other & %s (%s) & \\ \bottomrule \end{tabular} """ % ( f"{N_NHL_lags:,}", f"{100 * N_NHL_lags / LAGS:.1f}\%", f"{N_models:,}", f"{100 * N_models / MODELS:.1f}\%", f"{N_other_lags:,}", f"{100 * N_other_lags / LAGS:.1f}\%", f"{Sigma_NHSigma_lags:,}", f"{100 * Sigma_NHSigma_lags / LAGS:.1f}\%", f"{Sigma_models:,}", f"{100 * Sigma_models / MODELS:.1f}\%", f"{Sigma_other_lags:,}", f"{100 * Sigma_other_lags / LAGS:.1f}\%", f"{Xi1_LLXi1_lags:,}", f"{100 * Xi1_LLXi1_lags / LAGS:.1f}\%", f"{Xi1_models:,}", f"{100 * Xi1_models / MODELS:.1f}\%", f"{Xi1_HHXi1_lags:,}", f"{100 * Xi1_HHXi1_lags / LAGS:.1f}\%", f"{Xi1_both_lags:,}", f"{100 * Xi1_both_lags / LAGS:.1f}\%", f"{Xi1_other_lags:,}", f"{100 * Xi1_other_lags / LAGS:.1f}\%", ) ) import matplotlib.pyplot as plt import seaborn as sns import pandas as pd SMALL_SIZE = 15 MEDIUM_SIZE = 20 BIGGER_SIZE = 20 plt.rc('font', size=SMALL_SIZE) # controls default text sizes plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title plt.tight_layout() plt.rcParams.update({ "text.usetex": True, "font.family": "serif", "font.serif": ["Computer Modern Roman"]} ) sns.set_palette("muted") latex_labels = [] for l in labels: if "pp" in l: new_l = l.replace("pp", "^{\prime\prime}") elif "p" in l: new_l = l.replace("p", "^\prime") else: new_l = l latex_labels.append("$" + new_l + "$") filter_bar_df = pd.DataFrame(data={ "Operator": latex_labels, "Unfiltered": total, "Democratic": demo, "Dimension": dimensions }) demo_5 = sum(filter_bar_df[filter_bar_df["Dimension"] == 5]["Democratic"]) demo_7 = sum(filter_bar_df[filter_bar_df["Dimension"] == 7]["Democratic"]) demo_9 = sum(filter_bar_df[filter_bar_df["Dimension"] == 9]["Democratic"]) demo_11 = sum(filter_bar_df[filter_bar_df["Dimension"] == 11]["Democratic"]) unf_5 = sum(filter_bar_df[filter_bar_df["Dimension"] == 5]["Unfiltered"]) unf_7 = sum(filter_bar_df[filter_bar_df["Dimension"] == 7]["Unfiltered"]) unf_9 = sum(filter_bar_df[filter_bar_df["Dimension"] == 9]["Unfiltered"]) unf_11 = sum(filter_bar_df[filter_bar_df["Dimension"] == 11]["Unfiltered"]) barplot_df = pd.DataFrame( {'Dimension': [5, 7, 9, 11], 'Democratic': [demo_5, demo_7, demo_9, demo_11], 'Unfiltered': [unf_5-demo_5, unf_7-demo_7, unf_9-demo_9, unf_11-demo_11]} ) ax = barplot_df.plot.bar(x="Dimension", stacked=True, rot=0) ax.set_yscale("log") ax.set_ylabel("Number of models") plt.tight_layout() plt.savefig("/home/garj/filter_barchart_dimension.pdf") plt.savefig("/home/garj/filter_barchart_dimension.png") ops_filter_bar_df = filter_bar_df[filter_bar_df["Dimension"] < 11] f, ax = plt.subplots(figsize=(7, 10)) sns.barplot(x="Unfiltered", y="Operator", data=ops_filter_bar_df, label="Unfiltered", color=sns.color_palette()[1]) sns.barplot(x="Democratic", y="Operator", data=ops_filter_bar_df, label="Democratic", color=sns.color_palette()[0]) ax.set_xscale("log") ax.legend(ncol=2, loc="upper right", frameon=True) ax.set(xlim=(0, 10000), ylabel="Operator", xlabel="Number of models") 
ax.text(x=2000, y=7, s="$d < 11$", fontsize=20) for tick in ax.yaxis.get_major_ticks()[1::2]: tick.set_pad(40) plt.tight_layout() plt.savefig("/home/garj/filter_barchart_operators579.pdf") plt.savefig("/home/garj/filter_barchart_operators579.png") import seaborn as sns import matplotlib.pyplot as plt ops_filter_bar_df = filter_bar_df[filter_bar_df["Dimension"] == 11] f, ax = plt.subplots(figsize=(7, 15)) sns.barplot(x="Unfiltered", y="Operator", data=ops_filter_bar_df, label="Unfiltered", color=sns.color_palette()[1]) sns.barplot(x="Democratic", y="Operator", data=ops_filter_bar_df, label="Democratic", color=sns.color_palette()[0]) ax.set_xscale("log") ax.legend(ncol=2, loc="upper right", frameon=True) ax.set(xlim=(0, 100000), ylabel="Operator", xlabel="Number of models") ax.text(x=12000, y=9, s="$d = 11$", fontsize=22) for tick in ax.yaxis.get_major_ticks()[1::2]: tick.set_pad(40) plt.tight_layout() plt.savefig("/home/garj/filter_barchart_operators11.pdf") plt.savefig("/home/garj/filter_barchart_operators11.png") ```
github_jupyter
``` from IPython.display import HTML # Cell visibility - COMPLETE: #tag = HTML('''<style> #div.input { # display:none; #} #</style>''') #display(tag) #Cell visibility - TOGGLE: tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> <p style="text-align:right"> Promijeni vidljivost <a href="javascript:code_toggle()">ovdje</a>.</p>''') display(tag) ``` ## Kompleksni brojevi u polarnom obliku U ovome interaktivnom primjeru, kompleksni brojevi se vizualiziraju u kompleksnoj ravnini, a određuju se koristeći polarni oblik. Kompleksni brojevi se, dakle, određuju modulom (duljinom odgovarajućeg vektora) i argumentom (kutom odgovarajućeg vektora). Možete testirati osnovne matematičke operacije nad kompleksnim brojevima: zbrajanje, oduzimanje, množenje i dijeljenje. Svi se rezultati prikazuju na odgovarajućem grafu, kao i u matematičkoj notaciji zasnovanoj na polarnom obliku kompleksnog broja. Kompleksnim brojevima možete manipulirati izravno na grafu (jednostavnim klikom) i / ili istovremeno koristiti odgovarajuća polja za unos modula i argumenta. Kako bi se osigurala bolja vidljivost vektora na grafu, modul kompleksnog broja je ograničen na $\pm10$. ``` %matplotlib notebook import matplotlib.pyplot as plt import matplotlib.patches as mpatches import numpy as np import ipywidgets as widgets from IPython.display import display from IPython.display import HTML import math red_patch = mpatches.Patch(color='red', label='z1') blue_patch = mpatches.Patch(color='blue', label='z2') green_patch = mpatches.Patch(color='green', label='z1 + z2') yellow_patch = mpatches.Patch(color='yellow', label='z1 - z2') black_patch = mpatches.Patch(color='black', label='z1 * z2') magenta_patch = mpatches.Patch(color='magenta', label='z1 / z2') # Init values XLIM = 5 YLIM = 5 vectors_index_first = False; V = [None, None] V_complex = [None, None] # Complex plane fig = plt.figure(num='Kompleksni brojevi u polarnom obliku') ax = fig.add_subplot(1, 1, 1) def get_interval(lim): if lim <= 10: return 1 if lim < 75: return 5 if lim > 100: return 25 return 10 def set_ticks(): XLIMc = int((XLIM / 10) + 1) * 10 YLIMc = int((YLIM / 10) + 1) * 10 if XLIMc > 150: XLIMc += 10 if YLIMc > 150: YLIMc += 10 xstep = get_interval(XLIMc) ystep = get_interval(YLIMc) #print(stepx, stepy) major_ticks = np.arange(-XLIMc, XLIMc, xstep) major_ticks_y = np.arange(-YLIMc, YLIMc, ystep) ax.set_xticks(major_ticks) ax.set_yticks(major_ticks_y) ax.grid(which='both') def clear_plot(): plt.cla() set_ticks() ax.set_xlabel('Re') ax.set_ylabel('Im') plt.ylim([-YLIM, YLIM]) plt.xlim([-XLIM, XLIM]) plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch, black_patch, magenta_patch]) clear_plot() set_ticks() plt.show() set_ticks() # Conversion functions def com_to_trig(real, im): r = math.sqrt(real**2 + im**2) if abs(real) <= 1e-6 and im > 0: arg = 90 return r, arg if abs(real) < 1e-6 and im < 0: arg = 270 return r, arg if abs(im) < 1e-6 and real > 0: arg = 0 return r, arg if abs(im) < 1e-6 and real < 0: arg = 180 return r, arg if im != 0 and real !=0: arg = np.arctan(im / real) * 180 / np.pi if im > 0 and real < 0: arg += 180 if im < 0 and real > 0: arg +=360 if im < 0 and real < 0: arg += 180 return r, arg if abs(im) < 1e-6 and abs(real) < 1e-6: arg = 0 return r, arg def trig_to_com(r, arg): re = r * np.cos(arg * np.pi / 180.) im = r * np.sin(arg * np.pi / 180.) 
return (re, im) # Set a complex number using direct manipulation on the plot def set_vector(i, data_x, data_y): clear_plot() V.pop(i) V.insert(i, (0, 0, round(data_x, 2), round(data_y, 2))) V_complex.pop(i) V_complex.insert(i, complex(round(data_x, 2), round(data_y, 2))) if i == 0: ax.arrow(*V[0], head_width=0.25, head_length=0.5, color="r", length_includes_head=True) z, arg = com_to_trig(data_x, data_y) a1.value = round(z, 2) b1.value = round(arg, 2) if V[1] != None: ax.arrow(*V[1], head_width=0.25, head_length=0.5, color="b", length_includes_head=True) elif i == 1: ax.arrow(*V[1], head_width=0.25, head_length=0.5, color="b", length_includes_head=True) z, arg = com_to_trig(data_x, data_y) a2.value = round(z, 2) b2.value = round(arg, 2) if V[0] != None: ax.arrow(*V[0], head_width=0.25, head_length=0.5, color="r", length_includes_head=True) max_bound() def onclick(event): global vectors_index_first vectors_index_first = not vectors_index_first x = event.xdata y = event.ydata if (x > 10): x = 10.0 if (x < - 10): x = -10.0 if (y > 10): y = 10.0 if (y < - 10): y = -10.0 if vectors_index_first: set_vector(0, x, y) else: set_vector(1, x, y) fig.canvas.mpl_connect('button_press_event', onclick) # Widgets a1 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 10, step = 0.5) b1 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 360, step = 10) button_set_z1 = widgets.Button(description="Prikaži z1") a2 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 10, step = 0.5) b2 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 360, step = 10) button_set_z2 = widgets.Button(description="Prikaži z2") box_layout_z1 = widgets.Layout(border='solid red', padding='10px') box_layout_z2 = widgets.Layout(border='solid blue', padding='10px') box_layout_opers = widgets.Layout(border='solid black', padding='10px') items_z1 = [widgets.Label("z1: Duljina (|z1|) = "), a1, widgets.Label("Kut (\u2221)= "), b1, button_set_z1] items_z2 = [widgets.Label("z2: Duljina (|z2|) = "), a2, widgets.Label("Kut (\u2221)= "), b2, button_set_z2] display(widgets.Box(children=items_z1, layout=box_layout_z1)) display(widgets.Box(children=items_z2, layout=box_layout_z2)) button_add = widgets.Button(description="Zbroji") button_substract = widgets.Button(description="Oduzmi") button_multiply = widgets.Button(description="Pomnoži") button_divide = widgets.Button(description="Podijeli") button_reset = widgets.Button(description="Resetiraj") output = widgets.Output() print('Operacije nad kompleksnim brojevima:') items_operations = [button_add, button_substract, button_multiply, button_divide, button_reset] display(widgets.Box(children=items_operations)) display(output) # Set complex number using input widgets (Text and Button) def on_button_set_z1_clicked(b): z1_old = V[0]; re, im = trig_to_com(a1.value, b1.value) z1_new = (0, 0, re, im) if z1_old != z1_new: set_vector(0, re, im) change_lims() def on_button_set_z2_clicked(b): z2_old = V[1]; re, im = trig_to_com(a2.value, b2.value) z2_new = (0, 0, re, im) if z2_old != z2_new: set_vector(1, re, im) change_lims() # Complex number operations: def perform_operation(oper): global XLIM, YLIM if (V_complex[0] != None) and (V_complex[1] != None): if (oper == '+'): result = V_complex[0] + V_complex[1] v_color = "g" elif (oper == '-'): result = V_complex[0] - V_complex[1] v_color = "y" elif (oper == '*'): result = V_complex[0] * V_complex[1] v_color = "black" elif (oper == '/'): result = 
V_complex[0] / V_complex[1] v_color = "magenta" result = complex(round(result.real, 2), round(result.imag, 2)) ax.arrow(0, 0, result.real, result.imag, head_width=0.25, head_length=0.15, color=v_color, length_includes_head=True) if abs(result.real) > XLIM: XLIM = round(abs(result.real) + 1) if abs(result.imag) > YLIM: YLIM = round(abs(result.imag) + 1) change_lims() with output: z1, ang1 = com_to_trig(V_complex[0].real, V_complex[0].imag ) z2, ang2 = com_to_trig(V_complex[1].real, V_complex[1].imag) z3, ang3 = com_to_trig(result.real, result.imag) z1 = round(z1, 2) ang1 = round(ang1, 2) z2 = round(z2, 2) ang2 = round(ang2, 2) z3 = round(z3, 2) ang3 = round(ang3, 2) print("{}*(cos({}) + i*sin({}))".format(z1,ang1,ang1), oper, "{}*(cos({}) + i*sin({}))".format(z2,ang2,ang2), "=", "{}*(cos({}) + i*sin({}))".format(z3,ang3,ang3)) print('{} \u2221{}'.format(z1, ang1), oper, '{} \u2221{}'.format(z2, ang2), "=", '{} \u2221{}'.format(z3, ang3)) def on_button_add_clicked(b): perform_operation("+") def on_button_substract_clicked(b): perform_operation("-") def on_button_multiply_clicked(b): perform_operation("*") def on_button_divide_clicked(b): perform_operation("/") # Plot init methods def on_button_reset_clicked(b): global V, V_complex, XLIM, YLIM with output: output.clear_output() clear_plot() vectors_index_first = False; V = [None, None] V_complex = [None, None] a1.value = 0 b1.value = 0 a2.value = 0 b2.value = 0 XLIM = 5 YLIM = 5 change_lims() def clear_plot(): plt.cla() set_ticks() ax.set_xlabel('Re') ax.set_ylabel('Im') plt.ylim([-YLIM, YLIM]) plt.xlim([-XLIM, XLIM]) plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch, black_patch, magenta_patch]) def change_lims(): set_ticks() plt.ylim([-YLIM, YLIM]) plt.xlim([-XLIM, XLIM]) set_ticks() def max_bound(): global XLIM, YLIM mx = 0 my = 0 if V_complex[0] != None: z = V_complex[0] if abs(z.real) > mx: mx = abs(z.real) if abs(z.imag) > my: my = abs(z.imag) if V_complex[1] != None: z = V_complex[1] if abs(z.real) > mx: mx = abs(z.real) if abs(z.imag) > my: my = abs(z.imag) if mx > XLIM: XLIM = round(mx + 1) elif mx <=5: XLIM = 5 if my > YLIM: YLIM = round(my + 1) elif my <=5: YLIM = 5 change_lims() # Button events button_set_z1.on_click(on_button_set_z1_clicked) button_set_z2.on_click(on_button_set_z2_clicked) button_add.on_click(on_button_add_clicked) button_substract.on_click(on_button_substract_clicked) button_multiply.on_click(on_button_multiply_clicked) button_divide.on_click(on_button_divide_clicked) button_reset.on_click(on_button_reset_clicked) ```
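The same polar-form arithmetic can also be checked without the interactive widgets. The sketch below uses the standard-library `cmath` module; the two sample moduli and angles are arbitrary illustrative values, not taken from the plot above.

```
import cmath
import math

# Two complex numbers given by modulus and argument in degrees,
# matching the convention of the input widgets above.
z1 = cmath.rect(2.0, math.radians(30))    # |z1| = 2,   arg(z1) = 30 degrees
z2 = cmath.rect(1.5, math.radians(120))   # |z2| = 1.5, arg(z2) = 120 degrees

for label, z in [("z1 + z2", z1 + z2), ("z1 - z2", z1 - z2),
                 ("z1 * z2", z1 * z2), ("z1 / z2", z1 / z2)]:
    r, phi = cmath.polar(z)               # back to polar form (argument in radians)
    print("{} = {:.2f} \u2221{:.2f}".format(label, r, math.degrees(phi) % 360))
```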
Mount my google drive, where I stored the dataset. ``` from google.colab import drive drive.mount('/content/drive') ``` **Download dependencies** ``` !pip3 install sklearn matplotlib GPUtil !pip3 install torch torchvision ``` **Download Data** In order to acquire the dataset please navigate to: https://ieee-dataport.org/documents/cervigram-image-dataset Unzip the dataset into the folder "dataset". For your environment, please adjust the paths accordingly. ``` !rm -vrf "dataset" !mkdir "dataset" # !cp -r "/content/drive/My Drive/Studiu doctorat leziuni cervicale/cervigram-image-dataset-v2.zip" "dataset/cervigram-image-dataset-v2.zip" !cp -r "cervigram-image-dataset-v2.zip" "dataset/cervigram-image-dataset-v2.zip" !unzip "dataset/cervigram-image-dataset-v2.zip" -d "dataset" ``` **Constants** For your environment, please modify the paths accordingly. ``` # TRAIN_PATH = '/content/dataset/data/train/' # TEST_PATH = '/content/dataset/data/test/' TRAIN_PATH = 'dataset/data/train/' TEST_PATH = 'dataset/data/test/' CROP_SIZE = 260 IMAGE_SIZE = 224 BATCH_SIZE = 100 ``` **Imports** ``` import torch as t import torchvision as tv import numpy as np import PIL as pil import matplotlib.pyplot as plt from torchvision.datasets import ImageFolder from torch.utils.data import DataLoader from torch.nn import Linear, BCEWithLogitsLoss import sklearn as sk import sklearn.metrics from os import listdir import time import random import GPUtil ``` **Memory Stats** ``` import GPUtil def memory_stats(): for gpu in GPUtil.getGPUs(): print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal)) memory_stats() ``` **Deterministic Measurements** This statements help making the experiments reproducible by fixing the random seeds. Despite fixing the random seeds, experiments are usually not reproducible using different PyTorch releases, commits, platforms or between CPU and GPU executions. Please find more details in the PyTorch documentation: https://pytorch.org/docs/stable/notes/randomness.html ``` SEED = 0 t.manual_seed(SEED) t.cuda.manual_seed(SEED) t.backends.cudnn.deterministic = True t.backends.cudnn.benchmark = False np.random.seed(SEED) random.seed(SEED) ``` **Loading Data** The dataset is structured in multiple small folders of 7 images each. This generator iterates through the folders and returns the category and 7 paths: one for each image in the folder. The paths are ordered; the order is important since each folder contains 3 types of images, first 5 are with acetic acid solution and the last two are through a green lens and having iodine solution(a solution of a dark red color). ``` def sortByLastDigits(elem): chars = [c for c in elem if c.isdigit()] return 0 if len(chars) == 0 else int(''.join(chars)) def getImagesPaths(root_path): for class_folder in [root_path + f for f in listdir(root_path)]: category = int(class_folder[-1]) for case_folder in listdir(class_folder): case_folder_path = class_folder + '/' + case_folder + '/' img_files = [case_folder_path + file_name for file_name in listdir(case_folder_path)] yield category, sorted(img_files, key = sortByLastDigits) ``` We define 3 datasets, which load 3 kinds of images: natural images, images taken through a green lens and images where the doctor applied iodine solution (which gives a dark red color). Each dataset has dynamic and static transformations which could be applied to the data. 
The static transformations are applied on the initialization of the dataset, while the dynamic ones are applied when loading each batch of data. ``` class SimpleImagesDataset(t.utils.data.Dataset): def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None): self.dataset = [] self.transforms_x = transforms_x_dynamic self.transforms_y = transforms_y_dynamic for category, img_files in getImagesPaths(root_path): for i in range(5): img = pil.Image.open(img_files[i]) if transforms_x_static != None: img = transforms_x_static(img) if transforms_y_static != None: category = transforms_y_static(category) self.dataset.append((img, category)) def __getitem__(self, i): x, y = self.dataset[i] if self.transforms_x != None: x = self.transforms_x(x) if self.transforms_y != None: y = self.transforms_y(y) return x, y def __len__(self): return len(self.dataset) class GreenLensImagesDataset(SimpleImagesDataset): def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None): self.dataset = [] self.transforms_x = transforms_x_dynamic self.transforms_y = transforms_y_dynamic for category, img_files in getImagesPaths(root_path): # Only the green lens image img = pil.Image.open(img_files[-2]) if transforms_x_static != None: img = transforms_x_static(img) if transforms_y_static != None: category = transforms_y_static(category) self.dataset.append((img, category)) class RedImagesDataset(SimpleImagesDataset): def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None): self.dataset = [] self.transforms_x = transforms_x_dynamic self.transforms_y = transforms_y_dynamic for category, img_files in getImagesPaths(root_path): # Only the green lens image img = pil.Image.open(img_files[-1]) if transforms_x_static != None: img = transforms_x_static(img) if transforms_y_static != None: category = transforms_y_static(category) self.dataset.append((img, category)) ``` **Preprocess Data** Convert pytorch tensor to numpy array. ``` def to_numpy(x): return x.cpu().detach().numpy() ``` Data transformations for the test and training sets. ``` norm_mean = [0.485, 0.456, 0.406] norm_std = [0.229, 0.224, 0.225] transforms_train = tv.transforms.Compose([ tv.transforms.RandomAffine(degrees = 45, translate = None, scale = (1., 2.), shear = 30), # tv.transforms.CenterCrop(CROP_SIZE), tv.transforms.Resize(IMAGE_SIZE), tv.transforms.RandomHorizontalFlip(), tv.transforms.ToTensor(), tv.transforms.Lambda(lambda t: t.cuda()), tv.transforms.Normalize(mean=norm_mean, std=norm_std) ]) transforms_test = tv.transforms.Compose([ # tv.transforms.CenterCrop(CROP_SIZE), tv.transforms.Resize(IMAGE_SIZE), tv.transforms.ToTensor(), tv.transforms.Normalize(mean=norm_mean, std=norm_std) ]) y_transform = tv.transforms.Lambda(lambda y: t.tensor(y, dtype=t.long, device = 'cuda:0')) ``` Initialize pytorch datasets and loaders for training and test. 
``` def create_loaders(dataset_class): dataset_train = dataset_class(TRAIN_PATH, transforms_x_dynamic = transforms_train, transforms_y_dynamic = y_transform) dataset_test = dataset_class(TEST_PATH, transforms_x_static = transforms_test, transforms_x_dynamic = tv.transforms.Lambda(lambda t: t.cuda()), transforms_y_dynamic = y_transform) loader_train = DataLoader(dataset_train, BATCH_SIZE, shuffle = True, num_workers = 0) loader_test = DataLoader(dataset_test, BATCH_SIZE, shuffle = False, num_workers = 0) return loader_train, loader_test, len(dataset_train), len(dataset_test) loader_train_simple_img, loader_test_simple_img, len_train, len_test = create_loaders(SimpleImagesDataset) ``` **Visualize Data** Load a few images so that we can see the effects of the data augmentation on the training set. ``` def plot_one_prediction(x, label, pred): x, label, pred = to_numpy(x), to_numpy(label), to_numpy(pred) x = np.transpose(x, [1, 2, 0]) if x.shape[-1] == 1: x = x.squeeze() x = x * np.array(norm_std) + np.array(norm_mean) plt.title(label, color = 'green' if label == pred else 'red') plt.imshow(x) def plot_predictions(imgs, labels, preds): fig = plt.figure(figsize = (20, 5)) for i in range(20): fig.add_subplot(2, 10, i + 1, xticks = [], yticks = []) plot_one_prediction(imgs[i], labels[i], preds[i]) # x, y = next(iter(loader_train_simple_img)) # plot_predictions(x, y, y) ``` **Model** Define a few models to experiment with. ``` def get_mobilenet_v2(): model = t.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True) model.classifier[1] = Linear(in_features=1280, out_features=4, bias=True) model = model.cuda() return model def get_vgg_19(): model = tv.models.vgg19(pretrained = True) model = model.cuda() model.classifier[6].out_features = 4 return model def get_res_next_101(): model = t.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl') model.fc.out_features = 4 model = model.cuda() return model def get_resnet_18(): model = tv.models.resnet18(pretrained = True) model.fc.out_features = 4 model = model.cuda() return model def get_dense_net(): model = tv.models.densenet121(pretrained = True) model.classifier.out_features = 4 model = model.cuda() return model class MobileNetV2_FullConv(t.nn.Module): def __init__(self): super().__init__() self.cnn = get_mobilenet_v2().features self.cnn[18] = t.nn.Sequential( tv.models.mobilenet.ConvBNReLU(320, 32, kernel_size=1), t.nn.Dropout2d(p = .7) ) self.fc = t.nn.Linear(32, 4) def forward(self, x): x = self.cnn(x) x = x.mean([2, 3]) x = self.fc(x); return x model_simple = t.nn.DataParallel(get_mobilenet_v2()) ``` **Train & Evaluate** Timer utility function. This is used to measure the execution speed. ``` time_start = 0 def timer_start(): global time_start time_start = time.time() def timer_end(): return time.time() - time_start ``` This function trains the network and evaluates it at the same time. It outputs the metrics recorded during the training for both train and test. We are measuring accuracy and the loss. The function also saves a checkpoint of the model every time the accuracy is improved. In the end we will have a checkpoint of the model which gave the best accuracy. 
``` def train_eval(optimizer, model, loader_train, loader_test, chekpoint_name, epochs): metrics = { 'losses_train': [], 'losses_test': [], 'acc_train': [], 'acc_test': [], 'prec_train': [], 'prec_test': [], 'rec_train': [], 'rec_test': [], 'f_score_train': [], 'f_score_test': [] } best_acc = 0 loss_fn = t.nn.CrossEntropyLoss() try: for epoch in range(epochs): timer_start() train_epoch_loss, train_epoch_acc, train_epoch_precision, train_epoch_recall, train_epoch_f_score = 0, 0, 0, 0, 0 test_epoch_loss, test_epoch_acc, test_epoch_precision, test_epoch_recall, test_epoch_f_score = 0, 0, 0, 0, 0 # Train model.train() for x, y in loader_train: y_pred = model.forward(x) loss = loss_fn(y_pred, y) loss.backward() optimizer.step() # memory_stats() optimizer.zero_grad() y_pred, y = to_numpy(y_pred), to_numpy(y) pred = y_pred.argmax(axis = 1) ratio = len(y) / len_train train_epoch_loss += (loss.item() * ratio) train_epoch_acc += (sk.metrics.accuracy_score(y, pred) * ratio) precision, recall, f_score, _ = sk.metrics.precision_recall_fscore_support(y, pred, average = 'macro') train_epoch_precision += (precision * ratio) train_epoch_recall += (recall * ratio) train_epoch_f_score += (f_score * ratio) metrics['losses_train'].append(train_epoch_loss) metrics['acc_train'].append(train_epoch_acc) metrics['prec_train'].append(train_epoch_precision) metrics['rec_train'].append(train_epoch_recall) metrics['f_score_train'].append(train_epoch_f_score) # Evaluate model.eval() with t.no_grad(): for x, y in loader_test: y_pred = model.forward(x) loss = loss_fn(y_pred, y) y_pred, y = to_numpy(y_pred), to_numpy(y) pred = y_pred.argmax(axis = 1) ratio = len(y) / len_test test_epoch_loss += (loss * ratio) test_epoch_acc += (sk.metrics.accuracy_score(y, pred) * ratio ) precision, recall, f_score, _ = sk.metrics.precision_recall_fscore_support(y, pred, average = 'macro') test_epoch_precision += (precision * ratio) test_epoch_recall += (recall * ratio) test_epoch_f_score += (f_score * ratio) metrics['losses_test'].append(test_epoch_loss) metrics['acc_test'].append(test_epoch_acc) metrics['prec_test'].append(test_epoch_precision) metrics['rec_test'].append(test_epoch_recall) metrics['f_score_test'].append(test_epoch_f_score) if metrics['acc_test'][-1] > best_acc: best_acc = metrics['acc_test'][-1] t.save({'model': model.state_dict()}, 'checkpint {}.tar'.format(chekpoint_name)) print('Epoch {} acc {} prec {} rec {} f {} minutes {}'.format( epoch + 1, metrics['acc_test'][-1], metrics['prec_test'][-1], metrics['rec_test'][-1], metrics['f_score_test'][-1], timer_end() / 60)) except KeyboardInterrupt as e: print(e) print('Ended training') return metrics ``` Plot a metric for both train and test. ``` def plot_train_test(train, test, title, y_title): plt.plot(range(len(train)), train, label = 'train') plt.plot(range(len(test)), test, label = 'test') plt.xlabel('Epochs') plt.ylabel(y_title) plt.title(title) plt.legend() plt.show() ``` Plot precision - recall curve ``` def plot_precision_recall(metrics): plt.scatter(metrics['prec_train'], metrics['rec_train'], label = 'train') plt.scatter(metrics['prec_test'], metrics['rec_test'], label = 'test') plt.legend() plt.title('Precision-Recall') plt.xlabel('Precision') plt.ylabel('Recall') ``` Train a model for several epochs. The steps_learning parameter is a list of tuples. Each tuple specifies the steps and the learning rate. 
``` def do_train(model, loader_train, loader_test, checkpoint_name, steps_learning): for steps, learn_rate in steps_learning: metrics = train_eval(t.optim.Adam(model.parameters(), lr = learn_rate, weight_decay = 0), model, loader_train, loader_test, checkpoint_name, steps) print('Best test accuracy :', max(metrics['acc_test'])) plot_train_test(metrics['losses_train'], metrics['losses_test'], 'Loss (lr = {})'.format(learn_rate)) plot_train_test(metrics['acc_train'], metrics['acc_test'], 'Accuracy (lr = {})'.format(learn_rate)) ``` Perform actual training. ``` def do_train(model, loader_train, loader_test, checkpoint_name, steps_learning): t.cuda.empty_cache() for steps, learn_rate in steps_learning: metrics = train_eval(t.optim.Adam(model.parameters(), lr = learn_rate, weight_decay = 0), model, loader_train, loader_test, checkpoint_name, steps) index_max = np.array(metrics['acc_test']).argmax() print('Best test accuracy :', metrics['acc_test'][index_max]) print('Corresponding precision :', metrics['prec_test'][index_max]) print('Corresponding recall :', metrics['rec_test'][index_max]) print('Corresponding f1 score :', metrics['f_score_test'][index_max]) plot_train_test(metrics['losses_train'], metrics['losses_test'], 'Loss (lr = {})'.format(learn_rate), 'Loss') plot_train_test(metrics['acc_train'], metrics['acc_test'], 'Accuracy (lr = {})'.format(learn_rate), 'Accuracy') plot_train_test(metrics['prec_train'], metrics['prec_test'], 'Precision (lr = {})'.format(learn_rate), 'Precision') plot_train_test(metrics['rec_train'], metrics['rec_test'], 'Recall (lr = {})'.format(learn_rate), 'Recall') plot_train_test(metrics['f_score_train'], metrics['f_score_test'], 'F1 Score (lr = {})'.format(learn_rate), 'F1 Score') plot_precision_recall(metrics) do_train(model_simple, loader_train_simple_img, loader_test_simple_img, 'simple_1', [(50, 1e-4)]) # checkpoint = t.load('/content/checkpint simple_1.tar') # model_simple.load_state_dict(checkpoint['model']) ```
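For reference, a multi-stage schedule is passed in exactly the same way through `steps_learning`; the step counts, learning rates and checkpoint name below are purely illustrative, and the call is left commented out because it would start another full training run.

```
# Hypothetical two-stage schedule: train at 1e-4, then anneal to 1e-5.
two_stage_schedule = [(30, 1e-4), (20, 1e-5)]
# do_train(model_simple, loader_train_simple_img, loader_test_simple_img,
#          'simple_2', two_stage_schedule)
```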
``` %matplotlib inline ``` # Simple Oscillator Example This example shows the most simple way of using a solver. We solve free vibration of a simple oscillator: $$m \ddot{u} + k u = 0,\quad u(0) = u_0,\quad \dot{u}(0) = \dot{u}_0$$ using the CVODE solver. An analytical solution exists, given by $$u(t) = u_0 \cos\left(\sqrt{\frac{k}{m}} t\right)+\frac{\dot{u}_0}{\sqrt{\frac{k}{m}}} \sin\left(\sqrt{\frac{k}{m}} t\right)$$ ``` from __future__ import print_function import matplotlib.pyplot as plt import numpy as np from scikits.odes import ode #data of the oscillator k = 4.0 m = 1.0 #initial position and speed data on t=0, x[0] = u, x[1] = \dot{u}, xp = \dot{x} initx = [1, 0.1] ``` We need a first order system, so convert the second order system $$m \ddot{u} + k u = 0,\quad u(0) = u_0,\quad \dot{u}(0) = \dot{u}_0$$ into $$\left\{ \begin{array}{l} \dot u = v\\ \dot v = \ddot u = -\frac{ku}{m} \end{array} \right.$$ You need to define a function that computes the right hand side of above equation: ``` def rhseqn(t, x, xdot): """ we create rhs equations for the problem""" xdot[0] = x[1] xdot[1] = - k/m * x[0] ``` To solve the ODE you define an ode object, specify the solver to use, here cvode, and pass the right hand side function. You request the solution at specific timepoints by passing an array of times to the solve member. ``` solver = ode('cvode', rhseqn, old_api=False) solution = solver.solve([0., 1., 2.], initx) print('\n t Solution Exact') print('------------------------------------') for t, u in zip(solution.values.t, solution.values.y): print('{0:>4.0f} {1:15.6g} {2:15.6g}'.format(t, u[0], initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m))) ``` You can continue the solver by passing further times. Calling the solve routine reinits the solver, so you can restart at whatever time. To continue from the last computed solution, pass the last obtained time and solution. **Note:** The solver performes better if it can take into account history information, so avoid calling solve to continue computation! In general, you must check for errors using the errors output of solve. ``` #Solve over the next hour by continuation times = np.linspace(0, 3600, 61) times[0] = solution.values.t[-1] solution = solver.solve(times, solution.values.y[-1]) if solution.errors.t: print ('Error: ', solution.message, 'Error at time', solution.errors.t) print ('Computed Solutions:') print('\n t Solution Exact') print('------------------------------------') for t, u in zip(solution.values.t, solution.values.y): print('{0:>4.0f} {1:15.6g} {2:15.6g}'.format(t, u[0], initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m))) ``` The solution fails at a time around 24 seconds. Erros can be due to many things. Here however the reason is simple: we try to make too large jumps in time output. Increasing the allowed steps the solver can take will fix this. This is the **max_steps** option of cvode: ``` solver = ode('cvode', rhseqn, old_api=False, max_steps=5000) solution = solver.solve(times, solution.values.y[-1]) if solution.errors.t: print ('Error: ', solution.message, 'Error at time', solution.errors.t) print ('Computed Solutions:') print('\n t Solution Exact') print('------------------------------------') for t, u in zip(solution.values.t, solution.values.y): print('{0:>4.0f} {1:15.6g} {2:15.6g}'.format(t, u[0], initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m))) ``` To plot the simple oscillator, we show a (t,x) plot of the solution. 
Doing this over 60 seconds can be done as follows: ``` #plot of the oscilator solver = ode('cvode', rhseqn, old_api=False) times = np.linspace(0,60,600) solution = solver.solve(times, initx) plt.plot(solution.values.t,[x[0] for x in solution.values.y]) plt.xlabel('Time [s]') plt.ylabel('Position [m]') plt.show() ``` You can refine the tolerances from their defaults to obtain more accurate solutions ``` options1= {'rtol': 1e-6, 'atol': 1e-12, 'max_steps': 50000} # default rtol and atol options2= {'rtol': 1e-15, 'atol': 1e-25, 'max_steps': 50000} solver1 = ode('cvode', rhseqn, old_api=False, **options1) solver2 = ode('cvode', rhseqn, old_api=False, **options2) solution1 = solver1.solve([0., 1., 60], initx) solution2 = solver2.solve([0., 1., 60], initx) print('\n t Solution1 Solution2 Exact') print('-----------------------------------------------------') for t, u1, u2 in zip(solution1.values.t, solution1.values.y, solution2.values.y): print('{0:>4.0f} {1:15.8g} {2:15.8g} {3:15.8g}'.format(t, u1[0], u2[0], initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m))) ``` # Simple Oscillator Example: Stepwise running When using the *solve* method, you solve over a period of time you decided before. In some problems you might want to solve and decide on the output when to stop. Then you use the *step* method. The same example as above using the step method can be solved as follows. You define the ode object selecting the cvode solver. You initialize the solver with the begin time and initial conditions using *init_step*. You compute solutions going forward with the *step* method. ``` solver = ode('cvode', rhseqn, old_api=False) time = 0. solver.init_step(time, initx) plott = [] plotx = [] while True: time += 0.1 # fix roundoff error at end if time > 60: time = 60 solution = solver.step(time) if solution.errors.t: print ('Error: ', solution.message, 'Error at time', solution.errors.t) break #we store output for plotting plott.append(solution.values.t) plotx.append(solution.values.y[0]) if time >= 60: break plt.plot(plott,plotx) plt.xlabel('Time [s]') plt.ylabel('Position [m]') plt.show() ``` The solver interpolates solutions to return the solution at the required output times: ``` print ('plott length:', len(plott), ', last computation times:', plott[-15:]); ``` # Simple Oscillator Example: Internal Solver Stepwise running When using the *solve* method, you solve over a period of time you decided before. With the *step* method you solve by default towards a desired output time after which you can continue solving the problem. For full control, you can also compute problems using the solver internal steps. This is not advised, as the number of return steps can be very large, **slowing down** the computation enormously. If you want this nevertheless, you can achieve it with the *one_step_compute* option. Like this: ``` solver = ode('cvode', rhseqn, old_api=False, one_step_compute=True) time = 0. 
solver.init_step(time, initx) plott = [] plotx = [] while True: solution = solver.step(60) if solution.errors.t: print ('Error: ', solution.message, 'Error at time', solution.errors.t) break #we store output for plotting plott.append(solution.values.t) plotx.append(solution.values.y[0]) if solution.values.t >= 60: #back up to 60 solver.set_options(one_step_compute=False) solution = solver.step(60) plott[-1] = solution.values.t plotx[-1] = solution.values.y[0] break plt.plot(plott,plotx) plt.xlabel('Time [s]') plt.ylabel('Position [m]') plt.show() ``` By inspection of the returned times you can see how efficient the solver can solve this problem: ``` print ('plott length:', len(plott), ', last computation times:', plott[-15:]); ```
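As a final sanity check (assuming the cells above have been executed), the last value produced by the internal-step run can be compared against the analytical solution stated at the top of this example:

```
# Compare the numerical end point with the analytical solution
# u(t) = u0*cos(sqrt(k/m)*t) + u0'/sqrt(k/m)*sin(sqrt(k/m)*t)
t_end = plott[-1]
u_exact = initx[0]*np.cos(np.sqrt(k/m)*t_end) + initx[1]*np.sin(np.sqrt(k/m)*t_end)/np.sqrt(k/m)
print('numerical:', plotx[-1], 'analytical:', u_exact, 'abs. error:', abs(plotx[-1] - u_exact))
```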
``` import classifierMLP as cmlp import os import struct import numpy as np def load_mnist(path, kind='train'): """Load MNIST data from `path`""" labels_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind) images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind) print(labels_path) print(images_path) with open(labels_path, 'rb') as lbpath: magic, n = struct.unpack('>II', lbpath.read(8)) labels = np.fromfile(lbpath, dtype=np.uint8) with open(images_path, 'rb') as imgpath: magic, num, rows, cols = struct.unpack(">IIII", imgpath.read(16)) images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784) images = ((images / 255.) - .5) * 2 return images, labels # unzips mnist %matplotlib inline import sys import gzip import shutil if (sys.version_info > (3, 0)): writemode = 'wb' else: writemode = 'w' zipped_mnist = [f for f in os.listdir('./') if f.endswith('ubyte.gz')] for z in zipped_mnist: with gzip.GzipFile(z, mode='rb') as decompressed, open(z[:-3], writemode) as outfile: outfile.write(decompressed.read()) X_train, y_train = load_mnist('', kind='train') print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1])) X_test, y_test = load_mnist('', kind='t10k') print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1])) X_train.shape import matplotlib.pyplot as plt fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,) ax = ax.flatten() for i in range(10): img = X_train[y_train == i][0].reshape(28, 28) ax[i].imshow(img, cmap='Greys') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_5.png', dpi=300) plt.show() fig, ax = plt.subplots(nrows=7, ncols=12, sharex=True, sharey=True,) ax = ax.flatten() for i in range(84): img = X_train[y_train == 4][i].reshape(28, 28) ax[i].imshow(img, cmap='Greys') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_6.png', dpi=300) plt.show() import seaborn as sns sns.countplot(y_train) n_epochs = 100 nn = cmlp.SimpleMLP(n_hidden_units=100, l2=0.01, epochs=n_epochs, eta=0.0005, minibatch_size=100, shuffle=True, seed=1) nn.fit(X_train=X_train[:55000], y_train=y_train[:55000], X_valid=X_train[55000:], y_valid=y_train[55000:]) #playing with the traiued model fig, ax = plt.subplots(nrows=5, ncols=4, sharex=True, sharey=True,) ax = ax.flatten() for i in range(20): img = X_test[i].reshape(28, 28) ax[i].imshow(img, cmap='Greys') ax[0].set_xticks([]) ax[0].set_yticks([]) plt.tight_layout() # plt.savefig('images/12_6.png', dpi=300) plt.show() #Lets test for X_test - 1 to 20 for i in range(20): print ("Prediction for {}th image is {}".format(i, nn.predict(X_test[i:i+1]))) import matplotlib.pyplot as plt plt.plot(range(nn.epochs), nn.eval_['cost'],color='green' , label='training Error') plt.ylabel('Error') plt.xlabel('Epochs') plt.legend() import matplotlib.pyplot as plt plt.plot(range(nn.epochs), nn.eval_['train_acc'],color='green' , label='training') plt.plot(range(nn.epochs), nn.eval_['valid_acc'], color='red', label='validation', linestyle='--') plt.ylabel('Accuracy') plt.xlabel('Epochs') plt.legend() #plt.savefig('images/12_08.png', dpi=300) plt.show() ```
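To complement the per-image predictions above, the whole test set can be scored in one go. This is a small sketch that assumes `predict` accepts a full batch of rows, as suggested by the slice-based calls used earlier.

```
# Overall test-set accuracy of the trained MLP
y_test_pred = nn.predict(X_test)
test_acc = np.mean(y_test == y_test_pred)
print('Test accuracy: {:.2f}%'.format(test_acc * 100))
```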
# Siamese networks with TensorFlow 2.0/Keras In this example, we'll implement a simple siamese network system, which verifyies whether a pair of MNIST images is of the same class (true) or not (false). _This example is partially based on_ [https://github.com/keras-team/keras/blob/master/examples/mnist_siamese.py](https://github.com/keras-team/keras/blob/master/examples/mnist_siamese.py) Let's start with the imports ``` import random import numpy as np import tensorflow as tf ``` We'll continue with the `create_pairs` function, which creates a training dataset of equal number of true/false pairs of each MNIST class. ``` def create_pairs(inputs: np.ndarray, labels: np.ndarray): """Create equal number of true/false pairs of samples""" num_classes = 10 digit_indices = [np.where(labels == i)[0] for i in range(num_classes)] pairs = list() labels = list() n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1 for d in range(num_classes): for i in range(n): z1, z2 = digit_indices[d][i], digit_indices[d][i + 1] pairs += [[inputs[z1], inputs[z2]]] inc = random.randrange(1, num_classes) dn = (d + inc) % num_classes z1, z2 = digit_indices[d][i], digit_indices[dn][i] pairs += [[inputs[z1], inputs[z2]]] labels += [1, 0] return np.array(pairs), np.array(labels, dtype=np.float32) ``` Next, we'll define the base network of the siamese system: ``` def create_base_network(): """The shared encoding part of the siamese network""" return tf.keras.models.Sequential([ tf.keras.layers.Flatten(), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.1), tf.keras.layers.Dense(64, activation='relu'), ]) ``` Next, let's load the regular MNIST training and validation sets and create true/false pairs out of them: ``` # Load the train and test MNIST datasets (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train = x_train.astype(np.float32) x_test = x_test.astype(np.float32) x_train /= 255 x_test /= 255 input_shape = x_train.shape[1:] # Create true/false training and testing pairs train_pairs, tr_labels = create_pairs(x_train, y_train) test_pairs, test_labels = create_pairs(x_test, y_test) ``` Then, we'll build the siamese system, which includes the `base_network`, the 2 siamese paths `encoder_a` and `encoder_b`, the `l1_dist` measure, and the combined `model`: ``` # Create the siamese network # Start from the shared layers base_network = create_base_network() # Create first half of the siamese system input_a = tf.keras.layers.Input(shape=input_shape) # Note how we reuse the base_network in both halfs encoder_a = base_network(input_a) # Create the second half of the siamese system input_b = tf.keras.layers.Input(shape=input_shape) encoder_b = base_network(input_b) # Create the the distance measure l1_dist = tf.keras.layers.Lambda( lambda embeddings: tf.keras.backend.abs(embeddings[0] - embeddings[1])) \ ([encoder_a, encoder_b]) # Final fc layer with a single logistic output for the binary classification flattened_weighted_distance = tf.keras.layers.Dense(1, activation='sigmoid') \ (l1_dist) # Build the model model = tf.keras.models.Model([input_a, input_b], flattened_weighted_distance) ``` Finally, we can train the model and check the validation accuracy, which reaches 99.37%: ``` # Train model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.fit([train_pairs[:, 0], train_pairs[:, 1]], tr_labels, batch_size=128, epochs=20, 
validation_data=([test_pairs[:, 0], test_pairs[:, 1]], test_labels)) ```
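Beyond the aggregate accuracy metric, the trained model can be queried directly for individual pairs; an output close to 1 means the two images are predicted to belong to the same class. A minimal sketch on the first few test pairs:

```
# Score a few test pairs with the trained siamese model
pair_scores = model.predict([test_pairs[:5, 0], test_pairs[:5, 1]])
for score, true_label in zip(pair_scores.ravel(), test_labels[:5]):
    print('predicted similarity: {:.3f}  true label: {:.0f}'.format(score, true_label))
```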
# BBoxerwGradCAM ### This class forms boundary boxes (rectangle and polygon) using GradCAM outputs for a given image. The purpose of this class is to develop Rectangle and Polygon coordinates that define an object based on an image classification model. The 'automatic' creation of these coordinates, which are often included in COCO JSONs used to train object detection models, is valuable because data preparation and labeling can be a time consuming task. ### This class takes 5 user inputs: * **Pretrained Learner** (image classification model) * **GradCAM Heatmap** (heatmap of GradCAM object - formed by a pretrained image classification learner) * **Source Image** * **Image Resizing Scale** (also applied to corresponding GradCAM heatmap) * **BBOX Rectangle Resizing Scale** *Class is compatible with google colab and other Python 3 enivronments* ``` # Imports for loading learner and the GradCAM class from fastai import * from fastai.vision import * from fastai.callbacks.hooks import * import scipy.ndimage ``` The following cell contains the widely used GradCAM class for pretrained image classification models (unedited). ``` #@title GradCAM Class class GradCam(): @classmethod def from_interp(cls,learn,interp,img_idx,ds_type=DatasetType.Valid,include_label=False): # produce heatmap and xb_grad for pred label (and actual label if include_label is True) if ds_type == DatasetType.Valid: ds = interp.data.valid_ds elif ds_type == DatasetType.Test: ds = interp.data.test_ds include_label=False else: return None x_img = ds.x[img_idx] xb,_ = interp.data.one_item(x_img) xb_img = Image(interp.data.denorm(xb)[0]) probs = interp.preds[img_idx].numpy() pred_idx = interp.pred_class[img_idx].item() # get class idx of img prediction label hmap_pred,xb_grad_pred = get_grad_heatmap(learn,xb,pred_idx,size=xb_img.shape[-1]) prob_pred = probs[pred_idx] actual_args=None if include_label: actual_idx = ds.y.items[img_idx] # get class idx of img actual label if actual_idx!=pred_idx: hmap_actual,xb_grad_actual = get_grad_heatmap(learn,xb,actual_idx,size=xb_img.shape[-1]) prob_actual = probs[actual_idx] actual_args=[interp.data.classes[actual_idx],prob_actual,hmap_actual,xb_grad_actual] return cls(xb_img,interp.data.classes[pred_idx],prob_pred,hmap_pred,xb_grad_pred,actual_args) @classmethod def from_one_img(cls,learn,x_img,label1=None,label2=None): ''' learn: fastai's Learner x_img: fastai.vision.image.Image label1: generate heatmap according to this label. 
If None, this wil be the label with highest probability from the model label2: generate additional heatmap according to this label ''' pred_class,pred_idx,probs = learn.predict(x_img) label1= str(pred_class) if not label1 else label1 xb,_ = learn.data.one_item(x_img) xb_img = Image(learn.data.denorm(xb)[0]) probs = probs.numpy() label1_idx = learn.data.classes.index(label1) hmap1,xb_grad1 = get_grad_heatmap(learn,xb,label1_idx,size=xb_img.shape[-1]) prob1 = probs[label1_idx] label2_args = None if label2: label2_idx = learn.data.classes.index(label2) hmap2,xb_grad2 = get_grad_heatmap(learn,xb,label2_idx,size=xb_img.shape[-1]) prob2 = probs[label2_idx] label2_args = [label2,prob2,hmap2,xb_grad2] return cls(xb_img,label1,prob1,hmap1,xb_grad1,label2_args) def __init__(self,xb_img,label1,prob1,hmap1,xb_grad1,label2_args=None): self.xb_img=xb_img self.label1,self.prob1,self.hmap1,self.xb_grad1 = label1,prob1,hmap1,xb_grad1 if label2_args: self.label2,self.prob2,self.hmap2,self.xb_grad2 = label2_args def plot(self,plot_hm=True,plot_gbp=True): if not plot_hm and not plot_gbp: plot_hm=True cols = 5 if hasattr(self, 'label2') else 3 if not plot_gbp or not plot_hm: cols-= 2 if hasattr(self, 'label2') else 1 fig,row_axes = plt.subplots(1,cols,figsize=(cols*5,5)) col=0 size=self.xb_img.shape[-1] self.xb_img.show(row_axes[col]);col+=1 label1_title = f'1.{self.label1} {self.prob1:.3f}' if plot_hm: show_heatmap(self.hmap1,self.xb_img,size,row_axes[col]) row_axes[col].set_title(label1_title);col+=1 if plot_gbp: row_axes[col].imshow(self.xb_grad1) row_axes[col].set_axis_off() row_axes[col].set_title(label1_title);col+=1 if hasattr(self, 'label2'): label2_title = f'2.{self.label2} {self.prob2:.3f}' if plot_hm: show_heatmap(self.hmap2,self.xb_img,size,row_axes[col]) row_axes[col].set_title(label2_title);col+=1 if plot_gbp: row_axes[col].imshow(self.xb_grad2) row_axes[col].set_axis_off() row_axes[col].set_title(label2_title) # plt.tight_layout() fig.subplots_adjust(wspace=0, hspace=0) # fig.savefig('data_draw/both/gradcam.png') def minmax_norm(x): return (x - np.min(x))/(np.max(x) - np.min(x)) def scaleup(x,size): scale_mult=size/x.shape[0] upsampled = scipy.ndimage.zoom(x, scale_mult) return upsampled # hook for Gradcam def hooked_backward(m,xb,target_layer,clas): with hook_output(target_layer) as hook_a: #hook at last layer of group 0's output (after bn, size 512x7x7 if resnet34) with hook_output(target_layer, grad=True) as hook_g: # gradient w.r.t to the target_layer preds = m(xb) preds[0,int(clas)].backward() # same as onehot backprop return hook_a,hook_g def clamp_gradients_hook(module, grad_in, grad_out): for grad in grad_in: torch.clamp_(grad, min=0.0) # hook for guided backprop def hooked_ReLU(m,xb,clas): relu_modules = [module[1] for module in m.named_modules() if str(module[1]) == "ReLU(inplace)"] with callbacks.Hooks(relu_modules, clamp_gradients_hook, is_forward=False) as _: preds = m(xb) preds[0,int(clas)].backward() def guided_backprop(learn,xb,y): xb = xb.cuda() m = learn.model.eval(); xb.requires_grad_(); if not xb.grad is None: xb.grad.zero_(); hooked_ReLU(m,xb,y); return xb.grad[0].cpu().numpy() def show_heatmap(hm,xb_im,size,ax=None): if ax is None: _,ax = plt.subplots() xb_im.show(ax) ax.imshow(hm, alpha=0.8, extent=(0,size,size,0), interpolation='bilinear',cmap='magma'); def get_grad_heatmap(learn,xb,y,size): ''' Main function to get hmap for heatmap and xb_grad for guided backprop ''' xb = xb.cuda() m = learn.model.eval(); target_layer = m[0][-1][-1] # last layer of group 0 hook_a,hook_g 
= hooked_backward(m,xb,target_layer,y) target_act= hook_a.stored[0].cpu().numpy() target_grad = hook_g.stored[0][0].cpu().numpy() mean_grad = target_grad.mean(1).mean(1) # hmap = (target_act*mean_grad[...,None,None]).mean(0) hmap = (target_act*mean_grad[...,None,None]).sum(0) hmap = np.where(hmap >= 0, hmap, 0) xb_grad = guided_backprop(learn,xb,y) # (3,224,224) #minmax norm the grad xb_grad = minmax_norm(xb_grad) hmap_scaleup = minmax_norm(scaleup(hmap,size)) # (224,224) # multiply xb_grad and hmap_scaleup and switch axis xb_grad = np.einsum('ijk, jk->jki',xb_grad, hmap_scaleup) #(224,224,3) return hmap,xb_grad ``` I connect to google drive (this notebook was made on google colab for GPU usage) and load my pretrained learner. ``` from google.colab import drive drive.mount('/content/drive') base_dir = '/content/drive/My Drive/fellowshipai-data/final_3_class_data_train_test_split' def get_data(sz): # This function returns an ImageDataBunch with a given image size return ImageDataBunch.from_folder(base_dir+'/', train='train', valid='valid', # 0% validation because we already formed our testing set ds_tfms=get_transforms(), size=sz, num_workers=4).normalize(imagenet_stats) # Normalized, 4 workers (multiprocessing) - 64 batch size (default) arch = models.resnet34 data = get_data(224) learn = cnn_learner(data,arch,metrics=[error_rate,Precision(average='micro'),Recall(average='micro')],train_bn=True,pretrained=True).mixup() learn.load('model-224sz-basicaugments-oversampling-mixup-dLRs') example_image = '/content/drive/My Drive/fellowshipai-data/final_3_class_data_train_test_split/train/raw/00000015.jpg' img = open_image(example_image) gcam = GradCam.from_one_img(learn,img) # using the GradCAM class gcam.plot(plot_gbp = False) # We care about the heatmap (which is overlayed on top of the original image inherently) gcam_heatmap = gcam.hmap1 # This is a 2d array ``` My pretrained learner correctly classified the image as raw with probability 0.996. Note that images with very low noise and accurate feature importances (as with the example image) are The learner is focusing on the steak in center view (heatmap pixels indicate feature importance). ``` from BBOXES_from_GRADCAM import BBoxerwGradCAM # load class from .py file image_resizing_scale = [400,300] bbox_scaling = [1,1,1,1] bbox = BBoxerwGradCAM(learn, gcam_heatmap, example_image, image_resizing_scale, bbox_scaling) for function in dir(bbox)[-18:]: print(function) bbox.show_smoothheatmap() bbox.show_contouredheatmap() #bbox.show_bboxrectangle() bbox.show_bboxpolygon() bbox.show_bboxrectangle() rect_coords, polygon_coords = bbox.get_bboxes() rect_coords # x,y,w,h polygon_coords # IoU for object detection def get_IoU(truth_coords, pred_coords): pred_area = pred_coords[2]*pred_coords[3] truth_area = truth_coords[2]*truth_coords[3] # coords of intersection rectangle x1 = max(truth_coords[0], pred_coords[0]) y1 = max(truth_coords[1], pred_coords[1]) x2 = min(truth_coords[2], pred_coords[2]) y2 = min(truth_coords[3], pred_coords[3]) # area of intersection rectangle interArea = max(0, x2 - x1 + 1) * max(0, y2 - y1 + 1) # area of prediction and truth rectangles boxTruthArea = (truth_coords[2] - truth_coords[0] + 1) * (truth_coords[3] - truth_coords[1] + 1) boxPredArea = (pred_coords[2] - pred_coords[0] + 1) * (pred_coords[3] - pred_coords[1] + 1) # intersection over union iou = interArea / float(boxTruthArea + boxPredArea - interArea) return iou get_IoU([80,40,240,180],rect_coords) ```
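Note that `rect_coords` is documented above as `(x, y, w, h)`, while `get_IoU` intersects its inputs as corner coordinates `(x1, y1, x2, y2)`. If the boxes are kept in the `(x, y, w, h)` convention returned by `get_bboxes`, an IoU computed consistently in that format could look like the following sketch (`iou_xywh` is a hypothetical helper name, not part of the class):

```
def iou_xywh(box_a, box_b):
    # Both boxes are (x, y, w, h); convert to corner coordinates first.
    ax1, ay1, ax2, ay2 = box_a[0], box_a[1], box_a[0] + box_a[2], box_a[1] + box_a[3]
    bx1, by1, bx2, by2 = box_b[0], box_b[1], box_b[0] + box_b[2], box_b[1] + box_b[3]
    inter_w = max(0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0, min(ay2, by2) - max(ay1, by1))
    intersection = inter_w * inter_h
    union = box_a[2] * box_a[3] + box_b[2] * box_b[3] - intersection
    return intersection / union if union > 0 else 0.0

# e.g. iou_xywh([80, 40, 240, 180], rect_coords)
```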
# Hierarchical Clustering **Hierarchical clustering** refers to a class of clustering methods that seek to build a **hierarchy** of clusters, in which some clusters contain others. In this assignment, we will explore a top-down approach, recursively bipartitioning the data using k-means. **Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook. ## Import packages ``` from __future__ import print_function # to conform python 2.x print to python 3.x import turicreate import matplotlib.pyplot as plt import numpy as np import sys import os import time from scipy.sparse import csr_matrix from sklearn.cluster import KMeans from sklearn.metrics import pairwise_distances %matplotlib inline ``` ## Load the Wikipedia dataset ``` wiki = turicreate.SFrame('people_wiki.sframe/') ``` As we did in previous assignments, let's extract the TF-IDF features: ``` wiki['tf_idf'] = turicreate.text_analytics.tf_idf(wiki['text']) ``` To run k-means on this dataset, we should convert the data matrix into a sparse matrix. ``` from em_utilities import sframe_to_scipy # converter # This will take about a minute or two. wiki = wiki.add_row_number() tf_idf, map_word_to_index = sframe_to_scipy(wiki, 'tf_idf') ``` To be consistent with the k-means assignment, let's normalize all vectors to have unit norm. ``` from sklearn.preprocessing import normalize tf_idf = normalize(tf_idf) ``` ## Bipartition the Wikipedia dataset using k-means Recall our workflow for clustering text data with k-means: 1. Load the dataframe containing a dataset, such as the Wikipedia text dataset. 2. Extract the data matrix from the dataframe. 3. Run k-means on the data matrix with some value of k. 4. Visualize the clustering results using the centroids, cluster assignments, and the original dataframe. We keep the original dataframe around because the data matrix does not keep auxiliary information (in the case of the text dataset, the title of each article). Let us modify the workflow to perform bipartitioning: 1. Load the dataframe containing a dataset, such as the Wikipedia text dataset. 2. Extract the data matrix from the dataframe. 3. Run k-means on the data matrix with k=2. 4. Divide the data matrix into two parts using the cluster assignments. 5. Divide the dataframe into two parts, again using the cluster assignments. This step is necessary to allow for visualization. 6. Visualize the bipartition of data. We'd like to be able to repeat Steps 3-6 multiple times to produce a **hierarchy** of clusters such as the following: ``` (root) | +------------+-------------+ | | Cluster Cluster +------+-----+ +------+-----+ | | | | Cluster Cluster Cluster Cluster ``` Each **parent cluster** is bipartitioned to produce two **child clusters**. At the very top is the **root cluster**, which consists of the entire dataset. Now we write a wrapper function to bipartition a given cluster using k-means. There are three variables that together comprise the cluster: * `dataframe`: a subset of the original dataframe that correspond to member rows of the cluster * `matrix`: same set of rows, stored in sparse matrix format * `centroid`: the centroid of the cluster (not applicable for the root cluster) Rather than passing around the three variables separately, we package them into a Python dictionary. The wrapper function takes a single dictionary (representing a parent cluster) and returns two dictionaries (representing the child clusters). 
``` def bipartition(cluster, maxiter=400, num_runs=4, seed=None): '''cluster: should be a dictionary containing the following keys * dataframe: original dataframe * matrix: same data, in matrix format * centroid: centroid for this particular cluster''' data_matrix = cluster['matrix'] dataframe = cluster['dataframe'] # Run k-means on the data matrix with k=2. We use scikit-learn here to simplify workflow. kmeans_model = KMeans(n_clusters=2, max_iter=maxiter, n_init=num_runs, random_state=seed, n_jobs=1) kmeans_model.fit(data_matrix) centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_ # Divide the data matrix into two parts using the cluster assignments. data_matrix_left_child, data_matrix_right_child = data_matrix[cluster_assignment==0], \ data_matrix[cluster_assignment==1] # Divide the dataframe into two parts, again using the cluster assignments. cluster_assignment_sa = turicreate.SArray(cluster_assignment) # minor format conversion dataframe_left_child, dataframe_right_child = dataframe[cluster_assignment_sa==0], \ dataframe[cluster_assignment_sa==1] # Package relevant variables for the child clusters cluster_left_child = {'matrix': data_matrix_left_child, 'dataframe': dataframe_left_child, 'centroid': centroids[0]} cluster_right_child = {'matrix': data_matrix_right_child, 'dataframe': dataframe_right_child, 'centroid': centroids[1]} return (cluster_left_child, cluster_right_child) ``` The following cell performs bipartitioning of the Wikipedia dataset. Allow 2+ minutes to finish. Note. For the purpose of the assignment, we set an explicit seed (`seed=1`) to produce identical outputs for every run. In pratical applications, you might want to use different random seeds for all runs. ``` %%time wiki_data = {'matrix': tf_idf, 'dataframe': wiki} # no 'centroid' for the root cluster left_child, right_child = bipartition(wiki_data, maxiter=100, num_runs=1, seed=0) ``` Let's examine the contents of one of the two clusters, which we call the `left_child`, referring to the tree visualization above. ``` left_child ``` And here is the content of the other cluster we named `right_child`. ``` right_child ``` ## Visualize the bipartition We provide you with a modified version of the visualization function from the k-means assignment. For each cluster, we print the top 5 words with highest TF-IDF weights in the centroid and display excerpts for the 8 nearest neighbors of the centroid. ``` def display_single_tf_idf_cluster(cluster, map_index_to_word): '''map_index_to_word: SFrame specifying the mapping betweeen words and column indices''' wiki_subset = cluster['dataframe'] tf_idf_subset = cluster['matrix'] centroid = cluster['centroid'] # Print top 5 words with largest TF-IDF weights in the cluster idx = centroid.argsort()[::-1] for i in range(5): print('{0}:{1:.3f}'.format(map_index_to_word['category'], centroid[idx[i]])), print('') # Compute distances from the centroid to all data points in the cluster. distances = pairwise_distances(tf_idf_subset, [centroid], metric='euclidean').flatten() # compute nearest neighbors of the centroid within the cluster. nearest_neighbors = distances.argsort() # For 8 nearest neighbors, print the title as well as first 180 characters of text. # Wrap the text at 80-character mark. 
for i in range(8): text = ' '.join(wiki_subset[nearest_neighbors[i]]['text'].split(None, 25)[0:25]) print('* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki_subset[nearest_neighbors[i]]['name'], distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else '')) print('') ``` Let's visualize the two child clusters: ``` display_single_tf_idf_cluster(left_child, map_word_to_index) display_single_tf_idf_cluster(right_child, map_word_to_index) ``` The right cluster consists of athletes and artists (singers and actors/actresses), whereas the left cluster consists of non-athletes and non-artists. So far, we have a single-level hierarchy consisting of two clusters, as follows: ``` Wikipedia + | +--------------------------+--------------------+ | | + + Non-athletes/artists Athletes/artists ``` Is this hierarchy good enough? **When building a hierarchy of clusters, we must keep our particular application in mind.** For instance, we might want to build a **directory** for Wikipedia articles. A good directory would let you quickly narrow down your search to a small set of related articles. The categories of athletes and non-athletes are too general to facilitate efficient search. For this reason, we decide to build another level into our hierarchy of clusters with the goal of getting more specific cluster structure at the lower level. To that end, we subdivide both the `athletes/artists` and `non-athletes/artists` clusters. ## Perform recursive bipartitioning ### Cluster of athletes and artists To help identify the clusters we've built so far, let's give them easy-to-read aliases: ``` non_athletes_artists = left_child athletes_artists = right_child ``` Using the bipartition function, we produce two child clusters of the athlete cluster: ``` # Bipartition the cluster of athletes and artists left_child_athletes_artists, right_child_athletes_artists = bipartition(athletes_artists, maxiter=100, num_runs=6, seed=1) ``` The left child cluster mainly consists of athletes: ``` display_single_tf_idf_cluster(left_child_athletes_artists, map_word_to_index) ``` On the other hand, the right child cluster consists mainly of artists (singers and actors/actresses): ``` display_single_tf_idf_cluster(right_child_athletes_artists, map_word_to_index) ``` Our hierarchy of clusters now looks like this: ``` Wikipedia + | +--------------------------+--------------------+ | | + + Non-athletes/artists Athletes/artists + | +----------+----------+ | | | | + | athletes artists ``` Should we keep subdividing the clusters? If so, which cluster should we subdivide? To answer this question, we again think about our application. Since we organize our directory by topics, it would be nice to have topics that are about as coarse as each other. For instance, if one cluster is about baseball, we expect some other clusters about football, basketball, volleyball, and so forth. That is, **we would like to achieve similar level of granularity for all clusters.** Both the athletes and artists node can be subdivided more, as each one can be divided into more descriptive professions (singer/actress/painter/director, or baseball/football/basketball, etc.). Let's explore subdividing the athletes cluster further to produce finer child clusters. 
Let's give the clusters aliases as well: ``` athletes = left_child_athletes_artists artists = right_child_athletes_artists ``` ### Cluster of athletes In answering the following quiz question, take a look at the topics represented in the top documents (those closest to the centroid), as well as the list of words with highest TF-IDF weights. Let us bipartition the cluster of athletes. ``` left_child_athletes, right_child_athletes = bipartition(athletes, maxiter=100, num_runs=6, seed=1) display_single_tf_idf_cluster(left_child_athletes, map_word_to_index) display_single_tf_idf_cluster(right_child_athletes, map_word_to_index) ``` **Quiz Question**. Which diagram best describes the hierarchy right after splitting the `athletes` cluster? Refer to the quiz form for the diagrams. **Caution**. The granularity criteria is an imperfect heuristic and must be taken with a grain of salt. It takes a lot of manual intervention to obtain a good hierarchy of clusters. * **If a cluster is highly mixed, the top articles and words may not convey the full picture of the cluster.** Thus, we may be misled if we judge the purity of clusters solely by their top documents and words. * **Many interesting topics are hidden somewhere inside the clusters but do not appear in the visualization.** We may need to subdivide further to discover new topics. For instance, subdividing the `ice_hockey_football` cluster led to the appearance of runners and golfers. ### Cluster of non-athletes Now let us subdivide the cluster of non-athletes. ``` %%time # Bipartition the cluster of non-athletes left_child_non_athletes_artists, right_child_non_athletes_artists = bipartition(non_athletes_artists, maxiter=100, num_runs=3, seed=1) display_single_tf_idf_cluster(left_child_non_athletes_artists, map_word_to_index) display_single_tf_idf_cluster(right_child_non_athletes_artists, map_word_to_index) ``` The clusters are not as clear, but the left cluster has a tendency to show important female figures, and the right one to show politicians and government officials. Let's divide them further. ``` female_figures = left_child_non_athletes_artists politicians_etc = right_child_non_athletes_artists politicians_etc = left_child_non_athletes_artists female_figures = right_child_non_athletes_artists ``` **Quiz Question**. Let us bipartition the clusters `female_figures` and `politicians`. Which diagram best describes the resulting hierarchy of clusters for the non-athletes? Refer to the quiz for the diagrams. **Note**. Use `maxiter=100, num_runs=6, seed=1` for consistency of output. ``` left_female_figures, right_female_figures = bipartition(female_figures, maxiter=100, num_runs=6, seed=1) left_politicians_etc, right_politicians_etc = bipartition(politicians_etc, maxiter=100, num_runs=6, seed=1) display_single_tf_idf_cluster(left_female_figures, map_word_to_index) display_single_tf_idf_cluster(right_female_figures, map_word_to_index) display_single_tf_idf_cluster(left_politicians_etc, map_word_to_index) display_single_tf_idf_cluster(right_politicians_etc, map_word_to_index) ```
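A quick way to gauge whether the leaf clusters have comparable granularity (the criterion discussed above) is to look at their sizes. This sketch assumes the four bipartitions above have been run:

```
# Number of articles in each leaf cluster of the non-athletes branch
for name, c in [('left_female_figures', left_female_figures),
                ('right_female_figures', right_female_figures),
                ('left_politicians_etc', left_politicians_etc),
                ('right_politicians_etc', right_politicians_etc)]:
    print('{:25s} {:6d} articles'.format(name, len(c['dataframe'])))
```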
github_jupyter
# Object Detection with SSD

### Here we demonstrate detection on example images using SSD with PyTorch

```
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import cv2
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

from ssd import build_ssd
```

## Build SSD300 in Test Phase

1. Build the architecture, specifying the size of the input image (300) and the number of object classes to score (21 for the VOC dataset)
2. Next we load pretrained weights on the VOC0712 trainval dataset

```
net = build_ssd('test', 300, 21)    # initialize SSD
net.load_weights('../weights/ssd300_VOC_28000.pth')
```

## Load Image

### Here we just load a sample image from the VOC07 dataset

```
# image = cv2.imread('./data/example.jpg', cv2.IMREAD_COLOR)  # uncomment if dataset not downloaded
%matplotlib inline
from matplotlib import pyplot as plt
from data import VOCDetection, VOC_ROOT, VOCAnnotationTransform
# here we specify year (07 or 12) and dataset ('test', 'val', 'train')
testset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())
img_id = 60
image = testset.pull_image(img_id)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# View the sampled input image before transform
plt.figure(figsize=(10,10))
plt.imshow(rgb_image)
plt.show()
```

## Pre-process the input.

#### Using the torchvision package, we can create a Compose of multiple built-in transform ops to apply

For SSD, at test time we use a custom BaseTransform callable to resize our image to 300x300, subtract the dataset's mean rgb values, and swap the color channels for input to SSD300.

```
x = cv2.resize(image, (300, 300)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
plt.imshow(x)
x = torch.from_numpy(x).permute(2, 0, 1)
```

## SSD Forward Pass

### Now just wrap the image in a Variable so it is recognized by PyTorch autograd

```
xx = Variable(x.unsqueeze(0))     # wrap tensor in Variable
if torch.cuda.is_available():
    xx = xx.cuda()
y = net(xx)
```

## Parse the Detections and View Results

Filter out detections with confidence scores lower than a threshold; here we choose 60%.

```
from data import VOC_CLASSES as labels
top_k=10

plt.figure(figsize=(10,10))
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(rgb_image)  # plot the image for matplotlib
currentAxis = plt.gca()

detections = y.data
# scale each detection back up to the image
scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
for i in range(detections.size(1)):
    j = 0
    while detections[0,i,j,0] >= 0.6:
        score = detections[0,i,j,0]
        label_name = labels[i-1]
        display_txt = '%s: %.2f'%(label_name, score)
        pt = (detections[0,i,j,1:]*scale).cpu().numpy()
        coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1
        color = colors[i]
        currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
        currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})
        j+=1
```
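As a small follow-up, the same thresholding logic can be used to collect the detections into a plain Python list (for example to save them to disk) instead of drawing them. This is only a sketch building on the `detections`, `labels` and `scale` variables from the cell above; the bound check on `j` is an addition to avoid reading past the stored detections.

```
# Collect thresholded detections as (label, score, [xmin, ymin, xmax, ymax]) entries
results = []
for i in range(detections.size(1)):                    # loop over classes (index 0 is background)
    j = 0
    while j < detections.size(2) and detections[0, i, j, 0] >= 0.6:
        score = detections[0, i, j, 0].item()          # confidence score as a Python float
        box = (detections[0, i, j, 1:] * scale).cpu().numpy().tolist()
        results.append((labels[i - 1], score, box))
        j += 1
print(results)
```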
github_jupyter
# Data Attribute Recommendation - TechED 2020 INT260 Getting started with the Python SDK for the Data Attribute Recommendation service. ## Business Scenario We will consider a business scenario involving product master data. The creation and maintenance of this product master data requires the careful manual selection of the correct categories for a given product from a pre-defined hierarchy of product categories. In this workshop, we will explore how to automate this tedious manual task with the Data Attribute Recommendation service. <video controls src="videos/dar_prediction_material_table.mp4"/> This workshop will cover: * Data Upload * Model Training and Deployment * Inference Requests We will work through a basic example of how to achieve these tasks using the [Python SDK for Data Attribute Recommendation](https://github.com/SAP/data-attribute-recommendation-python-sdk). *Note: if you are doing several runs of this notebook on a trial account, you may see errors stating 'The resource can no longer be used. Usage limit has been reached'. It can be beneficial to [clean up the service instance](#Cleaning-up-a-service-instance) to free up limited trial resources acquired by an earlier run of the notebook. [Some limits](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) cannot be reset this way.* ## Table of Contents * [Exercise 01.1](#Exercise-01.1) - Installing the SDK and preparing the service key * [Creating a service instance and key on BTP Trial](#Creating-a-service-instance-and-key) * [Installing the SDK](#Installing-the-SDK) * [Loading the service key into your Jupyter Notebook](#Loading-the-service-key-into-your-Jupyter-Notebook) * [Exercise 01.2](#Exercise-01.2) - Uploading the data * [Exercise 01.3](#Exercise-01.3) - Training the model * [Exercise 01.4](#Exercise-01.4) - Deploying the Model and predicting labels * [Resources](#Resources) - Additional reading * [Cleaning up a service instance](#Cleaning-up-a-service-instance) - Clean up all resources on the service instance * [Optional Exercises](#Optional-Exercises) - Optional exercises ## Requirements See the [README in the Github repository for this workshop](https://github.com/SAP-samples/teched2020-INT260/blob/master/exercises/ex1-DAR/README.md). # Exercise 01.1 *Back to [table of contents](#Table-of-Contents)* In exercise 01.1, we will install the SDK and prepare the service key. ## Creating a service instance and key on BTP Trial Please log in to your trial account: https://cockpit.eu10.hana.ondemand.com/trial/ In the your global account screen, go to the "Boosters" tab: ![trial_booster.png](attachment:trial_booster.png) *Boosters are only available on the Trial landscape. If you are using a production environment, please follow this tutorial to manually [create a service instance and a service key](https://developers.sap.com/tutorials/cp-aibus-dar-service-instance.html)*. In the Boosters tab, enter "Data Attribute Recommendation" into the search box. Then, select the service tile from the search results: ![trial_locate_dar_booster.png](attachment:trial_locate_dar_booster.png) The resulting screen shows details of the booster pack. Here, click the "Start" button and wait a few seconds. ![trial_start_booster.png](attachment:trial_start_booster.png) Once the booster is finished, click the "go to Service Key" link to obtain your service key. ![trial_booster_finished.png](attachment:trial_booster_finished.png) Finally, download the key and save it to disk. 
![trial_download_key.png](attachment:trial_download_key.png) ## Installing the SDK The Data Attribute Recommendation SDK is available from the Python package repository. It can be installed with the standard `pip` tool: ``` ! pip install data-attribute-recommendation-sdk ``` *Note: If you are not using a Jupyter notebook, but instead a regular Python development environment, we recommend using a Python virtual environment to set up your development environment. Please see [the dedicated tutorial to learn how to install the SDK inside a Python virtual environment](https://developers.sap.com/tutorials/cp-aibus-dar-sdk-setup.html).* ## Loading the service key into your Jupyter Notebook Once you downloaded the service key from the Cockpit, upload it to your notebook environment. The service key must be uploaded to same directory where the `teched2020-INT260_Data_Attribute_Recommendation.ipynb` is stored. We first navigate to the file browser in Jupyter. On the top of your Jupyter notebook, right-click on the Jupyter logo and open in a new tab. ![service_key_main_jupyter_page.png](attachment:service_key_main_jupyter_page.png) **In the file browser, navigate to the directory where the `teched2020-INT260_Data_Attribute_Recommendation.ipynb` notebook file is stored. The service key must reside next to this file.** In the Jupyter file browser, click the **Upload** button (1). In the file selection dialog that opens, select the `defaultKey_*.json` file you downloaded previously from the SAP Cloud Platform Cockpit. Rename the file to `key.json`. Confirm the upload by clicking on the second **Upload** button (2). ![service_key_upload.png](attachment:service_key_upload.png) The service key contains your credentials to access the service. Please treat this as carefully as you would treat any password. We keep the service key as a separate file outside this notebook to avoid leaking the secret credentials. The service key is a JSON file. We will load this file once and use the credentials throughout this workshop. ``` # First, set up logging so we can see the actions performed by the SDK behind the scenes import logging import sys logging.basicConfig(level=logging.INFO, stream=sys.stdout) from pprint import pprint # for nicer output formatting import json import os if not os.path.exists("key.json"): msg = "key.json is not found. Please follow instructions above to create a service key of" msg += " Data Attribute Recommendation. Then, upload it into the same directory where" msg += " this notebook is saved." print(msg) raise ValueError(msg) with open("key.json") as file_handle: key = file_handle.read() SERVICE_KEY = json.loads(key) ``` ## Summary Exercise 01.1 In exercise 01.1, we have covered the following topics: * How to install the Python SDK for Data Attribute Recommendation * How to obtain a service key for the Data Attribute Recommendation service # Exercise 01.2 *Back to [table of contents](#Table-of-Contents)* *To perform this exercise, you need to execute the code in all previous exercises.* In exercise 01.2, we will upload our demo dataset to the service. ## The Dataset ### Obtaining the Data The dataset we use in this workshop is a CSV file containing product master data. The original data was released by BestBuy, a retail company, under an [open license](https://github.com/SAP-samples/data-attribute-recommendation-postman-tutorial-sample#data-and-license). This makes it ideal for first experiments with the Data Attribute Recommendation service. 
The dataset can be downloaded directly from Github using the following command: ``` ! wget -O bestBuy.csv "https://raw.githubusercontent.com/SAP-samples/data-attribute-recommendation-postman-tutorial-sample/master/Tutorial_Example_Dataset.csv" # If you receive a "command not found" error (i.e. on Windows), try curl instead of wget: # ! curl -o bestBuy.csv "https://raw.githubusercontent.com/SAP-samples/data-attribute-recommendation-postman-tutorial-sample/master/Tutorial_Example_Dataset.csv" ``` Let's inspect the data: ``` # if you are experiencing an import error here, run the following in a new cell: # ! pip install pandas import pandas as pd df = pd.read_csv("bestBuy.csv") df.head(5) print() print(f"Data has {df.shape[0]} rows and {df.shape[1]} columns.") ``` The CSV contains the several products. For each product, the description, the manufacturer and the price are given. Additionally, three levels of the products hierarchy are given. The first product, a set of AAA batteries, is located in the following place in the product hierarchy: ``` level1_category: Connected Home & Housewares | level2_category: Housewares | level3_category: Household Batteries ``` We will use the Data Attribute Recommendation service to predict the categories for a given product based on its **description**, **manufacturer** and **price**. ### Creating the DatasetSchema We first have to describe the shape of our data by creating a DatasetSchema. This schema informs the service about the individual column types found in the CSV. We also describe which are the target columns used for training. These columns will be later predicted. In our case, these are the three category columns. The service currently supports three column types: **text**, **category** and **number**. For prediction, only **category** is currently supported. A DatasetSchema for the BestBuy dataset looks as follows: ```json { "features": [ {"label": "manufacturer", "type": "CATEGORY"}, {"label": "description", "type": "TEXT"}, {"label": "price", "type": "NUMBER"} ], "labels": [ {"label": "level1_category", "type": "CATEGORY"}, {"label": "level2_category", "type": "CATEGORY"}, {"label": "level3_category", "type": "CATEGORY"} ], "name": "bestbuy-category-prediction", } ``` We will now upload this DatasetSchema to the Data Attribute Recommendation service. The SDK provides the [`DataManagerClient.create_dataset_schema()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.create_dataset_schema) method for this purpose. ``` from sap.aibus.dar.client.data_manager_client import DataManagerClient dataset_schema = { "features": [ {"label": "manufacturer", "type": "CATEGORY"}, {"label": "description", "type": "TEXT"}, {"label": "price", "type": "NUMBER"} ], "labels": [ {"label": "level1_category", "type": "CATEGORY"}, {"label": "level2_category", "type": "CATEGORY"}, {"label": "level3_category", "type": "CATEGORY"} ], "name": "bestbuy-category-prediction", } data_manager = DataManagerClient.construct_from_service_key(SERVICE_KEY) response = data_manager.create_dataset_schema(dataset_schema) dataset_schema_id = response["id"] print() print("DatasetSchema created:") pprint(response) print() print(f"DatasetSchema ID: {dataset_schema_id}") ``` The API responds with the newly created DatasetSchema resource. The service assigned an ID to the schema. We save this ID in a variable, as we will need it when we upload the data. 
### Uploading the Data to the service The [`DataManagerClient`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient) class is also responsible for uploading data to the service. This data must fit to an existing DatasetSchema. After uploading the data, the service will validate the Dataset against the DataSetSchema in a background process. The data must be a CSV file which can optionally be `gzip` compressed. We will now upload our `bestBuy.csv` file, using the DatasetSchema which we created earlier. Data upload is a two-step process. We first create the Dataset using [`DataManagerClient.create_dataset()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.create_dataset). Then we can upload data to the Dataset using the [`DataManagerClient.upload_data_to_dataset()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.upload_data_to_dataset) method. ``` dataset_resource = data_manager.create_dataset("my-bestbuy-dataset", dataset_schema_id) dataset_id = dataset_resource["id"] print() print("Dataset created:") pprint(dataset_resource) print() print(f"Dataset ID: {dataset_id}") # Compress file first for a faster upload ! gzip -9 -c bestBuy.csv > bestBuy.csv.gz ``` Note that the data upload can take a few minutes. Please do not restart the process while the cell is still running. ``` # Open in binary mode. with open('bestBuy.csv.gz', 'rb') as file_handle: dataset_resource = data_manager.upload_data_to_dataset(dataset_id, file_handle) print() print("Dataset after data upload:") print() pprint(dataset_resource) ``` Note that the Dataset status changed from `NO_DATA` to `VALIDATING`. Dataset validation is a background process. The status will eventually change from `VALIDATING` to `SUCCEEDED`. The SDK provides the [`DataManagerClient.wait_for_dataset_validation()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.wait_for_dataset_validation) method to poll for the Dataset validation. ``` dataset_resource = data_manager.wait_for_dataset_validation(dataset_id) print() print("Dataset after validation has finished:") print() pprint(dataset_resource) ``` If the status is `FAILED` instead of `SUCCEEDED`, then the `validationMessage` will contain details about the validation failure. To better understand the Dataset lifecycle, refer to the [corresponding document on help.sap.com](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/a9b7429687a04e769dbc7955c6c44265.html). ## Summary Exercise 01.2 In exercise 01.2, we have covered the following topics: * How to create a DatasetSchema * How to upload a Dataset to the service You can find optional exercises related to exercise 01.2 [below](#Optional-Exercises-for-01.2). # Exercise 01.3 *Back to [table of contents](#Table-of-Contents)* *To perform this exercise, you need to execute the code in all previous exercises.* In exercise 01.3, we will train the model. ## Training the Model The Dataset is now uploaded and has been validated successfully by the service. To train a machine learning model, we first need to select the correct model template. 
### Selecting the right ModelTemplate The Data Attribute Recommendation service currently supports two different ModelTemplates: | ID | Name | Description | |--------------------------------------|---------------------------|---------------------------------------------------------------------------| | d7810207-ca31-4d4d-9b5a-841a644fd81f | **Hierarchical template** | Recommended for the prediction of multiple classes that form a hierarchy. | | 223abe0f-3b52-446f-9273-f3ca39619d2c | **Generic template** | Generic neural network for multi-label, multi-class classification. | | 188df8b2-795a-48c1-8297-37f37b25ea00 | **AutoML template** | Finds the [best traditional machine learning model out of several traditional algorithms](https://blogs.sap.com/2021/04/28/how-does-automl-works-in-data-attribute-recommendation/). Single label only. | We are building a model to predict product hierarchies. The **Hierarchical Template** is correct for this scenario. In this template, the first label in the DatasetSchema is considered the top-level category. Each subsequent label is considered to be further down in the hierarchy. Coming back to our example DatasetSchema: ```json { "labels": [ {"label": "level1_category", "type": "CATEGORY"}, {"label": "level2_category", "type": "CATEGORY"}, {"label": "level3_category", "type": "CATEGORY"} ] } ``` The first defined label is `level1_category`, which is given more weight during training than `level3_category`. Refer to the [official documentation on ModelTemplates](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/1e76e8c636974a06967552c05d40e066.html) to learn more. Additional model templates may be added over time, so check back regularly. ## Starting the training When working with models, we use the [`ModelManagerClient`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient) class. To start the training, we need the IDs of the dataset and the desired model template. We also have to provide a name for the model. The [`ModelManagerClient.create_job()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.create_job) method launches the training Job. *Only one model of a given name can exist. If you receive a message stating 'The model name specified is already in use', you either have to remove the job and its associated model first or you have to change the `model_name` variable name below. You can also [clean up the entire service instance](#Cleaning-up-a-service-instance).* ``` from sap.aibus.dar.client.model_manager_client import ModelManagerClient from sap.aibus.dar.client.exceptions import DARHTTPException model_manager = ModelManagerClient.construct_from_service_key(SERVICE_KEY) model_template_id = "d7810207-ca31-4d4d-9b5a-841a644fd81f" # hierarchical template model_name = "bestbuy-hierarchy-model" job_resource = model_manager.create_job(model_name, dataset_id, model_template_id) job_id = job_resource['id'] print() print("Job resource:") print() pprint(job_resource) print() print(f"ID of submitted Job: {job_id}") ``` The job is now running in the background. Similar to the DatasetValidation, we have to poll the job until it succeeds. 
The SDK provides the [`ModelManagerClient.wait_for_job()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_job) method: ``` job_resource = model_manager.wait_for_job(job_id) print() print("Job resource after training is finished:") pprint(job_resource) ``` To better understand the Training Job lifecycle, see the [corresponding document on help.sap.com](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/0fc40aa077ce4c708c1e5bfc875aa3be.html). ## Intermission The model training will take between 5 and 10 minutes. In the meantime, we can explore the available [resources](#Resources) for both the service and the SDK. ## Inspecting the Model Once the training job is finished successfully, we can inspect the model using [`ModelManagerClient.read_model_by_name()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_model_by_name). ``` model_resource = model_manager.read_model_by_name(model_name) print() pprint(model_resource) ``` In the model resource, the `validationResult` key provides information about model performance. You can also use these metrics to compare performance of different [ModelTemplates](#Selecting-the-right-ModelTemplate) or different datasets. ## Summary Exercise 01.3 In exercise 01.3, we have covered the following topics: * How to select the appropriate ModelTemplate * How to train a Model from a previously uploaded Dataset You can find optional exercises related to exercise 01.3 [below](#Optional-Exercises-for-01.3). # Exercise 01.4 *Back to [table of contents](#Table-of-Contents)* *To perform this exercise, you need to execute the code in all previous exercises.* In exercise 01.4, we will deploy the model and predict labels for some unlabeled data. ## Deploying the Model The training job has finished and the model is ready to be deployed. By deploying the model, we create a server process in the background on the Data Attribute Recommendation service which will serve inference requests. In the SDK, the [`ModelManagerClient.create_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#module-sap.aibus.dar.client.model_manager_client) method lets us create a Deployment. ``` deployment_resource = model_manager.create_deployment(model_name) deployment_id = deployment_resource["id"] print() print("Deployment resource:") print() pprint(deployment_resource) print(f"Deployment ID: {deployment_id}") ``` *Note: if you are using a trial account and you see errors such as 'The resource can no longer be used. Usage limit has been reached', consider [cleaning up the service instance](#Cleaning-up-a-service-instance) to free up limited trial resources.* Similar to the data upload and the training job, model deployment is an asynchronous process. We have to poll the API until the Deployment is in status `SUCCEEDED`. The SDK provides the [`ModelManagerClient.wait_for_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_deployment) for this purposes. ``` deployment_resource = model_manager.wait_for_deployment(deployment_id) print() print("Finished deployment resource:") print() pprint(deployment_resource) ``` Once the Deployment is in status `SUCCEEDED`, we can run inference requests. 
To better understand the Deployment lifecycle, see the [corresponding document on help.sap.com](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/f473b5b19a3b469e94c40eb27623b4f0.html). *For trial users: the deployment will be stopped after 8 hours. You can restart it by deleting the deployment and creating a new one for your model. The [`ModelManagerClient.ensure_deployment_exists()`](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) method will delete and re-create automatically. Then, you need to poll until the deployment is succeeded using [`ModelManagerClient.wait_for_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_deployment) as above.* ## Executing Inference requests With a single inference request, we can send up to 50 objects to the service to predict the labels. The data send to the service must match the `features` section of the DatasetSchema created earlier. The `labels` defined inside of the DatasetSchema will be predicted for each object and returned as a response to the request. In the SDK, the [`InferenceClient.create_inference_request()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.inference_client.InferenceClient.create_inference_request) method handles submission of inference requests. ``` from sap.aibus.dar.client.inference_client import InferenceClient inference = InferenceClient.construct_from_service_key(SERVICE_KEY) objects_to_be_classified = [ { "features": [ {"name": "manufacturer", "value": "Energizer"}, {"name": "description", "value": "Alkaline batteries; 1.5V"}, {"name": "price", "value": "5.99"}, ], }, ] inference_response = inference.create_inference_request(model_name, objects_to_be_classified) print() print("Inference request processed. Response:") print() pprint(inference_response) ``` *Note: For trial accounts, you only have a limited number of objects which you can classify.* You can also try to come up with your own example: ``` my_own_items = [ { "features": [ {"name": "manufacturer", "value": "EDIT THIS"}, {"name": "description", "value": "EDIT THIS"}, {"name": "price", "value": "0.00"}, ], }, ] inference_response = inference.create_inference_request(model_name, my_own_items) print() print("Inference request processed. Response:") print() pprint(inference_response) ``` You can also classify multiple objects at once. For each object, the `top_n` parameter determines how many predictions are returned. ``` objects_to_be_classified = [ { "objectId": "optional-identifier-1", "features": [ {"name": "manufacturer", "value": "Energizer"}, {"name": "description", "value": "Alkaline batteries; 1.5V"}, {"name": "price", "value": "5.99"}, ], }, { "objectId": "optional-identifier-2", "features": [ {"name": "manufacturer", "value": "Eidos"}, {"name": "description", "value": "Unravel a grim conspiracy at the brink of Revolution"}, {"name": "price", "value": "19.99"}, ], }, { "objectId": "optional-identifier-3", "features": [ {"name": "manufacturer", "value": "Cadac"}, {"name": "description", "value": "CADAC Grill Plate for Safari Chef Grills: 12\"" + "cooking surface; designed for use with Safari Chef grills;" + "105 sq. in. 
cooking surface; PTFE nonstick coating;" + " 2 grill surfaces"
        },
        {"name": "price", "value": "39.99"},
    ],
    }
]

inference_response = inference.create_inference_request(model_name, objects_to_be_classified, top_n=3)

print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
```

We can see that the service now returns the `n-best` predictions for each label, as indicated by the `top_n` parameter.

In some cases, the predicted category has the special value `nan`. In the `bestBuy.csv` data set, not all records have the full set of three categories. Some records only have a top-level category. The model learns this fact from the data and will occasionally suggest that a record should not have a category.

```
# Inspect all video games with just a top-level category entry
video_games = df[df['level1_category'] == 'Video Games']
video_games.loc[video_games['level2_category'].isna() & video_games['level3_category'].isna()].head(5)
```

To learn how to execute inference calls without the SDK, just using the underlying RESTful API, see [Inference without the SDK](#Inference-without-the-SDK).

## Summary Exercise 01.4

In exercise 01.4, we have covered the following topics:

* How to deploy a previously trained model
* How to execute inference requests against a deployed model

You can find optional exercises related to exercise 01.4 [below](#Optional-Exercises-for-01.4).

# Wrapping up

In this workshop, we looked into the following topics:

* Installation of the Python SDK for Data Attribute Recommendation
* Modelling data with a DatasetSchema
* Uploading data into a Dataset
* Training a model
* Predicting labels for unlabelled data

Using these tools, we are able to solve the problem of missing Master Data attributes starting from just a CSV file containing training data.

Feel free to revisit the workshop materials at any time. The [resources](#Resources) section below contains additional reading.

If you would like to explore the additional capabilities of the SDK, visit the [optional exercises](#Optional-Exercises) below.

## Cleanup

During the course of the workshop, we have created several resources on the Data Attribute Recommendation Service:

* DatasetSchema
* Dataset
* Job
* Model
* Deployment

The SDK provides several methods to delete these resources. Note that there are dependencies between objects: you cannot delete a Dataset without deleting the Model beforehand.

You will need to set `CLEANUP_SESSION = True` below to execute the cleanup.

```
# Clean up all resources created earlier
CLEANUP_SESSION = False

def cleanup_session():
    model_manager.delete_deployment_by_id(deployment_id)  # this can take a few seconds
    model_manager.delete_model_by_name(model_name)
    model_manager.delete_job_by_id(job_id)
    data_manager.delete_dataset_by_id(dataset_id)
    data_manager.delete_dataset_schema_by_id(dataset_schema_id)
    print("DONE cleaning up!")

if CLEANUP_SESSION:
    print("Cleaning up resources generated in this session.")
    cleanup_session()
else:
    print("Not cleaning up. 
Set 'CLEANUP_SESSION = True' above and run again!") ``` ## Resources *Back to [table of contents](#Table-of-Contents)* ### SDK Resources * [SDK source code on Github](https://github.com/SAP/data-attribute-recommendation-python-sdk) * [SDK documentation](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/) * [How to obtain support](https://github.com/SAP/data-attribute-recommendation-python-sdk/blob/master/README.md#how-to-obtain-support) * [Tutorials: Classify Data Records with the SDK for Data Attribute Recommendation](https://developers.sap.com/group.cp-aibus-data-attribute-sdk.html) ### Data Attribute Recommendation * [SAP Help Portal](https://help.sap.com/viewer/product/Data_Attribute_Recommendation/SHIP/en-US) * [API Reference](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/b45cf9b24fd042d082c16191aa938c8d.html) * [Tutorials using Postman - interact with the service RESTful API directly](https://developers.sap.com/mission.cp-aibus-data-attribute.html) * [Trial Account Limits](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) * [Metering and Pricing](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/1e093326a2764c298759fcb92c5b0500.html) ## Addendum ### Inference without the SDK *Back to [table of contents](#Table-of-Contents)* The Data Attribute Service exposes a RESTful API. The SDK we use in this workshop uses this API to interact with the DAR service. For custom integration, you can implement your own client for the API. The tutorial "[Use Machine Learning to Classify Data Records]" is a great way to explore the Data Attribute Recommendation API with the Postman REST client. Beyond the tutorial, the [API Reference] is a comprehensive documentation of the RESTful interface. [Use Machine Learning to Classify Data Records]: https://developers.sap.com/mission.cp-aibus-data-attribute.html [API Reference]: https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/b45cf9b24fd042d082c16191aa938c8d.html To demonstrate the underlying API, the next example uses the `curl` command line tool to perform an inference request against the Inference API. The example uses the `jq` command to extract the credentials from the service. The authentication token is retrieved from the `uaa_url` and then used for the inference request. ``` # If the following example gives you errors that the jq or curl commands cannot be found, # you may be able to install them from conda by uncommenting one of the lines below: #%conda install -q jq #%conda install -q curl %%bash -s "$model_name" # Pass the python model_name variable as the first argument to shell script model_name=$1 echo "Model: $model_name" key=$(cat key.json) url=$(echo $key | jq -r .url) uaa_url=$(echo $key | jq -r .uaa.url) clientid=$(echo $key | jq -r .uaa.clientid) clientsecret=$(echo $key | jq -r .uaa.clientsecret) echo "Service URL: $url" token_url=${uaa_url}/oauth/token?grant_type=client_credentials echo "Obtaining token with clientid $clientid from $token_url" bearer_token=$(curl \ --silent --show-error \ --user $clientid:$clientsecret \ $token_url \ | jq -r .access_token ) inference_url=${url}/inference/api/v3/models/${model_name}/versions/1 echo "Running inference request against endpoint $inference_url" echo "" # We pass the token in the Authorization header. # The payload for the inference request is passed as # the body of the POST request below. 
# The output of the curl command is piped through `jq` # for pretty-printing curl \ --silent --show-error \ --header "Authorization: Bearer ${bearer_token}" \ --header "Content-Type: application/json" \ -XPOST \ ${inference_url} \ -d '{ "objects": [ { "features": [ { "name": "manufacturer", "value": "Energizer" }, { "name": "description", "value": "Alkaline batteries; 1.5V" }, { "name": "price", "value": "5.99" } ] } ] }' | jq ``` ### Cleaning up a service instance *Back to [table of contents](#Table-of-Contents)* To clean all data on the service instance, you can run the following snippet. The code is self-contained and does not require you to execute any of the cells above. However, you will need to have the `key.json` containing a service key in place. You will need to set `CLEANUP_EVERYTHING = True` below to execute the cleanup. **NOTE: This will delete all data on the service instance!** ``` CLEANUP_EVERYTHING = False def cleanup_everything(): import logging import sys logging.basicConfig(level=logging.INFO, stream=sys.stdout) import json import os if not os.path.exists("key.json"): msg = "key.json is not found. Please follow instructions above to create a service key of" msg += " Data Attribute Recommendation. Then, upload it into the same directory where" msg += " this notebook is saved." print(msg) raise ValueError(msg) with open("key.json") as file_handle: key = file_handle.read() SERVICE_KEY = json.loads(key) from sap.aibus.dar.client.model_manager_client import ModelManagerClient model_manager = ModelManagerClient.construct_from_service_key(SERVICE_KEY) for deployment in model_manager.read_deployment_collection()["deployments"]: model_manager.delete_deployment_by_id(deployment["id"]) for model in model_manager.read_model_collection()["models"]: model_manager.delete_model_by_name(model["name"]) for job in model_manager.read_job_collection()["jobs"]: model_manager.delete_job_by_id(job["id"]) from sap.aibus.dar.client.data_manager_client import DataManagerClient data_manager = DataManagerClient.construct_from_service_key(SERVICE_KEY) for dataset in data_manager.read_dataset_collection()["datasets"]: data_manager.delete_dataset_by_id(dataset["id"]) for dataset_schema in data_manager.read_dataset_schema_collection()["datasetSchemas"]: data_manager.delete_dataset_schema_by_id(dataset_schema["id"]) print("Cleanup done!") if CLEANUP_EVERYTHING: print("Cleaning up all resources in this service instance.") cleanup_everything() else: print("Not cleaning up. Set 'CLEANUP_EVERYTHING = True' above and run again.") ``` ### Optional Exercises *Back to [table of contents](#Table-of-Contents)* To work with the optional exercises, create a new cell in the Jupyter notebook by clicking the `+` button in the menu above or by using the `b` shortcut on your keyboard. You can then enter your code in the new cell and execute it. #### Optional Exercises for 01.2 ##### DatasetSchemas Use the [`DataManagerClient.read_dataset_schema_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_schema_by_id) and the [`DataManagerClient.read_dataset_schema_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_schema_collection) methods to list the newly created and all DatasetSchemas, respectively. 
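As a hint for this optional exercise, here is a minimal sketch of what these two calls could look like, reusing the `data_manager` client and the `dataset_schema_id` variable from exercise 01.2. The method names come from the SDK documentation linked above; the exact shape of the returned dictionaries is best explored interactively.

```
# Read back the DatasetSchema we created earlier (sketch for the optional exercise)
single_schema = data_manager.read_dataset_schema_by_id(dataset_schema_id)
pprint(single_schema)

# List all DatasetSchemas on this service instance
all_schemas = data_manager.read_dataset_schema_collection()
pprint(all_schemas)
```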
##### Datasets Use the [`DataManagerClient.read_dataset_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_by_id) and the [`DataManagerClient.read_dataset_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_collection) methods to inspect the newly created dataset. Instead of using two separate methods to upload data and wait for validation to finish, you can also use [`DataManagerClient.upload_data_and_validate()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.upload_data_and_validate). #### Optional Exercises for 01.3 ##### ModelTemplates Use the [`ModelManagerClient.read_model_template_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_model_template_collection) to list all existing model templates. ##### Jobs Use [`ModelManagerClient.read_job_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_job_by_id) and [`ModelManagerClient.read_job_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_job_collection) to inspect the job we just created. The entire process of uploading the data and starting the training is also available as a single method call in [`ModelCreator.create()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.workflow.model.ModelCreator.create). #### Optional Exercises for 01.4 ##### Deployments Use [`ModelManagerClient.read_deployment_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_deployment_by_id) and [`ModelManagerClient.read_deployment_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_deployment_collection) to inspect the Deployment. Use the [`ModelManagerclient.lookup_deployment_id_by_model_name()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.lookup_deployment_id_by_model_name) method to find the deployment ID for a given model name. ##### Inference Use the [`InferenceClient.do_bulk_inference()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.inference_client.InferenceClient.do_bulk_inference) method to process more than fifty objects at a time. Note how the data format returned changes.
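For orientation, a hedged sketch of a bulk inference call follows. The assumption here is that `do_bulk_inference()` accepts the model name and a list of objects shaped like the ones passed to `create_inference_request()` above; check the linked SDK documentation for the exact signature and return format.

```
# Sketch only: classify a batch larger than the 50-object limit of a single inference request
many_objects = objects_to_be_classified * 30   # reuse the sample objects to build a larger batch
bulk_response = inference.do_bulk_inference(model_name, many_objects)
print(len(bulk_response))
```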
github_jupyter
# Summary

This interactive notebook aims to demonstrate the relationships between the physico-chemical properties of vegetation and the solar spectrum. To do so we will use simulation models, in particular radiative transfer models both at the level of an individual leaf and at the level of the vegetation canopy.

# Instructions

Read all the text carefully and follow its instructions. Once you have read each text section, run the following code cell (marked as `In []`) by clicking the `Run` icon or pressing ALT + ENTER on the keyboard. A graphical interface will appear with which you can carry out the assigned tasks.

As an example, run the following cell to import all the libraries needed for the notebook to work properly. Once it has run, a thank-you message should appear.

```
%matplotlib inline
from ipywidgets import interactive, fixed
from IPython.display import display
from functions import prosail_and_spectra as fn
```

# Leaf spectrum

The spectral properties of a leaf (its transmittance, reflectance and absorptance) depend on its pigment concentration, its water content, its specific weight and the internal structure of its tissues.

We will use the ProspectD model, a simplification of reality that simulates the spectrum from the concentration of chlorophylls (`Cab`), carotenoids (`Car`) and anthocyanins (`Ant`), as well as the weight of water per unit area (`Cw`) and the weight of the remaining dry matter (`Cm`), which comprises celluloses, lignins (the main contributors to leaf biomass) and other protein components. It also includes a semi-empirical parameter that represents other pigments responsible for the colour of senescent and diseased leaves. In addition, in order to simulate leaves with different cellular structures, it includes a final parameter (`Nf`) that emulates the different layers and cellular tissues of the leaf.

![Prospect model diagram](./input/figures/prospect.png "Schematic representation of the Prospect model. The leaf is represented by a number of layers (N >= 1) with identical spectral properties")

> If you want to know more about the ProspectD model, see this [publication](./lecturas_adicionales/ProspectD_model.pdf).
>
> If you want more details about the calculations and the model code, click [here](https://github.com/hectornieto/pypro4sail/blob/b111891e0a2c01b8b3fa5ff41790687d31297e5f/pypro4sail/prospect.py#L46).

Run the following cell and you will see a typical leaf spectrum. The plot shows the reflectance (on the y axis), the transmittance (on the secondary y axis, with inverted values) and the absorptance (as the space between the reflectance and transmittance curves), $\rho + \tau + \alpha = 1$.

Pay attention to how, and in which regions, the spectrum changes depending on the parameter you modify.

* Vary the chlorophyll.
* Vary the water content.
* Vary the dry matter.
* Vary the brown pigments from a value of 0 (healthy leaf) to higher values (diseased or dry leaf).

```
w_rho_leaf = interactive(fn.update_prospect_spectrum, N_leaf=fn.w_nleaf, Cab=fn.w_cab, Car=fn.w_car, Ant=fn.w_ant, Cbrown=fn.w_cbrown, Cw=fn.w_cw, Cm=fn.w_cm)
display(w_rho_leaf)
```

Note the following:

* The chlorophyll concentration `Cab` mainly affects the visible (RGB) and *red-edge* (R-E) regions, with more absorption in the red and blue and more reflection in the green.
This is why most leaves appear green.
* The water content `Cw` mainly affects absorption in the shortwave infrared (SWIR), with absorption maxima around 1460 and 2100 nm.
* The dry matter `Cm` mainly affects absorption in the near infrared (NIR).
* Other pigments affect the visible spectrum to a lesser extent. For example, the anthocyanins `Ant`, which typically appear during senescence, shift the green reflection peak towards the red, especially when the chlorophyll concentration decreases at the same time.
* The `N` parameter affects the ratio between reflectance and transmittance. The more *layers* a leaf has, the more multiple-scattering events occur and the more it reflects.

> You can also see this phenomenon in the double- or triple-glazed windows used for insulation, for instance in shop windows. Unless you stand right in front of and close to the window, it looks more like a mirror than a window.

# Soil spectrum

The spectrum of the canopy or vegetated surface depends not only on the spectrum and properties of the leaves, but also on the structure of the canopy itself and on the soil. In particular, in open or sparse canopies, such as during the early phenological stages, the spectral behaviour of the soil can strongly influence the spectral signal captured by remote sensing sensors.

The soil spectrum depends on several factors, such as its mineralogical composition, organic matter, texture and density, as well as its surface moisture. Run the following cell and look at the different spectral characteristics of different soil types.

```
w_rho_soil = interactive(fn.update_soil_spectrum, soil_name=fn.w_soil)
display(w_rho_soil)
```

Note how different a soil spectrum can be compared to that of a leaf. This is key when classifying cover types by remote sensing, as well as when quantifying crop vigour and density. Note that more saline (`aridisol.salorthid`) or gypsic (`aridisol.gypsiorthd`) soils have a higher reflectance, especially in the visible (RGB). In other words, they are whiter than other soils.

# Canopy spectrum

Finally, by integrating the spectral signature of a leaf and of the underlying soil we can obtain the spectrum of a vegetation canopy.

The spectrum of the vegetated surface also depends on the structure of the canopy, mainly on the amount of leaves per unit of ground area (defined as the Leaf Area Index) and on how these leaves are oriented with respect to the vertical. In addition, since the incident and reflected light interacts between the leaf volume and the soil, the positions of the sun and of the sensor influence the spectral signal we obtain.

For this part we will combine the ProspectD transfer model, to simulate the spectrum of a leaf, with another transfer model at the canopy level (4SAIL). The latter model treats the vegetated surface as a horizontally and vertically homogeneous layer, so caution is advised when applying it to heterogeneous tree canopies.
![4SAIL model diagram](./input/figures/4sail.png "Schematic representation of the 4SAIL model")

> If you want to know more about the 4SAIL model, see this [publication](./lecturas_adicionales/4SAIL_model.pdf).
>
> If you want more details about the calculations and the model code, click [here](https://github.com/hectornieto/pypro4sail/blob/b111891e0a2c01b8b3fa5ff41790687d31297e5f/pypro4sail/four_sail.py#L245).

Run the following cell and see how the [leaf](#Leaf-spectrum) and [soil](#Soil-spectrum) spectra generated previously are integrated to obtain a spectrum of the vegetated surface.

> You can modify the leaf and soil spectra, and this plot will update automatically.

```
w_rho_canopy = interactive(fn.update_4sail_spectrum,
                           lai=fn.w_lai, hotspot=fn.w_hotspot, leaf_angle=fn.w_leaf_angle,
                           sza=fn.w_sza, vza=fn.w_vza, psi=fn.w_psi, skyl=fn.w_skyl,
                           leaf_spectrum=fixed(w_rho_leaf), soil_spectrum=fixed(w_rho_soil))
display(w_rho_canopy)
```

Remember from the [net radiation practical](./ES_radiacion_neta.ipynb) that a vegetated surface has certain anisotropic properties, which means that it reflects differently depending on the illumination and observation geometry.

See how the spectrum changes when you vary the view zenith angle (VZA), the sun zenith angle (SZA) and the relative azimuth angle (PSI) between the sun and the observer.

Vary the LAI and set it to zero (no vegetation). Check that the resulting spectrum is exactly the soil spectrum. Now increase the LAI slightly; you will see how the spectrum changes, with the reflectance in the red and blue decreasing (due to leaf chlorophyll) and the reflectance in the *red-edge* and NIR increasing.

Also remember from the [net radiation practical](./ES_radiacion_neta.ipynb) the effect of the angular arrangement of the leaves. With a nadir view (VZA=0), vary the typical leaf angle (`Leaf Angle`) from a predominantly horizontal value (0º) to a predominantly vertical angle (90º).

# Parameter sensitivity

In this task you will see how the spectral behaviour of vegetation changes as its physico-chemical parameters vary, as well as its sensitivity to the observation and illumination conditions.

To do this we will carry out a sensitivity analysis, varying a single parameter at a time while the rest of the parameters remain constant. You can change the individual values of the remaining parameters (the previous plots will also be updated). Then select which parameter you want to analyse and the minimum and maximum values of the range you want it to cover.

```
w_sensitivity = interactive(fn.prosail_sensitivity,
                            N_leaf=fn.w_nleaf, Cab=fn.w_cab, Car=fn.w_car, Ant=fn.w_ant, Cbrown=fn.w_cbrown, Cw=fn.w_cw, Cm=fn.w_cm,
                            lai=fn.w_lai, hotspot=fn.w_hotspot, leaf_angle=fn.w_leaf_angle,
                            sza=fn.w_sza, vza=fn.w_vza, psi=fn.w_psi, skyl=fn.w_skyl,
                            soil_name=fn.w_soil, var=fn.w_param, value_range=fn.w_range)
display(w_sensitivity)
```

Start with the sensitivity of the spectrum to the chlorophyll concentration. You will see that the regions where most of the variation occurs are the green and the red.
Also note that in the *red-edge*, the transition zone between the red and the NIR, a "shift" of the signal occurs. This phenomenon is key and is the reason why newer sensors (Sentinel, recent UAV cameras) include this region to help estimate chlorophyll and hence photosynthetic activity.

Evaluate the sensitivity of the spectrum to the other pigments (`Car` or `Ant`). You will see that the spectral response to these other pigments is weaker, which means they are harder to estimate from remote sensing. By contrast, the spectral variation with the brown pigments is quite strong; as a reminder, these pigments represent the colour changes that occur in diseased and dead leaves.

> This implies that it is relatively feasible to detect and quantify plant health problems in vegetation.

Now look at the sensitivity to LAI when its range is small (e.g. from 0 to 2). You will see that the spectrum changes significantly as the LAI increases. Now look at the sensitivity when the LAI covers higher values (e.g. from 2 to 4); you will see that the variation in the spectrum is much smaller. It is often said that at high LAI values the spectrum tends to "saturate", so the signal becomes less sensitive.

> It is easier to estimate LAI with a smaller margin of error in crops with low leaf density or in early phenological stages than in very dense crops or vegetation.

Now keep the LAI fixed at a high value (e.g. 3) and vary the view zenith angle between 0º (nadir) and an oblique view (e.g. 35º). You will see that, despite a high LAI, which we have just seen makes the signal less sensitive, there are larger spectral variations when the observation geometry varies.

> Thanks to the anisotropy of vegetation, the spectral variations with respect to the observation and illumination geometry can help retrieve the LAI under high-density conditions.

Now look at the leaf specific weight, i.e. the amount of dry matter (`Cm`). You will see that, depending on the leaf specific weight, important variations occur in the NIR and SWIR.

> Leaf biomass can be computed from the product of `LAI` and `Cm`, so it is feasible to estimate the leaf biomass of a crop. This information can be useful, for example, to estimate the final yield of some crops, such as cereals.

The `hotspot` parameter is a semi-empirical parameter related to the relative size of the leaf with respect to the height of the canopy. It affects how leaves shade other leaves within the canopy, so its strongest effect is observed when the observer (sensor) is in exactly the same position as the sun. To see this, set similar values for VZA and SZA, and set the relative azimuth angle PSI to 0º. Now vary the hotspot. With the observer placed over the illuminated part of the vegetation, the relative size of the leaves plays an important role, since the larger they are, the larger the directly illuminated crown volume will be.

![Hotspot effect](./input/figures/hotspot.png "Effect of the hotspot on the reflectance of a canopy. Taken from https://doi.org/10.3390/rs11192239")

# The signal of a sensor

So far we have looked at the detailed spectral behaviour of vegetation.
However, the sensors on board satellites, aircraft and drones do not measure the whole spectrum continuously; instead they sample the spectrum around specific bands, strategically selected to try to capture the most relevant biophysical aspects. The way in which a given sensor integrates the spectrum in order to provide the information in its different bands is called the spectral response function. Each sensor, for each of its bands, has its own spectral response function.

In this task we will look at the spectral responses of the sensors we will use most frequently: Landsat, Sentinel-2 and Sentinel-3. We will also look at the spectral behaviour of a typical camera used on drones.

We start from the simulations generated earlier. Select the sensor you want to simulate to see how each of the sensors would "see" those same spectra.

```
w_rho_sensor = interactive(fn.sensor_sensitivity,
                           sensor=fn.w_sensor, spectra=fixed(w_sensitivity))
display(w_rho_sensor)
```

Run a sensitivity analysis for chlorophyll again and compare the spectral response that Landsat, Sentinel-2 and a UAV camera would give.

# Deriving vegetation parameters

So far we have seen how the surface spectrum varies with respect to the different biophysical parameters. However, our final objective is the opposite: starting from a spectrum, or from certain spectral bands, we want to estimate one or several biophysical variables of interest. In particular, for the purpose of computing evapotranspiration and water use efficiency, we may be interested in estimating the leaf area index and/or the fraction of absorbed PAR radiation, as well as chlorophylls or other pigments.

One of the typical methods is to develop empirical relationships between the bands (or between vegetation indices) and data sampled in the field. This may give us the most reliable answer for our study plot but, as you have seen above, the spectral signal depends on many other factors, which may mean that a relationship calibrated with a few local samples cannot be extrapolated or applied to other crops or regions.

Another alternative is to build synthetic databases from simulations. That is what we are going to do in this task. We will run 5000 simulations, varying the parameter values randomly within a range of values that can be expected in our study areas. For example, if you work with perennial crops you may want to keep a LAI range with minimum values noticeably above zero, whereas if you work with annual crops the value 0 is needed to reflect the development of the crop from planting through emergence to maturity.

Since there is a large number of parameters and it is quite likely that we do not know the plausible range for most crops, do not worry: leave the default values and focus on the parameters you are most confident about. You can also choose one or several soil types, depending on the soils of your area.

> You could even upload a typical soil spectrum from your area to the folder [./input/soil_spectral_library](./input/soil_spectral_library). Just make sure the text file has two columns, the first with the wavelengths from 400 to 2500 and the second column with the corresponding reflectance.
Finally, select the sensor for which you want to generate the signal.

Once you have configured your simulation environment, click on the `Generar Simulaciones` button. The system will take a while, but after a few minutes it will return a series of plots.

> You may get a warning message; do not worry, in principle everything should work normally.

```
w_rho_sensor = interactive(fn.build_random_simulations,
                           {"manual": True, "manual_name": "Generar simulaciones"},
                           n_sim=fixed(5000),
                           n_leaf_range=fn.w_range_nleaf,
                           cab_range=fn.w_range_cab,
                           car_range=fn.w_range_car,
                           ant_range=fn.w_range_ant,
                           cbrown_range=fn.w_range_cbrown,
                           cw_range=fn.w_range_cw,
                           cm_range=fn.w_range_cm,
                           lai_range=fn.w_range_lai,
                           hotspot_range=fn.w_range_hotspot,
                           leaf_angle_range=fn.w_range_leaf_angle,
                           sza=fn.w_sza,
                           vza=fn.w_vza,
                           psi=fn.w_psi,
                           skyl=fn.w_skyl,
                           soil_names=fn.w_soils,
                           sensor=fn.w_sensor)
display(w_rho_sensor)
```

The figure shows 4 examples of relationships between 3 typical vegetation indices and 4 biophysical variables.

* NDVI: Normalized Difference Vegetation Index. It is the most widely used vegetation index. It is generally related to LAI, foliar biomass and/or the fraction of intercepted/absorbed radiation.

$$NDVI = \frac{\rho_{NIR} - \rho_{R}}{\rho_{NIR} + \rho_{R}}$$

* NDRE: Normalized Difference Red-Edge. It is a vegetation index that uses the red-edge region, so it cannot be computed for every sensor. It is generally related to chlorophyll.

$$NDRE = \frac{\rho_{NIR} - \rho_{R-E}}{\rho_{NIR} + \rho_{R-E}}$$

* NDWI: Normalized Difference Water Index. It is a vegetation index that uses the SWIR region, so it cannot be computed for every sensor. It is generally related to the water content of the vegetation.

$$NDWI = \frac{\rho_{NIR} - \rho_{SWIR}}{\rho_{NIR} + \rho_{SWIR}}$$

The simulations have been saved to a file, prosail_simulations.csv, in the [./output](./output/prosail_simulations.csv) folder. Download this file, compute different vegetation indices, and try to develop statistical relationships and models between the bands or vegetation indices and the biophysical parameters. You can use any software you are used to working with (Excel, R, SPSS, ...).

You can run as many simulations as you consider necessary, for example changing the sensor or modifying the plausible ranges to cover different functional types of vegetation. Just bear in mind that every time a new set of simulations is generated the csv file is overwritten. **So download it, or make a copy in your virtual folder, before running new simulations.**
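As a minimal sketch of this exercise in Python instead of Excel/R/SPSS (the column names `red`, `nir` and `lai` are placeholders; check the actual header of the csv file first):

```
import numpy as np
import pandas as pd

sims = pd.read_csv("./output/prosail_simulations.csv")
print(sims.columns)  # check the real band and parameter names here

# Replace "red", "nir" and "lai" with the names listed above
ndvi = (sims["nir"] - sims["red"]) / (sims["nir"] + sims["red"])

# A first empirical relation LAI ~ NDVI; expect it to degrade at high LAI (saturation)
slope, intercept = np.polyfit(ndvi, sims["lai"], 1)
r = np.corrcoef(ndvi, sims["lai"])[0, 1]
print(f"LAI = {slope:.2f} * NDVI + {intercept:.2f} (r = {r:.2f})")
```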
# Conclusions

In this practical we have seen how the spectrum of vegetation responds to the biophysical variables of the surface.

* LAI is probably the variable with the largest influence on the spectral response of vegetation.
* The chlorophyll concentration of the leaf mainly influences the visible and *red-edge* regions.
* The water content and the leaf specific weight mainly influence the spectrum from the NIR onwards.
* The observation and illumination geometry, as well as the spectral response of the soil, also influence the signal. This makes it difficult to apply a universal relationship when estimating a biophysical parameter.
* Radiative transfer models can help to estimate these parameters, although ideally field data are still needed for validation and/or statistical calibration.
* Sensors sample part of the spectrum around specific spectral bands. Therefore, an empirical relationship developed for a specific sensor may not be applicable or valid for another sensor.
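The last point can be illustrated with a minimal sketch of how a band integrates a continuous spectrum through its spectral response function; the Gaussian responses and the toy spectrum below are illustrative assumptions, not the real characteristics of any of the sensors mentioned above:

```
import numpy as np

def band_reflectance(wls, rho, srf):
    """Reflectance seen by a band: the spectrum weighted by its spectral response function."""
    return np.trapz(rho * srf, wls) / np.trapz(srf, wls)

wls = np.arange(400, 2501, dtype=float)                     # nm
rho = 0.05 + 0.45 / (1.0 + np.exp(-(wls - 720.0) / 15.0))   # toy red-edge-like spectrum

# Two hypothetical "red" bands with different centres and widths
srf_a = np.exp(-0.5 * ((wls - 665.0) / 15.0) ** 2)
srf_b = np.exp(-0.5 * ((wls - 690.0) / 30.0) ** 2)

# The same spectrum yields different band reflectances, so an empirical relation
# calibrated for one band definition does not automatically transfer to another
print(band_reflectance(wls, rho, srf_a), band_reflectance(wls, rho, srf_b))
```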
``` ls -l| tail -10 #G4 from google.colab import drive drive.mount('/content/gdrive') cp gdrive/My\ Drive/fingerspelling5.tar.bz2 fingerspelling5.tar.bz2 # rm -r surrey/ %rm -r dataset5/ # rm fingerspelling5.tar.bz2 # cd /media/datastorage/Phong/ !tar xjf fingerspelling5.tar.bz2 cd dataset5 mkdir surrey mkdir surrey/D mv dataset5/* surrey/D/ cd surrey cd .. #remove depth files import glob import os import shutil # get parts of image's path def get_image_parts(image_path): """Given a full path to an image, return its parts.""" parts = image_path.split(os.path.sep) #print(parts) filename = parts[2] filename_no_ext = filename.split('.')[0] classname = parts[1] train_or_test = parts[0] return train_or_test, classname, filename_no_ext, filename #del_folders = ['A','B','C','D','E'] move_folders_1 = ['A','B','C','E'] move_folders_2 = ['D'] # look for all images in sub-folders for folder in move_folders_1: class_folders = glob.glob(os.path.join(folder, '*')) for iid_class in class_folders: #move depth files class_files = glob.glob(os.path.join(iid_class, 'depth*.png')) print('copying %d files' %(len(class_files))) for idx in range(len(class_files)): src = class_files[idx] if "0001" not in src: train_or_test, classname, _, filename = get_image_parts(src) dst = os.path.join('train_depth', classname, train_or_test+'_'+ filename) # image directory img_directory = os.path.join('train_depth', classname) # create folder if not existed if not os.path.exists(img_directory): os.makedirs(img_directory) #copying shutil.copy(src, dst) else: print('ignor: %s' %src) #move color files for iid_class in class_folders: #move depth files class_files = glob.glob(os.path.join(iid_class, 'color*.png')) print('copying %d files' %(len(class_files))) for idx in range(len(class_files)): src = class_files[idx] train_or_test, classname, _, filename = get_image_parts(src) dst = os.path.join('train_color', classname, train_or_test+'_'+ filename) # image directory img_directory = os.path.join('train_color', classname) # create folder if not existed if not os.path.exists(img_directory): os.makedirs(img_directory) #copying shutil.copy(src, dst) # look for all images in sub-folders for folder in move_folders_2: class_folders = glob.glob(os.path.join(folder, '*')) for iid_class in class_folders: #move depth files class_files = glob.glob(os.path.join(iid_class, 'depth*.png')) print('copying %d files' %(len(class_files))) for idx in range(len(class_files)): src = class_files[idx] if "0001" not in src: train_or_test, classname, _, filename = get_image_parts(src) dst = os.path.join('test_depth', classname, train_or_test+'_'+ filename) # image directory img_directory = os.path.join('test_depth', classname) # create folder if not existed if not os.path.exists(img_directory): os.makedirs(img_directory) #copying shutil.copy(src, dst) else: print('ignor: %s' %src) #move color files for iid_class in class_folders: #move depth files class_files = glob.glob(os.path.join(iid_class, 'color*.png')) print('copying %d files' %(len(class_files))) for idx in range(len(class_files)): src = class_files[idx] train_or_test, classname, _, filename = get_image_parts(src) dst = os.path.join('test_color', classname, train_or_test+'_'+ filename) # image directory img_directory = os.path.join('test_color', classname) # create folder if not existed if not os.path.exists(img_directory): os.makedirs(img_directory) #copying shutil.copy(src, dst) # #/content %cd .. 
ls -l mkdir surrey/E/checkpoints cd surrey/ #MUL 1 - Inception - ST from keras.applications import MobileNet # from keras.applications import InceptionV3 # from keras.applications import Xception # from keras.applications.inception_resnet_v2 import InceptionResNetV2 # from tensorflow.keras.applications import EfficientNetB0 from keras.models import Model from keras.layers import concatenate from keras.layers import Dense, GlobalAveragePooling2D, Input, Embedding, SimpleRNN, LSTM, Flatten, GRU, Reshape # from keras.applications.inception_v3 import preprocess_input # from tensorflow.keras.applications.efficientnet import preprocess_input from keras.applications.mobilenet import preprocess_input from keras.layers import GaussianNoise def get_adv_model(): # f1_base = EfficientNetB0(include_top=False, weights='imagenet', # input_shape=(299, 299, 3), # pooling='avg') # f1_x = f1_base.output f1_base = MobileNet(weights='imagenet', include_top=False, input_shape=(224,224,3)) f1_x = f1_base.output f1_x = GlobalAveragePooling2D()(f1_x) # f1_x = f1_base.layers[-151].output #layer 5 # f1_x = GlobalAveragePooling2D()(f1_x) # f1_x = Flatten()(f1_x) # f1_x = Reshape([1,1280])(f1_x) # f1_x = SimpleRNN(2048, # return_sequences=False, # # dropout=0.8 # input_shape=[1,1280])(f1_x) #Regularization with noise f1_x = GaussianNoise(0.1)(f1_x) f1_x = Dense(1024, activation='relu')(f1_x) f1_x = Dense(24, activation='softmax')(f1_x) model_1 = Model(inputs=[f1_base.input],outputs=[f1_x]) model_1.summary() return model_1 from keras.callbacks import Callback import pickle import sys #Stop training on val_acc class EarlyStoppingByAccVal(Callback): def __init__(self, monitor='val_acc', value=0.00001, verbose=0): super(Callback, self).__init__() self.monitor = monitor self.value = value self.verbose = verbose def on_epoch_end(self, epoch, logs={}): current = logs.get(self.monitor) if current is None: warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning) if current >= self.value: if self.verbose > 0: print("Epoch %05d: early stopping" % epoch) self.model.stop_training = True #Save large model using pickle formate instead of h5 class SaveCheckPoint(Callback): def __init__(self, model, dest_folder): super(Callback, self).__init__() self.model = model self.dest_folder = dest_folder #initiate self.best_val_acc = 0 self.best_val_loss = sys.maxsize #get max value def on_epoch_end(self, epoch, logs={}): val_acc = logs['val_acc'] val_loss = logs['val_loss'] if val_acc > self.best_val_acc: self.best_val_acc = val_acc # Save weights in pickle format instead of h5 print('\nSaving val_acc %f at %s' %(self.best_val_acc, self.dest_folder)) weigh= self.model.get_weights() #now, use pickle to save your model weights, instead of .h5 #for heavy model architectures, .h5 file is unsupported. fpkl= open(self.dest_folder, 'wb') #Python 3 pickle.dump(weigh, fpkl, protocol= pickle.HIGHEST_PROTOCOL) fpkl.close() # model.save('tmp.h5') elif val_acc == self.best_val_acc: if val_loss < self.best_val_loss: self.best_val_loss=val_loss # Save weights in pickle format instead of h5 print('\nSaving val_acc %f at %s' %(self.best_val_acc, self.dest_folder)) weigh= self.model.get_weights() #now, use pickle to save your model weights, instead of .h5 #for heavy model architectures, .h5 file is unsupported. 
fpkl= open(self.dest_folder, 'wb') #Python 3 pickle.dump(weigh, fpkl, protocol= pickle.HIGHEST_PROTOCOL) fpkl.close() # Training import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau from keras.optimizers import Adam import time, os from math import ceil train_datagen = ImageDataGenerator( # rescale = 1./255, rotation_range=30, width_shift_range=0.3, height_shift_range=0.3, shear_range=0.3, zoom_range=0.3, # horizontal_flip=True, # vertical_flip=True,## # brightness_range=[0.5, 1.5],## channel_shift_range=10,## fill_mode='nearest', # preprocessing_function=get_cutout_v2(), preprocessing_function=preprocess_input, ) test_datagen = ImageDataGenerator( # rescale = 1./255 preprocessing_function=preprocess_input ) NUM_GPU = 1 batch_size = 64 train_set = train_datagen.flow_from_directory('surrey/D/train_color/', target_size = (224, 224), batch_size = batch_size, class_mode = 'categorical', shuffle=True, seed=7, # subset="training" ) valid_set = test_datagen.flow_from_directory('surrey/D/test_color/', target_size = (224, 224), batch_size = batch_size, class_mode = 'categorical', shuffle=False, seed=7, # subset="validation" ) model_txt = 'st' # Helper: Save the model. savedfilename = os.path.join('surrey', 'D', 'checkpoints', 'Surrey_MobileNet_D_tmp.hdf5') checkpointer = ModelCheckpoint(savedfilename, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max',save_weights_only=True)######## # Helper: TensorBoard tb = TensorBoard(log_dir=os.path.join('svhn_output', 'logs', model_txt)) # Helper: Save results. timestamp = time.time() csv_logger = CSVLogger(os.path.join('svhn_output', 'logs', model_txt + '-' + 'training-' + \ str(timestamp) + '.log')) earlystopping = EarlyStoppingByAccVal(monitor='val_accuracy', value=0.9900, verbose=1) epochs = 40##!!! lr = 1e-3 decay = lr/epochs optimizer = Adam(lr=lr, decay=decay) # train on multiple-gpus # Create a MirroredStrategy. strategy = tf.distribute.MirroredStrategy() print("Number of GPUs: {}".format(strategy.num_replicas_in_sync)) # Open a strategy scope. with strategy.scope(): # Everything that creates variables should be under the strategy scope. # In general this is only model construction & `compile()`. 
model_mul = get_adv_model() model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy']) step_size_train=ceil(train_set.n/train_set.batch_size) step_size_valid=ceil(valid_set.n/valid_set.batch_size) # step_size_test=ceil(testing_set.n//testing_set.batch_size) # result = model_mul.fit_generator( # generator = train_set, # steps_per_epoch = step_size_train, # validation_data = valid_set, # validation_steps = step_size_valid, # shuffle=True, # epochs=epochs, # callbacks=[checkpointer], # # callbacks=[csv_logger, checkpointer, earlystopping], # # callbacks=[tb, csv_logger, checkpointer, earlystopping], # verbose=1) # Training import tensorflow as tf from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau from keras.optimizers import Adam import time, os from math import ceil train_datagen = ImageDataGenerator( # rescale = 1./255, rotation_range=30, width_shift_range=0.3, height_shift_range=0.3, shear_range=0.3, zoom_range=0.3, # horizontal_flip=True, # vertical_flip=True,## # brightness_range=[0.5, 1.5],## channel_shift_range=10,## fill_mode='nearest', # preprocessing_function=get_cutout_v2(), preprocessing_function=preprocess_input, ) test_datagen = ImageDataGenerator( # rescale = 1./255 preprocessing_function=preprocess_input ) NUM_GPU = 1 batch_size = 64 train_set = train_datagen.flow_from_directory('surrey/D/train_color/', target_size = (224, 224), batch_size = batch_size, class_mode = 'categorical', shuffle=True, seed=7, # subset="training" ) valid_set = test_datagen.flow_from_directory('surrey/D/test_color/', target_size = (224, 224), batch_size = batch_size, class_mode = 'categorical', shuffle=False, seed=7, # subset="validation" ) model_txt = 'st' # Helper: Save the model. savedfilename = os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D.hdf5') checkpointer = ModelCheckpoint(savedfilename, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max',save_weights_only=True)######## # Helper: TensorBoard tb = TensorBoard(log_dir=os.path.join('svhn_output', 'logs', model_txt)) # Helper: Save results. timestamp = time.time() csv_logger = CSVLogger(os.path.join('svhn_output', 'logs', model_txt + '-' + 'training-' + \ str(timestamp) + '.log')) earlystopping = EarlyStoppingByAccVal(monitor='val_accuracy', value=0.9900, verbose=1) epochs = 40##!!! lr = 1e-3 decay = lr/epochs optimizer = Adam(lr=lr, decay=decay) # train on multiple-gpus # Create a MirroredStrategy. strategy = tf.distribute.MirroredStrategy() print("Number of GPUs: {}".format(strategy.num_replicas_in_sync)) # Open a strategy scope. with strategy.scope(): # Everything that creates variables should be under the strategy scope. # In general this is only model construction & `compile()`. model_mul = get_adv_model() model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy']) step_size_train=ceil(train_set.n/train_set.batch_size) step_size_valid=ceil(valid_set.n/valid_set.batch_size) # step_size_test=ceil(testing_set.n//testing_set.batch_size) result = model_mul.fit_generator( generator = train_set, steps_per_epoch = step_size_train, validation_data = valid_set, validation_steps = step_size_valid, shuffle=True, epochs=epochs, callbacks=[checkpointer], # callbacks=[csv_logger, checkpointer, earlystopping], # callbacks=[tb, csv_logger, checkpointer, earlystopping], verbose=1) ls -l # Open a strategy scope. 
with strategy.scope(): model_mul.load_weights(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D.hdf5')) model_mul.evaluate(valid_set) # Helper: Save the model. savedfilename = os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L2.hdf5') checkpointer = ModelCheckpoint(savedfilename, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max',save_weights_only=True)######## epochs = 15##!!! lr = 1e-4 decay = lr/epochs optimizer = Adam(lr=lr, decay=decay) # Open a strategy scope. with strategy.scope(): model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy']) result = model_mul.fit_generator( generator = train_set, steps_per_epoch = step_size_train, validation_data = valid_set, validation_steps = step_size_valid, shuffle=True, epochs=epochs, callbacks=[checkpointer], # callbacks=[csv_logger, checkpointer, earlystopping], # callbacks=[tb, csv_logger, checkpointer, earlystopping], verbose=1) # Open a strategy scope. with strategy.scope(): model_mul.load_weights(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L2.hdf5')) model_mul.evaluate(valid_set) # Helper: Save the model. savedfilename = os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L3.hdf5') checkpointer = ModelCheckpoint(savedfilename, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max',save_weights_only=True)######## epochs = 15##!!! lr = 1e-5 decay = lr/epochs optimizer = Adam(lr=lr, decay=decay) # Open a strategy scope. with strategy.scope(): model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy']) result = model_mul.fit_generator( generator = train_set, steps_per_epoch = step_size_train, validation_data = valid_set, validation_steps = step_size_valid, shuffle=True, epochs=epochs, callbacks=[checkpointer], # callbacks=[csv_logger, checkpointer, earlystopping], # callbacks=[tb, csv_logger, checkpointer, earlystopping], verbose=1) # Open a strategy scope. 
with strategy.scope(): model_mul.load_weights(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L3.hdf5')) model_mul.evaluate(valid_set) import numpy as np from keras.preprocessing.image import ImageDataGenerator import time, os from math import ceil # PREDICT ON OFFICIAL TEST train_datagen = ImageDataGenerator( # rescale = 1./255, rotation_range=30, width_shift_range=0.3, height_shift_range=0.3, shear_range=0.3, zoom_range=0.3, # horizontal_flip=True, # vertical_flip=True,## # brightness_range=[0.5, 1.5],## channel_shift_range=10,## fill_mode='nearest', preprocessing_function=preprocess_input, ) test_datagen1 = ImageDataGenerator( # rescale = 1./255, preprocessing_function=preprocess_input ) batch_size = 64 train_set = train_datagen.flow_from_directory('surrey/D/train_color/', target_size = (224, 224), batch_size = batch_size, class_mode = 'categorical', shuffle=True, seed=7, # subset="training" ) test_set1 = test_datagen1.flow_from_directory('surrey/D/test_color/', target_size = (224, 224), batch_size = batch_size, class_mode = 'categorical', shuffle=False, seed=7, # subset="validation" ) # if NUM_GPU != 1: predict1=model_mul.predict_generator(test_set1, steps = ceil(test_set1.n/test_set1.batch_size),verbose=1) # else: # predict1=model.predict_generator(test_set1, steps = ceil(test_set1.n/test_set1.batch_size),verbose=1) predicted_class_indices=np.argmax(predict1,axis=1) labels = (train_set.class_indices) labels = dict((v,k) for k,v in labels.items()) predictions1 = [labels[k] for k in predicted_class_indices] import pandas as pd filenames=test_set1.filenames results=pd.DataFrame({"file_name":filenames, "predicted1":predictions1, }) results.to_csv('Surrey_MobileNet_D_L3_0902.csv') results.head() np.save(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', 'npy', '5Colab_Surrey_MobileNet_D_L2_0902.hdf5'), predict1) np.save(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', 'npy', '5Colab_Surrey_MobileNet_D_L3_0902.hdf5'), predict1) from sklearn.metrics import classification_report, confusion_matrix import numpy as np test_datagen = ImageDataGenerator( preprocessing_function=preprocess_input) testing_set = test_datagen.flow_from_directory('surrey/D/test_color/', target_size = (224, 224), batch_size = 32, class_mode = 'categorical', seed=7, shuffle=False # subset="validation" ) y_pred = model_mul.predict_generator(testing_set) y_pred = np.argmax(y_pred, axis=1) y_true = testing_set.classes print(confusion_matrix(y_true, y_pred)) # print(model.evaluate_generator(testing_set, # steps = testing_set.n//testing_set.batch_size)) ```
## Dependencies ``` # !pip install --quiet efficientnet !pip install --quiet image-classifiers import warnings, json, re, glob, math from scripts_step_lr_schedulers import * from melanoma_utility_scripts import * from kaggle_datasets import KaggleDatasets from sklearn.model_selection import KFold import tensorflow.keras.layers as L import tensorflow.keras.backend as K from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint from tensorflow.keras import optimizers, layers, metrics, losses, Model # import efficientnet.tfkeras as efn from classification_models.tfkeras import Classifiers import tensorflow_addons as tfa SEED = 0 seed_everything(SEED) warnings.filterwarnings("ignore") ``` ## TPU configuration ``` strategy, tpu = set_up_strategy() print("REPLICAS: ", strategy.num_replicas_in_sync) AUTO = tf.data.experimental.AUTOTUNE ``` # Model parameters ``` config = { "HEIGHT": 256, "WIDTH": 256, "CHANNELS": 3, "BATCH_SIZE": 64, "EPOCHS": 25, "LEARNING_RATE": 3e-4, "ES_PATIENCE": 10, "N_FOLDS": 5, "N_USED_FOLDS": 5, "TTA_STEPS": 25, "BASE_MODEL": 'seresnet18', "BASE_MODEL_WEIGHTS": 'imagenet', "DATASET_PATH": 'melanoma-256x256' } with open('config.json', 'w') as json_file: json.dump(json.loads(json.dumps(config)), json_file) config ``` # Load data ``` database_base_path = '/kaggle/input/siim-isic-melanoma-classification/' k_fold = pd.read_csv(database_base_path + 'train.csv') test = pd.read_csv(database_base_path + 'test.csv') print('Train samples: %d' % len(k_fold)) display(k_fold.head()) print(f'Test samples: {len(test)}') display(test.head()) GCS_PATH = KaggleDatasets().get_gcs_path(config['DATASET_PATH']) TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec') ``` # Augmentations ``` def data_augment(image, label): p_spatial = tf.random.uniform([1], minval=0, maxval=1, dtype='float32') p_spatial2 = tf.random.uniform([1], minval=0, maxval=1, dtype='float32') p_rotate = tf.random.uniform([1], minval=0, maxval=1, dtype='float32') p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32') p_pixel = tf.random.uniform([1], minval=0, maxval=1, dtype='float32') ### Spatial-level transforms if p_spatial >= .2: # flips image['input_image'] = tf.image.random_flip_left_right(image['input_image']) image['input_image'] = tf.image.random_flip_up_down(image['input_image']) if p_spatial >= .7: image['input_image'] = tf.image.transpose(image['input_image']) if p_rotate >= .8: # rotate 270º image['input_image'] = tf.image.rot90(image['input_image'], k=3) elif p_rotate >= .6: # rotate 180º image['input_image'] = tf.image.rot90(image['input_image'], k=2) elif p_rotate >= .4: # rotate 90º image['input_image'] = tf.image.rot90(image['input_image'], k=1) if p_spatial2 >= .6: if p_spatial2 >= .9: image['input_image'] = transform_rotation(image['input_image'], config['HEIGHT'], 180.) elif p_spatial2 >= .8: image['input_image'] = transform_zoom(image['input_image'], config['HEIGHT'], 8., 8.) elif p_spatial2 >= .7: image['input_image'] = transform_shift(image['input_image'], config['HEIGHT'], 8., 8.) else: image['input_image'] = transform_shear(image['input_image'], config['HEIGHT'], 2.) 
if p_crop >= .6: # crops if p_crop >= .8: image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.8), int(config['WIDTH']*.8), config['CHANNELS']]) elif p_crop >= .7: image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']]) else: image['input_image'] = tf.image.central_crop(image['input_image'], central_fraction=.8) image['input_image'] = tf.image.resize(image['input_image'], size=[config['HEIGHT'], config['WIDTH']]) if p_pixel >= .6: # Pixel-level transforms if p_pixel >= .9: image['input_image'] = tf.image.random_hue(image['input_image'], 0.01) elif p_pixel >= .8: image['input_image'] = tf.image.random_saturation(image['input_image'], 0.7, 1.3) elif p_pixel >= .7: image['input_image'] = tf.image.random_contrast(image['input_image'], 0.8, 1.2) else: image['input_image'] = tf.image.random_brightness(image['input_image'], 0.1) return image, label ``` ## Auxiliary functions ``` # Datasets utility functions def read_labeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']): example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image'], height, width, channels) label = tf.cast(example['target'], tf.float32) # meta features data = {} data['patient_id'] = tf.cast(example['patient_id'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return {'input_image': image, 'input_meta': data}, label # returns a dataset of (image, data, label) def read_labeled_tfrecord_eval(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']): example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image'], height, width, channels) label = tf.cast(example['target'], tf.float32) image_name = example['image_name'] # meta features data = {} data['patient_id'] = tf.cast(example['patient_id'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return {'input_image': image, 'input_meta': data}, label, image_name # returns a dataset of (image, data, label, image_name) def load_dataset(filenames, ordered=False, buffer_size=-1): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False # disable order, increase speed dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files dataset = dataset.with_options(ignore_order) # uses data as soon as it streams in, rather than in its original order dataset = dataset.map(read_labeled_tfrecord, num_parallel_calls=buffer_size) return dataset # returns a dataset of (image, data, label) def load_dataset_eval(filenames, buffer_size=-1): dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files dataset = dataset.map(read_labeled_tfrecord_eval, num_parallel_calls=buffer_size) return dataset # returns a dataset of (image, data, label, image_name) def get_training_dataset(filenames, batch_size, buffer_size=-1): dataset = load_dataset(filenames, ordered=False, 
buffer_size=buffer_size) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() # the training dataset must repeat for several epochs dataset = dataset.shuffle(2048) dataset = dataset.batch(batch_size, drop_remainder=True) # slighly faster with fixed tensor sizes dataset = dataset.prefetch(buffer_size) # prefetch next batch while training (autotune prefetch buffer size) return dataset def get_validation_dataset(filenames, ordered=True, repeated=False, batch_size=32, buffer_size=-1): dataset = load_dataset(filenames, ordered=ordered, buffer_size=buffer_size) if repeated: dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(batch_size, drop_remainder=repeated) dataset = dataset.prefetch(buffer_size) return dataset def get_eval_dataset(filenames, batch_size=32, buffer_size=-1): dataset = load_dataset_eval(filenames, buffer_size=buffer_size) dataset = dataset.batch(batch_size, drop_remainder=False) dataset = dataset.prefetch(buffer_size) return dataset # Test function def read_unlabeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']): example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image'], height, width, channels) image_name = example['image_name'] # meta features data = {} data['patient_id'] = tf.cast(example['patient_id'], tf.int32) data['sex'] = tf.cast(example['sex'], tf.int32) data['age_approx'] = tf.cast(example['age_approx'], tf.int32) data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32) return {'input_image': image, 'input_tabular': data}, image_name # returns a dataset of (image, data, image_name) def load_dataset_test(filenames, buffer_size=-1): dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files dataset = dataset.map(read_unlabeled_tfrecord, num_parallel_calls=buffer_size) # returns a dataset of (image, data, label, image_name) pairs if labeled=True or (image, data, image_name) pairs if labeled=False return dataset def get_test_dataset(filenames, batch_size=32, buffer_size=-1, tta=False): dataset = load_dataset_test(filenames, buffer_size=buffer_size) if tta: dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.batch(batch_size, drop_remainder=False) dataset = dataset.prefetch(buffer_size) return dataset # Advanced augmentations def transform_rotation(image, height, rotation): # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3] # output - image randomly rotated DIM = height XDIM = DIM%2 #fix for size 331 rotation = rotation * tf.random.normal([1],dtype='float32') # CONVERT DEGREES TO RADIANS rotation = math.pi * rotation / 180. 
# ROTATION MATRIX c1 = tf.math.cos(rotation) s1 = tf.math.sin(rotation) one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') rotation_matrix = tf.reshape( tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3] ) # LIST DESTINATION PIXEL INDICES x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM ) y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] ) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack( [x,y,z] ) # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) # FIND ORIGIN PIXEL VALUES idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] ) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def transform_shear(image, height, shear): # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3] # output - image randomly sheared DIM = height XDIM = DIM%2 #fix for size 331 shear = shear * tf.random.normal([1],dtype='float32') shear = math.pi * shear / 180. # SHEAR MATRIX one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') c2 = tf.math.cos(shear) s2 = tf.math.sin(shear) shear_matrix = tf.reshape( tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3] ) # LIST DESTINATION PIXEL INDICES x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM ) y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] ) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack( [x,y,z] ) # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) # FIND ORIGIN PIXEL VALUES idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] ) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def transform_shift(image, height, h_shift, w_shift): # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3] # output - image randomly shifted DIM = height XDIM = DIM%2 #fix for size 331 height_shift = h_shift * tf.random.normal([1],dtype='float32') width_shift = w_shift * tf.random.normal([1],dtype='float32') one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') # SHIFT MATRIX shift_matrix = tf.reshape( tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3] ) # LIST DESTINATION PIXEL INDICES x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM ) y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] ) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack( [x,y,z] ) # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS idx2 = K.dot(shift_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) # FIND ORIGIN PIXEL VALUES idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] ) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) def transform_zoom(image, height, h_zoom, w_zoom): # input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3] # output - image randomly zoomed DIM = height XDIM = DIM%2 #fix for size 331 height_zoom = 1.0 + tf.random.normal([1],dtype='float32')/h_zoom width_zoom = 1.0 + tf.random.normal([1],dtype='float32')/w_zoom one = tf.constant([1],dtype='float32') zero = tf.constant([0],dtype='float32') # ZOOM MATRIX zoom_matrix = tf.reshape( tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3] ) # LIST DESTINATION PIXEL INDICES x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM ) y = tf.tile( 
tf.range(-DIM//2,DIM//2),[DIM] ) z = tf.ones([DIM*DIM],dtype='int32') idx = tf.stack( [x,y,z] ) # ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS idx2 = K.dot(zoom_matrix,tf.cast(idx,dtype='float32')) idx2 = K.cast(idx2,dtype='int32') idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2) # FIND ORIGIN PIXEL VALUES idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] ) d = tf.gather_nd(image, tf.transpose(idx3)) return tf.reshape(d,[DIM,DIM,3]) ``` ## Learning rate scheduler ``` lr_min = 1e-6 # lr_start = 0 lr_max = config['LEARNING_RATE'] steps_per_epoch = 24844 // config['BATCH_SIZE'] total_steps = config['EPOCHS'] * steps_per_epoch warmup_steps = steps_per_epoch * 5 # hold_max_steps = 0 # step_decay = .8 # step_size = steps_per_epoch * 1 # rng = [i for i in range(0, total_steps, 32)] # y = [step_schedule_with_warmup(tf.cast(x, tf.float32), step_size=step_size, # warmup_steps=warmup_steps, hold_max_steps=hold_max_steps, # lr_start=lr_start, lr_max=lr_max, step_decay=step_decay) for x in rng] # sns.set(style="whitegrid") # fig, ax = plt.subplots(figsize=(20, 6)) # plt.plot(rng, y) # print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) ``` # Model ``` # Initial bias pos = len(k_fold[k_fold['target'] == 1]) neg = len(k_fold[k_fold['target'] == 0]) initial_bias = np.log([pos/neg]) print('Bias') print(pos) print(neg) print(initial_bias) # class weights total = len(k_fold) weight_for_0 = (1 / neg)*(total)/2.0 weight_for_1 = (1 / pos)*(total)/2.0 class_weight = {0: weight_for_0, 1: weight_for_1} print('Class weight') print(class_weight) def model_fn(input_shape): input_image = L.Input(shape=input_shape, name='input_image') BaseModel, preprocess_input = Classifiers.get(config['BASE_MODEL']) base_model = BaseModel(input_shape=input_shape, weights=config['BASE_MODEL_WEIGHTS'], include_top=False) x = base_model(input_image) x = L.GlobalAveragePooling2D()(x) output = L.Dense(1, activation='sigmoid', name='output', bias_initializer=tf.keras.initializers.Constant(initial_bias))(x) model = Model(inputs=input_image, outputs=output) return model ``` # Training ``` # Evaluation eval_dataset = get_eval_dataset(TRAINING_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO) image_names = next(iter(eval_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(TRAINING_FILENAMES)))).numpy().astype('U') image_data = eval_dataset.map(lambda data, label, image_name: data) # Resample dataframe k_fold = k_fold[k_fold['image_name'].isin(image_names)] # Test NUM_TEST_IMAGES = len(test) test_preds = np.zeros((NUM_TEST_IMAGES, 1)) test_preds_last = np.zeros((NUM_TEST_IMAGES, 1)) test_dataset = get_test_dataset(TEST_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, tta=True) image_names_test = next(iter(test_dataset.unbatch().map(lambda data, image_name: image_name).batch(NUM_TEST_IMAGES))).numpy().astype('U') test_image_data = test_dataset.map(lambda data, image_name: data) history_list = [] k_fold_best = k_fold.copy() kfold = KFold(config['N_FOLDS'], shuffle=True, random_state=SEED) for n_fold, (trn_idx, val_idx) in enumerate(kfold.split(TRAINING_FILENAMES)): if n_fold < config['N_USED_FOLDS']: n_fold +=1 print('\nFOLD: %d' % (n_fold)) # tf.tpu.experimental.initialize_tpu_system(tpu) K.clear_session() ### Data train_filenames = np.array(TRAINING_FILENAMES)[trn_idx] valid_filenames = np.array(TRAINING_FILENAMES)[val_idx] steps_per_epoch = count_data_items(train_filenames) // config['BATCH_SIZE'] # Train model model_path = f'model_fold_{n_fold}.h5' 
es = EarlyStopping(monitor='val_auc', mode='max', patience=config['ES_PATIENCE'], restore_best_weights=False, verbose=1) checkpoint = ModelCheckpoint(model_path, monitor='val_auc', mode='max', save_best_only=True, save_weights_only=True) with strategy.scope(): model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS'])) optimizer = tfa.optimizers.RectifiedAdam(lr=lr_max, total_steps=total_steps, warmup_proportion=(warmup_steps / total_steps), min_lr=lr_min) model.compile(optimizer, loss=losses.BinaryCrossentropy(label_smoothing=0.05), metrics=[metrics.AUC()]) history = model.fit(get_training_dataset(train_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO), validation_data=get_validation_dataset(valid_filenames, ordered=True, repeated=False, batch_size=config['BATCH_SIZE'], buffer_size=AUTO), epochs=config['EPOCHS'], steps_per_epoch=steps_per_epoch, callbacks=[checkpoint, es], class_weight=class_weight, verbose=2).history # save last epoch weights model.save_weights('last_' + model_path) history_list.append(history) # Get validation IDs valid_dataset = get_eval_dataset(valid_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO) valid_image_names = next(iter(valid_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(valid_filenames)))).numpy().astype('U') k_fold[f'fold_{n_fold}'] = k_fold.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1) k_fold_best[f'fold_{n_fold}'] = k_fold_best.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1) ##### Last model ##### print('Last model evaluation...') preds = model.predict(image_data) name_preds_eval = dict(zip(image_names, preds.reshape(len(preds)))) k_fold[f'pred_fold_{n_fold}'] = k_fold.apply(lambda x: name_preds_eval[x['image_name']], axis=1) print(f'Last model inference (TTA {config["TTA_STEPS"]} steps)...') for step in range(config['TTA_STEPS']): test_preds_last += model.predict(test_image_data) ##### Best model ##### print('Best model evaluation...') model.load_weights(model_path) preds = model.predict(image_data) name_preds_eval = dict(zip(image_names, preds.reshape(len(preds)))) k_fold_best[f'pred_fold_{n_fold}'] = k_fold_best.apply(lambda x: name_preds_eval[x['image_name']], axis=1) print(f'Best model inference (TTA {config["TTA_STEPS"]} steps)...') for step in range(config['TTA_STEPS']): test_preds += model.predict(test_image_data) # normalize preds test_preds /= (config['N_USED_FOLDS'] * config['TTA_STEPS']) test_preds_last /= (config['N_USED_FOLDS'] * config['TTA_STEPS']) name_preds = dict(zip(image_names_test, test_preds.reshape(NUM_TEST_IMAGES))) name_preds_last = dict(zip(image_names_test, test_preds_last.reshape(NUM_TEST_IMAGES))) test['target'] = test.apply(lambda x: name_preds[x['image_name']], axis=1) test['target_last'] = test.apply(lambda x: name_preds_last[x['image_name']], axis=1) ``` ## Model loss graph ``` for n_fold in range(config['N_USED_FOLDS']): print(f'Fold: {n_fold + 1}') plot_metrics(history_list[n_fold]) ``` ## Model loss graph aggregated ``` plot_metrics_agg(history_list, config['N_USED_FOLDS']) ``` # Model evaluation (best) ``` display(evaluate_model(k_fold_best, config['N_USED_FOLDS']).style.applymap(color_map)) display(evaluate_model_Subset(k_fold_best, config['N_USED_FOLDS']).style.applymap(color_map)) ``` # Model evaluation (last) ``` display(evaluate_model(k_fold, config['N_USED_FOLDS']).style.applymap(color_map)) display(evaluate_model_Subset(k_fold, 
config['N_USED_FOLDS']).style.applymap(color_map)) ``` # Confusion matrix ``` for n_fold in range(config['N_USED_FOLDS']): n_fold += 1 pred_col = f'pred_fold_{n_fold}' train_set = k_fold_best[k_fold_best[f'fold_{n_fold}'] == 'train'] valid_set = k_fold_best[k_fold_best[f'fold_{n_fold}'] == 'validation'] print(f'Fold: {n_fold}') plot_confusion_matrix(train_set['target'], np.round(train_set[pred_col]), valid_set['target'], np.round(valid_set[pred_col])) ``` # Visualize predictions ``` k_fold['pred'] = 0 for n_fold in range(config['N_USED_FOLDS']): k_fold['pred'] += k_fold[f'pred_fold_{n_fold+1}'] / config['N_FOLDS'] print('Label/prediction distribution') print(f"Train positive labels: {len(k_fold[k_fold['target'] > .5])}") print(f"Train positive predictions: {len(k_fold[k_fold['pred'] > .5])}") print(f"Train positive correct predictions: {len(k_fold[(k_fold['target'] > .5) & (k_fold['pred'] > .5)])}") print('Top 10 samples') display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis', 'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].head(10)) print('Top 10 positive samples') display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis', 'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('target == 1').head(10)) print('Top 10 predicted positive samples') display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis', 'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('pred > .5').head(10)) ``` # Visualize test predictions ``` print(f"Test predictions {len(test[test['target'] > .5])}|{len(test[test['target'] <= .5])}") print(f"Test predictions (last) {len(test[test['target_last'] > .5])}|{len(test[test['target_last'] <= .5])}") print('Top 10 samples') display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last'] + [c for c in test.columns if (c.startswith('pred_fold'))]].head(10)) print('Top 10 positive samples') display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last'] + [c for c in test.columns if (c.startswith('pred_fold'))]].query('target > .5').head(10)) print('Top 10 positive samples (last)') display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last'] + [c for c in test.columns if (c.startswith('pred_fold'))]].query('target_last > .5').head(10)) ``` # Test set predictions ``` submission = pd.read_csv(database_base_path + 'sample_submission.csv') submission['target'] = test['target'] submission['target_last'] = test['target_last'] submission['target_blend'] = (test['target'] * .5) + (test['target_last'] * .5) display(submission.head(10)) display(submission.describe()) ### BEST ### submission[['image_name', 'target']].to_csv('submission.csv', index=False) ### LAST ### submission_last = submission[['image_name', 'target_last']] submission_last.columns = ['image_name', 'target'] submission_last.to_csv('submission_last.csv', index=False) ### BLEND ### submission_blend = submission[['image_name', 'target_blend']] submission_blend.columns = ['image_name', 'target'] submission_blend.to_csv('submission_blend.csv', index=False) ```
```
import pandas as pd
import matplotlib.pyplot as plt
import folium
from folium.plugins import MarkerCluster
%matplotlib inline

australia = pd.read_csv("https://frenzy86.s3.eu-west-2.amazonaws.com/fav/australia_cleaned.csv")
australia.head()

plt.figure(figsize=(18, 12))
plt.hist(australia["confidence"], label="Fire confidence", color="red");
plt.xlabel("Fire confidence level")
plt.ylabel("Number of fires")
plt.title("Number of fires vs confidence level")
plt.legend(loc=2);

plt.figure(figsize=(18, 12))
plt.scatter(australia["confidence"], australia["brightness"], label="Fire confidence", color="orange");
plt.ylabel("Brightness temperature, channel 21 (Kelvin)")
plt.xlabel("Fire confidence level")
plt.title("Fire confidence level vs channel 21 brightness temperature")
plt.legend(loc=2);

plt.figure(figsize=(18, 12))
plt.scatter(australia["confidence"], australia["bright_t31"], label="Fire confidence", color="yellow");
plt.ylabel("Brightness temperature, channel 31 (Kelvin)")
plt.xlabel("Fire confidence level")
plt.title("Fire confidence level vs channel 31 brightness temperature")
plt.legend(loc=2);

pd.crosstab(australia["sat_Terra"], australia["time_N"]).plot(kind="bar", figsize=(20, 10));
plt.title("Day vs night fires detected by the Terra satellite")
plt.ylabel("Number of fires detected by the satellites")
plt.xlabel("Type of fire, night or day");

australia_1 = australia.copy()
australia_1.head()

data = australia_1[(australia_1["confidence"] >= 70)]
data.head()
data.shape

# Build latitude and longitude lists
lat = data["latitude"].values.tolist()
long = data["longitude"].values.tolist()

# Map of Australia
map1 = folium.Map([-25.274398, 133.775136], zoom_start=4)

# Create a marker cluster
australia_cluster = MarkerCluster()
for latV, longV in zip(lat, long):
    folium.Marker(location=[latV, longV]).add_to(australia_cluster)

# Add the cluster to the map we want to display
australia_cluster.add_to(map1);
map1

localizacion = australia_1[(australia_1["frp"] >= 2500)]
localizacion.head()

map_2 = folium.Map([-25.274398, 133.775136], zoom_start=4.5, tiles='Stamen Terrain')
lat_2 = localizacion["latitude"].values.tolist()
long_2 = localizacion["longitude"].values.tolist()

australia_cluster_2 = MarkerCluster().add_to(map_2)
for lat_2, long_2 in zip(lat_2, long_2):
    folium.Marker([lat_2, long_2]).add_to(australia_cluster_2)
map_2
```

Do you want to know which fires broke out after 15 September 2019?

```
mes = australia_1[(australia_1["acq_date"] >= "2019-09-15")]
mes.head()
mes.describe()

map_sett = folium.Map([-25.274398, 133.775136], zoom_start=4)
lat_3 = mes["latitude"].values.tolist()
long_3 = mes["longitude"].values.tolist()

australia_cluster_3 = MarkerCluster().add_to(map_sett)
for lat_3, long_3 in zip(lat_3, long_3):
    folium.Marker([lat_3, long_3]).add_to(australia_cluster_3)
map_sett
```

# Play with Folium

```
# Coordinates of a point of interest (Bologna): 44.4807035, 11.3712528
import folium

m1 = folium.Map(location=[44.48, 11.37], tiles='openstreetmap', zoom_start=18)
m1.save('map1.html')
m1
m1.save("filename.html")  # folium maps are saved as interactive HTML files
```
``` import numpy as np import tensorflow.compat.v1 as tf tf.disable_v2_behavior() import tensorflow_probability as tfp # -- plotting import matplotlib as mpl import matplotlib.pyplot as plt mpl.rcParams['text.usetex'] = True mpl.rcParams['font.family'] = 'serif' mpl.rcParams['axes.linewidth'] = 1.5 mpl.rcParams['axes.xmargin'] = 1 mpl.rcParams['xtick.labelsize'] = 'x-large' mpl.rcParams['xtick.major.size'] = 5 mpl.rcParams['xtick.major.width'] = 1.5 mpl.rcParams['ytick.labelsize'] = 'x-large' mpl.rcParams['ytick.major.size'] = 5 mpl.rcParams['ytick.major.width'] = 1.5 mpl.rcParams['legend.frameon'] = False #import tensorflow as tf """ Gumbel Softmax functions borrowed from http://blog.evjang.com/2016/11/tutorial-categorical-variational.html """ def sample_gumbel(shape, eps=1e-7): """Sample from Gumbel(0, 1)""" U = tf.random_uniform(shape,minval=0,maxval=1) return -tf.log(-tf.log(U + eps) + eps) def gumbel_softmax_sample(logits, temperature): """ Draw a sample from the Gumbel-Softmax distribution""" y = logits + sample_gumbel(tf.shape(logits)) return tf.nn.softmax( y / temperature) def gumbel_softmax(logits, temperature, hard=False): """Sample from the Gumbel-Softmax distribution and optionally discretize. Args: logits: [batch_size, n_class] unnormalized log-probs temperature: non-negative scalar hard: if True, take argmax, but differentiate w.r.t. soft sample y Returns: [batch_size,..., n_class] sample from the Gumbel-Softmax distribution. If hard=True, then the returned sample will be one-hot, otherwise it will be a probabilitiy distribution that sums to 1 across classes """ y = gumbel_softmax_sample(logits, temperature) if hard: k = tf.shape(logits)[-1] #y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype) y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,-1,keep_dims=True)),y.dtype) y = tf.stop_gradient(y_hard - y) + y return y d = gumbel_softmax(np.array([np.log(0.5), np.log(0.5)] ), 0.5, hard=True) tfp.__version__ sess = tf.Session() _Mmin = tf.get_variable(name='mass', initializer=13.2, dtype=tf.float32) Mhalo = tf.convert_to_tensor(np.random.uniform(11., 14., 1000), dtype=tf.float32) siglogm = tf.convert_to_tensor(0.2, dtype=tf.float32) temperature = 0.5 def Ncen(Mmin): # mean occupation of centrals return tf.clip_by_value(0.5 * (1+tf.math.erf((Mhalo - Mmin)/siglogm)),1e-4,1-1e-4) def hod(Mmin): p = Ncen(Mmin) samp = gumbel_softmax(tf.stack([tf.log(p), tf.log(1.-p)],axis=1), temperature, hard=True) return samp[...,0] def numden(Mmin): return tf.reduce_sum(hod(Mmin)) ncen,mh,nh = sess.run([Ncen(12.5), Mhalo, hod(12.5)] ) plt.scatter(mh, (ncen)) plt.xlim(11, 13.5) plt.scatter(mh, (ncen), c='k') plt.scatter(mh, nh) plt.xlim(11., 13.5) Mmin_true = 12.5 loss = (numden(Mmin_true) - numden(_Mmin))**2 opt = tf.train.AdamOptimizer(learning_rate=0.01) opt_op = opt.minimize(loss) sess.run(tf.global_variables_initializer()) losses=[] masses=[] for i in range(200): _,l,m = sess.run([opt_op, loss, _Mmin]) losses.append(l) masses.append(m) losses %pylab inline plot(losses) plot(masses) axhline(Mmin_true, color='r', label='True Mmin') xlim(0,200) xlabel('Number of iterations') ylabel('Mmin') legend() ```
``` import keras from keras.models import Sequential, Model, load_model from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, BatchNormalization, LocallyConnected2D, Permute from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback from keras import regularizers from keras import backend as K import keras.losses import tensorflow as tf #tf.compat.v1.enable_eager_execution() from tensorflow.python.framework import ops import isolearn.keras as iso import numpy as np import tensorflow as tf import logging logging.getLogger('tensorflow').setLevel(logging.ERROR) import pandas as pd import os import pickle import numpy as np import random import scipy.sparse as sp import scipy.io as spio import matplotlib.pyplot as plt import isolearn.io as isoio import isolearn.keras as isol from genesis.visualization import * from genesis.generator import * from genesis.predictor import * from genesis.optimizer import * from definitions.generator.aparent_deconv_conv_generator_concat_trainmode import load_generator_network from definitions.predictor.aparent import load_saved_predictor import sklearn from sklearn.decomposition import PCA from sklearn.manifold import TSNE from scipy.stats import pearsonr import seaborn as sns from matplotlib import colors from scipy.stats import norm from genesis.vae import * def set_seed(seed_value) : # 1. Set the `PYTHONHASHSEED` environment variable at a fixed value os.environ['PYTHONHASHSEED']=str(seed_value) # 2. Set the `python` built-in pseudo-random generator at a fixed value random.seed(seed_value) # 3. Set the `numpy` pseudo-random generator at a fixed value np.random.seed(seed_value) # 4. Set the `tensorflow` pseudo-random generator at a fixed value tf.set_random_seed(seed_value) # 5. 
Configure a new global `tensorflow` session session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) def load_data(data_name, valid_set_size=0.05, test_set_size=0.05, batch_size=32) : #Load cached dataframe cached_dict = pickle.load(open(data_name, 'rb')) plasmid_df = cached_dict['plasmid_df'] plasmid_cuts = cached_dict['plasmid_cuts'] #print("len(plasmid_df) = " + str(len(plasmid_df)) + " (loaded)") #Generate training and test set indexes plasmid_index = np.arange(len(plasmid_df), dtype=np.int) plasmid_train_index = plasmid_index[:-int(len(plasmid_df) * (valid_set_size + test_set_size))] plasmid_valid_index = plasmid_index[plasmid_train_index.shape[0]:-int(len(plasmid_df) * test_set_size)] plasmid_test_index = plasmid_index[plasmid_train_index.shape[0] + plasmid_valid_index.shape[0]:] #print('Training set size = ' + str(plasmid_train_index.shape[0])) #print('Validation set size = ' + str(plasmid_valid_index.shape[0])) #print('Test set size = ' + str(plasmid_test_index.shape[0])) data_gens = { gen_id : iso.DataGenerator( idx, {'df' : plasmid_df}, batch_size=batch_size, inputs = [ { 'id' : 'seq', 'source_type' : 'dataframe', 'source' : 'df', 'extractor' : lambda row, index: row['padded_seq'][180 + 40: 180 + 40 + 81] + "G" * (128-81), 'encoder' : iso.OneHotEncoder(seq_length=128), 'dim' : (1, 128, 4), 'sparsify' : False } ], outputs = [ { 'id' : 'dummy_output', 'source_type' : 'zeros', 'dim' : (1,), 'sparsify' : False } ], randomizers = [], shuffle = True if gen_id == 'train' else False ) for gen_id, idx in [('all', plasmid_index), ('train', plasmid_train_index), ('valid', plasmid_valid_index), ('test', plasmid_test_index)] } x_train = np.concatenate([data_gens['train'][i][0][0] for i in range(len(data_gens['train']))], axis=0) x_test = np.concatenate([data_gens['test'][i][0][0] for i in range(len(data_gens['test']))], axis=0) return x_train, x_test #Specfiy problem-specific parameters experiment_suffix = '_strong_vae_very_high_kl_epoch_35_margin_pos_2_lower_fitness' vae_model_prefix = "vae/saved_models/vae_apa_max_isoform_doubledope_strong_cano_pas_len_128_50_epochs_very_high_kl" vae_model_suffix = "_epoch_35"#""# #VAE model path saved_vae_encoder_model_path = vae_model_prefix + "_encoder" + vae_model_suffix + ".h5" saved_vae_decoder_model_path = vae_model_prefix + "_decoder" + vae_model_suffix + ".h5" #Padding for the VAE vae_upstream_padding = '' vae_downstream_padding = 'G' * 47 #VAE sequence template vae_sequence_template = 'ATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCC' + 'G' * (128 - 81) #VAE latent dim vae_latent_dim = 100 #Oracle predictor model path saved_predictor_model_path = '../../../aparent/saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5' #Subtring indices for VAE vae_pwm_start = 40 vae_pwm_end = 121 #VAE parameter collection vae_params = [ saved_vae_encoder_model_path, saved_vae_decoder_model_path, vae_upstream_padding, vae_downstream_padding, vae_latent_dim, vae_pwm_start, vae_pwm_end ] #Load data set vae_data_path = "vae/apa_doubledope_cached_set_strong_short_cano_pas.pickle" _, x_test = load_data(vae_data_path, valid_set_size=0.005, test_set_size=0.095) #Evaluate ELBO distribution on test set #Load VAE models vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 
'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization}) vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization}) #Compute multi-sample ELBO on test set log_mean_p_vae_test, mean_log_p_vae_test, log_p_vae_test = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=128) print("mean log(likelihood) = " + str(mean_log_p_vae_test)) #Log Likelihood Plot plot_min_val = None plot_max_val = None f = plt.figure(figsize=(6, 4)) log_p_vae_test_hist, log_p_vae_test_edges = np.histogram(log_mean_p_vae_test, bins=50, density=True) bin_width_test = log_p_vae_test_edges[1] - log_p_vae_test_edges[0] plt.bar(log_p_vae_test_edges[1:] - bin_width_test/2., log_p_vae_test_hist, width=bin_width_test, linewidth=2, edgecolor='black', color='orange') plt.xticks(fontsize=14) plt.yticks(fontsize=14) if plot_min_val is not None and plot_max_val is not None : plt.xlim(plot_min_val, plot_max_val) plt.xlabel("VAE Log Likelihood", fontsize=14) plt.ylabel("Data Density", fontsize=14) plt.axvline(x=mean_log_p_vae_test, linewidth=2, color='red', linestyle="--") plt.tight_layout() plt.show() #Evaluate ELBO distribution on test set (training-level no. of samples) #Load VAE models vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization}) vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization}) #Compute multi-sample ELBO on test set log_mean_p_vae_test, mean_log_p_vae_test, log_p_vae_test = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=32) print("mean log(likelihood) = " + str(mean_log_p_vae_test)) #Log Likelihood Plot plot_min_val = None plot_max_val = None f = plt.figure(figsize=(6, 4)) log_p_vae_test_hist, log_p_vae_test_edges = np.histogram(log_mean_p_vae_test, bins=50, density=True) bin_width_test = log_p_vae_test_edges[1] - log_p_vae_test_edges[0] plt.bar(log_p_vae_test_edges[1:] - bin_width_test/2., log_p_vae_test_hist, width=bin_width_test, linewidth=2, edgecolor='black', color='orange') plt.xticks(fontsize=14) plt.yticks(fontsize=14) if plot_min_val is not None and plot_max_val is not None : plt.xlim(plot_min_val, plot_max_val) plt.xlabel("VAE Log Likelihood", fontsize=14) plt.ylabel("Data Density", fontsize=14) plt.axvline(x=mean_log_p_vae_test, linewidth=2, color='red', linestyle="--") plt.tight_layout() plt.show() #Define target isoform loss function def get_isoform_loss(target_isos, fitness_weight=2., batch_size=32, n_samples=1, n_z_samples=1, mini_batch_size=1, seq_length=205, vae_loss_mode='bound', vae_divergence_weight=1., ref_vae_log_p=-10, vae_log_p_margin=1, decoded_pwm_epsilon=10**-6, pwm_start=0, pwm_end=70, pwm_target_bits=1.8, vae_pwm_start=0, entropy_weight=0.0, entropy_loss_mode='margin', similarity_weight=0.0, similarity_margin=0.5) : target_iso = np.zeros((len(target_isos), 1)) for i, t_iso in enumerate(target_isos) : target_iso[i, 0] = t_iso masked_entropy_mse = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, target_bits=pwm_target_bits) if entropy_loss_mode == 'margin' : masked_entropy_mse = 
get_margin_entropy_ame_masked(pwm_start=pwm_start, pwm_end=pwm_end, min_bits=pwm_target_bits) pwm_sample_entropy_func = get_pwm_margin_sample_entropy_masked(pwm_start=pwm_start, pwm_end=pwm_end, margin=similarity_margin, shift_1_nt=True) def loss_func(loss_tensors) : _, _, _, sequence_class, pwm_logits_1, pwm_logits_2, pwm_1, pwm_2, sampled_pwm_1, sampled_pwm_2, mask, sampled_mask, iso_pred, cut_pred, iso_score_pred, cut_score_pred, vae_pwm_1, vae_sampled_pwm_1, z_mean_1, z_log_var_1, z_1, decoded_pwm_1 = loss_tensors #Create target isoform with sample axis iso_targets = K.constant(target_iso) iso_true = K.gather(iso_targets, sequence_class[:, 0]) iso_true = K.tile(K.expand_dims(iso_true, axis=-1), (1, K.shape(sampled_pwm_1)[1], 1)) #Re-create iso_pred from cut_pred #iso_pred = K.expand_dims(K.sum(cut_pred[..., 76:76+35], axis=-1), axis=-1) #Specify costs iso_loss = fitness_weight * K.mean(symmetric_sigmoid_kl_divergence(iso_true, iso_pred), axis=1) #Construct VAE sequence inputs decoded_pwm_1 = K.clip(decoded_pwm_1, decoded_pwm_epsilon, 1. - decoded_pwm_epsilon) log_p_x_given_z_1 = K.sum(K.sum(vae_sampled_pwm_1[:, :, :, pwm_start-vae_pwm_start:pwm_end-vae_pwm_start, ...] * K.log(K.stop_gradient(decoded_pwm_1[:, :, :, pwm_start-vae_pwm_start:pwm_end-vae_pwm_start, ...])) / K.log(K.constant(10.)), axis=(-1, -2)), axis=-1) log_p_std_normal_1 = K.sum(normal_log_prob(z_1, 0., 1.) / K.log(K.constant(10.)), axis=-1) log_p_importance_1 = K.sum(normal_log_prob(z_1, z_mean_1, K.sqrt(K.exp(z_log_var_1))) / K.log(K.constant(10.)), axis=-1) log_p_vae_1 = log_p_x_given_z_1 + log_p_std_normal_1 - log_p_importance_1 log_p_vae_div_n_1 = log_p_vae_1 - K.log(K.constant(n_z_samples, dtype='float32')) / K.log(K.constant(10.)) #Calculate mean ELBO across samples (log-sum-exp trick) max_log_p_vae_1 = K.max(log_p_vae_div_n_1, axis=-1) log_mean_p_vae_1 = max_log_p_vae_1 + K.log(K.sum(10**(log_p_vae_div_n_1 - K.expand_dims(max_log_p_vae_1, axis=-1)), axis=-1)) / K.log(K.constant(10.)) #Specify VAE divergence loss function vae_divergence_loss = 0. 
if vae_loss_mode == 'bound' : vae_divergence_loss = vae_divergence_weight * K.mean(K.switch(log_mean_p_vae_1 < ref_vae_log_p - vae_log_p_margin, -log_mean_p_vae_1 + (ref_vae_log_p - vae_log_p_margin), K.zeros_like(log_mean_p_vae_1)), axis=1) elif vae_loss_mode == 'penalty' : vae_divergence_loss = vae_divergence_weight * K.mean(-log_mean_p_vae_1, axis=1) elif vae_loss_mode == 'target' : vae_divergence_loss = vae_divergence_weight * K.mean((log_mean_p_vae_1 - (ref_vae_log_p - vae_log_p_margin))**2, axis=1) elif 'mini_batch_' in vae_loss_mode : mini_batch_log_mean_p_vae_1 = K.permute_dimensions(K.reshape(log_mean_p_vae_1, (int(batch_size / mini_batch_size), mini_batch_size, n_samples)), (0, 2, 1)) mini_batch_mean_log_p_vae_1 = K.mean(mini_batch_log_mean_p_vae_1, axis=-1) tiled_mini_batch_mean_log_p_vae_1 = K.tile(mini_batch_mean_log_p_vae_1, (mini_batch_size, 1)) if vae_loss_mode == 'mini_batch_bound' : vae_divergence_loss = vae_divergence_weight * K.mean(K.switch(tiled_mini_batch_mean_log_p_vae_1 < ref_vae_log_p - vae_log_p_margin, -tiled_mini_batch_mean_log_p_vae_1 + (ref_vae_log_p - vae_log_p_margin), K.zeros_like(tiled_mini_batch_mean_log_p_vae_1)), axis=1) elif vae_loss_mode == 'mini_batch_target' : vae_divergence_loss = vae_divergence_weight * K.mean((tiled_mini_batch_mean_log_p_vae_1 - (ref_vae_log_p - vae_log_p_margin))**2, axis=1) entropy_loss = entropy_weight * masked_entropy_mse(pwm_1, mask) entropy_loss += similarity_weight * K.mean(pwm_sample_entropy_func(sampled_pwm_1, sampled_pwm_2, sampled_mask), axis=1) #Compute total loss total_loss = iso_loss + entropy_loss + vae_divergence_loss return total_loss return loss_func class EpochVariableCallback(Callback): def __init__(self, my_variable, my_func): self.my_variable = my_variable self.my_func = my_func def on_epoch_end(self, epoch, logs={}): K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch)) #Function for running GENESIS def run_genesis(sequence_templates, loss_func, library_contexts, model_path, batch_size=32, n_samples=1, n_z_samples=1, vae_params=None, n_epochs=10, steps_per_epoch=100) : #Build Generator Network _, generator = build_generator(batch_size, len(sequence_templates[0]), load_generator_network, n_classes=len(sequence_templates), n_samples=n_samples, sequence_templates=sequence_templates, batch_normalize_pwm=False) #Build Predictor Network and hook it on the generator PWM output tensor _, sample_predictor = build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode='sample') #Build VAE model vae_tensors = [] if vae_params is not None : encoder_model_path, decoder_model_path, vae_upstream_padding, vae_downstream_padding, vae_latent_dim, vae_pwm_start, vae_pwm_end = vae_params vae_tensors = build_vae(generator, encoder_model_path, decoder_model_path, batch_size=batch_size, seq_length=len(sequence_templates[0]), n_samples=n_samples, n_z_samples=n_z_samples, vae_latent_dim=vae_latent_dim, vae_upstream_padding=vae_upstream_padding, vae_downstream_padding=vae_downstream_padding, vae_pwm_start=vae_pwm_start, vae_pwm_end=vae_pwm_end) #Build Loss Model (In: Generator seed, Out: Loss function) _, loss_model = build_loss_model(sample_predictor, loss_func, extra_loss_tensors=vae_tensors) #Specify Optimizer to use opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999) #Compile Loss Model (Minimize self) loss_model.compile(loss=lambda true, pred: pred, optimizer=opt) #Fit Loss Model train_history = loss_model.fit( 
[], np.ones((1, 1)), epochs=n_epochs, steps_per_epoch=steps_per_epoch ) return generator, sample_predictor, train_history #Maximize isoform proportion sequence_templates = [ 'CTTCCGATCTCTCGCTCTTTCTATGGCATTCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCTGTCGTCGTGGGTGTCGAAAATGAAATAAAACAAGTCAATTGCGTAGTTTATTCAGACGTACCCCGTGGACCTAC' ] library_contexts = [ 'doubledope' ] margin_similarities = [ 0.5 ] #Generate new random seed print(np.random.randint(low=0, high=1000000)) #Train APA Cleavage GENESIS Network print("Training GENESIS") #Number of PWMs to generate per objective batch_size = 64 mini_batch_size = 8 #Number of One-hot sequences to sample from the PWM at each grad step n_samples = 1 #Number of VAE latent vector samples at each grad step n_z_samples = 32#128#32 #Number of epochs per objective to optimize n_epochs = 50#10#5#25 #Number of steps (grad updates) per epoch steps_per_epoch = 50 seed = 104590 for class_i in range(len(sequence_templates)) : lib_name = library_contexts[class_i].split("_")[0] print("Library context = " + str(lib_name)) K.clear_session() set_seed(seed) loss = get_isoform_loss( [1.0], fitness_weight=0.1,#0.5, batch_size=batch_size, n_samples=n_samples, n_z_samples=n_z_samples, mini_batch_size=mini_batch_size, seq_length=len(sequence_templates[0]), vae_loss_mode='mini_batch_bound',#'target', vae_divergence_weight=40.0 * 1./71.,#5.0 * 1./71.,#0.5 * 1./71., ref_vae_log_p=-38.807, vae_log_p_margin=2.0, #decoded_pwm_epsilon=0.05, pwm_start=vae_pwm_start + 5, pwm_end=vae_pwm_start + 5 + 71, vae_pwm_start=vae_pwm_start, pwm_target_bits=1.8, entropy_weight=0.5,#0.01, entropy_loss_mode='margin', similarity_weight=5.0,#0.5,#5.0, similarity_margin=margin_similarities[class_i] ) genesis_generator, genesis_predictor, train_history = run_genesis([sequence_templates[class_i]], loss, [library_contexts[class_i]], saved_predictor_model_path, batch_size, n_samples, n_z_samples, vae_params, n_epochs, steps_per_epoch) genesis_generator.get_layer('lambda_rand_sequence_class').function = lambda inp: inp genesis_generator.get_layer('lambda_rand_input_1').function = lambda inp: inp genesis_generator.get_layer('lambda_rand_input_2').function = lambda inp: inp genesis_predictor.get_layer('lambda_rand_sequence_class').function = lambda inp: inp genesis_predictor.get_layer('lambda_rand_input_1').function = lambda inp: inp genesis_predictor.get_layer('lambda_rand_input_2').function = lambda inp: inp # Save model and weights save_dir = 'saved_models' if not os.path.isdir(save_dir): os.makedirs(save_dir) model_name = 'genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + '_vae_kl_generator.h5' model_path = os.path.join(save_dir, model_name) genesis_generator.save(model_path) print('Saved trained model at %s ' % model_path) model_name = 'genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + '_vae_kl_predictor.h5' model_path = os.path.join(save_dir, model_name) genesis_predictor.save(model_path) print('Saved trained model at %s ' % model_path) #Load GENESIS models and predict sample sequences lib_name = library_contexts[0].split("_")[0] batch_size = 64 model_names = [ 'genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + '_vae_kl', ] sequence_templates = [ 'CTTCCGATCTCTCGCTCTTTCTATGGCATTCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCTGTCGTCGTGGGTGTCGAAAATGAAATAAAACAAGTCAATTGCGTAGTTTATTCAGACGTACCCCGTGGACCTAC' ] for class_i in range(len(sequence_templates)-1, 0-1, -1) : 
save_dir = os.path.join(os.getcwd(), 'saved_models') model_name = model_names[class_i] + '_predictor.h5' model_path = os.path.join(save_dir, model_name) predictor = load_model(model_path, custom_objects={'st_sampled_softmax': st_sampled_softmax, 'st_hardmax_softmax': st_hardmax_softmax}) n = batch_size sequence_class = np.array([0] * n).reshape(-1, 1) #np.random.uniform(-6, 6, (n, 1)) # noise_1 = np.random.uniform(-1, 1, (n, 100)) noise_2 = np.random.uniform(-1, 1, (n, 100)) pred_outputs = predictor.predict([sequence_class, noise_1, noise_2], batch_size=batch_size) _, _, _, optimized_pwm, _, _, _, _, _, iso_pred, cut_pred, _, _ = pred_outputs #Plot one PWM sequence logo per optimized objective (Experiment 'Punish A-runs') for pwm_index in range(10) : sequence_template = sequence_templates[class_i] pwm = np.expand_dims(optimized_pwm[pwm_index, :, :, 0], axis=0) cut = np.expand_dims(cut_pred[pwm_index, 0, :], axis=0) iso = np.expand_dims(np.sum(cut[:, 80: 115], axis=-1), axis=-1) plot_seqprop_logo(pwm, iso, cut, annotate_peaks='max', sequence_template=sequence_template, figsize=(12, 1.5), width_ratios=[1, 8], logo_height=0.8, usage_unit='fraction', plot_start=70-50, plot_end=76+50, save_figs=False, fig_name='genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + "_pwm_index_" + str(pwm_index), fig_dpi=150) ```
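For reference, the multi-sample ELBO values reported above are importance-weighted averages over latent samples, computed in base-10 log space with the log-sum-exp trick for numerical stability. The snippet below is a minimal NumPy sketch of that averaging step only; the array shapes and toy values are hypothetical and not outputs of this notebook.

```
import numpy as np

def log10_mean_p(log_p_samples):
    """Stable log10 of the mean of per-sample likelihood estimates.

    log_p_samples: array of shape (batch, n_z_samples) holding base-10
    log importance weights, i.e. log10 p(x|z) + log10 p(z) - log10 q(z|x).
    """
    n_samples = log_p_samples.shape[-1]
    # Divide by the number of samples in log space, then apply log-sum-exp.
    log_p_div_n = log_p_samples - np.log10(n_samples)
    max_log_p = np.max(log_p_div_n, axis=-1, keepdims=True)
    return max_log_p[..., 0] + np.log10(
        np.sum(10.0 ** (log_p_div_n - max_log_p), axis=-1)
    )

# Toy check: if every latent sample yields the same estimate, the
# multi-sample average equals that estimate.
toy = np.full((2, 32), -38.8)
print(log10_mean_p(toy))  # approximately [-38.8, -38.8]
```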
github_jupyter
# Factory Planning

Level: Intermediate

## Objective and Prerequisites

This model, together with Factory Planning II, is an example of a production planning problem. In production planning problems, you must choose which products to produce, how much of each product to produce, and which resources to use, in order to maximize profit or minimize cost while satisfying a range of constraints. These problems are common across a wide variety of manufacturing settings.

### What You Will Learn

In this particular example, we will model and solve a production mix problem: in each period, we can manufacture a range of products. Each product requires a different amount of time on each machine and earns a different profit. The objective is to create an optimal multi-period production plan that maximizes profit. Some machines are unavailable in certain periods due to maintenance, and because of market limitations there is an upper bound on the monthly sales of each product; storage capacity is also limited.

In Factory Planning II, we'll add more complexity to this example; the month in which each machine is down for maintenance will be chosen as a part of the optimized plan.

More information on this type of model can be found in example #3 of the fifth edition of Model Building in Mathematical Programming by H. P. Williams on pages 255-256 and 300-302.

This modeling example is at the intermediate level, where we assume that you know Python and are familiar with the Gurobi Python API. In addition, you should have some knowledge about building mathematical optimization models.

**Note:** You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=CommercialDataScience) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=Github&utm_medium=website_JupyterME&utm_campaign=AcademicDataScience) as an *academic user*.

---
## Problem Description

A factory makes seven products (Prod 1 to Prod 7) using a range of machines including:

- Four grinders
- Two vertical drills
- Three horizontal drills
- One borer
- One planer

Each product has a defined profit contribution per unit sold (defined as the sales price per unit minus the cost of raw materials). In addition, the manufacturing of each product requires a certain amount of time on each machine (in hours). The profit contributions and manufacturing times are shown below. A dash indicates that the manufacturing process for the given product does not require that machine.

| <i></i> | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Profit | 10 | 6 | 8 | 4 | 11 | 9 | 3 |
| Grinding | 0.5 | 0.7 | - | - | 0.3 | 0.2 | 0.5 |
| Vertical Drilling | 0.1 | 0.2 | - | 0.3 | - | 0.6 | - |
| Horizontal Drilling | 0.2 | - | 0.8 | - | - | - | 0.6 |
| Boring | 0.05 | 0.03 | - | 0.07 | 0.1 | - | 0.08 |
| Planing | - | - | 0.01 | - | 0.05 | - | 0.05 |

In each of the six months covered by this model, one or more of the machines is scheduled to be down for maintenance and as a result will not be available to use for production that month. The maintenance schedule is as follows:

| Month | Machine |
| --- | --- |
| January | One grinder |
| February | Two horizontal drills |
| March | One borer |
| April | One vertical drill |
| May | One grinder and one vertical drill |
| June | One horizontal drill |

There are limitations on how many of each product can be sold in a given month.
These limits are shown below: | Month | PROD1 | PROD2 | PROD3 | PROD4 | PROD5 | PROD6 | PROD7 | | --- | --- | --- | --- | --- | --- | --- | --- | | January | 500 | 1000 | 300 | 300 | 800 | 200 | 100 | | February | 600 | 500 | 200 | 0 | 400 | 300 | 150 | | March | 300 | 600 | 0 | 0 | 500 | 400 | 100 | | April | 200 | 300 | 400 | 500 | 200 | 0 | 100 | | May | 0 | 100 | 500 | 100 | 1000 | 300 | 0 | | June | 500 | 500 | 100 | 300 | 1100 | 500 | 60 | Up to 100 units of each product may be stored in inventory at a cost of $\$0.50$ per unit per month. At the start of January, there is no product inventory. However, by the end of June, there should be 50 units of each product in inventory. The factory produces products six days a week using two eight-hour shifts per day. It may be assumed that each month consists of 24 working days. Also, for the purposes of this model, there are no production sequencing issues that need to be taken into account. What should the production plan look like? Also, is it possible to recommend any price increases and determine the value of acquiring any new machines? This problem is based on a larger model built for the Cornish engineering company of Holman Brothers. --- ## Model Formulation ### Sets and Indices $t \in \text{Months}=\{\text{Jan},\text{Feb},\text{Mar},\text{Apr},\text{May},\text{Jun}\}$: Set of months. $p \in \text{Products}=\{1,2,\dots,7\}$: Set of products. $m \in \text{Machines}=\{\text{Grinder},\text{VertDrill},\text{horiDrill},\text{Borer},\text{Planer}\}$: Set of machines. ### Parameters $\text{hours_per_month} \in \mathbb{R}^+$: Time (in hours/month) available at any machine on a monthly basis. It results from multiplying the number of working days (24 days) by the number of shifts per day (2) by the duration of a shift (8 hours). $\text{max_inventory} \in \mathbb{N}$: Maximum number of units of a single product type that can be stored in inventory at any given month. $\text{holding_cost} \in \mathbb{R}^+$: Monthly cost (in USD/unit/month) of keeping in inventory a unit of any product type. $\text{store_target} \in \mathbb{N}$: Number of units of each product type to keep in inventory at the end of the planning horizon. $\text{profit}_p \in \mathbb{R}^+$: Profit (in USD/unit) of product $p$. $\text{installed}_m \in \mathbb{N}$: Number of machines of type $m$ installed in the factory. $\text{down}_{t,m} \in \mathbb{N}$: Number of machines of type $m$ scheduled for maintenance at month $t$. $\text{time_req}_{m,p} \in \mathbb{R}^+$: Time (in hours/unit) needed on machine $m$ to manufacture one unit of product $p$. $\text{max_sales}_{t,p} \in \mathbb{N}$: Maximum number of units of product $p$ that can be sold at month $t$. ### Decision Variables $\text{make}_{t,p} \in \mathbb{R}^+$: Number of units of product $p$ to manufacture at month $t$. $\text{store}_{t,p} \in [0, \text{max_inventory}] \subset \mathbb{R}^+$: Number of units of product $p$ to store at month $t$. $\text{sell}_{t,p} \in [0, \text{max_sales}_{t,p}] \subset \mathbb{R}^+$: Number of units of product $p$ to sell at month $t$. **Assumption:** We can produce fractional units. ### Objective Function - **Profit:** Maximize the total profit (in USD) of the planning horizon. 
\begin{equation}
\text{Maximize} \quad Z = \sum_{t \in \text{Months}}\sum_{p \in \text{Products}} (\text{profit}_p*\text{sell}_{t,p} - \text{holding_cost}*\text{store}_{t,p}) \tag{0}
\end{equation}

### Constraints

- **Initial Balance:** For each product $p$, the number of units produced in January should be equal to the number of units sold plus the number stored (in units of product).

\begin{equation}
\text{make}_{\text{Jan},p} = \text{sell}_{\text{Jan},p} + \text{store}_{\text{Jan},p} \quad \forall p \in \text{Products} \tag{1}
\end{equation}

- **Balance:** For each product $p$, the number of units produced in month $t$ plus the units previously stored should be equal to the number of units sold and stored in that month (in units of product).

\begin{equation}
\text{store}_{t-1,p} + \text{make}_{t,p} = \text{sell}_{t,p} + \text{store}_{t,p} \quad \forall (t,p) \in \text{Months} \setminus \{\text{Jan}\} \times \text{Products} \tag{2}
\end{equation}

- **Inventory Target:** The number of units of product $p$ kept in inventory at the end of the planning horizon should hit the target (in units of product).

\begin{equation}
\text{store}_{\text{Jun},p} = \text{store_target} \quad \forall p \in \text{Products} \tag{3}
\end{equation}

- **Machine Capacity:** Total time used to manufacture any product at machine type $m$ cannot exceed its monthly capacity (in hours).

\begin{equation}
\sum_{p \in \text{Products}}\text{time_req}_{m,p}*\text{make}_{t,p} \leq \text{hours_per_month}*(\text{installed}_m - \text{down}_{t,m}) \quad \forall (t,m) \in \text{Months} \times \text{Machines} \tag{4}
\end{equation}

---
## Python Implementation

We import the Gurobi Python Module and other Python libraries.

```
import gurobipy as gp
import numpy as np
import pandas as pd
from gurobipy import GRB

# tested with Python 3.7.0 & Gurobi 9.0
```

### Input Data
We define all the input data of the model.
``` # Parameters products = ["Prod1", "Prod2", "Prod3", "Prod4", "Prod5", "Prod6", "Prod7"] machines = ["grinder", "vertDrill", "horiDrill", "borer", "planer"] months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun"] profit = {"Prod1":10, "Prod2":6, "Prod3":8, "Prod4":4, "Prod5":11, "Prod6":9, "Prod7":3} time_req = { "grinder": { "Prod1": 0.5, "Prod2": 0.7, "Prod5": 0.3, "Prod6": 0.2, "Prod7": 0.5 }, "vertDrill": { "Prod1": 0.1, "Prod2": 0.2, "Prod4": 0.3, "Prod6": 0.6 }, "horiDrill": { "Prod1": 0.2, "Prod3": 0.8, "Prod7": 0.6 }, "borer": { "Prod1": 0.05,"Prod2": 0.03,"Prod4": 0.07, "Prod5": 0.1, "Prod7": 0.08 }, "planer": { "Prod3": 0.01,"Prod5": 0.05,"Prod7": 0.05 } } # number of machines down down = {("Jan","grinder"): 1, ("Feb", "horiDrill"): 2, ("Mar", "borer"): 1, ("Apr", "vertDrill"): 1, ("May", "grinder"): 1, ("May", "vertDrill"): 1, ("Jun", "planer"): 1, ("Jun", "horiDrill"): 1} # number of each machine available installed = {"grinder":4, "vertDrill":2, "horiDrill":3, "borer":1, "planer":1} # market limitation of sells max_sales = { ("Jan", "Prod1") : 500, ("Jan", "Prod2") : 1000, ("Jan", "Prod3") : 300, ("Jan", "Prod4") : 300, ("Jan", "Prod5") : 800, ("Jan", "Prod6") : 200, ("Jan", "Prod7") : 100, ("Feb", "Prod1") : 600, ("Feb", "Prod2") : 500, ("Feb", "Prod3") : 200, ("Feb", "Prod4") : 0, ("Feb", "Prod5") : 400, ("Feb", "Prod6") : 300, ("Feb", "Prod7") : 150, ("Mar", "Prod1") : 300, ("Mar", "Prod2") : 600, ("Mar", "Prod3") : 0, ("Mar", "Prod4") : 0, ("Mar", "Prod5") : 500, ("Mar", "Prod6") : 400, ("Mar", "Prod7") : 100, ("Apr", "Prod1") : 200, ("Apr", "Prod2") : 300, ("Apr", "Prod3") : 400, ("Apr", "Prod4") : 500, ("Apr", "Prod5") : 200, ("Apr", "Prod6") : 0, ("Apr", "Prod7") : 100, ("May", "Prod1") : 0, ("May", "Prod2") : 100, ("May", "Prod3") : 500, ("May", "Prod4") : 100, ("May", "Prod5") : 1000, ("May", "Prod6") : 300, ("May", "Prod7") : 0, ("Jun", "Prod1") : 500, ("Jun", "Prod2") : 500, ("Jun", "Prod3") : 100, ("Jun", "Prod4") : 300, ("Jun", "Prod5") : 1100, ("Jun", "Prod6") : 500, ("Jun", "Prod7") : 60, } holding_cost = 0.5 max_inventory = 100 store_target = 50 hours_per_month = 2*8*24 ``` ## Model Deployment We create a model and the variables. For each product (seven kinds of products) and each time period (month), we will create variables for the amount of which products get manufactured, held, and sold. In each month, there is an upper limit on the amount of each product that can be sold. This is due to market limitations. ``` factory = gp.Model('Factory Planning I') make = factory.addVars(months, products, name="Make") # quantity manufactured store = factory.addVars(months, products, ub=max_inventory, name="Store") # quantity stored sell = factory.addVars(months, products, ub=max_sales, name="Sell") # quantity sold ``` Next, we insert the constraints. The balance constraints ensure that the amount of product that is in storage in the prior month plus the amount that gets manufactured equals the amount that is sold and held for each product in the current month. This ensures that all products in the model are manufactured in some month. The initial storage is empty. ``` #1. Initial Balance Balance0 = factory.addConstrs((make[months[0], product] == sell[months[0], product] + store[months[0], product] for product in products), name="Initial_Balance") #2. 
Balance Balance = factory.addConstrs((store[months[months.index(month) -1], product] + make[month, product] == sell[month, product] + store[month, product] for product in products for month in months if month != months[0]), name="Balance") ``` The Inventory Target constraints force that at the end of the last month the storage contains the specified amount of each product. ``` #3. Inventory Target TargetInv = factory.addConstrs((store[months[-1], product] == store_target for product in products), name="End_Balance") ``` The capacity constraints ensure that, for each month, the time all products require on a certain kind of machine is less than or equal to the available hours for that type of machine in that month multiplied by the number of available machines in that period. Each product requires some machine hours on different machines. Each machine is down in one or more months due to maintenance, so the number and type of available machines varies per month. There can be multiple machines per machine type. ``` #4. Machine Capacity MachineCap = factory.addConstrs((gp.quicksum(time_req[machine][product] * make[month, product] for product in time_req[machine]) <= hours_per_month * (installed[machine] - down.get((month, machine), 0)) for machine in machines for month in months), name = "Capacity") ``` The objective is to maximize the profit of the company, which consists of the profit for each product minus the cost for storing the unsold products. This can be stated as: ``` #0. Objective Function obj = gp.quicksum(profit[product] * sell[month, product] - holding_cost * store[month, product] for month in months for product in products) factory.setObjective(obj, GRB.MAXIMIZE) ``` Next, we start the optimization and Gurobi finds the optimal solution. ``` factory.optimize() ``` --- ## Analysis The result of the optimization model shows that the maximum profit we can achieve is $\$93,715.18$. Let's see the solution that achieves that optimal result. ### Production Plan This plan determines the amount of each product to make at each period of the planning horizon. For example, in February we make 700 units of product Prod1. ``` rows = months.copy() columns = products.copy() make_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, product in make.keys(): if (abs(make[month, product].x) > 1e-6): make_plan.loc[month, product] = np.round(make[month, product].x, 1) make_plan ``` ### Sales Plan This plan defines the amount of each product to sell at each period of the planning horizon. For example, in February we sell 600 units of product Prod1. ``` rows = months.copy() columns = products.copy() sell_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, product in sell.keys(): if (abs(sell[month, product].x) > 1e-6): sell_plan.loc[month, product] = np.round(sell[month, product].x, 1) sell_plan ``` ### Inventory Plan This plan reflects the amount of product in inventory at the end of each period of the planning horizon. For example, at the end of February we have 100 units of Prod1 in inventory. ``` rows = months.copy() columns = products.copy() store_plan = pd.DataFrame(columns=columns, index=rows, data=0.0) for month, product in store.keys(): if (abs(store[month, product].x) > 1e-6): store_plan.loc[month, product] = np.round(store[month, product].x, 1) store_plan ``` **Note:** If you want to write your solution to a file, rather than print it to the terminal, you can use the model.write() command. 
An example implementation is: `factory.write("factory-planning-1-output.sol")`

---
## References

H. Paul Williams, Model Building in Mathematical Programming, fifth edition.

Copyright &copy; 2020 Gurobi Optimization, LLC
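As an added sketch following up on the note above and on the question in the problem description about the value of acquiring new machines: the snippet below (a minimal illustration, not part of the original notebook; it assumes the `factory` model, `MachineCap` constraints, and `months`/`machines` lists built earlier, after `factory.optimize()` has found an optimal solution) writes the solution to a file and prints the shadow prices of the machine-capacity constraints, i.e. the estimated profit gained per extra machine-hour.

```
# Minimal sketch; assumes the optimized model and objects defined above.

# Write the full solution to a file, as mentioned in the note above.
factory.write("factory-planning-1-output.sol")

# Shadow prices (dual values) of the machine-capacity constraints: the
# increase in total profit per additional hour of capacity for a machine
# type in a given month. Zeros mean capacity is not binding that month.
for machine in machines:
    for month in months:
        pi = MachineCap[machine, month].Pi
        if abs(pi) > 1e-6:
            print(f"{month} {machine}: {pi:.4f} USD per extra hour")
```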
github_jupyter
``` import pandas as pd import matplotlib.pyplot as plt import datetime as dt import seaborn as sns import numpy as np import matplotlib.dates as mdates import datetime #sns.set(color_codes=True) import matplotlib as mpl mpl.rcParams['pdf.fonttype'] = 42 import statistics as st sns.set_style('whitegrid', {'axes.linewidth' : 0.5}) from statsmodels.distributions.empirical_distribution import ECDF import scipy import gc from helpers import * today_str = dt.datetime.today().strftime('%y%m%d') def curve(startx, starty, endx, endy): x1 = np.linspace(0,(endx-startx),100) x2 = x1+startx x = x1/(endx-startx) y = (endy-starty)*(6*x**5-15*x**4+10*x**3)+starty y = (endy-starty)*(-20*x**7+70*x**6-84*x**5+35*x**4)+starty return x2, y curative = pd.read_csv('~/Box/covid_CDPH/2021.07.06 Master Set Data Only_Deidentified.csv', encoding= 'unicode_escape') curative['patient_symptom_date'] = pd.to_datetime(curative['patient_symptom_date'], errors='coerce') curative['collection_time'] = pd.to_datetime(curative['collection_time'], errors='coerce') curative['days'] = (pd.to_datetime(curative['collection_time'], utc=True) - pd.to_datetime(curative['patient_symptom_date'], utc=True)).dt.days idph = pd.read_csv('~/Box/covid_IDPH/sentinel_surveillance/210706_SS_epic.csv', encoding= 'unicode_escape') idph['test_date'] = pd.to_datetime(idph['test_date']) idph['test_time'] = pd.to_datetime(idph['test_time']) idph['date_symptoms_start'] = pd.to_datetime(idph['date_symptoms_start']) idph['days'] = (idph['test_date'] - idph['date_symptoms_start']).dt.days ss_cond = (idph['days'] <= 4) & (idph['days'] >= 0) pos_cond = (idph['result'] == 'DETECTED') | (idph['result'] == 'POSITIVE') | (idph['result'] == 'Detected') chi_cond = (idph['test_site_city'] == 'CHICAGO') zips = pd.read_csv('./data/Chicago_ZIP_codes.txt', header=None)[0].values idph['chicago'] = idph['pat_zip_code'].apply(lambda x: zip_in_zips(x, zips)) curative['chicago'] = curative['patient_city'] == 'Chicago' curative_time_frame_cond = (curative['collection_time'] >= pd.to_datetime('9-27-20')) & (curative['collection_time'] <= pd.to_datetime('6-13-21')) curative_ss = (curative['days'] >= 0) & (curative['days'] <= 4) curative_symptom = curative['patient_is_symptomatic'] idph_time_frame_cond = (idph['test_date'] >= pd.to_datetime('9-27-20')) & (idph['test_date'] <= pd.to_datetime('6-13-21')) idph_ss = (idph['days'] >= 0) & (idph['days'] <= 4) idph_symptom = idph['symptomatic_per_cdc'] == 'Yes' idph_chicago_site = (idph['test_site'] == 'IDPH COMMUNITY TESTING AUBURN GRESHAM') | (idph['test_site'] == 'IDPH AUBURN GRESHAM COMMUNITY TESTING') | (idph['test_site'] == 'IDPH HARWOOD HEIGHTS COMMUNITY TESTING') idph_count = np.sum(idph_time_frame_cond & idph_ss & idph['chicago'] & idph_chicago_site) curative_count = np.sum(curative_time_frame_cond & curative_ss & curative['chicago']) pos_cond_curative = curative['test_result'] == 'POSITIVE' curative['positive'] = pos_cond_curative chi_idph = (idph['test_site_city'] == 'Chicago') | (idph['test_site_city'] == 'CHICAGO') pos_cond_idph = (idph['result'] == 'DETECTED') | (idph['result'] == 'POSITIVE') | (idph['result'] == 'Detected') idph['positive'] = pos_cond_idph print(idph_count) print(curative_count) print('Tests collected at sentinel sites in study period: ') sentinel_sites_total = len(curative[curative_time_frame_cond]) + len(idph[idph_time_frame_cond & idph_chicago_site]) print(sentinel_sites_total) print('with Chicago residence: ') chicago_residents = len(curative[curative_time_frame_cond & curative['chicago']]) + \ 
len(idph[idph_time_frame_cond & idph_chicago_site & idph['chicago']]) print(chicago_residents) print('with valid symptom date: ') with_symptom_date = len(curative[curative_time_frame_cond & curative['chicago']].dropna(subset=['days'])) + \ len(idph[idph_time_frame_cond & idph_chicago_site & idph['chicago']].dropna(subset=['days'])) print(with_symptom_date) print('symptom date 4 or fewer days before test: ') tot_ss = len(curative[curative_time_frame_cond & curative['chicago'] & curative_ss].dropna(subset=['days'])) + \ len(idph[idph_time_frame_cond & idph_chicago_site & idph['chicago'] & idph_ss].dropna(subset=['days'])) print(tot_ss) print('and positive: ') tot_sc = len(curative[curative_time_frame_cond & curative['chicago'] & curative_ss & pos_cond_curative].dropna(subset=['days'])) + \ len(idph[idph_time_frame_cond & idph_chicago_site & idph['chicago'] & idph_ss & pos_cond_idph].dropna(subset=['days'])) print(tot_sc) h = 10 w = 8 fig = plt.figure(figsize=(w, h)) figh = h-0 figw = w-0 ax = fig.add_axes([0,0,figw/w,figh/h]) stop_location = np.arange(0,5,1) line_width = 0.05 #ax.set_xlim([-0.05,1.05]) h_padding = 0.15 v_padding = 0.2 line_width = 0.2 line_height = 4.5 midpoint = (v_padding + line_height)/2 tot_height = sentinel_sites_total ax.fill_between([stop_location[0], stop_location[0]+line_width], [midpoint+line_height/2]*2, [midpoint-line_height/2]*2, color='gold', zorder=15) #ax.text(x=stop_location[0]+line_width/1.75, # y=midpoint, s="specimens collected at sentinel sites in study period n = " + "{:,}".format(sentinel_sites_total), # ha='center', va='center', # rotation=90, zorder=16, color='k', fontsize=14) splits = [chicago_residents, with_symptom_date, tot_ss, tot_sc] d = tot_height splits_array = np.array(splits)/d d_t = 1 d_ts = d d_top = midpoint+line_height/2 d_bot = midpoint-line_height/2 d_x = stop_location[0] # midpoint = figh/2 include_color_array = ['gold']*(len(splits)-1) + ['blue'] exclude_color_array = ['crimson']*(len(splits)-1) + ['blue'] for s, l_l, s1, include_color, exclude_color in zip(splits_array, stop_location[1:], splits, include_color_array, exclude_color_array): t_line = line_height*d_t + v_padding ax.fill_between([l_l, l_l+line_width], [midpoint+t_line/2]*2, [midpoint+t_line/2-line_height*s]*2, color=include_color, zorder=13) ax.fill_between([l_l, l_l+line_width], [midpoint-t_line/2]*2, [midpoint-t_line/2+line_height*(d_t-s)]*2, color=exclude_color) a1 = curve(d_x+line_width, d_bot, l_l, midpoint-t_line/2) a2 = curve(d_x+line_width, d_bot+line_height*(d_t-s), l_l, midpoint-t_line/2+line_height*(d_t-s)) ax.fill_between(a1[0], a1[1], a2[1], color=exclude_color, alpha=0.25, linewidth=0) ax.text((d_x+l_l+line_width)/2, midpoint+t_line/2-line_height*(s)/2, "n = "+"{:,}".format(s1), ha='center', va='center', rotation=0, fontsize=14) ax.text((d_x+l_l+line_width)/2, midpoint-t_line/2+line_height*(d_t-s)/2, "n = "+"{:,}".format(d_ts - s1), ha='center', va='center', rotation=0, fontsize=14) a1 = curve(d_x+line_width, d_top, l_l, midpoint+t_line/2) a2 = curve(d_x+line_width, d_bot+line_height*(d_t-s), l_l, midpoint+t_line/2-line_height*s) ax.fill_between(a1[0], a1[1], a2[1], color=include_color, alpha=0.25, linewidth=0) d_t = s d_ts = s1 d_top = midpoint+t_line/2 d_bot = midpoint+t_line/2-line_height*s d_x = l_l midpoint = midpoint+t_line/2-line_height*s/2 ax.text(x=stop_location[1]+line_width+0.05, y=0.35, s='not Chicago resident', ha='left', va='center', fontsize=14) ax.text(x=stop_location[2]+line_width+0.05, y=2.5, s='no valid date of symptom onset', ha='left', 
va='center', fontsize=14) ax.text(x=stop_location[3]+line_width+0.05, y=4.5, s='symptom onset > 4 days\nbefore specimen collection', ha='left', va='top', fontsize=14) ax.text(x=stop_location[4]+line_width+0.05, y=5.02, s=" positive test → sentinel case", ha='left', va='top', fontsize=14, weight='bold') ax.text(x=stop_location[4]+line_width+0.05, y=4.75, s=" negative or inconclusive test", ha='left', va='top', fontsize=14) ax.text(x=stop_location[0]-0.1, y=2.5, s="specimens collected at\ntesting sites in study period\nn = " + "{:,}".format(sentinel_sites_total), ha='right', va='center', rotation=0, zorder=16, color='k', fontsize=14) ax.fill_between(x=[2.95, 4 + line_width+0.05], y1=4.55, y2=5.075, color='black', alpha=0.1, edgecolor='black', linewidth=0, linestyle='dashed', zorder=0) ax.text(x=3.6, y=5.11, s="sentinel samples", ha='center', va='bottom', fontsize=14, weight='bold') ax.grid(False) ax.axis('off') fig.savefig('sankey_diagram_' + today_str + '.png', dpi=200, bbox_inches='tight') fig.savefig('sankey_diagram_' + today_str + '.pdf', bbox_inches='tight') ```
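For readers unfamiliar with the plotting approach above: the flow bands between stages of the funnel are drawn by filling the area between two smoothstep curves produced by the `curve()` helper defined at the top of this code cell. The snippet below is a minimal, self-contained illustration of that idea; the coordinates are arbitrary and purely for demonstration.

```
import numpy as np
import matplotlib.pyplot as plt

def smoothstep_curve(startx, starty, endx, endy):
    # Same idea as the `curve()` helper above: a 7th-order smoothstep
    # interpolation from (startx, starty) to (endx, endy).
    x2 = np.linspace(0, endx - startx, 100) + startx
    x = (x2 - startx) / (endx - startx)
    y = (endy - starty) * (-20*x**7 + 70*x**6 - 84*x**5 + 35*x**4) + starty
    return x2, y

fig, ax = plt.subplots(figsize=(4, 3))
top_x, top_y = smoothstep_curve(0.0, 1.0, 1.0, 0.6)  # upper edge of a band
bot_x, bot_y = smoothstep_curve(0.0, 0.2, 1.0, 0.4)  # lower edge of a band
ax.fill_between(top_x, top_y, bot_y, color='gold', alpha=0.25, linewidth=0)
ax.set_title('Smoothstep flow band (illustration)')
plt.show()
```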
github_jupyter
<a href="https://colab.research.google.com/github/MattFinney/practical_data_science_in_python/blob/main/Session_2_Practical_Data_Science.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/><a> # Practical Data Science in Python ## Unsupervised Learning: Classifying Spotify Tracks by Genre with $k$-Means Clustering Authors: Matthew Finney, Paulina Toro Isaza #### Run this First! (Function Definitions) ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns sns.set_palette('Set1') from sklearn.decomposition import PCA from sklearn.preprocessing import StandardScaler from sklearn.cluster import KMeans from IPython.display import Audio, Image, clear_output rs = 123 np.random.seed(rs) def pca_plot(df, classes=None): # Scale data for PCA scaled_df = StandardScaler().fit_transform(df) # Fit the PCA and extract the first two components pca_results = PCA().fit_transform(scaled_df) pca1_scores = pca_results[:,0] pca2_scores = pca_results[:,1] # Sort the legend labels if classes is None: hue_order = None n_classes = 0 elif str(classes[0]).isnumeric(): classes = ['Cluster {}'.format(x) for x in classes] hue_order = sorted(np.unique(classes)) n_classes = np.max(np.unique(classes).shape) else: hue_order = sorted(np.unique(classes)) n_classes = np.max(np.unique(classes).shape) # Plot the first two principal components plt.figure(figsize=(8.5,8.5)) plt.grid() sns.scatterplot(pca1_scores, pca2_scores, s=50, hue=classes, hue_order=hue_order, palette='Set1') plt.xlabel("Principal Component {}".format(1)) plt.ylabel("Principal Component {}".format(2)) plt.title('Principal Component Plot') plt.show() def tracklist_player(track_list, df, header="Track Player"): action = '' for track in track_list: print('{}\nTrack Name: {}\nArtist Name(s): {}'.format(header, df.loc[track,'name'],df.loc[track,'artist'])) try: display(Image(df.loc[track,'cover_url'], format='jpeg', height=150)) except: print('No cover art available') try: display(Audio(df.loc[track,'preview_url']+'.mp3', autoplay=True)) except: print('No audio preview available') print('Press <Enter> for the next track or q then <Enter> to quit: ') action = input() clear_output() if action=='q': break print('No more clusters. Goodbye!') def play_cluster_tracks(track_df, cluster_column="best_cluster"): for cluster in sorted(track_df[cluster_column].unique()): # Get the tracks in the cluster, and shuffle them for variety tracks_list = track_df[track_df[cluster_column] == cluster].index.values np.random.shuffle(tracks_list) # Instantiate a tracklist player tracklist_player(tracks_list, df=track_df, header='{}'.format(cluster)) # Load Track DataFrame path = 'https://raw.githubusercontent.com/MattFinney/practical_data_science_in_python/main/spotify_track_data.csv' tracks_df = pd.read_csv(path) # Columns from the track dataframe which are relevant for our analysis audio_feature_cols = ['danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms', 'time_signature'] # Show the first five rows of our dataframe tracks_df.head() ``` ## Recap from Session 1 In our earlier session, we started working with a dataset of Spotify tracks. We explored the variables in the dataset, and determined that audio features - like danceability, accousticness, and tempo - vary across the songs in our dataset and might help us to thoughtfully group the tracks into different playlists. 
We then used Principal Component Analysis (PCA), a dimensionality reduction technique, to visualize the variation in songs. We'll pick up where we left off, with the PCA plot from last time. If you're just joining us for Session 2, don't fret! Attending Session 1 is NOT a prerequisite to learn and have fun in Session 2 today! ``` # Plot the principal component analysis results pca_plot(tracks_df[audio_feature_cols]) ``` ## Today: Classification using $k$-Means Clustering Our Principal Component Analysis in the first session helped us to visualize the variation of track audio features in just two dimensions. Looking at the scatterplot of the first two principal components above, we can see that there are a few different groups of tracks. But how do we mathematically separate the tracks into these meaningful groups? One way to separate the tracks into meaningful groups based on similar audio features is to use clustering. Clustering is a machine learning technique that is very powerful for identifying patterns in unlabeled data where the ground truth is not known. ### What is $k$-Means Clustering? $k$-Means Clustering is one of the most popular clustering algorithms. The algorithm assigns each data point to a cluster using four main steps. **Step 1: Initialize the Clusters**\ Based on the user's desired number of clusters $k$, the algorithm randomly chooses a centroid for each cluster. In this example, we choose a $k=3$, therefore the algorithm randomly picks 3 centroids. ![Initialization.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAXEAAAFuCAYAAAB3ByjqAAAABmJLR0QA/wD/AP+gvaeTAAAw4UlEQVR42u2d2ZdUVdqn/dbXq/uue/VN911f9P/QfdFd30UNyyqUqRAtkEFFlEllcEAEZRBFFBBBBREoFAVBJnFmUhEQKJBJUMGBSQZllBmH3fns6vCLPHlIEjLixPQ8a8WqRWSZO/LEOb+997vf9/dec42IiIiIiIiIiIiIiFzTokWL//ynP/3pf/ry5cuXr9K9brzxxv94VSLeqlWr5W3atDnbrl27H3358uXLV/av66677vwf//jHB65KxP/617+u+8c//hFERKQ0/P3vf//197///cOKuIiIIi4iIoq4iIgo4iIiirgiLiKiiIuIiCIuIiKKuIiIIq6Ii4go4iIiooiLiCjiiriIiCIuIiKKuIiIKOIiIoq4Ii4iooiLiIgiLiIiiriIiCKuiIuIKOIiIqKIi4go4oq4iIgiLiIiiriIiCjiIiKKuCIuIqKIi4iIIi4ioogr4iIiiriIiCjiIiKiiIuIKOKKuBSb48ePh+effz7069cvDBs2LHz44Yc19fd///33YcKECeGee+4JI0aMCGvXrq2oz3/gwIEwfvz4+PlHjhwZNm7c6E2tiEutgADceOONYerUqWHLli3ho48+CnfccUd47rnnauLv/+qrr0L79u3Dyy+/HLZt2xaWLVsWbrnllvjvSmD79u3hhhtuCLNmzYqf//333w+dOnUK8+bN8+ZWxKUWGDRoUFi0aFG9986dOxc6d+4cBaLaueuuu8Ly5cvrvXfy5Mlw0003hT179pT957/tttvCJ598Uu+9I0eOxInp0KFD3uCKuFQzv/zyS2jZsmU4f/582k1ZMavRq+Xs2bPx7//1118b/OyZZ55pMLmVG4g1u6g0Ro0aFZYsWeJNroiLIl69nDlzJrRu3fqSIr5w4cKKFnFCK6KIS5UzcODA8Oabb9Z7D1Hv0qVLjLFWO7169WpwkPvjjz+Gv/3tb+Gbb74p+89P/H79+vX13jt27FgMpxw8eNAbXBGXamf//v1xNcfK+/PPPw9r1qwJvXv3jivRWmDnzp2/HQx+8cUXYeXKleH2228P06dPr4jPz0TL53/99dfj5//ggw9C165dw2uvvebNrYhLrcC2nBQ7xHvw4MFh6dKlNfX3s2IdN25cXJU//PDDUcgrib1794annnoqfn5SRNetW+dNrYiLiIgiLiKiiCviIiKKuIiIKOIiIqKIi4go4oq4iIgiLiIiivjVc+rUqTBlypTQv3//6Kq3ePHiVD+LaiXn540fNMU2el6IKOIVJWA333xzePbZZ6OfNdVm9913X3jkkUdqQsgPHz4cbU9ffPHFWEK9evXqaI9KBZ6IKOJlD2KV9KnAXQ8hwwei2mGywvcin4sXL0afaDu0iCjiZQ+r8DS3tbfeeiv6WVQz7DTatGkTw0lJZs+eHVfnIqKIV6SIY49aKyKO9WmSV199NbZMExFFvKwZM2ZMA7EinNKnT5+aaNg7dOjQMHfu3HrvXbhwwXCKiCJeGZw4cSI2dsW/evPmzbFf4IABA6KlZi1Ap3UaELzwwgvxYHfVqlXREpbJTUQU8YqANlmsxkkxfOihh2IopZZSDJnIJk2aFPr16xf9rGvNz1tEES+yiCOyHLL17ds3PPDAA2HBggUx5CEiImUu4hy6de7cOUycODHmMTPWgw8+GF+1tFoWEalIER8/fnyM1+aDeBP6sLJQRKTMRZymq/v27WvwPgL+xBNP+O2JiCJeziJ+6623hj179jR4/9133w1PPvmk356IKOLlLOJ0QcecKR/CKZg1LVu2zG9PRBTxchZxSsIJqZC3/Omnn8Y8bgyqcNvzYFNEpAJSDM+ePRtmzJgR7r333ijetWYVKyJS0SJe6VBM89xzz8VSfvLcFy1aVNBJCDtdrHSptBw4cGDmxUpHjhyJFa+9evWKqZ+cV2Q5PlWn+NwwPsVaWRcr4b3DTpHx
WWRk7X65f//+6MbZs2fPWKz18ccfZzo+Z1YkGfTo0SOOz25ZFPGqAYHLlbXv2LEj5rkTDipUWX/OD5yK088//zysX78+pl8+9thjmQnIjTfeGHdKX3zxRVi7dm24++67M/Mj3717d2jfvn145ZVX4vgICGLKWUoW7Nq1K9xwww3htddeC19++WW0LbjzzjsbpMUWC+4pxsdOmPFXrlwZbr/9dh7qTMbHqoHrP3/+/LBz587oN0QyAi6YoohXBYgpjoD5UG2K0BRixcRkkDSw+umnn8Idd9wRBbXY0AnpjTfeqPcefuQ8yJs2bSr6+PjcvPfee/XeO3/+fOjSpUvYvn170cdP852nwrhjx45R4IsN3zONPPKhQI6JPS2rq9BwXrVhw4YGO0Mm9jR3UFHEKw5uZlbjSRYuXFiQ1WK7du1iuCYJwo4fSjEhZNKqVat4ZpFk5syZDZptFBomq5YtW8b/TTJt2rS4Oi8m/N2MnxY6IqMq2Wyj0CDWWAmnjf/000/Hs6Ni8sMPP8RdZhqEVyzGU8SrAm5yYrZJ2H4Sxy6EiLPyScL2vthbesSjdevW4fTp0w1+9tJLL8UQSxYijnVuEnqmzpo1q6jjs+Jn/J9//rnBz/hu+Y6LCded658m4sToaWxSTI4dOxZDOZfagWqkpohXBaNHj24Qn0R8iJsW4gBo5MiRDVachDO6devWYJtbDDjISq44c+GMrVu3Fn18DoqTK07CGTT7IEZebDBlS644WSEzeX/zzTdFH5+wXDKckwtncF5RbLjP1qxZ02CFjrgfOnRIAVDEK59cI2ayN8hzJw7Owd+oUaMK8vsJ1RB/ZeVHDPqjjz7K1A+cBxXBInSDHzuCwgSFYVkW5A5Wcblk/BUrVsSDvawOFnMHq4SOOOSjAI3zgKwOFom7Mz47HybNJUuWxAk0q4PF3MEqCwnG53yC+73YuxBRxDOFbS8POXnuNCZm5VbIFDwKoshOyTWzyLqSlZg8oklWzPDhwzNPsWNbTwya8UeMGJF5ih0TKSmk+LGzM8o6xY5wHZMmuwLCGGQoZQkHmCxSGP/xxx+PixVRxEVERBEXEVHEFXEREUVcREQUcRERUcRFRBRxRVxERBEXERFF/KqhWIWKR/yYKZihjBynwWqBYhQMkaikxAa33JpqUKwyduzY+Pkoo3/nnXcy/XwUq9CvlfHxY6fqsZbA7ZAiHRwR8WPHzjZLvv7661ik1L179+jHnizjF0W8UY4ePRo6dOgQKw7xY6Z0nRuZ5gbV0D0IgaIsnbJsSrTxU2GievTRR8tGQCgbx66Xz0e1IbYEWdkGICA5P3DGx74X24KsbANKDaXyXH9cNbkW+KFjW1Bs87IcGzdujPcnCwvGZwLRj1wRvyKwzHz55ZfrvYd4U0JcDY2YMahK+ljkDLiy8Bu/HOwMWHnngyshDzJeJMUmreH2uXPnQqdOnWKTjWqH65ws0z958mT0I9+7d2/Rx8dnBc+bfLBRYGLRj1wRbxKYR6W5rSEsWa0GiwWTEX7TeKskycJv/HJg4YqVK66HSbKwsm3MShYr22pfDbILRSzTILxVbCtbRBoRT4Pwjla2iniT6Ny5c/juu+8avE+3mvHjx1f8l3spv3HCF5hmlXqSuVRTCT5bsZs65PzIse5NkkVTh1LDirvu+b3kDjXZManQcBZyqaYSGJllbaSmiFcoHPhNnjy5wQqRQ86s3fCKAbHvpBiyAs2qfdrl4CBr3rx59d7L+YFnEc4gnENj6XxyfuBfffVV1T/8NDfGvjgfDsKzCmdwHyZdH9kZc06BL7ko4peFB/aWW26JjX2JDdLolb6JWTUaLjY8kDk/cw418dsmHs6/y4GcHzlWrhxyEZ+m0QDhjCzI+ZFzsM342ARzP2R1sFdqcn7k+J9jIUsYMUs/cPqgIticS7GoYEIl0SA5sYoi3igcZNETkvQy/LBJMauGzJQc+JnzkN5///3R75qJqty29YRPWBUzeSYb/xYbwk2IOOPTrKMcDnyzhLAG4SP87ulElTxoLDas+EnxJWuKxdS2bdtUZUW8tmC1PW7cuJjny4OwYMGCqpqEpLIhLEVYjx3WoEGDqiJMqYhLQVcxhAPYjvKwsCVlxU2HH5FSQ4iKcA3hkW+//TbmoSPm7HxFEZc6aPeWjF9yMMuBlZVvUmrS8sBzjZwPHDjgBVLEpW3btvFwNgnpccQ5RUrF4cOH4yFlGqQwcogsinjNw1aVoo0kFKpkleEhkkZjxUTEyMmUEkW85qGyjerGfHJ54MQjRUoJPizEwZMrdNIKyYoRRbzmwWcCnw+yU8hzX758eVnlgUtt88UXX0TBJu+eRYV54Iq4pECFI6tx8tzJs05W4ImUElbenM+Qh48f0WeffeZFUcQrC2KDuTxu3BGrza+cqkv8uNk69+/fPyxatMg8dSkYO3fujEV4hAixiE6GZ0QRLyrE/UinYjtJHjf+zfiV86oGocMPnO0yB6nffPNNzFOn4KhabAuktKxbty4+P9gBcK9RzXvbbbdFAzdRxDMBNzYaDuSDeOPPUm7l71cDnXjefvvteu/hDsiqHK8WkauF5wRfnWT4Jec3nmYhrYgr4gWHVQRl8UkIOVT64SMhIaxc8Z5JUg5Wt1LZYBGNVXQa7PSqoWmLIl4BcBLPwU6SaijGaazpBIZbydRHkSuhMb/xoUOHeoCviGcDp/E45OVDEwL8I5JtsSoRPFpmzZpV7z2aPJD2iM2oSHPgMDNpH8EKnXOYtCI3RVwRLzjYrHbt2jVmb2B+zxaQhhNkq1QDGPez2yA0xCEUpdRMUMmJS+RqyPmNkxjAGQsNnVmdv/vuu14cRTw7iBnTXYeMFGJ51XCgmQ/hFB4y0r/wxNB4SwoJB5iEHnHnZPFTCw2sFXEREUVcERcRUcRFREQRFxERRVxERBFXxEVEFHEREVHERUQUcUW8hFD1+NRTT8VKR9wNcT2kY31WUExBkQ4Wn/fcc0+YN29eVfmdS+Ps2rUr2idQWUzjhqyNpej+88gjj4QuXbrE8bMuhqMq9OGHH47j48p5pX7l27ZtC0OGDIn/PY1ZSlHspoiXkIMHD0anQ1z/vv3223hDDR48OAwaNCgTv/F9+/bFsmYmjt27d8cbkhuRh1qqn08//fQ3v27uBZ5lbCGyarK9du3aOD52Dfv374/WDd27d8/MPO3jjz+OZfxMXIyPAOPXkrSPvhRMOPn/PX7n/PcY3CniNQJubPPnz6/3HuJ99913Z+LSRql8st8hu4AePXpErxepblg9IuT5YKOAMNGEoZhcyi/8xIkTUdgPHDhQ1PHZbeJLTuegfLCOZmHDDrkx8M3n/0ezl+TOul27dpkadCniJYQv+/jx4w3ez8JvnIeoVatWsUdnkjlz5oTJkyf7BVUxiBVimQYeJcVuZoxI43aZxqhRo8KSJUuKOj47X0KYabATvdwiCvGm5WIahGeybCOniJcQViL4JpdKRC81ibz88svRE1yqlx9//DH6waeF7RBRQhylmkQQ0WLHxhtrOoFZ3eV2onv37g233HJL6s+IrWd
pNa2IlxD8xpPxx/Pnz8e42saNG4s+Pq6KyfgjfuDc3MTHpbrp3bt3g4NMmpgQJkhrZlJoaONHXDoproyf1hGr0DtRwknE4fPJ9Y3FRvpy/z3PSbINIb1mCdOkNUtRxKt0NcRsTnYI26/33nsv3tgTJkzIZHz6FXIjkh3D+BxwMYHoB14bIDishtn1sfJcsGBB3B0uXrw4k/G//PLLKHgvvvhiHJ/zoZtuuik+B1lAPB7Bnj59ehx/7ty58XosX768Sf89jc/579m18t+zg+a/z7rrkCJeYlh50y2eONro0aMzjaXlVt5kxzA+Yk7GgNQOTOT0QyWEwDlM1h2ZOAhExHPjk3KYJew4WLQw/sSJE2PK5ZVAhhmTIP/9s88+G77++uvMv0NFvMzhJmOlzoq9T58+UfCzzCOXxuGAbPjw4XFr3rdv33ggmEV6qPyTXFouh6T33ntv1TVdUcQrHOKDbDfJW+UgZceOHbGwgFlfoSgPAWE7jXCTJ7x58+ZYMFUt7ffKHXatuTxtsl04TCTPfObMmYq4Il4eEOKgr2A+iDeVnStXrvQClZg777wzFnjkQ3iMVbltxIoLzwHx52T4hWwrFj5UIiviinjJadu2beopOQdAxN+kdJBfT5592o6IGGtTq/7k6qDCFKuANEaOHNnkw0lFXBEvKqw00lKtiItnVRot6Vy4cCG0bNkyVu4l4YCOgi0pHpwVdejQ4ZI72KwTBBRxSYVskbQ8clYgxF+ltNCBPSnWZHsw+Ra7bF1CNG1LijVnR5xTpBWxKeKKeOYQSiErhQo6iiLI46ZU+Pnnn/filAEcPHOwRl4/sXEEvWPHjjHfWIoP8fD8PPNcnnfWToyKuDQKK2/iq9h1sjLXmKq8oGCLqleyhsaPHx8LQCQ7yDPP5blzTnSled6KuCIuJYawBXna5AmTtcOhby2lXyJaxIBvvvnmmKfObi3Lv58sHPK0Gb9fv35FN64qNwhr4gbKDow89aSNgCIu0gjYiBL/fOONN2KeMGXUNBZ4/PHHa+Lv37RpUwwfUKZO5SCCwkSWVeYSviOMv3Tp0jg+fj+9evWK4Y1aYMWKFVG8KTAipZHrQbgz68wkRVwqFnzXP/jgg3rvXbx4MR541cLBb5pRGqmPCEsW5d+MkzRKI7yE/wkpgNUMWUm4gOI/kw/ZZLxfbAMvRVwqHlL8rr/++tQUPwyJsNOtZsi+QCzSyCLFsbEUP85uCOtUM4SxKPZKQz9xkSZAZxbytM+dO9fgZ5MmTYqOctUMxmWXKjbCSO3dd98t6vh04Kl7/lN/NmLEiBhqqGYaKzYi9TRLXVTEpWJhxZOMPyIupP2Vwk0ua3IHmfnQZIRzAmLUxYb4dzKdLys/8FJzKT9yOv5wTnD69GlFXORyIFhkRYwdOzZ6yRBC4N+zZs2qib+fwhYmLA4y2b7Pmzcv/rvYrdVy5PzIqVtg/FyedlZ+4KWGg3Ty1ElxpE4AS2f+nXWGiiIuFQ0HeTw8NJ0mFlxrlawUhJGnzq4EP2ycLrOE2DxnEIzPZJJsPFwLCwlEnDoBwni7d+/O/DMo4tIoWKwS42SFy/aZ8IV+5iLlgyIul4SGB8Q3adtFjJXCDirjWHWJiCJetvy07+/hwo77av7m4JQ9meXAKrxnz56W/4so4mUq4N/NDqff/tdw+q1rwoXt/Wv2xuD0/VIpfIRUbKYsooiXtYDnXrUs5BSTkLKXctNUfTGNiCJecQL+Wp2A/4d6Av6bkO8YUJM3Bx4kCHY+5L9Sbm37MRFFvCIEvJaFnPQxqtLwM//oo4+i0RQFDklhFxFFvHQCfmDOZQX834X83pq7QfAzp4R92LBh0S87abgkIop4yfjl+NoGMfDLvX7aO9W7RqqG7du3h0GDBsVKT2xs33rrrUz9yHFBHDhwYHQ+xJWyVqo9FfGC8Wu4sK1PkwX83Jp/C7/+dMq7RqqC9evXR/GkMzwdchBUhDQrP3JK9Zk8sBNmfKpta8mPXBEvqJD3VsCl5qAKN+kHjo0BFrNJn+yCP3V1q318VuiTmQ9+5LxPpbAo4gUT8nNrfqeAS1XBypdVeBpPP/10WLx4cVHHR6Q5JE/jiSeeCO+//75fkiJeGCFXwKUaOXXqVGjTpk1q/JvUUlquFZNjx45FS4c0OEQnG0oU8asU8l7/LuCr/2+dgP/oXSJVCfHvpK0CvUoRV9z5ig2dcZLNI7DXZXzSW0URb5aQK+BS7ezZsyceLE6YMCGufLFTIB5d7K5AOXINFJ577rk4Pj7w/JuDVlHEm6njv9Q5PZ3x7pCqBz9yLBQIYdDcgd6RWcKKGz90xp88eXLRD1QVca1oRUQUcRERUcRFRBRxRVxERBEXERFFXEREFHEREUVcERcRUcRF5JL8TGFZM8APHD/u9u3bRxvXRYsWZeoHvnXr1nD//ff/Nv6bb76Z6fiXgyYm9957b/x8+KUX2lgLO9/+/fv/9vuXLVumiIvUCu/v2xT+z8IHwsEzV+cTwrNHmTr+I0ePHg07duyIgjJu3LhMPv/q1atj2T4l8xhaffbZZ9GPhcrPcoDyfex28S3n823ZsiVONNOnTy/M91c3IXTu3Dl88skn8ffjh44fzMyZMxVxkVoQ8P82o0v4L9M6hP81b0A4dPbKhRwrV4Qjn3PnzkXhyqJ8HgFPNszGj5z38WUpJb/88kto165dgzJ+bAaY+A4ePNis3//TTz+FOv1r8HdiI4CBF1a/irhIlbIkT8BzrysVcsQCkUpj4sSJYeHChUX9Gw4dOhQ6duyY+rMxY8aEd955p6TX+Ntvvw3dunVL/dnIkSObbbLFJMmqO42hQ4eGlStXKuIitSLgudf/nn9vk4X87NmzoWXLlqnx5yyaKtCBp23btqnjjxgxIrZcKyWNTTIPPfRQDIE0h8aaWtx3332ZNhRXxEUyYum+zZcU8NyLGPmRc02zQObAjoPMfLL0A+cgL2lbS3iB8YnRl5rbb789fPjhh/Xew/6WQ0gmoebA5HXrrbfGeHs+tJsjXENYSREXqTIB/++XEfArFfLvvvsurjYJXxAeePXVV6OAZNUxPudHTjs3xudAD4EsFz9wQh5cDxo/85lmzJgRJ5ik8F4tOcHGD53fXyem8fc3d5WviIuUGScunAn/Y2a3Jgl47tVr5aQm/e7Tp0/HZg7EeV944YW40swSVrQ0c2B8utQTiy4nODt45ZVX4uebNm1a7BxUSNhx4MfO7yfrpRQNnhVxaRac/g8ePDiuSNi+ckNzci/1WbF/a91KvGuTBPzfFj0Yjp23r6so4pLBdpXtI5kIR44ciYLOyTwHR+VU8FFJQv67hQMVcFHEJRv69evXoNEt+bm9e/cOa9as8QKlsHz/lkvGxhHwowq4KOKSBT///HO4/vrrw4ULFxr8jBgt8VlpupAr4KKIS6YQLiFPOC1ViwMkDpOkaUKugIsiLiVh1KhRYerUqfXeO3HiREw7y7preiWybP/m8MfFQxRwUcSlNCDYt912Wxg+fHisEJw7d27MW549e7YXp6k7muABsCjiUkKIiePTQan3pE
mTopOdiCjikhEUZ5ASiJkSZcQULFy8eLFgv5/ik0GDBsXfz6qdqjnzyEUUcSmQwCKu5HlT2UYZ9aOPPhpN/guR501ZMnnkhFr4/bt37w7Dhg2Lk4aIKOLSTDBQWrp0ab33EO977rknGv03F35P0oCIPHKM+bP2lxBRxBXxqgKxxsqUJgJJXn/99WZ3ZyFkQh55WmgGrw18NkREEZdmQKiDtlJJiFvzau4k0aZNm9Q8cgQcxz0RUcSlGWBhmlxx076KPG/i2c2FPPLkipvY+E033ZS5256IIq6IVx2skrt37x4efvjhaO4/Z86cmOddqFVyLo+cw0w8rinHZ4JgHBFRxKUAELNevHhxePLJJ8PkyZMbzfMmYaWumXl45pmm/37yyOk+M3r06OinQkd2EVHEmwzdTx555JGYStepU6fY5YP+hHJl5AT8mmtC+Jd/CWHCBK8JbN++PfZUJP5PY152MZiDiSjiBSDXb5CKQmKxCPr48eNDz549LTi5QgGvyxaMAp57KeQhNsOlGQZplISm8FOnsIlcexFFvADQdXvevHkN3h8yZEgMIcjVCXi+kE+cWLvXhpV3sqs5iwPOAbZu3erNI4p4c+nQoUM4fPhwg/eXLFkSMyfk8gwcmC7g+UJeF6GqOeht2bp169TK1ilTpmgCJop4IaDnI1vcJIRXJhjUvSwPPti4gNeykLPiplgqrZiJ7u/u9EQRLwB4XT/++OP1Vkscat5yyy1hw4YNfvuNUOeD1SQBzxfyVatq6xoRlqP6NJ+DBw/GcxjOY0QU8WZCeht9IPEIefPNN2M5eZcuXWwd1qRrF+oyLpou4n37/jN+Xkv88MMP8X4aOXJkzLN/+eWX40Hn22+/7Q0kinihwHCJGPjYsWNjdeKmTZv81q9AyOvCvpcV8B49ak/Ac5w/fz4sWLAg5tlj4/v1119744gifiXs378/VhySB37zzTfHWPeZM2f8ZjMS8jvvrF0BLwc2b94cd5utWrWK2TJUxbJwEUW8IkQ8F3+kIpASb/6NiN9ZpyzmgRdWyOs0ooGA33GHAl5KsPPFxmD16tUxW4ZdwAMPPBCrY0URrwgRp6giLQ986NChUdileEKugJcedp5JmwTCO8TpC2FgJop40cGs6dChQw3eX7ZsWXjsscf8dosg5HVZdQp4GXD06NHQvn371J9hLTF//nwvkiJe/iJ+R52apFmasgp/5kpcmuSKhFwBLz008yBPPS3+TdNqWuKJIl72Il734WN6V1oeuO6KUu0Q/yZtNp+9e/fGcyLSH0URL3sRp1puwIABoX///rFCbu7cueaBS81AKLFz586xoI1m1yxqCLEQThRFvGJSDFmFc9NS6owfNilXIrUCO0/y1J966qnYUm/Pnj1eFEXcphCF5Ntvv63zIHkwmiVxEKufueTDouOeOptImkoTBtSPXBTxMmL37t2xCImt7qlTp+L2d9y4caF3794+qBLWrVv3W543Ezv3y0MPPRTPeUQU8TKABzLNR2Ngnb8rPSeltuH8Juk7zjnPrbfe2mibPBFFPCPqrk+sJE3y1ltvRS8XqV1OnjwZ2rZtm/qzSZMmxUN6EUW8DFZaeLskwd/CDJraBodN8rzT7CE4pCQEJ6KIlxhWVLjb5eex04uRcult27Z5gWocwm3JDkBM+uR5p3WkElHEMwYfCzIP6IZOHjsNBBBwfKdFvv/++9C1a9foAcTZSS7Pe+nSpV4cUcTLhZyfOXnsdBravn27F0XqTfTYRIwZMya89NJLMUNFRBGvIOgBSvk08VEaO2edR75z5864U2B8dgmEgBAWqQ3Wr18f7rrrrtCiRYuYp054J0s/cux0SbllfLJysBH4VXMeRbySBJw8csyKaGRBHJQ88j59+mSSR45dKfFXKl4ZHz92vKhpMuCDVP189NFHceJGyJm42QWQ/so5ThYQNuLwf+PGjXF8ngdaKWpep4hXDJfKF+f9LFzo8Jz54IMP6r2HeKe9L9XHTTfdFHdiyfAOfizJ9wsN9xkLGCqa82EXyo7UsJIiXhG0qetETLZKEpo6syIv9kNEqXda6IRGG88995xfUBXDoSrVommwEi52U5V9+/bFQ9002AnQeFoU8bKnU6dO4cCBAw3exz/jxRdfLPr4dGY/cuRI2k1hBk2VQ/gMP5+0sBnZMsuXLy/q+MePH48r8bTx6Zm7atUqvyRFvPx5/vnnY2FH/o1MJR/bySyyWMiYScYfjx07FrfZu3bt8guqcjjQTrY3JC7NOQn3QbG5++67464zH8I4LC7SdqiiiJcddGfhRuZhYvvKChwB53+zgOa6vXr1ijH4N954I8ycOTNusZONBqQ64SCb+DetDBFTdn8IOAeeWUBIhYNVOhFhRUEVM+OvWbPGL0cRrxxI5+KUnhXxtGnTwueff57p+GTBcIg6fvz4GEb58ssv/VJqCA4SWUBwBvPKK6+k2kQUO6yDHzpeQixemFhEERcRUcQVcRERRVxERBRxERFRxEVEFHFFXEREERcREUVcREQRV8Svmq+//jpWXGImRbkwpexUQmYFxTkDBgwI1113XSyXnzhxYiygqBZoY0d3JPymqWalqq+c/M4//fTT6Mf95z//OVYfUjBFx/pagerKnj17hmuvvTbayuJHnoWNcg6qS++88844PoZatehHrog3A2w0MfGhcw+Vb0ePHo1NHbip0hrgFhp8JihTXrFiRSzhx8yKyjtEJUtj/2KxefPmODFihoRwHzp0KJaI00SjHB7UtWvXRpsC/hfhxsxs2LBh0cCpFsCHHuFmIuN+37NnT7RwwA8oC2gmTSOJLVu2xPF5HvEjnzBhgiKuiDcNGt2mdSUfPHhw9IIoNtywPEhJ7r///qro08gKD4HMB/GmaQYdYUoNnXAQkHxYhXbv3j1s2rSpqu99vgcWEBhm5cNky46EHWox4Tq3bds27N27t9777EKZWPFlUcQV8cvCKhxLzbQVQharEVqqpYVO8MKo9O4orGwJEaVtzfH4wGemlJw6dSr6waftCOilmpWJWalg14GVchrsBpPuhMXYBXfr1i31Z6NGjYq7Y0VcEW/SSowtZJLXXnstxm6LDTFiQgxJaLhLbLbSV3qIJGKZhIYVpXZaZPvOJHrhwoUGP8PMqdgiVmoam8SGDh1adCdEQpft27dP/dmDDz7YYAeniCviqbDiIkab9APv2LFj+Oyzz4o+PmKW9CPHB5rtZLHba2UB/T7xXM+HSYuHtxy2y8S/k5Mln4swQ9rkWm0Qzps/f36993J+4CdOnCj6+Jz9JCdLfPg54M8yuUARr2BYhXEj9+3bN64M6YaDgGa1leYwk8wNslMw958xY0a8gavFD5zGAMTFOSxDLJg0EYi33367LD4fE+btt98ehgwZEhYuXBimTJnyW+PpWgDbWA42R4wYEUN4TLj8/VmdVzBhEtJhIcX4JBUwwdeaJiniBdj2f/jhh/EGRkTpIJ8lZKGQncKqnEmk2jryELYgvskDSiy83BroErunHyQZEUzepdohlCpbh4UEq2HOYAgjZr0D4UyIhiaMP
3fu3Ng7tNZQxEUqGGK/PXr0iHnqhPFYSGSR3iqKuIg0Ew4PSefbuHFjFG4yRshRp9GxKOKKuEiZw/lLsp0e4R2yprJotC2KuIhcJVTncsibBucHyawRUcQVcZEygspI8tTTiqHI1qiVDBlRxEUqFmwfyNjJhzJ40vwohhFFXBEXKWNIp8MAisPMBQsW/JanvXLlSi+OIq6Ii5QTl3KlpOCMPHUsiMnTpgCnmv4+UcRFKhqqH7E2Jg+cbBSqVqvJr5xCOape8QMnXZKCqSz9yBVxRVykaOTywLG1RdiohsSvhVc1kPMDx2eIlfj+/fujeVVWfuSKuCIuUlRwqUzaOFDUc9ttt8WOR5UMop3mB57zI0/6lIsiLlJRYK7FIWUaHGBWuskZ4k1RUhqsxNOarYgiLlIxcGBJHniaDwpND95///2K/vuwqq3Tj1TjLjpjrV692ptAERepbEgdxJkyn1weOBWblU7//v0bVJZiF0AlalozEFHERSoKhJrMDYp68IvHbria8sAx7KJDPQe1iDl2srXoB66IS1VgWlk6pBMSOsm1pCtWHniprj8HmcS/c34vP/zwg1+6Ii6VxObNm0OvXr1CixYtYuNpilbOnj3rhcmIjz/+OK72//KXv8Q8dNrN6UeuiIs0CfKfaSdHYwOEg4wMMhOIlZaqU00tgUkW7dVIVyTljzx04vAjR4704ijiIpeHjjTr16+v9x7iTd/QVatWeYGKCNeZ+HMyH5vwDXHqrNsMiiIuFQZicd1116XGYmfNmhVefPFFL1IROXz4cGznlgb9Qmn8LIq4SKMrQSr2Tp482eBnuUM8KR6cO7Rq1SrVeGr48OHhgw8+8CIp4iKNM27cuJhWlh//Ju2MNDp8NKS4DBo0KBpO5UO7N/K0jx8/7gVSxEUa5/Tp0+Guu+4KAwYMCHPmzAmTJ0+Ocdr33nvPi5MB+JF369YtDBkyJO58CKMwgeKcKIq41BhXm5bGdp6tOwKOkO/bty/T8cv9+hT791PeTx46fizkaSPstXT/KeKKeM1DnnfPnj3jIWWbNm3C008/Hc6cOZPZ+Bs2bIh+2+SZ48VRbnnmrGrJw87lwU+aNCkWuBQK8rxZTfP72cVwIIww1wpLliyJ2TS5PPeZM2fWXOGYIi7NEnDyvNetWxcfHA4piXETIsmiUwvpiTy4GzdujOMTxyXPnPBMOeSZUx6PrSrXietBGT1NjCmjLwQrVqz4Lc+bv5dqRw4lq8Vv/HIsWrQoTpCkQ/L3U82KH/mYMWMUcUVcmgKVlsn4KQ8TIppFdgMP8KefftpgfCaRNWvWlPz6IOA7duyo9x6TzR133BELnZoLE+iuXbsahBXwG6fRQjXDdWTnx0F4PufOnYs+7Hv27FHEFXG53EPEFj4tFjl37twYNigmhCSuv/761BU3W+rp06eX9PqwKyGFMo0pU6aE2bNnN+v3s6oniyQNUjQxzKpmKFJiEk/jiSeeqHirXkVcMoEYL6XySV544YXYtLeYIN6tW7dOtSwly4Lu76WEyQ0/8LT49+jRo2Nz4+ZOYpfyG6dsnpL6aibXNCNtEh84cGCDSmBFXBGXFIh/jx07tt6DRFySA7YstrPEv3G/y4f8ch7u5Da7FIwYMaJB5Wkh/cDxOZkxY0a993bu3Bmvfy3keffr16+BHznnD5yTZHm4rohLxUIWSN++feOLlTchFAQkq9ZarMJzeeaMT5ocAlkuq1CEFH8YVoZ8PiacQvqBMxF07949Fu2Qnsmkyu8vh/OALMj5kQ8dOjSG8FhQEGJCyGsJRVyaHdagKzshFB6kZOPbYpPLM2d8ClbKrdKTcMfy5cvj52PVWGg/cH4/kxZ59pWc5321EFaiOIwFBNkqaeE9RVwRFxFRxEVERBEXERFFXEREEVfERUQUcRERUcRFRBRxRbxWwb7UDvO1SyHtcStxfEVcKhbc9PDjxg8cL5Inn3wy/Pjjj16YGgE7W6oec37nFCVl6UeOURV2urnxMS6r5QYPirhcEVu2bIllypgFUflIuzRK12nyUGvG+rUIlY633npr+Pzzz+Mu7OjRo9HvJSs/8jfeeCM6EeL3Avih4wczatQovxxFXJoCviOrV69u8D5eH3RMkeoF0Wblu3v37nrvswpG2Ldv317U8Vk04Af+3Xff1XufXUCnTp3CV1995ZekiMvlHmK2sGlbZzw4ku6AUl0cOnQodOzYMfVntLhbuHBhUcfH5ZLJIg0682RloqaIK+IVDZadaYZJ2Ke++uqrXqAqhg44+JGnhc0Ip3z44YdFHZ9zF5pmpB2m074u2TFKFHFJgfg38cc0P/DkNluqjyFDhoSXXnqp3nuEUTgnoTNRsbn//vsbNA/J+YFzPiOKuFwG0rp4kPr06RNbmtHWCz9qt7K1AX7kZCZxBsLOCz9uJvCsnunDhw+Hbt26xZX3rFmzYpOPWvQDV8SlWbAKZ+tKahex8HLz45biwkEmfuxTp04NixcvLkjHoSvh4sWL0Q+dEN6bb75ZEx2JFHEREVHERUQUcUVcREQRFxERRVxERBFXxEVEFHEREVHERf4JZeL6nYso4lJhUIiEUdL1118fWrVqFav6Tp065YURUcSl3Fm7dm102tu6dWtchZ85cyY67N19992uykUUcSl3aBiQ9NFAvPv37x9WrlzpBRJRxKVcweeclnFpK27MmvD6EBFFXMoUxBu/6RMnTjT42TPPPBMWLFjgRRJRxKWcQaw5yMxfjeNzjl0uvucioohLGcNB5oABA2Iv0BkzZoSnn346CjjWqSKiiEuBBZfmt4Um53dOBxq6o9NIQK4cOuWY0aOIK+LSANIAyeOmXyOvxx9/PJPWXtI0lixZEtM0ybFv3bp1bIBNRydRxBVxiW28OnToELZs2RJXeVRVTp48OfTs2TO1Aa9kC232aH+2a9eu+G+aE9NTdfDgwV4cRVwRlxB69OgRNmzY0OD9Bx54ILbcktLBpNquXbuwZ8+eeu8zuZJ/v23bNi+SIq6I1zKIQYsWLWKPxiSvv/56eP75571IJeT777+PXePTIKRCP1VRxBXxGofO5GmNdRHwOXPmeIFKCHFvzijSJtlhw4aZ4aOIK+LyT7EmxpqflbJ3796YBrh//34vUIkZPnx4mDJlSr33CKMw+RIfF0VcEXe1FwYOHBgPMqdPnx7GjRsXBXz58uVenDLg+PHjoU+fPtFrhjz7J554Igr4xo0bvTiKuCIu/4QDtPXr14eZM2eGxYsX12weN/a4xciTby6cXaxatSrm2b/77rtR2EURV8RF/j/Elm+++eaYg00MesyYMbGwRkQRFylzCBt17do1fPnll/HfVK0SUsIqwOpIUcRFyhwqIXfu3FnvPcSbOPS6deu8QKKIi5QrZHe0adMm9WfTpk0Lr7zyihdJFHGRcoVDTGLgZ8+ebfAzskA4RBRRxEXKmCeffDJ6nufHv7/44ovQvn37cPToUS+QKOIi5Qxphf369Yt+5+TJjx49Ogo4zo4iirhIBZDzOydPHtdAV+CiiIuIiCIuIqKIK+IiIoq4iIgo4iIi
)

**Step 2: Assign Each Data Point**\
The algorithm assigns each point to the closest centroid to get $k$ initial clusters.

![Step2.png](Step2.png)

**Step 3: Recompute the Cluster Centers**\
For every cluster, the algorithm recomputes the centroid by taking the average of all points in the cluster. The changes in centroids are shown below by arrows.

![Step3.png](Step3.png)

**Step 4: Reassign the Points**\
Since the centroids change, the algorithm then re-assigns the points to the closest centroid. The image below shows the new clusters after re-assignment.
![Step4.png](Step4.png)
sQR6XyYXWvFNP4YszaQI37zzTcrp0hSOyg9/3TFMJVbfd6Ydxok4Osyd4hHZv5PuRLifeCL0n31WL+nG3pj5YFt4v6Ibnou+yXSyRC57b7yC0c+/D3hX+q56peNby2+i5vsdztbijeps2j27t1b2dDaqW0cUhBxQ7njjjv4wdVDuLWtvgIOZ0Jvftx3h38R8IYKU3bEVCsA0raHZ/YQpY0U2LCkpaYSf+P2uGxwYZUvDMWb1AgqTVHl+MsvvzhHkGQIALNurBOQmoW784rhHkUHAh6XWbuAHy7KU7NM7e+aj3xDXDWhrem90BEnUMBi9kKDOyLK/a8I+9B0fEY3w/qCEBGulbGcv6of+u+bZlG8if9BqAaz2AceeIAXw+agWfPXX3+t1gXQYKShwq1t59dBwGfsWqX/97cY/LhHGfzAr53YLnBPYXJWrB3HnbIrkGZtOyghUt9/29TODX7/kVsX6u9zb8SX4khxvtr/m6wSNT59ULxDaVYkZ4/+SodDnjWKXDwBvxDMvtkA2HfgicCKylgYdR1//PHijDPOECeffLLabrnlFtG1a1fVzNkIZps1Cbe2YdaaWejdO+f7dVP0/xb/34hxRppdKWr+BnF3TzNghDK01m8IqaDxcUP4LHaE/v5/ygYSGng/LZSCccr9VAlN8bYBy5YtU1kV27Zts3QcNCw+4YQTxIknniiOHDnimOuDrkbwYPd1NovVQEQRk1+wYEGjBTw9PV29D7zNW7duLW677Tblb44npaOOOkp9rs8995xITU2t9rcZUpBvn9alVvGuLaRg7Ljz0vwf9f3bj+zV/bgxgw+UH7dxhv3u4v76/o2y4bK2H2GUhmKcYX8YPUjfvyYjyWTwxZl3CPHzzz+r1MA1a9ZYPtajjz4qnnjiCVstOtaGljo5ceLEOj/J2EW4Ncvhugo4blRGkb7rrruUj0uzZs3E/fffr9wl4Wm+ePFiccopp6gZ9w033CBWr15d4/tCwG+b1tmrcKPLTm3ALta4GPifub3F5zLl0BgHb7Xg54Bdc4i0dhPB9oL0Jv88dqSpS9D7S/5o8PtjYdf4/riBYTZ+wdh39X0fLRtM8Q4lYMrkK+FeuHChePnll8WSJUs8vu7LfGhEetCr2eoUaxQrjRkzRmzZUrtlKQTzyiuvFMnJyQH7POECiRtN1WYfENyaBBz+L55E2pulwGuvvSYmTZpU55uVNwH/MX5anc+tVw1+4Hbw4+5iCG14iuvvzWtcXvrHy//y+v7oAqStA1C8Sb3p2bOnepR+7733LBtDFlPKGbyQj+pCjiXEqadCSISwg8nfO++8o84fnXgCAWLcnoRb23BT9Sa2/siYgbhg0a4hwq1u2DLVrnfcpGrpgljAXG0DP25vfuEIG60/2PiwG3LZUeBUNV0QC6QJh3b79Vwp3g4Es2dvGQYoU0dlZm0ZCA0lLk7Ix3WXaFfdWrQQItDNfnDew4cPD0jXHYgvZsK1NbiuScD9gSbgfeKnNvg90vIOiqk7V6g483JZtGI3P274lU+WOd/oTL9if2KDFym9sUv2/YQfOt4/dv9Wn78/xTsI2b59uzjnnHPE5Zdf7vex0bPiuuvcYi3Xy8SFFwrZBce977//Dd3PBgvOtQk3tlGjRonc3NyAHmteCTsROR2KdwBAL0rERRsCZpT/+Mc/VDf1AwcO+PW4N2xwi3TTpriRuPbLrDXZ1d21H//Mzw/dzxYmWjUJ98iRI2mLSyjeTgTii+IKxGTRdNcTWJj73//+pzIPPOEpHcwfyDVDXbwR4zYiU4z11/yQNGNrNm7cSOEmFO9gIycnR8Wku3fv7rUw59lnn1WLbkgXsxOyqE8X6GuvdYVRABYqpZW3/ppVDwSI5//666+qEtRpAj5ixAgKN6F4Ox0sVqFgxtuiGqockS1SW+6uv0GYVhb06SKN2bb0qRKXXGIWdatA+h/yvZFRUxdwc8QNEOX+gVjA1AQcwu2twxAhFG8HER8fL+6++27VLT2QSOdZlfZXWo+F8mHDPGeaYMPCpXRo1UGVPdKtfaWbyLJBPnzV8m/v51csLrroIvUUgxtiIEAMnDNuQvEOEhCzPloqXVO56heIFl8Q7EceEdIbwyW6skhP4D5S13CHbEWpcruNwi0TYGQrNNfrW7cK8eCDrmwU7f1ffRW+Kv6/1rNmzVLmTYRQvCnedQKx7QkTJnitaERD3kCki8lJv9c87bPPFqKu/R8QBYAmyhC0iIpyhVQAIj0nneQ9D9zPCTKEULwp3vXjWhn8xeM6CjLgAIcYd6BB+ELaYJjytGXGoTjmGPe+p59u+Psj/HL11e73wswe72/MA3/hBX43CKF42xhkk9xzzz0qPQyLbF26dAl4F/TNm90iKr2OhGZiKE0N9RAHhFY+NDSIdevc7y8tOoTmfWXMA8c4hawPIYTiHSiwEIWMEHR594RW/gzrVWQczJs3L+DHLKM4urhWXSu9/Xb3a7GxDXt/WTiov8cbb5hfkw169Ncg8o0FN8L9ga7FJ4Ti7TzgL42wyFVXXVWn/94ONqUrVrgF9IorXNkm4NAh10xce01aRzcIaYCnvwfK6LUsFsTHtRRDaT3daAMrNGZu165dnVMGNbKzs1XxE7zTCaF4BzneGiRg5vfNN9/IUMRmx5wLTOuMIn3jjUJ06GDO067jvcgjCLcY88BlU3f1/vA/MY7ZWLAI3KlTJ9UQuT5df5B3jSwfNC44wJVTQvG2LwgxQ3u1GWZ9KJXTxqvl6tuxMlgbTAUWxhJ3T3naqKJsDLIXsNf3x8LoypW+OY+Gtmrr06ePiImJ8c3NsLRI7Mje59eO4YQEtXjDMgRZE5rfNLIe/v1vmPrX733kecr0thZi+fLlQfWho9Dm9NPNwiq9rvQ87caA6NBg2UzktNPM79+yJUJNwXH90LHl4Zk9dM9oNCFoGz1Yb0hLCMW7AaCi7+9/9zzzg2AlJrr/WxTLwGdkqZfpJirj6tt55qAM6GIR0+6LaYhzz54tBOxT5s9352n7ChTk4GaA95fNfUQdiyJtz5K9m8RZVRoRGPsXZlPACcW7Ydx7r1uskZ6GhTktDQ4bsio0fvjhB7Xo2KpVKx/Oaoep1EA0BHAyaWlpKnREzGGSaye208W6+cg3xE2TO5r6GrZf9icvFKF41xesQWkijZJsVBOC+PgSUzk3+i6CXfL/wF9kNqagPgI+3eijmNPQZGkbgC7tQ4YMUX4ggc5NtxPR6Qm6SF8jRXxffpbaP2dPnC7g545+W5TbILOIULwdJd6y6bYu0A89hNDAIdUp/RKZTvHUUxX6a7S6qF24NStTOws4QlpbpZEKrAYaAvzSP/roI5X2WRcGbZ6jize6phtByER7LSWHmSyE4l0vYLhkNErKzq4QF1xwgcoYadEiQX8NHWFI7cJtFHA7hlAg3h1kLiLCVAcbkDwOb3CEzW5GTmMdmL17jS7QWLDUZth7cjPF2SNfV/ubjXhNNcElhOJdC0gZg7ggRovf0vnnuwUcbR5feCFW/jPTJOoBsHa2PQgheRJubYMznx0FfMqUKao5
Q1ZWVr3/tlDW6b///vt1ztVH417EuTUBf3BGd9F5xXBxRVgbfd+Tkd/wy0Qo3rWBp/nnnntbzZ6w+KiFTlDF5y3XeNo03x4DBK2+GSl2Ax3Xh8oE7doa52JtINQXMQckzPaYaYINs+/ELP+1qEMT4WTmmVO8nSTeWh63y496hhTrm2Q59hj52O96HV2yYG9qFG00zJXriD5n+vTpqielt36UTgDHTvGuGxXyf4MSIsU/ZG63UbhvnNRBxO7f6pdjWJuRLO6L6CrOHP6KnmeOLJecEvrFUbxtJN7p0mijo+y1BRMoAKc6b3ncyCjRnoCR7IFZ+O+/u/ympa2Fz4GQwSujdevW8rh2BPXsm8JtJrMwW2WZDNkSpUS70E9xbozZ1Eue+f/JtEXMxgnF2xbijUIZLDgeJ5O1sSh1//3mPG54cWgdYbDdequrys9/oZsSsX79+qD4MngTcAq3PcDM2hhfbzH6LXHzlE6mPHPE4AnF26/inShLH72lpA2SfbdWrVqlqvY0kUa3Fs1adMsWc7m3wyfBAaVqxoldFyqtYPv27eKtt95qUOaKP5i7Z51plo3ZP4jYtUrff8m4D/glpnj7T7xRKINFx9rybY2Wo5iBG3nmGfdr4eH8YBvK5MmTVQaH04p08N2By2BDsk40/i0NcPA9ROMMO/LrhghdpHutMS/gXD7ePSPfX3CYX2SKt3/E+9tvvxVnnnmmKi2veVboFujmzdHowLVfFjXKfG73a0ESxQgInTt3ln0oVyvPbCfNuPF0hnzvaGP7+nqyTj7KIXUQnjd2ZMqOGF2g/x35tVo8Bcg4MZpkMfuE4u0z8cZsCD8ub+KMRrzFdfRtvfhit0ijP6IskBNXXml2x7NSc/DDLm6Ix6wDQMOIZ+QjjB36bdYXLBgjXzuYy/nT8g7qxUBaoVCnmKEqVKLtezGqD1WN4u078UYTXjyOXiyVt7EdZWrL47YyZALfEjxSd+3atU6P52h+gOwXp/RsRKHTK6+8wl+Fn8AC5NbD0gCsvO5VY303hHvNM8ese2cOW8NRvBsg3oVeVApFLK+99poYPXq0rG5sfHmjDMWqGXbVPG70aLQSdF9B95wff/yxxpsQbGnhrWJsuistVurtJ+5vFstFhV69evFXYTHI074n/Es9S6S5nE23iR4oDhfV7puLkvzfN81Sf2MU7n9O/VS9L6F411u83377bZlnfarf4oXwh0ZX8gEDXLNxf5n44UZUk2NgQoIQp5zi+akALcLgv2JXkF0ybtw4/iosZF5qvNc87fr4gWt55sMS54vVB7aL4nKmclK8GyjeL7zwggxnHCXGjh0b0hf0llvcYo1OPtdcY85Dv+ce+x77J598ItauXctfRSVYuP3iiy989n65Kk/7Q1OeNmbMWqUkto+X/8ULT/wr3mjU6+QScV8An3DjLHtrZbU0XAzhL669Jp1IbQfCQE9Lz4FsK0pR/UyefCxraI9LDaxvNJWxOKzVRKFE1wcsSFtvKqfX8rSNfuDnj3lHzyIhxC/iTVxtvzSBfuop82uIgWuvoe2Y3cCNF+sSTgcOg23atPFJ39F+socbZt6NyR03gli1Jt7dV5ufUK+e8JH+GrJKCKF4N3I2iuKPvDo2YNy0yS3QsKbV/gz56MYFVjvGvRfIhQMYbDkd9CNt27atvJHOtN2xGSshH53VU/cDR4aI1hcTC5El5fQsJhTvRgsBCj+QYVKXFEck0sAvXBNp9M+Uk0DZyce976KL/Ou9UlcGDhwos3UmOP4zQ7iksSETq9iXf9jkB/7AjG6iw/Ih4lJDnvZzc7+jKhGKd2PJyMhQ3Vbi4uIaFDqpuiE/HZkxGqgMxWy9wAZXE86N6zSzGGIZxtBJ1Q02s6iW9BdITUw8nFqvPHNfklWUW+88d0LxrlfopL5IexDRrJlZuBE2QX46kGu7ypvlmGPc7oiPPx6YPHBUJiI/X36mqtKVWAtCJeiFWdUP/NYpn4g1Gf6Jp63YnyjumNZFHxuhmnbSD7yuaYqNfqKVjZxvm9rZdNNCpWgu/cgp3nYAOog8dOkKICtM3fHvjRvNmSdV88CT/VxnAS8P+QVQ4g2PGaTHEc+g0fGYMWN88l6H5KwzSuZ8j9i6QIm2v/K0EXc3pib62w98YvIyk4Wtcbtd3lAKy4r5RaN423EWL8RNN7nF+sQThbj2WnMe+H33+feYevbsKR544AG1Pfjgg8qal1Rn5cqVKm2wSZMmjvR+0cIURh+Uc0e/LW6b1tkkpp/FjrBsfPQARSqkNhb+P2bgxvF7rhnPLxvF235gVq2JNLoBSftoBVwQ4U+uvSar8P0GiqrkZyoeffRR0adP8JgaIRSEGH6yDx9lXnzxRfHTTz95tYGwO7NS1ugiCdHUyvFnGLJgLhvf2rLxJxtcEe+a/rkeppkgZ+PGKlNC8a43iP/C/dCqmRVi3ppAw3/ciLFDEMIt/gIZNRBuFOjk+MtjwA/ADA2ZQij5Jy5+jJ+mi2TvuEmm1y4e977+2sFCa74HX6+doI/xy3q3gxyKks6pXAPALDy/tIgfFsW7nl9uaTiFHzy6x1gBKi095YGjBsS4wIlKTX+BzjkImSCeG0ygG87PP/+szLaIqDbDfSryG72SExknWhwcoZRyi3JZR25daLKu1cZff3CnX2b+FO8gFm/MuMePH2+ZJzR8xo154JdfLuTNwpX7bfQo9yfIie7UqVOjrXqJ/dmTmymajXhNF8qHpB84us5fOPY9fd+rC36xbPykI+kmUy4UKiHL5TxDHPz9JX/wg6J42xNM6r3lgR99tCszxd+gyTMJDX5aP81rnjlm3am51n4Xvlk70ev4uImgkIlQvG3LrFnmGXjVPHBif2ANDPvc9Q7ruYeQyB/KD/wNk3D+a/pnYsPBXX4Zf1BCpKkjkDb+xkMp/GJRvO0P8sAxy/5LOoAuW+aqtiTOAesjSB3EYq8T0fLMR29bJOIzd/q9yhGOinP3rFPj46bBvpsU73qBeG+oxnox+9caOJP6gwXR66+/XowYMYIXg1C8/c2QIUNU3m6oxXwnTnSV5N9+uxBBYN9dq8hiEXrSpEn8tROKdzCIN3KaO3fuLNq3b69+4KECNEzrrYntjjuCW8BxY0b6J4y3EKcmhOIdBKBbypYtW0Lmw4ZhllG4te1f//JfD9BAAK/yXTJ5numQhOLNBcugEe5QEXBCKN4Ubx1kjCBTLFBOqgh3YPzaGv1UDZV42+6+O3Dn4gm4FOD8Cmz6bUNlLI4PvR+Ki4vF4MGDHet70hCQMbI5a0/AOv/A6GpLAMeneDsQ+HHD/Q9FNVpxjTTk85uda0KCEHfe6R4fvuCPPebdDxz9c43GV942vIcdtAcN7G+5xdW8QvM7/89/hEhPt8fnHxsrxA03uK/bcccJ0bLl0yp1sG/fvkH//V+yd5PyH9dytFG16U8/cKQ43iTta7XxkTMein7gISXeiHs2dtEKnW+8+XGfeqr0iUi0XthgJevND1x6a3lk3jzvf2cn4YYVCcTQ0zHC/yXQAj57truJhnmLlMd9nQxPzQtqwZi
UvNyrH/fNUzpZbiqFvPCa/cBLRKgQMuKNrIN27dqpJruN6XF4883uHyxmszfeaJ7VInvDKnDfueYa91innOIa/4QT3Pukx5RXvAk46kzsINw4BqPPy2mnuc7PKObPPde4MeraTNpbmKxFC/exnH66awbuDkmVizfeCO4wyQVj3zWVs6My0iimX6wcZdn46fmHREtZwq+NBSdEdAQyjv/VmjCKd7CJN2bd3bp1E8OGDWvweyAsYZzlan7cmG1j1q29lpZmzTkgMUYbo2lTt/sg3Aq1GwhCDTVlPlYVcGnlLewSpkU4Qjuuli1lw97Kdo4rVrgFEkLekOMtkIHzHj16KBOuhj59of+odnwXX4wJgXu/FsLC96AsSEOw03fGmsrZcyrDFBEGP/Arw9paNv747UtNzZu1rj9Gt0SEUyjeQRg2gVtgfiPq0CMj3T/eJ54wvwYR1F6bZ9GT85Qp7jH++1/za1hs1F6Ljq75feD+CgG3k3CDoUPd5/Dee+bXZDGj/hpayTUEiDfyvQ80sNtFv37uY+jQwfwaxFx7TbrsBiXfr5usi+QP8VNMrxk75FgV++6xepw+Rv+NM/X9sJZtXumVgll4qIROuGBZDzDD1n6geHzWiluQFdG8ufu1JIv6yKKJvTbGhRe6s0wwA5Tdueo184+JsZdwg4UL3eeA9nDFle0M9+xxrzNghtvQlEYUYzVmzcPoCIkF1dLKtpP4vLVWdghhFQdpG0bjzPeZOb113294k2ihi4tkKEXz6fb5zT0xSh//pfk/6uOsOrBd33/VhLYho0cU73qA3z2aKGg/YMRn33pLiAsuMD9OW1ULAlGA+6A21mWXucY/91z3vquvdu71xc0Q7eGM54LzM94YkY8eKHCTNIbH8DSA40MIyxiGcp1LtnxSmhJU3/+UnAMmP/D7IrqK1ksHKitZbd87i/tbNv62I3tNfuDwI2+9dIBoMfotfV/b6MEUb4q3Z5B2p6WwVd2w3+oWZuHh3jNGkAWB+LCTGTXK+/lhVtvQkImvGDjQ+/Hh6QDrIMj1bimD9kfLx4SNFh5whlxAxKzXn2GCmvzAsZi5Lz/L0vGNrdSqbpfK5spYVKV4O1y8Ed+eLfO6ii14hp0zxzzb9bcfNxwBjVkPWls1LJwFAygqMs5mtSca2W7TNsd35pnm48NTwpo17v+mgwyK33PPPZaI97L0zeKfUz/VRQuz0XfljNeq3pNGEKoYsXWB3nPS337c3vzAH5zRXc3MQ4mgFe9p06YpQ6IBAwZY8v6o+kPceORI12zX3/FjrLvCBxzjr1zpqvILJjS/c5wfcttLbLYGhRAPnrJwfLJJvR7/doe4rAl8I7ND6zdZdbt2YjtxxE+FMhhnUdpGEZYUraos/e3HnSX9yBekrVeZJolZqZb13aR4B4DU1FTx/fffq47whBhBr9IYeed1msvg4aI8cYkMDRjzrO8J/1I0Gd5K3/fx8r/4AVO8GfMmwUn37t3VU5nTbuxzdsfpIo2wiTbLRrm4P7I9CMWbkICCkNpAufKYkuKsnom/bojQxbtXlUrCK8La6K/tL2ATX4o3xZsQn7BPlouuxOJEIzBWOD46q6ceZ0bMVwudoHy8nL7lFG+KNyGNZ51c0TxFGtFcJAsDGrOQiTQ8Y9f3O6d/rvKcjdWNrRb8zAtO8XaWeK+VKQmjZJJwY4yHCLGCMml2cq0sGX322WdFRkZGnf8O4Q90di8wOPUNSJjtNc8ZxTKpuc7pxwqjqfUHd4aUEyDFuwrIGvjyyy/VItTy5cv5qRLbkVuPLhfR6QniFoNfNvK431r0mypA0fKsje562OCuh4IdJzBPLrBeN7G9fuxnyfNrEz3Qb2mOFG+bAbMhlCOzTyFxMsjj9uZXffWEj1S6IICjH0Qeec6ByLNuKGO2Lfb65OAPP3CKN2PeJAhYL3uYDRkyRBpf7bHF8WDmaczjRtof/EOMfh4fLXOud8e+/MMmHxSUs98b8aWp6KjrqjH8YlK8CamZ8ePHq1DbzJkzbXE8c/esM+Vxa9aqqGTUZuPwD3FqHvdEg+82bkraLDt810p9P8IphOJNSI3slMbbi2SN+8GauldYxHbpYLV3r9mLo9/GGbqI/U96VxtBkwPtNSz0ORGjqdQv68NNrxm9Uhg6CXLxDqUu3SS4GD58uOwMdKx45513TPuNHWlgd1pa2RXdmMcN+1OnxLergv6T2vk9O/c7PR99tcGPG8VGJIjFG4uSffr0UaZTOTk5/BSJo0hOTpZt604Sbdq0MS2wIzXQOANFQ933lvwuzjPkcb8y37l53Dtz9pv8wOHLAjdE4zkj64QEsXinyxbisNz87LPPVG9C4kyQ8oyiw+zs0Dt3b6GawZvn+i2PG+GXuMxkUx651fSJn+r1/LBYm1GYzR9GsIdN4AyHGQxxHqtWmXtSorXZY48JsXs3rw1AGbyxS7vml+2rPG5Yqd4w6WP9vRGSeXtRP7/6gRu732hhou0h5scdsuJNnAn6VGqd4KtuaHAQqKw9hC9KbGQajs7oK/YniknJy33qV42MD2955PADz/ZToYyWpz55R4wSbTohUryJjUHDCDRO1sQa/Spvu83V3kzb9+ST/j+u2NhY0blzZ9mhaFZwh2rkzNo4o0eY4oEZ3Ux+4B1jhvCLSvEmxAzi25pIo42cLIpVoFOO1n0ds/J8P1dJb9iwQeV7wyY2UMTHx1teHWzMZoGpVW6J62cMP3CjoBOKt09BXixL352NzJDTxfvtt82v3Xij+zWpY34FTn9Vc679Sbt27WTz6qPEhAkTLB3HuFjYO26S6TVjZWcmFw0p3r4CC5OtW7cWffv2pYA7GDQQ1gT6iitcfUABeiKg87q2eBlq2SdDhw6V53+y6Nevn6XjIL6sCfRjs3rp+eJYCNXi4LCXZfyZ4u0zEhISRJcuXUR4eDg/MQcDt96zznIL+KWXCtGqlXnfnXeG3nUpld2Lkf5qNXvzDpn8wOFE+I7MszY6FMK9kFC8fUq+DISWlND3107I5jCykS/SNuv+NxMnes40wYaFS3mfrrsY7XWNf+SIPa+P7IEtVqyw7kkiLe+gqk7UYtd14Y9Ns7zmWaOhMRo+OIWUnANiTUaSX/PUKd7E0WDhUfYWMAnvAw8IUdd+vvCBOucc899fdZUsk15dt79ftkx6fVzp/lsZLrZVnjjSIfFUYcxjf/pp183GJ++ftkFcb8jThivfawv7igMFdbuLhSUtreYH/uCM7mLr4TRHfP9myIXXK8I+NOWpo6OQZpdL8aZ4Ew8sXuw9T/v002H2VLf3gTWNzNCTzn5CbNyIbjN1+7s5c1xi6Gn8Jk3kbLSR+oOFy8Z0lZ861XUz8XR8zZu7qkobwxQZt/aWpw3zqqyiujV90PLIp+5c4ag86yFbomr0Aw+lWTjFm9QZtF+UbRhNedqIUZ94onvfI49YNz4WOJFiaBTrf/3LnCf+zDMNf/+N8i6ClEH45jQEhG/OPtt9LM2aCXHHHUIcd5x736uv1u29kpKSqu2DMCO0YfTDflhWJhr9vttGDw7a7x/CRE
YfFMy+UZlp9APvFkJ+4LYU72wZJJw3b57q/UfsA/KxNRFq2dKdpy3TpHUBxazYKq8whEu08VHso9mDINyiCST+2VDDySJZRYR2emPGjGlQVtPcueZsGi0Wjywb7Wnh1FPRtq/mY7j//vuVcVValceIyN1rTaZVWqx7yd5NQeH3XRtjt7u78Dw6q6fe+9LoB37jpA4U70CK919//aVmQFbnvZL6MXq0W5xef9382q23ul+ra+y6vvz5p3uMD6rUkhhj8Js2Beb6yExW/Rg6dTK/Zqws3bWr5vd58cUXRdOmTUVUVJT5/TeE6yLVa02Y6TVYqWqvoWNNMNJDepxr59h/o7uBBm5WzUe+rvbjJhYqDY1tKd4oyPn2229FZmamIPYBmRPGNL+8yvUh+JFgRqktHmZZlLCAhUBt/Kuvds+wEaLWQjfHHOM+Ln8zY4b7+P7v/1xhJrB5s3udQE6oZWpgze+DtMFsDykqxhmm0e9706EUPXQA58HyIK2FGJ64QD//5+d9r5/n8n1b9P3XSH8WzrwZ8yZVQMzZGNNF/Pvll8152vAqsS6c5opzG28gL71k3nfffYG7PphrnHaa+1iQEYPjO+MM33i3VPX7vm1qZ5WXbcwceV1mnQQrSUfSVad57VzvkiX+b8rzN+auf7z8L4o3xZt4Ytq0mvO0kTliJePGeR8fs9qtWwN7fWTEz+vxQdhrC5nUGjqqwe8b1ZEoxAlmvl83xev5I3RU12wbijcJSWbPdi1YGoXpkkuEiI72z/iTJrkyOYzjX3ONEGvW2OP64PhkyNp0fPAv95VnC/y+jVkn2OAO6JQ87cbgzQ8cC5g7sveF1O+Q4k0aBOK5WJgMC3MtEPo7MQj2smjqgDVtxJTLfdjWEf1RF8oA+5QpUxoVYsIaASpKExMbd3zl8o/37TMLE/KZVx7YJqZJIUc4IdT8SJCnHiNj3biR7co5EJK/QVuIN9ICkVmCNClCAg3Eu23btqrHZL6//WmrgMX76+W0/Z///CdN2Yi9xBs/jo8//li5BiZiikKIDZgtY0MrpQ9AsZYyEiDw+2jRooW44IILZFbPHn4wxF4z793SlAI/FkJIdVD5yadSYkvxJoQQQvEmhBCKN8U7eIF1BtL7Gut0R5wJsjRi92/1W8d4I8iOQWofxs8poZw4RrwLG+ocRHwC/LiRF23MQ77rLiG2beO18US5L/MQfQDi343JgpmzO07Zx2o50vADeXXBL6qC0x9EKD/uNiY/8rcX9WPvTLuLN1bvu3XrJkaOHMlFmAAAhzv4f3iqAIQ/icxMI9rsUKbmDR48WHTs2FHmbdtjdoi0WmSe4DfUEMZvX+q1QhGCbnVDg2GJ872OjwYTuZyF21e8t8r65Y8++kh88803tpvRBDswRLr4YrMf9t13C3HKKe59997L62Tk559/VmmsCfXpz2YhsbKDBTrN/0samdc37ztDzmzPkyX0xnJyVCYa/cDbLfvTsmNPzz9k8ma5akJbNT464Wj7PosdwS+dncMmB6QR9F5f9YQidWb9erdIow2Z5seN9Hr4gljtCuhE4Kl9+LC9LFYXLVrUoImP0ZXwnvAv9a4z8APX9l887n3LjjssKVofB00kNOtWhHFC0RXQkeJNAgNK2TXxhhugEXR80V5DWTcJPvrET9VFsnfcJNNrl8iuPNprVsWev5Ie5NoYv6wPN71mnJHnlzKcSvEmJoydcGTYVO9qjsI9o5UpbdSDE/S/1ATykZn/EyWVfuBxmcl+6cRj7ITzVOQ3oqzC9fQQY/DjRmszQvEmVUClt9EN8LzzhHjuObMfNzrikOBkX36W+IdhhnvrlE9Ulolx37uL+1s2/u7cDHF2Zccb5Uc+rbNoteBnvQuO1TF3incDxHsbc9BsQ2Sk9+7mxx/v6kdJnEV90gYHbZ7jNdsD8e4DBUcsPdZfN0TU6Md9KIT8uG0v3qulbyj6UQ4fPpxX2ybIbDM16zYKN9qKWdV7MhjAAiEc/uyW3jpo0CD55HSWLLaqu5m6Jz9wLCDCWtYfhCUtNXX/CVU/btuLN5zZkCe7fPlyXm0bgbRBzLKnTm2833Qo0K9fPzUJWbduna2O6+uvv5Y337+p31h9KJKZHmszksWslDUqnOFvkGmy+sB2MXv3GpGay0UW24ZNcnJy6EVMHE2kjDf17NlTxMXF2eq4cnNzxdy5c/kBUby5YEmIJzj5IBRvQgghFG9CCKF4+0C8Ufa+VLof8TGTkMBQUlLCi0Dxrp94Q7B/+OEHtSofFRXFq0saxY4dQixYIItLmEFWZ9Hu2rWruPDCC1WSAKF412vmjdX47777jp7dpMEsXCjERReZ89DhfmgHu1o8Wc6YMUPlfNuRu6QxO1wHJ0yYwC8SxZsxb+I/Zs70XgF6+umy+8uuwB4fGmXjyXLs2LG2vH7x8fGqMI5QvCnexG+gyhtWtZpYN28uxIMPmv3GH3ss8DPvMGnPSMsHQvEmpJJly9wijaYRRyptNjZudPmuYP+xx7pEnhCKN8Wb2IQhQ9zi/d575teuv979Gs2zCPGBeE+cOFEt4JTCLIOQRoD+mppAX3YZSr9d+xGhOPFE137038zL47WqD2w3SPGuJt5oZ4b+fm3btpXpXMznIo0Donz22Wa/8aefdi1UavsQAyd1IyMjQy2uyh85LwbFu/rMGw2FlyxZwisZgiQnC4F0ftnm0WfA5dBTpgk2LFyyu31dZ9tYQ9gve5OeKo477jixc+dOXhSKN2Peoc7ixeYu9Nhuu02ILVt88/4wyTv3XPP7I+Yts+BsdONKFj/99JPKPLEbuAEiU8d17SbLbbvK0klN5XeX4k3xDlkgrEcf7X1m7KsMurIyV5ZJRIQQdpw0pqSkqJBE9+7dbXVcQ4d6f3KBoLM/KcWb4h2CoImMsQMP4tOPPGJuXnzffaFxLWAFsX79eltVEsslKNP6wPnnC/HQQ0KccIJ73+uv83tM8SYhx6pVZmHIynLtl8seMrbq2o/qyCNHeK0CwZQp7s/n5ptlt5rK+wo6pGlVq2eeiRsPrxXFm4QUo0d7n8Gh67z2GquzA8O337o/g27dzK+5Y+BC7N/Pa0XxJiGFbEeqC4A0rtNn2ElJQpx8MmfegWbSJPfng5up1is5NlabeW+TlarPiwEDBvJiUbxJKIHH8JYt3QLRooUQTz5pjrPC/Y8EBpRaGNcfkBH0xBPuAqe//W2aalZ8ySWXsHCH4k1CDWmq59X1D3FvdKIPBaB9ONd58+B0WCRz3dNscVx//uk926RZswqZHfMDC+oo3iRUmT9fiAsuMAvDTTeFjucIbGvx1IHz/vvf90kflrbi7be7quYRdgBW3medZf58Hngg8Ha6hOJNbDLzRFHOrFmhJQqoyTGK4lFHVYjXXusiy/j7yNTJAmGTCbiA1RBupnhSki62hOJN8Sahy+HDQjRp4hZuVIEiz/3UU0v0fc8+y+tEKN6E2Io5c9zCfc01bnfDtWtdboda3J8Gm4TiTYiN+O03t3h36GB+zej1YpfYd22sWLFCujQ+qMzlCMWbkJCZeWsdf
datc3X4wX7kuztl5t2mTRuVOvjWW2/xw6V4ExK8VI15w+fl8cfdBUpOi3mjH2fPnj1FdnY2P1yKNyHBzfjxnnOojzqqXFx1VYIYN24GLxKheBNiR2TXP1OXe2z33FMhOnb8VNnEosMUIRRvQmwI8tw3b3bFwbU8avRyDQ8Pl+GVw7xAhOJNCCGE4k0I8cJMWff/3HPPyWwZJqpTvAkhjgCCffnll6vUwZEjR/KCULwJIU4BMft+/fqJ4uJiXgyKNyGEEIo3IQ5gtrTy69WrV40pg5rrH1wZd+/mNSMUb0ICzpAhQ1S+96JFizy+jnZlzZqZ88TvuEOIbdt47QjFm5CAsVtOpbdJJS4rK6v22vDh3jvdoLM7vbcJxZsQm5GZKcQZZ7jF+qKLhPj3v83eKM8/b49jxY0HWSdfffUVPziKNyGhTUSEuW0cGjqDuDi3H/gpp0A4A3+seHI4VlolHnfccdLidgc/PIo3IaHLTz+5xbtzZ/Nr55/vfs0uC5hwHBw7diw7zVO8CQlt0LhYE+jrrnP7ga9cKcTRR7v2n3aayzeFEIo3IQEEXtn5lSp96JBrUdLYA/Ohh4Q48UT3vpde4jUjFG9CAsrUqVNF69atxeLFi/V9Y8Z4zzY56ywh9u3jdSMUb0ICCnpEtmvXThk+VQ2ftGhhFm7MwFNSeM0IxZuQgFNSUqI2T1RUCIG+v/PnC5GRYf9zKZSpMb/88ouIjIzkB0vxJoQ4BVSNwnHw6quv9lh8RCjehBCbPkU8K7srz507lxeD4k0IIYTiTQghFG+KNyG+Ii8vT6xevVokJibyYhCKNyFOYfny5coidsCAAbwYhOJNiFM4fPiw+O2338TSpUuD6py6d+9eY8MJQvEmhNiMVq1aqdTB9u3b82JQvAkhTmHTpk1CaoGK5ROKNyGEEIo3IYRQvCnehBBC8SYk9MiUTSxHjx6tutMQQvEmxCEcOXJE+XsjQ6O0tDSozi0tLU306dOHHzLFm5DgJDo6WqSmpkpL2IqgOSe4DJ4vm3AidXDWrFn8kCnehBCn0K9fP9nC7SWRnJzMi0HxJoQQQvEmhBCKN8WbEEIo3oSEIMg+IYTiTYhDKCoqEl27dhUff/xx0KUMamzZskVMnTqVH7bdxPuJJ54oTE9PF9y4cWvY1qNHD9GpUyexYcOGoDu3mJgYceyxx4rTTjtNJCQk8PO2aHvmmWfy6ivep0jxTpGz7wxu3Lg1bJM/vIPBfH7NmzcvkrnfBVIvDvLztmZ7/PHH99x3331n/Y0QQnzIUbwEhBBCCCGEEEIIIYQQQgghhBASgvw/x5amaSTCRXEAAAAASUVORK5CYII=) The algorithm repeats the calculation of centroids and assignment of points until points stop changing clusters. When clustering large datasets, you stop the algorithm before reaching convergence, using other criteria instead. *Note: Some content in this section was [adapted](https://creativecommons.org/licenses/by/4.0/) from Google's free [Clustering in Machine Learning](https://developers.google.com/machine-learning/clustering) course. The course is a great resource if you want to explore clustering in more detail!* ### Cluster the Spotify Tracks using their Audio Features Now, we will use the `sklearn.cluster.KMeans` Python library to apply the $k$-means algorithm to our `tracks_df` data. Based on our visual inspection of the PCA plot, let's start with a guess k=3 to get 3 clusters. ``` initial_k = ____ # Scale the data, so that the units of features don't impact feature importance scaled_df = StandardScaler().fit_transform(tracks_df[audio_feature_cols]) # Cluster the data using the k means algorithm initial_cluster_results = ______(n_clusters=initial_k, n_init=25, random_state=rs).fit(scaled_df) ``` Now, let's print the cluster results. Notice that we're given a number (0 or 1) for each observation in our data set. This number is the id of the cluster assigned to each track. ``` # Print the cluster results print(initial_cluster_results._______) ``` And let's save the cluster results in our `tracks_df` dataframe as a column named `initial_cluster` so we can access them later. ``` # Save the cluster labels in our dataframe tracks_df[______________] = ['Cluster ' + str(i) for i in __________.______] ``` Let's plot the PCA plot and color each observation based on the assigned cluster to visualize our $k$-means results. ``` # Show a PCA plot of the clusters pca_plot(tracks_df[audio_feature_cols], classes=tracks_df['initial_cluster']) ``` Does it look like our $k$-means algorithm correctly separated the tracks into clusters? Does each color map to a distinct group of points? ### How do our clusters of songs differ? One way we can evaluate our clusters is by looking how the distribution of each data feature varies by cluster. 
In our case, let's check to see if tracks in the different clusters tend to have different values of energy, loudness, or speechiness. ``` # Plot the distribution of audio features by cluster g = sns.pairplot(tracks_df, hue="initial_cluster", vars=['danceability', 'energy', 'loudness', 'speechiness', 'tempo'], hue_order=sorted(tracks_df.initial_cluster.unique()), palette='Set1') g.fig.suptitle('Distribution of Audio Features by Cluster', y=1.05) plt.show() ``` ### Experiment with different values of $k$ Use the slider to select different values of $k$, then run the cell below to see how the choice of the number of clusters affects our results. ``` trial_k = 10 #@param {type:"slider", min:1, max:10, step:1} # Cluster the data using the k means algorithm trial_cluster_results = KMeans(n_clusters=trial_k, n_init=25, random_state=rs).fit(scaled_df) # Save the cluster labels in our dataframe tracks_df['trial_cluster'] = ['Cluster ' + str(i) for i in trial_cluster_results.labels_] # Show a PCA plot of the clusters pca_plot(tracks_df[audio_feature_cols], classes=tracks_df['trial_cluster']) # Plot the distribution of audio features by cluster g = sns.pairplot(tracks_df, hue="trial_cluster", vars=['danceability', 'energy', 'loudness', 'speechiness', 'tempo'], hue_order=sorted(tracks_df.trial_cluster.unique()), palette='Set1') g.fig.suptitle('Distribution of Audio Features by Cluster', y=1.05) plt.show() ``` ### Which value of $k$ works best for our data? You may have noticed that the $k$-means algorithm requires you to choose $k$ and decide the number of clusters before you run the algorithm. But how do we know which value of $k$ is the best fit for our data? One approach is to track the total distance from points to their cluster centroid as we increase the number of clusters, $k$. Usually, the total distance decreases as we increase $k$, but we reach a value of $k$ where increasing $k$ only marginally decreases the total distance. An elbow plot helps us to find that value of $k$; it's the value of $k$ where the slope of the line in the elbow plot crosses the threshold of slope $=-1$. When you plot distance vs $k$, this point often looks like an "elbow". Let's build an elbow plot to select the value of $k$ that will give us the highest quality clusters that best explain the variation in our data. ``` # Calculate the Total Distance for each value of k between 1 and 10 scores = [] k_list = np.arange(____,____) for i in k_list: fit_k = _____(n_clusters=i, n_init=5, random_state=rs).fit(scaled_df) scores.append(fit_k.inertia_) # Plot this in an elbow plot plt.figure(figsize=(11,8.5)) sns.lineplot(______, ______) plt.xlabel('Number of clusters $k$') plt.ylabel('Total Point to Centroid Distance') plt.grid() plt.title('The Elbow Method showing the optimal $k$') plt.show() ``` Do you see the "elbow"? At what value of $k$ does it occur? ### Evaluate the results of our clustering algorithm for the best $k$ Use the slider below to choose the "best" $k$ that you determined from looking at the elbow plot. Evaluate the results in the PCA plot. Does this look like a good value of $k$ to separate the data into meaningful clusters? 
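Before committing to a value with the slider, it can also help to cross-check the elbow with a simple numeric heuristic. The sketch below is only one such heuristic, not the course's prescribed method: it assumes the `scores` list and `k_list` array from the elbow-plot cell above are still in scope, and it picks the $k$ where the improvement in total distance slows down the most.

```
import numpy as np

# Improvement gained by each additional cluster (negative values: inertia decreases)
diffs = np.diff(scores)
# How sharply that improvement shrinks from one k to the next
second_diffs = np.diff(diffs)
# The elbow is roughly where the slowdown is largest
suggested_k = k_list[np.argmax(second_diffs) + 1]
print(f"Suggested k from the elbow heuristic: {suggested_k}")
```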
``` best_k = 1 #@param {type:"slider", min:1, max:10, step:1} # Cluster the data using the k means algorithm best_cluster_results = KMeans(n_clusters=best_k, n_init=25, random_state=rs).fit(scaled_df) # Save the cluster labels in our dataframe tracks_df['best_cluster'] = ['Cluster ' + str(i) for i in best_cluster_results.labels_] # Show a PCA plot of the clusters pca_plot(tracks_df[audio_feature_cols], classes=tracks_df['best_cluster']) ``` ## How did we do? In addition to the mathematical ways to validate the selection of the best $k$ parameter for our model and the quality of our resulting clusters, there's another very important way to evaluate our results: listening to the tracks! Let's listen to the tracks in each cluster! What do you notice about the attributes that tracks in each cluster have in common? What do you notice about how the clusters are different? What makes each cluster unique? ``` play_cluster_tracks(tracks_df, cluster_column='best_cluster') ``` ## Wrap Up and Next Session That's a wrap! Now that you've learned some practical skills in data science, please join us tomorrow afternoon for the third and final session in our series, where we'll talk about how to continue your studies and/or pursue a career in Data Science! **Making Your Next Professional Play in Data Science**\ Friday, October 2 | 11:30am - 12:45pm PT\ [https://sched.co/dtqZ](https://sched.co/dtqZ)
github_jupyter
# Python是什么? ### Python是一种高级的多用途编程语言,广泛用于各种非技术和技术领域。Python是一种具备动态语义、面向对象的解释型高级编程语言。它的高级内建数据结构和动态类型及动态绑定相结合,使其在快速应用开发上极具吸引力,也适合于作为脚本或者“粘合剂”语言,将现有组件连接起来。Python简单、易学的语法强调可读性,因此可以降低程序维护成本。Python支持模块和软件包,鼓励模块化的代码重用。 ``` print('hellow world') ``` ## Python简史 ### 1989,为了度过圣诞假期,Guido开始编写Python语言编译器。Python这个名字来自Guido的喜爱的电视连续剧《蒙蒂蟒蛇的飞行马戏团》。他希望新的语言Python能够满足他在C和Shell之间创建全功能、易学、可扩展的语言的愿景。 ### 1989年由荷兰人Guido van Rossum于1989年发明,第一个公开发行版发行于1991年 ### Granddaddy of Python web frameworks, Zope 1 was released in 1999 ### Python 1.0 - January 1994 增加了 lambda, map, filter and reduce. ### Python 2.0 - October 16, 2000,加入了内存回收机制,构成了现在Python语言框架的基础 ### Python 2.4 - November 30, 2004, 同年目前最流行的WEB框架Django 诞生 ### Python 2.5 - September 19, 2006 ### Python 2.6 - October 1, 2008 ### Python 2.7 - July 3, 2010 ### Python 3.0 - December 3, 2008 ### Python 3.1 - June 27, 2009 ### Python 3.2 - February 20, 2011 ### Python 3.3 - September 29, 2012 ### Python 3.4 - March 16, 2014 ### Python 3.5 - September 13, 2015 ### Python 3.6 - December 23, 2016 ### Python 3.7 - June 15, 2018 ## Python的主要运用领域有: ### 云计算:云计算最热的语言,典型的应用OpenStack ### WEB开发:许多优秀的WEB框架,许多大型网站是Python开发、YouTube、Dropbox、Douban……典型的Web框架包括Django ### 科学计算和人工智能:典型的图书馆NumPy、SciPy、Matplotlib、Enided图书馆、熊猫 ### 系统操作和维护:操作和维护人员的基本语言 ### 金融:定量交易、金融分析,在金融工程领域,Python不仅使用最多,而且使用最多,其重要性逐年增加。 ![jupyter](./tiobe_rank.png) ## Python在一些公司的运用有: ### 谷歌:谷歌应用程序引擎,代码。谷歌。com、Google.、Google爬虫、Google广告和其他项目正在广泛使用Python。 ### CIA:美国中情局网站是用Python开发的 ### NASA:美国航天局广泛使用Python进行数据分析和计算 ### YouTube:世界上最大的视频网站YouTube是用Python开发的。 ### Dropbox:美国最大的在线云存储网站,全部用Python实现,每天处理10亿的文件上传和下载。 ### Instagram:美国最大的照片共享社交网站,每天有3000多万张照片被共享,所有这些都是用Python开发的 ### Facebook:大量的基本库是通过Python实现的 ### Red.:世界上最流行的Linux发行版中的Yum包管理工具是用Python开发的 ### Douban:几乎所有公司的业务都是通过Python开发的。 ### 知识:中国最大的Q&A社区,通过Python开发(国外Quora) ### 除此之外,还有搜狐、金山、腾讯、盛大、网易、百度、阿里、淘宝、土豆、新浪、果壳等公司正在使用Python来完成各种任务。 ## Python有如下特征: ### 1. 开放源码:Python和大部分可用的支持库及工具都是开源的,通常使用相当灵活和开放的许可证。 ### 2. 多重范型:Python支持不同的编程和实现范型,例如面向对象和命令式/函数式或者过程式编程。 ### 3. 多用途:Python可以用用于快速、交互式代码开发,也可以用于构建大型应用程序;它可以用于低级系统操作,也可以承担高级分析任务。 ### 4. 跨平台:Python可用于大部分重要的操作系统,如Windows、Linux和Mac OS;它用于构建桌面应用和Web应用。 ### 5. 
运行速度慢:这里是指与C和C++相比。 ## Python 常用标准库 ### math模块为浮点运算提供了对底层C函数库的访问: ``` import math print(math.pi) print(math.log(1024, 2)) ``` ### random提供了生成随机数的工具。 ``` import random print(random.choice(['apple', 'pear', 'banana'])) print(random.random()) ``` ### datetime模块为日期和时间处理同时提供了简单和复杂的方法。 ``` from datetime import date now = date.today() birthday = date(1999, 8, 20) age = now - birthday print(age.days) ``` ### Numpy是高性能科学计算和数据分析的基础包。 ### Pandas 纳入了大量库和一些标准的数据模型,提供了高效地操作大型数据集所需的工具。 ### Statismodels是一个Python模块,它提供对许多不同统计模型估计的类和函数,并且可以进行统计测试和统计数据的探索。 ### matplotlib一个绘制数据图的库。对于数据科学家或分析师非常有用。 ### 更多https://docs.python.org/zh-cn/3/library/ # 基础架构工具 ## Anaconda安装 https://www.anaconda.com/products/individual ## Spyder使用 ## GitHub创建与使用 ### GitHub 是一个面向开源及私有软件项目的托管平台,因为只支持 Git 作为唯一的版本库格式进行托管,故名 GitHub。 GitHub 于 2008 年 4 月 10 日正式上线,除了 Git 代码仓库托管及基本的 Web 管理界面以外,还提供了订阅、讨论组、文本渲染、在线文件编辑器、协作图谱(报表)、代码片段分享(Gist)等功能。目前,其注册用户已经超过350万,托管版本数量也是非常之多,其中不乏知名开源项目 Ruby on Rails、jQuery、python 等。GitHub 去年为漏洞支付了 16.6 万美元赏金。 2018年6月,GitHub被微软以75亿美元的价格收购。https://github.com/ # Python基础语法 ``` print ("Hello, Python!") ``` ## 行和缩进 ### python 最具特色的就是用缩进来写模块。 ### 缩进的空白数量是可变的,但是所有代码块语句必须包含相同的缩进空白数量,这个必须严格执行。 ### 以下实例缩进为四个空格: ``` if 1>2: print ("True") else: print ("False") if True: print ("Answer") print ("True") else: print ("Answer") # 没有严格缩进,在执行时会报错 print ("False") ``` ## 多行语句 ### Python语句中一般以新行作为语句的结束符。 ### 但是我们可以使用斜杠( \)将一行的语句分为多行显示,如下所示: ``` total = 1 + \ 2 + \ 3 print(total) ``` ### 语句中包含 [], {} 或 () 括号就不需要使用多行连接符。如下实例: ``` days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'] print(days) ``` ## Python 引号 ### Python 可以使用引号( ' )、双引号( " )、三引号( ''' 或 """ ) 来表示字符串,引号的开始与结束必须是相同类型的。 ### 其中三引号可以由多行组成,编写多行文本的快捷语法,常用于文档字符串,在文件的特定地点,被当做注释。 ``` word = 'word' sentence = "这是一个句子。" paragraph = """这是一个段落。 包含了多个语句""" print(word) print(sentence) print(paragraph) ``` ## Python注释 ### python中单行注释采用 # 开头。 ``` # 第一个注释 print ("Hello, Python!") # 第二个注释 ``` # Python变量类型 ## 标准数据类型 ### 在内存中存储的数据可以有多种类型。 ### 例如,一个人的年龄可以用数字来存储,他的名字可以用字符来存储。 ### Python 定义了一些标准类型,用于存储各种类型的数据。 ### Python有五个标准的数据类型: ### Numbers(数字) ### String(字符串) ### List(列表) ### Tuple(元组) ### Dictionary(字典) ## Python数字 ### Python支持三种不同的数字类型: ### int(有符号整型) ### float(浮点型) ### complex(复数) ``` int1 = 1 float2 = 2.0 complex3 = 1+2j print(type(int1),type(float2),type(complex3)) ``` ## Python字符串 ### 字符串或串(String)是由数字、字母、下划线组成的一串字符。 ``` st = '123asd_' st1 = st[0:3] print(st) print(st1) ``` ## Python列表 ### List(列表) 是 Python 中使用最频繁的数据类型。 ### 列表可以完成大多数集合类的数据结构实现。它支持字符,数字,字符串甚至可以包含列表(即嵌套)。 ### 列表用 [ ] 标识,是 python 最通用的复合数据类型。 ``` list1 = [ 'runoob', 786 , 2.23, 'john', 70.2 ] tinylist = [123, 'john'] print (list1) # 输出完整列表 print (list1[0]) # 输出列表的第一个元素 print (list1[1:3]) # 输出第二个至第三个元素 print (list1[2:]) # 输出从第三个开始至列表末尾的所有元素 print (tinylist * 2) # 输出列表两次 print (list1 + tinylist) # 打印组合的列表 list1[0] = 0 print(list1) ``` ## Python元组 ### 元组是另一个数据类型,类似于 List(列表)。 ### 元组用 () 标识。内部元素用逗号隔开。但是元组不能二次赋值,相当于只读列表。 ``` tuple1 = ( 'runoob', 786 , 2.23, 'john', 70.2 ) tinytuple = (123, 'john') print(tuple1[0]) print(tuple1+tinytuple) ``` ## Python 字典 ### 字典(dictionary)是除列表以外python之中最灵活的内置数据结构类型。列表是有序的对象集合,字典是无序的对象集合。 ### 两者之间的区别在于:字典当中的元素是通过键来存取的,而不是通过偏移存取。 ### 字典用"{ }"标识。字典由索引(key)和它对应的值value组成。 ``` dict1 = {} dict1['one'] = "This is one" tinydict = {'name': 'john','code':6734, 'dept': 'sales'} print(tinydict['name']) print(dict1) print(tinydict.keys()) print(tinydict.values()) ``` # Python运算符 ## Python算术运算符 ``` a = 21 b = 10 c = 0 c = a + b print ("c 的值为:", c) c = a - b print ("c 的值为:", c) c = a * b print ("c 
的值为:", c) c = a / b print ("c 的值为:", c) c = a % b #取余数 print ("c 的值为:", c) # 修改变量 a 、b 、c a = 2 b = 3 c = a**b print ("c 的值为:", c) a = 10 b = 5 c = a//b #取整 print ("c 的值为:", c) ``` ## Python比较运算符 ``` a = 21 b = 10 c = 0 if a == b : print ("a 等于 b") else: print ("a 不等于 b") if a != b : print ("a 不等于 b") else: print ("a 等于 b") if a < b : print ("a 小于 b") else: print ("a 大于等于 b") if a > b : print ("a 大于 b") else: print ("a 小于等于 b") # 修改变量 a 和 b 的值 a = 5 b = 20 if a <= b : print ("a 小于等于 b") else: print ("a 大于 b") if b >= a : print ("b 大于等于 a") else: print ("b 小于 a") ``` ## Python逻辑运算符 ``` a = True b = False if a and b : print ("变量 a 和 b 都为 true") else: print ("变量 a 和 b 有一个不为 true") if a or b : print ("变量 a 和 b 都为 true,或其中一个变量为 true") else: print ("变量 a 和 b 都不为 true") if not( a and b ): print ("变量 a 和 b 都为 false,或其中一个变量为 false") else: print ("变量 a 和 b 都为 true") ``` ## Python赋值运算符 ``` a = 21 b = 10 c = 0 c = a + b print ("c 的值为:", c) c += a print ("c 的值为:", c) c *= a print ("c 的值为:", c) c /= a print ("c 的值为:", c) c = 2 c %= a print ("c 的值为:", c) c **= a print ("c 的值为:", c) c //= a print ("c 的值为:", c) ``` # Python 条件语句 ### Python条件语句是通过一条或多条语句的执行结果(True或者False)来决定执行的代码块。 ``` flag = False name = 'luren' if name == 'python': # 判断变量是否为 python flag = True # 条件成立时设置标志为真 print ('welcome boss') # 并输出欢迎信息 else: print (name) # 条件不成立时输出变量名称 num = 5 if num == 3: # 判断num的值 print ('boss') elif num == 2: print ('user') elif num == 1: print ('worker') elif num < 0: # 值小于零时输出 print ('error') else: print ('roadman') # 条件均不成立时输出 num = 9 if num >= 0 and num <= 10: # 判断值是否在0~10之间 print ('hello') num = 10 if num < 0 or num > 10: # 判断值是否在小于0或大于10 print ('hello') else: print ('undefine') num = 8 # 判断值是否在0~5或者10~15之间 if (num >= 0 and num <= 5) or (num >= 10 and num <= 15): print ('hello') else: print ('undefine') ``` # Python循环语句 ## Python 提供了 for 循环和 while 循环 ## Python While 循环语句 ### Python 编程中 while 语句用于循环执行程序,即在某条件下,循环执行某段程序,以处理需要重复处理的相同任务。 ``` count = 0 while (count < 9): print ('The count is:', count) count = count + 1 print ("Good bye!") count = 0 while count < 5: print (count, " is less than 5") count = count + 1 else: print (count, " is not less than 5") ``` ## Python for 循环语句 ``` fruits = ['banana', 'apple', 'mango'] for index in range(len(fruits)): print ('当前水果 :', fruits[index]) print ("Good bye!") ``` ## Python 循环嵌套 ``` num=[]; i=2 for i in range(2,100): j=2 for j in range(2,i): if(i%j==0): break else: num.append(i) print(num) print ("Good bye!") ``` ## Python break 语句 ``` for letter in 'Python': if letter == 'h': break print ('当前字母 :', letter) ``` ## Python continue 语句 ### Python continue 语句跳出本次循环,而break跳出整个循环。 ``` for letter in 'Python': if letter == 'h': continue print ('当前字母 :', letter) ``` ## Python pass 语句 ### Python pass 是空语句,是为了保持程序结构的完整性。 ### pass 不做任何事情,一般用做占位语句。 ``` # 输出 Python 的每个字母 for letter in 'Python': if letter == 'h': pass print ('这是 pass 块') print ('当前字母 :', letter) print ("Good bye!") ``` # Python应用实例(链家二手房数据分析) ## 一、根据上海的部分二手房信息,从多角度进行观察和分析房价与哪些因素有关以及房屋不同状况所占比例 ## 二、先对数据进行预处理、构造预测房价的模型、并输入参数对房价进行预测 备注:数据来源CSDN下载。上海链家二手房.csv.因文件读入问题,改名为sh.csv ## 一、导入数据 对数据进行一些简单的预处理 ``` #导入需要用到的包 import pandas as pd import numpy as np import seaborn as sns import matplotlib as mpl import matplotlib.pyplot as plt from IPython.display import display sns.set_style({'font.sans-serif':['simhei','Arial']}) %matplotlib inline shanghai=pd.read_csv('sh.csv')# 将已有数据导进来 shanghai.head(n=1)#显示第一行数据 查看数据是否导入成功 ``` ### 每项数据类型均为object 不方便处理,需要对一些项删除单位转换为int或者float类型 ### 有些列冗余 像house_img需要删除 ### 有些列 如何house_desc包含多种信息 
需要逐个提出来单独处理 ``` shanghai.describe() # 检查缺失值情况 shanghai.info() #np.isnan(shanghai).any() shanghai.dropna(inplace=True) #数据处理 删除带有NAN项的行 df=shanghai.copy() house_desc=df['house_desc'] house_desc[0] ``` ### house_desc 中带有 室厅的信息 房子面积 楼层 朝向信息 需要分别提出来当一列 下面进行提取 ``` df['layout']=df['house_desc'].map(lambda x:x.split('|')[0]) df['area']=df['house_desc'].map(lambda x:x.split('|')[1]) df['temp']=df['house_desc'].map(lambda x:x.split('|')[2]) #df['Dirextion']=df['house_desc'].map(lambda x:x.split('|')[3]) df['floor']=df['temp'].map(lambda x:x.split('/')[0]) df.head(n=1) ``` ### 一些列中带有单位 不利于后期处理 去掉单位 并把数据类型转换为float或int ``` df['area']=df['area'].apply(lambda x:x.rstrip('平')) df['singel_price']=df['singel_price'].apply(lambda x:x.rstrip('元/平')) df['singel_price']=df['singel_price'].apply(lambda x:x.lstrip('单价')) df['district']=df['district'].apply(lambda x:x.rstrip('二手房')) df['house_time']=df['house_time'].apply(lambda x:str(x)) df['house_time']=df['house_time'].apply(lambda x:x.rstrip('年建')) df.head(n=1) ``` ### 删除一些不需要用到的列 以及 house_desc、temp ``` del df['house_img'] del df['s_cate_href'] del df['house_desc'] del df['zone_href'] del df['house_href'] del df['temp'] ``` ### 根据房子总价和房子面积 计算房子每平方米的价格 ### 从house_title 描述房子信息中提取关键词。若带有 交通便利、地铁则认为其交通方便,否则交通不便 ``` df.head(n=1) df['singel_price']=df['singel_price'].apply(lambda x:float(x)) df['area']=df['area'].apply(lambda x:float(x)) df.head(n=1) df.head(n=1) df['house_title']=df['house_title'].apply(lambda x:str(x)) df['trafic']=df['house_title'].apply(lambda x:'交通便利' if x.find("交通便利")>=0 or x.find("地铁")>=0 else "交通不便" ) df.head(n=1) ``` ## 二、根据各列信息 用可视化的形式展现 房价与不同因素如地区、房子面积、所在楼层等之间的关系 ``` df_house_count = df.groupby('district')['house_price'].count().sort_values(ascending=False).to_frame().reset_index() df_house_mean = df.groupby('district')['singel_price'].mean().sort_values(ascending=False).to_frame().reset_index() f, [ax1,ax2,ax3] = plt.subplots(3,1,figsize=(20,15)) sns.barplot(x='district', y='singel_price', palette="Reds_d", data=df_house_mean, ax=ax1) ax1.set_title('上海各大区二手房每平米单价对比',fontsize=15) ax1.set_xlabel('区域') ax1.set_ylabel('每平米单价') sns.countplot(df['district'], ax=ax2) sns.boxplot(x='district', y='house_price', data=df, ax=ax3) ax3.set_title('上海各大区二手房房屋总价',fontsize=15) ax3.set_xlabel('区域') ax3.set_ylabel('房屋总价') plt.show() ``` ### 上面三幅图显示了 房子单价、总数量、总价与地区之间的关系。 #### 由上面第一幅图可以看到房子单价与地区有关,其中黄浦以及静安地区房价最高。这与地区的发展水平、交通便利程度以及离市中心远近程度有关 #### 由上面第二幅图可以直接看出不同地区的二手房数量,其中浦东最多 #### 由上面第三幅图可以看出上海二手房房价基本在一千万上下,很少有高于两千万的 ``` f, [ax1,ax2] = plt.subplots(1, 2, figsize=(15, 5)) # 二手房的面积分布 sns.distplot(df['area'], bins=20, ax=ax1, color='r') sns.kdeplot(df['area'], shade=True, ax=ax1) # 二手房面积和价位的关系 sns.regplot(x='area', y='house_price', data=df, ax=ax2) plt.show() ``` ### 由从左到右第一幅图可以看出 基本二手房面积在60-200平方米之间,其中一百平方米左右的占比更大 ### 由第二幅看出,二手房总结与二手房面积基本成正比,和我们的常识吻合 ``` areas=[len(df[df.area<100]),len(df[(df.area>100)&(df.area<200)]),len(df[df.area>200])] labels=['area<100' , '100<area<200','area>200'] plt.pie(areas,labels= labels,autopct='%0f%%',shadow=True) plt.show() # 绘制饼图 ``` ### 将面积划分为三个档次,面积大于200、面积小与100、面积在一百到两百之间 三者的占比情况可以发现 百分之六十九的房子面积在一百平方米一下,高于一百大于200的只有百分之二十五而面积大于两百的只有百分之四 ``` df.loc[df['area']>1000] # 查看size>1000的样本 发现只有一个是大于1000 f, ax1= plt.subplots(figsize=(20,20)) sns.countplot(y='layout', data=df, ax=ax1) ax1.set_title('房屋户型',fontsize=15) ax1.set_xlabel('数量') ax1.set_ylabel('户型') f, ax2= plt.subplots(figsize=(20,20)) sns.barplot(y='layout', x='house_price', data=df, ax=ax2) plt.show() ``` ### 上述两幅图显示了 不同户型的数量和价格 #### 由第一幅图看出2室1厅最多 2室2厅 3室2厅也较多 是主流的户型选择 #### 
由第二幅看出 室和厅的数量增加随之价格也增加,但是室和厅之间的比例要适合 ``` a1=0 a2=0 for x in df['trafic']: if x=='交通便利': a1=a1+1 else: a2=a2+1 sizes=[a1,a2] labels=['交通便利' , '交通不便'] plt.pie(sizes,labels= labels,autopct='%0f%%',shadow=True) plt.show() ``` #### 上述图显示了上海二手房交通不便利情况。其中百分之六十一为交通不便,百分之三十八为交通不便。由于交通便利情况仅仅是根据对房屋的描述情况提取出来的,实际上 交通便利的占比会更高些 ``` f, [ax1,ax2] = plt.subplots(1, 2, figsize=(20, 10)) sns.countplot(df['trafic'], ax=ax1) ax1.set_title('交通是否便利数量对比',fontsize=15) ax1.set_xlabel('交通是否便利') ax1.set_ylabel('数量') sns.barplot(x='trafic', y='house_price', data=df, ax=ax2) ax2.set_title('交通是否便利房价对比',fontsize=15) ax2.set_xlabel('交通是否便利') ax2.set_ylabel('总价') plt.show() ``` ### 左边那幅图显示了交通便利以及不便的二手房数量,这与我们刚才的饼图信息一致 ### 右边那幅图显示了交通便利与否与房价的关系。交通便利的房子价格更高 ``` f, ax1= plt.subplots(figsize=(20,5)) sns.countplot(x='floor', data=df, ax=ax1) ax1.set_title('楼层',fontsize=15) ax1.set_xlabel('楼层数') ax1.set_ylabel('数量') f, ax2 = plt.subplots(figsize=(20, 5)) sns.barplot(x='floor', y='house_price', data=df, ax=ax2) ax2.set_title('楼层',fontsize=15) ax2.set_xlabel('楼层数') ax2.set_ylabel('总价') plt.show() ``` #### 楼层(地区、高区、中区、地下几层)与数量、房价的关系。高区、中区、低区居多 ## 三、根据已有数据建立简单的上海二手房房间预测模型 ### 对数据再次进行简单的预处理 把户型这列拆成室和厅 ``` df[['室','厅']] = df['layout'].str.extract(r'(\d+)室(\d+)厅') df['室'] = df['室'].astype(float) df['厅'] = df['厅'].astype(float) del df['layout'] df.head() df.dropna(inplace=True) df.info() df.columns ``` ### 删除不需要用到的信息如房子的基本信息描述 ``` del df['house_title'] del df['house_detail'] del df['s_cate'] from sklearn.linear_model import LinearRegression linear = LinearRegression() area=df['area'] price=df['house_price'] area = np.array(area).reshape(-1,1) # 这里需要注意新版的sklearn需要将数据转换为矩阵才能进行计算 price = np.array(price).reshape(-1,1) # 训练模型 model = linear.fit(area,price) # 打印截距和回归系数 print(model.intercept_, model.coef_) linear_p = model.predict(area) plt.figure(figsize=(12,6)) plt.scatter(area,price) plt.plot(area,linear_p,'red') plt.xlabel("area") plt.ylabel("price") plt.show() ``` #### 上面用线性回归模型对房价进行简单的预测 红色的代表预测房价,而蓝色点代表真实值。可以看出在面积小于1000时真实值紧密分布在预测值两旁 # 注意! ## 当是用Jupyter Notebook编程时,第一步请检查Notebook是否可读性可写 ![jupyter](./jup.png) ## 如果显示read-only,请打开终端(CMD),输入sudo chmod -R 777 filename,给文件夹授权,之后重新打开Jupyter Notebook方可保存文件。
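Returning to the linear regression model fitted above, the same `model` object can also be used to predict a total price for new floor areas. This is only an illustrative sketch: it assumes the fitted `model` from the earlier `LinearRegression` cell is still in scope, and the example areas below are made-up values.

```
import numpy as np

# Predict the total price (same units as house_price) for a few example areas
new_areas = np.array([60, 100, 150]).reshape(-1, 1)  # illustrative areas in square metres
predicted_prices = model.predict(new_areas)
for area, price in zip(new_areas.ravel(), predicted_prices.ravel()):
    print(f"area = {area} -> predicted price = {price:.1f}")
```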
github_jupyter
<a href="https://colab.research.google.com/github/huan/concise-chit-chat/blob/master/Concise_Chit_Chat.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Concise Chit Chat GitHub Repository: <https://github.com/huan/concise-chit-chat> ## Code TODO: 1. create a DataLoader class for dataset preprocess. (Use tf.data.Dataset inside?) 1. Create a PyPI package for easy load cornell movie curpos dataset(?) 1. Use PyPI module `embeddings` to load `GLOVES`, or use tfhub to load `GLOVES`? 1. How to do a `clip_norm`(or set `clip_value`) in Keras with Eager mode but without `tf.contrib`? 1. Better name for variables & functions 1. Code clean 1. Encapsulate all layers to Model Class: 1. ChitChatEncoder 1. ChitChatDecoder 1. ChitChatModel 1. Re-style to follow the book 1. ...? ## Book Todo 1. Outlines 1. What's seq2seq 1. What's word embedding 1. 1. Split code into snips 1. Write for snips 1. Content cleaning and optimizing 1. ...? ## Other 1. `keras.callbacks.TensorBoard` instead of `tf.contrib.summary`? - `model.fit(callbacks=[TensorBoard(...)])` 1. download url? - http://old.pep.com.cn/gzsx/jszx_1/czsxtbjxzy/qrzptgjzxjc/dzkb/dscl/ ### config.py ``` '''doc''' # GO for start of the sentence # DONE for end of the sentence GO = '\b' DONE = '\a' # max words per sentence MAX_LEN = 20 ``` ### data_loader.py ``` ''' data loader ''' import gzip import re from typing import ( # Any, List, Tuple, ) import tensorflow as tf import numpy as np # from .config import ( # GO, # DONE, # MAX_LEN, # ) DATASET_URL = 'https://github.com/huan/concise-chit-chat/releases/download/v0.0.1/dataset.txt.gz' DATASET_FILE_NAME = 'concise-chit-chat-dataset.txt.gz' class DataLoader(): '''data loader''' def __init__(self) -> None: print('DataLoader', 'downloading dataset from:', DATASET_URL) dataset_file = tf.keras.utils.get_file( DATASET_FILE_NAME, origin=DATASET_URL, ) print('DataLoader', 'loading dataset from:', dataset_file) # dataset_file = './data/dataset.txt.gz' # with open(path, encoding='iso-8859-1') as f: with gzip.open(dataset_file, 'rt') as f: self.raw_text = f.read().lower() self.queries, self.responses \ = self.__parse_raw_text(self.raw_text) self.size = len(self.queries) def get_batch( self, batch_size=32, ) -> Tuple[List[List[str]], List[List[str]]]: '''get batch''' # print('corpus_list', self.corpus) batch_indices = np.random.choice( len(self.queries), size=batch_size, ) batch_queries = self.queries[batch_indices] batch_responses = self.responses[batch_indices] return batch_queries, batch_responses def __parse_raw_text( self, raw_text: str ) -> Tuple[List[List[str]], List[List[str]]]: '''doc''' query_list = [] response_list = [] for line in raw_text.strip('\n').split('\n'): query, response = line.split('\t') query, response = self.preprocess(query), self.preprocess(response) query_list.append('{} {} {}'.format(GO, query, DONE)) response_list.append('{} {} {}'.format(GO, response, DONE)) return np.array(query_list), np.array(response_list) def preprocess(self, text: str) -> str: '''doc''' new_text = text new_text = re.sub('[^a-zA-Z0-9 .,?!]', ' ', new_text) new_text = re.sub(' +', ' ', new_text) new_text = re.sub( '([\w]+)([,;.?!#&-\'\"-]+)([\w]+)?', r'\1 \2 \3', new_text, ) if len(new_text.split()) > MAX_LEN: new_text = (' ').join(new_text.split()[:MAX_LEN]) match = re.search('[.?!]', new_text) if match is not None: idx = match.start() new_text = new_text[:idx+1] new_text = new_text.strip().lower() return new_text ``` ### vocabulary.py ``` '''doc''' 
import re from typing import ( List, ) import tensorflow as tf # from .config import ( # DONE, # GO, # MAX_LEN, # ) class Vocabulary: '''voc''' def __init__(self, text: str) -> None: self.tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='') self.tokenizer.fit_on_texts( [GO, DONE] + re.split( r'[\s\t\n]', text, ) ) # additional 1 for the index 0 self.size = 1 + len(self.tokenizer.word_index.keys()) def texts_to_padded_sequences( self, text_list: List[List[str]] ) -> tf.Tensor: '''doc''' sequence_list = self.tokenizer.texts_to_sequences(text_list) padded_sequences = tf.keras.preprocessing.sequence.pad_sequences( sequence_list, maxlen=MAX_LEN, padding='post', truncating='post', ) return padded_sequences def padded_sequences_to_texts(self, sequence: List[int]) -> str: return 'tbw' ``` ### model.py ``` '''doc''' import tensorflow as tf import numpy as np from typing import ( List, ) # from .vocabulary import Vocabulary # from .config import ( # DONE, # GO, # MAX_LENGTH, # ) EMBEDDING_DIM = 300 LATENT_UNIT_NUM = 500 class ChitEncoder(tf.keras.Model): '''encoder''' def __init__( self, ) -> None: super().__init__() self.lstm_encoder = tf.keras.layers.CuDNNLSTM( units=LATENT_UNIT_NUM, return_state=True, ) def call( self, inputs: tf.Tensor, # shape: [batch_size, max_len, embedding_dim] training=None, mask=None, ) -> tf.Tensor: _, *state = self.lstm_encoder(inputs) return state # shape: ([latent_unit_num], [latent_unit_num]) class ChatDecoder(tf.keras.Model): '''decoder''' def __init__( self, voc_size: int, ) -> None: super().__init__() self.lstm_decoder = tf.keras.layers.CuDNNLSTM( units=LATENT_UNIT_NUM, return_sequences=True, return_state=True, ) self.dense = tf.keras.layers.Dense( units=voc_size, ) self.time_distributed_dense = tf.keras.layers.TimeDistributed( self.dense ) self.initial_state = None def set_state(self, state=None): '''doc''' # import pdb; pdb.set_trace() self.initial_state = state def call( self, inputs: tf.Tensor, # shape: [batch_size, None, embedding_dim] training=False, mask=None, ) -> tf.Tensor: '''chat decoder call''' # batch_size = tf.shape(inputs)[0] # max_len = tf.shape(inputs)[0] # outputs = tf.zeros(shape=( # batch_size, # batch_size # max_len, # max time step # LATENT_UNIT_NUM, # dimention of hidden state # )) # import pdb; pdb.set_trace() outputs, *states = self.lstm_decoder(inputs, initial_state=self.initial_state) self.initial_state = states outputs = self.time_distributed_dense(outputs) return outputs class ChitChat(tf.keras.Model): '''doc''' def __init__( self, vocabulary: Vocabulary, ) -> None: super().__init__() self.word_index = vocabulary.tokenizer.word_index self.index_word = vocabulary.tokenizer.index_word self.voc_size = vocabulary.size # [batch_size, max_len] -> [batch_size, max_len, voc_size] self.embedding = tf.keras.layers.Embedding( input_dim=self.voc_size, output_dim=EMBEDDING_DIM, mask_zero=True, ) self.encoder = ChitEncoder() # shape: [batch_size, state] self.decoder = ChatDecoder(self.voc_size) # shape: [batch_size, max_len, voc_size] def call( self, inputs: List[List[int]], # shape: [batch_size, max_len] teacher_forcing_targets: List[List[int]]=None, # shape: [batch_size, max_len] training=None, mask=None, ) -> tf.Tensor: # shape: [batch_size, max_len, embedding_dim] '''call''' batch_size = tf.shape(inputs)[0] inputs_embedding = self.embedding(tf.convert_to_tensor(inputs)) state = self.encoder(inputs_embedding) self.decoder.set_state(state) if training: teacher_forcing_targets = tf.convert_to_tensor(teacher_forcing_targets) 
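            # Teacher forcing: while training, the ground-truth token at each step
            # (not the model's own previous prediction) is fed back into the decoder below,
            # which usually makes seq2seq training converge faster and more stably.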
teacher_forcing_embeddings = self.embedding(teacher_forcing_targets) # outputs[:, 0, :].assign([self.__go_embedding()] * batch_size) batch_go_embedding = tf.ones([batch_size, 1, 1]) * [self.__go_embedding()] batch_go_one_hot = tf.ones([batch_size, 1, 1]) * [tf.one_hot(self.word_index[GO], self.voc_size)] outputs = batch_go_one_hot output = self.decoder(batch_go_embedding) for t in range(1, MAX_LEN): outputs = tf.concat([outputs, output], 1) if training: target = teacher_forcing_embeddings[:, t, :] decoder_input = tf.expand_dims(target, axis=1) else: decoder_input = self.__indice_to_embedding(tf.argmax(output)) output = self.decoder(decoder_input) return outputs def predict(self, inputs: List[int], temperature=1.) -> List[int]: '''doc''' outputs = self([inputs]) outputs = tf.squeeze(outputs) word_list = [] for t in range(1, MAX_LEN): output = outputs[t] indice = self.__logit_to_indice(output, temperature=temperature) word = self.index_word[indice] if indice == self.word_index[DONE]: break word_list.append(word) return ' '.join(word_list) def __go_embedding(self) -> tf.Tensor: return self.embedding( tf.convert_to_tensor(self.word_index[GO])) def __logit_to_indice( self, inputs, temperature=1., ) -> int: ''' [vocabulary_size] convert one hot encoding to indice with temperature ''' inputs = tf.squeeze(inputs) prob = tf.nn.softmax(inputs / temperature).numpy() indice = np.random.choice(self.voc_size, p=prob) return indice def __indice_to_embedding(self, indice: int) -> tf.Tensor: tensor = tf.convert_to_tensor([[indice]]) return self.embedding(tensor) ``` ### Train ### Tensor Board [Quick guide to run TensorBoard in Google Colab](https://www.dlology.com/blog/quick-guide-to-run-tensorboard-in-google-colab/) `tensorboard` vs `tensorboard/` ? ``` LOG_DIR = '/content/data/tensorboard/' get_ipython().system_raw( 'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &' .format(LOG_DIR) ) # Install ! npm install -g localtunnel # Tunnel port 6006 (TensorBoard assumed running) get_ipython().system_raw('lt --port 6006 >> url.txt 2>&1 &') # Get url ! cat url.txt '''train''' import tensorflow as tf # from chit_chat import ( # ChitChat, # DataLoader, # Vocabulary, # ) tf.enable_eager_execution() data_loader = DataLoader() vocabulary = Vocabulary(data_loader.raw_text) chitchat = ChitChat(vocabulary=vocabulary) def loss(model, x, y) -> tf.Tensor: '''doc''' weights = tf.cast( tf.not_equal(y, 0), tf.float32, ) prediction = model( inputs=x, teacher_forcing_targets=y, training=True, ) # implment the following contrib function in a loop ? 
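    # tf.contrib.seq2seq.sequence_loss applies softmax cross-entropy at every time step and
    # averages the result; `weights` is 0 at padded positions and 1 elsewhere, so padding
    # does not contribute to the loss.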
# https://stackoverflow.com/a/41135778/1123955 # https://stackoverflow.com/q/48025004/1123955 return tf.contrib.seq2seq.sequence_loss( prediction, tf.convert_to_tensor(y), weights, ) def grad(model, inputs, targets): '''doc''' with tf.GradientTape() as tape: loss_value = loss(model, inputs, targets) return tape.gradient(loss_value, model.variables) def train() -> int: '''doc''' learning_rate = 1e-3 num_batches = 8000 batch_size = 128 print('Dataset size: {}, Vocabulary size: {}'.format( data_loader.size, vocabulary.size, )) optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) root = tf.train.Checkpoint( optimizer=optimizer, model=chitchat, optimizer_step=tf.train.get_or_create_global_step(), ) root.restore(tf.train.latest_checkpoint('./data/save')) print('checkpoint restored.') writer = tf.contrib.summary.create_file_writer('./data/tensorboard') writer.set_as_default() global_step = tf.train.get_or_create_global_step() for batch_index in range(num_batches): global_step.assign_add(1) queries, responses = data_loader.get_batch(batch_size) encoder_inputs = vocabulary.texts_to_padded_sequences(queries) decoder_outputs = vocabulary.texts_to_padded_sequences(responses) grads = grad(chitchat, encoder_inputs, decoder_outputs) optimizer.apply_gradients( grads_and_vars=zip(grads, chitchat.variables) ) if batch_index % 10 == 0: print("batch %d: loss %f" % (batch_index, loss( chitchat, encoder_inputs, decoder_outputs).numpy())) root.save('./data/save/model.ckpt') print('checkpoint saved.') with tf.contrib.summary.record_summaries_every_n_global_steps(1): # your model code goes here tf.contrib.summary.scalar('loss', loss( chitchat, encoder_inputs, decoder_outputs).numpy()) # print('summary had been written.') return 0 def main() -> int: '''doc''' return train() main() #! rm -fvr data/tensorboard # ! pwd # ! rm -frv data/save # ! rm -fr /content/data/tensorboard # ! kill 2823 # ! kill -9 2823 # ! ps axf | grep lt ! cat url.txt ``` ### chat.py ``` '''train''' # import tensorflow as tf # from chit_chat import ( # ChitChat, # DataLoader, # Vocabulary, # DONE, # GO, # ) # tf.enable_eager_execution() def main() -> int: '''chat main''' data_loader = DataLoader() vocabulary = Vocabulary(data_loader.raw_text) print('Dataset size: {}, Vocabulary size: {}'.format( data_loader.size, vocabulary.size, )) chitchat = ChitChat(vocabulary) checkpoint = tf.train.Checkpoint(model=chitchat) checkpoint.restore(tf.train.latest_checkpoint('./data/save')) print('checkpoint restored.') return cli(chitchat, vocabulary=vocabulary, data_loader=data_loader) def cli(chitchat: ChitChat, data_loader: DataLoader, vocabulary: Vocabulary): '''command line interface''' index_word = vocabulary.tokenizer.index_word word_index = vocabulary.tokenizer.word_index query = '' while True: try: # Get input sentence query = input('> ').lower() # Check if it is quit case if query == 'q' or query == 'quit': break # Normalize sentence query = data_loader.preprocess(query) query = '{} {} {}'.format(GO, query, DONE) # Evaluate sentence query_sequence = vocabulary.texts_to_padded_sequences([query])[0] response_sequence = chitchat.predict(query_sequence, 1) # Format and print response sentence response_word_list = [ index_word[indice] for indice in response_sequence if indice != 0 and indice != word_index[DONE] ] print('Bot:', ' '.join(response_word_list)) except KeyError: print("Error: Encountered unknown word.") main() ! cat /proc/cpuinfo ```
github_jupyter
<img src="../../../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left"> # _*Quantum K-Means algorithm*_ The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial. *** ### Contributors Shan Jin, Xi He, Xiaokai Hou, Li Sun, Dingding Wen, Shaojun Wu and Xiaoting Wang$^{1}$ 1. Institute of Fundamental and Frontier Sciences, University of Electronic Science and Technology of China,Chengdu, China,610051 *** ## Introduction Clustering algorithm is a typical unsupervised learning algorithm, which is mainly used to automatically classify similar samples into one category.In the clustering algorithm, according to the similarity between the samples, the samples are divided into different categories. For different similarity calculation methods, different clustering results will be obtained. The commonly used similarity calculation method is the Euclidean distance method. What we want to show is the quantum K-Means algorithm. The K-Means algorithm is a distance-based clustering algorithm that uses distance as an evaluation index for similarity, that is, the closer the distance between two objects is, the greater the similarity. The algorithm considers the cluster to be composed of objects that are close together, so the compact and independent cluster is the ultimate target. #### Experiment design The implementation of the quantum K-Means algorithm mainly uses the swap test to compare the distances among the input data points. Select K points randomly from N data points as centroids, measure the distance from each point to each centroid, and assign it to the nearest centroid- class, recalculate centroids of each class that has been obtained, and iterate 2 to 3 steps until the new centroid is equal to or less than the specified threshold, and the algorithm ends. In our example, we selected 6 data points, 2 centroids, and used the swap test circuit to calculate the distance. Finally, we obtained two clusters of data points. $|0\rangle$ is an auxiliary qubit, through left $H$ gate, it will be changed to $\frac{1}{\sqrt{2}}(|0\rangle + |1\rangle)$. Then under the control of $|1\rangle$, the circuit will swap two vectors $|x\rangle$ and $|y\rangle$. Finally, we get the result at the right end of the circuit: $$|0_{anc}\rangle |x\rangle |y\rangle \rightarrow \frac{1}{2}|0_{anc}\rangle(|xy\rangle + |yx\rangle) + \frac{1}{2}|1_{anc}\rangle(|xy\rangle - |yx\rangle)$$ If we measure auxiliary qubit alone, then the probability of final state in the ground state $|1\rangle$ is: $$P(|1_{anc}\rangle) = \frac{1}{2} - \frac{1}{2}|\langle x | y \rangle|^2$$ If we measure auxiliary qubit alone, then the probability of final state in the ground state $|1\rangle$ is: $$Euclidean \ distance = \sqrt{(2 - 2|\langle x | y \rangle|)}$$ So, we can see that the probability of measuring $|1\rangle$ has positive correlation with the Euclidean distance. The schematic diagram of quantum K-Means is as the follow picture.[[1]](#cite) <img src="../images/k_means_circuit.png"> To make our algorithm can be run using qiskit, we design a more detailed circuit to achieve our algorithm. 
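Before turning to the detailed circuit, it is worth making explicit how a measured $P(|1_{anc}\rangle)$ is converted back into a distance estimate. Assuming $|x\rangle$ and $|y\rangle$ are normalized, the two relations above combine to

$$|\langle x | y \rangle| = \sqrt{1 - 2P(|1_{anc}\rangle)}, \qquad Euclidean \ distance = \sqrt{2 - 2\sqrt{1 - 2P(|1_{anc}\rangle)}}$$

so a larger measured probability of $|1\rangle$ corresponds directly to a larger estimated distance.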
#### Quantum K-Means circuit

<img src="../images/k_means.png">

## Data points

<table border="1">
<tr> <td>point num</td> <td>theta</td> <td>phi</td> <td>lam</td> <td>x</td> <td>y</td> </tr>
<tr> <td>1</td> <td>0.01</td> <td>pi</td> <td>pi</td> <td>0.710633</td> <td>0.703562</td> </tr>
<tr> <td>2</td> <td>0.02</td> <td>pi</td> <td>pi</td> <td>0.714142</td> <td>0.7</td> </tr>
<tr> <td>3</td> <td>0.03</td> <td>pi</td> <td>pi</td> <td>0.717633</td> <td>0.696421</td> </tr>
<tr> <td>4</td> <td>0.04</td> <td>pi</td> <td>pi</td> <td>0.721107</td> <td>0.692824</td> </tr>
<tr> <td>5</td> <td>0.05</td> <td>pi</td> <td>pi</td> <td>0.724562</td> <td>0.68921</td> </tr>
<tr> <td>6</td> <td>1.31</td> <td>pi</td> <td>pi</td> <td>0.886811</td> <td>0.462132</td> </tr>
<tr> <td>7</td> <td>1.32</td> <td>pi</td> <td>pi</td> <td>0.889111</td> <td>0.457692</td> </tr>
<tr> <td>8</td> <td>1.33</td> <td>pi</td> <td>pi</td> <td>0.891388</td> <td>0.453241</td> </tr>
<tr> <td>9</td> <td>1.34</td> <td>pi</td> <td>pi</td> <td>0.893643</td> <td>0.448779</td> </tr>
<tr> <td>10</td> <td>1.35</td> <td>pi</td> <td>pi</td> <td>0.895876</td> <td>0.444305</td> </tr>
</table>

## Quantum K-Means algorithm program

```
# import math lib
from math import pi

# import Qiskit
from qiskit import Aer, IBMQ, execute
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister

# import basic plot tools
from qiskit.tools.visualization import plot_histogram

# To use the local qasm simulator
backend = Aer.get_backend('qasm_simulator')
```

In this section we import the Qiskit and math packages needed by the following code. We run our algorithm on the local `qasm_simulator`; if you want to run it on a real quantum computer, please remove the "#" in front of "import Qconfig".

```
theta_list = [0.01, 0.02, 0.03, 0.04, 0.05, 1.31, 1.32, 1.33, 1.34, 1.35]
```

We use the number `pi` from the math library because it is needed for the `u3` gate, and we define the list of `theta` parameters that will be passed to the `u3` gate. As above, if you want to run on a real quantum computer, please remove the "#" symbol and configure your local Qconfig.py file.

```
# create Quantum Register called "qr" with 5 qubits
qr = QuantumRegister(5, name="qr")
# create Classical Register called "cr" with 5 bits
cr = ClassicalRegister(5, name="cr")
# Creating Quantum Circuit called "qc" involving your Quantum Register "qr"
# and your Classical Register "cr"
qc = QuantumCircuit(qr, cr, name="k_means")

# Define a loop to compute the distance between each pair of points
for i in range(9):
    for j in range(1, 10 - i):
        # Set the theta parameters for the two points being compared
        theta_1 = theta_list[i]
        theta_2 = theta_list[i + j]
        # Build the swap-test circuit with Qiskit
        qc.h(qr[2])
        qc.h(qr[1])
        qc.h(qr[4])
        qc.u3(theta_1, pi, pi, qr[1])
        qc.u3(theta_2, pi, pi, qr[4])
        qc.cswap(qr[2], qr[1], qr[4])
        qc.h(qr[2])
        qc.measure(qr[2], cr[2])
        qc.reset(qr)
        job = execute(qc, backend=backend, shots=1024)
        result = job.result()
        print(result)
        print('theta_1:' + str(theta_1))
        print('theta_2:' + str(theta_2))
        # print( result.get_data(qc))
plot_histogram(result.get_counts())
```

Here we build the distance-estimation circuit and run it for every pair of points. Because of the qubit coupling directions of ibmqx4, we take quantum registers 1, 2 and 4 as our working registers; if you want to run this program on another device, please redesign the circuit structure so that it still runs correctly.
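The loop above only prints the raw `Result` objects. As a minimal, hedged sketch (not part of the original tutorial), the measured counts can be converted into the distance estimate described in the introduction. The bit-ordering assumption here is that the ancilla, measured into classical bit 2 of the 5-bit register, appears third from the right in Qiskit's count keys.

```
from math import sqrt

def estimated_distance(counts, shots=1024):
    """Convert swap-test counts into an estimated Euclidean distance.

    Uses P(|1>) = 1/2 - 1/2*|<x|y>|^2 and distance = sqrt(2 - 2*|<x|y>|).
    Assumes the ancilla was measured into classical bit 2 (third character
    from the right in the count keys).
    """
    ones = sum(v for k, v in counts.items() if k[-3] == '1')
    p1 = ones / shots
    overlap = sqrt(max(0.0, 1.0 - 2.0 * p1))
    return sqrt(2.0 - 2.0 * overlap)

# Example usage with the last result produced by the loop above:
# print(estimated_distance(result.get_counts()))
```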
## Result analysis

In this program we use quantum registers 1, 2 and 4 as our working registers (a restriction that comes from using ibmqx4). Registers 1 and 4 store the input information about the two data points, and register 2 acts as the control register that decides whether the swap operator is applied. To estimate the distance between every pair of data points, we run the swap-test circuit in a loop. At the end we measure the control register: the probability of obtaining 1 indicates how far apart the two data points are, according to the relations given in the introduction.

## Reference

<cite>[1] Quantum algorithms for supervised and unsupervised machine learning (*see open access: [arXiv:1307.0411v2](https://arxiv.org/abs/1307.0411)*)</cite><a id='cite'></a>
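The tutorial stops at estimating pairwise distances. As a hedged sketch of the remaining classical K-Means steps described in the introduction, and assuming a hypothetical helper `pairwise_distance(theta_a, theta_b)` that builds the swap-test circuit for one pair of angles (as in the loop above), runs it, and converts the counts into a distance estimate (e.g. with `estimated_distance` from the earlier sketch):

```
import numpy as np

def assign_to_centroids(thetas, centroid_thetas, pairwise_distance):
    # Assign every point (parameterized by its theta) to its nearest centroid.
    return [int(np.argmin([pairwise_distance(t, c) for c in centroid_thetas]))
            for t in thetas]

def update_centroids(thetas, labels, k):
    # New centroid of each cluster = mean theta of the points assigned to it.
    return [float(np.mean([t for t, lab in zip(thetas, labels) if lab == i]))
            for i in range(k)]

# e.g. labels = assign_to_centroids(theta_list, [0.03, 1.33], pairwise_distance)
```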
github_jupyter
# Trigger Examples Triggers allow the user to specify a set of actions that are triggered by the result of a boolean expression. They provide flexibility to adapt what analysis and visualization actions are taken in situ. Triggers leverage Ascent's Query and Expression infrastructure. See Ascent's [Triggers](https://ascent.readthedocs.io/en/latest/Actions/Triggers.html) docs for deeper details on Triggers. ``` # cleanup any old results !./cleanup.sh # ascent + conduit imports import conduit import conduit.blueprint import ascent import numpy as np # helpers we use to create tutorial data from ascent_tutorial_jupyter_utils import img_display_width from ascent_tutorial_jupyter_utils import tutorial_gyre_example import matplotlib.pyplot as plt ``` ## Trigger Example 1 ### Using triggers to render when conditions occur ``` # Use triggers to render when conditions occur a = ascent.Ascent() a.open() # setup actions actions = conduit.Node() # declare a question to ask add_queries = actions.append() add_queries["action"] = "add_queries" # add our entropy query (q1) queries = add_queries["queries"] queries["q1/params/expression"] = "entropy(histogram(field('gyre'), num_bins=128))" queries["q1/params/name"] = "entropy" # declare triggers add_triggers = actions.append() add_triggers["action"] = "add_triggers" triggers = add_triggers["triggers"] # add a simple trigger (t1_ that fires at cycle 500 triggers["t1/params/condition"] = "cycle() == 500" triggers["t1/params/actions_file"] = "cycle_trigger_actions.yaml" # add trigger (t2) that fires when the change in entroy exceeds 0.5 # the history function allows you to access query results of previous # cycles. relative_index indicates how far back in history to look. # Looking at the plot of gyre entropy in the previous notebook, we see a jump # in entropy at cycle 200, so we expect the trigger to fire at cycle 200 triggers["t2/params/condition"] = "entropy - history(entropy, relative_index = 1) > 0.5" triggers["t2/params/actions_file"] = "entropy_trigger_actions.yaml" # view our full actions tree print(actions.to_yaml()) # gyre time varying params nsteps = 10 time = 0.0 delta_time = 0.5 for step in range(nsteps): # call helper that generates a double gyre time varying example mesh. # gyre ref :https://shaddenlab.berkeley.edu/uploads/LCS-tutorial/examples.html mesh = tutorial_gyre_example(time) # update the example cycle cycle = 100 + step * 100 mesh["state/cycle"] = cycle print("time: {} cycle: {}".format(time,cycle)) # publish mesh to ascent a.publish(mesh) # execute the actions a.execute(actions) # update time time = time + delta_time # retrieve the info node that contains the trigger and query results info = conduit.Node() a.info(info) # close ascent a.close() # we expect our cycle trigger to render only at cycle 500 ! ls cycle_trigger*.png # show the result image from the cycle trigger ascent.jupyter.AscentImageSequenceViewer(["cycle_trigger_out_500.png"]).show() # we expect our entropy trigger to render only at cycle 200 ! ls entropy_trigger*.png # show the result image from the entropy trigger ascent.jupyter.AscentImageSequenceViewer(["entropy_trigger_out_200.png"]).show() print(info["expressions"].to_yaml()) ```
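The two trigger definitions above point at external actions files (`cycle_trigger_actions.yaml` and `entropy_trigger_actions.yaml`) whose contents are not shown in this notebook. As a hedged sketch only — the real files may differ — the kind of actions they would hold can be expressed with the same `conduit.Node()` pattern used above and printed as YAML. The single pseudocolor scene of the `gyre` field is an assumption; the `cycle_trigger_out_` image prefix is taken from the rendered file names listed above.

```
# Hypothetical contents for cycle_trigger_actions.yaml, built with conduit.
trigger_actions = conduit.Node()
add_scenes = trigger_actions.append()
add_scenes["action"] = "add_scenes"
scenes = add_scenes["scenes"]
scenes["s1/plots/p1/type"] = "pseudocolor"
scenes["s1/plots/p1/field"] = "gyre"
scenes["s1/image_prefix"] = "cycle_trigger_out_"
# The YAML printed here is what the actions file would contain
print(trigger_actions.to_yaml())
```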
github_jupyter
## These notebooks can be found at https://github.com/jaspajjr/pydata-visualisation if you want to follow along https://matplotlib.org/users/intro.html Matplotlib is a library for making 2D plots of arrays in Python. * Has it's origins in emulating MATLAB, it can also be used in a Pythonic, object oriented way. * Easy stuff should be easy, difficult stuff should be possible ``` import matplotlib.pyplot as plt import numpy as np import pandas as pd %matplotlib inline ``` Everything in matplotlib is organized in a hierarchy. At the top of the hierarchy is the matplotlib “state-machine environment” which is provided by the matplotlib.pyplot module. At this level, simple functions are used to add plot elements (lines, images, text, etc.) to the current axes in the current figure. Pyplot’s state-machine environment behaves similarly to MATLAB and should be most familiar to users with MATLAB experience. The next level down in the hierarchy is the first level of the object-oriented interface, in which pyplot is used only for a few functions such as figure creation, and the user explicitly creates and keeps track of the figure and axes objects. At this level, the user uses pyplot to create figures, and through those figures, one or more axes objects can be created. These axes objects are then used for most plotting actions. ## Scatter Plot To start with let's do a really basic scatter plot: ``` plt.plot([0, 1, 2, 3, 4, 5], [0, 2, 4, 6, 8, 10]) x = [0, 1, 2, 3, 4, 5] y = [0, 2, 4, 6, 8, 10] plt.plot(x, y) ``` What if we don't want a line? ``` plt.plot([0, 1, 2, 3, 4, 5], [0, 2, 5, 7, 8, 10], marker='o', linestyle='') plt.xlabel('The X Axis') plt.ylabel('The Y Axis') plt.show(); ``` #### Simple example from matplotlib https://matplotlib.org/tutorials/intermediate/tight_layout_guide.html#sphx-glr-tutorials-intermediate-tight-layout-guide-py ``` def example_plot(ax, fontsize=12): ax.plot([1, 2]) ax.locator_params(nbins=5) ax.set_xlabel('x-label', fontsize=fontsize) ax.set_ylabel('y-label', fontsize=fontsize) ax.set_title('Title', fontsize=fontsize) fig, ax = plt.subplots() example_plot(ax, fontsize=24) fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2) # fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True) ax1.plot([0, 1, 2, 3, 4, 5], [0, 2, 5, 7, 8, 10]) ax2.plot([0, 1, 2, 3, 4, 5], [0, 2, 4, 9, 16, 25]) ax3.plot([0, 1, 2, 3, 4, 5], [0, 13, 18, 21, 23, 25]) ax4.plot([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]) plt.tight_layout() ``` ## Date Plotting ``` import pandas_datareader as pdr df = pdr.get_data_fred('GS10') df = df.reset_index() print(df.info()) df.head() fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) ax.plot_date(df['DATE'], df['GS10']) ``` ## Bar Plot ``` fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(111) x_data = [0, 1, 2, 3, 4] values = [20, 35, 30, 35, 27] ax.bar(x_data, values) ax.set_xticks(x_data) ax.set_xticklabels(('A', 'B', 'C', 'D', 'E')) ; ``` ## Matplotlib basics http://pbpython.com/effective-matplotlib.html ### Behind the scenes * matplotlib.backend_bases.FigureCanvas is the area onto which the figure is drawn * matplotlib.backend_bases.Renderer is the object which knows how to draw on the FigureCanvas * matplotlib.artist.Artist is the object that knows how to use a renderer to paint onto the canvas The typical user will spend 95% of their time working with the Artists. 
https://matplotlib.org/tutorials/intermediate/artists.html#sphx-glr-tutorials-intermediate-artists-py ``` fig, (ax1, ax2) = plt.subplots( nrows=1, ncols=2, sharey=True, figsize=(12, 8)) fig.suptitle("Main Title", fontsize=14, fontweight='bold'); x_data = [0, 1, 2, 3, 4] values = [20, 35, 30, 35, 27] ax1.barh(x_data, values); ax1.set_xlim([0, 55]) #ax1.set(xlabel='Unit of measurement', ylabel='Groups') ax1.set(title='Foo', xlabel='Unit of measurement') ax1.grid() ax2.barh(x_data, [y / np.sum(values) for y in values], color='r'); ax2.set_title('Transformed', fontweight='light') ax2.axvline(x=.1, color='k', linestyle='--') ax2.set(xlabel='Unit of measurement') # Worth noticing this ax2.set_axis_off(); fig.savefig('example_plot.png', dpi=80, bbox_inches="tight") ```
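To make the FigureCanvas/Renderer/Artist hierarchy described above a little more concrete, here is a small sketch (not part of the original notebook) that inspects the Artists behind an ordinary plot:

```
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
line, = ax.plot([0, 1, 2], [0, 1, 4], marker='o')
ax.set_title('Artist inspection')

print(type(fig))              # the Figure is the top-level container Artist
print(fig.get_children())     # its child Artists: the Axes, the figure patch, ...
print(ax.get_children()[:6])  # our Line2D, plus spines, axis objects, title text, ...
print(line.get_color())       # every Artist exposes its own properties
```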
github_jupyter
# Procedures and Functions Tutorial MLDB is the Machine Learning Database, and all machine learning operations are done via Procedures and Functions. Training a model happens via Procedures, and applying a model happens via Functions. The notebook cells below use `pymldb`'s `Connection` class to make [REST API](../../../../doc/#builtin/WorkingWithRest.md.html) calls. You can check out the [Using `pymldb` Tutorial](../../../../doc/nblink.html#_tutorials/Using pymldb Tutorial) for more details. ``` from pymldb import Connection mldb = Connection("http://localhost") ``` ## Loading a Dataset The classic [Iris Flower Dataset](http://en.wikipedia.org/wiki/Iris_flower_data_set) isn't very big but it's well-known and easy to reason about so it's a good example dataset to use for machine learning examples. We can import it directly from a remote URL: ``` mldb.put('/v1/procedures/import_iris', { "type": "import.text", "params": { "dataFileUrl": "file://mldb/mldb_test_data/iris.data", "headers": [ "sepal length", "sepal width", "petal length", "petal width", "class" ], "outputDataset": "iris", "runOnCreation": True } }) ``` ## A quick look at the data We can use the [Query API](../../../../doc/#builtin/sql/QueryAPI.md.html) to get the data into a Pandas DataFrame to take a quick look at it. ``` df = mldb.query("select * from iris") df.head() %matplotlib inline import seaborn as sns, pandas as pd sns.pairplot(df, hue="class", size=2.5) ``` ## Unsupervised Machine Learning with a `kmeans.train` Procedure We will create and run a [Procedure](../../../../doc/#builtin/procedures/Procedures.md.html) of type [`kmeans.train`](../../../../doc/#builtin/procedures/KmeansProcedure.md.html). This will train an unsupervised K-Means model and use it to assign each row in the input to a cluster, in the output dataset. ``` mldb.put('/v1/procedures/iris_train_kmeans', { 'type' : 'kmeans.train', 'params' : { 'trainingData' : 'select * EXCLUDING(class) from iris', 'outputDataset' : 'iris_clusters', 'numClusters' : 3, 'metric': 'euclidean', "runOnCreation": True } }) ``` Now we can look at the output dataset and compare the clusters the model learned with the three types of flower in the dataset. ``` mldb.query(""" select pivot(class, num) as * from ( select cluster, class, count(*) as num from merge(iris_clusters, iris) group by cluster, class ) group by cluster """) ``` As you can see, the K-means algorithm doesn't do a great job of clustering this data (as is mentioned in the Wikipedia article!). ## Supervised Machine Learning with `classifier.train` and `.test` Procedures We will now create and run a [Procedure](../../../../doc/#builtin/procedures/Procedures.md.html) of type [`classifier.train`](../../../../doc/#builtin/procedures/Classifier.md.html). The configuration below will use 20% of the data to train a decision tree to classify rows into the three classes of Iris. The output of this procedure is a [Function](../../../../doc/#builtin/functions/Functions.md.html), which we will be able to call from REST or SQL. ``` mldb.put('/v1/procedures/iris_train_classifier', { 'type' : 'classifier.train', 'params' : { 'trainingData' : """ select {* EXCLUDING(class)} as features, class as label from iris where rowHash() % 5 = 0 """, "algorithm": "dt", "modelFileUrl": "file://models/iris.cls", "mode": "categorical", "functionName": "iris_classify", "runOnCreation": True } }) ``` We can now test the classifier we just trained on the subset of the data we didn't use for training. 
To do so we use a procedure of type [`classifier.test`](../../../../doc/#builtin/procedures/Accuracy.md.html). ``` rez = mldb.put('/v1/procedures/iris_test_classifier', { 'type' : 'classifier.test', 'params' : { 'testingData' : """ select iris_classify({ features: {* EXCLUDING(class)} }) as score, class as label from iris where rowHash() % 5 != 0 """, "mode": "categorical", "runOnCreation": True } }) runResults = rez.json()["status"]["firstRun"]["status"] print rez ``` The procedure returns a confusion matrix, which you can compare with the one that resulted from the K-means procedure. ``` pd.DataFrame(runResults["confusionMatrix"])\ .pivot_table(index="actual", columns="predicted", fill_value=0) ``` As you can see, the decision tree does a much better job of classifying the data than the K-means model, using 20% of the examples as training data. The procedure also returns standard classification statistics on how the classifier performed on the test set. Below are performance statistics for each label: ``` pd.DataFrame.from_dict(runResults["labelStatistics"]).transpose() ``` They are also available, averaged over all labels: ``` pd.DataFrame.from_dict({"weightedStatistics": runResults["weightedStatistics"]}) ``` ### Scoring new examples We can call the Function REST API endpoint to classify a never-before-seen set of measurements like this: ``` mldb.get('/v1/functions/iris_classify/application', input={ "features":{ "petal length": 1, "petal width": 2, "sepal length": 3, "sepal width": 4 } }) ``` ## Where to next? Check out the other [Tutorials and Demos](../../../../doc/#builtin/Demos.md.html). You can also take a look at the [`classifier.experiment`](../../../../doc/#builtin/procedures/ExperimentProcedure.md.html) procedure type that can be used to train and test a classifier in a single call.
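As a small hedged addendum to the scoring example above: because MLDB functions can be called from SQL as well as from REST, the trained classifier can also be applied inline in a query, reusing the same feature expression that the `classifier.test` procedure used. Treat this as a sketch rather than part of the original tutorial.

```
# Apply the trained iris_classify function from SQL (sketch; mirrors the
# expression used in the classifier.test call above).
mldb.query("""
    select iris_classify({features: {* EXCLUDING(class)}}) as score,
           class as label
    from iris
    limit 5
""")
```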
github_jupyter
### Analysis of motifs using Motif Miner (RINGS tool that employs alpha frequent subtree mining) ``` csv_files = ["ABA_14361_100ug_v5.0_DATA.csv", "ConA_13799-10ug_V5.0_DATA.csv", 'PNA_14030_10ug_v5.0_DATA.csv', "RCAI_10ug_14110_v5.0_DATA.csv", "PHA-E-10ug_13853_V5.0_DATA.csv", "PHA-L-10ug_13856_V5.0_DATA.csv", "LCA_10ug_13934_v5.0_DATA.csv", "SNA_10ug_13631_v5.0_DATA.csv", "MAL-I_10ug_13883_v5.0_DATA.csv", "MAL_II_10ug_13886_v5.0_DATA.csv", "GSL-I-B4_10ug_13920_v5.0_DATA.csv", "jacalin-1ug_14301_v5.0_DATA.csv", 'WGA_14057_1ug_v5.0_DATA.csv', "UEAI_100ug_13806_v5.0_DATA.csv", "SBA_14042_10ug_v5.0_DATA.csv", "DBA_100ug_13897_v5.0_DATA.csv", "PSA_14040_10ug_v5.0_DATA.csv", "HA_PuertoRico_8_34_13829_v5_DATA.csv", 'H3N8-HA_16686_v5.1_DATA.csv', "Human-DC-Sign-tetramer_15320_v5.0_DATA.csv"] csv_file_normal_names = [ r"\textit{Agaricus bisporus} agglutinin (ABA)", r"Concanavalin A (Con A)", r'Peanut agglutinin (PNA)', r"\textit{Ricinus communis} agglutinin I (RCA I/RCA\textsubscript{120})", r"\textit{Phaseolus vulgaris} erythroagglutinin (PHA-E)", r"\textit{Phaseolus vulgaris} leucoagglutinin (PHA-L)", r"\textit{Lens culinaris} agglutinin (LCA)", r"\textit{Sambucus nigra} agglutinin (SNA)", r"\textit{Maackia amurensis} lectin I (MAL-I)", r"\textit{Maackia amurensis} lectin II (MAL-II)", r"\textit{Griffonia simplicifolia} Lectin I isolectin B\textsubscript{4} (GSL I-B\textsubscript{4})", r"Jacalin", r'Wheat germ agglutinin (WGA)', r"\textit{Ulex europaeus} agglutinin I (UEA I)", r"Soybean agglutinin (SBA)", r"\textit{Dolichos biflorus} agglutinin (DBA)", r"\textit{Pisum sativum} agglutinin (PSA)", r"Influenza hemagglutinin (HA) (A/Puerto Rico/8/34) (H1N1)", r'Influenza HA (A/harbor seal/Massachusetts/1/2011) (H3N8)', r"Human DC-SIGN tetramer"] import sys import os import pandas as pd import numpy as np from scipy import interp sys.path.append('..') from ccarl.glycan_parsers.conversions import kcf_to_digraph, cfg_to_kcf from ccarl.glycan_plotting import draw_glycan_diagram from ccarl.glycan_graph_methods import generate_digraph_from_glycan_string from ccarl.glycan_features import generate_features_from_subtrees import ccarl.glycan_plotting from sklearn.linear_model import LogisticRegressionCV, LogisticRegression from sklearn.metrics import matthews_corrcoef, make_scorer, roc_curve, auc import matplotlib.pyplot as plt from collections import defaultdict aucs = defaultdict(list) ys = defaultdict(list) probs = defaultdict(list) motifs = defaultdict(list) for fold in [1,2,3,4,5]: print(f"Running fold {fold}...") for csv_file in csv_files: alpha = 0.8 minsup = 0.2 input_file = f'./temp_{csv_file}' training_data = pd.read_csv(f"../Data/CV_Folds/fold_{fold}/training_set_{csv_file}") test_data = pd.read_csv(f"../Data/CV_Folds/fold_{fold}/test_set_{csv_file}") pos_glycan_set = training_data['glycan'][training_data.binding == 1].values kcf_string = '\n'.join([cfg_to_kcf(x) for x in pos_glycan_set]) with open(input_file, 'w') as f: f.write(kcf_string) min_sup = int(len(pos_glycan_set) * minsup) subtrees = os.popen(f"ruby Miner_cmd.rb {min_sup} {alpha} {input_file}").read() subtree_graphs = [kcf_to_digraph(x) for x in subtrees.split("///")[0:-1]] motifs[csv_file].append(subtree_graphs) os.remove(input_file) binding_class = training_data.binding.values glycan_graphs = [generate_digraph_from_glycan_string(x, parse_linker=True, format='CFG') for x in training_data.glycan] glycan_graphs_test = [generate_digraph_from_glycan_string(x, parse_linker=True, format='CFG') for x in test_data.glycan] features = 
[generate_features_from_subtrees(subtree_graphs, glycan) for glycan in glycan_graphs] features_test = [generate_features_from_subtrees(subtree_graphs, glycan) for glycan in glycan_graphs_test] logistic_clf = LogisticRegression(penalty='l2', C=100, solver='lbfgs', class_weight='balanced', max_iter=1000) X = features y = binding_class logistic_clf.fit(X, y) y_test = test_data.binding.values X_test = features_test fpr, tpr, _ = roc_curve(y_test, logistic_clf.predict_proba(X_test)[:,1], drop_intermediate=False) aucs[csv_file].append(auc(fpr, tpr)) ys[csv_file].append(y_test) probs[csv_file].append(logistic_clf.predict_proba(X_test)[:,1]) # Assess the number of subtrees generated for each CV round. subtree_lengths = defaultdict(list) for fold in [1,2,3,4,5]: print(f"Running fold {fold}...") for csv_file in csv_files: alpha = 0.8 minsup = 0.2 input_file = f'./temp_{csv_file}' training_data = pd.read_csv(f"../Data/CV_Folds/fold_{fold}/training_set_{csv_file}") test_data = pd.read_csv(f"../Data/CV_Folds/fold_{fold}/test_set_{csv_file}") pos_glycan_set = training_data['glycan'][training_data.binding == 1].values kcf_string = '\n'.join([cfg_to_kcf(x) for x in pos_glycan_set]) with open(input_file, 'w') as f: f.write(kcf_string) min_sup = int(len(pos_glycan_set) * minsup) subtrees = os.popen(f"ruby Miner_cmd.rb {min_sup} {alpha} {input_file}").read() subtree_graphs = [kcf_to_digraph(x) for x in subtrees.split("///")[0:-1]] subtree_lengths[csv_file].append(len(subtree_graphs)) os.remove(input_file) subtree_lengths = [y for x in subtree_lengths.values() for y in x] print(np.mean(subtree_lengths)) print(np.max(subtree_lengths)) print(np.min(subtree_lengths)) def plot_multiple_roc(data): '''Plot multiple ROC curves. Prints out key AUC values (mean, median etc). Args: data (list): A list containing [y, probs] for each model, where: y: True class labels probs: Predicted probabilities Returns: Figure, Axes, Figure, Axes ''' mean_fpr = np.linspace(0, 1, 100) fig, axes = plt.subplots(figsize=(4, 4)) ax = axes ax.set_title('') #ax.legend(loc="lower right") ax.set_xlabel('False Positive Rate') ax.set_ylabel('True Positive Rate') ax.set_aspect('equal', adjustable='box') auc_values = [] tpr_list = [] for y, probs in data: #data_point = data[csv_file] #y = data_point[7] # test binding #X = data_point[8] # test features #logistic_clf = data_point[0] # model fpr, tpr, _ = roc_curve(y, probs, drop_intermediate=False) tpr_list.append(interp(mean_fpr, fpr, tpr)) auc_values.append(auc(fpr, tpr)) ax.plot(fpr, tpr, color='blue', alpha=0.1, label=f'ROC curve (area = {auc(fpr, tpr): 2.3f})') ax.plot([0,1], [0,1], linestyle='--', color='grey', linewidth=0.8, dashes=(5, 10)) mean_tpr = np.mean(tpr_list, axis=0) median_tpr = np.median(tpr_list, axis=0) upper_tpr = np.percentile(tpr_list, 75, axis=0) lower_tpr = np.percentile(tpr_list, 25, axis=0) ax.plot(mean_fpr, median_tpr, color='black') ax.fill_between(mean_fpr, lower_tpr, upper_tpr, color='grey', alpha=.5, label=r'$\pm$ 1 std. 
dev.') fig.savefig("Motif_Miner_CV_ROC_plot_all_curves.svg") fig2, ax2 = plt.subplots(figsize=(4, 4)) ax2.hist(auc_values, range=[0.5,1], bins=10, rwidth=0.9, color=(0, 114/255, 178/255)) ax2.set_xlabel("AUC value") ax2.set_ylabel("Counts") fig2.savefig("Motif_Miner_CV_AUC_histogram.svg") print(f"Mean AUC value: {np.mean(auc_values): 1.3f}") print(f"Median AUC value: {np.median(auc_values): 1.3f}") print(f"IQR of AUC values: {np.percentile(auc_values, 25): 1.3f} - {np.percentile(auc_values, 75): 1.3f}") return fig, axes, fig2, ax2, auc_values # Plot ROC curves for all test sets roc_data = [[y, prob] for y_fold, prob_fold in zip(ys.values(), probs.values()) for y, prob in zip(y_fold, prob_fold)] _, _, _, _, auc_values = plot_multiple_roc(roc_data) auc_values_ccarl = [0.950268817204301, 0.9586693548387097, 0.9559811827956988, 0.8686155913978494, 0.9351222826086956, 0.989010989010989, 0.9912587412587414, 0.9090909090909092, 0.9762626262626264, 0.9883597883597884, 0.9065533980582524, 0.9417475728155339, 0.8268608414239482, 0.964349376114082, 0.9322638146167558, 0.9178037686809616, 0.96361273554256, 0.9362139917695472, 0.9958847736625515, 0.9526748971193415, 0.952300785634119, 0.9315375982042648, 0.9705387205387206, 0.9865319865319865, 0.9849773242630385, 0.9862385321100917, 0.9862385321100918, 0.9606481481481481, 0.662037037037037, 0.7796296296296297, 0.9068627450980392, 0.915032679738562, 0.9820261437908496, 0.9893790849673203, 0.9882988298829882, 0.9814814814814815, 1.0, 0.8439153439153441, 0.9859813084112149, 0.9953271028037383, 0.8393308080808081, 0.8273358585858586, 0.7954545454545453, 0.807070707070707, 0.8966329966329966, 0.8380952380952381, 0.6201058201058202, 0.7179894179894181, 0.6778846153846154, 0.75, 0.9356060606060607, 0.8619528619528619, 0.8787878787878789, 0.9040816326530613, 0.7551020408163266, 0.9428694158075602, 0.9226804123711341, 0.8711340206185567, 0.7840909090909091, 0.8877840909090909, 0.903225806451613, 0.8705594120049, 0.9091465904450796, 0.8816455696202531, 0.8521097046413502, 0.8964521452145213, 0.9294554455445544, 0.8271452145214522, 0.8027272727272727, 0.8395454545454546, 0.8729967948717949, 0.9306891025641025, 0.9550970873786407, 0.7934686672550749, 0.8243601059135041, 0.8142100617828772, 0.9179611650485436, 0.8315533980582525, 0.7266990291262136, 0.9038834951456312, 0.9208916083916084, 0.7875, 0.9341346153846154, 0.9019230769230768, 0.9086538461538461, 0.9929245283018868, 0.9115566037735848, 0.9952830188679246, 0.9658018867924528, 0.7169811320754716, 0.935981308411215, 0.9405660377358491, 0.9905660377358491, 0.9937106918238994, 0.9302935010482181, 0.7564814814814815, 0.9375, 0.8449074074074074, 0.8668981481481483, 0.7978971962616823] auc_value_means = [np.mean(auc_values[x*5:x*5+5]) for x in range(int(len(auc_values) / 5))] auc_value_means_ccarl = [np.mean(auc_values_ccarl[x*5:x*5+5]) for x in range(int(len(auc_values_ccarl) / 5))] auc_value_mean_glymmr = np.array([0.6067939 , 0.76044574, 0.66786624, 0.69578298, 0.81659623, 0.80536403, 0.77231548, 0.96195032, 0.70013384, 0.60017685, 0.77336818, 0.78193305, 0.66269668, 0.70333122, 0.54247748, 0.63003707, 0.79619231, 0.85141509, 0.9245296 , 0.63366329]) auc_value_mean_glymmr_best = np.array([0.77559242, 0.87452658, 0.75091636, 0.7511371 , 0.87450697, 0.82895628, 0.81083123, 0.96317065, 0.75810185, 0.82680149, 0.84747054, 0.8039597 , 0.69651882, 0.73431593, 0.582194 , 0.67407767, 0.83049825, 0.88891509, 0.9345188 , 0.72702016]) auc_value_motiffinder = [0.9047619047619048, 0.9365601503759399, 0.6165413533834586, 
0.9089068825910931, 0.4962962962962963, 0.6358816964285713, 0.8321078431372548, 0.8196576151121606, 0.8725400457665904, 0.830220713073005, 0.875, 0.7256367663344407, 0.8169291338582677, 0.9506818181818182, 0.7751351351351351, 0.9362947658402204, 0.6938461538461539, 0.6428571428571428, 0.7168021680216802, 0.5381136950904392] #Note, only from a single test-train split. import seaborn as sns sns.set(style="ticks") plot_data = np.array([auc_value_mean_glymmr, auc_value_mean_glymmr_best, auc_value_motiffinder, auc_value_means, auc_value_means_ccarl]).T ax = sns.violinplot(data=plot_data, cut=2, inner='quartile') sns.swarmplot(data=plot_data, color='black') ax.set_ylim([0.5, 1.05]) ax.set_xticklabels(["GLYMMR\n(mean)", "GLYMMR\n(best)", "MotifFinder", "Glycan\nMiner Tool", "CCARL"]) #ax.grid('off') ax.set_ylabel("AUC") ax.figure.savefig('method_comparison_violin_plot.svg') auc_value_means_ccarl print("CCARL Performance") print(f"Median AUC value: {np.median(auc_value_means_ccarl): 1.3f}") print(f"IQR of AUC values: {np.percentile(auc_value_means_ccarl, 25): 1.3f} - {np.percentile(auc_value_means_ccarl, 75): 1.3f}") print("Glycan Miner Tool Performance") print(f"Median AUC value: {np.median(auc_value_means): 1.3f}") print(f"IQR of AUC values: {np.percentile(auc_value_means, 25): 1.3f} - {np.percentile(auc_value_means, 75): 1.3f}") print("Glycan Miner Tool Performance") print(f"Median AUC value: {np.median(auc_value_mean_glymmr_best): 1.3f}") print(f"IQR of AUC values: {np.percentile(auc_value_mean_glymmr_best, 25): 1.3f} - {np.percentile(auc_value_mean_glymmr_best, 75): 1.3f}") print("Glycan Miner Tool Performance") print(f"Median AUC value: {np.median(auc_value_mean_glymmr): 1.3f}") print(f"IQR of AUC values: {np.percentile(auc_value_mean_glymmr, 25): 1.3f} - {np.percentile(auc_value_mean_glymmr, 75): 1.3f}") from matplotlib.backends.backend_pdf import PdfPages sns.reset_orig() import networkx as nx for csv_file in csv_files: with PdfPages(f"./motif_miner_motifs/glycan_motif_miner_motifs_{csv_file}.pdf") as pdf: for motif in motifs[csv_file][0]: fig, ax = plt.subplots() ccarl.glycan_plotting.draw_glycan_diagram(motif, ax) pdf.savefig(fig) plt.close(fig) glymmr_mean_stdev = np.array([0.15108904, 0.08300011, 0.11558078, 0.05259819, 0.061275 , 0.09541182, 0.09239553, 0.05114523, 0.05406571, 0.16180131, 0.10345311, 0.06080207, 0.0479003 , 0.09898648, 0.06137992, 0.09813596, 0.07010635, 0.14010784, 0.05924527, 0.13165457]) glymmr_best_stdev = np.array([0.08808868, 0.04784959, 0.13252895, 0.03163248, 0.04401516, 0.08942411, 0.08344247, 0.05714308, 0.05716086, 0.05640053, 0.08649275, 0.05007289, 0.05452531, 0.05697662, 0.0490626 , 0.1264917 , 0.04994508, 0.1030053 , 0.03359648, 0.12479809]) auc_value_std_ccarl = [np.std(auc_values_ccarl[x*5:x*5+5]) for x in range(int(len(auc_values_ccarl) / 5))] print(r"Lectin & GLYMMR(mean) & GLYMMR(best) & Glycan Miner Tool & MotifFinder & CCARL \\ \hline") for i, csv_file, name in zip(list(range(len(csv_files))), csv_files, csv_file_normal_names): print(f"{name} & {auc_value_mean_glymmr[i]:0.3f} ({glymmr_mean_stdev[i]:0.3f}) & {auc_value_mean_glymmr_best[i]:0.3f} ({glymmr_best_stdev[i]:0.3f}) \ & {np.mean(aucs[csv_file]):0.3f} ({np.std(aucs[csv_file]):0.3f}) & {auc_value_motiffinder[i]:0.3f} & {auc_value_means_ccarl[i]:0.3f} ({auc_value_std_ccarl[i]:0.3f}) \\\\") ```
github_jupyter
``` import json import random import numpy as np import tensorflow as tf from collections import deque from keras.models import Sequential from keras.optimizers import RMSprop from keras.layers import Dense, Flatten from keras.layers.convolutional import Conv2D from keras import backend as K import datetime import itertools import matplotlib.pyplot as plt import pandas as pd import scipy as sp import time import math from matplotlib.colors import LinearSegmentedColormap import colorsys import numpy as np from data_retrieval_relocation_3ksol_reloc import INSTANCEProvider from kbh_yard_b2b_relocation import KBH_Env #This is the environment of the shunting yard from dqn_kbh_colfax_relocation_test_agent import DQNAgent # this function returns random colors for visualisation of learning. def rand_cmap(nlabels, type='soft', first_color_black=True, last_color_black=False): # Generate soft pastel colors, by limiting the RGB spectrum if type == 'soft': low = 0.6 high = 0.95 randRGBcolors = [(np.random.uniform(low=low, high=high), np.random.uniform(low=low, high=high), np.random.uniform(low=low, high=high)) for i in range(nlabels)] if first_color_black: randRGBcolors[0] = [0, 0, 0] if last_color_black: randRGBcolors[-1] = [0, 0, 0] random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels) return random_colormap #1525445230 is the 185k expensive relocation model. for model_nr in ['1525445230']: #which model to load. test_case = model_nr #LOAD THE INSTANCE PROVIDER ig = INSTANCEProvider() instances = ig.instances # Create environment KBH yrd = KBH_Env() # Create the DQNAgent with the CNN approximation of the Q-function and its experience replay and training functions. # load the trained model. agent = DQNAgent(yrd, True, test_case) # set epsilon to 0 to act just greedy agent.epsilon = 0 #new_cmap = rand_cmap(200, type='soft', first_color_black=True, last_color_black=False, verbose=True) visualization = False n = len(instances) # result vectors original_lengths = [] terminated_at_step = [] success = [] relocations = [] print_count = 0 # train types different tracks? type_step_track = [] for instance in instances: nr_relocations = 0 if print_count % 100 == 0: print(print_count) print_count = print_count + 1 #Initialize problem event_list = ig.get_instance(instance) steps, t, total_t, score= len(event_list), 0, 0, 0 state = yrd.reset(event_list) # Get first observation based on the first train arrival. history = np.reshape(state, ( 1, yrd.shape[0], yrd.shape[1], yrd.shape[2])) # reshape state into tensor, which we call history. done, busy_relocating = False, False if visualization: #visualize learning new_cmap = rand_cmap(200, type='soft', first_color_black=True, last_color_black=False) if visualization == True: plt.imshow(np.float32(history[0][0]), cmap=new_cmap, interpolation='nearest') plt.show() while not done: action = agent.get_action(history) # RL choose action based on observation if visualization == True: print(agent.model.predict(history)) print(action+1) # # RL take action and get next observation and reward # # note the +1 at action # save for arrival activities the parking location event_list_temp = event_list.reset_index(drop=True).copy() if event_list_temp.event_type[0]=='arrival': train_type = event_list_temp.composition[0] type_step_track.append({'type': train_type, 'action': action+1, 'step':t, 'instance_id': instance}) # based on that action now let environment go to new state event = event_list.iloc[0] # check if after this we are done... 
done_ = True if len(event_list) == 1 else False # then there is no next event # if done_: # print("Reached the end of a problem!") if busy_relocating: # here we do not drop an event from the event list. coming_arrivals = event_list.loc[event_list['event_type'] == 'arrival'].reset_index(drop=True) coming_departures = event_list.loc[event_list['event_type'] == 'departure'].reset_index(drop=True) next_state, reward, done = yrd.reloc_destination_step(event, event_list, action+1, coming_arrivals, coming_departures, done_) nr_relocations += 1 busy_relocating = False else: # These operations below are expensive: maybe just use indexing. event_list.drop(event_list.index[:1], inplace=True) coming_arrivals = event_list.loc[event_list['event_type'] == 'arrival'].reset_index(drop=True) coming_departures = event_list.loc[event_list['event_type'] == 'departure'].reset_index(drop=True) # do step next_state, reward, done = yrd.step(action+1, coming_arrivals, coming_departures, event, event_list, done_) busy_relocating = True if reward == -0.5 else False history_ = np.float32(np.reshape(next_state, (1, yrd.shape[0], yrd.shape[1], yrd.shape[2]))) score += reward # log direct reward of action if visualization == True: #show action plt.imshow(np.float32(history_[0][0]), cmap=new_cmap, interpolation='nearest') plt.show() time.sleep(0.05) if reward == -1: time.sleep(1) print(reward) if done: # based on what the environment returns. #print('ended at step' , t+1) #print('original length', steps) original_lengths.append(steps) terminated_at_step.append(t+1) relocations.append(nr_relocations) if int(np.unique(history_)[0]) == 1: #then we are in win state success.append(1) else: success.append(0) break; history = history_ # next state now becomes the current state. t += 1 # next step in this episode #save data needed for Entropy calculations. 
df_type_step_track = pd.DataFrame.from_records(type_step_track) df_type_step_track['strtype'] = df_type_step_track.apply(lambda row: str(row.type), axis = 1) df_type_step_track.strtype = df_type_step_track.strtype.astype('category') filename = 'data_'+model_nr+'_relocation_arrival_actions.csv' df_type_step_track.to_csv(filename) # analysis_runs = pd.DataFrame( # {'instance_id': instances, # 'original_length': original_lengths, # 'terminated_at_step': terminated_at_step # }) # analysis_runs['solved'] = analysis_runs.apply(lambda row: 1 if row.original_length == row.terminated_at_step else 0, axis =1 ) # analysis_runs['tried'] = analysis_runs.apply(lambda row: 1 if row.terminated_at_step != -1 else 0, axis =1) # analysis_runs['percentage'] = analysis_runs.apply(lambda row: row.solved/755, axis=1) # analysis_runs.to_csv('best_model_solved_instances.csv') # print('Model: ', model_nr) # summary = analysis_runs.groupby('original_length', as_index=False)[['solved', 'tried', 'percentage']].sum() # print(summary) # #print hist # %matplotlib inline # #%% # # analyse the parking actions per step and train type # df_type_step_track = pd.DataFrame.from_records(type_step_track) # bins = [1,2,3,4,5,6,7,8,9,10] # plt.hist(df_type_step_track.action, bins, align='left') # #prepare for save # df_type_step_track['strtype'] = df_type_step_track.apply(lambda row: str(row.type), axis = 1) # df_type_step_track.strtype = df_type_step_track.strtype.astype('category') # filename = 'data_'+model_nr+'_paper.csv' # df_type_step_track.to_csv(filename) analysis_runs = pd.DataFrame( {'instance_id': instances, 'original_length': original_lengths, 'terminated_at_step': terminated_at_step, 'success': success, 'nr_relocations': relocations }) analysis_runs.sort_values('terminated_at_step') print(analysis_runs.loc[analysis_runs.success == 0].instance_id.to_string(index=False)) analysis_runs.loc[analysis_runs.success == 1].copy().groupby('nr_relocations')[['instance_id']].count() summary = analysis_runs.groupby('original_length', as_index=False)[['success']].sum() print(summary) summary = analysis_runs.groupby('original_length', as_index=False)[['success']].mean() print(summary) max_reloc = max(analysis_runs.nr_relocations) print(max_reloc) plt.hist(analysis_runs.nr_relocations, bins=range(0,max_reloc+2), align='left') import seaborn as sns sns.set(style="darkgrid") g = sns.FacetGrid(analysis_runs, col="original_length", margin_titles=True) bins = range(0,max_reloc+2) g.map(plt.hist, "nr_relocations", color="steelblue", bins=bins, lw=0, align='left') print(analysis_runs.loc[analysis_runs.success == 1].groupby('original_length', as_index=False)[['nr_relocations']].mean()) ``` # CODE HAS BEEN RUN UNTILL HERE. . . . . . . . . v # analysis of mistakes ``` analysis_runs.loc[analysis_runs.success == 0].sort_values('terminated_at_step') #plt.hist(analysis_runs.loc[analysis_runs.success == 0].terminated_at_step, bins=8) len(analysis_runs.loc[analysis_runs.success == 0]) analysis_runs['instance_size'] = analysis_runs.apply(lambda row: str(row.original_length).replace('37', '14').replace('41', '15').replace('43', '16').replace('46','17'), axis=1) import seaborn as sns sns.set(style="darkgrid") bins = [0,5,10,15,20,25,30,35,40,45,50] g = sns.FacetGrid(analysis_runs.loc[analysis_runs.success == 0], col="instance_size", margin_titles=True) g.set(ylim=(0, 100), xlim=(0,50)) g.map(plt.hist, "terminated_at_step", color="steelblue", bins=bins, lw=0) sns.plt.savefig('185k_failures.eps') ```
github_jupyter
<a href="https://colab.research.google.com/github/dauparas/tensorflow_examples/blob/master/VAE_cell_cycle.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> https://github.com/PMBio/scLVM/blob/master/tutorials/tcell_demo.ipynb Variational Autoencoder Model (VAE) with latent subspaces based on: https://arxiv.org/pdf/1812.06190.pdf ``` #Step 1: import dependencies from tensorflow.keras import layers import numpy as np import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from keras import regularizers import time from __future__ import division import tensorflow as tf import tensorflow_probability as tfp tfd = tfp.distributions %matplotlib inline plt.style.use('dark_background') import pandas as pd import os from matplotlib import cm import h5py import scipy as SP import pylab as PL data = os.path.join('data_Tcells_normCounts.h5f') f = h5py.File(data,'r') Y = f['LogNcountsMmus'][:] # gene expression matrix tech_noise = f['LogVar_techMmus'][:] # technical noise genes_het_bool=f['genes_heterogen'][:] # index of heterogeneous genes geneID = f['gene_names'][:] # gene names cellcyclegenes_filter = SP.unique(f['cellcyclegenes_filter'][:].ravel() -1) # idx of cell cycle genes from GO cellcyclegenes_filterCB = f['ccCBall_gene_indices'][:].ravel() -1 # idx of cell cycle genes from cycle base ... # filter cell cycle genes idx_cell_cycle = SP.union1d(cellcyclegenes_filter,cellcyclegenes_filterCB) # determine non-zero counts idx_nonzero = SP.nonzero((Y.mean(0)**2)>0)[0] idx_cell_cycle_noise_filtered = SP.intersect1d(idx_cell_cycle,idx_nonzero) # subset gene expression matrix Ycc = Y[:,idx_cell_cycle_noise_filtered] plt = PL.subplot(1,1,1); PL.imshow(Ycc,cmap=cm.RdBu,vmin=-3,vmax=+3,interpolation='None'); #PL.colorbar(); plt.set_xticks([]); plt.set_yticks([]); PL.xlabel('genes'); PL.ylabel('cells'); X = np.delete(Y, idx_cell_cycle_noise_filtered, axis=1) X = Y #base case U = Y[:,idx_cell_cycle_noise_filtered] mean = np.mean(X, axis=0) variance = np.var(X, axis=0) indx_small_mean = np.argwhere(mean < 0.00001) X = np.delete(X, indx_small_mean, axis=1) mean = np.mean(X, axis=0) variance = np.var(X, axis=0) fano = variance/mean print(fano.shape) indx_small_fano = np.argwhere(fano < 1.0) X = np.delete(X, indx_small_fano, axis=1) mean = np.mean(X, axis=0) variance = np.var(X, axis=0) fano = variance/mean print(fano.shape) #Reconstruction loss def x_given_z(z, output_size): with tf.variable_scope('M/x_given_w_z'): act = tf.nn.leaky_relu h = z h = tf.layers.dense(h, 8, act) h = tf.layers.dense(h, 16, act) h = tf.layers.dense(h, 32, act) h = tf.layers.dense(h, 64, act) h = tf.layers.dense(h, 128, act) h = tf.layers.dense(h, 256, act) loc = tf.layers.dense(h, output_size) #log_variance = tf.layers.dense(x, latent_size) #scale = tf.nn.softplus(log_variance) scale = 0.01*tf.ones(tf.shape(loc)) return tfd.MultivariateNormalDiag(loc, scale) #KL term for z def z_given_x(x, latent_size): #+ with tf.variable_scope('M/z_given_x'): act = tf.nn.leaky_relu h = x h = tf.layers.dense(h, 256, act) h = tf.layers.dense(h, 128, act) h = tf.layers.dense(h, 64, act) h = tf.layers.dense(h, 32, act) h = tf.layers.dense(h, 16, act) h = tf.layers.dense(h, 8, act) loc = tf.layers.dense(h,latent_size) log_variance = tf.layers.dense(h, latent_size) scale = tf.nn.softplus(log_variance) # scale = 0.01*tf.ones(tf.shape(loc)) return tfd.MultivariateNormalDiag(loc, scale) def z_given(latent_size): with tf.variable_scope('M/z_given'): loc = 
tf.zeros(latent_size) scale = 0.01*tf.ones(tf.shape(loc)) return tfd.MultivariateNormalDiag(loc, scale) #Connect encoder and decoder and define the loss function tf.reset_default_graph() x_in = tf.placeholder(tf.float32, shape=[None, X.shape[1]], name='x_in') x_out = tf.placeholder(tf.float32, shape=[None, X.shape[1]], name='x_out') z_latent_size = 2 beta = 0.000001 #KL_z zI = z_given(z_latent_size) zIx = z_given_x(x_in, z_latent_size) zIx_sample = zIx.sample() zIx_mean = zIx.mean() #kl_z = tf.reduce_mean(zIx.log_prob(zIx_sample)- zI.log_prob(zIx_sample)) kl_z = tf.reduce_mean(tfd.kl_divergence(zIx, zI)) #analytical #Reconstruction xIz = x_given_z(zIx_sample, X.shape[1]) rec_out = xIz.mean() rec_loss = tf.losses.mean_squared_error(x_out, rec_out) loss = rec_loss + beta*kl_z optimizer = tf.train.AdamOptimizer(0.001).minimize(loss) #Helper function def batch_generator(features, x, u, batch_size): """Function to create python generator to shuffle and split features into batches along the first dimension.""" idx = np.arange(features.shape[0]) np.random.shuffle(idx) for start_idx in range(0, features.shape[0], batch_size): end_idx = min(start_idx + batch_size, features.shape[0]) part = idx[start_idx:end_idx] yield features[part,:], x[part,:] , u[part, :] n_epochs = 5000 batch_size = X.shape[0] start = time.time() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for i in range(n_epochs): gen = batch_generator(X, X, U, batch_size) #create batch generator rec_loss_ = 0 kl_z_ = 0 for j in range(np.int(X.shape[0]/batch_size)): x_in_batch, x_out_batch, u_batch = gen.__next__() _, rec_loss__, kl_z__= sess.run([optimizer, rec_loss, kl_z], feed_dict={x_in: x_in_batch, x_out: x_out_batch}) rec_loss_ += rec_loss__ kl_z_ += kl_z__ if (i+1)% 50 == 0 or i == 0: zIx_mean_, rec_out_= sess.run([zIx_mean, rec_out], feed_dict ={x_in:X, x_out:X}) end = time.time() print('epoch: {0}, rec_loss: {1:.3f}, kl_z: {2:.2f}'.format((i+1), rec_loss_/(1+np.int(X.shape[0]/batch_size)), kl_z_/(1+np.int(X.shape[0]/batch_size)))) start = time.time() from sklearn.decomposition import TruncatedSVD svd = TruncatedSVD(n_components=2, n_iter=7, random_state=42) svd.fit(U.T) print(svd.explained_variance_ratio_) print(svd.explained_variance_ratio_.sum()) print(svd.singular_values_) U_ = svd.components_ U_ = U_.T import matplotlib.pyplot as plt fig, axs = plt.subplots(1, 2, figsize=(14,5)) axs[0].scatter(zIx_mean_[:,0],zIx_mean_[:,1], c=U_[:,0], cmap='viridis', s=5.0); axs[0].set_xlabel('z1') axs[0].set_ylabel('z2') fig.suptitle('X1') plt.show() fig, axs = plt.subplots(1, 2, figsize=(14,5)) axs[0].scatter(wIxy_mean_[:,0],wIxy_mean_[:,1], c=U_[:,1], cmap='viridis', s=5.0); axs[0].set_xlabel('w1') axs[0].set_ylabel('w2') axs[1].scatter(zIx_mean_[:,0],zIx_mean_[:,1], c=U_[:,1], cmap='viridis', s=5.0); axs[1].set_xlabel('z1') axs[1].set_ylabel('z2') fig.suptitle('X1') plt.show() error = np.abs(X-rec_out_) plt.plot(np.reshape(error, -1), '*', markersize=0.1); plt.hist(np.reshape(error, -1), bins=50); ```
github_jupyter