# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math
import os
import sys
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import namedtuple, OrderedDict
from pathlib import Path
from tqdm.notebook import tqdm
# -
INPUT_PATH = '../data/pku-autonomous-driving'
PRED_PATH = '../exp/car_pose_6dof/car_pose_default/results/predictions_045.csv'
# +
def parse_pred_str(s):
cars = np.array(s.split()).reshape([-1, 7])
out = []
for car in cars:
car = {
'rotation': car[0:3].astype(np.float64),
'location': car[3:6].astype(np.float64),
'score': car[-1].astype(np.float64),
}
out.append(car)
return out
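# Quick illustration with made-up numbers (not real predictions): each car in the
# prediction string is seven values - three rotation components, three location
# components, and a score.
print(parse_pred_str('0.1 0.2 0.3 1.0 2.0 10.0 0.9'))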
def create_camera_matrix():
return np.array([
[2304.5479, 0, 1686.2379, 0],
[0, 2305.8757, 1354.9849, 0],
[0, 0, 1., 0]
], dtype=np.float32)
def proj_point(p, calib):
p = np.dot(calib[:,:3], p)
p = p[:2] / p[2]
return p
# -
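# Sanity check of proj_point with illustrative values (not from the dataset): a
# point on the camera's optical axis should project to the principal point of the
# matrix above, roughly (1686.24, 1354.98).
_demo_calib = create_camera_matrix()
print(proj_point(np.array([0.0, 0.0, 10.0]), _demo_calib))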
df = pd.read_csv(PRED_PATH)
calib = create_camera_matrix()
# +
def imshow(img, boxes=None, figsize=(16,8)):
plt.figure(figsize=figsize)
ax = plt.axes()
if boxes is not None:
for bbox in boxes:
rect = plt.Rectangle(
(bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=1)
plt.gca().add_patch(rect)
ax.imshow(img)
ax.yaxis.set_major_locator(plt.NullLocator())
ax.xaxis.set_major_formatter(plt.NullFormatter())
def add_blend_img(back, fore, trans=0.7, white_theme=False):
if white_theme:
fore = 255 - fore
    if fore.shape[0] != back.shape[0] or fore.shape[1] != back.shape[1]:
fore = cv2.resize(fore, (back.shape[1], back.shape[0]))
if len(fore.shape) == 2:
fore = fore.reshape(fore.shape[0], fore.shape[1], 1)
out = (back * (1. - trans) + fore * trans)
out[out > 255] = 255
out[out < 0] = 0
return out.astype(np.uint8).copy()
# -
def show_images(images, cols = 1, titles = None):
"""Display a list of images in a single figure with matplotlib.
Parameters
---------
images: List of np.arrays compatible with plt.imshow.
cols (Default = 1): Number of columns in figure (number of rows is
set to np.ceil(n_images/float(cols))).
    titles: List of titles corresponding to each image. Must have
    the same length as images.
"""
    assert (titles is None) or (len(images) == len(titles))
n_images = len(images)
if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(int(np.ceil(n_images / float(cols))), cols, n + 1)
if image.ndim == 2:
plt.gray()
plt.imshow(image)
a.set_title(title)
fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
plt.show()
def has_intersect(ct, mask, min_thresh=0.15):
"""
ct - point (x, y)
mask - inverted mask
"""
x, y = int(round(ct[0])), int(round(ct[1]))
h, w = mask.shape
if x >= w or y >= h:
return False
# mask inverted: 1 - miss, 0 - inside mask
out_of_mask = bool(mask[y,x])
if out_of_mask:
return False
else:
top = 1 + mask[:y,x].nonzero()[0][-1] # max
bottom = y + mask[y:,x].nonzero()[0][0] - 1 # min
left_pxs = mask[y,:x].nonzero()[0]
left = 1 + left_pxs[-1] if left_pxs.sum() > 0 else 0 # max
right_pxs = mask[y, x:].nonzero()[0]
right = x + right_pxs[0] - 1 if right_pxs.sum() > 0 else (w - 1) # min
w_mask = right - left
h_mask = bottom - top
if (x - left) / w_mask < min_thresh or (right - x) / w_mask < min_thresh:
return False
if (y - top) / h_mask < min_thresh or (bottom - y) / h_mask < min_thresh:
return False
return True
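# Toy check of has_intersect on a small synthetic inverted mask (1 = outside the
# ignore mask, 0 = inside it); the mask and points are illustrative only.
_toy_mask = np.ones((10, 10), dtype=np.uint8)
_toy_mask[2:8, 2:8] = 0                      # a 6x6 masked-out block
print(has_intersect((5, 5), _toy_mask))      # well inside the block -> True
print(has_intersect((0, 0), _toy_mask))      # outside the block -> False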
# +
# def circ_mean(angles):
# av_sin = np.mean(np.sin(angles), axis=0)
# av_cos = np.mean(np.cos(angles), axis=0)
# ang_rad = np.arctan2(av_sin,av_cos)
# return ang_rad
# def average_duplicates(cars, min_dist=2.5):
# cars_m = []
# remove = []
# n = len(cars)
# for i in range(n):
# if i in remove:
# continue
# for j in range(i+1, n):
# loc0 = cars[i]['location']
# loc1 = cars[j]['location']
# dist_z = (loc0[-1] + loc1[-1]) / 2
# dist = np.sqrt(np.square(loc1 - loc0).sum()) / dist_z
# if dist < min_dist:
# car_m = {
# 'location': np.mean([loc0, loc1], axis=0),
# 'rotation': circ_mean([cars[i]['rotation'], cars[j]['rotation']]),
# 'score': np.mean([cars[i]['score'], cars[j]['score']])
# }
# cars_m.append(car_m)
# remove.append(i)
# remove.append(j)
# break
# cars_m += [c for i,c in enumerate(cars) if i not in remove]
# dups = [c for i,c in enumerate(cars) if i in remove]
# return cars_m, dups
# +
def cars2str(cars):
preds = []
for car in cars:
cc = [*car['rotation'], *car['location'], car['score']]
cc_str = ' '.join([str(v) for v in cc])
preds.append(cc_str)
return ' '.join(preds)
def count_cars(dataframe):
c = 0
for i in range(len(dataframe)):
v = dataframe.iloc[i,1]
if v is not np.nan:
c += len(v.split()) / 7
return c
# +
THRESH = 0.1
removed = 0
dead = {}
duplicates = {}
fixed_df = df.copy()
for i in tqdm(range(len(df))):
pred_str = df.iloc[i,1]
if pred_str is np.nan:
continue
img_id = df.iloc[i,0]
mask_path = os.path.join(INPUT_PATH, 'test_masks', img_id+'.jpg')
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
if mask is None:
continue
_, mask = cv2.threshold(mask, 200, 255, cv2.THRESH_BINARY)
mask = (~mask / 255).astype(np.uint8)
preds = parse_pred_str(pred_str)
valid_cars = []
need_fix = False
for car in preds:
ct = proj_point(car['location'], calib) # x,y
if has_intersect(ct, mask, min_thresh=THRESH):
car['ct'] = ct
if not img_id in dead:
dead[img_id] = []
dead[img_id].append(car)
removed += 1
need_fix = True
else:
valid_cars.append(car)
if need_fix:
pred_str = cars2str(valid_cars)
fixed_df.iloc[i,1] = pred_str
print('Removed: %d' % removed)
# -
count_cars(df), count_cars(fixed_df)
fixed_df.to_csv('predictions_fixed.csv', index=False)
# !cat predictions_fixed.csv | head -n 2
img_ids = iter(dead.keys())
# +
img_id = next(img_ids)
print(img_id)
img_path = os.path.join(INPUT_PATH, 'test_images', img_id+'.jpg')
img = cv2.imread(img_path)[:,:,::-1]
mask_path = os.path.join(INPUT_PATH, 'test_masks', img_id+'.jpg')
mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
check = []
for car in dead[img_id]:
ct = car['ct']
x,y = int(ct[0]), int(ct[1])
h,w = mask.shape
s = 120
blend = add_blend_img(img, mask, trans=0.5, white_theme=True)
blend = blend[max(0,y-s):min(y+s, h-1), max(0,x-s):min(x+s, w-1),:]
check.append(blend)
show_images(check)
| notebooks/filter_predictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # DS108 Databases : Lesson Three Companion Notebook
# ### Table of Contents <a class="anchor" id="DS108L3_toc"></a>
#
# * [Table of Contents](#DS108L3_toc)
# * [Page 1 - Overview CRUD operations](#DS108L3_page_1)
# * [Page 2 - INSERT Examples](#DS108L3_page_2)
# * [Page 3 - What is NULL?](#DS108L3_page_3)
# * [Page 4 - UPDATE](#DS108L3_page_4)
# * [Page 5 - DELETE](#DS108L3_page_5)
# * [Page 6 - Key Terms](#DS108L3_page_6)
# * [Page 7 - Lesson 1-3 Review](#DS108L3_page_7)
# * [Page 8 - Lesson 3 Practice Hands-On](#DS108L3_page_8)
# * [Page 9 - Lesson 3 Practice Hands-On Solution](#DS108L3_page_9)
#
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 1 - Overview CRUD operations<a class="anchor" id="DS108L3_page_1"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
from IPython.display import VimeoVideo
# Tutorial Video Name: Joins
VimeoVideo('243214658', width=720, height=480)
# During this lesson, you are going to explore CRUD operations in SQL commands. You may be wondering, what is CRUD? *CRUD* stands for Create, Read, Update and Delete. This is a term you will run into during your career, so it is important to understand it. When executing CRUD in SQL, you will use slightly different keywords. Below are the SQL commands that are equivalent to CRUD:
#
# <table class="table table-striped">
# <tr style="font-weight: bold;">
# <td>CRUD name</td>
# <td>SQL command</td>
# </tr>
# <tr>
# <td>Create</td>
# <td>Insert</td>
# </tr>
# <tr>
# <td>Read</td>
# <td>Select</td>
# </tr>
# <tr>
# <td>Update</td>
# <td>Update</td>
# </tr>
# <tr>
# <td>Delete</td>
# <td>Delete</td>
# </tr>
# </table>
#
# As you can see, Update and Delete use the same keywords. You have already explored SELECT in past lessons, so you will now examine the rest of CRUD operations in SQL.
#
# ---
#
# ## INSERT INTO
#
# The `INSERT INTO` statement is used to insert data into a table within a database. There are two ways to insert data into the database.
#
# The first way is to specify both the columns and the values of the columns:
#
# ```sql
# INSERT INTO table_name (column1, column2, column3...)
# VALUES (value1, value2, value3...);
# ```
#
# The second way is to insert values for every column in the table. If this is the case, you do not need to specify the column names, just the values:
#
# ```sql
# INSERT INTO table_name
# VALUES (value1, value2, value3...);
# ```
#
# When inserting data into every existing column, make sure the order of the values is the same as the order of the columns.
#
# Click "Next" to see more `Insert` examples.
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 2 - INSERT Examples<a class="anchor" id="DS108L3_page_2"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # INSERT Examples
#
# Below are more `Insert` examples. Go ahead and get started!
#
# ---
#
# ## Example 1: Specify Columns and Values
#
# Consider the following query:
#
# ```sql
# insert into sakila.actor (first_name, last_name, last_update)
# values ("Johnny", "Smith", "2019-01-17 08:43:11");
# ```
#
# The above query may look like a lot, but all you are doing is inserting a new actor into the table `actor`.
#
# If you want to see the actor in the table, you can run the following query:
#
# ```sql
# SELECT * FROM sakila.actor;
# ```
#
# If you scroll down, <NAME> is the last actor listed. Did you notice that you didn't specify the `actor_id`, but it gave a value of `201`? That is because it is an auto-increment field that will automatically generate an id when a new record is inserted.
#
# If you wanted only to see <NAME>'s information, you could add a WHERE statement:
#
# ```sql
# select * from sakila.actor
# where last_name = "Smith";
# ```
#
# The above query will pull in just the data of the actor with the `last_name` of `Smith`.
#
# ---
#
# ## Example 2: Specify Just Values
#
# Now, instead of what you did above, you could use the second way of inserting to save time writing out every column name:
#
# ```sql
# insert into sakila.actor
# values ("Kermit", "DaFrog", "2019-01-19 08:56:12");
# ```
#
# If you run the above query, you will get an error:
#
# ```text
# Error Code: 1136. Column count doesn't match value count at row 1
# ```
#
# As you can see, MySQL is complaining because you only supplied three values when the `actor` table has four columns. Can you guess what the missing column is? You're right! It's `actor_id`! In the previous query, you listed out every column that data was being inserted into, and `actor_id` was automatically generated. But in this second `insert into`, you have to supply the `actor_id` yourself, because MySQL assumes you are inserting and defining data for EVERY column. You would have to run the below query:
#
# ```sql
# insert into sakila.actor
# values (202, "Kermit", "DaFrog", "2019-01-19 08:56:12");
# ```
#
# The above query will insert the new actor without any problems.
#
# ---
#
# ## Example 3: Insert Data when Some is Missing
#
# Sometimes, you want to insert a new row, but you don't have all the data for each column. If that is the case, there is an automatic fix for that. Consider below:
#
# ```sql
# insert into sakila.actor (first_name, last_name)
# values ("Miss", "Pigee");
# ```
#
# Above, you only have the information for two columns. If you run a query to look up this new actor, you will see the following output:
#
# ![Select Miss Pigee. Output from a query that selects the new actor whose first name equals Miss.](Media/SelectMiss.png)
# *Figure 3-4: Select Miss Pigee*
#
# Great work! Now that you feel good about inserting data into a column, you will explore NULL.
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 3 - What is NULL?<a class="anchor" id="DS108L3_page_3"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # What is NULL?
#
# A column field that does not have a value will be read as a *Null* value. If a column allows for optional data, it is possible to insert a new record with no value. If this happens, then the record will be saved with a Null value. It is essential to understand that a Null value does not mean a value of zero or a field that contains spaces as its value. *Null* means that the value was left blank when the record was created.
#
# It is possible to check for Null or non-Null values using SQL keywords. You'll use the `IS NULL` and `IS NOT NULL` keywords in MySQL. Below is the syntax for each:
#
# IS NULL:
#
# ```sql
# SELECT column_names FROM table_name
# WHERE column_name IS NULL;
# ```
#
# IS NOT NULL:
#
# ```sql
# SELECT column_names FROM table_name
# WHERE column_name IS NOT NULL;
# ```
#
# Now you will try it out!
#
# ---
#
# ## NULL
#
# If you want to check for values in a particular table that are Null, you can use the `IS NULL` keyword. You need this keyword because you cannot check for Null values using comparison operators such as =, >, etc., which you learned about previously. For example, the below query will not work as intended:
#
# ```sql
# select * from address
# where address2 = Null;
# ```
#
# That will give you no results.
#
# But if you run the following query, you will see all addresses that do not have a second address line (`address2`):
#
# ```sql
# select * from address
# where address2 is null;
# ```
#
# And here is your MySQL output:
#
# ```text
# 1 47 MySakila Drive Alberta 300 ... 2014-09-25 22:30:27
# 2 28 MySQL Boulevard QLD 576 ... 2014-09-25 22:30:09
# 3 23 Workhaven Lane Alberta 300 14033335568 ... 2014-09-25 22:30:27
# 4 1411 Lillydale Drive QLD 576 6172235589 ... 2014-09-25 22:30:09
# ```
#
# As you can see, you queried the `address` table and pulled in all rows whose `address2` field has a value of `Null`, and you get the four rows shown above.
#
# ---
#
# ## IS NOT NULL
#
# Now, if you want to check for data that is not null, you could flip around what you did in the `IS NULL` example:
#
# ```sql
# select * from address
# where address2 is not null;
# ```
#
# As you can see, the query pulls in all addresses that have non-Null data in the `address2` column, which is the vast majority of rows (605 in this case)!
#
# What if you want to pull up rows where more than one column is null? At first, you might want to try something like this using the `AND` keyword:
#
# ```sql
# select * from staff
# where picture and password is null;
# ```
#
# But the above query will return nothing. If you look in the `staff` table, there is clearly an entry that fulfills the criteria! Using the `AND` keyword is correct, but you need to check whether each column is null separately, like below:
#
# ```sql
# select * from staff
# where picture is null and password is null;
# ```
#
# The output of above will be this:
#
# ```text
# 2 Jon Stephens 4 <EMAIL> 2 1 Jon 2006-02-15 03:57:16
# ```
#
# You could also do something like:
#
# ```sql
# select * from staff
# where picture is null and store_id = 2;
# ```
#
# You'll come up with the same result!
#
# You have learned so much about SQL already; you can start putting what you have learned together! Feel free to try different queries and practice what you can do so far!
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 4 - UPDATE<a class="anchor" id="DS108L3_page_4"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # UPDATE
#
# You have now explored the first two operations in CRUD: Create and Read, which in SQL are referred to as `Insert` and `Select`. You will now learn about updating your database!
#
# ---
#
# ## Update a Table
#
# Now that you feel comfortable with Insert and Select, Update will be fairly straightforward. When updating, you have to use a new keyword: `SET`. This will set the field you want to be updated to the new data. Below is the syntax for `Update`:
#
# ```sql
# UPDATE table_name
# SET column1 = value1, column2 = value2, ...
# WHERE condition;
# ```
#
# Go ahead and update the actor `<NAME>` that you inserted into the database earlier:
#
# ```sql
# update actor
# set first_name = "Jonathan"
# where actor_id = 201;
# ```
#
# Once the query is executed successfully, go ahead and view that specific actor using the below query:
#
# ```sql
# select * from actor
# where actor_id = 201;
# ```
#
# As you can see from the output, this actor's first name is now "Jonathan" instead of "Johnny".
#
# ---
#
# ## Update Multiple Records
#
# What if you wanted to update a lot of things at once? Perhaps you want to set every customer in a particular store to active. You can do that all in one query!
#
# ```sql
# update customer
# set active = 1
# where store_id = 2;
# ```
#
# And if you look at all customers within the second store (`store_id` of `2`), you will see that they are now all active.
#
# ---
#
# ## Warning!
#
# If you don't include the WHERE statement when updating data, it will update every row to what you have SET.
#
# Consider below:
#
# ```sql
# update customer
# set active = 1;
# ```
#
# Without a `WHERE` statement, all customers will be updated to be active. Be careful, because if this is not your goal, you could update large amounts of data that shouldn't be updated!
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 5 - DELETE<a class="anchor" id="DS108L3_page_5"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # DELETE
#
# *Delete* is a way to remove existing data from a table. Below is the syntax for deleting:
#
# ```sql
# DELETE FROM table_name
# WHERE condition;
# ```
#
# Well, Jonathan (Johnny) is now no longer an actor, and you've been asked to delete him from the `actor` table. Below is the query to delete Johnny from the table:
#
# ```sql
# delete from actor
# where actor_id = 201;
# ```
#
# If you run a select query to see an actor with an `actor_id` of 201, you will see that no results are found. Try it!
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 6 - Key Terms<a class="anchor" id="DS108L3_page_6"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Key Terms
#
# Below is a list, with short descriptions, of the relevant keywords you have learned in this lesson. Please read through it, and go back and review any concepts you don't fully understand. Great work!
#
# <table class="table table-striped">
# <tr>
# <th>Keyword</th>
# <th>Description</th>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>CRUD</td>
# <td>Stands for Create, Read, Update and Delete. SQL uses slightly different keywords to complete these tasks.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>INSERT INTO</td>
# <td>Used to insert data into a table.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>VALUES</td>
# <td>Used along with INSERT INTO to define what values are being inserted into a table.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>IS NULL</td>
# <td>Used along with WHERE as a condition to see the data that has NULL as a value in a column.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>IS NOT NULL</td>
# <td>Used along with WHERE as a condition to see the data that does not have NULL as a value in a column.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>UPDATE</td>
# <td>Used to update data within a column based on a condition.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>SET</td>
# <td>Used along side UPDATE to set the data to be the desired value in a particular column.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>DELETE</td>
# <td>Deletes specific data based on a condition.</td>
# </tr>
# </table>
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 7 - Lesson 1-3 Review<a class="anchor" id="DS108L3_page_7"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Lesson 1-3 Review
#
# PHEW! Great work so far! This is a lot of info to take in about SQL, so take some time and do a review of all the topics you have learned about up until this point. If you run into something you don't quite understand, go back and review it, so you are fully prepared to continue with SQL. Keep up the great work!
#
# <table class="table-striped">
# <tr>
# <th>Keyword</th>
# <th>Description</th>
# <th>When Used</th>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>SELECT</td>
# <td>Used to select and view data from database.</td>
# <td>Used very often to view the data you want to work with and manipulate.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>*</td>
# <td>A wildcard that is used to select all columns.</td>
# <td>Used when you want to view every column in the table that is based on a condition.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>FROM</td>
# <td>Used to identify which table you are selecting from.</td>
# <td>Always used when using the SELECT keyword and DELETE keywords.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>LIMIT</td>
# <td>Gives the query a limit as to how many rows should be returned.</td>
# <td>Used when you don't want every bit of data returned, which in some cases could be millions of rows.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>WHERE</td>
# <td>Defines a condition to be met when running a query.</td>
# <td>Can be used with SELECT, INSERT, UPDATE and DELETE, but is not required.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>AND</td>
# <td>Makes it possible to have multiple conditions when selecting.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>OR</td>
# <td>Checks to see if any one of the conditions listed are true.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>NOT</td>
# <td>Used to check if a condition is not true.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>LIKE</td>
# <td>Searches for a particular pattern or character within the data.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>%</td>
# <td>Represents zero, one or multiple characters.</td>
# <td>Used with the LIKE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>_</td>
# <td>Represents a single character.</td>
# <td>Used with the LIKE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>IN</td>
# <td>Allows you to specify multiple values.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>ORDER BY</td>
# <td>Used to order the output of a query. Default is alphabetical but can order in reverse alphabetical by using DESC.</td>
# <td>Not a required statement, but is used when you are selecting and viewing data.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>INNER JOIN</td>
# <td>SQL command to join two tables.</td>
# <td>Only includes records which have data present in both tables.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>JOIN</td>
# <td>SQL Command to join two or more tables together.</td>
# <td>Shorthand for INNER JOIN.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>OUTER JOIN</td>
# <td>SQL command to join two tables, including all of the records from one table, along with the matching values from the other.</td>
# <td>Used when there are matching values in both tables.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>OUTER LEFT JOIN</td>
# <td>Returns all records from the left table, and the matched records from the right table.</td>
# <td>Used when joining records from the left table and the matching records from the right table.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>OUTER RIGHT JOIN</td>
# <td>Returns all records from the right table, and the matched records from the left table.</td>
# <td>Used when joining records from the right table and the matching records from the left table.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>ON</td>
# <td>Identifies the table and column for the first and second tables in a Join.</td>
# <td>Used in an OUTER or INNER JOIN.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>USING</td>
# <td>Shorthand for ON which allows you to define the column name once.</td>
# <td>Used when both tables in a join have the same column name.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>AS</td>
# <td>Gives the ability to change the column name in a query.</td>
# <td>Used for clarification or consistency purposes.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>CRUD</td>
# <td>Stands for Create, Read, Update and Delete.</td>
# <td>SQL uses slightly different keywords to complete these tasks.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>INSERT INTO</td>
# <td>Used to insert data into a table.</td>
# <td>Can insert data for all columns or for a select number of columns. If data is not defined, NULL will be the result.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>VALUES</td>
# <td>Defines what values are being inserted into a table.</td>
# <td>Used along with INSERT INTO.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>IS NULL</td>
# <td>A condition to view the data in a column that has NULL as a value.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>IS NOT NULL</td>
# <td>A condition to view the data in a column that does not have NULL as a value.</td>
# <td>Used with the WHERE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>UPDATE</td>
# <td>Updates data within a column based on a condition.</td>
# <td>Used when there are pre-existing tables.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>SET</td>
# <td>Sets the data to be the desired value in a particular column.</td>
# <td>Used with the UPDATE clause.</td>
# </tr>
# <tr>
# <td style="font-weight: bold;" nowrap>DELETE</td>
# <td>Deletes specific data based on a condition.</td>
# <td>Used when there are pre-existing tables.</td>
# </tr>
# </table>
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 8 - Lesson 3 Practice Hands-On<a class="anchor" id="DS108L3_page_8"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# For your Lesson 3 Practice Hands-On, you will be working with your new knowledge on SQL queries. This Hands-On will **not** be graded, but you are encouraged to complete it. The best way to become great at working with databases is to practice! Once you have submitted your project, you will be able to access the solution on the next page. Please read through the below setup instructions before starting your project.
#
# ---
#
# ## Setup
#
# This Hands-On is structured into two parts, and each part may ask you to run multiple queries. After each query, please take a screenshot of the MySQL output and add it to a Word document (or an equivalent) and name this file `SQL-HandsOn3`. This way, you will be able to submit your answers to each part all at once.
#
# Now you are ready to get started! Good luck!
#
# ---
#
# ## Part 1
#
# Run the following queries:
#
# 1. Write a query that Inserts a new actor into the database. Once you have completed the insert, write a query to view all information for that specific actor.
# 2. Write a query to Update the actor that you just inserted. Give your new actor a `first_name` of `Emmy` and a `last_name` of `Rock`. When you have completed that update, run another query to see your updated actor.
# > Tip! Make sure to update only your specific actor. Don't update all actors!
# 3. Write a query that finds all staff that do not have a value specified for `password`.
# 4. Write a query that finds all staff's information that has a value for `first_name` and `store_id`.
# 5. Write a query that updates all addresses with a Null value in the `address2` column. If the `district` is `Alberta`, put `address2` as Canada, and if the `district` is `QLD`, put Australia.
# > Tip! Even if you get an error because of permissions, include screenshots of your code and it won't count against you.
#
# <div class="panel panel-info">
# <div class="panel-heading">
# <h3 class="panel-title">Tip!</h3>
# </div>
# <div class="panel-body">
# <p>You will be working in the same sakila database.</p>
# </div>
# </div>
#
# <div class="panel panel-danger">
# <div class="panel-heading">
# <h3 class="panel-title">Caution!</h3>
# </div>
# <div class="panel-body">
# <p>Be sure to zip and submit your <code>SQL-HandsOn3</code> Word document when finished! You will not be able to re-submit, so be sure the screenshots to each part are located within this document.</p>
# </div>
# </div>
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Page 9 - Lesson 3 Practice Hands-On Solution<a class="anchor" id="DS108L3_page_9"></a>
#
# [Back to Top](#DS108L3_toc)
#
# <hr style="height:10px;border-width:0;color:gray;background-color:gray">
#
# # Solution
#
# Below is the solution for your Lesson 3 Practice Hands-On.
#
# ---
#
# ## Part 1
#
# 1.
#
# ```sql
# -- query 1
#
# INSERT INTO actor (first_name, last_name) VALUES ("Emma", "Stone")
#
# -- query 2
#
# SELECT * FROM actor WHERE first_name = "Emma" AND last_name = "Stone"
# ```
#
# 2.
#
# ```sql
# -- query 1
#
# UPDATE actor SET first_name = "Emmy", last_name = "Rock" WHERE actor_id = 203
#
# -- query 2
#
# SELECT * FROM actor WHERE actor_id = 203
# ```
#
# 3.
#
# ```sql
# SELECT * FROM staff WHERE password IS NULL
# ```
#
# 4.
#
# ```sql
# SELECT * FROM staff WHERE first_name IS NOT NULL AND store_id IS NOT NULL
# ```
#
# 5.
#
# ```sql
# -- query 1
#
# UPDATE address
# SET address2 = "Canada"
# WHERE district = "Alberta"
#
# -- query 2
#
# UPDATE address
# SET address2 = "Australia"
# WHERE district = "QLD"
# ```
| Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/RECAP_DS/07_DATABASES/SQL/L03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # An Interactive Briefing Note(book)
# Lab 7 is an exercise to get acquainted with `Jupyter Notebooks`, using the familiarity of a policy briefing note as a framework.
#
# Notebooks allow for a mix of code, text, and graphics, making them an ideal platform for policy analytics briefing notes.
#
# Start by renaming this notebook template "JSGS 867 Lab 7 *Your Name*", and then edit this template to produce your own interactive briefing notebook.
#
# In this document, wherever you see *italicized text*, replace it with your own text.
#
# This is a toy problem to illustrate how a briefing note might be written in Jupyter Notebook. The final briefing note you create won't necessarily make sense, but it will illustrate how markdown text and code can be combined into a dynamic policy analytics briefing note. Don't spend long writing the text in the note below. The task mostly involves cutting-and-pasting your work from lab #5 into the format of a briefing note.
#
# When you are done reading these introductory instructions, delete this cell and the one above it (the title).
#
# For any of the following Markdown or Code cells, delete whatever is unnecessary after you have run the commands. Leave the other code and markdown cells needed for the final briefing note.
# Date: November 7th, 2017
#
# File: K11319
# + [markdown] slideshow={"slide_type": "-"}
# MINISTRY of CENTRAL SERVICES
#
# BRIEFING NOTE FOR INFORMATION
# -
# In the cell above, change the name of the Ministry to "MINISTRY of CORPORATE SERVICES" then delete this cell.
# I. PREPARED FOR: Pa<NAME>, Deputy Minister
# insert a Python command in the next cell that prints the output "PREPARED BY: *Your Name*".
"PREPARED BY: <NAME>"
print ("PREPARED BY: <NAME>")
# First, we have to prepare the environment we're working in.
# "Requests" is a simple HTTP library for Python. Run the following command using Cell Magic
# %%cmd
pip install requests
# Now install Python Twitter Tools (as we did with Lab 5)
#
# Download and uncompress twitter-1.17.1.tar.gz from https://pypi.python.org/packages/75/30/86a053e40068daece37a8167edc710fd6630ee58d14bcf2aa0997d79bfd4/twitter-1.17.1.tar.gz#md5=65219549f09a030719bac6e20b12c3eb
#
# Move the twitter-1.17.1 folder to the folder you're working in. (Use your desktop file manager).
# find out what directory you are in using the following Magic command
# %pwd
# N.B.: The following command will list all magic commands
# %lsmagic
# change to the directory with the twitter-1.17.1 setup.py file
# %cd C:\Users\stamatij\Desktop\Twitter 1.17
# Install the Python Twitter Tools package using three separate commands. Delete this comment and the three commands when done.
# %%cmd
python setup.py --help
# %%cmd
python setup.py build
# %%cmd
python setup.py install
# II. ISSUE
# +
# Import the necessary package to process data in JSON format
try:
import json
except ImportError:
import simplejson as json
# Import the necessary methods from "twitter" library
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
# Variables that contains the user credentials to access Twitter API
ACCESS_TOKEN = '<KEY>'
ACCESS_SECRET = '<KEY>'
CONSUMER_KEY = '2Nh755tawtHRVemY09AvqF9tx'
CONSUMER_SECRET = '<KEY>'
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
# Initiate the connection to Twitter Streaming API
twitter_stream = TwitterStream(auth=oauth)
# Get a sample of the public data following through Twitter
iterator = twitter_stream.statuses.filter(track="falcons", geocode="33.748995,-84.387982,150km")
# Print each tweet in the stream to the screen
# Here we set it to stop after getting 1000 tweets.
# You don't have to set it to stop, but can continue running
# the Twitter API to collect data for days or even longer.
tweet_count = 1000
for tweet in iterator:
tweet_count -= 1
# Twitter Python Tool wraps the data returned by Twitter
# as a TwitterDictResponse object.
# We convert it back to the JSON format to print/score
print (json.dumps(tweet, indent=4))
# The command below will do pretty printing for JSON data, try it out
# print json.dumps(tweet, indent=4)
if tweet_count <= 0:
break
# + code_folding=[]
# copy-and-paste Python code from lab 5 - the first twitter_streaming.py file
# (it starts with the line "# Import the necessary package to process data in JSON format"),
# and can be found in the tutorial at http://socialmedia-class.org/twittertutorial.html under
# "Basic Uses of Streaming APIs" and before the "Advanced Uses of Streaming APIs"
# if you get an error at "print json.dumps(tweet, indent=4)",
# it may be because you are using Python 3.
# Try "print (json.dumps(tweet, indent=4))" instead
# You'll have a large output area. Double-click on the active area to the left of the output to minimize it.
# -
# III. ANALYSIS
# +
# copy-and-paste Python code from lab 5 - the final twitter_streaming.py file you submitted
# implementing the "Advanced Uses of Streaming APIs" (i.e., searching by keyword and geolocation)
# -
# V. RECOMMENDATION
#
# | Option | Description |
# | :------:| -----------:|
# | 1 | Markdown does not allow for a lot of formatting. |
# | 2 | This is one way. |
# | 3 | You can look up other formatting tricks. |
# Submit your notebook file by email to <EMAIL> or put online (e.g., GitHub) and send me the URL.
| Lab7/Jordan Stamatinos.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import fastai
from fastai.vision.all import *
from fastai.vision.widgets import *
# <h1> Vehicle Type Predictor </h1>
#
# This model classifies the uploaded image into one of the following categories:
# 1. Car
# 2. Truck
# 3. Bus
# 4. Aeroplane
# 5. Ship
#
# The model is trained on a resnet-18, with a dataset of only 186 images for each category.
path = Path()
learn_inf = load_learner(path/'vehexport.pkl', cpu = True)
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
print('YOOHOO 7')
print(learn_inf.dls.vocab)
lbl_pred = widgets.Label()
lbl_pred.value = 'Updated'
btn_run = widgets.Button(description='Classify')
def on_click(change):
img = PILImage.create(btn_upload.data[-1])
out_pl.clear_output()
lbl_pred.value = '.........'
with out_pl: display(img.to_thumb(128, 128))
lbl_pred.value = 'c1'
pred, pred_idx, probs = learn_inf.predict(img)
lbl_pred.value = 'c2'
print(f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}')
lbl_pred.value = 'changed'
btn_run.on_click(on_click)
btn_upload = widgets.FileUpload()
VBox([widgets.Label('Select your vehicle!'),
btn_upload,btn_run,lbl_pred, out_pl])
# © <NAME> - <EMAIL> - 2020
| VehicleTypeModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + active=""
# A notebook to make an animation of the surface salinity.
# +
from matplotlib import pylab
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
from salishsea_tools import tidetools
from salishsea_tools import (nc_tools,viz_tools)
import os
import glob
from matplotlib import animation
import datetime
# %matplotlib inline
# -
# Define date range
# +
start = datetime.datetime(2015,1,1)
end = datetime.datetime(2015,8,26)
numdays = (end-start).days
dates = [start + datetime.timedelta(days=num)
for num in range(0, numdays+1)]
# -
results_home = '/data/dlatorne/MEOPAR/SalishSea/nowcast/'
bathy = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
def results_dataset(period, grid, results_dir):
"""Return the results dataset for period (e.g. 1h or 1d)
and grid (e.g. grid_T, grid_U) from results_dir.
"""
filename_pattern = 'SalishSea_{period}_*_{grid}.nc'
filepaths = glob.glob(os.path.join(results_dir, filename_pattern.format(period=period, grid=grid)))
return nc.Dataset(filepaths[0])
# Load files over date range
# +
#subset
ist=0
ien=350
jst=200
jen=650
depth=0
x=np.arange(ist,ien)
y=np.arange(jst,jen)
#initialize arrays
Us=np.zeros((1,y.shape[0],x.shape[0]));
Vs=np.zeros((1,y.shape[0],x.shape[0]))
Ss=np.zeros((1,y.shape[0],x.shape[0]))
time=np.array([])
#lats and lons
lat = bathy.variables['nav_lat'][jst:jen,ist:ien]
lon = bathy.variables['nav_lon'][jst:jen,ist:ien]
#loop through files
period = '1d'
for date in dates:
results_dir = os.path.join(results_home, date.strftime('%d%b%y').lower())
grid_T = results_dataset(period,'grid_T',results_dir)
#load variables
S = grid_T.variables['vosaline'][:,0,jst:jen,ist:ien]
Ss = np.append(Ss,S,axis=0)
t = nc_tools.timestamp(grid_T, np.arange(S.shape[0]))
t = t.datetime
time = np.append(time, t)
Ss=Ss[1:,:,:];
# -
# Testing out the size and colors before animating
def salinity(t):
ax.clear()
#mesh
mesh=ax.contourf(lon,lat,Ss[t],cs,cmap=cmap,extend='both')
#land
viz_tools.plot_land_mask(ax,bathy,coords='map',xslice=x,yslice=y,color='burlywood')
#title
timestamp = time[t]
ax.set_title(timestamp.strftime('%d-%b-%Y %H:%M'))
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
return mesh
# +
smin, smax, dels = 0, 34, 1
cs = np.arange(smin,smax)
cmap = plt.get_cmap('spectral')
st=5
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
t=0
mesh = salinity(0)
cbar = plt.colorbar(mesh, ax=ax)
cbar.set_label('Practical Salinity [PSU]')
# +
#Setting up first frame and static content
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
mesh = salinity(0)
cbar = plt.colorbar(mesh, ax=ax)
cbar.set_label('Practical Salinity [PSU]')
#frames
framess=np.arange(1,Ss.shape[0])
#The animation function
anim = animation.FuncAnimation(fig, salinity,frames=framess, blit=True, repeat=False)
#A line that makes it all work
mywriter = animation.FFMpegWriter( fps=3, bitrate=10000)
#Save in current folder
anim.save('salinity-2015.mp4',writer=mywriter)
# -
| Nancy/animations/Surface salininty animation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generate CSVs from COVID-19 WHO data
# ## Dependencies
# +
import pandas as pd
import os
from datetime import datetime
INPUT_PATH = 'input/'
OUTPUT_PATH = 'public/data/'
POPULATION_CSV_PATH = os.path.join(INPUT_PATH, 'un', 'population_thousands.csv')
REGIONS_CSV_PATH = os.path.join(INPUT_PATH, 'regions.csv')
DEATHS_CSV_PATH = os.path.join(INPUT_PATH, 'deaths.csv')
CASES_CSV_PATH = os.path.join(INPUT_PATH, 'cases.csv')
# -
# !mkdir -p $OUTPUT_PATH
# ## Load the data
# Population figures
df_pop = pd.read_csv(POPULATION_CSV_PATH)
# WHO includes counts for Kosovo in Serbia
# https://www.who.int/countries/srb/en/
df_pop['entity'] = df_pop['entity'].replace({ 'Serbia (including Kosovo)': 'Serbia' })
df_pop['population'] = df_pop['population_thousands'] * 1000
def find_closest_year(df, year=2020):
    # Return the row whose 'year' is closest to the requested year.
    df = df.copy()
    return df.loc[df['year'].map(lambda x: abs(x - year)).idxmin()]
current_pop = pd.DataFrame([
find_closest_year(df, 2020)
for loc, df in df_pop.groupby('entity')
]) \
.dropna() \
.drop(columns=['population_thousands']) \
.rename(columns={'entity': 'location'})
# WHO regions & OWID names
df_regions = pd.read_csv(REGIONS_CSV_PATH)
df_regions = df_regions.merge(
current_pop[['location', 'population']],
how='left',
left_on='OWID Country Name',
right_on='location'
).drop(columns=['location'])
# Convert population figures to ints
df_regions['population'] = df_regions['population'].round().astype('Int64')
# Add a more descriptive header
df_regions = df_regions.rename(columns={'population': 'Population (UN 2020 projection)'})
# Transform the "wide" format into "long" format, which is easier to work with.
def melt_csv(df, var_name):
return df.melt(
id_vars=df.columns[0],
value_vars=df.columns[1:],
var_name='location',
value_name=var_name
).dropna()
df_deaths = melt_csv(pd.read_csv(DEATHS_CSV_PATH, header=1).rename(columns={ 'Date': 'date' }), 'total_deaths')
df_cases = melt_csv(pd.read_csv(CASES_CSV_PATH, header=1).rename(columns={ 'Date': 'date' }), 'total_cases')
# Convert all numbers from floating point to integers:
df_deaths['total_deaths'] = df_deaths['total_deaths'].astype('Int64')
df_cases['total_cases'] = df_cases['total_cases'].astype('Int64')
# ## Calculations
# Join cases & deaths into one dataframe
df_merged = df_cases.merge(
df_deaths,
how='outer',
left_on=['date', 'location'],
right_on=['date', 'location']
).sort_values(by=['location', 'date'])
# Standardize names to OWID names
df_regions_merged = df_regions.merge(
df_merged[['location']].drop_duplicates(),
how="outer",
left_on="WHO Country Name",
right_on="location"
)
df_regions_merged[df_regions_merged['OWID Country Name'].isnull()]
assert(df_regions_merged['OWID Country Name'].isnull().any() == False)
who_name_replace_map = { r['WHO Country Name']: r['OWID Country Name'] for r in df_regions_merged.to_dict('records') }
df_merged['location'] = df_merged['location'].replace(who_name_replace_map)
# Calculate daily cases & deaths
# Convert to Int64 to handle <NA>
df_merged['new_cases'] = df_merged.groupby('location')['total_cases'].diff().astype('Int64')
df_merged['new_deaths'] = df_merged.groupby('location')['total_deaths'].diff().astype('Int64')
# Create a `World` aggregate
df_global = df_merged.groupby('date').sum().reset_index()
df_global['location'] = 'World'
df_merged = pd.concat([df_merged, df_global], sort=True)
# Calculate per population variables
df_merged_pop = df_merged.merge(
current_pop,
how='left',
on='location'
)
# These entities do not exist in the UN population dataset but do in the WHO data
df_merged_pop[df_merged_pop['population'].isnull()]['location'].unique()
df_merged_pop['total_cases_per_million'] = df_merged_pop['total_cases'] / (df_merged_pop['population'] / 1e6)
df_merged_pop['total_deaths_per_million'] = df_merged_pop['total_deaths'] / (df_merged_pop['population'] / 1e6)
df_merged_pop['new_cases_per_million'] = df_merged_pop['new_cases'] / (df_merged_pop['population'] / 1e6)
df_merged_pop['new_deaths_per_million'] = df_merged_pop['new_deaths'] / (df_merged_pop['population'] / 1e6)
df_merged = df_merged_pop.drop(columns=['year', 'population'])
# Calculate days since 100th case
THRESHOLD = 100
DAYS_SINCE_COL_NAME = 'days_since_%sth_case' % THRESHOLD
DAYS_SINCE_COL_NAME_POSITIVE = 'days_since_%sth_case_positive' % THRESHOLD
def get_date_of_nth_case(df, nth):
try:
df_gt_nth = df[df['total_cases'] >= nth]
earliest = df.loc[pd.to_datetime(df_gt_nth['date']).idxmin()]
return earliest['date']
except:
return None
date_of_nth_case = pd.DataFrame([
(loc, get_date_of_nth_case(df, THRESHOLD))
for loc, df in df_merged.groupby('location')
], columns=['location', 'date_of_nth_case']).dropna()
def inject_days_since(df, ref_date):
df = df[['date', 'location']].copy()
df[DAYS_SINCE_COL_NAME] = df['date'].map(lambda date: (pd.to_datetime(date) - pd.to_datetime(ref_date)).days)
return df
df_grouped = df_merged.groupby('location')
df_days_since_nth_case = pd.concat([
inject_days_since(df_grouped.get_group(row['location']), row['date_of_nth_case'])
for _, row in date_of_nth_case.iterrows()
])
df_merged = df_merged.merge(
df_days_since_nth_case,
how='outer',
on=['date', 'location'],
)
df_merged[DAYS_SINCE_COL_NAME] = df_merged[DAYS_SINCE_COL_NAME].astype('Int64')
df_merged[DAYS_SINCE_COL_NAME_POSITIVE] = df_merged[DAYS_SINCE_COL_NAME] \
.map(lambda x: x if (pd.notna(x) and x >= 0) else None).astype('Int64')
# Calculate doubling rates
def get_days_to_double(df, col_name):
try:
# verbose because being very careful not to modify original data with dates
latest = df.loc[pd.to_datetime(df['date']).idxmax()]
df_lt_half = df[df[col_name] <= (latest[col_name] / 2)]
half = df_lt_half.loc[pd.to_datetime(df_lt_half['date']).idxmax()]
return (pd.to_datetime(latest['date']) - pd.to_datetime(half['date'])).days
except:
return None
days_to_double_cases = pd.DataFrame([
(loc, get_days_to_double(df, 'total_cases'))
for loc, df in df_merged.groupby('location')
], columns=['location', 'days_to_double_cases'])
days_to_double_cases['days_to_double_cases'] = days_to_double_cases['days_to_double_cases'].astype('Int64')
# ### Grapher data extract
df_grapher = df_merged.copy()
df_grapher['date'] = pd.to_datetime(df_grapher['date']).map(lambda date: (date - datetime(2020, 1, 21)).days)
df_grapher = df_grapher[[
'location', 'date',
'new_cases', 'new_deaths',
'total_cases', 'total_deaths',
'new_cases_per_million', 'new_deaths_per_million',
'total_cases_per_million', 'total_deaths_per_million',
DAYS_SINCE_COL_NAME, DAYS_SINCE_COL_NAME_POSITIVE]] \
.rename(columns={
'location': 'country',
'date': 'year',
'new_cases': 'Daily new confirmed cases of COVID-19',
'new_deaths': 'Daily new confirmed deaths due to COVID-19',
'total_cases': 'Total confirmed cases of COVID-19',
'total_deaths': 'Total confirmed deaths due to COVID-19',
'new_cases_per_million': 'Daily new confirmed cases of COVID-19 per million people',
'new_deaths_per_million': 'Daily new confirmed deaths due to COVID-19 per million people',
'total_cases_per_million': 'Total confirmed cases of COVID-19 per million people',
'total_deaths_per_million': 'Total confirmed deaths due to COVID-19 per million people',
DAYS_SINCE_COL_NAME: 'Days since the total confirmed cases of COVID-19 reached %s' % THRESHOLD,
DAYS_SINCE_COL_NAME_POSITIVE: 'Days since the total confirmed cases of COVID-19 reached %s (positive only)' % THRESHOLD,
})
# ## Inspect the results
# +
# df_merged
# +
# df_merged[df_merged['location'] == 'World']
# -
# ## Write output files
# Should keep these append-only in case someone external depends on the order
df_merged[[
'date', 'location',
'new_cases', 'new_deaths',
'total_cases', 'total_deaths']] \
.to_csv(os.path.join(OUTPUT_PATH, 'full_data.csv'), index=False)
for col_name in [
'total_cases', 'total_deaths',
'new_cases', 'new_deaths',
'total_cases_per_million', 'total_deaths_per_million',
'new_cases_per_million', 'new_deaths_per_million']:
df_pivot = df_merged.pivot(index='date', columns='location', values=col_name)
# move World to first column
cols = df_pivot.columns.tolist()
cols.insert(0, cols.pop(cols.index('World')))
df_pivot[cols].to_csv(os.path.join(OUTPUT_PATH, '%s.csv' % col_name))
days_to_double_cases.to_csv(os.path.join(OUTPUT_PATH, 'days_to_double_cases.csv'), index=False)
df_regions.to_csv(os.path.join(OUTPUT_PATH, 'regions.csv'), index=False)
df_grapher.to_csv(os.path.join(OUTPUT_PATH, 'grapher.csv'), index=False)
| covid-19-get-data/notebooks/who.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python Conda AC209a
# language: python
# name: ac209a
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from os import listdir
from os.path import isfile, join
import time
sns.set()
# -
# Spot check an individual songs file to explore its format and data.
df = pd.read_csv('../data/Songs/songs285.csv')
print(df.shape)
list(df.columns)
# This is a *lot* of data: ball-park $65,000$ rows in each of $1,000$ files, totaling around $65,000,000$ observations – or songs in playlists, many of which are certainly repeated.
df.head()
# Some very different playlist lengths
df.groupby('pid')['track_uri'].nunique()[10:20]
# Every file seems to be an arbitrary-length list of playlists, each of which has an id `pid` (scoped to that file) and an arbitrary-length list of songs, each with a position id `pos` in that playlist. For each song, the `artist_name`, `track_uri`, `artist_uri`, `track_name`, `album_uri`, `duration_ms`, and `album_name` are all stored. Most of these are repeated every time a song is repeated (within and across files), so there is a lot of room for simplification here. Furthermore, any URI is effectively a unique identifier for the song, artist, or album, and can be used as such.
#
# $1,000$ playlist files, as expected.
all_files = listdir('../data/Songs')
print(len(all_files))
all_files[0:3]
# ### Structuring the data
# A reasonable first step to slim down the size of the dataset without losing information or fidelity, is to parse through all the files to create a reference table/file of all songs and their metadata. Each playlist can then be stored as a simple named object, where the name is the overall playlist id and its value a vector of song ids.
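# Below is a toy illustration of that target layout (made-up URIs, not real data): a songs table with one row per unique track keyed by an integer `song_id`, and a playlists Series mapping a `filenum_pid` name to an array of song ids.
toy_songs = pd.DataFrame(
    {'track_uri': ['spotify:track:aaa', 'spotify:track:bbb'],
     'track_name': ['Song A', 'Song B'],
     'count': [3, 1]},
    index=pd.Index([0, 1], name='song_id'))
toy_playlists = pd.Series({'285_0': np.array([0, 1]), '285_1': np.array([1])})
display(toy_songs)
toy_playlists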
# ### Looping over all our files to fill out the master DataFrame (songs) and Series (playlists)
# +
start_time = time.time()
loop_start = time.time()
# List of all files
all_files = listdir('../data/Songs')
# Limit to some of the playlists
#all_files = all_files[0:200]
# Load first file to get columns (standard across all)
df = pd.read_csv('../data/Songs/' + all_files[0])
# Master DataFrame of all unique songs included across all playlists
#songs = pd.DataFrame(columns = list(df.columns)[2:])
songs = pd.DataFrame()
# Master Series of playlists and the songs included in each
playlists = pd.Series()
# Aggregator functions to limit to one row per song and count occurrences across playlists
a1 = dict()
for key in df.columns[2:]:
a1[key] = 'first'
a1['track_uri'] = 'count'
# Aggregator to consolidate into sum of songs across playlists
a2= dict()
for key in df.columns[2:]:
a2[key] = 'first'
del a2['track_uri']
a2['count'] = 'sum'
# Loop over each file to extract data
for i, file in enumerate(all_files):
# split on "." to split into "filename" and "csv"
# Then select "filename" and ditch the first five letters "songs"
filenum = file.split(".")[0][5:]
# Load file and store in temporary dataframe
fdf = pd.read_csv('../data/Songs/' + file)
# --- SONGS IN FILE ---
fdf_counts = fdf.iloc[:, 2:]
fdf_counts = fdf_counts.groupby('track_uri').agg(a1)
fdf_counts.rename(columns = {'track_uri': 'count'}, inplace = True)
# Add to df of unique songs, update counters, and remove duplicates
songs = songs.append(fdf_counts)
# -- SONGS IN EACH PLAYLIST --
# Songs included in every playlist (ordered) in file
# For each playlist, get list of track_uri's (unique identifiers)
songs_in_playlist = fdf.groupby('pid')['track_uri'].unique()
# Update index to be not the pid in file (id), but a combination of them
#songs_in_playlist.index = [filenum + '_' + str(pid) for pid in songs_in_playlist.index.values]
songs_in_playlist.index = list(map(lambda x: filenum + '_' + str(x), songs_in_playlist.index.values))
# Add playlists to master Series of all playlists
playlists = playlists.append(songs_in_playlist)
    # Every 25 files, consolidate the songs table so it doesn't grow too big
if (i+1)%25 == 0:
print('{}/{} -- {} s'.format(i+1, len(all_files), time.time() - loop_start))
loop_start = time.time()
        songs = songs.groupby('track_uri', sort=True).agg(a2)
print(' Consolidation: {} s'.format(time.time() - loop_start))
print("--- %s seconds ---" % (time.time() - start_time))
# +
# Do a final consolidation just to be safe (should be very fast)
# Add song ID to table, now that it only contains unique songs
start_time = time.time()
songs_counts = songs.groupby('track_uri').agg(a2)
songs_counts['song_id'] = np.arange(len(songs_counts))
print("--- %s seconds ---" % (time.time() - start_time))
print(songs_counts.shape)
display(songs_counts.head())
# +
# Replace playlist track_uri with song_id
start_time = time.time()
loop_start = time.time()
playlists_songids = pd.Series(index = playlists.index)
playlists_songids = playlists_songids.astype(object)
i = 0
for ind, row in playlists.items():
songids = np.array(songs_counts.loc[row, 'song_id'], 'int')
playlists_songids.loc[str(ind)] = songids
i += 1
    if i % (len(playlists)/100) == 0:
print('{}/{} -- {} s'.format(i, int(len(playlists)), time.time() - loop_start))
loop_start = time.time()
print("--- %s seconds ---" % (time.time() - start_time))
print(playlists_songids.shape)
print(playlists_songids.head())
# -
# Change songs table to have song id as index and track_uri as column
# We will be doing lookups on song_id while running
songs_counts_id = songs_counts.copy()
songs_counts_id['track_uri'] = songs_counts_id.index.values
songs_counts_id.set_index('song_id', inplace = True)
songs_counts_id.head()
# ### Write files to disk (csv and pickle)
# Use `pd.read_pickle` to easily read back in a data frame or series with the exact same structure as the one you dumped.
songs_counts_id.to_csv('../data/songs_counts_'+str(len(all_files))+'.csv')
songs_counts_id.to_pickle('../data/songs_counts_'+str(len(all_files))+'.pkl')
playlists_songids.to_csv('../data/playlists_song_ids_'+str(len(all_files))+'.csv', header = False)
playlists_songids.to_pickle('../data/playlists_song_ids_'+str(len(all_files))+'.pkl')
# ### For later use:
# Quickly filtering out from playlists songs that do not exist. Use to filter out songs that appear below a certain threshold by first filtering `songs_counts` on that threshold, and run `get` on that. Some songs in some playlists will then not exist in `songs_counts`, and the below code will drop them from the playlist. Handy for limiting the size of the dataset.
songs_counts.song_id.get(['derp', *playlists[0]], np.NaN)
np.array(songs_counts.song_id.get(['derp', *playlists[0]], np.NaN).dropna(), 'int')
| wrangling/data_prep_1000.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Simple dynamic seq2seq with TensorFlow
# This tutorial covers building seq2seq using dynamic unrolling with TensorFlow.
#
# I wasn't able to find any existing implementation of dynamic seq2seq with TF (as of 01.01.2017), so I decided to learn how to write my own, and document what I learn in the process.
#
# I deliberately try to be as explicit as possible. As it currently stands, TF code is the best source of documentation on itself, and I have a feeling that many conventions and design decisions are not documented anywhere except in the brains of Google Brain engineers.
#
# I hope this will be useful to people whose brains are wired like mine.
#
# **UPDATE**: as of r1.0 @ 16.02.2017, there is a new official implementation in `tf.contrib.seq2seq`. See [tutorial #3](3-seq2seq-native-new.ipynb). An official tutorial is reportedly coming soon. Personally, I still find wiring a dynamic encoder-decoder by hand insightful in many ways.
# Here we implement plain seq2seq — forward-only encoder + decoder without attention. I'll try to follow closely the original architecture described in [Sutskever, Vinyals and Le (2014)](https://arxiv.org/abs/1409.3215). If you notice any deviations, please let me know.
# Architecture diagram from their paper:
# ![seq2seq architecture](pictures/1-seq2seq.png)
# Rectangles are encoder and decoder's recurrent layers. Encoder receives `[A, B, C]` sequence as inputs. We don't care about encoder outputs, only about the hidden state it accumulates while reading the sequence. After input sequence ends, encoder passes its final state to decoder, which receives `[<EOS>, W, X, Y, Z]` and is trained to output `[W, X, Y, Z, <EOS>]`. `<EOS>` token is a special word in vocabulary that signals to decoder the beginning of translation.
# ## Implementation details
#
# TensorFlow has its own [implementation of seq2seq](https://www.tensorflow.org/tutorials/seq2seq/). Recently it was moved from core examples to the [`tensorflow/models` repo](https://github.com/tensorflow/models/tree/master/tutorials/rnn/translate), and it uses the deprecated seq2seq implementation. The deprecation happened because it uses **static unrolling**.
#
# **Static unrolling** involves constructing a computation graph with a fixed number of time steps. Such a graph can only handle sequences of specific lengths. One solution for handling sequences of varying lengths is to create multiple graphs with different time lengths and separate the dataset into these buckets.
#
# **Dynamic unrolling** instead uses control flow ops to process the sequence step by step. In TF this is supposed to be more space-efficient and just as fast. This is now the recommended way to implement RNNs.
# ## Vocabulary
#
# Seq2seq maps one sequence onto another. Both sequences consist of integers from a fixed range. In language tasks, integers usually correspond to words: we first construct a vocabulary by assigning a serial integer to every word in our corpus. The first few integers are reserved for special tokens. We'll call the upper bound on the vocabulary the `vocabulary size`.
#
# Input data consists of sequences of integers.
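# A minimal toy sketch (my own illustration, not used below) of how such a vocabulary could be built, with the special tokens taking the first ids:
special_tokens = ['<PAD>', '<EOS>']
corpus_words = ['the', 'cat', 'sat']
word_to_id = {w: i for i, w in enumerate(special_tokens + corpus_words)}
word_to_id  # {'<PAD>': 0, '<EOS>': 1, 'the': 2, 'cat': 3, 'sat': 4}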
x = [[5, 7, 8], [6, 3], [3], [1]]
# While such variable-length lists are convenient for humans to manipulate, RNNs prefer a different layout:
import helpers
xt, xlen = helpers.batch(x)
x
xt
# Sequences form columns of a matrix of size `[max_time, batch_size]`. Sequences shorter than the longest one are padded with zeros towards the end. This layout is called `time-major`. It is slightly more efficient than `batch-major`. We will use it for the rest of the tutorial.
xlen
# For some forms of dynamic layout it is useful to have a pointer to the terminals of every sequence in the batch, stored in a separate tensor (see the following tutorials).
# # Building a model
# ## Simple seq2seq
# Encoder starts with empty state and runs through the input sequence. We are not interested in encoder's outputs, only in its `final_state`.
#
# Decoder uses encoder's `final_state` as its `initial_state`. Its inputs are a batch-sized matrix with the `<EOS>` token at the 1st time step and `<PAD>` at the following steps. This is a rather crude setup, useful only for tutorial purposes. In practice, we would like to feed previously generated tokens after `<EOS>`.
#
# Decoder's outputs are mapped onto the output space using `[hidden_units x output_vocab_size]` projection layer. This is necessary because we cannot make `hidden_units` of decoder arbitrarily large, while our target space would grow with the size of the dictionary.
#
# This kind of encoder-decoder is forced to learn fixed-length representation (specifically, `hidden_units` size) of the variable-length input sequence and restore output sequence only from this representation.
# +
import numpy as np
import tensorflow as tf
import helpers
tf.reset_default_graph()
sess = tf.InteractiveSession()
# -
tf.__version__
# ### Model inputs and outputs
# First critical thing to decide: vocabulary size.
#
# Dynamic RNN models can be adapted to different batch sizes and sequence lengths without retraining (e.g. by serializing model parameters and Graph definitions via `tf.train.Saver`), but changing vocabulary size requires retraining the model.
# +
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20
encoder_hidden_units = 20
decoder_hidden_units = encoder_hidden_units
# -
# A nice way to understand a complicated function is to study its signature: its inputs and outputs. With pure functions, only the input-output relation matters.
#
# - `encoder_inputs` int32 tensor is shaped `[encoder_max_time, batch_size]`
# - `decoder_targets` int32 tensor is shaped `[decoder_max_time, batch_size]`
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
# We'll add one additional placeholder tensor:
# - `decoder_inputs` int32 tensor is shaped `[decoder_max_time, batch_size]`
decoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_inputs')
# Notice that all shapes are specified with `None`s (dynamic). We can use batches of any size with any number of timesteps. This is convenient and efficient; however, there are obvious constraints:
# - Feed values for all tensors should have the same `batch_size`
# - Decoder inputs and outputs (`decoder_inputs` and `decoder_targets`) should have the same `decoder_max_time`
# We actually don't want to feed `decoder_inputs` manually: they are a function of either `decoder_targets` or of previous decoder outputs during rollout. However, there are different ways to construct them. It might be illustrative to specify them explicitly for our first seq2seq implementation.
#
# During training, `decoder_inputs` will consist of the `<EOS>` token concatenated with `decoder_targets` along the time axis. In this way, we always pass the target sequence to the decoder as its history, regardless of what it actually predicts. This can introduce a distribution shift from training to prediction.
# In prediction mode, the model will receive the tokens it previously generated (via argmax over the logits), not the ground truth, which would be unknowable.
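# A hypothetical sketch (my addition, not used below): `decoder_inputs` could instead be built in-graph from `decoder_targets` by prepending an `<EOS>` row along the time axis (tensors are time-major, `[max_time, batch_size]`); dropping the last row keeps the lengths equal, since the targets already end in `<EOS>`.
# +
eos_row = tf.ones_like(decoder_targets[:1, :]) * EOS
decoder_train_inputs_sketch = tf.concat([eos_row, decoder_targets], axis=0)[:-1, :]
# -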
# ### Embeddings
#
# `encoder_inputs` and `decoder_inputs` are int32 tensors of shape `[max_time, batch_size]`, while encoder and decoder RNNs expect dense vector representation of words, `[max_time, batch_size, input_embedding_size]`. We convert one to another by using *word embeddings*. Specifics of working with embeddings are nicely described in [official tutorial on embeddings](https://www.tensorflow.org/tutorials/word2vec/).
# First we initialize embedding matrix. Initializations are random. We rely on our end-to-end training to learn vector representations for words jointly with encoder and decoder.
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
# We use `tf.nn.embedding_lookup` to *index the embedding matrix*: given word `4`, we represent it as row `4` of the embedding matrix.
# This operation is lightweight compared with the alternative approach of one-hot encoding word `4` as `[0,0,0,0,1,0,0,0,0,0]` (vocab size 10) and then multiplying it by the embedding matrix.
#
# Additionally, we don't need to compute gradients for any rows except row `4`.
#
# Encoder and decoder will share embeddings. It's all words, right? Well, digits in this case. In a real NLP application the embedding matrix can get very large, with 100k or even 1m rows.
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
decoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, decoder_inputs)
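# A quick NumPy sanity check (my addition): multiplying a one-hot vector by the embedding matrix recovers the same row as a direct lookup, which is why `embedding_lookup` is the cheap equivalent.
emb_check = np.random.uniform(-1.0, 1.0, size=(vocab_size, input_embedding_size))
np.allclose(np.eye(vocab_size)[4] @ emb_check, emb_check[4])  # -> True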
# ### Encoder
#
# The centerpiece of all things RNN in TensorFlow is `RNNCell` class and its descendants (like `LSTMCell`). But they are outside of the scope of this post — nice [official tutorial](https://www.tensorflow.org/tutorials/recurrent/) is available.
#
# `@TODO: RNNCell as a factory`
# +
encoder_cell = tf.contrib.rnn.LSTMCell(encoder_hidden_units)
encoder_outputs, encoder_final_state = tf.nn.dynamic_rnn(
encoder_cell, encoder_inputs_embedded,
dtype=tf.float32, time_major=True,
)
del encoder_outputs
# -
# We discard `encoder_outputs` because we are not interested in them within seq2seq framework. What we actually want is `encoder_final_state` — state of LSTM's hidden cells at the last moment of the Encoder rollout.
#
# `encoder_final_state` is also called the "thought vector". We will use it as the initial state for the Decoder. In seq2seq without attention this is the only point where the Encoder passes information to the Decoder. We hope that the backpropagation through time (BPTT) algorithm will tune the model to pass enough information through the thought vector for correct output sequence decoding.
encoder_final_state
# TensorFlow LSTM implementation stores state as a tuple of tensors.
# - `encoder_final_state.h` is the hidden state, i.e. the LSTM cell's output at the last time step
# - `encoder_final_state.c` is the internal cell state, which is carried between time steps and not emitted as output
# ### Decoder
# +
decoder_cell = tf.contrib.rnn.LSTMCell(decoder_hidden_units)
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
decoder_cell, decoder_inputs_embedded,
initial_state=encoder_final_state,
dtype=tf.float32, time_major=True, scope="plain_decoder",
)
# -
# Since we pass `encoder_final_state` as `initial_state` to the decoder, they should be compatible. This means the same cell type (`LSTMCell` in our case), the same number of `hidden_units`, and the same number of layers (a single layer here). I suppose this could be relaxed if we additionally passed `encoder_final_state` through a one-layer MLP.
# With the encoder, we were not interested in the cells' output. But the decoder's outputs are what we are actually after: we use them to get a distribution over the words of the output sequence.
#
# At this point the `decoder_cell` output is a `hidden_units`-sized vector at every timestep. However, for training and prediction we need logits of size `vocab_size`. A reasonable thing to do is to put a linear layer (a fully-connected layer without an activation function) on top of the LSTM output to obtain non-normalized logits. By convention this layer is called the projection layer.
# +
decoder_logits = tf.contrib.layers.linear(decoder_outputs, vocab_size)
decoder_prediction = tf.argmax(decoder_logits, 2)
# -
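# For illustration, an equivalent hand-rolled projection (a sketch, my addition, not used below): flatten the time and batch dimensions, apply a `[hidden_units x vocab_size]` weight matrix plus a bias, then restore the time-major shape.
# +
W_proj = tf.Variable(tf.random_uniform([decoder_hidden_units, vocab_size], -1.0, 1.0), dtype=tf.float32)
b_proj = tf.Variable(tf.zeros([vocab_size]), dtype=tf.float32)
outputs_flat = tf.reshape(decoder_outputs, [-1, decoder_hidden_units])
logits_flat = tf.matmul(outputs_flat, W_proj) + b_proj
decoder_logits_sketch = tf.reshape(
    logits_flat, tf.concat([tf.shape(decoder_outputs)[:2], [vocab_size]], axis=0))
# -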
# ### Optimizer
decoder_logits
# The RNN outputs a tensor of shape `[max_time, batch_size, hidden_units]`, which the projection layer maps onto `[max_time, batch_size, vocab_size]`. The `vocab_size` part of the shape is static, while `max_time` and `batch_size` are dynamic.
# +
stepwise_cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
labels=tf.one_hot(decoder_targets, depth=vocab_size, dtype=tf.float32),
logits=decoder_logits,
)
loss = tf.reduce_mean(stepwise_cross_entropy)
train_op = tf.train.AdamOptimizer().minimize(loss)
# -
sess.run(tf.global_variables_initializer())
# ### Test forward pass
#
# Did I say that deep learning is a game of shapes? When building a Graph, TF will throw errors when static shapes do not match. However, mismatches between dynamic shapes are often only discovered when we try to run something through the graph.
#
#
# So let's try running something. For that we need to prepare values we will feed into placeholders.
# ```
# this is key part where everything comes together
#
# @TODO: describe
# - how encoder shape is fixed to max
# - how decoder shape is arbitrary and determined by inputs, but should probably be longer than encoder's
# - how decoder input values are also arbitrary, and how we use GO token, and what are those 0s, and what can be used instead (shifted gold sequence, beam search)
# @TODO: add references
# ```
# +
batch_ = [[6], [3, 4], [9, 8, 7]]
batch_, batch_length_ = helpers.batch(batch_)
print('batch_encoded:\n' + str(batch_))
din_, dlen_ = helpers.batch(np.ones(shape=(3, 1), dtype=np.int32),
max_sequence_length=4)
print('decoder inputs:\n' + str(din_))
pred_ = sess.run(decoder_prediction,
feed_dict={
encoder_inputs: batch_,
decoder_inputs: din_,
})
print('decoder predictions:\n' + str(pred_))
# -
# Successful forward computation, everything is wired correctly.
# ## Training on the toy task
# We will teach our model to memorize and reproduce input sequence. Sequences will be random, with varying length.
#
# Since random sequences do not contain any structure, model will not be able to exploit any patterns in data. It will simply encode sequence in a thought vector, then decode from it.
# +
batch_size = 100
batches = helpers.random_sequences(length_from=3, length_to=8,
vocab_lower=2, vocab_upper=10,
batch_size=batch_size)
print('head of the batch:')
for seq in next(batches)[:10]:
print(seq)
# -
def next_feed():
batch = next(batches)
encoder_inputs_, _ = helpers.batch(batch)
decoder_targets_, _ = helpers.batch(
[(sequence) + [EOS] for sequence in batch]
)
decoder_inputs_, _ = helpers.batch(
[[EOS] + (sequence) for sequence in batch]
)
return {
encoder_inputs: encoder_inputs_,
decoder_inputs: decoder_inputs_,
decoder_targets: decoder_targets_,
}
# Given encoder_inputs `[5, 6, 7]`, decoder_targets would be `[5, 6, 7, 1]`, where 1 is for `EOS`, and decoder_inputs would be `[1, 5, 6, 7]` - decoder_inputs are lagged by 1 step, passing previous token as input at current step.
loss_track = []
# +
max_batches = 3001
batches_in_epoch = 1000
try:
for batch in range(max_batches):
fd = next_feed()
_, l = sess.run([train_op, loss], fd)
loss_track.append(l)
if batch == 0 or batch % batches_in_epoch == 0:
print('batch {}'.format(batch))
print(' minibatch loss: {}'.format(sess.run(loss, fd)))
predict_ = sess.run(decoder_prediction, fd)
for i, (inp, pred) in enumerate(zip(fd[encoder_inputs].T, predict_.T)):
print(' sample {}:'.format(i + 1))
print(' input > {}'.format(inp))
print(' predicted > {}'.format(pred))
if i >= 2:
break
print()
except KeyboardInterrupt:
print('training interrupted')
# -
# %matplotlib inline
import matplotlib.pyplot as plt
plt.plot(loss_track)
print('loss {:.4f} after {} examples (batch_size={})'.format(loss_track[-1], len(loss_track)*batch_size, batch_size))
# Something is definitely getting learned.
# # Limitations of the model
#
# We have no control over transitions of `tf.nn.dynamic_rnn`, it is unrolled in a single sweep. Some of the things that are not possible without such control:
#
# - We can't feed previously generated tokens without falling back to Python loops. This means *we cannot make efficient inference with dynamic_rnn decoder*!
#
# - We can't use attention, because attention conditions decoder inputs on its previous state
#
# Solution would be to use `tf.nn.raw_rnn` instead of `tf.nn.dynamic_rnn` for decoder, as we will do in tutorial #2.
# # Fun things to try (aka Exercises)
#
# - In the copy task, try increasing `max_sequence_size` and `vocab_upper`. Observe slower learning and general performance degradation.
#
# - For `decoder_inputs`, instead of the shifted target sequence `[<EOS> W X Y Z]`, try feeding `[<EOS> <PAD> <PAD> <PAD>]`, like we did when we tested the forward pass. Does it break things? Or just slow learning?
| 1-seq2seq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="M2pDjB7sQ4sC" colab_type="code" colab={}
import math
import random
random.seed(250)
class Connection:
def __init__(self, weight):
self.weight = weight
self.delta_weight = 0.0
class Neuron:
# Class constants
eta = 0.15 # Overall net learning rate [0.0, 1.0]
alpha = 0.5 # Multiplier of last weight change, momentum [0.0, 1.0]
def __init__(self, my_idx, num_outputs):
"""Constructor for Neuron
:param num_outputs: The number of out-links this neuron has
"""
self.output_val = 1.0
self.my_idx = my_idx
self.gradient = 0.0
# Each element in output weights is a Connection object
self.output_weights = list()
for i in range(num_outputs):
self.output_weights.append(Connection(self.random_weight()))
@staticmethod
def random_weight():
return random.random()
@staticmethod
def transfer_function(weighted_sum):
"""Performs activation function on the weighted_sum
:param weighted_sum: The weighted sum from the previous layer
:return: Transformed weighted sum
"""
# Performing tanh on the weighted sum. Range (-1.0, 1.0)
return math.tanh(weighted_sum)
@staticmethod
def transfer_function_derivative(weighted_sum):
"""Performs derivative of activation function on the weighted_sum
:param weighted_sum: The weighted sum from the previous layer
:return: Transformed weighted sum
"""
# derivative of tanh(x) is (1 - x^2)
return 1.0 - (weighted_sum ** 2)
def feed_forward(self, prev_layer):
"""Performs forward propagation by computing output value of a neuron
:param prev_layer: List of previous layer neurons
"""
weighted_sum = 0.0
for prev_neuron in prev_layer:
weighted_sum += prev_neuron.output_val * prev_neuron.output_weights[self.my_idx].weight
self.output_val = self.transfer_function(weighted_sum)
def calc_output_gradient(self, target_val):
"""Computes the target values for the output neuron
:param target_val:
:return:
"""
delta = target_val - self.output_val
self.gradient = delta * self.transfer_function_derivative(self.output_val)
def sum_dow(self, next_layer):
"""
:param next_layer:
:return:
"""
dow_sum = 0.0
# Sum our contributions of the errors at the nodes we feed
for neuron_idx in range(len(next_layer) - 1):
dow_sum += (self.output_weights[neuron_idx].weight * next_layer[neuron_idx].gradient)
return dow_sum
def calc_hidden_gradient(self, next_layer):
"""
:param next_layer:
:return:
"""
dow = self.sum_dow(next_layer)
self.gradient = dow * self.transfer_function_derivative(self.output_val)
def update_input_weights(self, prev_layer):
"""This method is called after back propagation to update the input weights
:param prev_layer: List of previous layer neurons
"""
# The weights to be updated are in the connection container
# in the neurons in the preceding layer
for neuron in prev_layer:
old_delta_weight = neuron.output_weights[self.my_idx].delta_weight
new_delta_weight = (neuron.eta * neuron.output_val * self.gradient) + (neuron.alpha * old_delta_weight)
neuron.output_weights[self.my_idx].delta_weight = new_delta_weight
neuron.output_weights[self.my_idx].weight += new_delta_weight
def __str__(self):
"""Returns the string representation of the object for printing
:return: The string representation of the object
"""
ret_str = 'My index:' + str(self.my_idx) + '\nOutput weights:'
for conn in self.output_weights:
ret_str += str(conn.weight) + ' '
return ret_str
class Net:
def __init__(self, topology):
"""Constructor to create a neural net
:param topology: List that contains the number of neurons in each layer
"""
# Represent the layers in the neural net
self.layers = list()
# Each layer contains a list of neurons
for layer_num, neuron_count in enumerate(topology):
neuron_list = list()
num_outputs = 0 if layer_num == len(topology) - 1 else topology[layer_num + 1]
for index in range(neuron_count + 1):
neuron_list.append(Neuron(index, num_outputs))
self.layers.append(neuron_list)
self.error = 0.0
def feed_forward(self, input_vals):
"""Performs forward propagation in the network
:param input_vals: List of input parameters
"""
assert len(input_vals) == len(self.layers[0]) - 1
# Assigning input values to the input neurons
for index in range(len(input_vals)):
self.layers[0][index].output_val = input_vals[index]
# Forward propagation
for index in range(1, len(self.layers)):
prev_layer = self.layers[index - 1]
for neuron_idx in range(len(self.layers[index]) - 1):
self.layers[index][neuron_idx].feed_forward(prev_layer)
def back_prop(self, target_vals):
"""Performs back propagation in the network
:param target_vals: List of output values
:return:
"""
# Calculate overall net error (RMS of output neuron errors)
output_layer = self.layers[len(self.layers) - 1]
error = 0.0
for neuron_idx in range(len(target_vals)):
delta = target_vals[neuron_idx] - output_layer[neuron_idx].output_val
error += (delta ** 2)
error /= (len(target_vals)) # Average error squared
self.error = math.sqrt(error) # RMS
# Calculate output layer gradients
for neuron_idx in range(0, len(output_layer) - 1):
output_layer[neuron_idx].calc_output_gradient(target_vals[neuron_idx])
# Calculate gradients on hidden layers
for layer_idx in range(len(self.layers) - 2, 0, -1):
hidden_layer = self.layers[layer_idx]
next_layer = self.layers[layer_idx + 1]
for hidden_neuron in hidden_layer:
hidden_neuron.calc_hidden_gradient(next_layer)
# For all layers from output to first hidden layer,
# update connection weights
for layer_idx in range(len(self.layers) - 1, 0, -1):
curr_layer = self.layers[layer_idx]
prev_layer = self.layers[layer_idx - 1]
for neuron_idx in range(0, len(curr_layer) - 1):
neuron = curr_layer[neuron_idx]
neuron.update_input_weights(prev_layer)
def get_results(self):
"""Returns the results
:return: An array of predicted output
"""
results = list()
output_layer = self.layers[len(self.layers) - 1]
for idx in range(0, len(output_layer) - 1):
results.append(output_layer[idx].output_val)
return results
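# A minimal usage sketch (my addition, not part of the original notebook): train the Net
# above on XOR and print its predictions. Topology [2, 4, 1] means 2 inputs, one hidden
# layer of 4 neurons, and 1 output; bias neurons are added internally by Net.
def _xor_demo():
    xor_data = [([0.0, 0.0], [0.0]), ([0.0, 1.0], [1.0]),
                ([1.0, 0.0], [1.0]), ([1.0, 1.0], [0.0])]
    net = Net([2, 4, 1])
    for _ in range(2000):
        for inputs, targets in xor_data:
            net.feed_forward(inputs)
            net.back_prop(targets)
    for inputs, targets in xor_data:
        net.feed_forward(inputs)
        print(inputs, '->', [round(v, 3) for v in net.get_results()], 'target:', targets)
# _xor_demo()  # uncomment to run the demo; how well it fits depends on the random init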
def build_model(input_shape):
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, Input, ZeroPadding2D, BatchNormalization, Activation, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
"""
    Arguments:
input_shape: A tuple representing the shape of the input of the model. shape=(image_width, image_height, #_channels)
Returns:
model: A Model object.
"""
# Define the input placeholder as a tensor with shape input_shape.
X_input = Input(input_shape) # shape=(?, 240, 240, 3)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((2, 2))(X_input) # shape=(?, 244, 244, 3)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X) # shape=(?, 238, 238, 32)
# MAXPOOL
X = MaxPooling2D((4, 4), name='max_pool0')(X) # shape=(?, 59, 59, 32)
# MAXPOOL
X = MaxPooling2D((4, 4), name='max_pool1')(X) # shape=(?, 14, 14, 32)
# FLATTEN X
X = Flatten()(X) # shape=(?, 6272)
# FULLYCONNECTED
X = Dense(1, activation='sigmoid', name='fc')(X) # shape=(?, 1)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='BrainDetectionModel')
return model
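# A usage sketch (my addition): the shape comments above assume 240x240 RGB inputs, so
# building and inspecting the model could look like this.
demo_model = build_model((240, 240, 3))
demo_model.summary()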
# + id="UfEadAZVozh3" colab_type="code" colab={}
| my_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Eland Demo Notebook
# + pycharm={"is_executing": false}
import eland as ed
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from elasticsearch import Elasticsearch
# Import standard test settings for consistent results
from eland.conftest import *
# -
# ## Compare Eland DataFrame vs pandas DataFrame
# Create an eland.DataFrame from a `flights` index
# + pycharm={"is_executing": false}
ed_flights = ed.DataFrame('localhost', 'flights')
# + pycharm={"is_executing": false}
type(ed_flights)
# -
# Compare to pandas DataFrame (created from the same data)
# + pycharm={"is_executing": false}
pd_flights = ed.eland_to_pandas(ed_flights)
# + pycharm={"is_executing": false}
type(pd_flights)
# -
# ## Attributes and underlying data
# ### DataFrame.columns
# + pycharm={"is_executing": false}
pd_flights.columns
# + pycharm={"is_executing": false}
ed_flights.columns
# -
# ### DataFrame.dtypes
# + pycharm={"is_executing": false}
pd_flights.dtypes
# + pycharm={"is_executing": false}
ed_flights.dtypes
# -
# ### DataFrame.select_dtypes
# + pycharm={"is_executing": false}
pd_flights.select_dtypes(include=np.number)
# + pycharm={"is_executing": false}
ed_flights.select_dtypes(include=np.number)
# -
# ### DataFrame.empty
# + pycharm={"is_executing": false}
pd_flights.empty
# + pycharm={"is_executing": false}
ed_flights.empty
# -
# ### DataFrame.shape
# + pycharm={"is_executing": false}
pd_flights.shape
# + pycharm={"is_executing": false}
ed_flights.shape
# -
# ### DataFrame.index
#
# Note, `eland.DataFrame.index` does not mirror `pandas.DataFrame.index`.
# + pycharm={"is_executing": false}
pd_flights.index
# + pycharm={"is_executing": false}
# NBVAL_IGNORE_OUTPUT
ed_flights.index
# + pycharm={"is_executing": false}
ed_flights.index.es_index_field
# -
# ### DataFrame.values
#
# Note, `eland.DataFrame.values` is not supported.
# + pycharm={"is_executing": false}
pd_flights.values
# + pycharm={"is_executing": false}
try:
ed_flights.values
except AttributeError as e:
print(e)
# -
# ## Indexing, iteration
# ### DataFrame.head
# + pycharm={"is_executing": false}
pd_flights.head()
# + pycharm={"is_executing": false}
ed_flights.head()
# -
# ### DataFrame.tail
# + pycharm={"is_executing": false}
pd_flights.tail()
# + pycharm={"is_executing": false}
ed_flights.tail()
# -
# ### DataFrame.keys
# + pycharm={"is_executing": false}
pd_flights.keys()
# + pycharm={"is_executing": false}
ed_flights.keys()
# -
# ### DataFrame.get
# + pycharm={"is_executing": false}
pd_flights.get('Carrier')
# + pycharm={"is_executing": false}
ed_flights.get('Carrier')
# + pycharm={"is_executing": false}
pd_flights.get(['Carrier', 'Origin'])
# -
# List input not currently supported by `eland.DataFrame.get`
# + pycharm={"is_executing": false}
try:
ed_flights.get(['Carrier', 'Origin'])
except TypeError as e:
print(e)
# -
# ### DataFrame.query
# + pycharm={"is_executing": false}
pd_flights.query('Carrier == "Kibana Airlines" & AvgTicketPrice > 900.0 & Cancelled == True')
# -
# `eland.DataFrame.query` requires qualifier on bool i.e.
#
# `ed_flights.query('Carrier == "Kibana Airlines" & AvgTicketPrice > 900.0 & Cancelled')` fails
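# For completeness, a small demonstration of that failure mode (my addition), using the same try/except pattern as the other unsupported-feature examples in this notebook:
# + pycharm={"is_executing": false}
try:
    ed_flights.query('Carrier == "Kibana Airlines" & AvgTicketPrice > 900.0 & Cancelled')
except Exception as e:
    print(e)
# -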
# + pycharm={"is_executing": false}
ed_flights.query('Carrier == "Kibana Airlines" & AvgTicketPrice > 900.0 & Cancelled == True')
# -
# #### Boolean indexing query
# + pycharm={"is_executing": false}
pd_flights[(pd_flights.Carrier=="Kibana Airlines") &
(pd_flights.AvgTicketPrice > 900.0) &
(pd_flights.Cancelled == True)]
# + pycharm={"is_executing": false}
ed_flights[(ed_flights.Carrier=="Kibana Airlines") &
(ed_flights.AvgTicketPrice > 900.0) &
(ed_flights.Cancelled == True)]
# -
# ## Function application, GroupBy & window
# ### DataFrame.aggs
# + pycharm={"is_executing": false}
pd_flights[['DistanceKilometers', 'AvgTicketPrice']].aggregate(['sum', 'min', 'std'])
# -
# `eland.DataFrame.aggregate` currently only supports numeric columns
# + pycharm={"is_executing": false}
ed_flights[['DistanceKilometers', 'AvgTicketPrice']].aggregate(['sum', 'min', 'std'])
# -
# ## Computations / descriptive stats
# ### DataFrame.count
# + pycharm={"is_executing": false}
pd_flights.count()
# + pycharm={"is_executing": false}
ed_flights.count()
# -
# ### DataFrame.describe
# + pycharm={"is_executing": false}
pd_flights.describe()
# -
# Values returned from `eland.DataFrame.describe` may vary due to results of Elasticsearch aggregations.
# + pycharm={"is_executing": false}
# NBVAL_IGNORE_OUTPUT
ed_flights.describe()
# -
# ### DataFrame.info
# + pycharm={"is_executing": false}
pd_flights.info()
# + pycharm={"is_executing": false}
# NBVAL_IGNORE_OUTPUT
ed_flights.info()
# -
# ### DataFrame.max, DataFrame.min, DataFrame.mean, DataFrame.sum
# #### max
# + pycharm={"is_executing": false}
pd_flights.max(numeric_only=True)
# -
# `eland.DataFrame.max,min,mean,sum` only aggregate numeric columns
# + pycharm={"is_executing": false}
ed_flights.max(numeric_only=True)
# -
# #### min
# + pycharm={"is_executing": false}
pd_flights.min(numeric_only=True)
# + pycharm={"is_executing": false}
ed_flights.min(numeric_only=True)
# -
# #### mean
# + pycharm={"is_executing": false}
pd_flights.mean(numeric_only=True)
# + pycharm={"is_executing": false}
ed_flights.mean(numeric_only=True)
# -
# #### sum
# + pycharm={"is_executing": false}
pd_flights.sum(numeric_only=True)
# + pycharm={"is_executing": false}
ed_flights.sum(numeric_only=True)
# -
# ### DataFrame.nunique
# + pycharm={"is_executing": false}
pd_flights[['Carrier', 'Origin', 'Dest']].nunique()
# + pycharm={"is_executing": false}
ed_flights[['Carrier', 'Origin', 'Dest']].nunique()
# -
# ### DataFrame.drop
# + pycharm={"is_executing": false}
pd_flights.drop(columns=['AvgTicketPrice',
'Cancelled',
'DestLocation',
'Dest',
'DestAirportID',
'DestCityName',
'DestCountry'])
# + pycharm={"is_executing": false}
ed_flights.drop(columns=['AvgTicketPrice',
'Cancelled',
'DestLocation',
'Dest',
'DestAirportID',
'DestCityName',
'DestCountry'])
# -
# ### Plotting
# + pycharm={"is_executing": false}
pd_flights.select_dtypes(include=np.number).hist(figsize=[10,10])
plt.show()
# + pycharm={"is_executing": false}
ed_flights.select_dtypes(include=np.number).hist(figsize=[10,10])
plt.show()
# -
# ### Elasticsearch utilities
# + pycharm={"is_executing": false}
ed_flights2 = ed_flights[(ed_flights.OriginAirportID == 'AMS') & (ed_flights.FlightDelayMin > 60)]
ed_flights2 = ed_flights2[['timestamp', 'OriginAirportID', 'DestAirportID', 'FlightDelayMin']]
ed_flights2 = ed_flights2.tail()
# + pycharm={"is_executing": false}
print(ed_flights2.es_info())
| eland/tests/tests_notebook/test_demo_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 01 - Data Analysis and Preparation
#
# This notebook covers the following tasks:
#
# 1. Perform exploratory data analysis and visualization.
# 2. Prepare the data for the ML task in BigQuery.
# 3. Generate and fix a `TFDV schema` for the source data.
# 4. Create a `Vertex Dataset` resource.
#
# ## Dataset
#
# The [Chicago Taxi Trips](https://pantheon.corp.google.com/marketplace/details/city-of-chicago-public-data/chicago-taxi-trips) dataset is one of [public datasets hosted with BigQuery](https://cloud.google.com/bigquery/public-data/), which includes taxi trips from 2013 to the present, reported to the City of Chicago in its role as a regulatory agency. The `taxi_trips` table size is 70.72 GB and includes more than 195 million records. The dataset includes information about the trips, like pickup and dropoff datetime and location, passengers count, miles travelled, and trip toll.
#
# The ML task is to predict whether a given trip will result in a tip > 20%.
# ## Setup
# ### Import libraries
# +
import os
import pandas as pd
import tensorflow as tf
import tensorflow_data_validation as tfdv
from google.cloud import bigquery
import matplotlib.pyplot as plt
from google.cloud import aiplatform as vertex_ai
from google.cloud import aiplatform_v1beta1 as vertex_ai_beta
# -
# ### Setup Google Cloud project
# +
PROJECT = '[your-project-id]' # Change to your project id.
REGION = 'us-central1' # Change to your region.
if PROJECT == "" or PROJECT is None or PROJECT == "[your-project-id]":
# Get your GCP project id from gcloud
# shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT = shell_output[0]
print("Project ID:", PROJECT)
print("Region:", REGION)
# -
# ### Set configurations
# +
BQ_DATASET_NAME = 'playground_us' # Change to your BQ dataset name.
BQ_TABLE_NAME = 'chicago_taxitrips_prep'
BQ_LOCATION = 'US'
DATASET_DISPLAY_NAME = 'chicago-taxi-tips'
RAW_SCHEMA_DIR = 'src/raw_schema'
# -
# ## 1. Explore the data in BigQuery
# +
# %%bigquery data
SELECT
CAST(EXTRACT(DAYOFWEEK FROM trip_start_timestamp) AS string) AS trip_dayofweek,
FORMAT_DATE('%A',cast(trip_start_timestamp as date)) AS trip_dayname,
COUNT(*) as trip_count,
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE
EXTRACT(YEAR FROM trip_start_timestamp) = 2015
GROUP BY
trip_dayofweek,
trip_dayname
ORDER BY
trip_dayofweek
;
# -
data
data.plot(kind='bar', x='trip_dayname', y='trip_count')
# ## 2. Create data for the ML task
#
# We add an `ML_use` column for pre-splitting the data, where 80% of the data items are set to `UNASSIGNED` while the other 20% are set to `TEST`.
#
# This column is used during training (custom and AutoML) to split the dataset for training and test.
#
# In the training phase, the `UNASSIGNED` items are split into `train` and `eval`. The `TEST` split will be used for the final model validation.
# ### Create destination BigQuery dataset
# !bq --location=US mk -d \
# $PROJECT:$BQ_DATASET_NAME
sample_size = 1000000
year = 2020
# +
sql_script = '''
CREATE OR REPLACE TABLE `@PROJECT.@DATASET.@TABLE`
AS (
WITH
taxitrips AS (
SELECT
trip_start_timestamp,
trip_seconds,
trip_miles,
payment_type,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude,
tips,
fare
FROM
`bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE 1=1
AND pickup_longitude IS NOT NULL
AND pickup_latitude IS NOT NULL
AND dropoff_longitude IS NOT NULL
AND dropoff_latitude IS NOT NULL
AND trip_miles > 0
AND trip_seconds > 0
AND fare > 0
AND EXTRACT(YEAR FROM trip_start_timestamp) = @YEAR
)
SELECT
trip_start_timestamp,
EXTRACT(MONTH from trip_start_timestamp) as trip_month,
EXTRACT(DAY from trip_start_timestamp) as trip_day,
EXTRACT(DAYOFWEEK from trip_start_timestamp) as trip_day_of_week,
EXTRACT(HOUR from trip_start_timestamp) as trip_hour,
trip_seconds,
trip_miles,
payment_type,
ST_AsText(
ST_SnapToGrid(ST_GeogPoint(pickup_longitude, pickup_latitude), 0.1)
) AS pickup_grid,
ST_AsText(
ST_SnapToGrid(ST_GeogPoint(dropoff_longitude, dropoff_latitude), 0.1)
) AS dropoff_grid,
ST_Distance(
ST_GeogPoint(pickup_longitude, pickup_latitude),
ST_GeogPoint(dropoff_longitude, dropoff_latitude)
) AS euclidean,
CONCAT(
ST_AsText(ST_SnapToGrid(ST_GeogPoint(pickup_longitude,
pickup_latitude), 0.1)),
ST_AsText(ST_SnapToGrid(ST_GeogPoint(dropoff_longitude,
dropoff_latitude), 0.1))
) AS loc_cross,
IF((tips/fare >= 0.2), 1, 0) AS tip_bin,
IF(RAND() <= 0.8, 'UNASSIGNED', 'TEST') AS ML_use
FROM
taxitrips
LIMIT @LIMIT
)
'''
# -
sql_script = sql_script.replace(
'@PROJECT', PROJECT).replace(
'@DATASET', BQ_DATASET_NAME).replace(
'@TABLE', BQ_TABLE_NAME).replace(
'@YEAR', str(year)).replace(
'@LIMIT', str(sample_size))
print(sql_script)
bq_client = bigquery.Client(project=PROJECT, location=BQ_LOCATION)
job = bq_client.query(sql_script)
_ = job.result()
# +
# %%bigquery --project {PROJECT}
SELECT ML_use, COUNT(*)
FROM playground_us.chicago_taxitrips_prep # Change to your BQ dataset and table names.
GROUP BY ML_use
# -
# ### Load a sample data to a Pandas DataFrame
# +
# %%bigquery sample_data --project {PROJECT}
SELECT * EXCEPT (trip_start_timestamp, ML_use)
FROM playground_us.chicago_taxitrips_prep # Change to your BQ dataset and table names.
# -
sample_data.head().T
sample_data.tip_bin.value_counts()
sample_data.euclidean.hist()
# ## 3. Generate raw data schema
#
# The [TensorFlow Data Validation (TFDV)](https://www.tensorflow.org/tfx/data_validation/get_started) data schema will be used to:
# 1. Identify the raw data types and shapes in the data transformation.
# 2. Create the serving input signature for the custom model.
# 3. Validate the new raw training data in the TFX pipeline.
stats = tfdv.generate_statistics_from_dataframe(
dataframe=sample_data,
stats_options=tfdv.StatsOptions(
label_feature='tip_bin',
weight_feature=None,
sample_rate=1,
num_top_values=50
)
)
tfdv.visualize_statistics(stats)
schema = tfdv.infer_schema(statistics=stats)
tfdv.display_schema(schema=schema)
raw_schema_location = os.path.join(RAW_SCHEMA_DIR, 'schema.pbtxt')
tfdv.write_schema_text(schema, raw_schema_location)
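# As a quick round-trip check (a sketch, not part of the original flow), the saved schema can be loaded back and used to validate statistics computed from new data:
reloaded_schema = tfdv.load_schema_text(raw_schema_location)
anomalies = tfdv.validate_statistics(statistics=stats, schema=reloaded_schema)
tfdv.display_anomalies(anomalies)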
# ## 4. Create Vertex Dataset resource
vertex_ai.init(
project=PROJECT,
location=REGION
)
# ### Create the dataset resource
# +
bq_uri = f"bq://{PROJECT}.{BQ_DATASET_NAME}.{BQ_TABLE_NAME}"
dataset = vertex_ai.TabularDataset.create(
display_name=DATASET_DISPLAY_NAME, bq_source=bq_uri)
dataset.gca_resource
# -
# ### Get the dataset resource
#
# The dataset resource is retrieved by display name. Because multiple datasets can have the same display name, we retrieve the most recently updated one.
# +
dataset = vertex_ai.TabularDataset.list(
filter=f"display_name={DATASET_DISPLAY_NAME}",
order_by="update_time")[-1]
print("Dataset resource name:", dataset.resource_name)
print("Dataset BigQuery source:", dataset.gca_resource.metadata['inputConfig']['bigquerySource']['uri'])
# -
| 01-dataset-management.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import geopandas as gpd
import pandas as pd
from datetime import datetime
states = gpd.read_file('raw/spatial/us-states/cb_2018_us_state_500k.shp')
states.plot()
states.crs
states.info()
states.head()
# +
# Join / merge data
# +
# 1 Read dataframe
# -
adv = pd.read_excel('raw/asd/ADV_AllData.xlsx')
adv.head()
# +
# 2 Pivot table - to make it clean we will create multiple tables per source type
# +
sources = adv['Source'].unique()
sourced_regions = {}
# -
for source in sources:
ds = adv[adv['Source'] == source].copy()
piv = ds.pivot(index='Location', columns='Year', values=['Denominator', 'Lower CI', 'Upper CI', 'Prevalence'])
piv.columns = [f'{col[0]}-{col[1]}' for col in piv.columns.values]
sourced_regions[source] = piv
# +
# 3 Now merge each table and save it into a spatial table for future analysis
# +
def create_fname(region_name, directory='processed-spatial/'):
dt = str(datetime.now())
fname = directory + region_name + '_' + dt + '.json'
return fname
spatial = []
for source in sources:
merged_geo = pd.merge(states, sourced_regions[source], left_on='STUSPS', right_on='Location')
filename = create_fname(source)
merged_geo.to_file(filename, driver='GeoJSON')
# -
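# A quick sanity check (my addition): read the last written GeoJSON back to confirm it round-trips.
gpd.read_file(filename).plot()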
# ---
| states.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import wget, json, os
from string import capwords
from pybtex.database import parse_string
import pybtex.errors
from mpcontribs.client import Client
pybtex.errors.set_strict_mode(False)
client = Client('your-api-key-here', host='ml-api.materialsproject.cloud')
fn = 'dataset_metadata.json'
if not os.path.exists(fn):
wget.download(f'https://raw.githubusercontent.com/hackingmaterials/matminer/master/matminer/datasets/{fn}')
data = json.load(open(fn, 'r'))
for name, info in data.items():
if not name.startswith('matbench_'):
continue
columns = {}
for col, text in info['columns'].items():
k = col.replace('_', '|').replace('-', '|').replace('(', ' ').replace(')', '')
columns[k] = text
project = {
'project': name,
'is_public': True,
'owner': '<EMAIL>',
'title': name, # TODO update and set long_title
'authors': '<NAME>, <NAME>',
'description': info['description'],
'other': {
'columns': columns,
'entries': info['num_entries']
},
'urls': {
'FigShare': info['url']
}
}
print(name)
for ref in info['bibtex_refs']:
bib = parse_string(ref, 'bibtex')
for key, entry in bib.entries.items():
key_is_doi = key.startswith('doi:')
url = 'https://doi.org/' + key.split(':', 1)[-1] if key_is_doi else entry.fields.get('url')
k = 'Zhuo2018' if key_is_doi else capwords(key.replace('_', ''))
if k.startswith('C2'):
k = 'Castelli2012'
elif k.startswith('Landolt'):
k = 'LB1997'
elif k == 'Citrine':
url = 'https://www.citrination.com'
if len(k) > 8:
k = k[:4] + k[-4:]
project['urls'][k] = url
try:
print(client.projects.create_entry(project=project).result())
except Exception as ex:
print(ex) # TODO should use get_entry to check existence -> use update_entry if project exists
| mpcontribs-portal/notebooks/ml.materialsproject.cloud/get_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:REL560] *
# language: python
# name: conda-env-REL560-py
# ---
# +
# Lesson from Intro to Cultural Analytics - https://melaniewalsh.github.io/Intro-Cultural-Analytics/05-Text-Analysis/08-Topic-Modeling-Text-Files.html
# -
path_to_mallet = "/Users/jeriwieringa/mallet-2.0.8/bin/mallet"
import little_mallet_wrapper
import seaborn
import glob
from pathlib import Path
directory = "../data/Example_texts/history/NYT-Obituaries/"
files = glob.glob(f"{directory}/*.txt")
files
help(little_mallet_wrapper)
help(glob)
import pandas
help(pandas.DataFrame)
# Documentation for Little Mallet Wrapper - https://github.com/maria-antoniak/little-mallet-wrapper
# +
training_data = []
for file in files:
with open(file, encoding="utf-8") as f:
text = f.read()
processed_text = little_mallet_wrapper.process_string(text, numbers="remove")
training_data.append(processed_text)
# +
original_texts = []
for file in files:
with open(file, encoding="utf-8") as f:
text = f.read()
original_texts.append(text)
# -
obit_titles = [Path(file).stem for file in files]
obit_titles
# # Create the Model of Topics
little_mallet_wrapper.print_dataset_stats(training_data)
num_topics = 15
training_data = training_data
output_directory_path = '../data/outputs/topic-model-output/NYT-Obit'
# +
# First create the output directory if it does not already exist
Path(f"{output_directory_path}").mkdir(parents=True, exist_ok=True)
# Then create paths for all of the files Mallet will output
path_to_training_data = f"{output_directory_path}/training.txt"
path_to_formatted_training_data = f"{output_directory_path}/mallet.training"
path_to_model = f"{output_directory_path}/mallet.model.{str(num_topics)}"
path_to_topic_keys = f"{output_directory_path}/mallet.topic_keys.{str(num_topics)}"
path_to_topic_distributions = f"{output_directory_path}/mallet.topic_distributions.{str(num_topics)}"
# -
little_mallet_wrapper.quick_train_topic_model(path_to_mallet,
output_directory_path,
num_topics,
training_data)
# # View Results
topics = little_mallet_wrapper.load_topic_keys(path_to_topic_keys)
for topic_number, topic in enumerate(topics):
print(f"🌟 Topic {topic_number} 🌟\n\n{topic}\n")
topic_distributions = little_mallet_wrapper.load_topic_distributions(path_to_topic_distributions)
topic_distributions[32]
# +
obituary_to_check = "1962-Marilyn-Monroe"
obit_number = obit_titles.index(obituary_to_check)
# -
print(f"Topic Distributions for {obit_titles[obit_number]}\n")
for topic_number, (topic, topic_distribution) in enumerate(zip(topics, topic_distributions[obit_number])):
print(f"🌟Topic {topic_number} {topic[:6]} 🌟\nProbability: {round(topic_distribution, 3)}\n")
# # Visualize Result
import random
target_labels = random.sample(obit_titles, 10)
little_mallet_wrapper.plot_categories_by_topics_heatmap(obit_titles,
topic_distributions,
topics,
output_directory_path + '/categories_by_topics.pdf',
target_labels=target_labels,
dim= (13,9)
)
# # Display top titles per topic
training_data_obit_titles = dict(zip(training_data, obit_titles))
training_data_original_text = dict(zip(training_data, original_texts))
def display_top_titles_per_topic(topic_number=0, number_of_documents=5):
print(f"🌟Topic {topic_number}🌟\n\n{topics[topic_number]}\n")
for probability,document in little_mallet_wrapper.get_top_docs(training_data, topic_distributions, topic_number, n=number_of_documents):
print(round(probability, 4), training_data_obit_titles[document] + "\n")
return
display_top_titles_per_topic(topic_number=0, number_of_documents=5)
# What would you label this topic?
# # Display Topic Words in Context
# +
from IPython.display import Markdown, display
import re
def display_bolded_topic_words_in_context(topics=topics, topic_number=3, number_of_documents=3, custom_words=None):
print(f"🌟Topic {topic_number}🌟\n\n{topics[topic_number]}\n")
for probability, document in little_mallet_wrapper.get_top_docs(training_data, topic_distributions, topic_number, n=number_of_documents):
probability = f"🌟🌟🌟\n\n**{probability}**"
obit_title = f"**{training_data_obit_titles[document]}**"
original_text = training_data_original_text[document]
topic_words = topics[topic_number]
topic_words = custom_words if custom_words != None else topic_words
for word in topic_words:
if word in original_text:
original_text = re.sub(f"\\b{word}\\b", f"**{word}**", original_text)
display(Markdown(probability)), display(Markdown(obit_title)), display(Markdown(original_text))
return
# -
display_bolded_topic_words_in_context(topic_number=3, number_of_documents=3)
| code/Topic-Model-with-MALLET.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Scientist Nanodegree
# ## Supervised Learning
# ## Project: Finding Donors for *CharityML*
# In this project, I will employ several supervised algorithms of my choice to accurately model individuals' income using data collected from the 1994 U.S. Census. I will then choose the best candidate algorithm from preliminary results and further optimize this algorithm to best model the data. My goal with this implementation is to construct a model that accurately predicts whether an individual makes more than $50,000. This sort of task can arise in a non-profit setting, where organizations survive on donations. Understanding an individual's income can help a non-profit better understand how large of a donation to request, or whether or not they should reach out to begin with. While it can be difficult to determine an individual's general income bracket directly from public sources, we can (as we will see) infer this value from other publically available features.
#
# The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Census+Income). The dataset was donated by <NAME> and <NAME>, after being published in the article _"Scaling Up the Accuracy of Naive-Bayes Classifiers: A Decision-Tree Hybrid"_. One can find the article by <NAME> [online](https://www.aaai.org/Papers/KDD/1996/KDD96-033.pdf). The data we investigate here consists of small changes to the original dataset, such as removing the `'fnlwgt'` feature and records with missing or ill-formatted entries.
# +
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualization code visuals.py
import visuals as vs
# Pretty display for notebooks
# %matplotlib inline
# Load the Census dataset
data = pd.read_csv("census.csv")
data_test = pd.read_csv("test_census.csv")
# Success - Display the first record
display(data.head(5))
# -
# ### Implementation: Data Exploration
#
# A cursory investigation of the dataset will determine how many individuals fit into either group, and will tell me about the percentage of these individuals making more than \$50,000. In the code cell below, I compute the following:
# - The total number of records, `'n_records'`
# - The number of individuals making more than \$50,000 annually, `'n_greater_50k'`.
# - The number of individuals making at most \$50,000 annually, `'n_at_most_50k'`.
# - The percentage of individuals making more than \$50,000 annually, `'greater_percent'`.
# +
# TODO: Total number of records
n_records = data.shape[0]
# TODO: Number of records where individual's income is more than $50,000
n_greater_50k = np.sum(data['income']=='>50K')
# TODO: Number of records where individual's income is at most $50,000
n_at_most_50k = np.sum(data['income']=='<=50K')
# TODO: Percentage of individuals whose income is more than $50,000
greater_percent = 100 * n_greater_50k / n_records
# Print the results
print("Total number of records: {}".format(n_records))
print("Individuals making more than $50,000: {}".format(n_greater_50k))
print("Individuals making at most $50,000: {}".format(n_at_most_50k))
print("Percentage of individuals making more than $50,000: {}%".format(greater_percent))
# -
# ** Featureset Exploration **
#
# * **age**: continuous.
# * **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
# * **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
# * **education-num**: continuous.
# * **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
# * **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
# * **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
# * **race**: Black, White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other.
# * **sex**: Female, Male.
# * **capital-gain**: continuous.
# * **capital-loss**: continuous.
# * **hours-per-week**: continuous.
# * **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
# ----
# ## Data Preprocessing
# Before data can be used as input for machine learning algorithms, it must be cleaned, formatted, and restructured. Fortunately, for this dataset there are no invalid or missing entries we must deal with; however, there are some qualities about certain features that must be adjusted. This preprocessing can help tremendously with the outcome and predictive power of nearly all learning algorithms.
# ### Transforming Skewed Continuous Features
# +
# Split the data into features and target label
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize skewed continuous features of original data
vs.distribution(data)
# -
# For highly-skewed feature distributions such as `'capital-gain'` and `'capital-loss'`, it is common practice to apply a logarithmic transformation on the data so that the very large and very small values do not negatively affect the performance of a learning algorithm. Using a logarithmic transformation significantly reduces the range of values caused by outliers. The logarithm of `0` is undefined, so we must translate the values by a small amount above `0` to apply the logarithm successfully.
#
# Run the code cell below to perform a transformation on the data and visualize the results. Again, note the range of values and how they are distributed.
# +
# Log-transform the skewed features
skewed = ['capital-gain', 'capital-loss']
features_log_transformed = pd.DataFrame(data = features_raw)
features_log_transformed[skewed] = features_raw[skewed].apply(lambda x: np.log(x + 1))
# Log-transform the skewed features for Kaggle Prediction
features_log_transformed_test = pd.DataFrame(data = data_test)
features_log_transformed_test[skewed] = data_test[skewed].apply(lambda x: np.log(x + 1))
# Visualize the new log distributions
vs.distribution(features_log_transformed, transformed = True)
# -
# ### Normalizing Numerical Features
# In addition to performing transformations on features that are highly skewed, it is often good practice to perform some type of scaling on numerical features. Normalization ensures that each feature is treated equally when applying supervised learners.
# +
# Import sklearn.preprocessing.StandardScaler
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_log_minmax_transform = pd.DataFrame(data = features_log_transformed)
features_log_minmax_transform[numerical] = scaler.fit_transform(features_log_transformed[numerical])
# apply scaler to the Kaggle test data features
features_log_minmax_transform_test = pd.DataFrame(data = features_log_transformed_test)
features_log_minmax_transform_test[numerical] = scaler.transform(features_log_transformed_test[numerical])
# Show an example of a record with scaling applied
display(features_log_minmax_transform.head(n = 5))
# -
# ### One-hot encode
# +
# TODO: One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(features_log_minmax_transform)
# TODO: Encode the 'income_raw' data to numerical values
income = income_raw.apply(lambda x: 0 if x == "<=50K" else 1)
#One-hot encode the 'features_log_minmax_transform_test' data using pandas.get_dummies()
features_final_test = pd.get_dummies(features_log_minmax_transform_test)
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} total features after one-hot encoding.".format(len(encoded)))
# Uncomment the following line to see the encoded feature names
# print encoded
display(features_final.head(n = 5))
# -
# ### Shuffle and Split Data
# Now all _categorical variables_ have been converted into numerical features, and all numerical features have been normalized. We will now split the data (both features and their labels) into training and test sets. 80% of the data will be used for training and 20% for testing.
# +
# Import train_test_split
from sklearn.model_selection import train_test_split
# Split the 'features' and 'income' data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features_final,
income,
test_size = 0.2,
random_state = 0)
# Show the results of the split
print("Training set has {} samples.".format(X_train.shape[0]))
print("Testing set has {} samples.".format(X_test.shape[0]))
# -
# ----
# ## Evaluating Model Performance
# In this section, I will investigate four different algorithms, and determine which is best at modeling the data. Three of these algorithms will be supervised learners of my choice, and the fourth algorithm is a *naive predictor*.
# ### Naive Predictor Performace
# * Choosing a model that always predicts that an individual makes more than $50,000, I check what that model's accuracy and F-score would be on this dataset.
# +
TP = np.sum(income) # Counting the ones as this is the naive case. Note that 'income' is the 'income_raw' data
# encoded to numerical values in the data preprocessing step.
FP = income.count() - TP # Specific to the naive case
TN = 0 # No predicted negatives in the naive case
FN = 0 # No predicted negatives in the naive case
# TODO: Calculate accuracy, precision and recall
accuracy = (TP+TN)/(TP+FP+TN+FN)
recall = (TP)/(TP+FN)
precision = (TP)/(TP+FP)
# TODO: Calculate F-score using the formula above for beta = 0.5 and correct values for precision and recall.
fscore = (1+0.5**2)*precision*recall/(0.5**2*precision+recall)
# Print the results
print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore))
# -
# ### Model Application
# Describing three of the supervised learning models that I think will be appropriate for this problem to test on the census data
# ### RandomForestClassifier:
# ##### Real life application:
# In finance sector this classifier can be used for the detection of customers that are more likely to repay their debt on time, or use a bank's services more frequently. It is also used to detect fraudsters out to scam the bank.
# ##### Strengths:
# One of the biggest problems in machine learning is overfitting, but most of the time this won’t happen with a random forest. Random forest is also great because of its ability to deal with missing values, and its default hyperparameters often produce a good prediction result.
# ##### Weakness:
# The main limitation of random forest is that a large number of trees can make the algorithm too slow and ineffective for real-time predictions. In general, these algorithms are fast to train, but quite slow to create predictions once they are trained.
# ##### Project Usefulness:
# Random forest's simplicity makes it hard to build a “bad” model with it. It also provides a good indicator of the importance it assigns to the dataset features, which is something we want to find out in this project.
#
# [Reference](https://builtin.com/data-science/random-forest-algorithm)
#
#
# ### AdaBoostClassifier:
# ##### Real life application:
# It can be used for predicting customer churn and classifying the types of topics customers are talking/calling about.
# ##### Strengths:
# One of the many advantages of the AdaBoost Algorithm is it is fast, simple and easy to program. Also, it has the flexibility to be combined with any machine learning algorithm.
# ##### Weakness:
# If the weak classifiers used by AdaBoost are too weak, it can lead to low margins and overfitting of the data.
# ##### Project Usefulness:
# There is a class imbalance in our data, and algorithms such as AdaBoost are a great option for dealing with it.
#
# [Reference](https://www.educba.com/adaboost-algorithm/)
#
# ### Gaussian Naive Bayes:
# ##### Real life application:
# It can be used to build a Spam filtering module or perform Sentiment Analysis in social media analysis, to identify positive and negative customer sentiments.
# ##### Strengths:
# It is easy and fast to predict the class of the test data set, and it also performs well in multi-class prediction. Moreover, when the assumption of independence holds, a Naive Bayes classifier performs well.
# ##### Weakness:
# Its main weakness is the assumption of independent predictors. In real life, it is almost impossible to get a set of predictors that are completely independent. Naive Bayes is also known to be a poor estimator for many datasets.
# ##### Project Usefulness:
# The dataset is pretty large, and Naive Bayes is fast to train compared to most other algorithms. If the model provides good results, it might be a great candidate for the final predictions due to its speed.
#
# [Reference](https://towardsdatascience.com/all-about-naive-bayes-8e13cef044cf)
#
#
# ### Implementation - Creating a Training and Predicting Pipeline
#
# I created a training and predicting pipeline that allows me to quickly and effectively train models using various sizes of training data and perform predictions on the testing data.
# TODO: Import two metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score, accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
'''
inputs:
- learner: the learning algorithm to be trained and predicted on
- sample_size: the size of samples (number) to be drawn from training set
- X_train: features training set
- y_train: income training set
- X_test: features testing set
- y_test: income testing set
'''
results = {}
# TODO: Fit the learner to the training data using slicing with 'sample_size' using .fit(training_features[:], training_labels[:])
start = time() # Get start time
learner = learner.fit(X_train[:sample_size], y_train[:sample_size])
end = time() # Get end time
# TODO: Calculate the training time
results['train_time'] = end - start
# TODO: Get the predictions on the test set(X_test),
# then get predictions on the first 300 training samples(X_train) using .predict()
start = time() # Get start time
predictions_train = learner.predict(X_train[:300])
predictions_test = learner.predict(X_test)
end = time() # Get end time
# TODO: Calculate the total prediction time
results['pred_time'] = end - start
# TODO: Compute accuracy on the first 300 training samples which is y_train[:300]
results['acc_train'] = accuracy_score(y_train[:300], predictions_train)
# TODO: Compute accuracy on test set using accuracy_score()
results['acc_test'] = accuracy_score(y_test, predictions_test)
# TODO: Compute F-score on the the first 300 training samples using fbeta_score()
results['f_train'] = fbeta_score(y_train[:300], predictions_train,beta=0.5)
# TODO: Compute F-score on the test set which is y_test
results['f_test'] = fbeta_score(y_test, predictions_test,beta=0.5)
# Success
print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
# Return the results
return results
# ### Implementation: Initial Model Evaluation
# +
# TODO: Import the three supervised learning models from sklearn
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
# TODO: Initialize the three models
clf_A = AdaBoostClassifier(random_state=0)
clf_B = RandomForestClassifier(random_state=0)
clf_C = GaussianNB()
# TODO: Calculate the number of samples for 1%, 10%, and 100% of the training data
# HINT: samples_100 is the entire training set i.e. len(y_train)
# HINT: samples_10 is 10% of samples_100 (ensure to set the count of the values to be `int` and not `float`)
# HINT: samples_1 is 1% of samples_100 (ensure to set the count of the values to be `int` and not `float`)
samples_100 = len(y_train)
samples_10 = int(samples_100*0.1)
samples_1 = int(samples_100*0.01)
# Collect results on the learners
results = {}
for clf in [clf_A, clf_B, clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = \
train_predict(clf, samples, X_train, y_train, X_test, y_test)
# Run metrics visualization for the three supervised learning models chosen
vs.evaluate(results, accuracy, fscore)
# -
# ## Improving Results
# I will choose from the three supervised learning models the *best* model to use on the census data. I will then perform a grid search optimization for the model over the entire training set by tuning at least one parameter to improve upon the untuned model's F-score.
# ## Choosing the Best Model
#
# * Based on the results above, AdaBoost is the best model for the task out of the three models.
#
# * It is the classifier that performed best on the testing data, in terms of both accuracy and F-score.
#
# * Moreover, it takes only a reasonable amount of time to train on the full dataset. Although GaussianNB had better training times, its accuracy and F-score were considerably lower.
#
# * AdaBoost uses a decision tree of depth 1 as its base classifier, which can handle categorical and numerical data. Because of its reasonable training time, it should scale well even if we have more data.
# ## Describing the Model in Layman's Terms
#
# AdaBoost works by combining many weak learners to create an ensemble of learners that can predict whether an individual earns above 50K or not.
#
# For this project, the weak learners were decision trees. They were built from the individual “features” we have been provided, forming a set of simple rules that can predict whether an individual earns above 50K or not.
#
# During the training process, AdaBoost prioritizes the data points predicted incorrectly in previous rounds to ensure they are predicted correctly in the next round.
#
# The training algorithm repeats the process for a specified number of rounds, or until we can’t improve the predictions further. During each round, the model finds the best learner to split the data, and this learner is incorporated into the ensemble.
#
# All the learners are then combined into one model, where they each vote to predict whether a person earns more than 50K or not. A majority vote usually determines the final prediction.
#
# This model can be used to predict the same information for a potential new donor and tell whether they earn more than 50K or not, which hints at their likelihood of donating to charity.
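#
# As a rough, optional illustration of the voting idea described above (not part of the original project flow), the staged test accuracy of a small AdaBoost ensemble shows the score improving as more weak learners join the vote; this sketch assumes the `X_train`/`X_test` split and the `AdaBoostClassifier` import created earlier, and `sketch_ada` is just a throwaway name.
# +
sketch_ada = AdaBoostClassifier(n_estimators=50, random_state=0).fit(X_train, y_train)
staged_acc = list(sketch_ada.staged_score(X_test, y_test))  # test accuracy after 1, 2, ..., 50 weak learners
print('accuracy with 1 learner: {:.4f} | with 50 learners: {:.4f}'.format(staged_acc[0], staged_acc[-1]))
# -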
# ### Implementation: Model Tuning
# +
# TODO: Import 'GridSearchCV', 'make_scorer', and any other necessary libraries
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
# TODO: Initialize the classifier
clf = AdaBoostClassifier(random_state=1)
# TODO: Create the parameters list you wish to tune, using a dictionary if needed.
# HINT: parameters = {'parameter_1': [value1, value2], 'parameter_2': [value1, value2]}
parameters = {'n_estimators':[60, 120],
'learning_rate':[0.5, 1, 2]}
# TODO: Make an fbeta_score scoring object
scorer = make_scorer(fbeta_score,beta=0.5)
# TODO: Perform grid search on the classifier using 'scorer' as the scoring method
grid_obj = GridSearchCV(clf, parameters, scoring = scorer)
# TODO: Fit the grid search object to the training data and find the optimal parameters using fit()
grid_fit = grid_obj.fit(X_train, y_train)
# Get the estimator
best_clf = grid_fit.best_estimator_
# Make predictions using the unoptimized and optimized models
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
# Report the before-and-after scores
print("Unoptimized model\n------")
print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5)))
print("\nOptimized Model\n------")
print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
# -
# ### Final Model Evaluation
# #### Results:
#
# | Metric | Unoptimized Model | Optimized Model |
# | :------------: | :---------------: | :-------------: |
# | Accuracy Score | 0.8576 | 0.8612 |
# | F-score | 0.7246 | 0.7316 |
#
# * The accuracy score on the testing data for the optimized model is 0.8612. The F-score on the testing data for the optimized model is 0.7316.
#
# * Both the accuracy score and the F-score of the optimized model have improved slightly.
#
# * They are considerably better compared to the naive predictor benchmarks. The difference in both accuracy score and the F-score is over 45 percent.
#
# ----
# ## Feature Importance
#
# An important task when performing supervised learning on a dataset like the census data we study here is determining which features provide the most predictive power. By focusing on the relationship between only a few crucial features and the target label we simplify our understanding of the phenomenon, which is almost always a useful thing to do. In the case of this project, that means we wish to identify a small number of features that most strongly predict whether an individual makes at most or more than \$50,000.
#
# Choose a scikit-learn classifier (e.g., AdaBoost, random forests) that has a `feature_importances_` attribute, which ranks the importance of the features according to the chosen classifier. In the next Python cell, fit this classifier to the training set and use this attribute to determine the top 5 most important features for the census dataset.
# ### Implementation - Extracting Feature Importance
# I will find the `feature_importances_` attribute for the best model. This ranks the importance of each feature when making predictions based on the chosen algorithm.
# +
# TODO: Extract the feature importances using .feature_importances_
importances = best_clf.feature_importances_
# Plot
vs.feature_plot(importances, X_train, y_train)
# -
# ### Feature Selection
# Try training the model with the reduced data set that only has the attributes ranked in the top 5 by feature importance.
# +
# Import functionality for cloning a model
from sklearn.base import clone
# Reduce the feature space
X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]
X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]
# Train on the "best" model found from grid search earlier
clf = (clone(best_clf)).fit(X_train_reduced, y_train)
# Make new predictions
reduced_predictions = clf.predict(X_test_reduced)
# Report scores from the final model using both versions of data
print("Final Model trained on full data\n------")
print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
print("\nFinal Model trained on reduced data\n------")
print("Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5)))
# -
# ## Final Prediction
# +
#fill in the values that are NaN
final_test = features_final_test.interpolate()
#predict using best_clf for Kaggle test dataset
predictions = best_clf.predict(final_test.drop('Unnamed: 0', axis = 1))
#Create pandas dataframe to be submitted to Kaggle
final_pred = pd.DataFrame({ 'id' : final_test['Unnamed: 0'], 'income': predictions })
final_pred = final_pred.set_index('id')
#convert final prediction to CSV
final_pred.to_csv('Prediction.csv')
| finding_donors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <img src = "https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width = 400>
#
# # From Requirements to Collection
#
# Estimated time needed: **15** minutes
#
# ## Objectives
#
# After completing this lab you will be able to:
#
# - Understand Data Requirements
# - Explore the stages in Data Collection
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Table of Contents
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#
# 1. [Data Requirements](#0)<br>
# 2. [Data Collection](#2)<br>
# </div>
# <hr>
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Data Requirements <a id="0"></a>
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
#
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig1_flowchart_data_requirements.png" width=500>
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In the videos, we learned that the chosen analytic approach determines the data requirements. Specifically, the analytic methods to be used require certain data content, formats and representations, guided by domain knowledge.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In the **From Problem to Approach Lab**, we determined that automating the process of determining the cuisine of a given recipe or dish is potentially possible using the ingredients of the recipe or the dish. In order to build a model, we need extensive data of different cuisines and recipes.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Identifying the required data fulfills the data requirements stage of the data science methodology.
#
# * * *
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Data Collection <a id="2"></a>
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig2_flowchart_data_collection.png" width=500>
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# In the initial data collection stage, data scientists identify and gather the available data resources. These can be in the form of structured, unstructured, and even semi-structured data relevant to the problem domain.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### Web Scraping of Online Food Recipes
#
# A researcher named <NAME> scraped tens of thousands of food recipes (cuisines and ingredients) from three different websites, namely:
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig3_allrecipes.png" width=500>
#
# [www.allrecipes.com](http://www.allrecipes.com?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DS0103EN-SkillsNetwork-20083987&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
#
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig4_epicurious.png" width=500>
#
# [www.epicurious.com](http://www.epicurious.com?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DS0103EN-SkillsNetwork-20083987&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
#
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab2_fig5_menupan.png" width=500>
#
# [www.menupan.com](http://www.menupan.com?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DS0103EN-SkillsNetwork-20083987&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# For more information on <NAME> and his research, you can read his paper on [Flavor Network and the Principles of Food Pairing](http://yongyeol.com/papers/ahn-flavornet-2011.pdf?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DS0103EN-SkillsNetwork-20083987&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DS0103EN-SkillsNetwork-20083987&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Luckily, we will not need to carry out any data collection as the data that we need to meet the goal defined in the business understanding stage is readily available.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# #### We have already acquired the data and placed it on an IBM server. Let's download the data and take a look at it.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <strong>Important note:</strong> Please note that you are not expected to know how to program in python. The following code is meant to illustrate the stage of data collection, so it is totally fine if you do not understand the individual lines of code. There will be a full course in this certificate on programming in python, <a href="http://cocl.us/PY0101EN_DS0103EN_LAB2_PYTHON_edX">Python for Data Science</a>, which will teach you how to program in Python if you decide to complete this certificate.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Using this notebook:
#
# To run any of the following cells of code, you can type **Shift + Enter** to execute the code in a cell.
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Get the version of Python installed.
#
# + button=false new_sheet=false run_control={"read_only": false}
# check Python version
# !python -V
# -
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Read the data from the IBM server into a _pandas_ dataframe.
#
# + button=false new_sheet=false run_control={"read_only": false}
import pandas as pd # download library to read data into dataframe
pd.set_option('display.max_columns', None)
recipes = pd.read_csv("https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/data/recipes.csv")
print("Data read into dataframe!") # takes about 30 seconds
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Show the first few rows.
#
# + button=false new_sheet=false run_control={"read_only": false}
recipes.head()
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Get the dimensions of the dataframe.
#
# + button=false new_sheet=false run_control={"read_only": false}
recipes.shape
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# So our dataset consists of 57,691 recipes. Each row represents a recipe, and for each recipe, the corresponding cuisine is documented as well as whether 384 ingredients exist in the recipe or not beginning with almond and ending with zucchini.
#
# * * *
#
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# Now that the data collection stage is complete, data scientists typically use descriptive statistics and visualization techniques to better understand the data and get acquainted with it. Data scientists, essentially, explore the data to:
#
# - understand its content,
# - assess its quality,
# - discover any interesting preliminary insights, and,
# - determine whether additional data is necessary to fill any gaps in the data.
#
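# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# As a quick, purely illustrative example of the exploration steps listed above (not part of the original lab), a couple of pandas calls on the `recipes` dataframe give a first look at its content and quality.
#
# + button=false new_sheet=false run_control={"read_only": false}
recipes.info()                 # column names, dtypes, and non-null counts
recipes.isnull().sum().sum()   # total number of missing entries, as a quick quality check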
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Thank you for completing this lab!
#
# This notebook is part of a course called _The Data Science Method_. If you accessed this notebook outside the course, you can take this course, online by clicking [here](https://cocl.us/DS0103EN-Review-From-Requirements-to-Collection).
#
# ## Author
#
# <a href="https://www.linkedin.com/in/aklson/" target="_blank"><NAME></a>
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description |
# | ----------------- | ------- | ---------- | ---------------------------------- |
# | 2020-08-27 | 2.0 | Lavanya | Moved lab to course repo in GitLab |
#
# <hr>
#
# ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. </h3>
#
| Data_Science_Specialization_IBM/Introduction_to_Data_Science_Specialization_IBM/Data_Science_Methodology/week1_from_problem_to_approach/DS0103EN-2-2-1-From-Requirements-to-Collection-v2.0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/melisaguler/hu-bby261-2020/blob/main/proje_03.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="3RunnM29DXFt" outputId="544eab5d-c293-46f4-c9f9-c81f4c330c9f"
import matplotlib.pyplot as plt  # pyplot is the interface used for plotting below
from google.colab import drive
drive.mount("/gdrive", force_remount=True)
import cv2
import numpy as nm
img = cv2.imread("/gdrive/My Drive/odev/4.jpg")      # read the image in color
col_İmg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)      # convert it to grayscale
print(col_İmg.dtype)
print(col_İmg.shape)
print(col_İmg)
import tensorflow as tf
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
import matplotlib.pyplot as plt
image_index = 7 # You may select anything up to 60,000
print(y_train[image_index]) # print the label of this training image
plt.imshow(x_train[image_index], cmap='Greys')
print(x_train[image_index])
x_train.shape
# Reshaping the array to 4-dims so that it can work with the Keras API
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
# Making sure that the values are float so that we can get decimal points after division
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Normalizing the RGB codes by dividing it to the max RGB value.
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print('Number of images in x_train', x_train.shape[0])
print('Number of images in x_test', x_test.shape[0])
# Importing the required Keras modules containing model and layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
# Creating a Sequential Model and adding the layers
model = Sequential()
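# first layer: 28 convolutional filters of size 3x3 applied to the 28x28x1 grayscale input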
model.add(Conv2D(28, kernel_size=(3,3), input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # Flattening the 2D arrays for fully connected layers
model.add(Dense(128, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10,activation=tf.nn.softmax))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x=x_train,y=y_train, epochs=10)
model.evaluate(x_test, y_test)
# the network expects a normalized 28x28 input; resize and scale the grayscale drive image first
col_İmg = cv2.resize(col_İmg, (28, 28)).astype('float32') / 255
plt.imshow(col_İmg.reshape(28, 28), cmap='Greys')
pred = model.predict(col_İmg.reshape(1, 28, 28, 1))
print(pred.argmax())
| proje_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''ea_houses_prices_env'': venv)'
# name: python3
# ---
# + [markdown] id="i5UVTehIrJwn"
# ## Insight Project
# + [markdown] id="Iupcr2TsUB9B"
# ## Exploratory Data Analysis
# + [markdown] id="TKBvtleOUB9E"
# ### Overview
# + [markdown] id="UgdCKG-sUB9E"
# The project objective is to answer two main questions, in the context of a company that buys houses to resell.
#
# 1. Which houses should the company buy, and at what purchase price?
#
# 2. Once a house is purchased, what is the best moment to sell it and at what price?
#
# * The data from <https://www.kaggle.com/harlfoxem/housesalesprediction> will be used.
# + [markdown] id="TdVeqFtoUB9F"
# ### Environment Setup
# + executionInfo={"elapsed": 4, "status": "ok", "timestamp": 1645705423612, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="X54Ln9MCUB9F"
#import libraries
import pandas as pd
import numpy as np
import seaborn as sns
# + [markdown] id="_TY1xr98UB9G"
# ### Import DATA
# + executionInfo={"elapsed": 964, "status": "ok", "timestamp": 1645705424573, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="VL7VwM9CUB9H"
df = pd.read_csv('data/kc_house_data.csv')
# + [markdown] id="XGrN9oxYUB9I"
# ### Clearing DATA
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1645705424574, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="QPKYtz4OUB9I" outputId="d9f5902e-d7d6-47ea-906f-8f01142b9216"
# Are there missing values in any of the features?
df.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 988, "status": "ok", "timestamp": 1645705425559, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="JhV_ZvpsUB9J" outputId="eae90039-0725-4de0-b91f-cd4d96386a97"
# What are the data types of the features?
df.dtypes
# + executionInfo={"elapsed": 17, "status": "ok", "timestamp": 1645705425560, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="IUfrGrzpUB9K"
# It's necessary to convert the date feature to datetime, because it will be used to calculate the seasons.
df['date'] = pd.to_datetime(df['date'])
# + colab={"base_uri": "https://localhost:8080/", "height": 296} executionInfo={"elapsed": 16, "status": "ok", "timestamp": 1645705425560, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="kr3AucmSUB9K" outputId="947aefe0-ef99-4044-f2e2-af079b3b16a4"
# Are there price outliers in the dataframe?
sns.boxplot(x=df['price'])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1645705425562, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "06802689726903945908"}, "user_tz": 180} id="z8ioTn8kUB9L" outputId="59b6603d-eda5-4746-a3be-e54a2523f1f3"
# There are outliers; it's necessary to remove them.
price = df['price'].sort_values()
Q1 = price.quantile(.25)
Q3 = price.quantile(.75)
IIQ = Q3 - Q1
inferior_limit = Q1 - 1.5 * IIQ
upper_limit = Q3 + 1.5 * IIQ
print(f'Inferior Limit {inferior_limit}\nUpper Limit {upper_limit}')
df = df[df['price'] < upper_limit]
# + [markdown] id="maqt9RotuWVs"
# ## Answering business questions
# + [markdown] id="4UPSMb_un0dM"
# #### 1) Which houses should be bought, and at what purchase price?
# + colab={"base_uri": "https://localhost:8080/", "height": 510} executionInfo={"elapsed": 11343, "status": "ok", "timestamp": 1645705436894, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="1LCEo59OUB9M" outputId="8ab64118-204c-4430-9cb3-8d262f2a8e4f"
# Suggested houses to buy: their prices are under the zipcode median and they are in good condition.
# A report with the suggestions, called houses_to_buy, is generated.
median_price_zipcode = df[['zipcode','price']].groupby(['zipcode']).median().reset_index()
median_price_zipcode.rename(columns = {'price':'median_price_zipcode'}, inplace=True)
df = df.merge(median_price_zipcode, on='zipcode', how='inner')
df['status'] = None
for i in range(len(df)):
    if (df.loc[i, 'price'] < df.loc[i, 'median_price_zipcode']) and (df.loc[i, 'condition'] >= 3):
        df.loc[i, 'status'] = 'buy'
    else:
        df.loc[i, 'status'] = 'not_buy'
houses_to_buy = df[df['status'] == 'buy']
houses_to_buy = houses_to_buy[['id', 'zipcode', 'price', 'median_price_zipcode', 'condition', 'status']].reset_index()
houses_to_buy.to_csv('data/houses_to_buy.csv', index=False)
houses_to_buy.head(10)
# -
df['median_price_zipcode']
# + [markdown] id="XU447g_5hGfr"
# #### 2) Once a house is purchased, what is the best moment to sell it and at what price?
# + executionInfo={"elapsed": 12493, "status": "ok", "timestamp": 1645705449385, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="I4URfZsiRrwl"
# New column called season, derived from date, considering northern hemisphere seasons.
def season_of_date(date):
year = str(date.year)
seasons = {'spring': pd.date_range(start= year +'-03-21 00:00:00', end=year + '-06-20 00:00:00'),
'summer': pd.date_range(start= year + '-06-21 00:00:00', end= year + '-09-22 00:00:00'),
'autumn': pd.date_range(start= year + '-09-23 00:00:00', end= year + '-12-20 00:00:00')}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
df['season'] = df.date.map(season_of_date)
# + colab={"base_uri": "https://localhost:8080/", "height": 293} executionInfo={"elapsed": 11754, "status": "ok", "timestamp": 1645705461138, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="S2DYJyGIhF63" outputId="2c23af23-a76d-4a15-dac4-13aff3dc5e86"
# Suggested sale prices for the houses; the suggestions consider the median price by zipcode and the season of the year.
median_price_zipcode_season = df[['zipcode', 'season','price']].groupby(['zipcode', 'season']).median().reset_index()
median_price_zipcode_season.rename(columns = {'price':'median_price_zipcode_season'}, inplace=True)
median_price_zipcode_season.head(10)
df = df.merge(median_price_zipcode_season, on=['zipcode','season'], how='inner')
df[['price_sale', 'profit']] = np.NaN, np.NaN
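# pricing rule used below: houses bought at or above the seasonal median get a 10% markup,
# houses bought below it get a 30% markup; the profit column stores the markup amount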
for i in range(len(df)):
    if (df.loc[i, 'price'] >= df.loc[i, 'median_price_zipcode_season']) and (df.loc[i, 'status'] == 'buy'):
        df.loc[i, 'price_sale'] = df.loc[i, 'price'] * 1.1
        df.loc[i, 'profit'] = (df.loc[i, 'price'] * 1.1) - df.loc[i, 'price']
    elif (df.loc[i, 'price'] < df.loc[i, 'median_price_zipcode_season']) and (df.loc[i, 'status'] == 'buy'):
        df.loc[i, 'price_sale'] = df.loc[i, 'price'] * 1.3
        df.loc[i, 'profit'] = (df.loc[i, 'price'] * 1.3) - df.loc[i, 'price']
    else:
        pass
houses_to_sell = df[df['status'] == 'buy']
houses_to_sell = houses_to_sell[['id', 'zipcode', 'season', 'median_price_zipcode_season','price', 'price_sale', 'profit']].reset_index()
houses_to_sell.rename(columns = {'median_price_zipcode_season': 'median_price', 'price': 'price_bought'}, inplace=True)
houses_to_sell.to_csv('data/houses_to_sell.csv', index=False)
houses_to_sell.head()
# + [markdown] id="aV-mxVzvu_RI"
# ### Potential Profit Using the Reports Generated Above
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1645705461139, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi6YRVzPrLUC8LVQ71IPEjBjCtuWeidswcPvoIAtZk=s64", "userId": "06802689726903945908"}, "user_tz": 180} id="T3_fin0CutTQ" outputId="38fb98cd-a97c-455a-a0a2-cadc781c54fb"
# Total potential profit if all the houses indicated in the first business question are sold at the suggested prices.
print(f'Total profit: {houses_to_sell.profit.sum():,}')
| Houses_Sales/houses_sales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": true}
import unittest
import import_ipynb
import numpy as np
class TestExercise12_6(unittest.TestCase):
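    """Unit tests for the genetic-algorithm helpers defined in the Exercise_12_6 notebook:
    population creation, target solution, fitness, parent selection, crossover, and mutation."""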
def setUp(self):
import Exercise_12_6
self.exercises = Exercise_12_6
def test_original_population(self):
pop = self.exercises.original_population(4,5)
self.assertEqual(len(pop[0]),len(pop[1]))
def test_create_target_solution(self):
target = self.exercises.create_target_solution(50)
self.assertEqual(len(target), 50)
def test_fitness_function(self):
target = np.zeros(5)
pop = self.exercises.original_population(2,5)
weights = self.exercises.fitness_function(target, pop)
self.assertEqual(len(weights),2)
def test_select_parents(self):
pop = self.exercises.original_population(5,8)
weights = np.array([1,0,2,4,3,2,1,0])
parents = self.exercises.select_parents(pop, weights)
self.assertIsNot(pop, parents)
def test_crossover_reproduction(self):
pop = self.exercises.original_population(2,6)
parents = pop[0:2]
c1,c2 = self.exercises.crossover_reproduction(parents, pop)
self.assertEqual(len(c1), len(parents[0]))
self.assertEqual(len(c2), len(parents[1]))
self.assertIsNot(c1,parents[0])
self.assertIsNot(c2, parents[1])
def test_mutate_population(self):
p = 0.005
pop = self.exercises.original_population(6,2)
pop1= self.exercises.mutate_population(pop,p)
self.assertEqual(len(pop), len(pop1))
for i in range(len(pop)):
self.assertIsNot(pop[i], pop1[i])
# + pycharm={"name": "#%%\n"}
suite = unittest.TestLoader().loadTestsFromTestCase(TestExercise12_6)
unittest.TextTestRunner(verbosity=2).run(suite)
| Chapter12/Exercise12.06/Test_Exercise_12_6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# We'll dig into the following topics:
# * Bias-Variance Tradeoff
# * Validation Set
# * Model Tuning
# * Cross-Validation
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_validate
from sklearn.linear_model import LinearRegression, Ridge, Lasso, LassoCV, RidgeCV
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import warnings
warnings.filterwarnings("ignore")
# -
boston = pd.read_csv('Boston.csv')
data = boston[['crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age', 'dis', 'rad',
'tax', 'ptratio', 'black', 'lstat']]
target = boston[['medv']]
# train/test split
X_train, X_test, y_train, y_test = train_test_split(data,
target,
shuffle=True,
test_size=0.2,
random_state=15)
# We know we’ll need to calculate training and test error, so let’s go ahead and create functions to do just that. Let’s include a meta-function that will generate a nice report for us while we’re at it. Mean Squared Error (MSE) will be our metric of choice here, with its square root (RMSE) reported in the cross-validation section.
# +
def calc_train_error(X_train, y_train, model):
    '''returns in-sample error (MSE) for an already fit model.'''
    predictions = model.predict(X_train)
    return mean_squared_error(y_train, predictions)
def calc_validation_error(X_test, y_test, model):
    '''returns out-of-sample error (MSE) for an already fit model.'''
    predictions = model.predict(X_test)
    return mean_squared_error(y_test, predictions)
def calc_metrics(X_train, y_train, X_test, y_test, model):
    '''fits the model and returns the MSE for in-sample error and out-of-sample error'''
    model.fit(X_train, y_train)
    train_error = calc_train_error(X_train, y_train, model)
    validation_error = calc_validation_error(X_test, y_test, model)
    return train_error, validation_error
# -
# ### Theory
# ### Bias-Variance Tradeoff
# Pay very close attention to this section. It is one of the most important concepts in all of machine learning. Understanding this concept will help you diagnose all types of models, be they linear regression, XGBoost, or Convolutional Neural Networks.
#
# We already know how to calculate training error and test error. So far we’ve simply been using test error as a way to gauge how well our model will generalize. That was a good first step but it’s not good enough. We can do better. We can tune our model. Let’s drill down.
#
# We can compare training error and something called validation error to figure out what’s going on with our model - more on validation error in a minute. Depending on the values of each, our model can be in one of three regions:
# 1) High Bias - underfitting
# 2) Goldilocks Zone - just right
# 3) High Variance - overfitting
# <img src="pic/bias-variance-tradeoff.png" />
# ### Plot Orientation
# The x-axis represents model complexity. This has to do with how flexible your model is. Some things that add complexity to a model include: additional features, increasing polynomial terms, and increasing the depth for tree-based models. Keep in mind this is far from an exhaustive list but you should get the gist.
#
# The y-axis indicates model error. It’s often measured as Mean-Squared Error (MSE) for Regression and Cross-Entropy or Accuracy for Classification.
#
# The blue curve is Training Error. Notice that it only decreases. What should be painfully obvious is that adding model complexity leads to smaller and smaller training errors. That’s a key finding.
#
# The green curve forms a U-shape. This curve represents Validation Error. Notice the trend. First it decreases, hits a minimum, and then increases. We’ll talk in more detail shortly about what exactly Validation Error is and how to calculate it.
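#
# As an optional, minimal sketch (not from the original write-up) of how such curves can be generated for this dataset, `sklearn.model_selection.validation_curve` refits a model for each hyperparameter value and returns per-fold train/validation scores; here Ridge's `alpha` stands in for (inverse) model complexity, `sketch_alphas` and `cv=5` are my own choices, and the `data`/`target` frames loaded above are assumed.
# +
from sklearn.model_selection import validation_curve
sketch_alphas = np.logspace(-3, 3, 7)
train_scores, val_scores = validation_curve(
    Ridge(fit_intercept=True), data, target.values.ravel(),
    param_name='alpha', param_range=sketch_alphas,
    scoring='neg_mean_squared_error', cv=5)
# scores are negated MSE; flip the sign and average over folds before plotting
plt.plot(sketch_alphas, -train_scores.mean(axis=1), label='train error (MSE)')
plt.plot(sketch_alphas, -val_scores.mean(axis=1), label='validation error (MSE)')
plt.xscale('log')
plt.xlabel('alpha (higher = less complex)')
plt.ylabel('MSE')
plt.legend()
plt.show()
# -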
# ### High Bias
# The rectangular box outlined by dashes to the left and labeled as High Bias is the first region of interest. Here you’ll notice Training Error and Validation Error are high. You’ll also notice that they are close to one another. This region is defined as the one where the model lacks the flexibility required to really pull out the inherent trend in the data. In machine learning speak, it is underfitting, meaning it’s doing a poor job all around and won’t generalize well. The model doesn’t even do well on the training set.
#
# How do you fix this?
#
# By adding model complexity of course. I’ll go into much more detail about what to do when you realize you’re under or
# overfitting in another post. For now, assuming you’re using linear regression, a good place to start is by adding additional features. The addition of parameters to your model grants it flexibility that can push your model into the Golidlocks Zone.
# ### Goldilocks Zone
# The middle region without dashes I’ve named the Goldilocks Zone. Your model has just the right amount of flexibility to pick up on the pattern inherent in the data but isn’t so flexible that it’s really just memorizing the training data. This region is marked by Training Error and Validation Error that are both low and close to one another. This is where your model should live.
# ### High Variance
# The dashed rectangular box to the right and labeled High Variance is the flip of the High Bias region. Here the model has so much flexiblity that it essentially starts to memorize the training data. Not surprisingly, that approach leads to low Training Error. But as was mentioned in the train/test post, a lookup table does not generalize, which is why we see high Validation Error in this region. You know you’re in this region when your Training Error is low but your Validation Error is high. Said another way, if there’s a sizeable delta between the two, you’re overfitting.
#
# How do you fix this?
#
# By decreasing model complexity. Again, I’ll go into much more detail in a separate post about what exactly to do. For now, consider applying regularization or dropping features.
# ### Canonical Plot
# Let’s look at one more plot to drive these ideas home.
# <img src="pic/bias-and-variance-targets.jpg" />
# Imagine you’ve entered an archery competition. You receive a score based on which portion of the target you hit: 0 for the red circle (bullseye), 1 for the blue, and 2 for the while. The goal is to minimize your score and you do that by hitting as many bullseyes as possible.
#
# The archery metaphor is a useful analog to explain what we’re trying to accomplish by building a model. Given different datasets (equivalent to different arrows), we want a model that predicts as closely as possible to observed data (aka targets).
#
# The top Low Bias/Low Variance portion of the graph represents the ideal case. This is the Goldilocks Zone. Our model has extracted all the useful information and generalizes well. We know this because the model is accurate and exhibits little variance, even when predicting on unforeseen data. The model is highly tuned, much like an archer who can adjust to different wind speeds, distances, and lighting conditions.
#
# The Low Bias/High Variance portion of the graph represents overfitting. Our model does well on the training data, but we see high variance for specific datasets. This is analagous to an archer who has trained under very stringent conditions - perhaps indoors where there is no wind, the distance is consistent, and the lighting is always the same. Any variation in any of those attributes throws off the archer’s accuracy. The archer lacks consistency.
#
# The High Bias/Low Variance portion of the graph represents underfitting. Our model does poorly on any given dataset. In fact, it’s so bad that it does just about as poorly regardless of the data you feed it, hence the small variance. As an analog, consider an archer who has learned to fire with consistency but hasn’t learned to hit the target. This is analagous to a model that always predicts the average value of the training data’s target.
#
# The High Bias/High Variance portion of the graph actually has no analog in machine learning that I’m aware of. There exists a tradeoff between bias and variance. Therefore, it’s not possible for both to be high.
#
# Alright, let’s shift gears to see this in practice now that we’ve got the theory down.
# +
lr = LinearRegression(fit_intercept=True)
train_error, test_error = calc_metrics(X_train, y_train, X_test, y_test, lr)
train_error, test_error = round(train_error, 3), round(test_error, 3)
print('train error: {} | test error: {}'.format(train_error, test_error))
print('train/test: {}'.format(round(test_error/train_error, 1)))
# -
# Hmm, our training error is somewhat lower than the test error. In fact, the test error is 1.1 times or 10% worse. It’s not a big difference but it’s worth investigating.
#
# Which region does that put us in?
#
# That’s right, it’s every so slightly in the High Variance region, which means our model is slightly overfitting. Again, that means our model has a tad too much complexity.
#
# Unfortunately, we’re stuck at this point.
#
# You’re probably thinking, “Hey wait, no we’re not. I can drop a feature or two and then recalculate training error and test error.”
#
# My response is simply: NOPE. DON’T. PLEASE. EVER. FOR ANY REASON. PERIOD.
#
# Why not?
#
# Because if you do that then your test set is no longer a test set. You are using it to train your model. It’s the same as if you trained your model on the all the data from the beginning. Seriously, don’t do this. Unfortunately, practicing data scientists do this sometimes; it’s one of the worst things you can do. You’re almost guaranteed to produce a model that cannot generalize.
#
# So what do we do?
#
# We need to go back to the beginning. We need to split our data into three datasets: training, validation, test.
#
# Remember, the test set is data you don’t touch until you’re happy with your model. The test set is used only ONE time to see how your model will generalize. That’s it.
#
# Okay, let’s take a look at this thing called a Validation Set.
# ### Validation Set
# Three datasets from one seems like a lot of work but I promise it’s worth it. First, let’s see how to do this in practice.
# +
# intermediate/test split (gives us test set)
X_intermediate, X_test, y_intermediate, y_test = train_test_split(data,
target,
shuffle=True,
test_size=0.2,
random_state=15)
# train/validation split (gives us train and validation sets)
X_train, X_validation, y_train, y_validation = train_test_split(X_intermediate,
y_intermediate,
shuffle=False,
test_size=0.25,
random_state=2018)
# +
# delete intermediate variables
del X_intermediate, y_intermediate
# print proportions
print('train: {}% | validation: {}% | test {}%'.format(round(len(y_train)/len(target),2),
round(len(y_validation)/len(target),2),
round(len(y_test)/len(target),2)))
# -
# If you’re a visual person, this is how our data has been segmented.
# <img src="pic/train-validate-test.png" />
# We now have three datasets, depicted by the graphic above, where the training set constitutes 60% of all data, the validation set 20%, and the test set 20%. Do notice that I haven’t changed the actual test set in any way: I used the same initial split and the same random state. That way we can compare the model we’re about to fit and tune to the linear regression model we built earlier.
# ### Side note:
# there is no hard and fast rule about how to proportion your data. Just know that your model is limited in what it can learn if you limit the data you feed it. However, if your test set is too small, it won’t provide an accurate estimate as to how your model will perform. Cross-validation allows us to handle this situation with ease, but more on that later.
# ### Model Tuning
# We need to decrease complexity. One way to do this is by using regularization.Regularization is a form of constrained optimization that imposes limits on determining model parameters. It effectively allows me to add bias to a model that’s overfitting. I can control the amount of bias with a hyperparameter called lambda or alpha (you’ll see both, though sklearn uses alpha because lambda is a Python keyword) that defines regularization strength.
#
# The code:
alphas = [0, 0.001, 0.01, 0.1, 1, 10]
print('All errors are MSE')
print('-'*76)
for alpha in alphas:
# instantiate and fit model
ridge = Ridge(alpha=alpha, fit_intercept=True, random_state=99)
ridge.fit(X_train, y_train)
# calculate errors
new_train_error = mean_squared_error(y_train, ridge.predict(X_train))
new_validation_error = mean_squared_error(y_validation, ridge.predict(X_validation))
new_test_error = mean_squared_error(y_test, ridge.predict(X_test))
# print errors as report
print('alpha: {:7} | train error: {:5} | val error: {:6} | test error: {}'.
format(alpha,
round(new_train_error,3),
round(new_validation_error,3),
round(new_test_error,3)))
# There are a few key takeaways here. First, notice the U-shaped behavior exhibited by the validation error. It starts at 18.001, goes down for two steps and then back up. Also notice that validation error and test error tend to move together, but by no means is the relationship perfect. We see both errors decrease as alpha increases initially but then test error keeps going down while validation error rises again. It’s not perfect. It actually has a whole lot to do with the fact that we’re dealing with a very small dataset. Each sample represents a much larger proportion of the data than say if we had a dataset with a million or more records. Anyway, validation error is a good proxy for test error, especially as dataset size increases. With small to medium-sized datasets, we can do better by leveraging cross-validation. We’ll talk about that shortly.
#
# Now that we’ve tuned our model, let’s fit a new ridge regression model on all data except the test data. Then we’ll check the test error and compare it to that of our original linear regression model with all features.
# +
# train/test split
X_train, X_test, y_train, y_test = train_test_split(data,
target,
shuffle=True,
test_size=0.2,
random_state=15)
# instantiate model
ridge = Ridge(alpha=0.11, fit_intercept=True, random_state=99)
# fit and calculate errors
new_train_error, new_test_error = calc_metrics(X_train, y_train, X_test, y_test, ridge)
new_train_error, new_test_error = round(new_train_error, 3), round(new_test_error, 3)
# -
print('ORIGINAL ERROR')
print('-' * 40)
print('train error: {} | test error: {}\n'.format(train_error, test_error))
print('ERROR w/REGULARIZATION')
print('-' * 40)
print('train error: {} | test error: {}'.format(new_train_error, new_test_error))
# A very small increase in training error coupled with a small decrease in test error. We’re definitely moving in the right direction. Perhaps not quite the magnitude of change we expected, but we’re simply trying to prove a point here. Remember this is a tiny dataset. Also remember I said we can do better by using something called Cross-Validation. Now’s the time to talk about that.
# ### Cross-Validation
# Let me say this upfront: this method works great on small to medium-sized datasets. This is absolutely not the kind of thing you’d want to try on a massive dataset (think tens or hundreds of millions of rows and/or columns). Alright, let’s dig in now that that’s out of the way.
#
# As we saw in the post about train/test split, how you split smaller datasets makes a significant difference; the results can vary tremendously. As the random state is not a hyperparameter (seriously, please don’t do that), we need a way to extract every last bit of signal from the data that we possibly can. So instead of just one train/validation split, let’s do K of them.
#
# This technique is appropriately named K-fold cross-validation. Again, K represents how many train/validation splits you need. There’s no hard and fast rule about how to choose K but there are better and worse choices. As the size of your dataset grows, you can get away with smaller values for K, like 3 or 5. When your dataset is small, it’s common to select a larger number like 10. Again, these are just rules of thumb.
#
# Here’s the general idea for 10-fold CV:
# <img src="pic/kfold-cross-validation.png" />
# ### Technical note:
# Be careful with terminology. Some people will refer to the validation fold as the test fold. Unfortunately, they use the terms interchangeably, which is confusing and therefore not correct. Don’t do that. The test set is the pure data that only gets consumed at the end, if it exists at all.
# Once data has been segmented off in the validation fold, you fit a fresh model on the remaining training data. Ideally, you calculate train and validation error. Some people only look at validation error, however.
#
# The data included in the first validation fold will never be part of a validation fold again. A new validation fold is created, segmenting off the same percentage of data as in the first iteration. Then the process repeats - fit a fresh model, calculate key metrics, and iterate. The algorithm concludes when this process has happened K times. Therefore, you end up with K estimates of the validation error, having visited all the data points in the validation set once and numerous times in training sets. The last step is to average the validation errors for regression. This gives a good estimate as to how well a particular model will perform.
#
# Again, this method is invaluable for tuning hyperparameters on small to medium-sized datasets. You technically don’t even need a test set. That’s great if you just don’t have the data. For large datasets, use a simple train/validation/test split strategy and tune your hyperparameters like we did in the previous section.
#
# Alright, let’s see K-fold CV in action.
# ### Sklearn & CV
# There’s two ways to do this in sklearn, pending what you want to get out of it.
#
# The first method I’ll show you is cross_val_score, which works beautifully if all you care about is validation error.
#
# The second method is KFold, which is perfect if you require train and validation errors.
#
# Let’s try a new model called LASSO just to keep things interesting.
# +
alphas = [0, 1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1]
val_errors = []
for alpha in alphas:
    lasso = Lasso(alpha=alpha, fit_intercept=True, random_state=77)
    # cross_val_score returns negated MSE per fold; flip the sign and average across the 10 folds
    errors = np.mean(-cross_val_score(lasso,
                                      data,
                                      y=target,
                                      scoring='neg_mean_squared_error',
                                      cv=10,
                                      n_jobs=-1))
    val_errors.append(np.sqrt(errors))
# -
# RMSE
print(val_errors)
# Which value of alpha gave us the smallest validation error
print('best alpha: {}'.format(alphas[np.argmin(val_errors)]))
# ### K-Fold
data_array = np.array(data)
target_array = np.array(target)
# +
K = 10
kf = KFold(n_splits=K, shuffle=True, random_state=42)
alphas = [0, 1e-4, 1e-3, 1e-2, 1e-1, 1, 1e1]
for alpha in alphas:
train_errors = []
validation_errors = []
for train_index, val_index in kf.split(data_array, target_array):
# split data
X_train, X_val = data_array[train_index], data_array[val_index]
y_train, y_val = target_array[train_index], target_array[val_index]
# instantiate model
lasso = Lasso(alpha=alpha, fit_intercept=True, random_state=77)
#calculate errors
train_error, val_error = calc_metrics(X_train, y_train, X_val, y_val, lasso)
# append to appropriate list
train_errors.append(train_error)
validation_errors.append(val_error)
# generate report
print('alpha: {:6} | mean(train_error): {:7} | mean(val_error): {}'.
format(alpha,
round(np.mean(train_errors),4),
round(np.mean(validation_errors),4)))
# -
# Comparing the output of cross_val_score to that of KFold, we can see that the general trend holds - an alpha of 10 results in the largest validation error. You may wonder why we get different values. The reason is that the data was split differently. We can control the splitting procedure with KFold but not cross_val_score. Therefore, there’s no way I know of to perfectly sync the two procedures without an exhaustive search of splits or writing the algorithm from scratch ourselves. The important thing is that each gives us a viable method to calculate whatever we need, whether it be purely validation error or a combination of training and validation error.
# ### Summary
# We discussed the Bias-Variance Tradeoff where a high bias model is one that is underfit while a high variance model is one that is overfit. We also learned that we can split data into three groups for tuning purposes. Specifically, the three groups are train, validation, and test. Remember the test set is used only one time to check how well a model generalizes on data it’s never seen. This three-group split works exceedingly well for large datasets but not for small to medium-sized datasets, though. In that case, use cross-validation (CV). CV can help you tune your models and extract as much signal as possible from the small data sample. Remember, with CV you don’t need a test set. By using a K-fold approach, you get the equivalent of K-test sets by which to check validation error. This helps you diagnose where you’re at in the bias-variance regime.
| Lectures/Levon/ModelTuning_Validation&Cross-Validation/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Data Generation
# ### created by <NAME>, 04/30/2020
# This script is used for generating the training, validation, and test data sets for the multiscale HiTS experiments
import os
import numpy as np
import scipy as sp
from scipy import integrate
from tqdm.notebook import tqdm
# shared parameters (adjustables)
dt = 0.01 # set to 1e-3 for Lorenz
n_forward = 5
total_steps = 1024 * n_forward
t = np.linspace(0, (total_steps)*dt, total_steps+1)
# ### Hyperbolic fixed point
# \begin{split}
# \dot{x} &= \mu x \\
# \dot{y} &= \lambda(y-x^2)
# \end{split}
# +
# path
data_dir = '../../data/VaryingStep/Hyperbolic/'
# system
mu = -0.05
lam = -1.0
def hyperbolic_rhs(x):
return np.array([mu*x[0], lam*(x[1]-x[0]**2)])
# simulation parameters
np.random.seed(2)
n = 2
# dataset
n_train = 500
n_val = 100
n_test = 100
# +
# simulate training trials
train_data = np.zeros((n_train, total_steps+1, n))
print('generating training trials ...')
for i in tqdm(range(n_train)):
x_init = np.random.uniform(-1.0, 1.0, n)
sol = sp.integrate.solve_ivp(lambda _, x: hyperbolic_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
train_data[i, :, :] = sol.y.T
# simulate validation trials
val_data = np.zeros((n_val, total_steps+1, n))
print('generating validation trials ...')
for i in tqdm(range(n_val)):
x_init = np.random.uniform(-1.0, 1.0, n)
sol = sp.integrate.solve_ivp(lambda _, x: hyperbolic_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
val_data[i, :, :] = sol.y.T
# simulate test trials
test_data = np.zeros((n_test, total_steps+1, n))
print('generating testing trials ...')
for i in tqdm(range(n_test)):
x_init = np.random.uniform(-1.0, 1.0, n)
sol = sp.integrate.solve_ivp(lambda _, x: hyperbolic_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
test_data[i, :, :] = sol.y.T
# save data
np.save(os.path.join(data_dir, 'trainBig.npy'), train_data)
np.save(os.path.join(data_dir, 'valBig.npy'), val_data)
np.save(os.path.join(data_dir, 'testBig.npy'), test_data)
# -
# ### Cubic oscillator
# \begin{split}
# \dot{x} &= -0.1x^3 + 2y^3 \\
# \dot{y} &= -2x^3 - 0.1y^3
# \end{split}
# +
# path
data_dir = '../../data/VaryingStep/Cubic/'
# system
def cubic_rhs(x):
return np.array([-0.1*x[0]**3+2*x[1]**3,
-2*x[0]**3-0.1*x[1]**3])
# simulation parameters
np.random.seed(2)
n = 2
# dataset
n_train = 500
n_val = 100
n_test = 100
# +
# simulate training trials
train_data = np.zeros((n_train, total_steps+1, n))
print('generating training trials ...')
for i in tqdm(range(n_train)):
x_init = np.random.uniform(-1.0, 1.0, n)
sol = sp.integrate.solve_ivp(lambda _, x: cubic_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
train_data[i, :, :] = sol.y.T
# simulate validation trials
val_data = np.zeros((n_val, total_steps+1, n))
print('generating validation trials ...')
for i in tqdm(range(n_val)):
x_init = np.random.uniform(-1.0, 1.0, n)
sol = sp.integrate.solve_ivp(lambda _, x: cubic_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
val_data[i, :, :] = sol.y.T
# simulate test trials
test_data = np.zeros((n_test, total_steps+1, n))
print('generating testing trials ...')
for i in tqdm(range(n_test)):
x_init = np.random.uniform(-1.0, 1.0, n)
sol = sp.integrate.solve_ivp(lambda _, x: cubic_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
test_data[i, :, :] = sol.y.T
# save data
np.save(os.path.join(data_dir, 'trainBig.npy'), train_data)
np.save(os.path.join(data_dir, 'valBig.npy'), val_data)
np.save(os.path.join(data_dir, 'testBig.npy'), test_data)
# -
# ### Van der Pol oscillator
# \begin{split}
# \dot{x} &= y \\
# \dot{y} &= \mu(1-x^2)y - x
# \end{split}
#
# where $\mu=2.0$
# +
# path
data_dir = '../../data/VaryingStep/VanDerPol/'
# system
mu = 2.0
def van_der_pol_rhs(x):
return np.array([x[1], mu*(1-x[0]**2)*x[1]-x[0]])
# simulation parameters
np.random.seed(2)
n = 2
# dataset
n_train = 500
n_val = 100
n_test = 100
# +
# simulate training trials
train_data = np.zeros((n_train, total_steps+1, n))
print('generating training trials ...')
for i in tqdm(range(n_train)):
x_init = [np.random.uniform(-2.0, 2.0), np.random.uniform(-4.0, 4.0)]
sol = sp.integrate.solve_ivp(lambda _, x: van_der_pol_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
train_data[i, :, :] = sol.y.T
# simulate validation trials
val_data = np.zeros((n_val, total_steps+1, n))
print('generating validation trials ...')
for i in tqdm(range(n_val)):
x_init = [np.random.uniform(-2.0, 2.0), np.random.uniform(-4.0, 4.0)]
sol = sp.integrate.solve_ivp(lambda _, x: van_der_pol_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
val_data[i, :, :] = sol.y.T
# simulate test trials
test_data = np.zeros((n_test, total_steps+1, n))
print('generating testing trials ...')
for i in tqdm(range(n_test)):
x_init = [np.random.uniform(-2.0, 2.0), np.random.uniform(-4.0, 4.0)]
sol = sp.integrate.solve_ivp(lambda _, x: van_der_pol_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
test_data[i, :, :] = sol.y.T
# save data
np.save(os.path.join(data_dir, 'trainBig.npy'), train_data)
np.save(os.path.join(data_dir, 'valBig.npy'), val_data)
np.save(os.path.join(data_dir, 'testBig.npy'), test_data)
# -
# ### Hopf bifurcation
# \begin{split}
# \dot{\mu} &= 0 \\
# \dot{x} &= \mu x + y -x(x^2+y^2) \\
# \dot{y} &= \mu y - x -y(x^2+y^2)
# \end{split}
# +
# path
data_dir = '../../data/VaryingStep/Hopf/'
# system
def hopf_rhs(x):
return np.array([0, x[0]*x[1]+x[2]-x[1]*(x[1]**2+x[2]**2),
-x[1]+x[0]*x[2]-x[2]*(x[1]**2+x[2]**2)])
# simulation parameters
np.random.seed(2)
n = 3
# dataset
n_train = 500
n_val = 100
n_test = 100
# +
# simulate training trials
train_data = np.zeros((n_train, total_steps+1, n))
print('generating training trials ...')
for i in tqdm(range(n_train)):
x_init = [np.random.uniform(-0.2, 0.6), np.random.uniform(-1, 2), np.random.uniform(-1, 1)]
sol = sp.integrate.solve_ivp(lambda _, x: hopf_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
train_data[i, :, :] = sol.y.T
# simulate validation trials
val_data = np.zeros((n_val, total_steps+1, n))
print('generating validation trials ...')
for i in tqdm(range(n_val)):
x_init = [np.random.uniform(-0.2, 0.6), np.random.uniform(-1, 2), np.random.uniform(-1, 1)]
sol = sp.integrate.solve_ivp(lambda _, x: hopf_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
val_data[i, :, :] = sol.y.T
# simulate test trials
test_data = np.zeros((n_test, total_steps+1, n))
print('generating testing trials ...')
for i in tqdm(range(n_test)):
x_init = [np.random.uniform(-0.2, 0.6), np.random.uniform(-1, 2), np.random.uniform(-1, 1)]
sol = sp.integrate.solve_ivp(lambda _, x: hopf_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
test_data[i, :, :] = sol.y.T
# save data
np.save(os.path.join(data_dir, 'trainBig.npy'), train_data)
np.save(os.path.join(data_dir, 'valBig.npy'), val_data)
np.save(os.path.join(data_dir, 'testBig.npy'), test_data)
# -
# ### Lorenz
# \begin{split}
# \dot{x} &= \sigma(y-x) \\
# \dot{y} &= x(\rho-z)-y \\
# \dot{z} &= xy - \beta z
# \end{split}
#
# where $\sigma=10, \rho=28, \beta=8/3$
# +
# path
data_dir = '../../data/VaryingStep/Lorenz/'
# system
sigma = 10
rho = 28
beta = 8/3
def lorenz_rhs(x):
return np.array([sigma*(x[1]-x[0]), x[0]*(rho-x[2])-x[1], x[0]*x[1]-beta*x[2]])
# simulation parameters
np.random.seed(2)
warmup = 1000
n = 3
# dataset
n_train = 500
n_val = 100
n_test = 100
# +
# simulate training trials
pre_t = np.linspace(0, warmup*dt, warmup+1)
train_data = np.zeros((n_train, total_steps+1, n))
print('generating training trials ...')
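# warm up from a small random initial condition so that trajectories start on (or
# near) the Lorenz attractor; each trial below then continues from the final state
# of the previous trajectory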
x_init = np.random.uniform(-0.1, 0.1, n)
sol = sp.integrate.solve_ivp(lambda _, x: lorenz_rhs(x), [0, warmup*dt], x_init, t_eval=pre_t)
for i in tqdm(range(n_train)):
x_init = sol.y[:, -1].T
sol = sp.integrate.solve_ivp(lambda _, x: lorenz_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
train_data[i, :, :] = sol.y.T
# simulate validation trials
val_data = np.zeros((n_val, total_steps+1, n))
print('generating validation trials ...')
x_init = np.random.uniform(-0.1, 0.1, n)
sol = sp.integrate.solve_ivp(lambda _, x: lorenz_rhs(x), [0, warmup*dt], x_init, t_eval=pre_t)
for i in tqdm(range(n_val)):
x_init = sol.y[:, -1].T
sol = sp.integrate.solve_ivp(lambda _, x: lorenz_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
val_data[i, :, :] = sol.y.T
# simulate test trials
test_data = np.zeros((n_test, total_steps+1, n))
print('generating testing trials ...')
x_init = np.random.uniform(-0.1, 0.1, n)
sol = sp.integrate.solve_ivp(lambda _, x: lorenz_rhs(x), [0, warmup*dt], x_init, t_eval=pre_t)
for i in tqdm(range(n_test)):
x_init = sol.y[:, -1].T
sol = sp.integrate.solve_ivp(lambda _, x: lorenz_rhs(x), [0, total_steps*dt], x_init, t_eval=t)
test_data[i, :, :] = sol.y.T
# save data
np.save(os.path.join(data_dir, 'trainBig.npy'), train_data)
np.save(os.path.join(data_dir, 'valBig.npy'), val_data)
np.save(os.path.join(data_dir, 'testBig.npy'), test_data)
# -
| scripts/multiscale_HiTS_exp/.ipynb_checkpoints/data_generation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simulation of data relating to weather at Dublin Airport
# * [Introduction](#Introduction)
# * [What is the dataset?](#What-is-the-dataset?)
# * [Setup](#Setup)
# * [Examination of the dataset](#Examination-of-the-dataset)
# * [Description of dataset](#Description-of-dataset)
# * [Skewness and kurtosis of dataset](#Skewness-and-kurtosis-of-dataset)
# * [Correlation](#Correlation)
# * [Plotting statistics](#Plotting-statistics)
# * [Discussion of the dataset](#Discussion-of-the-dataset)
# * [Simulation of new data](#Simulation-of-new-data)
# * [Additional checks](#Additional-checks)
# * [Further Analysis](#Further-Analysis)
# * [Bibliography](#Bibliography)
#
# ## Introduction
# This notebook is intended to fulfil two tasks: to review a dataset, and to simulate data that resembles it. To do this, the project (and notebook) is broken into two sections. In the first section, a review of the chosen dataset, in this case the weather at Dublin Airport, will be conducted. This review will include a statistical review of the data, as well as discussion of what the statistics mean. The second section will be an attempt to simulate like data, based on the information gleaned in the first section.
#
# Throughout the notebook, there will be code used. These snippets of code will be used to cleanse the data, provide the statistical analysis, and ultimately attempt to simulate the data. It should be noted that some of the data generated will be random, and therefore the values of the generated data will change, in a [pseudorandom](https://www.random.org/randomness/) manner.
#
# *Note: There is a bibliography at the end of this document, which details the articles, websites, and other items referenced. The hyperlinks within this document connect directly to the referenced site, and not to the bibliography.*
#
#
# ## What is the dataset?
# The dataset that was chosen is the Dublin Airport Weather records from the 1st January, 2016 to the 31st December, 2018. This data was sourced from the [Government of Ireland data website](https://data.gov.ie/dataset/dublin-airport-hourly-weather-station-data/resource/bbb2cb83-5982-48ca-9da1-95280f5a4c0d?inner_span=True). The dataset from the source is made up of record readings of various weather attributes recorded every hour from the 1st January, 1989 to the 31st December, 2018. Each row in the dataset is made up of the following columns:
#
# * __**Rain**__: the amount of precipitation to have fallen within the last hour. Measured in millimetres (mm).
# * __**Temp**__: the air temperature at the point of record. Measured in degrees Celsius (°C).
# * __**Wetb**__: the wet bulb temperature at the point of record. Measured in degrees Celsius (°C).
# * __**Dewpt**__: dew point air temperature at the point of record. Measured in degrees Celsius (°C).
# * __**Vappr**__: the vapour pressure of the air at the point of record. Measured in hectopascals (hpa).
# * __**Rhum**__: the relative humidity for the given air temperature. Measured in percent (%).
# * __**Msl**__: mean sea level pressure. Measured in hectopascals (hpa).
# * __**Wdsp**__: Mean hourly wind speed. Measured in knots (kt).
# * __**Wddir**__: Predominant wind direction. Measured in degrees (°).
# * __**Ww**__: Synop code for present weather.
# * __**W**__: Synop code for past weather.
# * __**Sun**__: The duration of the sun for the last hour. Measured in hours (h).
# * __**Vis**__: Visibility, or air clarity. Measured in metres (m).
# * __**Clht**__: Cloud ceiling height. Measured in hundreds of feet (100 ft).
# * __**Clamt**__: Amount of cloud. Measured using okta.
#
# There are also a number of indicators for some of the data recorded. Given the timespan of the data (30 years), the number of record points for each row (up to 21 points), and the hourly record taking, the data set is very large, comprising nearly 11,000 days, more than 262,000 rows, and 6,300,000 data points.
#
# The retrieved dataset is too large for the proposed simulation, so it has been reduced in size. This has been done by limiting the data to the month of December for the years 2016 to 2018 inclusive. The number of record points has been reduced to rain, temperature, relative humidity, sun, and visibility. Additionally, the rows of data have been reduced by amalgamating the hourly records into days: the rainfall levels and hours of sunshine have been summed to give a daily total, while the temperature, relative humidity, and visibility have been averaged for the day in question. This reduces the dataset to 93 (31 x 3) rows and 6 columns. A sketch of this kind of aggregation is shown below.
#
# Both the original and new datasets are available in this repository.
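#
# To make these aggregation rules concrete, the short cell below applies the same sum/mean logic to a tiny made-up hourly table. It is an illustration only: the column names and values are invented (they do not match the raw Met Éireann file), and the actual reduction was performed separately before this notebook.
# +
# Self-contained illustration of the daily aggregation described above
import pandas as pd  # imported here so this illustrative cell is self-contained
hourly_demo = pd.DataFrame({
    "date": pd.date_range("2016-12-01", periods=48, freq="H"),
    "rain": 0.1, "temp": 5.0, "rhum": 90.0, "sun": 0.0, "vis": 20000.0})
daily_demo = hourly_demo.set_index("date").resample("D").agg(
    {"rain": "sum", "temp": "mean", "rhum": "mean", "sun": "sum", "vis": "mean"})
print(daily_demo)
# -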
#
# ## Why was this dataset chosen?
#
# The dataset was chosen for a number of reasons. Primarily, it was chosen because it provides a large volume of data with interrelated variables. Some of these variables may be positively or negatively correlated with each other; this stands to reason, as the number of hours of sunshine and the millimetres of rain that have fallen would normally be negatively correlated. Secondly, the dataset relates to the weather in Ireland, or at least Dublin. As the weather is a favourite topic of conversation, the dataset seemed appropriate.
#
# ## Setup
# Before the analysis of the dataset can begin, it is necessary to import both the libraries to be used.
#
# * **Pandas**: The data set will be held in various pandas dataframes, which will allow for some statistical analysis to be conducted.
# * **Seaborn**: Will be used for various plotting functionality.
# * **Matplotlib.pyplot**: Will be used for various plotting functionality.
# * **Scipy.stats**: Will be used to simulate the data for the new dataframe.
#
# After this, the data can be imported in a dataframe. This will allow the determination of various statistics with regards to the dataset, as well as providing a basis for the simulation to be run.
#
# The script below will import the data, and set it up in a dataframe.
#
# +
# Importation of libraries, and setting up data
# Importation of libraries
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import skewnorm
from datetime import date
# Source for the data set
url = "https://raw.githubusercontent.com/Clauric/PfDA_Sim_Project/master/Dublin%20Airport%20Weather%202016%20-%202018%20cummulative.csv"
# Create a data frame containing the data set
# Set the values in the first column to dates
Dublin_df = pd.read_csv(url, parse_dates=[0])
# Rename the columns to be easier to read
Dublin_df.columns = ["Date", "Rain (mm)", "Temp. (°C)", "Rel. Hum. (%)", "Sun (hrs)", "Visibility (m)"]
# -
# ## Examination of the dataset
# The data set is expected to have the following attributes:
# * All columns (except *Date*) to be made up of numbers, either floating or integers.
# * Date column to be a datetime value.
# * Only the temperature (*Temp*) column can have a value below zero.
# * All non-date columns can have an unlimited upper value (except relative humidity (*Rel. Hum.*), which is limited to 100%).
#
# Additionally, the dataframe should consist of 93 (31 days per month x 3 months) rows, 6 columns, and 1 row of headers.
#
# Looking at the dataframe’s shape, data types, and first 10 rows, we get the following:
# +
# Shape, data types and first 10 rows of data set
# Shape
print("Shape of dataframe")
row, column = Dublin_df.shape
print("Rows:", row)
print("Columns:", column)
print()
# Types of values
print("Data types in dataframe")
print(Dublin_df.dtypes)
print()
# First 10 rows
print("First 10 rows of dataframe")
print(Dublin_df.head(10))
# -
# From the above, we can see that the shape of the data is as expected (i.e. 93 rows, 6 columns). The first 10 rows show that the column headers are as expected. While not really an issue, it should be noted that the relative humidity is given as values above 1, even though it is a percentage value. However, for the sake of this analysis, it will be left as is, instead of converting to a value between 0 and 1.
#
# ### Description of dataset
# In order to be able to work with the dataset, and draw any conclusions from the data, it is important to determine some of the dataset’s properties. To do this, we will extract basic measures, more commonly known as descriptive statistics. These statistics can then be used both as a guide to further analysis, and to determine which pseudorandom number generator is most appropriate to use (if possible).
#
# The initial set of descriptive statistics are the mean, mode, and median of the data, as well as standard deviation, quartiles, and min and max values. Luckily, pandas has the ability to provide the values for most of these statistics, using the describe function. However, while this is useful, it is also necessary to understand what the terms provided by the describe function actually mean.
#
# * **Mean**: Also known as the simple average, is the sum of all the values divided by the number of values being summed.
# * **Standard deviation**: A measure of how far values are spread from the mean. In a perfectly normal distribution, ~68% of all values would be within 1 standard deviation of the mean. Represented in the describe output as *std*.
# * **min**: The lowest value within the dataset.
# * **25%**: The value for the 25th percentile. In other words, 25% of all the values in the dataset are below this value.
# * **50%**: The value for the 50th percentile. In other words, 50% of all the values in the dataset are below this value. This value is often called the *median*.
# * **75%**: The value for the 75th percentile. In other words, 75% of all the values in the dataset are below this value.
# * **Max**: The largest value in the dataset.
#
# The values for the current dataset are:
# Describe function for the weather dataset
print("Descriptions of the weather dataset")
print()
print(Dublin_df.describe())
# As we can see, certain descriptive statistics such as correlation, skewness, and kurtosis are missing from the describe functionality. These statistics also provide important information about the dataset, so they will need to be gathered separately to give a full picture of the dataset.
#
# ### Skewness and kurtosis of dataset
# The skewness and kurtosis are interrelated terms that are used to describe the nature of the distribution of the dataset, and how it differs from a normally distributed dataset. The definitions of these terms are:
# * Skewness: the direction and amount of asymmetry of the dataset about its mean. If the absolute value of the skewness is:
# > - greater than 1, the data is highly skewed and the distribution is very asymmetric.
# > - greater than 0.5 and less than 1, the data is moderately skewed, and the distribution is somewhat asymmetric.
# > - greater than 0, and less than 0.5, the distribution of the data is approximately symmetric.
# > - equal to 0, the data is normally distributed, and symmetric.
#
# The sign of the skewness (i.e. positive or negative) indicates the direction of the skew. Negative skewness indicates that the distribution is skewed to the left, with the mean being less than the median, which is less than the mode. Positive skewness indicates the opposite, with the distribution being skewed to the right, and the mode being less than the median, which is less than the mean.
#
# * Kurtosis: the kurtosis of a dataset indicates the sharpness, or flatness, of the peak of the data (around the mode, or mean, depending on the skewness).
#
# The kurtosis is measured against the normal distribution, which has an (excess) kurtosis of 0 - the convention pandas uses. If the kurtosis is positive, the distribution has heavier tails than the normal distribution, with more of its variability coming from extreme values and typically a sharper, higher peak. A negative kurtosis indicates lighter tails than the normal distribution, with values spread more evenly and a flatter, lower peak and wider body.
#
# In pandas, the skewness and kurtosis of a dataset can be ascertained using the *skew* and *kurt* functions. These functions return values for each numeric column within the data set.
#
# +
# Skewness and kurtosis of the dataset
print("Skewness")
print(Dublin_df.skew())
print()
print("Kurtosis")
print(Dublin_df.kurt())
# -
# ### Correlation
# Correlation is a statistic that can be used to measure how well two sets of data correspond to each other. [Weisstein (2019)](http://mathworld.wolfram.com/Correlation.html) defines correlation as "*the degree to which two or more quantities are linearly associated*." As such, a correlation coefficient can be calculated that shows the relationship between the two sets of variables, as well as the strength of the relationship.
#
# In correlation analysis, positive values show that the two sets of data are positively correlated (i.e. as one value rises or falls, so does the other). Conversely, negative values indicate that the two data sets are negatively or inversely correlated (i.e. as one value rises, or falls, the other falls, or rises). A zero value indicates that there is no linear relationship between the two sets of data. The strength of the relationship is indicated by the actual value of the correlation coefficient. An absolute value above 0.5 is considered a strong correlation, and above 0.75 is a very strong correlation. A value of -1 or 1 means that the two sets of data are perfectly correlated (i.e. either perfectly positive or perfectly negative correlation).
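#
# For reference, the coefficient reported by pandas when using method="pearson" (as in the code below) is the Pearson product-moment correlation, which for a pair of columns $x$ and $y$ is:
#
# $$ r_{xy} = \frac{\sum_{i=1}^{n}(x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\sum_{i=1}^{n}(x_i - \bar{x})^2}\,\sqrt{\sum_{i=1}^{n}(y_i - \bar{y})^2}} $$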
#
# In pandas dataframes, the *corr* function can be used to ascertain the correlation between numeric sets of data.
# +
# Correlation analysis for the weather dataset
print()
print("Correlation coefficient for the weather dataset")
print()
# Create new dataframe for the correlation coefficient values
corr_df = Dublin_df.corr(method="pearson")
# Create separate correlation dataframe for heatmap
corr_df_p = corr_df
# As each column will be perfectly correlated with itself, there is no need to show these values
# Replace the values of 1 with a blank value
corr_df = corr_df.replace({1.00000: ""})
# Print the new dataframe to show the correlation coefficients of the weather dataset
print(corr_df)
print()
# Create heatmap of correlations
# From Zaric (2019)
ax = sns.heatmap(corr_df_p, vmin=-1, vmax=1, center=0, cmap=sns.diverging_palette(10, 200, n=500), square=True)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45, horizontalalignment='right')
plt.rcParams["figure.figsize"] = [7, 7]
plt.title("Heat map for weather dataset correlation")
plt.show()
# -
# The heatmap and the correlation table, when combined, allow us to see at a glance how the values are correlated with each other.
#
# ### Plotting statistics
# Before discussing the statistics that were determined, it is useful to plot some of them. In this case, plotting some of the columns will show the skewness of their distributions.
# +
# Plot of distribution of weather data
# For rain, new values for mean and median values
R_mean = Dublin_df["Rain (mm)"].mean()
R_median = Dublin_df["Rain (mm)"].median()
# For temp, new values for mean and median values
T_mean = Dublin_df["Temp. (°C)"].mean()
T_median = Dublin_df["Temp. (°C)"].median()
# For sun, new values for mean and median values
S_mean = Dublin_df["Sun (hrs)"].mean()
S_median = Dublin_df["Sun (hrs)"].median()
# Seaborn distplots showing both histograms and bell curves for temp, rain, and sun
sns.distplot(Dublin_df["Temp. (°C)"], axlabel = False, kde = False, label = "Temp. (°C)")
sns.distplot(Dublin_df["Rain (mm)"], axlabel = False, kde = False, label = "Rain (mm)")
sns.distplot(Dublin_df["Sun (hrs)"], axlabel = False, kde = False, label = "Sun (hrs)")
# Plotlines for mean and median
plt.axvline(R_mean, color = 'r', linestyle = "-", label = "Rain - mean")
plt.axvline(R_median, color = 'm', linestyle = "--", label = "Rain - median")
plt.axvline(T_mean, color = 'g', linestyle = "-", label = "Temp - mean")
plt.axvline(T_median, color = 'y', linestyle = "--", label = "Temp - median")
plt.axvline(S_median, color = 'b', linestyle = "--", label = "Sun - median")
plt.axvline(S_mean, color = 'k', linestyle = "-", label = "Sun - mean")
# Set size of plot area
plt.rcParams["figure.figsize"] = [15, 6]
# Set title, labels, and legend
plt.title("Histogram of rain, temp, and sun vs frequency")
plt.xlabel("mm (rain), temp (°C), hours (sun)")
plt.ylabel("Frequency")
plt.grid(b = True, which = "major", axis = "x")
plt.legend()
# Show plot
plt.show()
# -
# Neither visibility nor relative humidity was plotted, due to the fact that the minimum value for visibility is over 8,000 (m), while the range for relative humidity is 92 (%). Plotting these values would have dwarfed the other values in the plot, making it difficult to glean any information from it.
# ### Discussion of the dataset
# As noted previously, in order to make the dataset easier to process and simulate, a number of adjustments were made to the data. These adjustments, such as averaging the temperature, relative humidity, and visibility, and summing the rainfall values and sunlight hours, will have changed the overall data set. This was done in order to avoid having to simulate different sets of values for each hour of the day and night, as well as to reduce the dataset from 2,232 rows of data to 93 rows, while still maintaining each of the 5 data points for each row. However, these adjustments will have impacted the mean, median, standard deviation, and correlation values, as well as the skewness and kurtosis of the data set.
# #### Distribution
#
# Looking at the data in (somewhat) reverse order, we can see from the histograms that the data is not normally distributed, although some of it (temp) looks roughly normal. Both the rain and sunshine distributions show long tails leading to the right, with slight “humps” in these tails. For the sun values, the “hump” sits at about 5 hours, suggesting that there are slightly more days with 4 – 5 hours of sunlight than with 3 – 4 hours, but it trails off considerably after the 5 hour mark. Likewise, the rainfall has a slight “hump” in the 12 to 16 mm range, with plateaus in the 3 – 4 mm range and around the 6 mm mark. This suggests a slight clustering of rainfall amounts around these levels during the months in question.
#
# It is notable that there is a significant peak in the frequency of days with 0 (zero) hours of sunshine, and with 0 (zero) millimetres of rainfall. Intuitively, this seems reasonable for hours of sunshine, as December is normally a fairly overcast and cloudy month. However, December is normally considered a fairly wet month, while this suggests that it is often dry. This should not be confused with the relative humidity, which gives the feeling of damp that is often associated with the month.
#
# The temperature is somewhat more normally distributed than either the rainfall, or the hours of sunlight. However, even then the peak frequency is rather low, with the tails on either end being long and drawn out. There is also a “hump” in the 11 – 12 °C range.
#
# #### Skewness and Kurtosis
#
# The skewness of the values, as seen in the distribution histograms, is also clearly demonstrated in the skew values. The skew values for relative humidity, temperature, and visibility are all negative. This indicates that the mean is less than the median, and that the peak is to the right of both values. This is visible for the temp values in the plot above, where the mean is slightly less than the median, and the peak values are to the right of both. This also means there is a longer tail to the left of the mode than to the right. Additionally, the skew values lie between -0.5 and 0, which indicates that the distributions are reasonably symmetric.
#
# With regards to the remaining two variables (rain and sun), they are both positively skewed. This indicates that the mean is greater than the median. Additionally, the peak of a positively skewed distribution is to the left of both the mean and median, which is clearly demonstrated in the histogram above. The skew values for both rain and sun are above 1 (2.37 and 1.1 respectively), indicating that both are heavily asymmetric and very skewed. This corresponds with the shapes shown in the histogram.
#
# Looking at the kurtosis, the value for rain is positive, indicating heavy tails - a noticeable share of the rainfall values lie far from the mean. This is clear from the above plot, where there are small clusters of rain values between 8 and 10, 10 and 12, and 1 through 16 millimetres. For all the other variables, the kurtosis values are negative, indicating lighter tails than the normal distribution, with the values spread more evenly and no comparable extremes. The most pronounced of these is relative humidity, which is the flattest relative to a normal distribution, while the kurtosis of the sun’s values is the closest to that of a normal distribution.
#
# #### Correlation
# While no regression analysis has been performed on the dataset, it is still worthwhile examining the correlation between the variables. The heatmap gives a visual representation of the correlation coefficient table above it. The three strongest correlations, either positive or negative are between:
# * Visibility and relative humidity (-0.611) – strong to very strong, but negative, indicating that as the relative humidity increase, visibility decreases, and vice versa.
# * Sunlight hours and temperature (-0.433) – strong(ish) negative correlation, indicating that as the sunlight hours increases, the temperature drops, and vice versa. While correlation does not imply causation, this correlation is unusual, in that temperature normally increases with sunlight. A possible explanation for this is that the cloud cover acts as blanket, which keeps heat in, but is absent when the sun is visible. This would seem to support the findings of [Matuszko & Weglarczyk (2014)](https://rmets.onlinelibrary.wiley.com/doi/full/10.1002/joc.4238).
# * Visibility and rain (-0.364) – weak-to-moderate negative correlation, indicating that as the amount of rain increases, the visibility decreases, and vice versa. This suggests that rain reduces visibility, which is important for aircraft (all the readings are recorded at an airport), as it will impinge on the ability to see clearly at distance.
#
# The most significant positive correlation is between visibility and sunlight hours (0.280, weak). This suggests that the visibility increases as the period of sunlight increases. This would stand to reason as both sunlight and visibility are negatively correlated with rainfall.
#
# #### Standard statistics
# Looking first at the standard deviation: as we have already seen, the data is skewed both positively and negatively, and most of the kurtosis values are not that close to zero. As such, the usual normal-distribution rules of thumb for the standard deviation (such as ~68% of values lying within one standard deviation of the mean) should not be relied on here. However, the standard deviations will be useful for the simulation later on.
#
# For the rain, we see that the mean is considerably greater than the median, and is in fact far closer to the 75% quartile value. Combined with this is the fact that both the minimum and 25% quartile values are 0.00 mm, and the 75% quartile value is 0.2 mm, which suggests that there are a large number of days with no recordable rainfall. This seems slightly counterintuitive for Ireland during the winter. However, 25% of the rainfall values are between 2.5 and 15.5 mm, a range of 13 mm. This suggests that when it does rain in Dublin, it rains reasonably heavily.
#
# The mean for the recorded temperatures is 6.8°C, while the median value is 6.85°C. This suggests that the temperature readings are more normally distributed than some of the other recordings. However, as we have seen, the skew and kurtosis values suggest that there is still a reasonable degree of skew in the values. The range is quite large, with the minimum value in the data set being below 0°C (-0.52°C) and the largest value above 13°C (13.81°C). The interquartile range (25% - 75% quartile values) is less than 4.5°C, which suggests that the temperature, while reasonably cold, does not fluctuate as wildly as the minimum and maximum values indicate.
#
# The mean of the relative humidity is 87.15%, while the median relative humidity is 88.04%. Like the temperature readings, these values are quite close, and suggest that the distribution is fairly close to normal. However, the skew and kurtosis values likewise indicate that there is some skew in the data. The high relative humidity levels, being above 73%, give that damp feeling that is often associated with wintertime in Ireland.
#
# The sunlight hours in the dataset show that there is often not much sunlight visible during the month of December. The highest recorded number of hours of sunlight is nearly 7 hours (6 hours, 54 minutes). Considering that the shortest period of daylight (between sunrise and sunset) in December 2019 is expected to be 7 hours, 30 minutes on December 22nd [(Time and Date, 2019)](https://www.timeanddate.com/sun/ireland/dublin?month=12), this would indicate that for one particular day there was almost no cloud cover. However, with the minimum, 25% quartile, and median values all at or below 30 minutes (0.00, 0.10, 0.50 hours respectively), this suggests that there is usually a large amount of cloud cover. From these statistics, it is also clear that the data is positively skewed, as the mean is considerably greater than the median.
#
# Visibility is defined as the *“greatest distance at which a black object … can be seen and recognised when observed against a bright background”* [(International Civil Aviation Organization, 2007)](https://www.wmo.int/pages/prog/www/ISS/Meetings/CT-MTDCF-ET-DRC_Geneva2008/Annex3_16ed.pdf). The mean visibility is nearly 22.5 km (22,480m), with the median visibility being just 63 m less (22,417m). This would suggest that the data is reasonably normally distributed, while still being skewed. The range of the visibility is quite large, with the minimum and maximum values being nearly 28 km apart (8,979m and 36,667 m respectively). However, the interquartile range (25% - 75% quartile values) is less than 9,000m, suggesting that there is a fairly constant and steady range of visibility for the period of the dataset.
#
#
# ## Simulation of new data
# Looking back at the original data, and as per the discussion above, a number of statistics stand out. The most significant is that all 5 sets of data are skewed to one degree or another. While the temperature, relative humidity, and visibility data sets are skewed mildly enough that they might lend themselves to being simulated using a normal distribution, neither the rain nor the sun dataset could be so simulated. This leaves the option of simulating each dataset using a different random number generator approach, or looking for one random number generator that could simulate all the datasets on the same basis.
#
# As such, a number of random number generators were examined to see if they would be able to generate all the datasets required. These included numpy.random.multivariate_normal, scipy.stats.truncnorm, scipy.stats.johnsonsb, and scipy.stats.skewnorm.
#
# Issues arose with the multivariate approach, in that it did not take into account the skew values for the datasets. Additionally, it generates a normal distribution, which it has been determined the datasets do not follow. It did, however, allow for the introduction of a covariance between the values, which would have been helpful in more accurately simulating the relationship between the datasets. Similar to the multivariate distribution, truncnorm also produces a normally distributed dataset. However, truncnorm allows values to be cut off at the required lower and upper bounds, as necessary, although this does produce some spikes in the frequency of these values. The johnsonsb distribution was also examined, as it does allow for the median, mean, variance, standard deviation, and skew to be used. However, due to the lack of tutorials using this method, it was discarded (*Note: There were only 3 videos available for the search terms “scipy.johnsonb skewness python”, and all pointed to the same site*).
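#
# As a side note, scipy.stats.truncnorm expects its a and b arguments in units of standard deviations from loc, which is easy to trip over. The small illustration below (not used in the analysis; the mean and standard deviation are made-up, roughly humidity-like numbers) shows the conversion from raw bounds.
# +
# Illustration only: a normal distribution truncated to [0, 100]
from scipy.stats import truncnorm
lower_demo, upper_demo = 0.0, 100.0
mu_demo, sd_demo = 87.0, 8.0  # made-up values
a_demo = (lower_demo - mu_demo) / sd_demo
b_demo = (upper_demo - mu_demo) / sd_demo
print(truncnorm.rvs(a_demo, b_demo, loc=mu_demo, scale=sd_demo, size=5))
# -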
#
# The final approach examined to simulate the datasets was scipy.stats.skewnorm. This distribution had the advantage of taking a skewness parameter as one of its inputs, alongside a location and scale tied to the mean and standard deviation. It does produce a skew-normal distribution, which doesn’t exactly match the dataset; however, of the libraries and approaches examined, it produced the results nearest to the original when comparing the mean and standard deviation (see the output below the next code box). Additionally, there were a number of tutorials available, and some concise explanations of how the code worked, to enable its use.
#
# Using the scipy.skewnorm distribution, there are a number of steps that need to be taken to simulate the new dataset. These are:
# 1. Determine the skewness, standard deviation, and mean of each of the columns in the original data set, using the *skew()*, *std()*, and *mean()* functionality respectively.
# 2. Set the number of random values for each of the columns to be simulated to 93, to match the original dataset.
# 3. Based on [Weckesser (2018)](https://stackoverflow.com/questions/49367436/scipy-skewnorm-mean-not-matching-theory), for each of the columns (i.e. Rain, Temperature, Relative Humidity, Sun, and Visibility) calculate the delta, adjusted standard deviation, and adjusted mean, using the formulae:
# > - Delta = skew / sqrt(1 + skew^2)
# > - Adjusted_StD = sqrt(Std^2 / (1 - 2 * delta^2 / pi))
# > - Adjusted_Mean = Mean - Adjusted_StD * sqrt(2 / pi) * delta
# 4. Using the values derived from above, input the values into the scipy.skewnorm formula as follows:
# > - X = skewnorm.rvs(Skew, loc = Adjusted_Mean, scale = Adjusted_StD, size = sample_size)
# 5. These values can be put together into a new dataset.
# 6. From this dataset, we can check the simulated data against the original dataset. This will show how close the simulated data is to the original. *Note: there is no set seed for these calculations, so the simulated values will change each time the notebook is run (see the short aside below).*
#
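# As an aside, skewnorm.rvs accepts a random_state argument, so a seed could be fixed if repeatable simulated values were ever wanted. The quick self-contained check below uses arbitrary parameters (not the ones derived from the weather data).
# +
# Two draws with the same fixed seed are identical; with no seed they would differ
demo_a = skewnorm.rvs(2.0, loc=0.0, scale=1.0, size=5, random_state=42)
demo_b = skewnorm.rvs(2.0, loc=0.0, scale=1.0, size=5, random_state=42)
print("Identical draws with a fixed seed:", (demo_a == demo_b).all())
# -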
# +
# Simulation of new data
# Variables needed to generate random values
# Skewness
R_skew = Dublin_df["Rain (mm)"].skew()
RH_skew = Dublin_df["Rel. Hum. (%)"].skew()
S_skew = Dublin_df["Sun (hrs)"].skew()
T_skew = Dublin_df["Temp. (°C)"].skew()
V_skew = Dublin_df["Visibility (m)"].skew()
# Standard deviations
R_std = Dublin_df["Rain (mm)"].std()
RH_std = Dublin_df["Rel. Hum. (%)"].std()
S_std = Dublin_df["Sun (hrs)"].std()
T_std = Dublin_df["Temp. (°C)"].std()
V_std = Dublin_df["Visibility (m)"].std()
# Mean values
R_mean # Already calculated
S_mean # Already calculated
T_mean # Already calculated
RH_mean = Dublin_df["Rel. Hum. (%)"].mean()
V_mean = Dublin_df["Visibility (m)"].mean()
# Other variables
No_of_samples = 93
# Determine values using skewnorm (Weckesser, 2018)
# Rain
R_delta = R_skew / math.sqrt(1. + math.pow(R_skew, 2.))
R_adjStdev = math.sqrt(math.pow(R_std, 2.) / (1. - 2. * math.pow(R_delta, 2.) / math.pi))
R_adjMean = R_mean - R_adjStdev * math.sqrt(2. / math.pi) * R_delta
R_Random = skewnorm.rvs(R_skew, loc = R_adjMean, scale = R_adjStdev, size = No_of_samples)
# Relative Humidity
RH_delta = RH_skew / math.sqrt(1. + math.pow(RH_skew, 2.))
RH_adjStdev = math.sqrt(math.pow(RH_std, 2.) / (1. - 2. * math.pow(RH_delta, 2.) / math.pi))
RH_adjMean = RH_mean - RH_adjStdev * math.sqrt(2. / math.pi) * RH_delta
RH_Random = skewnorm.rvs(RH_skew, loc = RH_adjMean, scale = RH_adjStdev, size = No_of_samples)
# Sun
S_delta = S_skew / math.sqrt(1. + math.pow(S_skew, 2.))
S_adjStdev = math.sqrt(math.pow(S_std, 2.) / (1. - 2. * math.pow(S_delta, 2.) / math.pi))
S_adjMean = S_mean - S_adjStdev * math.sqrt(2. / math.pi) * S_delta
S_Random = skewnorm.rvs(S_skew, loc = S_adjMean, scale = S_adjStdev, size = No_of_samples)
# Temperature
T_delta = T_skew / math.sqrt(1. + math.pow(T_skew, 2.))
T_adjStdev = math.sqrt(math.pow(T_std, 2.) / (1. - 2. * math.pow(T_delta, 2.) / math.pi))
T_adjMean = T_mean - T_adjStdev * math.sqrt(2. / math.pi) * T_delta
T_Random = skewnorm.rvs(T_skew, loc = T_adjMean, scale = T_adjStdev, size = No_of_samples)
# Visibility
V_delta = V_skew / math.sqrt(1. + math.pow(V_skew, 2.))
V_adjStdev = math.sqrt(math.pow(V_std, 2.) / (1. - 2. * math.pow(V_delta, 2.) / math.pi))
V_adjMean = V_mean - V_adjStdev * math.sqrt(2. / math.pi) * V_delta
V_Random = skewnorm.rvs(V_skew, loc = V_adjMean, scale = V_adjStdev, size = No_of_samples)
# Create new, random dataframe
Random_df = pd.DataFrame({
"Date": Dublin_df["Date"],
"Rain (mm)": R_Random,
"Temp. (°C)": T_Random,
"Rel. Hum. (%)": RH_Random,
"Sun (hrs)": S_Random,
"Visibility (m)": V_Random
})
# Check the mean, and standard deviations of both the original and new datasets
print("Check the mean, standard deviation, and skewness of the original and simulated datasets")
print()
print("".ljust(16) + "Rain".ljust(15) + "Temp".ljust(15) + "Rel. Hum.".ljust(15) + "Sun".ljust(15) + "Visibility")
print("---------------------------------------------------------------------------------------")
# Means
print("Mean orig: %11.4f %14.4f %15.4f %13.4f %18.4f" %(R_mean, T_mean, RH_mean, S_mean, V_mean))
print("Mean sim: %12.4f %14.4f %15.4f %13.4f %18.4f" %(Random_df["Rain (mm)"].mean(), Random_df["Temp. (°C)"].mean(),
Random_df["Rel. Hum. (%)"].mean(), Random_df["Sun (hrs)"].mean(),
Random_df["Visibility (m)"].mean()))
print()
# Standard deviation
print("Std. Dev. orig: %4.4f %14.4f %14.4f %14.4f %17.4f" %(R_std, T_std, RH_std, S_std, V_std))
print("Std Dev. sim: %8.4f %14.4f %14.4f %14.4f %17.4f" %(Random_df["Rain (mm)"].std(), Random_df["Temp. (°C)"].std(),
Random_df["Rel. Hum. (%)"].std(), Random_df["Sun (hrs)"].std(),
Random_df["Visibility (m)"].std()))
print()
# Skewness
print("Skewness orig: %7.4f %14.4f %14.4f %14.4f %14.4f" %(R_skew, T_skew, RH_skew, S_skew, V_skew))
print("Skewness sim: %8.4f %14.4f %14.4f %14.4f %14.4f" %(Random_df["Rain (mm)"].skew(), Random_df["Temp. (°C)"].skew(),
Random_df["Rel. Hum. (%)"].skew(), Random_df["Sun (hrs)"].skew(),
Random_df["Visibility (m)"].skew()))
# -
# As we can see, some of the simulated values are reasonably close to the original data. However, in some cases both the magnitude of the skewness and its sign (positive/negative) have changed. This would indicate that the distribution used, while fairly accurate, may need to be further refined. Additionally, the small sample size for each column (93 values) could affect the results, including the skewness changing sign and the discrepancies in the means and standard deviations. It is possible that a larger sample size, in the tens of thousands, would help reduce, if not eliminate, these divergences.
#
# ### Additional checks
# It is worthwhile to check that the simulated dataset conforms to the logical bounds imposed by the laws of physics or nature. For example, the lower and upper bounds for relative humidity are 0% and 100%. If values in the simulated data fall outside these bounds, they will need to be corrected. This will, however, change the mean, standard deviation, and skewness, but it needs to be done nonetheless.
# +
# Print the description of the simulated dataset
# Get new values for rows and columns
new_row, new_col = Random_df.shape
print("Description of the simulated data")
print()
print("Shape of simulated dataframe")
print("Rows:", new_row)
print("Columns:", new_col)
print()
print(Random_df.describe())
# -
# The simulated values are now adjusted so that any value below the lower bound, or above the upper bound, is set to that bound.
# +
# Cleanse of data to ensure that the logical upper and lower bounds are adhered to
# Check for values above or below the upper or lower bounds for each variable
# Replace each variable outside these bounds with the boundary limit
# Rain has a lower bound of 0 mm per day, and an upper bound of the max of the original dataset
Random_df.loc[Random_df["Rain (mm)"] < 0, "Rain (mm)"] = 0
Random_df.loc[Random_df["Rain (mm)"] > Dublin_df["Rain (mm)"].max(), "Rain (mm)"] = Dublin_df["Rain (mm)"].max()
# Temperature has a lower bound of -15.7C, and an upper bound of 17.1C (respective records for Dublin in December)
Random_df.loc[Random_df["Temp. (°C)"] < -15.7, "Temp. (°C)"] = -15.7
Random_df.loc[Random_df["Temp. (°C)"] > 17.1, "Temp. (°C)"] = 17.1
# Relative humidity has a lower bound of 0 (%), and an upper bound of 100 (%)
Random_df.loc[Random_df["Rel. Hum. (%)"] < 0, "Rel. Hum. (%)"] = 0
Random_df.loc[Random_df["Rel. Hum. (%)"] > 100, "Rel. Hum. (%)"] = 100
# Sun has a lower bound of 0 (hrs), and an upper bound of the max of the original dataset
Random_df.loc[Random_df["Sun (hrs)"] < 0, "Sun (hrs)"] = 0
Random_df.loc[Random_df["Sun (hrs)"] > Dublin_df["Sun (hrs)"].max(), "Sun (hrs)"] = Dublin_df["Sun (hrs)"].max()
# Visibility has a lower bound of 0, and an upper bound of the max of the original dataset
Random_df.loc[Random_df["Visibility (m)"] < 0, "Visibility (m)"] = 0
Random_df.loc[Random_df["Visibility (m)"] > Dublin_df["Visibility (m)"].max(), "Visibility (m)"] = Dublin_df["Visibility (m)"].max()
# Print description of the original and simulated dataframes
print("Stats for original and simulated dataframes")
print()
print("Original data")
print(Dublin_df.describe())
print()
print("Simulated data")
print(Random_df.describe())
print()
print("Top 5 rows of simulated dataframe")
print(Random_df.head(5))
# -
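# As an aside, the same bounding can be written more compactly with pandas' clip method. The sketch below is equivalent to the .loc assignments above for the relative humidity column, and is harmless to re-run since the values are already within bounds.
# +
# Equivalent, more compact clipping of one column using Series.clip
Random_df["Rel. Hum. (%)"] = Random_df["Rel. Hum. (%)"].clip(lower=0, upper=100)
# -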
# The histograms for the simulated rain, temperature, and sun values can be plotted as was done for the [original dataset](#Plotting-statistics).
# +
# Plot of distribution of simulated weather data
# For rain, new values for mean and median values
R_mean_r = Random_df["Rain (mm)"].mean()
R_median_r = Random_df["Rain (mm)"].median()
# For temp, new values for mean and median values
T_mean_r = Random_df["Temp. (°C)"].mean()
T_median_r = Random_df["Temp. (°C)"].median()
# For sun, new values for mean and median values
S_mean_r = Random_df["Sun (hrs)"].mean()
S_median_r = Random_df["Sun (hrs)"].median()
# Seaborn distplots showing both histograms and bell curves for temp, rain, and sun
sns.distplot(Random_df["Temp. (°C)"], axlabel = False, kde = False, label = "Temp. (°C)")
sns.distplot(Random_df["Rain (mm)"], axlabel = False, kde = False, label = "Rain (mm)")
sns.distplot(Random_df["Sun (hrs)"], axlabel = False, kde = False, label = "Sun (hrs)")
# Plotlines for mean and median
plt.axvline(R_mean_r, color = 'r', linestyle = "-", label = "Rain - mean")
plt.axvline(R_median_r, color = 'k', linestyle = "--", label = "Rain - median")
plt.axvline(T_mean_r, color = 'g', linestyle = "-", label = "Temp - mean")
plt.axvline(T_median_r, color = 'y', linestyle = "--", label = "Temp - median")
plt.axvline(S_median_r, color = 'b', linestyle = "--", label = "Sun - median")
plt.axvline(S_mean_r, color = 'm', linestyle = "-", label = "Sun - mean")
# Set size of plot area
plt.rcParams["figure.figsize"] = [12, 6]
# Set title, labels, and legend
plt.title("Distribution of rain, temp, and sun - simulated data")
plt.xlabel("mm (rain), °C (temp), hours (sun)")
plt.ylabel("Frequency")
plt.grid(b = True, which = "major", axis = "x")
plt.legend()
# Show plot
plt.show()
# -
# ## Further Analysis
# There are a number of further pieces of analysis that could be undertaken on the original and modified datasets used above. The original dataset was broken up into hourly readings, which were combined to form the dataset used to conduct this analysis. Analysis could be undertaken to determine the statistics for the hourly dataset, in order to allow for a more accurate simulation. This simulation could take into account the hours of sunrise and sunset, especially when correlating variables with respect to hours of sunlight.
#
# Additionally, a larger sample size could have been taken, either by expanding the months used from one to all twelve, or by using more years of data for a single month. This would allow for greater refinement of the statistics, and could also be used to examine the effects of global warming on the overall weather. In the original dataset, there were 21 columns of data, the majority of which were excluded as they would have made the dataset too large and unwieldy. Adding some, or all, of these columns back into the examination would undoubtedly produce more accurate results, especially around correlation and distribution. This, in turn, would allow for greater simulation accuracy, both in terms of the random number generator to use, as well as the values produced.
#
# Finally, it should be noted that any examination of the initial, unedited dataset, would need to take into account the changing climatic conditions, as well as the variations caused by the different seasons. Both of these issues would create challenges, as well as opportunities for further study.
#
# ## Bibliography
# * <NAME>., 2018. The Skew-Normal Probability Distribution. [Online] Available at: http://azzalini.stat.unipd.it/SN/index.html
# [Accessed 10 December 2019].
# * <NAME>., 2019. Introduction to Randomness and Random Numbers. [Online] Available at: https://www.random.org/randomness/
# [Accessed 7 November 2019].
# * <NAME>., 2019. Create random numbers with left skewed probability distribution. [Online] Available at: https://stackoverflow.com/questions/24854965/create-random-numbers-with-left-skewed-probability-distribution/56552531#56552531
# [Accessed 12 December 2019].
# * International Civil Aviation Organization, 2007. Meteorological Service for International Air Navigation, 16th Edition. [Online] Available at: https://www.wmo.int/pages/prog/www/ISS/Meetings/CT-MTDCF-ET-DRC_Geneva2008/Annex3_16ed.pdf
# [Accessed 12 December 2019].
# * <NAME>., 2016. Analysis of Weather data using Pandas, Python, and Seaborn. [Online] Available at: https://www.shanelynn.ie/analysis-of-weather-data-using-pandas-python-and-seaborn
# [Accessed 30 November 2019].
# * <NAME>. & <NAME>., 2014. Relationship between sunshine duration and air temperature and contemporary global warming. International Journal of Climatology, 35(12), pp. 3640 - 3653.
# * Met Eireann, 2010. Absolute maximum air temperatures (°C) for each month at selected stations. [Online] Available at: http://archive.met.ie/climate-ireland/extreme_maxtemps.pdf
# [Accessed 12 December 2019].
# * Met Eireann, 2010. Absolute minimum air temperatures (°C) for each month at selected stations. [Online] Available at: http://archive.met.ie/climate-ireland/extreme_mintemps.pdf
# [Accessed 12 December 2019].
# * Met Éireann, 2019. Dublin Airport Hourly Weather Station Data. [Online] Available at: https://data.gov.ie/dataset/dublin-airport-hourly-weather-station-data/resource/bbb2cb83-5982-48ca-9da1-95280f5a4c0d?inner_span=True
# [Accessed 30 November 2019].
# * SciPy.org, 2019. scipy.stats.johnsonsb. [Online] Available at: https://scipy.github.io/devdocs/generated/scipy.stats.johnsonsb.html#scipy.stats.johnsonsb
# [Accessed 10 December 2019].
# * SciPy.org, 2019. scipy.stats.skewnorm. [Online] Available at: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skewnorm.html
# [Accessed 12 December 2019].
# * Time and Date AS, 2019. Dublin, Ireland — Sunrise, Sunset, and Daylength, December 2019. [Online] Available at: https://www.timeanddate.com/sun/ireland/dublin?month=12
# [Accessed 13 December 2019].
# * <NAME>., 2018. scipy skewnorm mean not matching theory?. [Online] Available at: https://stackoverflow.com/questions/49367436/scipy-skewnorm-mean-not-matching-theory
# [Accessed 12 December 2019].
# * <NAME>., 2019. Correlation. [Online] Available at: http://mathworld.wolfram.com/Correlation.html
# [Accessed 3 November 2019].
# * <NAME>., 2019. Better Heatmaps and Correlation Matrix Plots in Python. [Online] Available at: https://towardsdatascience.com/better-heatmaps-and-correlation-matrix-plots-in-python-41445d0f2bec
# [Accessed 10 December 2019].
#
#
| Weather data simulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.datasets import imdb
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import keras
from keras import regularizers
from keras.layers import SpatialDropout1D, Dense, Dropout, LSTM, Bidirectional, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Flatten, Concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
from keras.callbacks import EarlyStopping, ModelCheckpoint
import string
import re
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from matplotlib import pyplot as plt
from ast import literal_eval
from scipy.stats.stats import pearsonr
import nltk
# Download glove vectors at https://www.kaggle.com/yutanakamura/glove42b300dtxt
# # 1. Data Cleaning & Feature Extraction
# Import dataset
df = pd.read_csv("cleaned_dataset_with_labels.csv")
df = df.rename(columns={"Unnamed: 0": "id"})
df.head()
# +
# Clean the subject lines
df['subject lines'] = df['subject lines'].str.lower()
df['subject lines'] = df['subject lines'].str.replace('\r',' ')
df['subject lines'] = df['subject lines'].str.replace('[^\w\s]',' ')
df = df.fillna(0)
df.head()
# -
# Check for null values
df.isnull().values.any()
# Create X and Y and split training and test sets
x = df['subject lines'].values
y = df[['enthusiasm', 'urgency', 'surprise', 'trust', 'curiosity', 'exclusivity']].values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# # 2. Tokenization and Creation of GloVe Embeddings
# Tokenize the words
tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(x)
words_to_index = tokenizer.word_index
len(words_to_index)
# Overview of word_to_index
list(words_to_index.items())[:10]
# Pad the sequences
train_sequences = tokenizer.texts_to_sequences(x_train)
x_train_indices = pad_sequences(train_sequences, padding='post', maxlen=25)
test_sequences = tokenizer.texts_to_sequences(x_test)
x_test_indices = pad_sequences(test_sequences, padding='post', maxlen=25)
# +
# Read GloVe vectors
def read_glove_vector(glove_vec):
with open(glove_vec, 'r', encoding='UTF-8') as f:
words = set()
word_to_vec_map = {}
for line in f:
w_line = line.split()
curr_word = w_line[0]
word_to_vec_map[curr_word] = np.array(w_line[1:], dtype=np.float64)
return word_to_vec_map
word_to_vec_map = read_glove_vector('glove.42b.300d.txt')
# +
# Create embedding layer
vocab_len = len(words_to_index)
emb_dim = 300
input_shape=(25, )
hits = 0
misses = 0
miss_list = []
# row 0 is reserved for the padding index, so the matrix has vocab_len + 1 rows and
# row i holds the vector for the word with (1-based) tokenizer index i
emb_matrix = np.zeros((vocab_len + 1, emb_dim))
for word, i in words_to_index.items():
    embedding_vector = word_to_vec_map.get(word)
    if embedding_vector is not None:
        emb_matrix[i, :] = embedding_vector
hits += 1
else:
miss_list.append(word)
misses += 1
print("Converted %d words (%d misses)" % (hits, misses))
# -
# Overview of emb_matrix
emb_matrix.shape
# Overview of the list of words without vectors
miss_list[:5]
# # 3. Convolutional Neural Network (CNN) Model
# CNN model creation
emb_layer = Embedding(vocab_len, emb_dim, input_shape=input_shape, weights=[emb_matrix], trainable=False)
keras.backend.clear_session()
cnn_model = keras.Sequential()
cnn_model.add(emb_layer)
cnn_model.add(Conv1D(250, 3, padding='valid', activation='relu'))
cnn_model.add(GlobalMaxPooling1D())
cnn_model.add(Dropout(0.5))
cnn_model.add(Flatten())
cnn_model.add(Dense(75, activation="relu"))
cnn_model.add(Dense(6, activation="softmax", kernel_regularizer=regularizers.l2(0.2)))
cnn_model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
cnn_model.summary()
# Overview of x-train
x_train_indices
# Overview of y-train
y_train
# Fit the CNN model and stop at minimum validation loss
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)
history_1 = cnn_model.fit(x_train_indices, y_train, validation_split=0.1, batch_size=64, epochs=50, shuffle=False, verbose=0, callbacks=[es])
# Overview of predictions
# Sample of emotions
y_pred = cnn_model.predict(x_test_indices)
y_pred
# Overview of y-test to compare with y-pred
y_test
# Calculate the loss and accuracy of CNN model
cnn_accr = cnn_model.evaluate(x_test_indices, y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(cnn_accr[0], cnn_accr[1]))
# +
# Plot the loss and accuracy for CNN model
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history_1, "accuracy")
plot_graphs(history_1, "loss")
# -
# Determine the Pearson correlation coefficient for CNN model
def pearson_correlation(y_pred, y_test):
pred_list = []
test_list = []
results = []
for i in range(6):
pred_list.append([item[i] for item in y_pred])
test_list.append([item[i] for item in y_test])
for i in range(6):
results.append(pearsonr(pred_list[i], test_list[i])[0])
return results
cnn_coeffs = dict(zip(['enthusiasm', 'urgency', 'surprise', 'trust', 'curiosity', 'exclusivity'], pearson_correlation(y_pred, y_test)))
cnn_coeffs
# # 4. Long Short-Term Memory (LSTM) Model
# LSTM model creation
keras.backend.clear_session()
lstm_model = keras.Sequential()
lstm_model.add(emb_layer)
lstm_model.add(SpatialDropout1D(0.2))
lstm_model.add(Bidirectional(LSTM(150, dropout=0.2, recurrent_dropout=0.2, return_sequences=False)))
lstm_model.add(Dropout(0.2))
lstm_model.add(Dense(6, activation="softmax", kernel_regularizer=regularizers.l2(0.1)))
lstm_model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
lstm_model.summary()
# Fit the LSTM model
history_2 = lstm_model.fit(x_train_indices, y_train, validation_split=0.1, batch_size=64, epochs=25, shuffle=False, verbose=0, callbacks=[es])
# Calculate the loss and accuracy of LSTM model
lstm_accr = lstm_model.evaluate(x_test_indices, y_test)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(lstm_accr[0], lstm_accr[1]))
# Plot the loss and accuracy for LSTM model
plot_graphs(history_2, "accuracy")
plot_graphs(history_2, "loss")
# Determine the Pearson correlation coefficient for LSTM model
y_pred_1 = lstm_model.predict(x_test_indices)
lstm_coeffs = dict(zip(['enthusiasm', 'urgency', 'surprise', 'trust', 'curiosity', 'exclusivity'], pearson_correlation(y_pred_1, y_test)))
lstm_coeffs
# Create helper function for prediction of single subject line
def emotion_predict(text):
text = text.lower()
text = re.sub(r"[^\w\s]","", text)
text_sequence = tokenizer.texts_to_sequences([text])
text_indices = pad_sequences(text_sequence, padding='post', maxlen=25)
return cnn_model.predict(text_indices)[0]
emotion_predict("How do you like our new product")
| models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="f0b15ab723e1"
msg = None
a: float = 2.25 # Variable a
b = [
'Hello', # First element
# Dummy comment
'World'
] # type: List[str] Nice list
# Interesting c variable
c: "NoneType" = None
# Not introspectable line
d = a == 3
# Broken name definition
= 2
| papermill/tests/notebooks/complex_parameters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# -
df = pd.read_csv('datasource/auto_clean.csv')
df.head()
sns.distplot(df['mpg'], kde=False, bins=20)
plt.show()
sns.lmplot('weight', 'mpg', data=df)
sns.lmplot('weight', 'mpg', data=df, fit_reg=False, aspect=2, size=6)
sns.lmplot('weight', 'mpg', data=df, fit_reg=False, aspect=2, size=6, hue='cylinders')
sns.lmplot('weight', 'mpg', data=df, fit_reg=False, aspect=2, size=6, hue='cylinders', col='origin')
sns.kdeplot(df['weight'], df['mpg'], shade=True)
sns.jointplot('weight', 'mpg', data=df, alpha=0.4)
g = sns.PairGrid(df, diag_sharey=False)
g.map_lower(sns.kdeplot, )
g.map_upper(plt.scatter)
g.map_diag(sns.kdeplot)
sns.countplot(y='cylinders', data=df)
sns.boxplot(x='cylinders', y='mpg', data=df)
sns.boxplot(x='origin', y='mpg', data=df, hue='cylinders')
| 5.3_seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# cell_metadata_filter: all,-execution,-papermill,-trusted
# formats: ipynb,py//py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown] tags=[]
# # Description
# %% [markdown] tags=[]
# It prepares the data to create a clustering tree visualization (using the R package `clustree`).
# %% [markdown] tags=[]
# # Modules loading
# %% tags=[]
# %load_ext autoreload
# %autoreload 2
# %% tags=[]
from IPython.display import display
from pathlib import Path
import numpy as np
import pandas as pd
from utils import generate_result_set_name
import conf
# %% [markdown] tags=[]
# # Settings
# %% tags=[]
CONSENSUS_CLUSTERING_DIR = Path(
conf.RESULTS["CLUSTERING_DIR"], "consensus_clustering"
).resolve()
display(CONSENSUS_CLUSTERING_DIR)
# %% [markdown] tags=[]
# # Load data
# %% [markdown] tags=[]
# ## PCA
# %% tags=[]
INPUT_SUBSET = "pca"
# %% tags=[]
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
# %% tags=[]
DR_OPTIONS = {
"n_components": 50,
"svd_solver": "full",
"random_state": 0,
}
# %% tags=[]
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
# %% tags=[]
data_pca = pd.read_pickle(input_filepath).iloc[:, :5]
# %% tags=[]
data_pca.shape
# %% tags=[]
data_pca.head()
# %% [markdown] tags=[]
# ## UMAP
# %% tags=[]
INPUT_SUBSET = "umap"
# %% tags=[]
INPUT_STEM = "z_score_std-projection-smultixcan-efo_partial-mashr-zscores"
# %% tags=[]
DR_OPTIONS = {
"n_components": 5,
"metric": "euclidean",
"n_neighbors": 15,
"random_state": 0,
}
# %% tags=[]
input_filepath = Path(
conf.RESULTS["DATA_TRANSFORMATIONS_DIR"],
INPUT_SUBSET,
generate_result_set_name(
DR_OPTIONS, prefix=f"{INPUT_SUBSET}-{INPUT_STEM}-", suffix=".pkl"
),
).resolve()
display(input_filepath)
assert input_filepath.exists(), "Input file does not exist"
input_filepath_stem = input_filepath.stem
display(input_filepath_stem)
# %% tags=[]
data_umap = pd.read_pickle(input_filepath)
# %% tags=[]
data_umap.shape
# %% tags=[]
data_umap.head()
# %% [markdown] tags=[]
# # Load selected best partitions
# %% tags=[]
input_file = Path(CONSENSUS_CLUSTERING_DIR, "best_partitions_by_k.pkl").resolve()
display(input_file)
# %% tags=[]
best_partitions = pd.read_pickle(input_file)
# %% tags=[]
best_partitions.shape
# %% tags=[]
best_partitions.head()
# %% [markdown] tags=[]
# # Prepare data for clustrees
# %% tags=[]
clustrees_df = pd.concat((data_pca, data_umap), join="inner", axis=1)
# %% tags=[]
display(clustrees_df.shape)
assert clustrees_df.shape == (data_pca.shape[0], data_pca.shape[1] + data_umap.shape[1])
# %% tags=[]
clustrees_df.head()
# %% [markdown] tags=[]
# ## Add partitions
# %% tags=[]
_tmp = np.unique(
[best_partitions.loc[k, "partition"].shape for k in best_partitions.index]
)
display(_tmp)
assert _tmp.shape[0] == 1
assert _tmp[0] == data_umap.shape[0] == data_pca.shape[0]
# %% tags=[]
assert not best_partitions.isna().any().any()
# %% tags=[]
# df = df.assign(**{f'k{k}': partitions.loc[k, 'partition'] for k in selected_k_values})
clustrees_df = clustrees_df.assign(
**{
f"k{k}": best_partitions.loc[k, "partition"]
for k in best_partitions.index
if best_partitions.loc[k, "selected"]
}
)
# %% tags=[]
clustrees_df.index.rename("trait", inplace=True)
# %% tags=[]
clustrees_df.shape
# %% tags=[]
clustrees_df.head()
# %% tags=[]
# make sure partitions were assigned correctly
assert (
np.unique(
[
clustrees_df[f"{k}"].value_counts().sum()
for k in clustrees_df.columns[
clustrees_df.columns.str.contains("^k[0-9]+$", regex=True)
]
]
)[0]
== data_pca.shape[0]
)
# %% [markdown] tags=[]
# # Save
# %% tags=[]
output_file = Path(CONSENSUS_CLUSTERING_DIR, "clustering_tree_data.tsv").resolve()
display(output_file)
# %% tags=[]
clustrees_df.to_csv(output_file, sep="\t")
# %% tags=[]
| nbs/13_consensus_clustering/040_04-clustering_trees_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ASLingo Model Training
#
# This notebook trains the model used in the ASLingo backend. It assumes that `data_collection.py` has been run and the data is saved in the data/ folder.
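# A quick pre-flight check of that assumption (a minimal sketch; adjust the path if your
# `data_collection.py` output lives elsewhere):
# +
import os
assert os.path.exists(os.path.join('data', 'metadata.json')), \
    "data/metadata.json not found - run data_collection.py first"
# -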
# +
import pandas as pd
import numpy as np
import os
import time
import json
# %matplotlib inline
import matplotlib.pyplot as plt
# -
metadata_path = os.path.join('data', 'metadata.json')
with open(metadata_path) as f:
metadata = json.load(f)
words = list(metadata.keys())
def get_csv_path(word, sample_idx=0):
return os.path.join('data', word, f"{sample_idx}.csv")
# ## Data Preprocessing
# +
from sklearn import preprocessing
encoder = preprocessing.LabelEncoder()
encoder.fit(words)
# -
from utils import StaticSignProcessor
# +
class StaticSignProcessor():
def __init__(self, X_shape=(10,126,1)):
self.shape = X_shape
def process(self, df):
'''
just the cleanup: cut out head and tail, fill nan, (normalize)
'''
norm_df_array = self.normalize(df, method='hand_bbox')
norm_df = pd.DataFrame(data=norm_df_array, columns=df.columns)
# Drop the frames in the beginning and end of the video where no hands are detected
# Drop the timeframe and pose data
start_idx = (~norm_df['lefthand_0_x'].isna() | ~norm_df['righthand_0_x'].isna()).argmax()
end_idx = len(norm_df) - (norm_df[::-1]['lefthand_0_x'].isna() & norm_df[::-1]['righthand_0_x'].isna()).argmin()
norm_df = norm_df.iloc[start_idx:end_idx,1:127]
# Fill empty values with the previous seen value
norm_df = norm_df.fillna(method='ffill').fillna(method='bfill').fillna(0.)
# For classifiers, just return the mean of each column
if self.shape == (126,):
return norm_df.mean().to_numpy()
# for now, just choose 10 frames from the middle
# data = df.iloc[len(df)//3:len(df)//3+10].mean().to_numpy()
# if sum(np.isnan(data)) != 0:
# print(sum(np.isnan(data)))
# norm_data = np.reshape(data, self.shape)
# assert data.shape == self.shape
# return data
def normalize(self, df, method='hand_bbox'):
df_array = df.to_numpy().T # shape: (202,num_frames)
if method=='hand_bbox':
for h in ['left','right']:
hand_x_df = df.filter(regex=h).filter(regex='_x')
hand_y_df = df.filter(regex=h).filter(regex='_y')
x1,y1,x2,y2 = hand_x_df.min().min(),hand_y_df.min().min(),hand_x_df.max().max(),hand_y_df.max().max()
x_cols = [df.columns.get_loc(col) for col in hand_x_df.columns]
y_cols = [df.columns.get_loc(col) for col in hand_y_df.columns]
df_array[x_cols] = (df_array[x_cols]-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
df_array[y_cols] = (df_array[y_cols]-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
# df_x = (df.filter(regex='_x')-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
# df_y = (df.filter(regex='_y')-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
return df_array.T
elif method=='pose':
# normalize based on the width of the shoulders and height from shoulders to nose
# x1,y1,x2,y2 = df[['pose_11_x','pose_0_y','pose_12_x','pose_12_y']].mean()
hand_x_df = df.filter(regex='_x')
hand_y_df = df.filter(regex='_y')
col_indices = [df.columns.get_loc(col) for col in ('pose_11_x','pose_0_y','pose_12_x','pose_12_y')]
x1,y1,x2,y2 = df_array[col_indices].mean(axis=1)
x_cols = [df.columns.get_loc(col) for col in hand_x_df.columns]
y_cols = [df.columns.get_loc(col) for col in hand_y_df.columns]
df_array[x_cols] = (df_array[x_cols]-min(x1,x2))/(max(x1,x2)-min(x1,x2)+0.000001)
df_array[y_cols] = (df_array[y_cols]-min(y1,y2))/(max(y1,y2)-min(y1,y2)+0.000001)
return df_array.T
def flip_hands(self, df_array):
assert len(df_array) == 126
return np.concatenate((df_array[len(df_array)//2:],df_array[:len(df_array)//2]))
def generate_more_data(self, df_array, n=10, std=0.1):
'''
Generate more data from a single sample by adding noise
'''
samples = []
for i in range(n):
noise = np.random.normal(0, std, df_array.shape)
# randomly select up to 5 joints to perturb
perturb_indices = np.random.choice(len(df_array.T), np.random.choice(5), replace=False)
df_array.T[perturb_indices] = df_array.T[perturb_indices] + np.random.normal(0, std, df_array.T[perturb_indices].shape)
samples.append(df_array + noise)
return samples
# -
lstm_data_shape = (10,126)
cnn_data_shape = (10,126,1)
classifier_data_shape = (126,)
data_processor = StaticSignProcessor(X_shape=classifier_data_shape)
# +
X_data = []
y_data = []
for word in words:
for file in os.listdir(os.path.join('data', word)):
df = pd.read_csv(os.path.join('data', word, file))
df_array = data_processor.process(df)
X_data.append(df_array)
X_data += data_processor.generate_more_data(df_array, n=10, std=0.02)
y_data += [word] * 11
flipped_data = data_processor.flip_hands(df_array)
X_data.append(flipped_data)
X_data += data_processor.generate_more_data(flipped_data, n=10, std=0.02)
y_data += [word] * 11
X_data = np.array(X_data)
# for cnn model
# X_data = np.reshape(X_data, (X_data.shape[0], 10, 126, 1))
y_data = np.vstack(encoder.transform(y_data))
print(X_data.shape)
print(y_data.shape)
# -
file = os.listdir(os.path.join('data', words[0]))[0]
df = pd.read_csv(os.path.join('data', words[0], file))
df_array = data_processor.process(df)
flipped_data = data_processor.flip_hands(df_array)
# df_array[len(df_array)//2:]
flipped_data[:len(df_array)//2]
# sanity check
for i,x in enumerate(X_data):
if sum(np.isnan(x)) != 0:
print(i, x)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_data,
y_data,
stratify=y_data,
test_size=0.25)
# ## Model Selection
from keras.models import Model, Sequential
from keras.layers import Input, LSTM, Dense, Dropout, Flatten, concatenate
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras import optimizers
from keras.utils import np_utils
# +
def lstm_model():
# from MP2
batch_size=50
epochs=50
latent_dim=128
# input_layer = Input(shape=X_data.shape[1:])
# lstm = LSTM(latent_dim)(input_layer)
# dense = Dense(latent_dim, activation='relu')(lstm)
# pred = Dense(len(sampled_words), activation='softmax')(dense)
model = Sequential()
model.add(LSTM(latent_dim))
model.add(Dense(latent_dim, activation='relu'))
model.add(Dense(len(words), activation='softmax'))
# model = Model(inputs=input_layer, outputs=pred)
model.compile(loss="sparse_categorical_crossentropy", optimizer='adam', metrics=["acc"])
model.fit(X_data,
y_data,
epochs=epochs,
batch_size=batch_size,
verbose=1,
validation_split=0.3,
shuffle=True)
print(model.summary())
return model
lstm_model()
# -
def cnn_model():
num_of_classes = len(words)
model = Sequential()
model.add(Conv2D(16, (2,1), input_shape=(10, 126, 1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
model.add(Conv2D(32, (2,1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3), padding='same'))
# model.add(Conv2D(64, (2,1), activation='relu'))
# model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(num_of_classes, activation='softmax'))
sgd = optimizers.SGD(lr=1e-2)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# filepath="cnn_model_keras2.h5"
# checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
# callbacks_list = [checkpoint1]
#from keras.utils import plot_model
#plot_model(model, to_file='model.png', show_shapes=True)
# return model, callbacks_list
print(model.summary())
return model
# +
y_cat = np_utils.to_categorical(y_data)
X_train, X_test, y_train, y_test = train_test_split(X_data,
y_cat,
stratify=y_cat,
test_size=0.25)
epochs=15
batch_size=20
model = cnn_model()
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, batch_size=batch_size)
# +
model.save('cnn_letters_1.h5')
# model.evaluate(val_images, val_labels, verbose=0)
# -
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
# +
import warnings
warnings.filterwarnings('ignore')
lda_accuracy = []
qda_accuracy = []
knn_accuracy = []
rf_accuracy = []
nb_accuracy = []
for i in range(50):
X_train, X_test, y_train, y_test = train_test_split(X_data,
y_data,
stratify=y_data,
test_size=0.25)
clf = LinearDiscriminantAnalysis()
clf.fit(X_train, y_train)
lda_accuracy.append(clf.score(X_test,y_test))
clf = QuadraticDiscriminantAnalysis()
clf.fit(X_train, y_train)
qda_accuracy.append(clf.score(X_test,y_test))
neigh = KNeighborsClassifier(n_neighbors=15)
neigh.fit(X_train, y_train)
knn_accuracy.append(neigh.score(X_test,y_test))
clf = RandomForestClassifier(n_estimators=100, max_depth=10, random_state=0)
clf.fit(X_train, y_train)
rf_accuracy.append(clf.score(X_test,y_test))
clf = GaussianNB()
clf.fit(X_train, y_train)
nb_accuracy.append(clf.score(X_test,y_test))
# -
accuracy_list = [qda_accuracy, nb_accuracy, knn_accuracy, rf_accuracy, lda_accuracy]
plt.figure(figsize=(5.5, 5.5))
plt.ylim(0, 1)
plt.boxplot(accuracy_list, labels = ['QDA', 'Naive Bayes', 'k-nn', 'Random Forest', 'LDA'])
plt.title('Model Performance')
plt.ylabel('Accuracy')
plt.savefig('model_performance.png', dpi = 500)
plt.show()
plt.figure(figsize=(15,10))
# word = encoder.transform([np.random.choice(sampled_words)])[0]
word = encoder.transform(['LETTER-F'])[0]
samples = X_data[(y_data==word).flatten()]
samples = np.concatenate((samples[:11], samples[22:33], samples[44:55]))
# for x in samples[np.random.choice(len(samples), 10, replace=False)]:
for x in samples:
plt.title('right hand x positions for {}'.format(encoder.inverse_transform([word])[0]))
plt.plot(np.arange(len(x)//6), x[len(x)//2::3])
plt.savefig('hand_positions.png')
plt.show()
# +
import pickle
X_train, X_test, y_train, y_test = train_test_split(X_data,
y_data,
stratify=y_data,
test_size=0.25)
print(X_train[0].shape)
# model = KNeighborsClassifier(n_neighbors=15)
# model.fit(X_train, y_train)
# model = RandomForestClassifier(n_estimators=100, max_depth=10, random_state=0)
# model.fit(X_train, y_train)
model = LinearDiscriminantAnalysis()
model.fit(X_train, y_train)
print(model.score(X_test,y_test))
pred_prob = model.predict_proba([X_test[0]])[0]
pred_class = list(pred_prob).index(max(pred_prob))
print(encoder.inverse_transform([pred_class]))
print(max(pred_prob))
# Its important to use binary mode
model_path = open('saved_model.pkl', 'wb')
# source, destination
pickle.dump(model, model_path)
model_path.close()
# +
from sklearn.preprocessing import StandardScaler
def return_features(df_list, hand_list, drop_left):
scaler = StandardScaler()
feature_list = []
select_class = hand_selection(drop_left)
frames = select_class.transform(df_list, hand_list)
for df in frames:
class_obj = extraction(df)
class_obj.extract_features()
feature_list.append(class_obj.features)
feat_df = pd.DataFrame(feature_list)
y = feat_df.label
X = scaler.fit_transform(feat_df.drop(['label'], axis = 1))
return X, y
# -
def get_dfs_for_sign(sign):
label = encoder.transform([sign])[0]
samples = X_data[(y_data==label).flatten()]
return samples
# +
from matplotlib.path import Path
import matplotlib.patches as patches
def visualize_hands(df_array):
# plt.figure(figsize=(15,15))
fig, ax1 = plt.subplots(1,1,figsize=(10,10))
ax = ax1
for hand in (df_array[:len(df_array)//2], df_array[len(df_array)//2:]):
hand_data = []
finger_data = []
for i in range(len(hand)//3):
x,y,z = df_array[3*i:3*(i+1)]
if i % 4 == 0 and i != 0:
finger_data.append([x,y])
else:
hand_data.append([x,y])
# hand_data = max(hand_data) - np.array(hand_data)
# finger_data = max(finger_data) - np.array(finger_data)
t1 = ax.scatter(np.array(hand_data).T[0], np.array(hand_data).T[1], color='blue')
t2 = ax.scatter(np.array(finger_data).T[0], np.array(finger_data).T[1], color='red')
# verts = np.array(hand_data)
# codes = [Path.MOVETO] + [Path.LINETO] * (len(verts)-2) + [Path.CLOSEPOLY]
# path = Path(verts, codes)
# patch = patches.PathPatch(path, lw=2)
# ax.add_patch(patch)
# ax = ax2
ax.set_ylim(ax.get_ylim()[::-1])
plt.title('Plotted hand data for LETTER-F')
plt.savefig('plotted_hand.png', dpi = 500)
plt.show()
# -
letter_data = get_dfs_for_sign('LETTER-F')
visualize_hands(letter_data[11])
| train_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="AOpGoE2T-YXS" colab_type="text"
# ##### Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License").
#
# # Neural Machine Translation with Attention
#
# <table class="tfo-notebook-buttons" align="left"><td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb">
# <img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td><td>
# <a target="_blank" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
# + [markdown] id="CiwtNgENbx2g" colab_type="text"
# This notebook trains a sequence to sequence (seq2seq) model for Spanish to English translation using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager). This is an advanced example that assumes some knowledge of sequence to sequence models.
#
# After training the model in this notebook, you will be able to input a Spanish sentence, such as *"¿todavia estan en casa?"*, and return the English translation: *"are you still at home?"*
#
# The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting. This shows which parts of the input sentence has the model's attention while translating:
#
# <img src="https://tensorflow.org/images/spanish-english.png" alt="spanish-english attention plot">
#
# Note: This example takes approximately 10 minutes to run on a single P100 GPU.
# + id="tnxXKDjq3jEL" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
from __future__ import absolute_import, division, print_function
# Import TensorFlow >= 1.9 and enable eager execution
import tensorflow as tf
tf.enable_eager_execution()
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import time
print(tf.__version__)
# + [markdown] id="wfodePkj3jEa" colab_type="text"
# ## Download and prepare the dataset
#
# We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:
#
# ```
# May I borrow this book? ¿Puedo tomar prestado este libro?
# ```
#
# There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data:
#
# 1. Add a *start* and *end* token to each sentence.
# 2. Clean the sentences by removing special characters.
# 3. Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
# 4. Pad each sentence to a maximum length.
# + id="kRVATYOgJs1b" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Download the file
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
# + id="rd0jw-eC3jEh" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Converts the unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# creating a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# adding a start and an end token to the sentence
    # so that the model knows when to start and stop predicting.
w = '<start> ' + w + ' <end>'
return w
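# Quick check of the four preparation steps listed above (illustrative sentence; this
# should print something like '<start> ¿ puedo tomar prestado este libro ? <end>'):
print(preprocess_sentence(u"¿Puedo tomar prestado este libro?"))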
# + id="OHn4Dct23jEm" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return word_pairs
# + id="9xbqO7Iie9bb" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# This class creates a word -> index mapping (e.g., "dad" -> 5) and vice-versa
# (e.g., 5 -> "dad") for each language.
class LanguageIndex():
def __init__(self, lang):
self.lang = lang
self.word2idx = {}
self.idx2word = {}
self.vocab = set()
self.create_index()
def create_index(self):
for phrase in self.lang:
self.vocab.update(phrase.split(' '))
self.vocab = sorted(self.vocab)
self.word2idx['<pad>'] = 0
for index, word in enumerate(self.vocab):
self.word2idx[word] = index + 1
for word, index in self.word2idx.items():
self.idx2word[index] = word
# + id="eAY9k49G3jE_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def max_length(tensor):
return max(len(t) for t in tensor)
def load_dataset(path, num_examples):
# creating cleaned input, output pairs
pairs = create_dataset(path, num_examples)
# index language using the class defined above
inp_lang = LanguageIndex(sp for en, sp in pairs)
targ_lang = LanguageIndex(en for en, sp in pairs)
# Vectorize the input and target languages
# Spanish sentences
input_tensor = [[inp_lang.word2idx[s] for s in sp.split(' ')] for en, sp in pairs]
# English sentences
target_tensor = [[targ_lang.word2idx[s] for s in en.split(' ')] for en, sp in pairs]
# Calculate max_length of input and output tensor
# Here, we'll set those to the longest sentence in the dataset
max_length_inp, max_length_tar = max_length(input_tensor), max_length(target_tensor)
# Padding the input and output tensor to the maximum length
input_tensor = tf.keras.preprocessing.sequence.pad_sequences(input_tensor,
maxlen=max_length_inp,
padding='post')
target_tensor = tf.keras.preprocessing.sequence.pad_sequences(target_tensor,
maxlen=max_length_tar,
padding='post')
return input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_tar
# + [markdown] id="GOi42V79Ydlr" colab_type="text"
# ### Limit the size of the dataset to experiment faster (optional)
#
# Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
# + id="cnxC7q-j3jFD" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Try experimenting with the size of that dataset
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang, max_length_inp, max_length_targ = load_dataset(path_to_file, num_examples)
# + id="4QILQkOs3jFG" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)
# + [markdown] id="rgCLkfv5uO3d" colab_type="text"
# ### Create a tf.data dataset
# + id="TqHsArVZ3jFS" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word2idx)
vocab_tar_size = len(targ_lang.word2idx)
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE))
# + [markdown] id="TNfHIF71ulLu" colab_type="text"
# ## Write the encoder and decoder model
#
# Here, we'll implement an encoder-decoder model with attention which you can read about in the TensorFlow [Neural Machine Translation (seq2seq) tutorial](https://www.tensorflow.org/tutorials/seq2seq). This example uses a more recent set of APIs. This notebook implements the [attention equations](https://www.tensorflow.org/tutorials/seq2seq#background_on_the_attention_mechanism) from the seq2seq tutorial. The following diagram shows that each input word is assigned a weight by the attention mechanism which is then used by the decoder to predict the next word in the sentence.
#
# <img src="https://www.tensorflow.org/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
#
# The input is put through an encoder model which gives us the encoder output of shape *(batch_size, max_length, hidden_size)* and the encoder hidden state of shape *(batch_size, hidden_size)*.
#
# Here are the equations that are implemented:
#
# <img src="https://www.tensorflow.org/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
# <img src="https://www.tensorflow.org/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
#
# We're using *Bahdanau attention*. Let's decide on the notation before writing the simplified form:
#
# * FC = Fully connected (dense) layer
# * EO = Encoder output
# * H = hidden state
# * X = input to the decoder
#
# And the pseudo-code:
#
# * `score = FC(tanh(FC(EO) + FC(H)))`
# * `attention weights = softmax(score, axis = 1)`. Softmax by default is applied on the last axis but here we want to apply it on the *1st axis*, since the shape of score is *(batch_size, max_length, hidden_size)*. `Max_length` is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
# * `context vector = sum(attention weights * EO, axis = 1)`. Same reason as above for choosing axis as 1.
# * `embedding output` = The input to the decoder X is passed through an embedding layer.
# * `merged vector = concat(embedding output, context vector)`
# * This merged vector is then given to the GRU
#
# The shapes of all the vectors at each step have been specified in the comments in the code:
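# Before the Keras implementation, here is a minimal NumPy sketch of the equations and
# shapes listed above (random stand-ins for the dense layers W1, W2, V; this is not the
# model itself, only a shape check):
# +
import numpy as np
batch, max_len, hidden = 2, 5, 8
EO = np.random.rand(batch, max_len, hidden)      # encoder output
H = np.random.rand(batch, 1, hidden)             # hidden state with a time axis added
W1 = np.random.rand(hidden, hidden); W2 = np.random.rand(hidden, hidden); V = np.random.rand(hidden, 1)
score = np.tanh(EO @ W1 + H @ W2) @ V            # (batch, max_len, 1)
attention_weights = np.exp(score) / np.exp(score).sum(axis=1, keepdims=True)  # softmax over axis 1
context_vector = (attention_weights * EO).sum(axis=1)                          # (batch, hidden)
print(score.shape, attention_weights.shape, context_vector.shape)
# -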
# + id="avyJ_4VIUoHb" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def gru(units):
    # If you have a GPU, we recommend using CuDNNGRU (it provides a 3x speedup over GRU);
# the code automatically does that.
if tf.test.is_gpu_available():
return tf.keras.layers.CuDNNGRU(units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
# + id="nZ2rI24i3jFg" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.enc_units)
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
# + id="yJ_B3mhW3jFk" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.dec_units)
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.W1 = tf.keras.layers.Dense(self.dec_units)
self.W2 = tf.keras.layers.Dense(self.dec_units)
self.V = tf.keras.layers.Dense(1)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# score shape == (batch_size, max_length, hidden_size)
score = tf.nn.tanh(self.W1(enc_output) + self.W2(hidden_with_time_axis))
# attention_weights shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
attention_weights = tf.nn.softmax(self.V(score), axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * enc_output
context_vector = tf.reduce_sum(context_vector, axis=1)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * max_length, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size * max_length, vocab)
x = self.fc(output)
return x, state, attention_weights
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.dec_units))
# + id="P5UY8wko3jFp" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
# + [markdown] id="_ch_71VbIRfK" colab_type="text"
# ## Define the optimizer and the loss function
# + id="WmTHr5iV3jFr" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
optimizer = tf.train.AdamOptimizer()
def loss_function(real, pred):
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
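# Quick illustration of the padding mask above: label id 0 is the <pad> token,
# so padded positions contribute nothing to the loss.
print(1 - np.equal(np.array([5, 3, 0, 0]), 0))  # -> [1 1 0 0]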
# + [markdown] id="hpObfY22IddU" colab_type="text"
# ## Training
#
# 1. Pass the *input* through the *encoder* which return *encoder output* and the *encoder hidden state*.
# 2. The encoder output, encoder hidden state and the decoder input (which is the *start token*) is passed to the decoder.
# 3. The decoder returns the *predictions* and the *decoder hidden state*.
# 4. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
# 5. Use *teacher forcing* to decide the next input to the decoder.
# 6. *Teacher forcing* is the technique where the *target word* is passed as the *next input* to the decoder.
# 7. The final step is to calculate the gradients, apply them to the optimizer, and backpropagate.
# + id="ddefjBMa3jF0" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
total_loss += (loss / int(targ.shape[1]))
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables), tf.train.get_or_create_global_step())
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
loss.numpy() / int(targ.shape[1])))
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss/len(input_tensor)))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
# + [markdown] id="mU3Ce8M6I3rz" colab_type="text"
# ## Translate
#
# * The evaluate function is similar to the training loop, except we don't use *teacher forcing* here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.
# * Stop predicting when the model predicts the *end token*.
# * And store the *attention weights for every time step*.
#
# Note: The encoder output is calculated only once for one input.
# + id="EbQpyYs13jF_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word2idx[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs], maxlen=max_length_inp, padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word2idx['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input, dec_hidden, enc_out)
        # storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy()
result += targ_lang.idx2word[predicted_id] + ' '
if targ_lang.idx2word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# + id="s5hQWlbN3jGF" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
plt.show()
# + id="sl9zUHzg3jGI" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def translate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ):
result, sentence, attention_plot = evaluate(sentence, encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
print('Input: {}'.format(sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
# + id="WrAM0FDomq3E" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
translate('hace mucho frio aqui.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# + id="zSx2iM36EZQZ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
translate('esta es mi vida.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# + id="A3LLCx3ZE0Ls" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
translate('¿todavia estan en casa?', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# + id="DUQVLVqUE1YW" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# wrong translation
translate('trata de averiguarlo.', encoder, decoder, inp_lang, targ_lang, max_length_inp, max_length_targ)
# + [markdown] id="RTe5P5ioMJwN" colab_type="text"
# ## Next steps
#
# * [Download a different dataset](http://www.manythings.org/anki/) to experiment with translations, for example, English to German, or English to French.
# * Experiment with training on a larger dataset, or using more epochs
#
| tensorflow/contrib/eager/python/examples/nmt_with_attention/nmt_with_attention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
path = r"E:\wenpeiyu\PythonProject\LogAnalysis\rawdata\LogOutput.csv"
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df_log = pd.read_csv(path)
df_log_RA_MSG = df_log[df_log.MSG+df_log.RA>0]
df_CRNTI = df_log_RA_MSG.groupby('CRNTI').size()
# df_CRNTI = df_log_RA_MSG.groupby(['CRNTI','UEIndex']).size()
crnticount = list(df_CRNTI)[1:]
# %matplotlib inline
sns.distplot(crnticount, rug=True)
plt.show()
# CRNTI frequency counts
df_CRNTI.value_counts()
df_CRNTI[df_CRNTI==87]
df_CRNTI[df_CRNTI==43]
df_22666 = df_log_RA_MSG[df_log_RA_MSG.CRNTI=="22666"]
list(df_22666[df_22666.TOOL==1]["Message"])
list(df_22666["Message"])[:-5]
df_20712 = df_log_RA_MSG[df_log_RA_MSG.CRNTI=="20712"]
list(df_20712["Message"])[:-4]
df_20712 = df_log[df_log.CRNTI=="20712"]
list(df_20712["Message"])
import re
for i in list(df_20712["Message"]):
stra = i
re_sign = re.compile(r"\[[a-zA-Z]+[a-zA-Z\s0-9]*?\]")
re_allocate = re.compile(r"[ 0-9a-zA-Z_]+\[[,\:\-a-zA-Z\s0-9]+?\][a-zA-Z]*")
re_measure = re.compile(r"[0-9a-zA-Z_]+=[-0-9]*")
sign = re_sign.findall(stra)
sign = [i.strip() for i in sign if i.strip()]
stra = re_sign.sub("", stra)
allocate = re_allocate.findall(stra)
allocate = [i.strip() for i in allocate if i.strip()]
stra = re_allocate.sub("", stra)
measure = re_measure.findall(stra)
measure = [i.strip() for i in measure if i.strip()]
stra = re_measure.sub("", stra)
describe = stra.replace(".","").split(",")
describe = [i.strip() for i in describe if i.strip()]
print(i)
print(sign, allocate, measure, describe)
import re
def extract_information(lst_message):
lst_randomaccess = []
for i in lst_message:
stra = i
re_sign = re.compile(r"\[[a-zA-Z]+[a-zA-Z\s0-9]*?\]")
re_allocate = re.compile(r"[ 0-9a-zA-Z_]+\[[,\:\-a-zA-Z\s0-9]+?\][a-zA-Z]*")
re_measure = re.compile(r"[0-9a-zA-Z_]+=[-0-9]*")
sign = re_sign.findall(stra)
sign = {"sign" :[i.strip() for i in sign if i.strip()]}
stra = re_sign.sub("", stra)
allocate = re_allocate.findall(stra)
allocate = [i.strip() for i in allocate if i.strip()]
allocate = {"allocate" :{i.split('[')[0]:i.split('[')[1].replace(']', '') for i in allocate}}
stra = re_allocate.sub("", stra)
measure = re_measure.findall(stra)
measure = [i.strip() for i in measure if i.strip()]
measure = {"measure": {i.split('=')[0]:i.split('=')[1] for i in measure}}
stra = re_measure.sub("", stra)
describe = stra.replace(".","").split(",")
describe = {"describe" :[i.strip() for i in describe if i.strip()]}
lst_randomaccess.append({**sign, **allocate, **measure, **describe})
return lst_randomaccess
lst_randomaccess = extract_information(list(df_20712["Message"])[:-4])
def generate_tagset(lst_randomaccess):
sign = []
allocate = []
measure = []
describe = []
for i in lst_randomaccess:
sign = sign + i['sign']
measure = measure + list(i["measure"].keys())
allocate = allocate + list(i["allocate"].keys())
describe = describe + i['describe']
sign = set(sign)
measure = set(measure)
allocate = set(allocate)
describe = set(describe)
return sign, measure, allocate, describe
set_tags = generate_tagset(lst_randomaccess)
def init_mapping(set_tags):
tags = []
attrs = []
no = []
sign = set_tags[0]
measure = set_tags[1]
allocate = set_tags[2]
describe = set_tags[3]
tags = tags + list(sign)
attrs = attrs + len(sign)*["sign"]
tags = tags + list(measure)
attrs = attrs + len(measure)*["measure"]
tags = tags + list(allocate)
attrs = attrs + len(allocate)*["allocate"]
tags = tags + list(describe)
attrs = attrs + len(describe)*["describe"]
no = [hex(i)[2:].zfill(4) for i in range(len(sign)+len(measure)+len(allocate)+len(describe))]
df_mapping = pd.DataFrame({
"tag": tags,
"attr": attrs,
"no": no})
return df_mapping
init_mapping(set_tags)
list(sign)
allocate = ['[DLHARQ]', '[MSG4 HARQ]', 'CRNTI[20712]', 'CEL[0]', 'AckNackDtx[0]', 'HarqTranCn[0]', 'Msg4ReTxCnt[4]']
{i.split('[')[0]: i.split('[')[1].replace(']', '') for i in allocate}
hex(11)[2:].zfill(4)
5*["1"]
len(sign)+len(measure)
# +
import re
import pandas as pd
import pickle
from log import logger
class LogFlow:
"""
    Log encoding flow
"""
def __init__(self, filepath):
"""
        Initialize the object
        :param filepath: path of the log file to process
"""
self.filepath = filepath
self.logger = logger
self.read_file()
self.logger.info("模块{0}初始化成功".format(self.__class__.__name__))
def read_file(self):
"""
        Read in the log file
:return:
"""
self.df_log = pd.read_csv(self.filepath)
self.logger.info("日志文件 - {file} 载入成功".format(file=self.filepath))
self.df_log_RA_MSG = self.df_log[self.df_log.MSG + self.df_log.RA > 0]
def collect_message(self, crnti=None, RA=True):
"""
        Extract the Message column of the log
        :param crnti: CRNTI to extract; default None means no filtering by a specific CRNTI
        :param RA: whether to filter for random-access entries; on by default, otherwise read all log rows
:return:
"""
self.lst_message = []
if crnti is None:
if RA:
self.lst_message = list(self.df_log_RA_MSG["Message"])
else:
self.lst_message = list(self.df_log["Message"])
else:
if RA:
df_temp = self.df_log_RA_MSG[self.df_log_RA_MSG.CRNTI == crnti]
self.lst_message = list(df_temp["Message"])
else:
df_temp = self.df_log[self.df_log.CRNTI == crnti]
self.lst_message = list(df_temp["Message"])
def extract_information(self):
"""
        Extract the fields from each message and format them as structured data
:return:
"""
self.lst_randomaccess = []
for i in self.lst_message:
stra = i
re_sign = re.compile(r"\[[a-zA-Z]+[a-zA-Z\s0-9]*?\]")
re_allocate = re.compile(r"[ 0-9a-zA-Z_]+\[[,\:\-a-zA-Z\s0-9]+?\][a-zA-Z]*")
re_measure = re.compile(r"[0-9a-zA-Z_]+=[-0-9]*")
sign = re_sign.findall(stra)
sign = {"sign": [i.strip() for i in sign if i.strip()]}
stra = re_sign.sub("", stra)
allocate = re_allocate.findall(stra)
allocate = [i.strip() for i in allocate if i.strip()]
allocate = {"allocate": {i.split('[')[0]: i.split('[')[1].replace(']', '') for i in allocate}}
stra = re_allocate.sub("", stra)
measure = re_measure.findall(stra)
measure = [i.strip() for i in measure if i.strip()]
measure = {"measure": {i.split('=')[0]: i.split('=')[1] for i in measure}}
stra = re_measure.sub("", stra)
describe = stra.replace(".", "").split(",")
describe = {"describe": [
re.sub("\d+ msg3gid", "msg3gid", re.sub("SFN\[\d+\]", "", re.sub("\| \@\[\d+\]", "", i.strip())))
for i in describe if i.strip()]}
self.lst_randomaccess.append({**sign, **allocate, **measure, **describe})
def generate_tagset(self):
"""
        Generate the tag sets
:return:
"""
sign = []
allocate = []
measure = []
describe = []
for i in self.lst_randomaccess:
sign = sign + i['sign']
sign = list(set(sign))
measure = measure + list(i["measure"].keys())
measure = list(set(measure))
allocate = allocate + list(i["allocate"].keys())
allocate = list(set(allocate))
describe = describe + i['describe']
describe = list(set(describe))
self.set_tags = (sign, measure, allocate, describe)
def init_mapping(self):
"""
        Build the tag-to-code mapping table
:return:
"""
tags = []
attrs = []
no = []
sign = self.set_tags[0]
measure = self.set_tags[1]
allocate = self.set_tags[2]
describe = self.set_tags[3]
tags = tags + list(sign)
attrs = attrs + len(sign) * ["sign"]
no1 = [hex(i)[2:].zfill(4) for i in range(len(sign))]
tags = tags + list(measure)
attrs = attrs + len(measure) * ["measure"]
no2 = [hex(i+256)[2:].zfill(4) for i in range(len(measure))]
tags = tags + list(allocate)
attrs = attrs + len(allocate) * ["allocate"]
no3 = [hex(i+1280)[2:].zfill(4) for i in range(len(allocate))]
tags = tags + list(describe)
attrs = attrs + len(describe) * ["describe"]
no4 = [hex(i+2304)[2:].zfill(4) for i in range(len(describe))]
no = [hex(i)[2:].zfill(4) for i in range(len(sign) + len(measure) + len(allocate) + len(describe))]
self.df_mapping = pd.DataFrame({
"tag": tags,
"attr": attrs,
"no_order": no,
"no": no1+no2+no3+no4})
def dump_mapping(self, path):
"""
        Export the mapping table
:param path:
:return:
"""
pickle.dump(self.df_mapping, open(path, "wb"))
def load_mapping(self, path):
"""
        Import the mapping table
:param path:
:return:
"""
self.df_mapping = pickle.load(open(path, 'rb'))
def generate_code(self):
"""
        Encode the log messages according to the mapping table (each tag becomes a 4-hex-digit code; a message's codes are concatenated)
:return:
"""
for num, i in enumerate(self.lst_randomaccess):
str_code = ""
str_code_color = ""
for j in i:
for m in i[j]:
str_code = str_code + dict(zip(self.df_mapping["tag"], self.df_mapping["no"]))[m]
str_code_color = str_code_color + dict(zip(self.df_mapping["tag"], self.df_mapping["no_order"]))[m]
self.lst_randomaccess[num]["code"] = str_code
self.lst_randomaccess[num]["code_color"] = str_code_color
# -
a = LogFlow(path)
a.collect_message("20712",True)
a.extract_information()
a.load_mapping('d:/mapping.pkl')
a.generate_code()
a.df_mapping.to_excel("d:/字段编码映射表.xlsx")
a.lst_randomaccess
# +
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def plot_message_spectrum(lst_randomaccess):
length = len(lst_randomaccess)
matplotlib.rcParams['figure.figsize'] = [10, length*0.5]
plt.figure()
plt.subplots_adjust(hspace=0)
for num,i in enumerate(lst_randomaccess):
s = i["code_color"]
plt.subplot(length,1,num+1)
        lst_color = [int(s[i:i+4], 16) for i in range(0, len(s), 4)]  # decode each 4-hex-digit tag code
arr_color = np.array([lst_color])
plt.pcolor(arr_color)
plt.axis('off')
plot_message_spectrum(c.lst_randomaccess)
# +
plt.subplot(4,1,2)
w = np.array([[1,6,2,8]])
plt.pcolor(w)
plt.axis('off')
plt.subplot(4,1,3)
w = np.array([[5,3,9,6]])
plt.pcolor(w)
plt.axis('off')
plt.subplot(4,1,4)
w = np.array([[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]])
plt.pcolor(w)
plt.axis('off')
# -
s = "000c00010067005300720085008f008200a1"
[eval("0x" + s[i:i+4]) for i in range(0,len(s),4)]
0x55
s = [1,2,1]
np.array([s])
a.lst_randomaccess
b = LogFlow(r"C:\Users\wenpeiyu\Desktop\success.csv")
b.collect_message()
b.extract_information()
b.load_mapping('d:/mapping.pkl')
b.generate_code()
b.lst_randomaccess
c = LogFlow(r"C:\Users\wenpeiyu\Desktop\fail.csv")
c.collect_message()
c.extract_information()
c.load_mapping('d:/mapping.pkl')
c.generate_code()
| LogAnalysis/LogFlow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# We are given N different types of stickers. Each sticker has a lowercase English word on it.
#
# You would like to spell out the given target string by cutting individual letters from your collection of stickers and rearranging them.
#
# You can use each sticker more than once if you wish, and you have an unlimited supply of every sticker.
#
# What is the minimum number of stickers required to spell out the target? If the task is impossible, return -1.
#
# Example 1:
# Input:
# ["with", "example", "science"], "thehat"
# Output:
# 3
# Explanation:
# We can use 2 "with" stickers and 1 "example" sticker.
# After cutting the letters off the stickers and rearranging them, we can form the target "thehat".
# Also, this is the minimum number of stickers needed to form the target string.
#
# Example 2:
# Input:
# ["notice", "possible"], "basicbasic"
# Output:
# -1
# Explanation:
# We cannot form the target "basicbasic" by cutting letters from the given stickers.
#
#
# Constraints:
# stickers has length in the range [1, 50].
# stickers consists of lowercase English words (with no apostrophes).
# target has length in the range [1, 15] and consists of lowercase letters.
# In all test cases, every word was chosen at random from the 1000 most common US English words, and the target is the concatenation of two random words.
# The time limit may be more challenging than usual. A test case with 50 stickers is expected to be solvable in about 35 ms on average.
# -
class Solution:
    # First attempt: DP over the *set* of letters in target. Note this only tracks
    # which letters are covered, not how many of each, so it undercounts when the
    # target repeats a letter (e.g. it gives 2 for "thehat" instead of 3).
    # The position-mask DP in the next cell handles multiplicity correctly.
    def minStickers(self, stickers, target: str) -> int:
        # impossible if some letter of target never appears on any sticker
        s_set, t_set = set(), set(target)
        for s in stickers:
            s_set |= set(s)
        if len(s_set & t_set) != len(t_set):
            return -1
        # bitmask of the letters on each sticker
        s2idx = []
        for s in stickers:
            temp = 0
            for w in s:
                temp |= (1 << (ord(w) - ord('a')))
            s2idx.append(temp)
        # bitmask of the letters in target
        bin_t = 0
        for t in target:
            bin_t |= (1 << (ord(t) - ord('a')))
        # dp[mask] = fewest stickers whose letters cover the letter set `mask`
        dp = [float('inf')] * (bin_t + 1)
        dp[0] = 0
        for t in range(bin_t + 1):
            if dp[t] == float('inf'):
                continue
            for s in s2idx:
                new_t = (t | s) & bin_t  # keep only letters that occur in target
                if dp[new_t] > dp[t] + 1:
                    dp[new_t] = dp[t] + 1
        return dp[bin_t]
solution = Solution()
solution.minStickers(["with", "example", "science"], "thehat")
class Solution:
def minStickers(self, stickers, target: str) -> int:
def find_next_status(status, pos_str):
for ch in pos_str:
for i in range(len(target)):
if ch not in target:
continue
if ((status >> i) & 1) == 0 and target[i] == ch:
status |= (1 << i)
break
return status
n = len(target)
dp = [float('inf')] * (1 << len(target))
dp[0] = 0
for status in range(1 << len(target)):
if dp[status] == float('inf'):
continue
for s in stickers:
new_status = find_next_status(status, s)
dp[new_status] = min(dp[new_status], dp[status] + 1)
return dp[-1] if dp[-1] != float('inf') else -1
solution = Solution()
solution.minStickers(["with", "example", "science"], "thehat")
# The same DP written in C++ (kept as a raw cell, since this notebook runs a Python kernel):
# + active=""
# class Solution {
# public:
#     int minStickers(vector<string>& stickers, string target)
#     {
#         int n = target.size();
#         vector<int>dp(1<<n,INT_MAX);
#         dp[0] = 0;
#         for (int state=0; state<(1<<n); state++)
#         {
#             if (dp[state]==INT_MAX) continue;
#             for (string str:stickers)
#             {
#                 int new_state = findNextStatusByUsingStr(state,target,str);
#                 dp[new_state] = min(dp[new_state], dp[state]+1);
#             }
#         }
#         return dp[(1<<n)-1]==INT_MAX?-1: dp[(1<<n)-1];
#     }
#     int findNextStatusByUsingStr(int status, string target, string s)
#     {
#         int n = target.size();
#         for (auto ch:s)
#         {
#             // loop over each character in target, if equals to ch and not filled, then set as filled
#             for (int k=0; k<n; k++)
#             {
#                 if (((status>>k)&1)==0 && target[k]==ch)
#                 {
#                     status = status+(1<<k);
#                     break;
#                 }
#             }
#         }
#         return status;
#     }
# };
# -
| Dynamic Programming/1123/691. Stickers to Spell Word.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Energy A.I. Hackathon 2021 - Project Template
#
# ## General Guidance
#
# We're expecting a workflow that could be handed to any competent engineer or scientist with basic subsurface resource, data analytics, and machine learning knowledge, such that they could understand and apply your workflow.
#
# ### Expectations on the Workflow
#
# * include short descriptions, no 2 code blocks should be adjacent, always have a short statement to explain the next code block
#
# * be as concise as possible:
#
# * use point form (except for the executive summary)
#     * use effective, creative figures that combine what could have been in multiple plots
#     * every line of code, statement, or figure must have a purpose
# * conciseness is part of the grading, don't add content that isn't needed
#
# * be very clear with readable code
#
# * label every axis for every plot
# * use readable code, logical variable names, use available functionality and define functions and classes for compactness and concise comments in the code
# * proceed step by step, explain each important step concisely for an easy-to-follow narrative
#
#
# ### Using Code From Others
#
# You may use blocks/snippets of code from other sources with citation. To cite a block of code, separate it into its own block and add the citation in the markdown above it.
#
# The following code block is from Professor <NAME> (@GeostatsGuy), SubsurfaceDataAnalytics_PCA.ipynb from [GeostatsGuy GitHub](https://github.com/GeostatsGuy/PythonNumericalDemos/blob/master/SubsurfaceDataAnalytics_PCA.ipynb).
#
# ```python
# def simple_simple_krige(df,xcol,ycol,vcol,dfl,xlcol,ylcol,vario,skmean):
# # load the variogram
# nst = vario['nst']; pmx = 9999.9
# cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst)
# ```
#
# or use inline citations with comments, such as this for a few lines of code.
#
# ```python
# def simple_simple_krige(df,xcol,ycol,vcol,dfl,xlcol,ylcol,vario,skmean): # function from Professor <NAME>,https://github.com/GeostatsGuy/PythonNumericalDemos/blob/master/SubsurfaceDataAnalytics_PCA.ipynb
# ```
#
# ## The Workflow Template
#
# Here's the template for your workflow.
#
# ___
#
# # Energy A.I. Hackathon 2021 Workflow - MOAR Drilling
#
# #### Authors:
# #### - **<NAME>**, Mechanical Engineering
# #### - **<NAME>**, Petroleum and Geological Engineering
# #### - **<NAME>**, Petroleum and Geological Engineering
#
# #### The University of Texas at Austin, Austin, Texas USA
# ___
#
# ### Executive Summary
#
# Only 4 short sentences.
#
# 1. What is the problem?
# We were tasked with forecasting production of 10 unproduced wells from the production, well log, and seismic data of 73 other wells in the reservoir.
# 2. What did your team do to address the problem?
# Production was forecasted by upscaling well data, quantifying features, and using a gradient boosted decision tree model.
# 3. What did your team learn?
# Our three-year cumulative production estimates have a mean absolute error near 20% and ___.
# 4. What does your team recommend going forward?
# Production of the 10 wells may commence immediately as global oil demand has returned and oil prices are predicted to rise.
#
# ___
#
# ### Workflow Goal
#
# Our workflow aimed to forecast oil production for the 10 pre-production wells using gradient-boosted trees. To ensure a strong model, we used mean absolute error (MAE) to assess the accuracy of our model throughout.
# ___
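# As a quick, hedged illustration of this metric (an addition, not from the original workflow; the numbers below are made up), MAE and its value relative to mean production can be computed as follows:
import numpy as np
from sklearn.metrics import mean_absolute_error
y_true = np.array([1500., 2200., 1800.])  # hypothetical 3-yr cumulative production, MSTB
y_pred = np.array([1650., 2050., 1900.])  # hypothetical forecasts
mae = mean_absolute_error(y_true, y_pred)
print(mae, mae / y_true.mean())  # absolute error, and error as a fraction of mean production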
# ### Import Packages
# +
# Data import and processing
import pandas as pd
import numpy as np
# Plotting and visualization
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
# Machine Learning modules from sklearn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error
# Gradient-boosted Decision Trees
import xgboost as xgb
# -
# ### Load Data
#
# Load the sample data and production data for the wells already in production.
dfProdSamples = pd.read_csv(r"./well_data/wellbore_data_producer_wells.csv")
dfProdHist = pd.read_csv(r"./prod_data/production_history.csv")
dfProdSamples.head()
# ### Data Processing
# Process data for easier readability and model construction.
# First, we change the "Rock facies" column into an ordinal discrete column that acts as a measure of "sandiness".
# Then, we calculate the Euclidean distance from each well to the fault line and process it into two separate columns: one indicating whether the well is above or below the fault, and the other the absolute value of its distance.
def process_data(df):
# Dictionary to process rock facies into a "sandiness measure", an ordinal measure
ordinal_mapper = {'Sandstone': 3, 'Sandy shale': 1, 'Shaly sandstone': 2, 'Shale': 0}
df['Rock facies'] = df['Rock facies'].replace(ordinal_mapper)
# Group all well samples together by well and simply take the mean of each measure
df = df.groupby(df["Well_ID"]).mean()
# Calculate distance to fault
df['Fault Distance, m'] = (((-1)*df['X, m'])+((-1)*df['Y, m'])+11750)/np.sqrt(2)
# Positive values of distance are above the line and negative values are below
df["Above Fault"] = (df["Fault Distance, m"] > 0).astype(int)
# Final distance should be an absolute value
df['Fault Distance, m'] = abs(df['Fault Distance, m'])
return df
# #### Apply function to training data
#
# Now that our function has been created, we can apply it to the initial dataset and combine it with the production data to create a single DataFrame that describes each well. We remove all production columns except the 3-year cumulative oil production, as we won't have the rest of the data for the pre-production wells, so they can't be features. The 3-year cumulative oil production will be our target variable. Finally, we create X as our features and y as our target variable.
dfProdProc = process_data(dfProdSamples)
unneeded = ["Cumulative oil production (1 yr), MSTB",
"Cumulative oil production (2 yr), MSTB",
"Cumulative Water production (1 yr), MSTB",
"Cumulative Water production (2 yr), MSTB",
"Cumulative Water production (3 yr), MSTB"]
dfProdHistDr = dfProdHist.drop(unneeded, axis=1).set_index("Well_ID")
dfProdFinal = pd.concat([dfProdProc, dfProdHistDr], axis=1)
X = dfProdFinal.drop(["Cumulative oil production (3 yr), MSTB"], axis=1)
y = dfProdFinal["Cumulative oil production (3 yr), MSTB"]
# ### Correlation Matrix Function
#
# The following code block is modified from one originally from Professor <NAME> (@GeostatsGuy) and Professor <NAME> (@johntfoster), SubsurfaceDataAnalytics_PCA.ipynb from GeostatsGuy GitHub.
def plot_corr(dataframe,size=10): # plots a correlation matrix as a heat map
corr = dataframe.corr()
fig, ax = plt.subplots(figsize=(size, size))
im = ax.matshow(corr,vmin = -1.0, vmax = 1.0, cmap = "bwr")
plt.xticks(range(len(corr.columns)), corr.columns, rotation = 90);
plt.yticks(range(len(corr.columns)), corr.columns);
plt.colorbar(im, orientation = 'vertical')
plt.title('Correlation Matrix')
# ### Basic Data Checking and Visualization
#
# Our correlation matrix indicates that there are low levels of correlation for most features, outside of perhaps porosity with acoustic impedance. However, performing PCA loses explainability and removes the ability to prescribe changes to data collection going forward, as it abstracts real-world features into arbitrary columns. A useful alternative would be factor analysis, but time did not permit this analysis (a brief sketch is shown after the plot below). With that in mind, we decided to use all features from the samples in our model. Since there are low levels of linearity, we decided that a decision-tree-based model would best capture areas in our data with different production levels.
plot_corr(dfProdFinal)
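# As a hedged illustration of the factor-analysis alternative mentioned above (an addition, not part of the original workflow; the choice of three factors is arbitrary), a minimal sketch with scikit-learn could look like this:
from sklearn.decomposition import FactorAnalysis
fa = FactorAnalysis(n_components=3, random_state=42)
# FactorAnalysis cannot handle NaNs, so fill any remaining gaps first
factor_scores = fa.fit_transform(X.fillna(X.median()))
factor_scores.shape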
# #### Model Generation Function
#
# We needed to build a function that could generate 100 versions of our model based on different bagged samples of our data. We utilized sklearn's train_test_split to grab 75% of the data at a time and applied xgboost's XGBRegressor algorithm to utilize gradient-boosted decision trees. Additionally, we grab the out-of-bag error for each of our models to get a sense of its MAE.
def model_generator(features, target, seed):
train_X, test_X, train_y, test_y = train_test_split(features, target, random_state=seed)
model = xgb.XGBRegressor(random_state=1, n_jobs=-1)
model.fit(train_X, train_y)
preds = model.predict(test_X)
oob_error = mean_absolute_error(preds, test_y)
return [model, oob_error]
# #### Build out Models and Assess Error
#
# We create a list of models, which are generated with 100 different seeds of the test-train-split. Additionally, we take the errors for each of our models and average them to give us a general sense of the out-of-bag error for our ensemble model.
# +
models = []
error_list = []
rng = np.random.RandomState(42)
for i in range(100):
model_gens = model_generator(X, y, rng.randint(0, 10000))
models.append(model_gens[0])
error_list.append(model_gens[1])
final_error = sum(error_list)/len(error_list)
print("The average MAE of the predictors is: ", final_error)
print("The MAE in terms of percentage of the mean production is: ", final_error/y.mean())
# -
# #### Import Pre-production Well Data
#
# Now that we have models built we need to apply them to our pre-production data. To do that, we must first import said data, then apply our same function to prepare the data for our model.
dfPreProd = pd.read_csv(r"./well_data/wellbore_data_preproduction_well.csv")
dfPreProdFinal = process_data(dfPreProd)
# #### Apply Model to Pre-production Wells
# We need names for each of our realizations to match the solution format, so first we create a list of column names R1, R2, ..., R100.
# Then, we loop through our models and apply each one to the pre-production data to create 100 realizations per well.
# +
names = []
for i in range(100):
names.append("R"+ str(i+1) + ", MSTB")
predictions = np.array([])
for model in models:
predictions = np.append(predictions, np.array(model.predict(dfPreProdFinal)))
predictions = pd.DataFrame(predictions.reshape(100,10).T, columns=names)
# -
# #### Realization Histograms
#
# Now, to check the viability of our realizations, we graphed them to ensure that they didn't seem strangely uniform or overly concentrated in any one spot. Additionally, we see that most of our graphs are normal or near-normal.
predictions["Well_ID"] = dfPreProdFinal.index
predictions = predictions.set_index("Well_ID")
predictions.T.hist(layout=(2,5), figsize=(11, 8.5), grid=False, bins=9, linewidth=1, edgecolor="black"); #Semicolon added to suppress extra Matplotlib output
# #### Percentiles for Realizations
#
# We then took the realizations and created a table of their P10, P50, and P90 values to give a better sense of bull and bear cases for each well.
p_values = pd.DataFrame([predictions.T.quantile(.10), predictions.T.quantile(.50), predictions.T.quantile(.90)])
p_values.index.names = ['Percentile']
p_values
# #### Finalize Results and Format Output
# Finally, we create a "Prediction" for each well based on the mean of its realizations. We then reformat the DataFrame to match the solution format by reordering columns and setting the index properly, and use Pandas to export the realizations to a CSV.
predictions["Prediction, MSTB"] = predictions.mean(axis=1)
predictions = predictions[["Prediction, MSTB"] + names]
predictions.to_csv(r".\final_solutions.csv")
| Hackathon_ProjectTemplate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/cartoee_projections.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab"/></a>
#
# Uncomment the following line to install [geemap](https://geemap.org) if needed.
# +
# # !pip install geemap
# -
# ## Working with projections in cartoee
# +
import ee
from geemap import cartoee
import cartopy.crs as ccrs
# %pylab inline
# -
ee.Initialize()
# ### Plotting an image on a map
#
# Here we are going to show another example of creating a map with EE results. We will use global sea surface temperature data for Jan-Mar 2018.
# get an earth engine image of ocean data for Jan-Mar 2018
ocean = (
ee.ImageCollection('NASA/OCEANDATA/MODIS-Terra/L3SMI')
.filter(ee.Filter.date('2018-01-01', '2018-03-01'))
.median()
.select(["sst"],["SST"])
)
# set parameters for plotting
# will plot the Sea Surface Temp with specific range and colormap
visualization = {'bands':"SST",'min':-2,'max':30}
# specify region to focus on
bbox = [-180,-88,180,88]
# +
fig = plt.figure(figsize=(10,7))
# plot the result with cartoee using a PlateCarre projection (default)
ax = cartoee.get_map(ocean,cmap='plasma',vis_params=visualization,region=bbox)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma')
ax.coastlines()
plt.show()
# -
# ### Mapping with different projections
#
# You can specify whatever projection is available within `cartopy` to display the results from Earth Engine. Here are a couple of examples of global and regional maps using the sea surface temperature example. Please refer to the [`cartopy` projection documentation](https://scitools.org.uk/cartopy/docs/latest/crs/projections.html) for more examples with different projections.
# +
fig = plt.figure(figsize=(10,7))
# create a new Mollweide projection centered on the Pacific
projection = ccrs.Mollweide(central_longitude=-180)
# plot the result with cartoee using the Mollweide projection
ax = cartoee.get_map(ocean,vis_params=visualization,region=bbox,
cmap='plasma',proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='bottom',cmap='plasma',
orientation='horizontal')
ax.set_title("Mollweide projection")
ax.coastlines()
plt.show()
# +
fig = plt.figure(figsize=(10,7))
# create a new Goode homolosine projection centered on the Pacific
projection = ccrs.InterruptedGoodeHomolosine(central_longitude=-180)
# plot the result with cartoee using the Goode homolosine projection
ax = cartoee.get_map(ocean,vis_params=visualization,region=bbox,
cmap='plasma',proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='bottom',cmap='plasma',
orientation='horizontal')
ax.set_title("Goode homolosine projection")
ax.coastlines()
plt.show()
# +
fig = plt.figure(figsize=(10,7))
# create a new orthographic projection focused on the Pacific
projection = ccrs.Orthographic(-130,-10)
# plot the result with cartoee using the orthographic projection
ax = cartoee.get_map(ocean,vis_params=visualization,region=bbox,
cmap='plasma',proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma',
orientation='vertical')
ax.set_title("Orographic projection")
ax.coastlines()
plt.show()
# -
# ### Warping artifacts
#
# Oftentimes global projections are not needed, so we use a specific projection that provides the best view of the geographic region of interest. When we do this, image warping effects sometimes occur. This is because `cartoee` only requests data for the region of interest, and when mapping with `cartopy` the pixels get warped to fit the view extent as well as possible. Consider the following example where we want to map SST over the South Pole:
# +
fig = plt.figure(figsize=(10,7))
# Create a new region to focus on
spole = [-180,-88,180,0]
projection = ccrs.SouthPolarStereo()
# plot the result with cartoee focusing on the south pole
ax = cartoee.get_map(ocean,cmap='plasma',vis_params=visualization,region=spole,proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma')
ax.coastlines()
ax.set_title('The South Pole')
plt.show()
# -
# As you can see from the result, there are warping effects on the plotted image. There is really no way of getting around this (other than requesting a larger extent of data, which may not always be practical).
#
# So, what we can do is set the extent of the map to a more realistic view after plotting the image as in the following example:
# +
fig = plt.figure(figsize=(10,7))
# plot the result with cartoee focusing on the south pole
ax = cartoee.get_map(ocean,cmap='plasma',vis_params=visualization,region=spole,proj=projection)
cb = cartoee.add_colorbar(ax,vis_params=visualization,loc='right',cmap='plasma')
ax.coastlines()
ax.set_title('The South Pole')
# get bounding box coordinates of a zoom area
zoom = spole
zoom[-1] = -20
# convert bbox coordinate from [W,S,E,N] to [W,E,S,N] as matplotlib expects
zoom_extent = cartoee.bbox_to_extent(zoom)
# set the extent of the map to the zoom area
ax.set_extent(zoom_extent,ccrs.PlateCarree())
plt.show()
| examples/notebooks/cartoee_projections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
aot = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Aotizhongxin_20130301-20170228.csv')
# +
chan = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Changping_20130301-20170228.csv')
ding = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Dingling_20130301-20170228.csv')
dong = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Dongsi_20130301-20170228.csv')
guan = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Guanyuan_20130301-20170228.csv')
guch = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Gucheng_20130301-20170228.csv')
hua = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Huairou_20130301-20170228.csv')
nong = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Nongzhanguan_20130301-20170228.csv')
shu = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Shunyi_20130301-20170228.csv')
tian = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Tiantan_20130301-20170228.csv')
wan = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Wanliu_20130301-20170228.csv')
wans = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter10/Datasets/PRSA_Data_Wanshouxigong_20130301-20170228.csv')
# -
dfs = [aot, chan, ding, dong, guan, guch, hua, nong, shu, tian, wan, wans]
air = pd.concat(dfs)
air.reset_index(drop = True, inplace = True)
air.head()
air.tail()
air.describe()
air = air.drop(['No'], axis = 1)
air.rename(index = str, columns = {
'PM2.5' : 'PM25',
}, inplace = True)
air.isnull().sum()
round(air.isnull().sum()/len(air.index), 4)*100
air.head()
pm_25 = sns.boxplot(air['PM25'])
(air['PM25'] >= 250).sum()
pm25 = air.loc[air['PM25'] >= 250]
pm25.head()
pm25.station.unique()
pm_10 = sns.boxplot(air.PM10)
pm10 = air.loc[air['PM10'] >= 320]
pm10.head()
(air['PM10'] >= 320).sum()
air.loc[(air['PM25'] >= 250) & (air['PM10'] >= 320)]
((air['PM25'] >= 250) & (air['PM10'] >= 320)).sum()
air.describe()
so2 = sns.boxplot(air.SO2)
(air['SO2'] >= 70).sum()
((air['PM25'] >= 250) & (air['PM10'] >= 320) & (air['SO2'] >= 70)).sum()
(air['SO2'] >= 300).sum()
no2 = sns.boxplot(air.NO2)
(air['NO2'] >= 150).sum()
((air['PM25'] >= 250) & (air['PM10'] >= 320) & (air['SO2'] >= 200) & (air['NO2'] >= 150)).sum()
co = sns.boxplot(air.CO)
(air['CO'] >= 3000).sum()
o3 = sns.boxplot(air.O3)
(air['O3'] >= 200).sum()
(air['O3'] >= 470).sum()
rain = sns.boxplot(air.RAIN)
colors = ["windows blue", "amber", "faded green", "dusty purple"]
sns.set(rc = {"figure.figsize": (18,8),
# "axes.titlesize" : 18,
"axes.labelsize" : 14,
"xtick.labelsize" : 14, "ytick.labelsize" : 14
})
sns.boxplot(x = 'station', y = 'PM25', data = air.dropna(axis = 0).reset_index())
sns.boxplot(x = 'station', y = 'PM10', data = air.dropna(axis = 0).reset_index())
sns.boxplot(x = 'station', y = 'SO2', data = air.dropna(axis = 0).reset_index())
sns.boxplot(x = 'station', y = 'NO2', data = air.dropna(axis = 0).reset_index())
sns.boxplot(x = 'station', y = 'CO', data = air.dropna(axis = 0).reset_index())
sns.boxplot(x = 'station', y = 'O3', data = air.dropna(axis = 0).reset_index())
sns.boxplot(x = 'station', y = 'RAIN', data = air.dropna(axis = 0).reset_index())
# +
# Exercise 10.03
# -
new_air = air.copy()  # work on a copy so the original DataFrame is left untouched
new_air.isnull().sum()
# Impute missing values in the numeric columns with each column's median
for col in ['PM25', 'PM10', 'SO2', 'NO2', 'CO', 'O3', 'TEMP', 'PRES', 'DEWP', 'RAIN', 'WSPM']:
    new_air[col].fillna(new_air[col].median(), inplace=True)
new_air.isnull().sum()
new_air = new_air.fillna(new_air['wd'].value_counts().index[0])
new_air.isnull().sum()
| Chapter10/Exercise10.03/Exercise10.03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="velwGW5PN9ek"
# # Imputation
# + [markdown] id="aANn-r28No1O"
# Imputation means replacing missing data (missing values; NaN; blanks) with a substitute value
#
# Imputation techniques depend on the variable's data type:
# 1. Numeric variables, using
# - mean or median imputation
# - arbitrary value imputation
# - end-of-tail imputation
# 2. Categorical variables, using
# - imputation with the most frequent category
# - adding a "missing" category
# + [markdown] id="3A64B65zOBqS"
# ### Mean
# + id="MfuXr9c5J5-z"
import pandas as pd
import numpy as np
# nan = missing value (no content)
kolom = {'col1': [2, 9, 19],
'col2': [5, np.nan, 17],
'col3': [3, 9, np.nan],
'col4': [6, 0, 9],
'col5': [np.nan, 7, np.nan]}
data = pd.DataFrame(kolom)
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="HHrTwzi1Ows2" outputId="babf6d30-67cf-408b-9d2d-c4d3480013f9"
data
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="AXnAvWikPAFc" outputId="e21b921c-0541-42e5-9e91-dffa7cb22ac9"
# fill the NaN entries with each column's mean
data.fillna(data.mean())
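# The technique list above also mentions median imputation; as a small addition for completeness, the equivalent call with the median is:
data.fillna(data.median())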
# + [markdown] id="WBYtoutuPlCN"
# ### Arbitrary Value Imputation
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="QTI2c03LPQhK" outputId="493fa0c2-14bc-4620-bd10-5bed4198283a"
umur = {'umur': [29, 43, np.nan, 25, 34, np.nan, 50]}
data = pd.DataFrame(umur)
data
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="aOYqxdayQqaK" outputId="4c2ff970-298b-418f-9e14-2e95bc5b1083"
data.fillna(99)
# + [markdown] id="ykn2i4-MQ6k0"
# ### End-of-Tail Imputation
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="xbZi_6b0RbAv" outputId="72dd12c1-b72e-4190-a2ea-db6255bb0603"
umur = {'umur': [29, 43, np.nan, 25, 34, np.nan, 50]}
data = pd.DataFrame(umur)
data
# + colab={"base_uri": "https://localhost:8080/"} id="txuqjeNCTD3w" outputId="ecd1919f-7f9a-4be6-dac3-a86eabdb6250"
pip install feature-engine
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="zjV9zK5pR0v_" outputId="e3903c8d-c591-4588-840f-42892656a959"
# right tail: mean + 3 * standard deviation
# import EndTailImputer
from feature_engine.imputation import EndTailImputer
# create the imputer
imputer = EndTailImputer(imputation_method='gaussian', tail='right')
# fit the imputer to the data
imputer.fit(data)
# transform the data
test_data = imputer.transform(data)
# show the result
test_data
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="6ELhzJEvSHgB" outputId="a960fc7e-5437-421f-bd5b-ccf23c2f38f2"
# import EndTailImputer
from feature_engine.imputation import EndTailImputer
# create the imputer
imputer = EndTailImputer(imputation_method='gaussian', tail='left')
# fit the imputer to the data
imputer.fit(data)
# transform the data
test_data = imputer.transform(data)
# show the result
test_data
# + [markdown] id="9FnyD6_HWZlR"
# ### What about categorical data?
# + [markdown] id="IyuWPYHoXKq0"
# ### Mode Imputation (Most Frequent Category)
# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="_HLAkYevWY9i" outputId="ddcb1ae3-29d1-44e1-b472-a35508db7968"
from sklearn.impute import SimpleImputer
mobil = {'mobil' :['Ford', 'Ford', 'Toyota', 'Honda', np.nan,
'Toyota', 'Honda', 'Toyota', np.nan, np.nan]}
data = pd.DataFrame(mobil)
data
# + id="WJHZECFKVaBA"
imp = SimpleImputer(strategy='most_frequent')
# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="cTPnyD7xYXJU" outputId="586b1767-c725-4834-b91a-27aebfc071fc"
imp.fit_transform(data)
# + [markdown] id="2odwFi9RZHls"
# What if we need to handle missing values in categorical and numeric variables at the same time?
#
# We can use random sample imputation
# + colab={"base_uri": "https://localhost:8080/", "height": 363} id="kM8ZI4T1Yhgy" outputId="f838a234-4108-4c5f-81a1-fb88b0599504"
# first import RandomSampleImputer
from feature_engine.imputation import RandomSampleImputer
# create data that contains missing values
data = {'Jenis Kelamin' : ['Laki-laki', 'Perempuan', 'Laki-laki',
np.nan, 'Laki-laki', 'Perempuan', 'Perempuan', np.nan, np.nan, 'Laki-laki'],
'Umur' : [29, np.nan, np.nan, 32, np.nan,43,50,22,52,17]}
df = pd.DataFrame(data)
df
# +
# first create the imputer
imputer = RandomSampleImputer(random_state=29)
# fit it
imputer.fit(df)
# transform the data
testing_df = imputer.transform(df)
# -
testing_df
| Metdat-science/Pertemuan 8/Imputasi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting time series using pyacs.gts.lib.plot
# ### Author: <NAME>
# ### Date: 27/12/2019
import numpy as np
# import pyacs time series modules
import pyacs.gts.Gts as Gts
import pyacs.gts.Sgts as Sgts
# loads some time series as Gts objects
albh_pbo = Gts.Gts.read(tsfile='../data/formats/ALBH.pbo.final_igs14.pos',fmt='pos',verbose=True)
# where plot will appear
# in the notebook as an interactive window
# %matplotlib notebook
# in a separate interactive qt window
# #%matplotlib qt
# in the notebook as a dead window
# #%matplotlib inline
# The following cell plots the time series using all default values
albh_pbo.plot()
# This shows how most plot settings can be controlled.
albh_pbo.plot(
# figure size
plot_size=(8,4),
# components to plot
lcomponent = ['U','E'],
# unit
unit='m',
# general title for the plot
title='site ALBH from GAGE PBO Analysis',
# info for each subplot
info = [("Up component - rms: %.1lf mm" % ( np.std(albh_pbo.detrend().data ,axis=0)[3]*1.E3)),
("East component - rms: %.1lf mm" % ( np.std(albh_pbo.detrend().data ,axis=0)[2]*1.E3))],
# period for plot in decimal years
date=[2013.5,2013.9],
# y-axis bounds for East and North components
yaxis=[-0.007,0.007],
# y-axis bounds for Up component
yupaxis=[-0.02,0.02],
# rescale error bars
error_scale=5.,
# xlabel format, here asking for 2 digits to be displayed
xlabel_fmt="%.2lf",
# minor xticks
xticks_minor_locator = 0.1,
# from now on, the key-words are from matplotlib.plot.errorbar
# marker as circle
marker='o',
# size of the marker, set to zero to hide them
markersize=4,
# marker color
markerfacecolor='red',
# line style
linestyle='--',
# width of the line joining data
linewidth=2,
# line color
color='grey',
# errorbar color
ecolor='blue',
# errorbar width
elinewidth = 2 ,
)
# plot can also superimpose several time series.
# In the following example, we will plot the original data as dots and the smoothed data as lines.
# For time series to be superimposed, the settings need to be defined through matplotlib.rcParams
#
# import
import matplotlib as mpl
# filter the original time series
albh_filtered_101 = albh_pbo.median_filter(101)
albh_filtered_vondrak = albh_pbo.vondrak(fc=4)
# setting for the superimposed time series
mpl.rc('lines', linewidth=2, linestyle='--',marker=None,markersize=1)
# plot
albh_pbo.plot(superimposed=[albh_filtered_101,albh_filtered_vondrak], \
date=[2017.,2018.],
xticks_minor_locator=1,
label=['101','vondrak'],
lcolor=['black','red'],
legend=True)
# Pyacs plot also handles several date formats
# %matplotlib inline
albh_pbo.plot(plot_size=(8,2.), lcomponent = ['E'], date=[2017.8,2018.3],
title='default decimal year')
albh_pbo.plot(plot_size=(8,2.), lcomponent = ['E'], date=[2017.8,2018.3],
title='decimal year since 2013.70',date_ref=2013.70)
albh_pbo.plot(plot_size=(8,2.), lcomponent = ['E'], date=[2017.8,2018.3],
title='Days since 2018.00',date_unit='days',date_ref=2018.00)
albh_pbo.plot(plot_size=(8,2.), lcomponent = ['E'], date=[2017.8,2018.3],
title='default calendar date',date_unit='cal')
albh_pbo.plot(plot_size=(8,2.), lcomponent = ['E'], date=[2018.1,2018.2],
title='calendar date - only month and days',date_unit='cal',
xticks_minor_locator="%d",xlabel_fmt="%m-%d")
# + outputExpanded=false jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
| pyacs_docs/pyacs/downloads/905b7583964989788fea61aed0d6df13/plotting_time_series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''udl'': conda)'
# name: python3
# ---
# # Neural networks with PyTorch
#
# Deep learning networks tend to be massive with dozens or hundreds of layers, that's where the term "deep" comes from. You can build one of these deep networks using only weight matrices as we did in the previous notebook, but in general it's very cumbersome and difficult to implement. PyTorch has a nice module `nn` that provides a nice way to efficiently build large neural networks.
# +
# Import necessary packages
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
# -
#
# Now we're going to build a larger network that can solve a (formerly) difficult problem, identifying text in an image. Here we'll use the MNIST dataset which consists of greyscale handwritten digits. Each image is 28x28 pixels, you can see a sample below
#
# <img src='assets/mnist.png'>
#
# Our goal is to build a neural network that can take one of these images and predict the digit in the image.
#
# First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.
# +
### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# -
# We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. Later, we'll use this to loop through the dataset for training, like
#
# ```python
# for image, label in trainloader:
# ## do things with images and labels
# ```
#
# You'll notice I created the `trainloader` with a batch size of 64, and `shuffle=True`. The batch size is the number of images we get in one iteration from the data loader and pass through our network, often called a *batch*. And `shuffle=True` tells it to shuffle the dataset every time we start going through the data loader again. But here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size `(64, 1, 28, 28)`. So, 64 images per batch, 1 color channel, and 28x28 images.
dataiter = iter(trainloader)
images, labels = next(dataiter)
print(type(images))
print(images.shape)
print(labels.shape)
# This is what one of the images looks like.
images[1].numpy().shape
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
# First, let's try to build a simple network for this dataset using weight matrices and matrix multiplications. Then, we'll see how to do it using PyTorch's `nn` module which provides a much more convenient and powerful method for defining network architectures.
#
# The networks you've seen so far are called *fully-connected* or *dense* networks. Each unit in one layer is connected to each unit in the next layer. In fully-connected networks, the input to each layer must be a one-dimensional vector (which can be stacked into a 2D tensor as a batch of multiple examples). However, our images are 28x28 2D tensors, so we need to convert them into 1D vectors. Thinking about sizes, we need to convert the batch of images with shape `(64, 1, 28, 28)` to have a shape of `(64, 784)`; 784 is 28 times 28. This is typically called *flattening*: we flatten the 2D images into 1D vectors.
#
# Previously you built a network with one output unit. Here we need 10 output units, one for each digit. We want our network to predict the digit shown in an image, so what we'll do is calculate probabilities that the image is of any one digit or class. This ends up being a discrete probability distribution over the classes (digits) that tells us the most likely class for the image. That means we need 10 output units for the 10 classes (digits). We'll see how to convert the network output into a probability distribution next.
#
# > **Exercise:** Flatten the batch of images `images`. Then build a multi-layer network with 784 input units, 256 hidden units, and 10 output units using random tensors for the weights and biases. For now, use a sigmoid activation for the hidden layer. Leave the output layer without an activation, we'll add one that gives us a probability distribution next.
# +
## Your solution
# A flatten operation on a tensor reshapes the
# tensor to have a shape that is equal to the number
# of elements contained in the tensor.
# This is the same thing as a 1d-array of elements.
def sigmoid(x):
return torch.sigmoid(x)
# The -1 infers size from the other values
# Thus tensor (64,1,28,28) takes [0] as 64 rows
# and 28*28*1 as cols
input = images.view(images.shape[0],-1)
print(input.shape)
print(input.size())
weights = torch.randn(784,256)
bias = torch.randn(256)
h_weight = torch.randn(256,10)
h_bias = torch.randn(10)
# (64,784) (784,256) + (256)
h = sigmoid(torch.mm(input,weights) + bias)
# (64,256) (256,10)
out = torch.mm(h,h_weight) + h_bias
# output of your network, should have shape (64,10)
# +
def sigmoid1(x):
return 1/(1+torch.exp(-x))
#return torch.sigmoid(x)
def sigmoid2(x):
#return 1/(1+torch.exp(-x))
return torch.sigmoid(x)
print("sigmoid1: ",sigmoid1(torch.tensor(1.0)), " sigmoid2: ",sigmoid2(torch.tensor(1.0)))
# -
# Now we have 10 outputs for our network. We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to. Something that looks like this:
# <img src='assets/image_distribution.png' width=500px>
#
# Here we see that the probability for each class is roughly the same. This is representing an untrained network, it hasn't seen any data yet so it just returns a uniform distribution with equal probabilities for each class.
#
# To calculate this probability distribution, we often use the [**softmax** function](https://en.wikipedia.org/wiki/Softmax_function). Mathematically this looks like
#
# $$
# \Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_k^K{e^{x_k}}}
# $$
#
# What this does is squish each input $x_i$ between 0 and 1 and normalizes the values to give you a proper probability distribution where the probabilites sum up to one.
#
# > **Exercise:** Implement a function `softmax` that performs the softmax calculation and returns probability distributions for each example in the batch. Note that you'll need to pay attention to the shapes when doing this. If you have a tensor `a` with shape `(64, 10)` and a tensor `b` with shape `(64,)`, doing `a/b` will give you an error because PyTorch will try to do the division across the columns (called broadcasting) but you'll get a size mismatch. The way to think about this is for each of the 64 examples, you only want to divide by one value, the sum in the denominator. So you need `b` to have a shape of `(64, 1)`. This way PyTorch will divide the 10 values in each row of `a` by the one value in each row of `b`. Pay attention to how you take the sum as well. You'll need to define the `dim` keyword in `torch.sum`. Setting `dim=0` takes the sum across the rows while `dim=1` takes the sum across the columns.
# +
def softmax(x):
    ## TODO: Implement the softmax function here
    a = torch.exp(x)
    # the row sums have shape (64,);
    # reshape to (64, 1) so the division broadcasts across each row of a
    b = torch.sum(torch.exp(x), dim=1).view(-1, 1)
    out = a / b
    print("a=", a.shape, " b=", b.shape)
    return out
# Here, out should be the output of the network in the previous excercise with shape (64,10)
probabilities = softmax(out)
# Does it have the right shape? Should be (64, 10)
print(probabilities.shape)
# Does it sum to 1?
print(probabilities.sum(dim=1))
# -
# ## Building networks with PyTorch
#
# PyTorch provides a module `nn` that makes building networks much simpler. Here I'll show you how to build the same one as above with 784 inputs, 256 hidden units, 10 output units and a softmax output.
from torch import nn
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
# Define sigmoid activation and softmax output
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
return x
# Let's go through this bit by bit.
#
# ```python
# class Network(nn.Module):
# ```
#
# Here we're inheriting from `nn.Module`. Combined with `super().__init__()` this creates a class that tracks the architecture and provides a lot of useful methods and attributes. It is mandatory to inherit from `nn.Module` when you're creating a class for your network. The name of the class itself can be anything.
#
# ```python
# self.hidden = nn.Linear(784, 256)
# ```
#
# This line creates a module for a linear transformation, $x\mathbf{W} + b$, with 784 inputs and 256 outputs and assigns it to `self.hidden`. The module automatically creates the weight and bias tensors which we'll use in the `forward` method. You can access the weight and bias tensors once the network (`net`) is created with `net.hidden.weight` and `net.hidden.bias`.
#
# ```python
# self.output = nn.Linear(256, 10)
# ```
#
# Similarly, this creates another linear transformation with 256 inputs and 10 outputs.
#
# ```python
# self.sigmoid = nn.Sigmoid()
# self.softmax = nn.Softmax(dim=1)
# ```
#
# Here I defined operations for the sigmoid activation and softmax output. Setting `dim=1` in `nn.Softmax(dim=1)` calculates softmax across the columns.
#
# ```python
# def forward(self, x):
# ```
#
# PyTorch networks created with `nn.Module` must have a `forward` method defined. It takes in a tensor `x` and passes it through the operations you defined in the `__init__` method.
#
# ```python
# x = self.hidden(x)
# x = self.sigmoid(x)
# x = self.output(x)
# x = self.softmax(x)
# ```
#
# Here the input tensor `x` is passed through each operation and reassigned to `x`. We can see that the input tensor goes through the hidden layer, then a sigmoid function, then the output layer, and finally the softmax function. It doesn't matter what you name the variables here, as long as the inputs and outputs of the operations match the network architecture you want to build. The order in which you define things in the `__init__` method doesn't matter, but you'll need to sequence the operations correctly in the `forward` method.
#
# Now we can create a `Network` object.
# Create the network and look at its text representation
model = Network()
model
# You can define the network somewhat more concisely and clearly using the `torch.nn.functional` module. This is the most common way you'll see networks defined as many operations are simple element-wise functions. We normally import this module as `F`, `import torch.nn.functional as F`.
# +
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.hidden = nn.Linear(784, 256)
# Output layer, 10 units - one for each digit
self.output = nn.Linear(256, 10)
def forward(self, x):
# Hidden layer with sigmoid activation
x = F.sigmoid(self.hidden(x))
# Output layer with softmax activation
x = F.softmax(self.output(x), dim=1)
return x
# -
# ### Activation functions
#
# So far we've only been looking at the sigmoid activation function, but in general any function can be used as an activation function. The only requirement is that for a network to approximate a non-linear function, the activation functions must be non-linear. Here are a few more examples of common activation functions: Tanh (hyperbolic tangent), and ReLU (rectified linear unit).
#
# <img src="assets/activation.png" width=700px>
#
# In practice, the ReLU function is used almost exclusively as the activation function for hidden layers.
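# A quick, hedged illustration (an addition, not part of the original notebook) of these activation functions applied to a small tensor using PyTorch's built-in versions:
z = torch.linspace(-3, 3, steps=7)
print(torch.sigmoid(z))  # squashes values into (0, 1)
print(torch.tanh(z))     # squashes values into (-1, 1)
print(torch.relu(z))     # zeroes out negative values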
# ### Your Turn to Build a Network
#
# <img src="assets/mlp_mnist.png" width=600px>
#
# > **Exercise:** Create a network with 784 input units, a hidden layer with 128 units and a ReLU activation, then a hidden layer with 64 units and a ReLU activation, and finally an output layer with a softmax activation as shown above. You can use a ReLU activation with the `nn.ReLU` module or `F.relu` function.
#
# It's good practice to name your layers by their type of network, for instance 'fc' to represent a fully-connected layer. As you code your solution, use `fc1`, `fc2`, and `fc3` as your layer names.
# +
## Your solution here
class Network(nn.Module):
def __init__(self):
super().__init__()
# Inputs to hidden layer linear transformation
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 64)
# Output layer, 10 units - one for each digit
self.fc3 = nn.Linear(64, 10)
# Define sigmoid activation and softmax output
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
self.relu = nn.ReLU()
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
x = self.softmax(x)
return x
model = Network()
model
# -
# ### Initializing weights and biases
#
# The weights and such are automatically initialized for you, but it's possible to customize how they are initialized. The weights and biases are tensors attached to the layer you defined, you can get them with `model.fc1.weight` for instance.
print(model.fc1.weight)
print(model.fc1.bias)
# For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
# Set biases to all zeros
model.fc1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)
# ### Forward pass
#
# Now that we have a network, let's see what happens when we pass in an image.
# +
# Grab some data
dataiter = iter(trainloader)
images, labels = next(dataiter)
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size
# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx,:])
img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
# -
# As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!
#
# ### Using `nn.Sequential`
#
# PyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)). Using this to build the equivalent network:
# +
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10
# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
nn.ReLU(),
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
nn.ReLU(),
nn.Linear(hidden_sizes[1], output_size),
nn.Softmax(dim=1))
print(model)
# Forward pass through the network and display output
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
# -
# Here our model is the same as before: 784 input units, a hidden layer with 128 units, ReLU activation, 64 unit hidden layer, another ReLU, then the output layer with 10 units, and the softmax output.
#
# The operations are available by passing in the appropriate index. For example, if you want to get first Linear operation and look at the weights, you'd use `model[0]`.
print(model[0])
model[0].weight
# You can also pass in an `OrderedDict` to name the individual layers and operations, instead of using incremental integers. Note that dictionary keys must be unique, so _each operation must have a different name_.
#
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
# Now you can access layers either by integer or the name
print(model[0])
print(model.fc1)
# In the next notebook, we'll see how we can train a neural network to accurately predict the numbers appearing in the MNIST images.
| intro-to-pytorch/Part 2 - Neural Networks in PyTorch (Exercises).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Name
# Data preparation using Apache Pig on YARN with Cloud Dataproc
#
# # Label
# Cloud Dataproc, GCP, Cloud Storage, YARN, Pig, Apache, Kubeflow, pipelines, components
#
#
# # Summary
# A Kubeflow Pipeline component to prepare data by submitting an Apache Pig job on YARN to Cloud Dataproc.
#
#
# # Details
# ## Intended use
# Use the component to run an Apache Pig job as one preprocessing step in a Kubeflow Pipeline.
#
# ## Runtime arguments
# | Argument | Description | Optional | Data type | Accepted values | Default |
# |----------|-------------|----------|-----------|-----------------|---------|
# | project_id | The ID of the Google Cloud Platform (GCP) project that the cluster belongs to. | No | GCPProjectID | | |
# | region | The Cloud Dataproc region to handle the request. | No | GCPRegion | | |
# | cluster_name | The name of the cluster to run the job. | No | String | | |
# | queries | The queries to execute the Pig job. Specify multiple queries in one string by separating them with semicolons. You do not need to terminate queries with semicolons. | Yes | List | | None |
# | query_file_uri | The HCFS URI of the script that contains the Pig queries. | Yes | GCSPath | | None |
# | script_variables | Mapping of the query’s variable names to their values (equivalent to the Pig command: SET name="value";). | Yes | Dict | | None |
# | pig_job | The payload of a [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob). | Yes | Dict | | None |
# | job | The payload of a [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs). | Yes | Dict | | None |
# | wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
#
# ## Output
# Name | Description | Type
# :--- | :---------- | :---
# job_id | The ID of the created job. | String
#
# ## Cautions & requirements
#
# To use the component, you must:
# * Set up a GCP project by following this [guide](https://cloud.google.com/dataproc/docs/guides/setup-project).
# * [Create a new cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster).
# * Run the component under a secret [Kubeflow user service account](https://www.kubeflow.org/docs/started/getting-started-gke/#gcp-service-accounts) in a Kubeflow cluster. For example:
#
# ```
# component_op(...).apply(gcp.use_gcp_secret('user-gcp-sa'))
# ```
# * Grant the Kubeflow user service account the role `roles/dataproc.editor` on the project.
#
# ## Detailed description
# This component creates a Pig job from [Dataproc submit job REST API](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/submit).
#
# Follow these steps to use the component in a pipeline:
# 1. Install the Kubeflow Pipeline SDK:
#
# +
# %%capture --no-stderr
KFP_PACKAGE = 'https://storage.googleapis.com/ml-pipeline/release/0.1.14/kfp.tar.gz'
# !pip3 install $KFP_PACKAGE --upgrade
# -
# 2. Load the component using KFP SDK
# +
import kfp.components as comp
dataproc_submit_pig_job_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/02c991dd265054b040265b3dfa1903d5b49df859/components/gcp/dataproc/submit_pig_job/component.yaml')
help(dataproc_submit_pig_job_op)
# -
# ### Sample
#
# Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
#
#
# #### Setup a Dataproc cluster
#
# [Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster) (or reuse an existing one) before running the sample code.
#
#
# #### Prepare a Pig query
#
# Either put your Pig queries in the `queries` list, or upload your Pig queries into a file to a Cloud Storage bucket and then enter the Cloud Storage bucket’s path in `query_file_uri`. In this sample, we will use a hard coded query in the `queries` list to select data from a local `passwd` file.
#
# For more details on Apache Pig, see the [Pig documentation.](http://pig.apache.org/docs/latest/)
#
# #### Set sample parameters
# + tags=["parameters"]
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
QUERY = '''
natality_csv = load 'gs://public-datasets/natality/csv' using PigStorage(':');
top_natality_csv = LIMIT natality_csv 10;
dump natality_csv;'''
EXPERIMENT_NAME = 'Dataproc - Submit Pig Job'
# -
# #### Example pipeline that uses the component
import kfp.dsl as dsl
import kfp.gcp as gcp
import json
@dsl.pipeline(
name='Dataproc submit Pig job pipeline',
description='Dataproc submit Pig job pipeline'
)
def dataproc_submit_pig_job_pipeline(
project_id = PROJECT_ID,
region = REGION,
cluster_name = CLUSTER_NAME,
queries = json.dumps([QUERY]),
query_file_uri = '',
script_variables = '',
pig_job='',
job='',
wait_interval='30'
):
dataproc_submit_pig_job_op(
project_id=project_id,
region=region,
cluster_name=cluster_name,
queries=queries,
query_file_uri=query_file_uri,
script_variables=script_variables,
pig_job=pig_job,
job=job,
wait_interval=wait_interval).apply(gcp.use_gcp_secret('user-gcp-sa'))
# #### Compile the pipeline
pipeline_func = dataproc_submit_pig_job_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
# #### Submit the pipeline for execution
# +
#Specify pipeline argument values
arguments = {}
#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
# -
# ## References
# * [Create a new Dataproc cluster](https://cloud.google.com/dataproc/docs/guides/create-cluster)
# * [Pig documentation](http://pig.apache.org/docs/latest/)
# * [Dataproc job](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs)
# * [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
#
# ## License
# By deploying or using this software you agree to comply with the [AI Hub Terms of Service](https://aihub.cloud.google.com/u/0/aihub-tos) and the [Google APIs Terms of Service](https://developers.google.com/terms/). To the extent of a direct conflict of terms, the AI Hub Terms of Service will control.
| components/gcp/dataproc/submit_pig_job/sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.2 64-bit (''.env'': venv)'
# language: python
# name: python3
# ---
import mlflow
import mlflow.sklearn
from mlflow.tracking.client import MlflowClient
import mlflow.pyfunc
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_boston
from sklearn.metrics import mean_absolute_error
data = load_boston()
X = pd.DataFrame(data.data)
y = data.target
def train_sklearn_model(X, y):
model = LinearRegression()
model.fit(X, y)
return model
sql_string = "http://localhost:5000"
mlflow.set_tracking_uri(sql_string)
# Create an experiment
expname = "Boston_exp"
exp_id = mlflow.create_experiment(expname)
print(exp_id)
with mlflow.start_run(experiment_id=exp_id, run_name='sk'):
# Automatically logs the model's parameters, metrics, and artifacts
mlflow.sklearn.autolog()
train_sklearn_model(X, y)
run_id_sk = mlflow.active_run().info.run_id
print(run_id_sk)
# +
# Set the model name
model_name = "model_boston_sk"
# Default path to the model artifacts directory
artifact_path = "model"
model_uri = "runs:/{run_id}/{artifact_path}".format(run_id=run_id_sk, artifact_path=artifact_path)
model_details_sk = mlflow.register_model(model_uri=model_uri, name=model_name)
# -
# Set a description for the registered model
client = MlflowClient()
client.update_registered_model(
name=model_details_sk.name,
description="Линейная регрессия"
)
# Set a description for the model version
client.update_model_version(
name=model_details_sk.name,
version=model_details_sk.version,
description="Линейная регрессия. Версия 1"
)
# Transition the model version to a new stage
client.transition_model_version_stage(
name=model_details_sk.name,
version=model_details_sk.version,
stage='Production',
)
# Check the model's current stage by name
model_version_details = client.get_model_version(
name=model_details_sk.name,
version=model_details_sk.version,
)
print("The current model stage is: '{stage}'".format(stage=model_details_sk.current_stage))
latest_version_info = client.get_latest_versions(model_name, stages=["Production"])
latest_production_version = latest_version_info[0].version
print("The latest production version of the model '%s' is '%s'." % (model_name, latest_production_version))
experiments = client.list_experiments()
experiments
experiments[-1].experiment_id
run = client.create_run(experiments[-1].experiment_id)
run
client.list_run_infos(experiments[-1].experiment_id)
# +
model_version_uri = "models:/{model_name}/1".format(model_name=model_details_sk.name)
print("Loading registered model version from URI: '{model_uri}'".format(model_uri=model_version_uri))
model_sk = mlflow.pyfunc.load_model(model_version_uri)
# -
y_predict = model_sk.predict(X)
y_predict[:5]
mean_absolute_error(y,y_predict)
| mlflow_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Read data with a time index
#
# Pandas DataFrame objects can have an index that denotes time. This is useful because Matplotlib recognizes that these measurements represent time and labels the values on the axis accordingly.
#
# In this exercise, you will read data from a CSV file called `climate_change.csv` that contains measurements of CO2 levels and temperatures made on the 6th of every month from 1958 until 2016. You will use Pandas' `read_csv` function.
#
# To designate the index as a `DateTimeIndex`, you will use the `parse_dates` and `index_col` key-word arguments both to parse this column as a variable that contains dates and also to designate it as the index for this DataFrame.
#
# _By the way, if you haven't downloaded it already, check out the [Matplotlib Cheat Sheet](https://datacamp-community-prod.s3.amazonaws.com/28b8210c-60cc-4f13-b0b4-5b4f2ad4790b). It includes an overview of the most important concepts, functions and methods and might come in handy if you ever need a quick refresher!_
#
# Instructions
#
# - Import the Pandas library as `pd`.
# - Read in the data from a CSV file called `'climate_change.csv'` using `pd.read_csv`.
# - Use the `parse_dates` key-word argument to parse the `"date"` column as dates.
# - Use the `index_col` key-word argument to set the `"date"` column as the index.
# +
# Import pandas as pd
import pandas as pd
# Read the data from file using read_csv
climate_change = pd.read_csv('climate_change.csv', parse_dates=['date'], index_col='date')
# -
# ## Plot time-series data
#
# To plot time-series data, we use the `Axes` object `plot` command. The first argument to this method gives the values for the x-axis and the second argument gives the values for the y-axis.
#
# This exercise provides data stored in a DataFrame called `climate_change`. This variable has a time-index with the dates of measurements and two data columns: `"co2"` and `"relative_temp"`.
#
# In this case, the index of the DataFrame would be used as the x-axis values and we will plot the values stored in the `"relative_temp"` column as the y-axis values. We will also properly label the x-axis and y-axis.
#
# Instructions
#
# - Add the data from `climate_change` to the plot: use the DataFrame `index` for the x value and the `"relative_temp"` column for the y values.
# - Set the x-axis label to `'Time'`.
# - Set the y-axis label to `'Relative temperature (Celsius)'`.
# - Show the figure.
# +
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
# Add the time-series for "relative_temp" to the plot
ax.plot(climate_change.index, climate_change['relative_temp'])
# Set the x-axis label
ax.set_xlabel('Time')
# Set the y-axis label
ax.set_ylabel('Relative temperature (Celsius)')
# Show the figure
plt.show()
# -
# ## Using a time index to zoom in
#
# When a time-series is represented with a time index, we can use this index for the x-axis when plotting. We can also zoom in on a particular period within the time-series by selecting it using Pandas' indexing facilities. In this exercise, you will select a portion of a time-series dataset and you will plot that period.
#
# The data to use is stored in a DataFrame called `climate_change`, which has a time-index with dates of measurements and two data columns: `"co2"` and `"relative_temp"`.
#
# Instructions
#
# - Use `plt.subplots` to create a Figure with one Axes called `fig` and `ax`, respectively.
# - Create a variable called `seventies` that includes all the data between `"1970-01-01"` and `"1979-12-31"`.
# - Add the data from `seventies` to the plot: use the DataFrame `index` for the x value and the `"co2"` column for the y values.
# +
# Use plt.subplots to create fig and ax
fig, ax = plt.subplots()
# Create variable seventies with data from "1970-01-01" to "1979-12-31"
seventies = climate_change['1970-01-01':'1979-12-31']
# Add the time-series for "co2" data from seventies to the plot
ax.plot(seventies.index, seventies['co2'])
# Show the figure
plt.show()
# -
# ## Plotting two variables
#
# If you want to plot two time-series variables that were recorded at the same times, you can add both of them to the same subplot.
#
# If the variables have very different scales, you'll want to make sure that you plot them in different twin Axes objects. These objects can share one axis (for example, the time, or x-axis) while not sharing the other (the y-axis).
#
# To create a twin Axes object that shares the x-axis, we use the `twinx` method.
#
# In this exercise, you'll have access to a DataFrame that has the `climate_change` data loaded into it. This DataFrame was loaded with the `"date"` column set as a `DateTimeIndex`, and it has a column called `"co2"` with carbon dioxide measurements and a column called `"relative_temp"` with temperature measurements.
#
# Instructions
#
# - Use `plt.subplots` to create a Figure and Axes objects called `fig` and `ax`, respectively.
# - Plot the carbon dioxide variable in blue using the Axes `plot` method.
# - Use the Axes `twinx` method to create a twin Axes that shares the x-axis.
# - Plot the relative temperature variable in the twin Axes using its `plot` method.
# +
import matplotlib.pyplot as plt
# Initialize a Figure and Axes
fig, ax = plt.subplots()
# Plot the CO2 variable in blue
ax.plot(climate_change.index, climate_change['co2'], color='blue')
# Create a twin Axes that shares the x-axis
ax2 = ax.twinx()
# Plot the relative temperature in red
ax2.plot(climate_change.index, climate_change['relative_temp'], color='red')
plt.show()
# -
# ## Defining a function that plots time-series data
# Once you realize that a particular section of code that you have written is useful, it is a good idea to define a function that saves that section of code for you, rather than copying it to other parts of your program where you would like to use this code.
#
# Here, we will define a function that takes inputs such as a time variable and some other variable and plots them as x and y inputs. Then, it sets the labels on the x- and y-axis and sets the colors of the y-axis label, the y-axis ticks and the tick labels.
#
# Instructions
#
# - Define a function called `plot_timeseries` that takes as input an Axes object (`axes`), data (`x`,`y`), a string with the name of a color and strings for x- and y-axis labels.
# - Plot y as a function of x in the color provided as the input `color`.
# - Set the x- and y-axis labels using the provided input `xlabel` and `ylabel`, setting the y-axis label color using `color`.
# - Set the y-axis tick parameters using the `tick_params` method of the Axes object, setting the `colors` key-word to `color`.
# Define a function called plot_timeseries
def plot_timeseries(axes, x, y, color, xlabel, ylabel):
# Plot the inputs x,y in the provided color
axes.plot(x, y, color=color)
# Set the x-axis label
axes.set_xlabel(xlabel)
# Set the y-axis label
axes.set_ylabel(ylabel, color=color)
# Set the colors tick params for y-axis
axes.tick_params('y', colors=color)
# ## Using a plotting function
#
# Defining functions allows us to reuse the same code without having to repeat all of it. Programmers sometimes say ["Don't repeat yourself"](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself).
#
# In the previous exercise, you defined a function called `plot_timeseries`:
#
# `plot_timeseries(axes, x, y, color, xlabel, ylabel)`
#
# that takes an Axes object (as the argument `axes`), time-series data (as `x` and `y` arguments), the name of a color (as a string, provided as the `color` argument), and x-axis and y-axis labels (as `xlabel` and `ylabel` arguments). In this exercise, the function `plot_timeseries` is already defined and provided to you.
#
# Use this function to plot the `climate_change` time-series data, provided as a Pandas DataFrame object that has a DateTimeIndex with the dates of the measurements and `co2` and `relative_temp` columns.
#
# Instructions
#
# - In the provided `ax` object, use the function `plot_timeseries` to plot the `"co2"` column in blue, with the x-axis label `"Time (years)"` and y-axis label `"CO2 levels"`.
# - Use the `ax.twinx` method to add an Axes object to the figure that shares the x-axis with `ax`.
# - Use the function `plot_timeseries` to add the data in the `"relative_temp"` column in red to the twin Axes object, with the x-axis label `"Time (years)"` and y-axis label `"Relative temperature (Celsius)"`.
# +
fig, ax = plt.subplots()
# Plot the CO2 levels time-series in blue
plot_timeseries(ax, climate_change.index, climate_change['co2'], 'blue', 'Time (years)', 'CO2 levels')
# Create a twin Axes object that shares the x-axis
ax2 = ax.twinx()
# Plot the relative temperature data in red
plot_timeseries(ax2, climate_change.index, climate_change['relative_temp'], 'red', 'Time (years)', 'Relative temperature (Celsius)')
plt.show()
# -
# ## Annotating a plot of time-series data
#
# Annotating a plot allows us to highlight interesting information in the plot. For example, in describing the climate change dataset, we might want to point to the date at which the relative temperature first exceeded 1 degree Celsius.
#
# For this, we will use the `annotate` method of the Axes object. In this exercise, you will have the DataFrame called `climate_change` loaded into memory. Using the Axes methods, plot only the relative temperature column as a function of dates, and annotate the data.
#
# Instructions
#
# - Use the `ax.plot` method to plot the DataFrame index against the `relative_temp` column.
# - Use the annotate method to add the text `'>1 degree'` in the location `(pd.Timestamp('2015-10-06'), 1)`.
# +
fig, ax = plt.subplots()
# Plot the relative temperature data
ax.plot(climate_change.index, climate_change['relative_temp'])
# Annotate the date at which temperatures exceeded 1 degree
ax.annotate(">1 degree", xy=(pd.Timestamp('2015-10-06'), 1))
plt.show()
# -
# ## Plotting time-series: putting it all together
#
# In this exercise, you will plot two time-series with different scales on the same Axes, and annotate the data from one of these series.
#
# The CO2/temperatures data is provided as a DataFrame called `climate_change`. You should also use the function that we have defined before, called `plot_timeseries`, which takes an Axes object (as the `axes` argument), plots a time-series (provided as x and y arguments), sets the labels for the x-axis and y-axis, and sets the color for the data and for the y tick/axis labels:
#
# `plot_timeseries(axes, x, y, color, xlabel, ylabel)`
#
# Then, you will annotate with text an important time-point in the data: on 2015-10-06, when the temperature first rose to above 1 degree over the average.
#
# Instructions
#
# - Use the `plot_timeseries` function to plot CO2 levels against time. Set xlabel to `"Time (years)"`, ylabel to `"CO2 levels"`, and color to `'blue'`.
# - Create `ax2`, as a twin of the first Axes.
# - In `ax2`, plot temperature against time, setting the ylabel to `"Relative temp (Celsius)"` and the color to `'red'`.
# - Annotate the data using the `ax2.annotate` method. Place the text `">1 degree"` in x=`pd.Timestamp('2008-10-06')`, y=`-0.2` pointing with a gray thin arrow to x=`pd.Timestamp('2015-10-06')`, y=`1`.
# +
fig, ax = plt.subplots()
# Plot the CO2 levels time-series in blue
plot_timeseries(ax, climate_change.index, climate_change['co2'], 'blue', 'Time (years)', 'CO2 levels')
# Create an Axes object that shares the x-axis
ax2 = ax.twinx()
# Plot the relative temperature data in red
plot_timeseries(ax2, climate_change.index, climate_change['relative_temp'], 'red', 'Time (years)', 'Relative temp (Celsius)')
# Annotate the point with relative temperature >1 degree
ax2.annotate('>1 degree', xy=(pd.Timestamp('2015-10-06'), 1), xytext=(pd.Timestamp('2008-10-06'), -0.2), arrowprops={'arrowstyle':'->', 'color':'gray'})
plt.show()
| introduction_to_data_visualization_with_matplotlib/2_plotting_time_series.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from my_commonfunctions import *
def draw_hough_lines(lines, shape, line_thickness):
lines_img = np.zeros(shape)
for l in lines:
x1 = l[0][0]
y1 = l[0][1]
x2 = l[1][0]
y2 = l[1][1]
#print(l)
#print(x1, y1, x2, y2)
cv2.line(lines_img, (x1,y1), (x2,y2), (255,255,255), line_thickness)
return lines_img
def potential_staff_lines(note_img_thresh, staff_thickness):
DEG_TO_RAD = np.pi/180
width = note_img_thresh.shape[1]
# Hough to get potential staff lines
line_length = int(width/4)
lines = probabilistic_hough_line(note_img_thresh, threshold=10, line_length=line_length, line_gap=3, theta=np.arange(80*DEG_TO_RAD, 100*DEG_TO_RAD, 1*DEG_TO_RAD), seed=40)
lines_img_before_filteration = draw_hough_lines(lines, note_img_thresh.shape, 1)
lines_img_before_filteration = cv2.dilate(lines_img_before_filteration, np.ones((1, 11)))
# Get widest 5 contours/lines
lines_img = np.copy(lines_img_before_filteration)
image, contours, hierarchy = cv2.findContours(lines_img_before_filteration.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours_bounding_rectangles = []
for c in contours:
x, y, w, h = cv2.boundingRect(c)
#print(x, y, w, h)
contours_bounding_rectangles.append([c, [x, y, w, h]])
contours_bounding_rectangles_sorted = sorted(contours_bounding_rectangles, key = lambda x: x[1][2], reverse=True) # sort by width
contours_widest_5 = []
j = 5 if len(contours_bounding_rectangles_sorted) >= 5 else len(contours_bounding_rectangles_sorted)
for i in range(j):
contours_widest_5.append(contours_bounding_rectangles_sorted[i][0])
# Draw widest 5 contours/lines
lines_img = np.zeros(note_img_thresh.shape, dtype=np.uint8)
lines_img = rgb2gray(cv2.drawContours(gray2rgb(lines_img), contours_widest_5, -1, (255,255,255), 1))
k = 3
lines_img = my_close(lines_img, np.ones((k*staff_thickness, k*staff_thickness)))
# my_show_images([lines_img])
return lines_img
def remove_staff_lines(note_img_gray):
# Otsu's thresholding
ret, note_img_thresh = cv2.threshold(note_img_gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# distance_between_staves, staff_thickness
distance_between_staves, staff_thickness = get_distance_between_staves_and_staff_thickness(note_img_thresh)
note_img_thresh = ~note_img_thresh
# Potential staff lines
lines_img = potential_staff_lines(note_img_thresh, staff_thickness)
lines_img_flattened = (lines_img > 0.5).T.flatten()
# Iterate over each column to remove any "run of white pixels" with a length of "m*staff_thickness"
    # But it must be part of a potential line to confirm removal (potential lines are calculated above)
note_img_thresh_flattened = (note_img_thresh).T.flatten()
image, contours, hierarchy = cv2.findContours((note_img_thresh_flattened).astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
if c.shape == (2, 1, 2):
p1 = c[1][0][1]
p0 = c[0][0][1]
m = 1.55
if p1 - p0 <= staff_thickness*m:
#print(c)
staff_pixel_percentage = lines_img_flattened[p0:p1+1].sum() / len(lines_img_flattened[p0:p1+1])
if staff_pixel_percentage > 0.35:
note_img_thresh_flattened[p0:p1+1] = 0
elif c.shape == (1, 1, 2):
#print(c)
p0 = c[0][0][1]
staff_pixel_percentage = lines_img_flattened[p0:p0+1].sum() / len(lines_img_flattened[p0:p0+1])
if staff_pixel_percentage > 0.35:
note_img_thresh_flattened[p0:p0+1] = 0
staff_lines_removed = note_img_thresh_flattened.reshape(note_img_thresh.T.shape).T
return staff_lines_removed
img = my_imread_gray('shubra1/13 3.jpg')
img_staff_lines_removed = remove_staff_lines(img)
my_show_images([img, img_staff_lines_removed], dpi=100, row_max=2)
| Junk/Yamani/Remove Staff Lines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Outlier Detection with `bqplot`
# ---
# In this notebook, we create a class `DNA` that leverages the new bqplot canvas based [HeatMap](https://github.com/bloomberg/bqplot/blob/master/examples/Marks/HeatMap.ipynb) along with the ipywidgets Range Slider to help us detect and clean outliers in our data. The class accepts a DataFrame and allows you to visually and programmatically filter your outliers. The cleaned DataFrame can then be retrieved through a simple convenience function.
# +
from bqplot import (
DateScale,
ColorScale,
HeatMap,
Figure,
LinearScale,
OrdinalScale,
Axis,
)
from scipy.stats import percentileofscore
from scipy.interpolate import interp1d
import bqplot.pyplot as plt
from traitlets import List, Float, observe
from ipywidgets import IntRangeSlider, Layout, VBox, HBox, jslink
from pandas import DatetimeIndex
import numpy as np
import pandas as pd
def quantile_space(x, q1=0.1, q2=0.9):
"""
Returns a function that squashes quantiles between q1 and q2
"""
q1_x, q2_x = np.percentile(x, [q1, q2])
qs = np.percentile(x, np.linspace(0, 100, 100))
def get_quantile(t):
return np.interp(t, qs, np.linspace(0, 100, 100))
def f(y):
return np.interp(get_quantile(y), [0, q1, q2, 100], [-1, 0, 0, 1])
return f
class DNA(VBox):
colors = List()
q1 = Float()
q2 = Float()
def __init__(self, data, **kwargs):
self.data = data
date_x, date_y = False, False
transpose = kwargs.pop("transpose", False)
if transpose is True:
if type(data.index) is DatetimeIndex:
self.x_scale = DateScale()
if type(data.columns) is DatetimeIndex:
self.y_scale = DateScale()
x, y = list(data.columns.values), data.index.values
else:
if type(data.index) is DatetimeIndex:
date_x = True
if type(data.columns) is DatetimeIndex:
date_y = True
x, y = data.index.values, list(data.columns.values)
self.q1, self.q2 = kwargs.pop("quantiles", (1, 99))
self.quant_func = quantile_space(
self.data.values.flatten(), q1=self.q1, q2=self.q2
)
self.colors = kwargs.pop("colors", ["Red", "Black", "Green"])
self.x_scale = DateScale() if date_x is True else LinearScale()
self.y_scale = DateScale() if date_y is True else OrdinalScale(padding_y=0)
self.color_scale = ColorScale(colors=self.colors)
self.heat_map = HeatMap(
color=self.quant_func(self.data.T),
x=x,
y=y,
scales={"x": self.x_scale, "y": self.y_scale, "color": self.color_scale},
)
self.x_ax = Axis(scale=self.x_scale)
self.y_ax = Axis(scale=self.y_scale, orientation="vertical")
show_axes = kwargs.pop("show_axes", True)
self.axes = [self.x_ax, self.y_ax] if show_axes is True else []
self.height = kwargs.pop("height", "800px")
self.layout = kwargs.pop(
"layout", Layout(width="100%", height=self.height, flex="1")
)
self.fig_margin = kwargs.pop(
"fig_margin", {"top": 60, "bottom": 60, "left": 150, "right": 0}
)
kwargs.setdefault("padding_y", 0.0)
self.create_interaction(**kwargs)
self.figure = Figure(
marks=[self.heat_map],
axes=self.axes,
fig_margin=self.fig_margin,
layout=self.layout,
min_aspect_ratio=0.0,
**kwargs
)
super(VBox, self).__init__(
children=[self.range_slider, self.figure],
layout=Layout(align_items="center", width="100%", height="100%"),
**kwargs
)
def create_interaction(self, **kwargs):
self.range_slider = IntRangeSlider(
description="Filter Range",
value=(self.q1, self.q2),
layout=Layout(width="100%"),
)
self.range_slider.observe(self.slid_changed, "value")
self.observe(self.changed, ["q1", "q2"])
def slid_changed(self, new):
self.q1 = self.range_slider.value[0]
self.q2 = self.range_slider.value[1]
def changed(self, new):
self.range_slider.value = (self.q1, self.q2)
self.quant_func = quantile_space(
self.data.values.flatten(), q1=self.q1, q2=self.q2
)
self.heat_map.color = self.quant_func(self.data.T)
def get_filtered_df(self, fill_type="median"):
q1_x, q2_x = np.percentile(self.data, [self.q1, self.q2])
if fill_type == "median":
return self.data[(self.data >= q1_x) & (self.data <= q2_x)].apply(
lambda x: x.fillna(x.median())
)
elif fill_type == "mean":
return self.data[(self.data >= q1_x) & (self.data <= q2_x)].apply(
lambda x: x.fillna(x.mean())
)
else:
raise ValueError("fill_type must be one of ('median', 'mean')")
# -
# We define the size of our matrix here. Larger matrices require a larger height.
size = 100
# +
def num_to_col_letters(num):
letters = ""
while num:
mod = (num - 1) % 26
letters += chr(mod + 65)
num = (num - 1) // 26
return "".join(reversed(letters))
letters = []
for i in range(1, size + 1):
letters.append(num_to_col_letters(i))
# -
data = pd.DataFrame(np.random.randn(size, size), columns=letters)
data_dna = DNA(
data, title="DNA of our Data", height="1400px", colors=["Red", "White", "Green"]
)
data_dna
# Instead of setting the quantiles by the sliders, we can also set them programmatically. Using a range of (5, 95) restricts the data considerably.
data_dna.q1, data_dna.q2 = 5, 95
# Now, we can use the convenience function to extract a clean DataFrame.
data_clean = data_dna.get_filtered_df()
# By default, the DNA fills outliers with the median of the column. Alternatively, we can fill the outliers with the mean.
data_mean = data_dna.get_filtered_df(fill_type="mean")
# We can also visualize the new DataFrame the same way to test how our outliers look now.
DNA(data_clean, title="Cleaned Data", height="1200px", colors=["Red", "White", "Green"])
| examples/Applications/Outlier Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:dev_azure]
# language: python
# name: conda-env-dev_azure-py
# ---
# %load_ext autoreload
# %autoreload 2
# ## Imports
from dmsbatch import create_batch_client, create_blob_client
import datetime
# ## First create a batch client from the config file
client = create_batch_client('../tests/data/dmsbatch.config')
blob_client = create_blob_client('../tests/data/dmsbatch.config')
# ## Application packages
# To copy large files and programs it is best to zip (or targz) them and upload them as application packages
#
# Application packages are set up separately, either via the Azure management APIs, the web console, or the CLI tool
#
# These are referenced here by their name and version
# e.g. DSM2, python and other programs
#
# One extra field (the last one) is the path within the zip file where the executables can be found. These are used later to set up the PATH variable
app_pkgs = [('dsm2linux', '8.2.8449db2', 'DSM2-8.2.8449db2-Linux/bin')]
# ### Show vms available
#
# https://docs.microsoft.com/en-us/azure/virtual-machines/fsv2-series
# +
#display(client.skus_available())
# -
# ### Create or resize existing pool
# If the pool doesn't exist it will create it
# If the pool exists, it will resize to the second arg
client.set_path_to_apps(app_pkgs,ostype='linux')
pool_start_cmds = ['printenv',
'yum install -y glibc.i686 libstdc++.i686 glibc.x86_64 libstdc++.x86_64',# --setopt=protected_multilib=false',
'yum-config-manager --add-repo https://yum.repos.intel.com/2019/setup/intel-psxe-runtime-2019.repo',
'rpm --import https://yum.repos.intel.com/2019/setup/RPM-GPG-KEY-intel-psxe-runtime-2019',
'yum install -y intel-icc-runtime-32bit intel-ifort-runtime-32bit']
client.wrap_commands_in_shell(pool_start_cmds,ostype='linux')
created_pool = client.create_pool('dsm2linuxpool',
1,
app_packages=[(app,version) for app,version,_ in app_pkgs],
vm_size='standard_f2s_v2',
tasks_per_vm=2,
os_image_data=('openlogic', 'centos', '7_8'),
start_task_cmd=client.wrap_commands_in_shell(pool_start_cmds,ostype='linux'),
start_task_admin=True,
elevation_level='admin'
)
if not created_pool:
client.resize_pool('dsm2linuxpool',1)
# ### Create job on pool or fail if it exists
# Jobs are containers of tasks (things that run on nodes (machines) in the pool). If this exists, the next line will fail
try:
client.create_job('dsm2linuxjobs','dsm2linuxpool')
except Exception as exc:
print('Job already exists? Its ok, we will continue')
# ### Create a task
# This uses the application package set up previously. If not, create one: https://docs.microsoft.com/en-us/azure/batch/batch-application-packages
tsnow = str(datetime.datetime.now().timestamp()).split('.')[0]
task_name = f'hydro_version_{tsnow}'
cmd_string = client.wrap_cmd_with_app_path(
"""
source /opt/intel/psxe_runtime/linux/bin/compilervars.sh ia32;
%s;
hydro -v;
unzip -v;
"""%(client.set_path_to_apps(app_pkgs,ostype='linux')),app_pkgs,ostype='linux')
print(task_name)
print(cmd_string)
blob_client.blob_client.account_name
import dmsbatch
permissions = dmsbatch.commands.BlobSasPermissions(write=True)
# |helpers.azureblob.BlobPermissions.ADD|helpers.azureblob.BlobPermissions.CREATE
output_dir_sas_url = blob_client.get_container_sas_url(
'dsm2linuxjobs',
permissions)
print(output_dir_sas_url)
std_out_files = client.create_output_file_spec(
'../std*.txt', output_dir_sas_url, blob_path=f'{task_name}')
hydro_v_task = client.create_task(task_name,cmd_string,output_files=[std_out_files])
# ### Next submit the task and wait
client.submit_tasks('dsm2linuxjobs',[hydro_v_task])
client.wait_for_tasks_to_complete('dsm2linuxjobs', datetime.timedelta(seconds=100))
# ## Finally resize the pool to 0 to save costs
client.resize_pool('dsm2linuxpool',0)
| notebooks/sample_submit_dsm2_linux_hydro_version.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cv-nd
# language: python
# name: cv-nd
# ---
# ## Facial Filters
#
# Using your trained facial keypoint detector, you can now do things like add filters to a person's face, automatically. In this optional notebook, you can play around with adding sunglasses to detected faces in an image by using the keypoints detected around a person's eyes. Check out the `images/` directory to see what other .png's have been provided for you to try, too!
#
# <img src="images/face_filter_ex.png" width=60% height=60%/>
#
# Let's start this process by looking at a sunglasses .png that we'll be working with!
# import necessary resources
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import cv2
# +
# load in sunglasses image with cv2 and IMREAD_UNCHANGED
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# plot our image
plt.imshow(sunglasses)
# print out its dimensions
print('Image shape: ', sunglasses.shape)
# -
# ## The 4th dimension
#
# You'll note that this image actually has *4 color channels*, not just 3 as your average RGB image does. This is due to the flag we set, `cv2.IMREAD_UNCHANGED`, which tells OpenCV to read in the image's alpha channel as well.
#
# #### Alpha channel
# It has the usual red, blue, and green channels any color image has, and the 4th channel represents the **transparency level of each pixel** in the image; this is often called the **alpha** channel. Here's how the transparency channel works: the lower the value, the more transparent, or see-through, the pixel will become. The lower bound (completely transparent) is zero here, so any pixels set to 0 will not be seen; these look like white background pixels in the image above, but they are actually totally transparent.
#
# This transparency channel allows us to place this rectangular image of sunglasses on an image of a face and still see the face area that is technically covered by the transparent background of the sunglasses image!
#
# Let's check out the alpha channel of our sunglasses image in the next Python cell. Because many of the pixels in the background of the image have an alpha value of 0, we'll need to explicitly print out non-zero values if we want to see them.
# print out the sunglasses transparency (alpha) channel
alpha_channel = sunglasses[:,:,3]
print ('The alpha channel looks like this (black pixels = transparent): ')
plt.imshow(alpha_channel, cmap='gray')
# just to double check that there are indeed non-zero values
# let's find and print out every value greater than zero
values = np.where(alpha_channel != 0)
print ('The non-zero values of the alpha channel are: ')
print (values)
# #### Overlaying images
#
# This means that when we place this sunglasses image on top of another image, we can use the transparency channel as a filter:
#
# * If the pixels are non-transparent (alpha_channel > 0), overlay them on the new image
#
# #### Keypoint locations
#
# In doing this, it's helpful to understand which keypoint belongs to the eyes, mouth, etc., so in the image below we also print the index of each facial keypoint directly on the image so you can tell which keypoints are for the eyes, eyebrows, and so on.
#
# <img src="images/landmarks_numbered.jpg" width=50% height=50%/>
#
# It may be useful to use keypoints that correspond to the edges of the face to define the width of the sunglasses, and the locations of the eyes to define the placement.
#
# Next, we'll load in an example image. Below, you've been given an image and set of keypoints from the provided training set of data, but you can use your own CNN model to generate keypoints for *any* image of a face (as in Notebook 3) and go through the same overlay process!
# +
# load in training data
key_pts_frame = pd.read_csv('data/training_frames_keypoints.csv')
# print out some stats about the data
print('Number of images: ', key_pts_frame.shape[0])
# -
# helper function to display keypoints
def show_keypoints(image, key_pts):
"""Show image with keypoints"""
plt.imshow(image)
plt.scatter(key_pts[:, 0], key_pts[:, 1], s=20, marker='.', c='m')
# +
# a selected image
n = 120
image_name = key_pts_frame.iloc[n, 0]
image = mpimg.imread(os.path.join('data/training/', image_name))
key_pts = key_pts_frame.iloc[n, 1:].values  # .as_matrix() was removed in newer pandas versions; .values returns the same NumPy array
key_pts = key_pts.astype('float').reshape(-1, 2)
print('Image name: ', image_name)
plt.figure(figsize=(5, 5))
show_keypoints(image, key_pts)
plt.show()
# -
# Next, you'll see an example of placing sunglasses on the person in the loaded image.
#
# Note that the keypoints are numbered off-by-one in the numbered image above, and so `key_pts[0,:]` corresponds to the first point (1) in the labelled image.
# +
# Display sunglasses on top of the image in the appropriate place
# copy of the face image for overlay
image_copy = np.copy(image)
# top-left location for sunglasses to go
# 17 = edge of left eyebrow
x = int(key_pts[17, 0])
y = int(key_pts[17, 1])
# height and width of sunglasses
# h = length of nose
h = int(abs(key_pts[27,1] - key_pts[34,1]))
# w = left to right eyebrow edges
w = int(abs(key_pts[17,0] - key_pts[26,0]))
# read in sunglasses
sunglasses = cv2.imread('images/sunglasses.png', cv2.IMREAD_UNCHANGED)
# resize sunglasses
new_sunglasses = cv2.resize(sunglasses, (w, h), interpolation = cv2.INTER_CUBIC)
# get region of interest on the face to change
roi_color = image_copy[y:y+h,x:x+w]
# find all non-transparent pts
ind = np.argwhere(new_sunglasses[:,:,3] > 0)
# for each non-transparent point, replace the original image pixel with that of the new_sunglasses
for i in range(3):
roi_color[ind[:,0],ind[:,1],i] = new_sunglasses[ind[:,0],ind[:,1],i]
# set the area of the image to the changed region with sunglasses
image_copy[y:y+h,x:x+w] = roi_color
# display the result!
plt.imshow(image_copy)
# -
# #### Further steps
#
# Look in the `images/` directory to see other available .png's for overlay! Also, you may notice that the overlay of the sunglasses is not entirely perfect; you're encouraged to play around with the scale of the width and height of the glasses and investigate how to perform [image rotation](https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html) in OpenCV so as to match an overlay with any facial pose.
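# Below is a minimal, hedged sketch (an addition, not part of the original notebook) of one way such a rotation could be done with OpenCV. The 10-degree angle is an arbitrary placeholder; in practice you would derive it from the eye keypoints.

# +
# rotate the resized sunglasses about their center by an example angle of 10 degrees
(glasses_h, glasses_w) = new_sunglasses.shape[:2]
rotation_matrix = cv2.getRotationMatrix2D((glasses_w // 2, glasses_h // 2), 10, 1.0)
rotated_sunglasses = cv2.warpAffine(new_sunglasses, rotation_matrix, (glasses_w, glasses_h))
# display the rotated overlay (colors may look off since OpenCV loads images as BGR(A))
plt.imshow(rotated_sunglasses)
# -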
| 4. Fun with Keypoints.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# pandas (the Python Data Analysis Library) is a tool built on top of NumPy, created to help solve data analysis tasks.
import numpy as np
import pandas as pd
# Series
ser = pd.Series([4, 7, -5, 3])
ser
ser.values
ser.index
ser2 = pd.Series([4, 7, -5, 3], index=['d', 'b', 'a', 'c'])
ser2
ser2.values
ser2.index
| dev/openlibs/pandas/pandas_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import functools
# 8 answers in total (comments give each question's answer range and its original question number)
example_answers = {
1: 6, # 1 - 6, Original: 1
2: 1, # 1 - 4, Original: 3
3: 2, # 1 - 3, Original: 4
4: 3, # 1 - 3, Original: 5
5: 2, # 1 - 5, Original: 7
6: 5, # 1 - 5, Original: 8
7: 4, # 1 - 5, Original: 9
8: 3, # 1 - 4, Original: 10
}
horizon_qa = {q:a for q,a in example_answers.items() if q == 1}
def updated_risk_keys(old):
return {
2: 3,
3: 4,
4: 5,
5: 7,
6: 8,
7: 9,
8: 10
}[old]
risk_qa = {updated_risk_keys(q):a for q,a in example_answers.items() if q != 1}
# -
horizon_qa
risk_qa
def horizon_score(question, answer):
score_map = {
# 1: {
# 1: 0,
# 2: 1,
# 3: 3,
# 4: 6,
# 5: 9,
# 6: 11
# },
1: {
1: 0,
2: 2, # Added 2-1
3: 6, # Added 4-1
4: 10, # Added 5-1
5: 14, # Added 6-1
6: 15
},
2: {
1: 0,
2: 2,
3: 4,
4: 5,
5: 6
}
}
if question not in score_map:
raise Exception(f'Unknown horizon question {str(question)}')
return score_map[question][answer]
def risk_score(question, answer):
score_map = {
3: {
1: 13,
2: 8,
3: 5,
4: 3
},
4: {
1: 5, # Added 1
2: 10, # Added 3
3: 15 # Added 4
},
5: {
1: 6, # Added 2
2: 9, # Added 2
3: 15 # Added 4
},
6: {
1: 3,
2: 5,
3: 8,
4: 13
},
7: {
1: 0,
2: 4,
3: 7,
4: 11,
5: 17 # Added 4
},
8: {
1: 0,
2: 4,
3: 7,
4: 11,
5: 17 # Added 4
},
9: {
1: 0,
2: 4,
3: 7,
4: 11,
5: 17 # Added 4
},
10: {
1: 14, # Added 1
2: 8,
3: 5,
4: 3
}
}
if question not in score_map:
raise Exception(f'Unknown risk question {str(question)}')
return score_map[question][answer]
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
def total_score(hs, rs):
if hs < 1:
return None
if 1 <= hs < 3:
return 1
if 3 <= hs < 6:
if rs < 24:
return 1
return 2
if 6 <= hs < 8:
if rs < 24:
return 1
if rs < 44:
return 2
return 3
if 8 <= hs < 11:
if rs < 24:
return 1
if rs < 44:
return 2
if rs < 65:
return 3
return 4
    # hs >= 11
if rs < 24:
return 1
if rs < 44:
return 2
if rs < 65:
return 3
if rs < 85:
return 4
return 5
total_score(hs, rs)
# # Omer little investor
no_risk_example = {
1: 1,
2: 1,
3: 1,
4: 1,
5: 1,
6: 1,
7: 1,
8: 1,
}
horizon_qa = {q:a for q,a in no_risk_example.items() if q == 1}
risk_qa = {updated_risk_keys(q):a for q,a in no_risk_example.items() if q != 1}
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
print(total_score(hs, rs))
minimal_risk_example = {
1: 2,
2: 1,
3: 1,
4: 1,
5: 1,
6: 1,
7: 1,
8: 1,
}
horizon_qa = {q:a for q,a in minimal_risk_example.items() if q == 1}
risk_qa = {updated_risk_keys(q):a for q,a in minimal_risk_example.items() if q != 1}
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
print(total_score(hs, rs))
minimal_risk_example = {
1: 3,
2: 1,
3: 1,
4: 1,
5: 1, # Change to 4, for example to jump to risk=3
6: 1,
7: 1,
8: 1,
}
horizon_qa = {q:a for q,a in minimal_risk_example.items() if q == 1}
risk_qa = {updated_risk_keys(q):a for q,a in minimal_risk_example.items() if q != 1}
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
print(total_score(hs, rs))
balanced_example = {
1: 3,
2: 4,
3: 3,
4: 2,
5: 4,
6: 5,
7: 3,
8: 2,
}
horizon_qa = {q:a for q,a in balanced_example.items() if q == 1}
risk_qa = {updated_risk_keys(q):a for q,a in balanced_example.items() if q != 1}
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
print(total_score(hs, rs))
balanced_example = {
1: 5,
2: 3,
3: 2,
4: 2,
5: 4,
6: 4,
7: 5,
8: 4,
}
horizon_qa = {q:a for q,a in balanced_example.items() if q == 1}
risk_qa = {updated_risk_keys(q):a for q,a in balanced_example.items() if q != 1}
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
print(total_score(hs, rs))
balanced_example = {
1: 6,
2: 4,
3: 3,
4: 3,
5: 5,
6: 5,
7: 5,
8: 4,
}
horizon_qa = {q:a for q,a in balanced_example.items() if q == 1}
risk_qa = {updated_risk_keys(q):a for q,a in balanced_example.items() if q != 1}
hs = sum([horizon_score(q, a) for q,a in horizon_qa.items()])
rs = sum([risk_score(q, a) for q,a in risk_qa.items()])
print(total_score(hs, rs))
| answers2risk.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/imdeepmind/GenerateFace/blob/main/FaceGAN_V1_0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="U7FbkU6QPmkY" outputId="da7085e5-9248-41a4-cf75-c33a0f25cdef" colab={"base_uri": "https://localhost:8080/", "height": 146}
# Setting the env for the model
# !git clone https://github.com/imdeepmind/GenerateFace.git
# !mv GenerateFace/* ./
# !cp "/content/drive/My Drive/Copy of img_align_celeba.zip" ./
# !mkdir data/
# !unzip -qq "Copy of img_align_celeba.zip" -d ./data/images
# !rm "Copy of img_align_celeba.zip"
# + id="qDOqJHtlRUKC"
# Model Configuration
MODE = "test"
config = {
"test": {
"path": "./data/images/img_align_celeba",
"batch_size": 16
}
}
# + id="ePEAz1ZhQuRd"
# Building the dataloader for loading our custom dataset
from data_loader import FaceDataset
from torch.utils.data import DataLoader, sampler
from torchvision import transforms
custom_transform = transforms.Compose([transforms.Resize((28,28)), transforms.ToTensor()])
dataset = FaceDataset(config[MODE]["path"], custom_transform)
train_loader = DataLoader(dataset=dataset, batch_size=config[MODE]["batch_size"], shuffle=True)
# + id="r817gDz-Sw7Y" outputId="7202904a-9da9-4564-e839-5a07e725cfda" colab={"base_uri": "https://localhost:8080/", "height": 35}
for t in train_loader:
print(t.shape)
break
# + id="PaQQ73FhTsiv"
| FaceGAN_V1_0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="0wPixjHS38aO"
# # **Beginner's Python - Session Two Physics/Engineering Answers**
# + [markdown] id="tdITd2KuzKT3"
# ## **Numerically solving an ODE**
# + [markdown] id="jTKCIuxNPWEK"
# In this exercise we will be writing some code which generates a plot of the motion of a mass hanging on the end of an (idealised) spring. This will involve solving the following linear differential equation numerically using Euler's method.
#
# $$\frac{d^2x}{dt^2} = -\frac{k}{m}x-g $$
#
# If you're unfamiliar with Euler's method, you can check out https://tutorial.math.lamar.edu/classes/de/eulersmethod.aspx.
#
# + [markdown] id="eZPu1rpXnesd"
# First of all, in the cell below write code which takes a user input and displays the prompt text "Enter initial position coordinate".
#
# You should assign this user input - *cast as a float* - to a variable called ```x0```. After you've run this cell, input a value between -5.0 and 5.0 and hit enter.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="AerNBkAgoOP1" outputId="be07e24a-8479-4caf-8e93-1d7837d3c5d2"
x0 = float(input("Please input an initial position"))
# + [markdown] id="hPA1ELN6TJ9S"
# Now run the cell below. You should see a graph generated which shows the numerical solutions for both velocity and position of the mass. You can also edit the parameter values at the top and re-run the cell to see the effect on the numerical solution.
#
# **Note:** Don't worry about the details of the code, but know that it gives us the numerical solutions for the position and velocity of the mass, which we will work with below.
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="lgJB8aeRLf9U" outputId="03fc3962-0f3f-4292-cae8-30eb3415d15e"
# Do not edit the code in this cell. You can edit the 6 parameters at the top and re-run
# the cell to see the effect on the graph, but only after you have completed the questions.
import numpy as np
import matplotlib.pyplot as plt
N = 2000 # the number of steps used - higher N results in a more accurate result
v0 = 0.0 # initial velocity of the mass
tau = 5.0 # number of seconds we are solving over
k = 3.5 # spring constant
mass = 0.2 # mass
gravity = 9.81 # strength of gravity
time = np.linspace(0, tau, N)
dt = tau/float(N-1) # time between each step
def euler_method(y, t, dt, derivs):
y_next = y + derivs(y,t) * dt
return y_next
y = np.zeros([N,2])
y[0,0] = x0
y[0,1] = v0
def SHO(state, time):
g0 = state[1]
g1 = - k / mass * state[0] - gravity
return np.array([g0, g1])
for i in range(N-1):
y[i+1] = euler_method(y[i], time[i], dt, SHO)
x_data = [y[i,0] for i in range(N)] # this creates a long list containing the position coordinates
v_data = [y[i,1] for i in range(N)] # this does the same for velocity
plt.plot(time, x_data) # these just create a graph of the data
plt.plot(time, v_data)
plt.xlabel("time (s)")
plt.ylabel("position (m), velocity (m/s)")
plt.show()
# + [markdown] id="tDu8X3f0crEk"
# The above code also gives us two *lists*, each containing N numbers. These are ```x_data```, containing the position coordinates for a range of times, and ```v_data```, containing the velocities. Already it's clear that Python is extremely useful for handling these lists, since they are too large for us to do things with them by hand.
# + [markdown] id="kdbp1Dz-eoMJ"
# Print below the following, replacing the #### with the correct value, rounded to 5 decimal places: **"The maximum position value achieved was #### and the maximum velocity was ####"**
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="rKcQI7cfe189" outputId="055dee63-1cbb-4bb9-9dc9-9597db8c86ea"
print("The maximum position value achieved was", round(max(x_data),5), "and the maximum velocity was", round(max(v_data),5))
# + [markdown] id="dwxl0SC7piII"
# What was the range in values of the velocity? Print your answer below to two decimal places. Remember that since ```range``` is a reserved name in Python, you should pick a different one.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="1kVMEOAGpqA-" outputId="0d6a0d21-4a31-42b8-8c5b-53924e1cdff3"
spread = round(max(v_data) - min(v_data),2)
print(spread)
# + [markdown] id="inHw8zI6gd_u"
# A useful feature in Python is the ability to specify a single element of a list. Each entry of a list is numbered, *starting from 0*, and you can then specify an entry by putting the position in square brackets after the list. For example:
#
# + colab={"base_uri": "https://localhost:8080/", "height": 51} id="GxewM49HhJbM" outputId="7e2375b6-9bf5-4a3e-9410-235ebc3ccf44"
example_list = [1,3,5,7,9]
print(example_list[3])
print(example_list[0])
# + [markdown] id="kglgz634ktnD"
# Print below the 444th entry in the list ```v_data``` rounded to 4 decimal places (for simplicity, we will consider the first entry as the "zeroth" entry, since Python starts counting at 0.)
#
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="jyxlCzmIk4xL" outputId="51dc21b2-eb0f-491d-e4ef-e5d025745e06"
print(round(v_data[444],4))
# + [markdown] id="EPoJC30nhmsW"
# You can also add new elements to the end of a list, using the ```.append()``` function. You simply write the function after a list, and can put *one* new element in the brackets.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="lFy6xH1jh621" outputId="6b282797-68cb-4562-a7b6-8aa76fb6b2ed"
example_list.append(20)
print(example_list)
# + [markdown] id="5jheSpYFrG5k"
# In the cell below there is a list defined, which contains the maximum/minimum values for both position and velocity. You must add two more elements onto the list, namely the mean values for both parameters, and then print the list.
#
# **Notes:**
# * You should calculate the mean by summing all of the data values and dividing by the number of values, ```N```.
# * Enter values to three decimal places.
#
# Hint: Create two new variables and then append them onto ```data_set```.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="67gbIfjssTZa" outputId="92c8e83f-fe7d-44d0-dba0-10c45b6746fd"
x_max = round(max(x_data),3)
x_min = round(min(x_data),3)
v_max = round(max(v_data),3)
v_min = round(min(v_data),3)
data_set = [x_max, x_min, v_max, v_min]
x_mean = round(sum(x_data) / N, 3)
v_mean = round(sum(v_data) / N, 3)
data_set.append(x_mean)
data_set.append(v_mean)
print(data_set)
| session-two/subject_questions/PhysEng_two_answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Introduction to Data Science
# ---
#
# Welcome to Data Science! In this notebook, you will learn how to use Jupyter Notebooks and the basics of programming in Python.
#
# *Estimated Time: 30 minutes*
#
# ---
#
# **Topics Covered:**
# - Learn how to work with Jupyter notebooks.
# - Learn about variables in Python, including variable types, variable assignment, and arithmetic.
# - Learn about functions in Python, including defining and calling functions, as well as scope.
#
# **Parts:**
# - Jupyter Notebooks
# - Programming in Python
# - Variables
# - Functions
# - Scope
# ## Jupyter Notebooks
# ---
# In this section, we will learn the basics of how to work with Jupyter notebooks.
# This Jupyter notebook is composed of 2 kinds of cells: markdown and code. A **markdown cell**, such as this one, contains text. A **code cell** contains code in Python, a programming language that we will be using for the remainder of this module.
#
# To run a code cell, press Shift-Enter or click Cell > Run Cells in the menu at the top of the screen. To edit a code cell, simply click in the cell and make your changes.
# ### Exercise
#
# Try running the code below. What happens?
# CODE
print("Hello World!")
# Now, let's try editing the code. In the cell below, replace "friend" with your name for a more personalized message.
print("Welcome to Jupyter notebooks, friend.")
# ## Programming in Python
# ---
# Now that you are comfortable with using Jupyter notebooks, we can learn more about programming in this notebook.
#
# ### What is Programming?
# **Programming** is giving the computer a set of step-by-step instructions to follow in order to execute a task. It's a lot like writing your own recipe book! For example, let's say you wanted to teach someone how to make a PB&J sandwich:
# 1. Gather bread, peanut butter, jelly, and a spreading knife.
# 2. Take out two slices of bread.
# 3. Use the knife to spread peanut butter on one slice of bread.
# 4. Use the knife to spread jelly on the other slice of bread.
# 5. Put the two slices of bread together to make a sandwich.
#
# Just like that, programming is breaking up a complex task into smaller commands for the computer to understand and execute.
#
# In order to communicate with computers, however, we must talk to them in a way that they can understand us: via a **programming language**.
#
# There are many different kinds of programming languages, but we will be using **Python** because it is concise, simple to read, and applicable in a variety of projects - from web development to mobile apps to data analysis.
# ## Variables
# ---
# In programming, we often compute many values that we want to save so that we can use the result in a later step. For example, let's say that we want to find the number of seconds in a day. We can easily calculate this with the following:
# <p style="text-align: center">$60 * 60 * 24 = 86400$ seconds</p>
# However, let's say that your friend Alexander asked you how many seconds there are in three days. We could, of course, perform the calculation in a similar manner:
# <p style="text-align: center">$(60 * 60 * 24) * 3 = 259200$ seconds</p>
# But we see that we repeated the calculation in parentheses above. Instead of doing this calculation again, we could have saved the result from our first step (calculating the number of seconds in a day) as a variable.
# +
# This is Python code that assigns variables.
# The name to the left of the equals sign is the variable name.
# The value to the right of the equals sign is the value of the variable.
# Press Shift-Enter to run the code and see the value of our variable!
seconds_in_day = 60 * 60 * 24 # This is equal to 86400.
seconds_in_day
# -
# Then, we can simply multiply this variable by three to get the number of seconds in *three* days:
# +
# The code below takes the number of seconds in a day (which we calculated in the previous code cell)
# and multiplies it by 3 to find the number of seconds in 3 days.
seconds_in_three_days = seconds_in_day * 3 # This is equal to 259200.
seconds_in_three_days
# -
# As you can see, variables can be used to simplify calculations, make code more readable, and allow for repetition and reusability of code.
#
# ### Variable Types
#
# Next, we'll talk about a few types of variables that you'll be using. As we saw in the example above, one common type of variable is the *integer* (positive and negative whole numbers). You'll also be using decimal numbers in Python, which are called *doubles* (positive and negative decimal numbers).
#
# A third type of variable used frequently in Python is the *string*; strings are essentially sequences of characters, and you can think of them as words or sentences. We denote strings by surrounding the desired value with quotes. For example, "Data Science" and "2017" are strings, while `bears` and `2020` (both without quotes) are not strings.
#
# Finally, the last variable type we'll go over is the *boolean*. They can take on one of two values: `True` or `False`. Booleans are often used to check conditions; for example, we might have a list of dogs, and we want to sort them into small dogs and large dogs. One way we could accomplish this is to say either `True` or `False` for each dog after seeing if the dog weighs more than 15 pounds.
#
# Here is a table that summarizes the information in this section:
# |Variable Type|Definition|Examples|
# |-|-|-|
# |Integer|Positive and negative whole numbers|`42`, `-10`, `0`|
# |Double|Positive and negative decimal numbers|`73.9`, `2.4`, `0.0`|
# |String|Sequence of characters|`"Go Bears!"`, `"variables"`|
# |Boolean|True or false value|`True`, `False`|
#
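# The short cell below is an added illustration (the variable names are arbitrary): it assigns one value of each type from the table and prints them.

# +
an_integer = 42          # integer: a whole number
a_double = 73.9          # double: a decimal number
a_string = "Go Bears!"   # string: a sequence of characters in quotes
a_boolean = True         # boolean: either True or False
print(an_integer, a_double, a_string, a_boolean)
# -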
# ### Arithmetic
# Now that we've discussed what types of variables we can use, let's talk about how we can combine them together. As we saw at the beginning of this section, we can do basic math in Python. Here is a table that shows how to write such operations:
#
# |Operation|Operator|Example|Value|
# |-|-|-|-|
# |Addition|+|`2 + 3`|`5`|
# |Subtraction|-|`2 - 3`|`-1`|
# |Multiplication|*|`2 * 3`|`6`|
# |Division|/|`7 / 3`|`2.66667`|
# |Remainder|%|`7 % 3`|`1`|
# |Exponentiation|**|`2 ** 0.5`|`1.41421`|
#
# In addition, you can use parentheses to denote priority, just like in math.
#
# As an exercise, try to predict what each of these lines below will print out. Then, run the cell and check your answers.
# +
q_1 = (3 + 4) / 2
print(q_1) # What prints here?
q_2 = 3 + 4 / 2
print(q_2) # What prints here?
some_variable = 1 + 2 + 3 + 4 + 5
q_3 = some_variable * 4
print(q_3) # What prints here?
q_4 = some_variable % 3
print(q_4) # What prints here?
step_1 = 6 * 5 - (6 * 3)
step_2 = (2 ** 3) / 4 * 7
q_5 = 1 + step_1 ** 2 * step_2
print(q_5) # What prints here?
# -
# ## Functions
# So far, you've learnt how to carry out basic operations on your inputs and assign variables to certain values.
# Now, let's try to be more efficient.
#
# Let's say we want to perform a certain operation on many different inputs that will produce distinct outputs. What do we do? We write a _**function**_.
#
# A function is a block of code which works a lot like a machine: it takes an input, does something to it, and produces an output.
#
# The input is put between brackets and can also be called the _argument_ or _parameter_. Functions can have multiple arguments.
#
# Try running the cell below after changing the variable _name_:
#
# +
# Edit this cell to your own name!
name = "<NAME>"
# Our function
def hello(name):
return "Hello " + name + "!"
hello(name)
# -
# Interesting, right? Now, you don't need to write 10 different lines with 10 different names to print a special greeting for each person. All you need to is write one function that does all the work for you!
#
# Functions are very useful in programming because they help you write shorter and more modular code. A good example to think of is the _print_ function, which we've used quite a lot in this module. It takes many different inputs and performs the specified task, printing its input, in a simple manner.
#
# Now, let's write our own function. Let's look at the following rules:
#
# ### Defining
# - All functions must start with the "def" keyword.
# - All functions must have a name, followed by parentheses, followed by a colon. Eg. def hello( ):
# - The brackets may have a variable that stores its arguments (inputs)
# - All functions must have a "return" statement which will return the output. Think of a function like a machine. When you put something inside, you want it to return something. Hence, this is very important.
#
# ### Calling
# After you define a function, it's time to use it. This is known as _calling_ a function.
#
# To call a function, simply write the name of the function with your input variable in brackets (argument).
#
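# Before you try the exercise below, here is one more complete example (added for illustration; the function name `add_two` is arbitrary) that follows all of the defining and calling rules above.

# +
# define a function named add_two that takes one argument
def add_two(number):
    return number + 2

# call the function with the input 3; this returns 5
add_two(3)
# -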
# +
# Complete this function
def #name(argument):
return # function must return a value
# Calling our function below...
my_first_function(name)
# -
# Great! Now let's do some math. Let's write a function that returns the square of the input.
#
# Try writing it from scratch!
# +
# square function
square(5)
# -
# Neat stuff! Try different inputs and check if you get the correct answer each time.
#
# You've successfully written your first function from scratch! Let's take this up one notch.
#
# #### The power function
#
# _pow_ is a function that takes in two numbers: x, which is the "base" and y, the "power". So when you write pow(3,2) the function returns 3 raised to the power 2, which is 3^2 = 9.
#
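# As a quick added check (not part of the original task), you can call the built-in function directly:

print(pow(3, 2))  # prints 9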
# Task: Write a function called _mulpowply_ which takes in three inputs (x, y, z) and returns the value of x multiplied by y to power z. Symbolically, it should return (xy)^z.
# +
# mulpowply function
# -
# ## Scope
# ---
# Programming is great, but it can also be quite peculiar sometimes. For example, each variable defined outside of any function is, by default, **global**.
#
# Try executing the code below:
# +
# Global Variable - name
name = "<NAME>"
# our function
def salutation(name):
return "Hi " + name + ", nice to meet you!"
# calling our function
salutation(name)
# un-comment the line below
#salutation("<NAME>")
# -
# Even though your argument was called _name_, it didn't output <NAME>, which was the **global** value of the variable called name. Instead, it gave preference to the **local** value which was given to the function as an argument, <NAME>.
#
# Think of it as filling your coffeemaker (function) up with coffee (variable). If you have a variable with **global** access called _name_ which is filled with coffee called <NAME>, you can choose to either:
#
# 1) Not input another value in your function. (Use the same name of the **global** variable as your argument)
#
# In this case, the **global** type of coffee will still be used.
#
# 2) Choose to fill another value. In this case, your function will assign the value you pass as the argument to the “variable” which **is** the argument.
#
# Think of it as overriding your **global** coffee and putting a new type of coffee into your coffeemaker.
#
# ### Activity
#
# Using the rules of scope you've learned so far, complete the function _puzzle_ to output the value **35**.
# +
# Scope Puzzle!
x = 5
y = 6
z = 7
def puzzle(x, y):
return x * y
# fill in this function call
puzzle()
# -
# ## Control
# ---
# Sometimes, we want to manipulate the flow of our code. For example, we might want our code to make decisions on its own or repeat itself a certain amount of times. By implementing control structures, we can avoid redundant code and make processes more efficient.
#
# ### Conditionals
# We use **conditionals** to run certain pieces of code _if_ something is true. For example, we should only go to the grocery store _if_ we are out of peanut butter!
#
# We use **comparators** to determine whether an expression is _true_ or _false_. There are six comparators to be aware of:
# 1. Equal to: ==
# 2. Not equal to: !=
# 3. Greater than: >
# 4. Greater than or equal to: >=
# 5. Less than: <
# 6. Less than or equal to: <=
#
# Let's try it out!
# +
# EXERCISE 1
# Determine whether the following will print true or false
# Run the code to check your answers!
print(10 == 10)
print(2016 < 2017)
print("foo" != "bar")
print( (1+2+3+4+5) <= (1*2*3))
# +
# EXERCISE 2
# Write an expression that evaluates to True
expression1 = # YOUR CODE HERE
# Write an expression that evaluates to False
expression2 = # YOUR CODE HERE
print(expression1)
print(expression2)
# -
# Now that we know how to compare values, we can tell our computer to make decisions using the **if statement**.
#
# ### If Statements
# An **if statement** takes the following form:
# +
# Please do not run this code, as it will error. It is provided as a skeleton.
if (condition1):
# code to be executed if condition1 is true
elif (condition2):
# code to be executed if condition2 is true
else:
# code to be executed otherwise
# -
# With if statements, we can control which code is executed. Check out how handy this can be in the activity below!
# +
# We want to make a PB&J sandwich, but things keep going wrong!
# Modify the variables below so that you go grocery shopping
# with no mishaps and successfully purchase some peanut butter.
# Run the code when you're done to see the results.
print("Let's make a PB&J sandwich!")
peanut_butter = 10
jelly = 100
gas = 60
flat_tire = True
if (peanut_butter < 50):
print("Uh oh! We need more peanut butter. Must go grocery shopping...")
if (gas < 75):
print("Oops! Your car is out of gas :(")
elif (flat_tire):
print("Oh no! You have a flat tire :'(")
else:
print("You made it to the grocery store and succesfully got peanut butter!")
peanut_butter = # reset the value of peanut_butter so it is 100% full again
else:
print("We have all the ingredients we need! Yummy yummy yay!")
# -
# ### For Loops
# We can also regulate the flow of our code by repeating some action over and over. Say that we wanted to greet ten people. Instead of copying and pasting the same call to _print_ over and over again, it would be better to use a **for loop**.
#
# A basic **for loop** is written in the following order:
# - The word "for"
# - A name we want to give each item in a sequence
# - The word "in"
# - A sequence (e.g., "range(100)" to go through the numbers 0-99)
#
# For example, to greet someone ten times, we could write:
# Run me to see "hello!" printed ten times!
for i in range(10):
print("hello!")
# In this way, for loops help us avoid redundant code and have useful capabilities.
#
# **Exercise:** Write a function that returns the sum of the first _n_ numbers, where _n_ is the input to the function. Use a for loop!
# +
def sum_first_n(n):
# YOUR CODE HERE
sum_first_n(5) # should return 1+2+3+4+5 = 15
# -
# ## Conclusion
# ---
# Congratulations! You've successfully learnt the basics of programming: creating your own variables, writing your own functions, and controlling the flow of your code! You will apply the concepts learnt throughout this notebook in class. After delving into this notebook, you are only just getting started!
# ---
#
# ## Bibliography
# Some examples adapted from the UC Berkeley Data 8 textbook, <a href="https://www.inferentialthinking.com">*Inferential Thinking*</a>.
#
# Authors:
# - <NAME>
# - <NAME>
# - <NAME>
| intro/intro-module-final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Continuous Futures
# Continuous Futures are an abstraction of the chain of consecutive contracts for the same underlying commodity or asset. Additionally, they maintain an ongoing reference to the active contract on the chain. Continuous futures make it much easier to maintain a dynamic reference to contracts that you want to order, and get historical series of data. In this lesson, we will explore some of the ways in which we can use continuous futures to help us in our research.
# In order to create an instance of a `ContinuousFuture` in Research, we need to use the <a href="https://www.quantopian.com/help#quantopian_research_experimental_continuous_future">continuous_future</a> function. Similar to history, we need to import it from research's experimental library:
from quantopian.research.experimental import continuous_future, history
# To create a continuous future, we just need to supply a root_symbol to the `continuous_future` function. The following cell creates a continuous future for Light Sweet Crude Oil.
cl = continuous_future('CL')
cl
# ### Continuous Futures & `history`
# We can use `history` to get pricing and volume data for a particular `ContinuousFuture` in the same way we do for `Futures`. Additionally, we can get the reference to its currently active `Future` contract by using the `contract` field.
# Running the next cell will get pricing data for our CL continuous future and plot it:
# +
# Pricing data for CL `ContinuousFuture`.
cl_pricing = history(
cl,
fields='price',
frequency='daily',
start_date='2015-10-21',
end_date='2016-06-01'
)
cl_pricing.plot()
# -
# To better understand the need for continuous futures, let's use `history` to get pricing data for the chain of individual contracts we looked at in the previous lesson and plot it.
# +
cl_contracts = symbols(['CLF16', 'CLG16', 'CLH16', 'CLJ16', 'CLK16', 'CLM16'])
# Pricing data for our consecutive contracts from earlier.
cl_consecutive_contract_pricing = history(
cl_contracts,
fields='price',
frequency='daily',
start_date='2015-10-21',
end_date='2016-06-01'
)
cl_consecutive_contract_pricing.plot();
# -
# The price difference between contracts at a given time is not considered to be an increase in value in the future. Instead, it is associated with the carrying cost and the opportunity cost of holding the underlying commodity or asset prior to delivery. This concept is covered more in depth in the Introduction to Futures Contracts lecture from our <a href="https://www.quantopian.com/lectures">Lecture Series</a>.
# Next, let's look at the price history for active contracts separately. We will notice that this difference in price creates discontinuities when a contract expires and the reference moves to the next contract:
# +
# Pricing and contract data for unadjusted CL `ContinuousFuture`.
# Adjustments are covered in the next section.
cl_unadjusted = continuous_future('CL', adjustment=None)
cl_history = history(
cl_unadjusted,
fields=['contract', 'price'],
frequency='daily',
start_date='2015-10-21',
end_date='2016-06-01'
)
cl_active_contract_pricing = cl_history.pivot(index=cl_history.index, columns='contract')
cl_active_contract_pricing.plot();
# -
# Part of the job of our continuous future abstraction is to account for these discontinuities, as we will see next by plotting our CL continuous future price against the price history for individual active contracts.
cl_active_contract_pricing.plot()
cl_pricing.plot(style='k--')
# The above plot is adjusted for the price jumps that we see between contracts. This allows us to get a price series that reflects the changes in the price of the actual underlying commodity/asset.
# In the next section, we will explore the options for adjusting historical lookback windows of continuous futures.
# ### Adjustment Styles
# As we just saw, continuous future historical data series are adjusted to account for price jumps between contracts by default. This can be overridden by specifying an adjustment argument when creating the continuous future. The adjustment argument has 3 options: `'mul'` (default), `'add'`, and `None`.
# The `'mul'` option multiplies the prices series by the ratio of consecutive contract prices. The effect from each jump is only applied to prices further back in the lookback window.
# Similarly, the `'add'` technique adjusts by the difference between consecutive contract prices.
# Finally, passing `None` means that no adjustments will be applied to the lookback window.
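# As a minimal sketch (re-using the `continuous_future`, `history`, and `cl_pricing` objects from above), the additively adjusted series can be compared with the default multiplicative adjustment:
# +
# Sketch: additively adjusted CL continuous future vs. the default 'mul' series from earlier.
cl_add = continuous_future('CL', adjustment='add')
cl_add_pricing = history(
    cl_add,
    fields='price',
    frequency='daily',
    start_date='2015-10-21',
    end_date='2016-06-01'
)
cl_pricing.plot()              # 'mul'-adjusted series computed earlier
cl_add_pricing.plot(style='k--');
# -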
# ### Roll Styles
# In the previous lesson we saw that trading activity jumps from one contract in the chain to the next as they approach their delivery date. A continuous future changes its reference from the active contract to the next contract based on its roll attribute.
# A `'calendar'` roll means that the continuous future will point to the next contract in the chain when it reaches the `auto_close_date` of the current active contract.
# The `volume` roll (default) means that the continuous future will begin pointing to the next contract when the trading volume of the next contract surpasses the volume of the current contract. The idea is to roll when the majority of traders have moved to the next contract. If the volume swap doesn't happen before the `auto_close_date`, the contract will roll at this date. Note: volume rolls will not occur earlier than 7 trading days before the `auto_close_date`.
# Let's get the volume history of our CL continuous future and plot it against the individual contract volumes we saw before.
# +
cl_consecutive_contract_data = history(
cl_contracts,
fields='volume',
frequency='daily',
start_date='2015-10-21',
end_date='2016-06-01'
)
cl_continuous_volume = history(
cl,
fields='volume',
frequency='daily',
start_date='2015-10-21',
end_date='2016-06-01'
)
cl_consecutive_contract_data.plot()
cl_continuous_volume.plot(style='k--');
# -
# The volume for the CL `ContinuousFuture` is essentially the skyline of the individual contract volumes. As the volume moves from one contract to the next, the continuous future starts pointing to the next contract. Note that there are some points where the volume does not exactly match, most notably in the transition from `CLK16` to `CLM16` between April and May. This is because the rolls are currently computed daily, using only the previous day's volume to avoid lookahead bias.
# ### Offset
# The offset argument allows you to specify whether you want to maintain a reference to the front contract or to a back contract. Setting offset=0 (default) maintains a reference to the front contract, or the contract with the next soonest delivery. Setting offset=1 creates a continuous reference to the contract with the second closest date of delivery, etc.
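# A minimal sketch (same date range and fields as above) of a continuous future that references the second-nearest contract:
# +
# Sketch: continuous future referencing the contract with the second-closest delivery date.
cl_back = continuous_future('CL', offset=1)
cl_back_pricing = history(
    cl_back,
    fields='price',
    frequency='daily',
    start_date='2015-10-21',
    end_date='2016-06-01'
)
cl_back_pricing.plot();
# -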
print continuous_future.__doc__
| Notebooks/quantopian_research_public/notebooks/tutorials/4_futures_getting_started_lesson4/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import numpy as np
import math
# +
x1 = torch.tensor([[1],[2.]])
W1 = torch.tensor([[1,2.],
[-1,3]], requires_grad=True)
W2 = torch.tensor([[1,-1.],
[1,2]], requires_grad=True)
W3 = torch.tensor([[-1,2.],
[0,1]], requires_grad=True)
phi1 = torch.relu
phi2 = torch.sigmoid
phi3 = torch.softmax
l = lambda yh,t: - t.view(1,-1) @ torch.log(yh)
t = torch.tensor([[1], [0.]])
# same result:
# sl = nn.NLLLoss(reduction="sum")
# sl(torch.log_softmax(z3,0).view(1,-1), torch.max(t, 0)[1])
grad = lambda x: x.clone().detach().requires_grad_(True)
z1 = W1@x1
x2 = phi1(z1)
z2 = W2@x2
#z2 = grad(z2)
x3 = phi2(z2)
#x3 = grad(x3)
z3 = W3@x3
yh = phi3(z3, 0)
E = l(yh.flatten(),t)
T = lambda A: A.transpose(0,1)
E.backward()
E
# -
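# The cell below recomputes the same gradients by hand, chaining the Jacobians of the softmax, sigmoid and ReLU activations, and compares them with the values autograd stored in `W1.grad`, `W2.grad` and `W3.grad` when `E.backward()` was called above.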
with torch.no_grad():
dy = -t/yh
dsoft = torch.tensor([[yh[0,0]*(1-yh[0,0]), -yh[0,0]*yh[1,0]],
[-yh[0,0]*yh[1,0], yh[1,0]*(1-yh[1,0])]])
dsig = torch.diag((x3 * (1-x3)).flatten())
drelu = torch.diag((z1 > 0).to(torch.float).flatten())
dz3 = dsoft @ dy
dz2 = dsig @ (T(W3) @ dz3)
dz1 = drelu @ (T(W2) @ dz2)
dw3 = dz3 @ T(x3)
dw2 = dz2 @ T(x2)
dw1 = dz1 @ T(x1)
print(dw3)
print(dw2)
print(dw1)
print(W3.grad)
print(W2.grad)
print(W1.grad)
dz3
dw2
W2.grad
W3.grad
dw3
| _useless/notebooks/Backprop.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Seismic Data Analysis
# ### What is Seismic Hazard Analysis?
#
#
# In general terms, the seismic hazard defines the expected seismic ground motion at a site, a phenomenon which may result in destruction and losses.
#
# Two major approaches – deterministic and probabilistic – are currently used worldwide for seismic hazard assessment.
#
# The deterministic approach takes into account a single, particular earthquake, the event that is expected to produce the strongest level of shaking at the site.
#
# The outputs – macroseismic intensity, peak ground acceleration, peak ground velocity, peak ground displacement, response spectra – may be used directly in engineering applications.
#
# In the probabilistic approach, initiated with the pioneering work of Cornell, the seismic hazard is estimated in terms of a ground motion parameter – macroseismic intensity, peak ground acceleration – and its annual probability of exceedance (or return period) at a site.
#
# The method yields regional seismic probability maps, displaying contours of maximum ground motion (macroseismic intensity, PGA) of equal – specified – return period.
#
#
# Source : http://www.infp.ro/en/seismic-hazard/
# ### Dataset :
#
# * Name- seismic-bumps Data Set
#
# * Abstract: The data describe the problem of forecasting high-energy (higher than 10^4 J) seismic bumps in a coal mine. Data come from two longwalls located in a Polish coal mine.
#
# * Source : https://archive.ics.uci.edu/ml/datasets/seismic-bumps
#
# *** Dataset Information ***
#
# Mining activity was and is always connected with the occurrence of dangers which are commonly called
# mining hazards. A special case of such a threat is a seismic hazard, which frequently occurs in many
# underground mines. Seismic hazard is the hardest to detect and predict of the natural hazards and, in
# this respect, it is comparable to an earthquake. Increasingly advanced seismic and seismoacoustic
# monitoring systems allow a better understanding of rock mass processes and the definition of seismic hazard
# prediction methods. The accuracy of the methods created so far is, however, far from perfect. The complexity of
# seismic processes and the large disproportion between the number of low-energy seismic events and the number
# of high-energy phenomena (e.g. > 10^4 J) cause statistical techniques to be insufficient to predict
# seismic hazard.
#
#
#
#
# The task of seismic prediction can be defined in different ways, but the main
# aim of all seismic hazard assessment methods is to predict (with a given precision relating to time and
# date) increased seismic activity which can cause a rockburst. In the data set, each row contains a
# summary statement about seismic activity in the rock mass within one shift (8 hours). If the decision
# attribute has the value 1, then in the next shift a seismic bump with an energy higher than 10^4 J was
# registered. The task of hazard prediction is based on the relationship between the energy of recorded
# tremors and seismoacoustic activity and the possibility of rockburst occurrence. Hence, such a hazard
# prognosis is not connected with accurate rockburst prediction. Moreover, with information about the
# possibility of a hazardous situation occurring, an appropriate supervision service can reduce the risk of
# rockburst (e.g. by distressing shooting) or withdraw workers from the threatened area. Good prediction
# of increased seismic activity is therefore a matter of great practical importance. The presented data
# set is characterized by an unbalanced distribution of positive and negative examples: there
# are only 170 positive examples representing class 1.
#
#
# <img src= "att.jpg">
# # Classification of Seismic Hazard in Coal Mines
# +
# Dependencies import
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import arff
import pandas as pd
import seaborn as sns;
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import roc_auc_score, f1_score
from sklearn import preprocessing
# %matplotlib inline
# +
## load data and clean
data = arff.loadarff('data/seismic-bumps.arff')
df = pd.DataFrame(data[0])
df['seismic'] = df['seismic'].str.decode('utf-8')
df['seismoacoustic'] = df['seismoacoustic'].str.decode('utf-8')
df['shift'] = df['shift'].str.decode('utf-8')
df['ghazard'] = df['ghazard'].str.decode('utf-8')
df['class'] = df['class'].str.decode('utf-8')
df['class'] = pd.to_numeric(df['class'])
# -
df.head()
# # Exploratory Data Analysis
# +
df_plot = df[['genergy', 'gpuls', 'gdenergy', 'gdpuls',
'nbumps', 'nbumps2',
'energy', 'maxenergy']].copy()
p = sns.pairplot(df_plot)
# -
# The plots above show some collinearity between attributes (e.g. genergy and gpuls, energy and maxenergy). The following will use regularization to mitigate the problem.
# # Build models
# +
data_x = df.loc[:,['shift', 'genergy', 'gpuls', 'gdenergy', 'gdpuls',
'nbumps', 'nbumps2', 'nbumps3', 'nbumps4', 'nbumps5',
'nbumps6', 'nbumps7', 'nbumps89',
'energy', 'maxenergy']]
# true response
data_y = df.loc[:,['class']]
# responses from seismic theories
data_y1 = df.loc[:, ['seismic']]
data_y2 = df.loc[:, ['seismoacoustic']]
data_y3 = df.loc[:, ['ghazard']]
Le = preprocessing.LabelEncoder()
Le.fit(['a', 'b', 'c', 'd'])
data_y1['seismic'] = Le.transform(data_y1['seismic'])
data_y2['seismoacoustic'] = Le.transform(data_y2['seismoacoustic'])
data_y3['ghazard'] = Le.transform(data_y3['ghazard'])
Le2 = preprocessing.LabelEncoder()
Le2.fit(['W', 'N'])
data_x['shift'] = Le2.transform(data_x['shift'])
# -
X_train, X_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.2, random_state=42)
X_train.describe()
X_train.info()
# #### Let's find the best regularization coefficient
# +
## use ROC as the score
C = [1e-4, 1e-3, 1e-2, 1e-1, 1, 10, 1e2]
scores = []
for c in C:
logist = LogisticRegression(penalty='l1', C=c, max_iter=500)
logist.fit(X_train, y_train.values.ravel())
scores.append(roc_auc_score(y_train['class'].values, logist.predict(X_train)))
C_best = C[scores.index(max(scores))]
print("Best C: ", C_best)
# -
# ## Using Logistic Regression
# +
clf = LogisticRegression(penalty='l1', C=C_best, max_iter = 500)
clf.fit(X_train, y_train.values.ravel())
roc_train = roc_auc_score(y_train['class'].values, clf.predict(X_train))
# print("training score: %.4f" % clf.score(Xtrain, ytrain))
print("training score: %.4f" % roc_train)
# print("test score: ", clf.score(Xtest, ytest))
roc_test = roc_auc_score(y_test['class'].values, clf.predict(X_test))
print("test score: %.4f" % roc_test)
print("n_iter: ", clf.n_iter_)
# -
clf.coef_
# +
ind = y_test.index.values
# get the responses from the seismic, seismoacoustic and ghazard methods
# that correspond to indices in ytest
yseismic = data_y1.loc[ind, ['seismic']]
yseismoacoustic = data_y2.loc[ind, ['seismoacoustic']]
yghazard = data_y3.loc[ind, ['ghazard']]
# -
# Responses as probabilities from the logit model
# +
yprob = clf.predict_proba(X_test)
yprob
# -
# Threshold
ypred = yprob[:,1] > 0.2 # threshold
# As the plot below shows, to use the probabilities from the prediction we need to set a threshold that determines whether the response should be considered hazardous or not. The hard labels from the prediction will be mostly 0's.
#
# Note: setting the threshold requires further study. One way is to tune the threshold on the training set and evaluate the performance on the test set, as sketched below.
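# As a rough illustration (an assumption for this notebook, not part of the original analysis), a candidate cut-off can be picked from the training ROC curve, e.g. by maximizing Youden's J statistic. This only re-uses `roc_curve`, `clf`, `X_train` and `y_train` defined above.
# +
# Sketch: choose a candidate threshold on the training set (Youden's J = TPR - FPR).
fpr, tpr, thresholds = roc_curve(y_train['class'].values,
                                 clf.predict_proba(X_train)[:, 1])
candidate_threshold = thresholds[np.argmax(tpr - fpr)]
print("Candidate threshold from training data: %.3f" % candidate_threshold)
# -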
# +
plt.plot([i for i in range(len(y_test))], y_test, 'x', yprob[:,1], '.')
plt.ylabel('Probability')
plt.title('Raw results from prediction')
# -
plt.plot([i for i in range(len(y_test))], y_test, 'o', ypred, '.')
plt.ylabel('Probability')
plt.title('Probabilities after cut-off')
# ### Results
# +
dy = { 'logit': pd.Series(ypred) }
dfy = pd.DataFrame(dy)
frames = [dfy, yseismic.reset_index(drop=True),
yseismoacoustic.reset_index(drop=True),
yghazard.reset_index(drop=True)]
# build the responses data frame (each column is responses from one method)
df_result = pd.concat(frames, axis = 1)
df_result = df_result*1 # convert bool to int
# -
df_result
# +
yvote = (df_result == 0).sum(axis=1) # number of zeros on each row
yvote = (yvote <= 2)*1
# final results based on the vote from each of the four methods
# 0 means no/low hazard, 1 means hazardous
# if tie, assume response is 1 (hazardous)
df_result['ensemble'] = yvote.values
df_result['true'] = y_test.values
df_result.head(20)
# -
# score from the ensemble method with logit regression
roc_auc_score(y_test['class'].values, df_result['ensemble'].values)
# +
## compare to the three methods already in the dataset
frames = [yseismic.reset_index(drop=True),
yseismoacoustic.reset_index(drop=True),
yghazard.reset_index(drop=True)]
df_result0 = pd.concat(frames, axis = 1)
df_result0 = df_result0*1
yvote0 = (df_result0 == 0).sum(axis=1)
yvote0 = (yvote0 <= 2)*1
df_result0['ensemble'] = yvote0.values
df_result0['true'] = y_test.values
df_result0.head(20)
# -
# score from the ensemble of the three methods in the original dataset
roc_auc_score(y_test['class'].values, df_result0['ensemble'].values)
# score from the seismic method (no ensemble)
roc_auc_score(y_test['class'].values, yseismic['seismic'].values)
# score from the seismoacoustic method (no ensemble)
roc_auc_score(y_test['class'].values, yseismoacoustic['seismoacoustic'].values)
| Seismic Data Analysis Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Reproduce In-situ Sequencing results with Starfish
#
# This notebook walks through a work flow that reproduces an ISS result for one field of view using the starfish package.
#
# ## Load tiff stack and visualize one field of view
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
from showit import image
import pprint
from starfish import data, FieldOfView
from starfish.types import Features, Indices
# +
use_test_data = os.getenv("USE_TEST_DATA") is not None
experiment = data.ISS(use_test_data=use_test_data)
# s.image.squeeze() simply converts the 4D tensor H*C*X*Y into a list of len(H*C) image planes for rendering by 'tile'
# -
# ## Show input file format that specifies how the tiff stack is organized
#
# The stack contains multiple single-plane images, one for each color channel, 'c' (columns in the above image), and imaging round, 'r' (rows in the above image). This protocol assumes that genes are encoded with a length-4 quaternary barcode that can be read out from the images. Each round encodes a position in the codeword. The maximum signal in each color channel (columns in the above image) corresponds to a letter in the codeword. The channels, in order, correspond to the letters: 'T', 'G', 'C', 'A'. The goal is now to process these image data into spatially organized barcodes, e.g., ACTG, which can then be mapped back to a codebook that specifies which gene this codeword corresponds to.
pp = pprint.PrettyPrinter(indent=2)
pp.pprint(experiment._src_doc)
# The flat TIFF files are loaded into a 4-d tensor with dimensions corresponding to imaging round, channel, x, and y. For other volumetric approaches that image the z-plane, this would be a 5-d tensor.
fov = experiment.fov()
primary_image = fov[FieldOfView.PRIMARY_IMAGES]
dots = fov['dots']
nuclei = fov['nuclei']
images = [primary_image, nuclei, dots]
# round, channel, x, y, z
primary_image.xarray.shape
# ## Show auxiliary images captured during the experiment
# 'dots' is a general stain for all possible transcripts. This image should correspond to the maximum projection of all color channels within a single imaging round. This auxiliary image is useful for registering images from multiple imaging rounds to this reference image. We'll see an example of this further on in the notebook.
image(dots.max_proj(Indices.ROUND, Indices.CH, Indices.Z))
# Below is a DAPI auxiliary image, which specifically marks nuclei. This is useful for cell segmentation later on in the processing.
image(nuclei.max_proj(Indices.ROUND, Indices.CH, Indices.Z))
# ## Examine the codebook
# Each 4-letter quaternary code (as read out from the 4 imaging rounds and 4 color channels) represents a gene. This relationship is stored in a codebook.
experiment.codebook
# ## Filter and scale raw data
#
# Now apply the white top hat filter to both the spots image and the individual channels. White top hat filtering enhances white spots on a black background.
# +
from starfish.image import Filter
# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
for img in images:
filt.run(img, verbose=True, in_place=True)
# -
# ## Register data
# For each imaging round, the max projection across color channels should look like the dots stain.
# Below, this computes the max projection across the color channels of an imaging round and learns the linear transformation that maps the resulting image onto the dots image.
#
# The Fourier shift registration approach can be thought of as maximizing the cross-correlation of two images.
#
# In the below table, Error is the minimum mean-squared error, and shift reports changes in x and y dimension.
# +
from starfish.image import Registration
registration = Registration.FourierShiftRegistration(
upsampling=1000,
reference_stack=dots,
verbose=True)
registered_image = registration.run(primary_image, in_place=False)
# -
# ## Use spot-detector to create 'encoder' table for standardized input to decoder
# Each pipeline exposes an encoder that translates an image into spots with intensities. This approach uses a Gaussian spot detector.
# +
from starfish.spots import SpotFinder
import warnings
# parameters to define the allowable gaussian sizes (parameter space)
min_sigma = 1
max_sigma = 10
num_sigma = 30
threshold = 0.01
p = SpotFinder.GaussianSpotDetector(
min_sigma=min_sigma,
max_sigma=max_sigma,
num_sigma=num_sigma,
threshold=threshold,
measurement_type='mean',
)
# detect triggers some numpy warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# blobs = dots; define the spots in the dots image, but then find them again in the stack.
blobs_image = dots.max_proj(Indices.ROUND, Indices.Z)
intensities = p.run(registered_image, blobs_image=blobs_image)
# -
# The Encoder table is the hypothesized standardized file format for the output of a spot detector, and is the first output file format in the pipeline that is not an image or set of images
# `attributes` is produced by the encoder and contains all the information necessary to map the encoded spots back to the original image
#
# `x, y` describe the position, while `x_min` through `y_max` describe the bounding box for the spot, which is refined by a radius `r`. This table also stores the intensity and spot_id.
# ## Decode
# Each assay type also exposes a decoder. A decoder translates each spot (spot_id) in the Encoder table into a gene (that matches a barcode) and associates this information with the stored position. The goal is to decode and output a quality score that describes the confidence in the decoding.
# There are hard and soft decodings -- hard decoding is just looking for the max value in the code book. Soft decoding, by contrast, finds the closest code by distance (in intensity). Because different assays each have their own intensities and error modes, we leave decoders as user-defined functions.
decoded = experiment.codebook.decode_per_round_max(intensities)
# ## Compare to results from paper
# Besides housekeeping genes, VIM and HER2 should be the most highly expressed, which is consistent here.
genes, counts = np.unique(decoded.loc[decoded[Features.PASSES_THRESHOLDS]][Features.TARGET], return_counts=True)
table = pd.Series(counts, index=genes).sort_values(ascending=False)
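# Display the most frequently detected genes (a small addition for readability; `table` is built in the cell above).
table.head(10)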
# ### Segment
# After calling spots and decoding their gene information, cells must be segmented to assign genes to cells. This paper used a seeded watershed approach.
# +
from starfish.image import Segmentation
dapi_thresh = .16 # binary mask for cell (nuclear) locations
stain_thresh = .22 # binary mask for overall cells // binarization of stain
min_dist = 57
stain = np.mean(registered_image.max_proj(Indices.CH, Indices.Z), axis=0)
stain = stain/stain.max()
nuclei_projection = nuclei.max_proj(Indices.ROUND, Indices.CH, Indices.Z)
seg = Segmentation.Watershed(
nuclei_threshold=dapi_thresh,
input_threshold=stain_thresh,
min_distance=min_dist
)
label_image = seg.run(registered_image, nuclei)
seg.show()
# -
# ### Visualize results
#
# This FOV was selected to make sure that we can visualize the tumor/stroma boundary, below this is described by pseudo-coloring `HER2` (tumor) and vimentin (`VIM`, stroma)
# +
from skimage.color import rgb2gray
GENE1 = 'HER2'
GENE2 = 'VIM'
rgb = np.zeros(registered_image.tile_shape + (3,))
rgb[:,:,0] = nuclei.max_proj(Indices.ROUND, Indices.CH, Indices.Z)
rgb[:,:,1] = dots.max_proj(Indices.ROUND, Indices.CH, Indices.Z)
do = rgb2gray(rgb)
do = do/(do.max())
image(do,size=10)
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
is_gene1 = decoded.where(decoded[Features.AXIS][Features.TARGET] == GENE1, drop=True)
is_gene2 = decoded.where(decoded[Features.AXIS][Features.TARGET] == GENE2, drop=True)
plt.plot(is_gene1.x, is_gene1.y, 'or')
plt.plot(is_gene2.x, is_gene2.y, 'ob')
plt.title(f'Red: {GENE1}, Blue: {GENE2}');
| notebooks/ISS_Pipeline_-_Breast_-_1_FOV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] iooxa={"id": {"block": "ljl07JNYSrIXE70uWYO0", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
# ## UTAH FORGE WELL 58-32
#
# **Well 58-32 was drilled to a depth of 7536 feet** in the Milford FORGE area during the summer of
# 2017 to confirm the reservoir characteristics inferred from existing wells and a wide variety of
# both new and legacy geologic and geophysical data. **Drill cuttings were collected and described
# at 10-foot intervals** and a robust **suite of geophysical logs** were run. Analyses show
# that the basement rock within the FORGE area consists of a suite of **intrusive rock types that are
# primarily granitic. Some diorite and monzodiorite was also encountered**, as was a significant
# volume of rock with a more intermediate composition.
#
# The density of the granite and intermediate rock types typically ranges from **2.6 to
# 2.65 g/cm³**, but the higher gamma response of the **granitic rock (140–290 gAPI)** can often
# differentiate granitic compositions from **intermediate compositions (70–210 gAPI)**. The **higher
# density (2.7–3.0 g/cm³) and lower gamma values (50–80 gAPI) of the dioritic compositions** are
# more distinctive and greatly simplify identification.
#
# The various laboratory analyses and geophysical logs of the 58-32 well prove it was drilled into **low porosity/low permeability intrusive rock** with temperatures well within the U.S. Department of Energy-specified window of **175°–225°C (347°–437°F).** More details here https://utahforge.com/
#
# -
# ### Let's import the libraries. Remember that Lasio and Seaborn must be installed beforehand
# + iooxa={"id": {"block": "bCoQ217Se5IoWzAvnM6x", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}, "outputId": null}
import lasio
import pandas as pd
import numpy as np
#libraries for plots
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
# + [markdown] iooxa={"id": {"block": "NJ3M1nBKUzM2AXbRaoeZ", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
# ### Read 58-32 well logs with Lasio and inspect
# + iooxa={"id": {"block": "Z6bDDyAUxa2TGPCk6ENW", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}, "outputId": null}
reg_all = lasio.read('../alldata/58-32_main.las')
# -
reg_all.version
reg_all.curves
reg_all['SP']
reg_all.keys()
reg_all.data
# + [markdown] iooxa={"id": {"block": "dAR1AgfP4yyfzpXCRpNS", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
# ### From Lasio to Data Frame Pandas
# DataFrames in Pandas are two-dimensional tables with rows and columns that can be easily edited and manipulated.
# -
df_main = reg_all.df()
df_main
# + iooxa={"id": {"block": "uUC9Yb53FupxOfbextPD", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}, "outputId": {"block": "kdODK2dt28SaDB27bDxB", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
#Print the first 5 rows of the data frame with the header of the columns
df_main.head(5)
# -
#Print the last 10 rows of the data frame with the header of the columns
df_main.tail(10)
# statistics
df_main.describe()
#parameters from only 1 column
df_main.AF10.std()
# ### Create a dataset only with GR, SP, AT10, AT90, RHOZ, NPHI, CTEM
df_mini = df_main[['GR', 'SP', 'AT10', 'AT90', 'RHOZ', 'NPHI', 'CTEM']]
df_mini.describe()
df_mini['CTEM_C']= ((df_mini['CTEM']-32)*5)/9
df_mini.info()
count_neg = (df_mini.RHOZ < 0).sum()
count_neg
df_mini.loc[(df_mini['RHOZ'] < 0), 'RHOZ']=np.nan
count_neg = (df_mini.RHOZ < 0).sum()
count_neg
# GR-based labels used below:
# - Unknown LowGR (< 50 gAPI)
# - Dioritic Comp (50–80 gAPI)
# - Intermediate Comp (80–140 gAPI)
# - Granite (140–290 gAPI)
# - Unknown HighGR (> 290 gAPI)
# +
conditions = [
(df_mini['GR'] <= 50),
(df_mini['GR'] > 50) & (df_mini['GR'] <= 80),
(df_mini['GR'] > 80) & (df_mini['GR'] <= 140),
(df_mini['GR'] > 140) & (df_mini['GR'] <= 290),
(df_mini['GR'] > 290)
]
# create a list of the values we want to assign for each condition
values = ['Unknown LowGR', 'Dioritic Comp', 'Intermediate Comp', 'Granite', 'Unknown HighGR' ]
# create a new column and use np.select to assign values to it using our lists as arguments
df_mini['Labels'] = np.select(conditions, values)
# -
df_mini.sample(10)
# +
#statistics grouped by Labels
df_mini[['Labels','GR', 'SP', 'AT10', 'AT90', 'RHOZ', 'NPHI', 'CTEM', 'CTEM_C']].groupby('Labels').mean()
# + [markdown] iooxa={"id": {"block": "jWnAnS4nJBZrB2ELssCF", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
# ### Read thermal conductivity and mineralogy data measured in drill cuttings. CAUTION: depths are in meters and need to be converted to feet
# ##### Full report https://ugspub.nr.utah.gov/publications/misc_pubs/mp-169/mp-169-l.pdf
# +
TC_coredata = pd.read_csv ('../alldata/58-32_thermal_conductivity_data.csv', index_col=1)
XRD_coredata = pd.read_csv ('../alldata/58-32_xray_diffraction_data.csv', index_col=1)
#TC_coredata.head()
XRD_coredata.columns
# -
TC_coredata.index
XRD_coredata.index
result = pd.concat([XRD_coredata, TC_coredata], axis=1, sort=False)
result.columns
cutt_data = result[['Illite','Plagioclase', 'K-feldspar',
'Quartz', 'matrix thermal conductivity (W/m deg C)']]
cutt_data.index=(3.28084*cutt_data.index) #m to ft
#cutt_data.loc[(cutt_data =='tr')]=np.nan
cutt_data=cutt_data.replace('tr', np.nan)
cutt_data.columns=['Illi', 'Plag', 'K-feld', 'Qz', 'TC']
cutt_data.info()
cutt_data.sample(5)
# # Visualization
# +
#let's start with something simple (xplot, pie, 1 histogram...)
# -
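# For instance, a quick GR–RHOZ crossplot colored by the GR-based labels created above. This is a simple illustrative sketch (it assumes a seaborn version that provides `scatterplot`), not part of the original layout work.
# +
# Simple crossplot sketch: bulk density vs. gamma ray, colored by the GR-based labels.
fig, ax = plt.subplots(figsize=(8, 6))
sns.scatterplot(data=df_mini, x='GR', y='RHOZ', hue='Labels', s=10, ax=ax)
ax.set_xlabel('GR (gAPI)')
ax.set_ylabel('RHOZ (g/cm3)')
plt.show()
# -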
X=df_mini[['GR', 'SP', 'AT10', 'AT90', 'RHOZ', 'NPHI', 'CTEM_C']]
# + iooxa={"id": {"block": "DXyNFHJcBxR9L9SUiTrl", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}, "outputId": {"block": "yw1SFb2eRh0YPgQh6tmS", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
#plotting the statistic using Seaborn
color = ['#2ea869', '#0a0a0a', '#ea0606','#1577e0', '#6e787c','#ea0606',
'#ed8712']
sns.set(font_scale=1)
cols = X.columns
n_row = len(cols)
n_col = 2
n_sub = 1
fig = plt.figure(figsize=(10,20))
for i in range(len(cols)):
plt.subplots_adjust(left=-0.3, right=1.3, bottom=-0.3, top=1.3)
plt.subplot(n_row, n_col, n_sub)
sns.distplot(X[cols[i]],norm_hist=False,kde=False, color=color[i],
label=['mean '+str('{:.2f}'.format(X.iloc[:,i].mean()))
+'\n''std '+str('{:.2f}'.format(X.iloc[:,i].std()))
+'\n''min '+str('{:.2f}'.format(X.iloc[:,i].min()))
+'\n''max '+str('{:.2f}'.format(X.iloc[:,i].max()))])
n_sub+=1
plt.legend()
plt.show()
# +
#correlation matrix
corr = df_mini.corr() #exclude any string data type
#figure parameters
fig, ax = plt.subplots(figsize=(8,6))
sns.heatmap(corr, ax=ax, cmap="magma")
#plt.grid()
plt.show()
# + [markdown] iooxa={"id": {"block": "ytvHl7fnOy1HLm624IXR", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
# ### Create a function that would create a layout with basic logs and core data
# + iooxa={"id": {"block": "9mv3ARJQuI40H3MYf0FZ", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}, "outputId": null}
#basic plot to inspect data
def make_layout_tc (log_df, cuttings_df):
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=5, sharey=True, squeeze=True, figsize=(15, 15), gridspec_kw={'wspace': 0.25})
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.975, top=0.7, wspace=0.2, hspace=0.2)
axs[0].set_ylabel('Depth (ft)')
axs[0].invert_yaxis()
axs[0].get_xaxis().set_visible(False)
# First track GR/SP logs to display
ax1 = axs[0].twiny()
ax1.plot(log_df.GR, log_df.index, '-', color='#2ea869', linewidth=0.5)
ax1.set_xlim(0,450)
ax1.set_xlabel('GR (API)', color='#2ea869')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.075))
ax2 = axs[0].twiny()
ax2.plot(log_df.SP, log_df.index, '-', color='#0a0a0a', linewidth=0.7)
ax2.set_xlim(-200,200)
ax2.set_xlabel('SP(mV)', color='#0a0a0a')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.0))
ax2.grid(True)
axs[0].get_xaxis().set_visible(False)
# Second track RHOZ/NPHI logs to display
ax1 = axs[1].twiny()
ax1.plot(log_df.RHOZ, log_df.index, '-', color='#ea0606', linewidth=0.5)
ax1.set_xlim(1.5,3.0)
ax1.set_xlabel('RHOZ (g/cm3)', color='#ea0606')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.075))
ax2 = axs[1].twiny()
ax2.plot(log_df.NPHI, log_df.index, '-', color='#1577e0', linewidth=0.5)
ax2.set_xlim(1,0)
ax2.set_xlabel('NPHI (v/v)', color='#1577e0')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.0))
ax2.grid(True)
axs[1].get_xaxis().set_visible(False)
# Third track Resistivities
ax1 = axs[2].twiny()
ax1.plot(log_df.AT10, log_df.index, '-', color='#6e787c', linewidth=0.5)
ax1.set_xlim(0.1,100000)
ax1.set_xlabel('AT10 (ohm.m)', color='#6e787c')
ax1.set_xscale('log')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.075))
ax2 = axs[2].twiny()
ax2.plot(log_df.AT90, log_df.index, '-', color='#ea0606', linewidth=0.5)
ax2.set_xlim(0.1,100000)
ax2.set_xlabel('AT90 (ohm.m)', color='#ea0606')
ax2.set_xscale('log')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.0))
ax2.grid(True)
axs[2].get_xaxis().set_visible(False)
# Fourth track XRD to display
ax1 = axs[3].twiny()
ax1.plot(cuttings_df.Qz, cuttings_df.index, 'o', color='#eac406')
ax1.set_xlim(0,50)
ax1.set_xlabel('Quartz %', color='#eac406')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.075))
ax2 = axs[3].twiny()
ax2.plot(cuttings_df.Illi, cuttings_df.index, 'o', color='#94898c')
ax2.set_xlim(0,50)
ax2.set_xlabel('Illite %', color='#94898c')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.0))
ax2.grid(True)
axs[3].get_xaxis().set_visible(False)
# Fifth track Temp/TC to display
ax1 = axs[4].twiny()
ax1.plot(cuttings_df.TC, cuttings_df.index, 'o', color='#6e787c')
ax1.set_xlim(0,5)
ax1.set_xlabel('Matrix TC Measured W/mC', color='#6e787c')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.075))
ax2 = axs[4].twiny()
ax2.plot(log_df.CTEM_C, log_df.index, '-', color='#ed8712')
ax2.set_xlim(20,200)
ax2.set_xlabel('Temp degC', color='#ed8712')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.0))
ax2.grid(True)
axs[4].get_xaxis().set_visible(False)
fig.suptitle('Well Data for UTAH FORGE 58-32',weight='bold', fontsize=20, y=0.85);
plt.show()
# + iooxa={"id": {"block": "J0Fjsd3Eq1wwDz69GMxG", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}, "outputId": {"block": "cITmpozD1QjaBPwgARYe", "project": "anKPrTxY08dBACBwy7Ui", "version": 1}}
make_layout_tc (df_mini, cutt_data)
| notebooks/.ipynb_checkpoints/Tutorial_pyhton_transform21_UtahForge_58-32_well-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: proj_env
# language: python
# name: proj_env
# ---
from fastquant import get_stock_data
from gaussian_hmm import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import time
import sys
import os
with open('pass.txt') as f:
password = f.read()
password = password.strip('\n')
f = open("tickers.txt", "r")
tickers = [s.strip('\n') for s in f.readlines()]
tickers
for ticker in tickers:
print(get_stock_data(ticker, '2017-01-01', '2021-05-12'))
import smtplib
def send_email(sent_to, message):
sender = '<EMAIL>'
gmail_user = '<EMAIL>'
sent_from = '<EMAIL>'
email_text = message
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(gmail_user, password)
server.sendmail(sent_from, sent_to, email_text)
server.close()
recievers = [('rowan','<EMAIL>')]
from datetime import datetime, timedelta
quant_format = '%Y-%m-%d'
today = datetime.now()
training_start = today - timedelta(days=1825)
training_start
today
training_end = today - timedelta(days=1)
training_start = training_start.strftime(quant_format)
training_start
training_end = training_end.strftime(quant_format)
training_end
tomorrow = today + timedelta(days=1)
today = today.strftime(quant_format)
tomorrow = tomorrow.strftime(quant_format)
training_start
today
tomorrow
training_data = get_stock_data('AAPL', training_start, today)
test_data = get_stock_data('AAPL', today, tomorrow)
training_data
test_data
params = {'n_components': 2,
'algorithm': 'map',
'n_iter': 100,
'd': 5,
'name':'GHMM'}
ghmm = GHMM(params=params)
ghmm.train(training_data)
preds,_ = ghmm.predict(test_data)
preds
prev_close = training_data['close'].values[-1]
pred_close = preds[0]
change = (pred_close-prev_close)/prev_close
change
open_price = test_data['open'].values[0]
frac_change = (pred_close-open_price)/open_price
frac_change
def get_data(ticker):
today = datetime.now()
training_start = today - timedelta(days=1825)
training_start = training_start.strftime(quant_format)
tomorrow = today + timedelta(days=1)
today = today.strftime(quant_format)
tomorrow = tomorrow.strftime(quant_format)
training_data = get_stock_data(ticker, training_start, today)
test_data = get_stock_data(ticker, today, tomorrow)
return training_data, test_data
def get_prediction(ticker):
params = {'n_components': 2,
'algorithm': 'map',
'n_iter': 100,
'd': 5,
'name':'GHMM'}
ghmm = GHMM(params=params)
train,test = get_data(ticker)
ghmm.train(train)
pred,_ = ghmm.predict(test)
open_price = test['open'].values[0]
pred_close = pred[0]
frac_change = (pred_close-open_price)/open_price
return frac_change, open_price, pred_close
sender = '<EMAIL>'
start = time.time()
picks = ""
for ticker in tickers:
c,o,p = get_prediction(ticker)
picks += f'Ticker: {ticker}\nOpen: {round(o,3)}\nPred Close: {round(p,3)}\nPred Percent Change: {round(c*100,3)}\n\n'
end = time.time()
print(round(end-start,4))
for name,email in recievers:
message = f'From: From Rowan <{sender}>\nTo: To {name} <{email}>\nSubject: Daily Picks\n\n{picks}'
print(message)
send_email(email,message)
| trading/email_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
res_folders=os.listdir('../../results/')
model_folder='/home/mara/multitask_adversarial/results//'
CONCEPT=['domain', 'ncount','narea', 'full_contrast']
import keras
keras.__version__
from sklearn.metrics import accuracy_score
'../../doc/data_shuffle.csv'
# +
## Loading OS libraries to configure server preferences
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
import setproctitle
SERVER_NAME = 'ultrafast'
EXPERIMENT_TYPE='test_domain'
import time
import sys
import shutil
## Adding PROCESS_UC1 utilities
sys.path.append('../../lib/TASK_2_UC1/')
from models import *
from util import otsu_thresholding
from extract_xml import *
from functions import *
sys.path.append('../../lib/')
from mlta import *
import math
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(0)# str(hvd.local_rank())
keras.backend.set_session(tf.Session(config=config))
verbose=1
"""loading dataset files"""
#rank = MPI.COMM_WORLD.rank
cam16 = hd.File('/home/mara/adversarialMICCAI/data/ultrafast/cam16_500/patches.h5py', 'r', libver='latest', swmr=True)
all500 = hd.File('/home/mara/adversarialMICCAI/data/ultrafast/all500/patches.h5py', 'r', libver='latest', swmr=True)
extra17 = hd.File('/home/mara/adversarialMICCAI/data/ultrafast/extra17/patches.h5py','r', libver='latest', swmr=True)
tumor_extra17=hd.File('/home/mara/adversarialMICCAI/data/ultrafast/1129-1155/patches.h5py', 'r', libver='latest', swmr=True)
test2 = hd.File('/mnt/nas2/results/IntermediateResults/Camelyon/ultrafast/test_data2/patches.hdf5', 'r', libver='latest', swmr=True)
pannuke= hd.File('/mnt/nas2/results/IntermediateResults/Camelyon/pannuke/patches_fix.hdf5', 'r', libver='latest', swmr=True)
global datasetss
datasetss={'cam16':cam16,'all500':all500,'extra17':extra17, 'tumor_extra17':tumor_extra17, 'test_data2': test2, 'pannuke':pannuke}
global concept_db
concept_db = hd.File('../../data/normalized_cmeasures/concept_values_def.h5py','r')
#SYSTEM CONFIGS
CONFIG_FILE = '../../doc/config.cfg'
COLOR = True
BATCH_SIZE = 32
# SAVE FOLD
f=open(model_folder+"/seed.txt","r")
seed=1001#int(f.read())
if verbose: print(seed)
#f.write(str(seed))
f.close()
# SET PROCESS TITLE
setproctitle.setproctitle('{}'.format(EXPERIMENT_TYPE))
# SET SEED
np.random.seed(seed)
tf.set_random_seed(seed)
# DATA SPLIT CSVs
train_csv=open('/mnt/nas2/results/IntermediateResults/Camelyon/train_shuffle.csv', 'r') # How is the encoding of .csv files ?
val_csv=open('/mnt/nas2/results/IntermediateResults/Camelyon/val_shuffle.csv', 'r')
test_csv=open('/mnt/nas2/results/IntermediateResults/Camelyon/test_shuffle.csv', 'r')
train_list=train_csv.readlines()
val_list=val_csv.readlines()
test_list=test_csv.readlines()
test2_csv = open('/mnt/nas2/results/IntermediateResults/Camelyon/test2_shuffle.csv', 'r')
test2_list=test2_csv.readlines()
test2_csv.close()
train_csv.close()
val_csv.close()
test_csv.close()
data_csv=open('../../doc/data_shuffle.csv', 'r')
data_list=data_csv.readlines()
data_csv.close()
# STAIN NORMALIZATION
def get_normalizer(patch, save_folder='../../results/'):
normalizer = ReinhardNormalizer()
normalizer.fit(patch)
np.save('{}/normalizer'.format(save_folder),normalizer)
np.save('{}/normalizing_patch'.format(save_folder), patch)
#print('Normalisers saved to disk.')
return normalizer
def normalize_patch(patch, normalizer):
return np.float64(normalizer.transform(np.uint8(patch)))
global normalizer
db_name, entry_path, patch_no = get_keys(data_list[0])
normalization_reference_patch = datasetss[db_name][entry_path][patch_no]
normalizer = get_normalizer(normalization_reference_patch, save_folder='../../results/')
"""
Batch generators:
They load a patch list: a list of file names and paths.
They use the list to create a batch of 32 samples.
"""
# Retrieve Concept Measures
def get_concept_measure(db_name, entry_path, patch_no, measure_type=''):
if measure_type=='domain':
return get_domain(db_name, entry_path)
path=db_name+'/'+entry_path+'/'+str(patch_no)+'/'+measure_type.strip(' ')
try:
cm=concept_db[path][0]
return cm
except:
print("[ERR]: {}, {}, {}, {} with path {}".format(db_name, entry_path, patch_no, measure_type, path))
#import pdb; pdb.set_trace()
return 1.
# BATCH GENERATORS
import keras.utils
class DataGenerator(keras.utils.Sequence):
def __init__(self, patch_list, concept=CONCEPT, batch_size=32, shuffle=True, data_type=0):
self.batch_size=batch_size
self.patch_list=patch_list
self.shuffle=shuffle
self.concept = concept
self.data_type=data_type
self.on_epoch_end()
def __len__(self):
return int(np.floor(len(self.patch_list)/self.batch_size))
def __getitem__(self, index):
indexes=self.indexes[index*self.batch_size:(index+1)*self.batch_size]
patch_list_temp=[self.patch_list[k] for k in indexes]
self.patch_list_temp=patch_list_temp
return self.__data_generation(self), None
def get(self, index):
indexes=self.indexes[index*self.batch_size:(index+1)*self.batch_size]
patch_list_temp=[self.patch_list[k] for k in indexes]
self.patch_list_temp=patch_list_temp
return self.__data_generation(self), None
def on_epoch_end(self):
self.indexes = np.arange(len(self.patch_list))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, patch_list_temp):
patch_list_temp=self.patch_list_temp
batch_x=np.zeros((len(patch_list_temp), 224,224,3))
batch_y=np.zeros(len(patch_list_temp))
i=0
for line in patch_list_temp:
db_name, entry_path, patch_no = get_keys(line)
patch=datasetss[db_name][entry_path][patch_no]
patch=normalize_patch(patch, normalizer)
patch=keras.applications.inception_v3.preprocess_input(patch)
label = get_class(line, entry_path)
if self.data_type!=0:
label=get_test_label(entry_path)
batch_x[i]=patch
batch_y[i]=label
i+=1
generator_output=[batch_x, batch_y]
for c in self.concept:
batch_concept_values=np.zeros(len(patch_list_temp))
i=0
for line in patch_list_temp:
db_name, entry_path, patch_no = get_keys(line)
batch_concept_values[i]=get_concept_measure(db_name, entry_path, patch_no, measure_type=c)
i+=1
if c=='domain':
batch_concept_values=keras.utils.to_categorical(batch_concept_values, num_classes=7)
generator_output.append(batch_concept_values)
return generator_output
# -
#import matplotlib as mpl
#mpl.use('Agg')
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import warnings
warnings.filterwarnings("ignore")
import logging
logging.getLogger('tensorflow').disabled = True
from keras import *
import setproctitle
SERVER_NAME = 'ultrafast'
import time
import sys
import shutil
## Adding PROCESS_UC1 utilities
sys.path.append('../../lib/TASK_2_UC1/')
from models import *
from util import otsu_thresholding
from extract_xml import *
from functions import *
sys.path.append('../../lib/')
from mlta import *
import math
import keras.callbacks as callbacks
from keras.callbacks import Callback
# +
keras.backend.clear_session()
"""
Get trainable model with Epistemic Uncertainty Weighted Loss
"""
def get_trainable_model(baseline_model):
inp = keras.layers.Input(shape=(224,224,3,), name='inp')
outputs = baseline_model(inp)
n_extra_concepts = len(outputs) -2
print(n_extra_concepts)
y_true=keras.layers.Input(shape=(1,),name='y_true')
domain_true=keras.layers.Input(shape=(7,),name='domain_true')
extra_concepts_true=[]
for i in range(n_extra_concepts):
print('extra_{}'.format(i))
extra_true=keras.layers.Input(shape=(1,), name='extra_{}'.format(i))
extra_concepts_true.append(extra_true)
new_model_input=[inp, y_true, domain_true]
loss_inputs=[y_true, domain_true]
for i in range(len(extra_concepts_true)):
new_model_input.append(extra_concepts_true[i])
loss_inputs.append(extra_concepts_true[i])
for out_ in outputs:
loss_inputs.append(out_)
out = CustomMultiLossLayer(nb_outputs=len(outputs), new_folder='')(loss_inputs)
return Model(input=new_model_input, output=out)
"""
LOSS FUNCTIONS
"""
def keras_mse(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.mean_squared_error(y_true, y_pred))
def bbce(y_true, y_pred):
# we use zero weights to set the loss to zero for unlabeled data
verbose=0
zero= tf.constant(-1, dtype=tf.float32)
where = tf.not_equal(y_true, zero)
where = tf.reshape(where, [-1])
indices=tf.where(where) #indices where the item of y_true is NOT -1
indices = tf.reshape(indices, [-1])
sliced_y_true = tf.nn.embedding_lookup(y_true, indices)
sliced_y_pred = tf.nn.embedding_lookup(y_pred, indices)
n1 = tf.shape(indices)[0] #number of train images in batch
batch_size = tf.shape(y_true)[0]
n2 = batch_size - n1 #number of test images in batch
sliced_y_true = tf.reshape(sliced_y_true, [n1, -1])
n1_ = tf.cast(n1, tf.float32)
n2_ = tf.cast(n2, tf.float32)
multiplier = (n1_+ n2_) / n1_
zero_class = tf.constant(0, dtype=tf.float32)
where_class_is_zero=tf.cast(tf.reduce_sum(tf.cast(tf.equal(sliced_y_true, zero_class), dtype=tf.float32)), dtype=tf.float32)
if verbose:
where_class_is_zero=tf.Print(where_class_is_zero,[where_class_is_zero],'where_class_is_zero: ')
class_weight_zero = tf.cast(tf.divide(n1_, 2. * tf.cast(where_class_is_zero, dtype=tf.float32)+0.001), dtype=tf.float32)
if verbose:
class_weight_zero=tf.Print(class_weight_zero,[class_weight_zero],'class_weight_zero: ')
one_class = tf.constant(1, dtype=tf.float32)
where_class_is_one=tf.cast(tf.reduce_sum(tf.cast(tf.equal(sliced_y_true, one_class), dtype=tf.float32)), dtype=tf.float32)
if verbose:
where_class_is_one=tf.Print(where_class_is_one,[where_class_is_one],'where_class_is_one: ')
n1_=tf.Print(n1_,[n1_],'n1_: ')
class_weight_one = tf.cast(tf.divide(n1_, 2. * tf.cast(where_class_is_one,dtype=tf.float32)+0.001), dtype=tf.float32)
class_weight_zero = tf.constant(23477.0/(23477.0+123820.0), dtype=tf.float32)
class_weight_one = tf.constant(123820.0/(23477.0+123820.0), dtype=tf.float32)
A = tf.ones(tf.shape(sliced_y_true), dtype=tf.float32) - sliced_y_true
A = tf.scalar_mul(class_weight_zero, A)
B = tf.scalar_mul(class_weight_one, sliced_y_true)
class_weight_vector=A+B
ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=sliced_y_true,logits=sliced_y_pred)
ce = tf.multiply(class_weight_vector,ce)
return tf.reduce_mean(ce)
from keras.initializers import Constant
global domain_weight
global main_task_weight
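# Note: CustomMultiLossLayer below keeps one trainable log-variance per output; each task loss
# is scaled by exp(-log_var) and log_var itself is added as a penalty, so the relative weighting
# of the main task, the adversarial domain head and the extra concept regressions is learned
# during training.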
class CustomMultiLossLayer(Layer):
def __init__(self, new_folder='', nb_outputs=2, **kwargs):
self.nb_outputs = nb_outputs
self.is_placeholder = True
super(CustomMultiLossLayer, self).__init__(**kwargs)
def build(self, input_shape=None):
# initialise log_vars
self.log_vars = []
for i in range(self.nb_outputs):
self.log_vars += [self.add_weight(name='log_var' + str(i), shape=(1,),
initializer=Constant(0.), trainable=True)]
super(CustomMultiLossLayer, self).build(input_shape)
"""
def multi_loss(self, ys_true, ys_pred):
assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
loss = 0
for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
precision = K.exp(-log_var[0])
loss += K.sum(precision * (y_true - y_pred)**2. + log_var[0], -1)
return K.mean(loss)
"""
def multi_loss(self, ys_true, ys_pred):
assert len(ys_true) == self.nb_outputs and len(ys_pred) == self.nb_outputs
loss = 0
i=0
for y_true, y_pred, log_var in zip(ys_true, ys_pred, self.log_vars):
precision =keras.backend.exp(-log_var[0])
if i==0:
pred_loss = bbce(y_true, y_pred)
term = main_task_weight*precision*pred_loss + main_task_weight*0.5 * log_var[0]
#term=tf.Print(keras.backend.mean(term), [keras.backend.mean(term)], 'mean bbce: ')
elif i==1:
# I need to find a better way for this
pred_loss = keras.losses.categorical_crossentropy(y_true, y_pred)
#keras_mse(y_true, y_pred)
term = domain_weight * precision * pred_loss + domain_weight * log_var[0]
#term=tf.Print(keras.backend.mean(term), [keras.backend.mean(term)], 'mean cce: ')
else:
pred_loss = keras_mse(y_true, y_pred)
#pred_loss=tf.Print(pred_loss, [pred_loss], 'MSE: ')
term = 0.5 * precision * pred_loss + 0.5 * log_var[0]
loss+=term
term = 0.
i+=1
return keras.backend.mean(loss)
def call(self, inputs):
ys_true = inputs[:self.nb_outputs]
ys_pred = inputs[self.nb_outputs:]
loss = self.multi_loss(ys_true, ys_pred)
self.add_loss(loss, inputs=inputs)
return keras.backend.concatenate(inputs, -1)
"""
EVALUATION FUNCTIONs
"""
def accuracy_domain(y_true,y_pred):
y_p_r=np.round(y_pred)
acc = np.equal(y_p_r, y_true)**1.
acc = np.mean(np.float32(acc))
return acc
def my_sigmoid(x):
return 1 / (1 + np.exp(-x))
def my_accuracy_np(y_true, y_pred):
sliced_y_pred = my_sigmoid(y_pred)
y_pred_rounded = np.round(sliced_y_pred)
acc = np.equal(y_pred_rounded, y_true)**1.
acc = np.mean(np.float32(acc))
return acc
def r_square_np(y_true, y_pred):
SS_res = np.sum(np.square(y_true - y_pred))
SS_tot = np.sum(np.square(y_true - np.mean(y_true)))
r2_mine=( 1 - SS_res/(SS_tot + keras.backend.epsilon()) )
return ( 1 - SS_res/(SS_tot + keras.backend.epsilon()) )
global report_val_acc
global report_val_r2
global report_val_mse
report_val_acc=[]
report_val_r2=[]
report_val_mse=[]
"""
Building guidable model
"""
def get_baseline_model(hp_lambda=0., c_list=[]):
base_model = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(224,224,3))
layers_list=['conv2d_92', 'conv2d_93', 'conv2d_88', 'conv2d_89', 'conv2d_86']
#layers_list=[]
for i in range(len(base_model.layers[:])):
layer=base_model.layers[i]
if layer.name in layers_list:
print layer.name
layer.trainable=True
else:
layer.trainable = False
feature_output=base_model.layers[-1].output
gap_layer_output = keras.layers.GlobalAveragePooling2D()(feature_output)
feature_output = Dense(2048, activation='relu', name='finetuned_features1',kernel_regularizer=keras.regularizers.l2(0.01))(gap_layer_output)
feature_output = keras.layers.Dropout(0.8, noise_shape=None, seed=None)(feature_output)
feature_output = Dense(512, activation='relu', name='finetuned_features2',kernel_regularizer=keras.regularizers.l2(0.01))(feature_output)
feature_output = keras.layers.Dropout(0.8, noise_shape=None, seed=None)(feature_output)
feature_output = Dense(256, activation='relu', name='finetuned_features3',kernel_regularizer=keras.regularizers.l2(0.01))(feature_output)
feature_output = keras.layers.Dropout(0.8, noise_shape=None, seed=None)(feature_output)
grl_layer=GradientReversal(hp_lambda=hp_lambda)
feature_output_grl = grl_layer(feature_output)
domain_adversarial = keras.layers.Dense(7, activation = keras.layers.Activation('softmax'), name='domain_adversarial')(feature_output_grl)
finetuning = Dense(1,name='predictions')(feature_output)
## here you need to check how many other concepts you have apart from domain adversarial
# then you add one layer per each.
output_nodes=[finetuning, domain_adversarial]
for c in c_list:
if c!='domain':
concept_layer= keras.layers.Dense(1, activation = keras.layers.Activation('linear'), name='extra_{}'.format(c.strip(' ')))(feature_output)
output_nodes.append(concept_layer)
model = Model(input=base_model.input, output=output_nodes)
model.grl_layer=grl_layer
return model
# -
main_task_weight=1.
domain_weight = 1. #e-100
model = get_baseline_model(hp_lambda=1., c_list=CONCEPT)
t_m = get_trainable_model(model)
#model=get_baseline_model(hp_lambda=1., c_list=CONCEPT)
#t_m = get_trainable_model(model)
t_m.load_weights('{}/best_model.h5'.format(model_folder))
BATCH_SIZE = 32
from sklearn.metrics import auc
from sklearn.metrics import confusion_matrix
def evaluate(pred_, save_file=None, c_list=CONCEPT):
y_true = pred_[:,0]
domain_true = pred_[:,1:8]
true_extra_concepts={}
if len(c_list)>1:
for i in range(1, len(c_list)):
true_extra_concepts[i]=pred_[:,8+i]
#print(i)
y_pred = pred_[:,8+i]
val_acc = my_accuracy_np(y_true, y_pred)
domain_pred = pred_[:, 8+i+1:8+i+1+7]
last_index=8+i+7
pred_extra_concepts={}
if len(c_list)>1:
for i in range(1, len(c_list)):
pred_extra_concepts[i]=pred_[:,last_index+i]
val_acc_d = accuracy_domain(domain_true, domain_pred)
val_r2={}
val_mse={}
if len(c_list)>1:
for i in range(1, len(c_list)):
val_r2[i] = r_square_np(true_extra_concepts[i], pred_extra_concepts[i])
val_mse[i] = compute_mse(true_extra_concepts[i], pred_extra_concepts[i])
extra_string=''
if len(c_list)>1:
for i in range(1, len(c_list)):
extra_string=extra_string+" {}: r2 {}, mse {}; ".format(i, val_r2[i], val_mse[i])
#print("Acc: {}, Acc domain: {}\n".format(val_acc, val_acc_d)+extra_string)
if save_file is not None:
save_file.write("Val acc: {}, acc_domain: {}\n".format(val_acc, val_acc_d)+extra_string)
return y_true, domain_true, true_extra_concepts, y_pred, domain_pred, pred_extra_concepts
def compute_mse(labels, predictions):
errors = labels - predictions
sum_squared_errors = np.sum(np.asarray([pow(errors[i],2) for i in range(len(errors))]))
mse = sum_squared_errors / len(labels)
return mse
def evaluate_model(d_list, model, batch_size=BATCH_SIZE, test_type=''):
batch_size=32
t_gen=DataGenerator(d_list, concept=CONCEPT, batch_size=BATCH_SIZE, data_type=0)
steps=len(d_list)//batch_size
initial_lr = 1e-4
opt = keras.optimizers.SGD(lr=initial_lr, momentum=0.9, nesterov=True)
compile_model(t_m,opt,loss=None,metrics=None)
callbacks = []
y_true=np.zeros(len(d_list))
y_pred=np.zeros((len(d_list),1))
N=0
all_true_domain=[]
all_pred_domain=[]
all_true_extra_cm={}#[]#np.zeros(len(d_list))
all_pred_extra_cm={}#[]#np.zeros(len(d_list))
batch_counter=0
while N<len(d_list):
#print N
input_,_ = t_gen.__getitem__(batch_counter)
pred_ = t_m.predict(input_)
y_true_batch, d_true, true_ec, y_pred_batch, d_pred, pred_ec = evaluate(pred_)
#maybe some import pdb here
y_true[N:N+len(y_true_batch)]=y_true_batch.reshape(len(y_true_batch))
y_pred[N:N+len(y_pred_batch)]=y_pred_batch.reshape(len(y_pred_batch),1)
all_true_domain.append(d_true)
all_pred_domain.append(d_pred)
for extra_concept in true_ec.keys():
try:
all_true_extra_cm[extra_concept].append(true_ec[extra_concept])
except:
all_true_extra_cm[extra_concept]=[]
all_true_extra_cm[extra_concept].append(true_ec[extra_concept])
for extra_concept in pred_ec.keys():
try:
all_pred_extra_cm[extra_concept].append(pred_ec[extra_concept])
except:
all_pred_extra_cm[extra_concept]=[]
all_pred_extra_cm[extra_concept].append(pred_ec[extra_concept])
N+=len(y_pred_batch)
batch_counter+=1
#import pdb; pdb.set_trace()
y_true=y_true.reshape((len(d_list),1))
#y_pred=y_pred.reshape((len(d_list),1))
acc = my_accuracy(y_true, y_pred).eval(session=tf.Session())
sliced_y_pred = tf.sigmoid(y_pred)
y_pred_rounded = K.round(sliced_y_pred)
acc_sc = accuracy_score(y_pred_rounded.eval(session=tf.Session()), y_true)
print('accuracy: ', acc_sc)
y_pred = sliced_y_pred.eval(session=tf.Session())
#sliced_y_pred = tf.sigmoid(y_pred)
#y_pred_rounded = K.round(sliced_y_pred)
auc_score=sklearn.metrics.roc_auc_score(y_true,sliced_y_pred.eval(session=tf.Session()))
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(1):
fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_true.ravel(), y_pred.ravel())
roc_auc["micro"] = auc_score
plt.figure()
lw = 2
plt.plot(fpr[0], tpr[0], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
auc_record = open('{}/auc_{}.txt'.format(model_folder,test_type), 'w')
auc_record.write('{}'.format(roc_auc[0]))
auc_record.close()
return all_true_domain, all_true_extra_cm, all_pred_domain, all_pred_extra_cm
# ## Performance on Main Task
# ##### How do we do on the patch classification?
#keras.backend.get_session().run(tf.global_variables_initializer())
#t_m.load_weights(model_folder+'/best_model.h5')
all_true_domain, all_true_extra_cm, all_pred_domain, all_pred_extra_cm=evaluate_model(train_list[:1000],t_m, test_type='train')
all_true_domain_internal_test, all_true_extra_cm_internal_test, all_pred_domain_internal_test, all_pred_extra_cm_internal_test=evaluate_model(test_list,t_m, test_type='internal')
all_true_domain_external_test, all_true_extra_cm_external_test, all_pred_domain_external_test, all_pred_extra_cm_external_test=evaluate_model(test2_list,t_m, test_type='external')
# # Performance on Auxiliary tasks
# ## Are we learning the concepts?
def compute_rsquared(labels, predictions):
errors = labels - predictions
sum_squared_errors = np.sum(np.asarray([pow(errors[i],2) for i in range(len(errors))]))
# total sum of squares, TTS
average_y = np.mean(labels)
total_errors = labels - average_y
total_sum_squares = np.sum(np.asarray([pow(total_errors[i],2) for i in range(len(total_errors))]))
#rsquared is 1-RSS/TTS
rss_over_tts = sum_squared_errors/total_sum_squares
rsquared = 1-rss_over_tts
return rsquared
def compute_mse(labels, predictions):
errors = labels - predictions
sum_squared_errors = np.sum(np.asarray([pow(errors[i],2) for i in range(len(errors))]))
mse = sum_squared_errors / len(labels)
return mse
def accuracy_domain(y_true,y_pred):
y_p_r = np.asarray([np.argmax(y_pred[i,:]) for i in range(len(y_pred[:,0]))])
#y_p_r=np.round(y_pred)
y_true = np.asarray([np.argmax(y_true[i,:]) for i in range(len(y_true[:,0]))])
acc = np.equal(y_p_r, y_true)**1.
acc = np.mean(np.float32(acc))
return acc
cm_i=np.concatenate([true_cm for true_cm in all_true_domain_internal_test])
cm_p_i=np.concatenate([pred_cm for pred_cm in all_pred_domain_internal_test])
acc_d_i = accuracy_domain(cm_i, cm_p_i)
print('Internal: ', acc_d_i)
#r2_e = compute_rsquared(all_cm_e, all_p_cm_e)
#mse_e = compute_mse(all_cm_e, all_p_cm_e)
cm_e=np.concatenate([true_cm for true_cm in all_true_domain_external_test])
cm_p_e=np.concatenate([pred_cm for pred_cm in all_pred_domain_external_test])
acc_d_e = accuracy_domain(cm_e, cm_p_e)
print('External: ', acc_d_e)
test_type='internal'
auc_record = open('{}/concept_metrics_{}.txt'.format(model_folder,test_type), 'w')
auc_record.write('{}'.format(acc_d_i))
auc_record.close()
test_type='external'
auc_record = open('{}/concept_metrics_{}.txt'.format(model_folder,test_type), 'w')
auc_record.write('{}'.format(acc_d_e))
auc_record.close()
cm_t=np.concatenate([true_cm for true_cm in all_true_domain])
cm_p_t=np.concatenate([pred_cm for pred_cm in all_pred_domain])
acc_d_t = accuracy_domain(cm_t, cm_p_t)
print('Train: ', acc_d_t)
val_r2=np.load('{}/val_r2_log.npy'.format(model_folder))
plt.plot(val_r2)
history=np.load('{}/training_log.npy'.format(model_folder), allow_pickle=True).item()
plt.plot(history['loss'])
plt.plot(history['val_loss'])
f=open('{}/val_by_epoch.txt'.format(model_folder), 'r')
f_l=f.readlines()
val_acc=[]
val_r2=[]
val_mse=[]
for line in f_l:
acc=line.split('Val acc: ')[1].split(', r2')[0]
val_acc.append(acc)
r2=line.split(', r2:')[1].split(', mse:')[0]
mse=line.split(', mse:')[1].split('\n')[0]
val_r2.append(r2)
val_mse.append(mse)
plt.plot(np.asarray(val_acc, dtype=np.float32))
plt.plot(np.asarray(val_r2, dtype=np.float32))
plt.plot(np.asarray(val_mse, dtype=np.float32))
#
| notebooks/test/test_multitaskdomain.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project 2: Digit Recognition
#
# ## Statistical Machine Learning (COMP90051), Semester 2, 2017
#
# *Copyright the University of Melbourne, 2017*
# ### Submitted by: *<NAME>*
# ### Student number: *725439*
# ### Kaggle-in-class username: *your username here*
# In this project, you will be applying machine learning to recognise digits from real-world images. The project worksheet is a combination of text, pre-implemented code and placeholders where we expect you to add your code and answers. Your code should produce the desired result within a reasonable amount of time. Please follow the instructions carefully, and **write your code and give answers only where specifically asked**. In addition to worksheet completion, you are also expected to participate in a **live competition with other students in the class**. The competition will be run using an on-line platform called Kaggle.
# **Marking:** You can get up to 33 marks for Project 2. The sum of marks for Project 1 and Project 2 is then capped at 50 marks.
#
# **Due date:** Wednesday 11/Oct/17, 11:59pm AEST (LMS components); and Kaggle competition closes Monday 09/Oct/17, 11:59pm AEST.
#
# **Late submissions** will incur a 10% penalty per calendar day
#
# **Submission materials**
# - **Worksheet**: Fill in your code and answers within this IPython Notebook worksheet.
# - **Competition**: Follow the instructions provided in the corresponding section of this worksheet. Your competition submissions should be made via Kaggle website.
# - **Report**: The report about your competition entry should be submitted to the LMS as a PDF file (see format requirements in `2.2`).
# - **Code**: The source code behind your competition entry.
# The **Worksheet**, **Report** and **Code** should be bundled into a `.zip` file (not 7z, rar, tar, etc) and submitted in the LMS. Marks will be deducted for submitting files in other formats, or we may elect not to mark them at all.
#
# **Academic Misconduct:** Your submission should contain only your own work and ideas. Where asked to write code, you cannot re-use someone else's code, and should write your own implementation. We will be checking submissions for originality and will invoke the University’s <a href="http://academichonesty.unimelb.edu.au/policy.html">Academic Misconduct policy</a> where inappropriate levels of collusion or plagiarism are deemed to have taken place.
# **Table of Contents**
#
# 1. Handwritten Digit Recognition **(16 marks)**
# 1. Linear Approach
# 2. Basis Expansion
# 3. Kernel Perceptron
# 4. Dimensionality Reduction
#
# 2. Kaggle Competition **(17 marks)**
# 1. Making Submissions
# 2. Method Description
# ## 1. Handwritten Digit Recognition
# Handwritten digit recognition can be framed as a classification task: given a bitmap image as input, predict the digit type (0, 1, ..., 9). The pixel values in each position of the image form our features, and the digit type is the class. We are going to use a dataset where the digits are represented as *28 x 28* bitmap images. Each pixel value ranges between 0 and 1, and represents the monochrome ink intensity at that position. Each image matrix has been flattened into one long feature vector, by concatenating each row of pixels.
#
# In this part of the project, we will only use images of two digits, namely "7" and "9". As such, we will be working on a binary classification problem. *Throughout this first section, our solution is going to be based on the perceptron classifier.*
#
# Start by setting up working environment, and loading the dataset. *Do not override variable `digits`, as this will be used throughout this section.*
# +
# %pylab inline
digits = np.loadtxt('digits_7_vs_9.csv', delimiter=' ')
# -
# Take some time to explore the dataset. Note that each image of "7" is labeled as -1, and each image of "9" is labeled as +1.
# +
# extract a stack of 28x28 bitmaps
X = digits[:, 0:784]
# extract labels for each bitmap
y = digits[:, 784:785]
# display a single bitmap and print its label
bitmap_index = 0
plt.imshow(X[bitmap_index,:].reshape(28, 28), interpolation=None)
print(y[bitmap_index])
# -
# You can also display several bitmaps at once using the following code.
# +
def gallery(array, ncols):
nindex, height, width = array.shape
nrows = nindex//ncols
result = (array.reshape((nrows, ncols, height, width))
.swapaxes(1,2)
.reshape((height*nrows, width*ncols)))
return result
ncols = 10
result = gallery(X.reshape((300, 28, 28))[:ncols**2], ncols)
plt.figure(figsize=(10,10))
plt.imshow(result, interpolation=None)
# -
# ### 1.1 Linear Approach
# We are going to use perceptron for our binary classification task. Recall that perceptron is a linear method. Also, for this first step, we will not apply non-linear transformations to the data.
#
# Implement and fit a perceptron to the data above. You may use the implementation from *sklearn*, or implementation from one of our workshops. Report the error of the fit as the proportion of misclassified examples.
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
# +
## your code here
# -
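# For reference only (not a prescribed solution), a minimal sketch of this step could use scikit-learn's `Perceptron`; it assumes the `X` and `y` arrays loaded above and a reasonably recent scikit-learn version.
# +
from sklearn.linear_model import Perceptron

perceptron_sketch = Perceptron(max_iter=100, tol=None, random_state=0)
perceptron_sketch.fit(X, y.ravel())
# error of the fit = proportion of misclassified training examples
train_error = np.mean(perceptron_sketch.predict(X) != y.ravel())
print('Training error: {:.4f}'.format(train_error))
# -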
# One of the advantages of a linear approach is the ability to interpret results. To this end, plot the parameters learned above. Exclude the bias term if you were using it, set $w$ to be the learned perceptron weights, and run the following command.
plt.imshow(w.reshape(28,28), interpolation=None)
# In a few sentences, describe what you see, referencing which features are most important for making classification. Report any evidence of overfitting.
# <font color='red'>**Write your answer here ...**</font> (as a *markdown* cell)
# Split the data into training and heldout validation partitions by holding out a random 25% sample of the data. Evaluate the error over the course of a training run, and plot the training and validation error rates as a function of the number of passes over the training dataset.
#
# <br />
# <font color='red'>**Write your code in the cell below ...**</font>
# +
## your code here
# -
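# A hedged sketch of one way to do this, assuming `X`, `y` and scikit-learn as above: hold out 25% of the data with `train_test_split` and call `partial_fit` once per pass to record both error rates.
# +
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split

X_tr, X_val, y_tr, y_val = train_test_split(X, y.ravel(), test_size=0.25, random_state=0)

clf_sketch = Perceptron(random_state=0)
train_errors, val_errors = [], []
for epoch in range(50):
    # one pass over the training partition per call
    clf_sketch.partial_fit(X_tr, y_tr, classes=np.unique(y_tr))
    train_errors.append(np.mean(clf_sketch.predict(X_tr) != y_tr))
    val_errors.append(np.mean(clf_sketch.predict(X_val) != y_val))

plt.plot(train_errors, label='training error')
plt.plot(val_errors, label='validation error')
plt.xlabel('passes over the training data')
plt.ylabel('error rate')
plt.legend()
# -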
# In a few sentences, describe the shape of the curves, and compare the two. Now consider if we were to stop training early, can you choose a point such that you get the best classification performance? Justify your choice.
# <font color='red'>**Write your answer here ...**</font> (as a *markdown* cell)
# Now that we have tried a simple approach, we are going to implement several non-linear approaches to our task. Note that we are still going to use a linear method (the perceptron), but combine this with a non-linear data transformation. We start with basis expansion.
# ### 1.2 Basis Expansion
# Apply Radial Basis Function (RBF)-based transformation to the data, and fit a perceptron model. Recall that the RBF basis is defined as
#
# $$\varphi_l(\mathbf{x}) = \exp\left(-\frac{||\mathbf{x} - \mathbf{z}_l||^2}{\sigma^2}\right)$$
#
# where $\mathbf{z}_l$ is centre of the $l^{th}$ RBF. We'll use $L$ RBFs, such that $\varphi(\mathbf{x})$ is a vector with $L$ elements. The spread parameter $\sigma$ will be the same for each RBF.
#
# *Hint: You will need to choose the values for $\mathbf{z}_l$ and $\sigma$. If the input data were 1D, the centres $\mathbf{z}_l$ could be uniformly spaced on a line. However, here we have 784-dimensional input. For this reason you might want to use some of the training points as centres, e.g., $L$ randomly chosen "7"s and "9"s.*
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
# +
## your code here
# -
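# A minimal illustrative sketch of the RBF transformation defined above, assuming `X` and `y` from earlier cells; the centres are $L$ randomly chosen training points, and the values of `L` and `sigma` are placeholders to be tuned.
# +
from sklearn.linear_model import Perceptron

def rbf_transform(data, centres, sigma):
    # phi[i, l] = exp(-||x_i - z_l||^2 / sigma^2)
    sq_dists = ((data[:, None, :] - centres[None, :, :]) ** 2).sum(axis=2)
    return np.exp(-sq_dists / sigma ** 2)

L, sigma = 20, 5.0
rng = np.random.RandomState(0)
centres = X[rng.choice(len(X), size=L, replace=False)]

Phi = rbf_transform(X, centres, sigma)
rbf_perceptron = Perceptron(random_state=0)
rbf_perceptron.fit(Phi, y.ravel())
print('Training error:', np.mean(rbf_perceptron.predict(Phi) != y.ravel()))
# -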
# Now compute the validation error for your RBF-perceptron and use this to choose good values of $L$ and $\sigma$. Show a plot of the effect of changing each of these parameters, and justify your parameter choice.
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
# +
## your code here
# -
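# One possible (illustrative) way to sweep the two hyper-parameters, reusing the `rbf_transform` helper sketched above; the candidate values below are arbitrary placeholders.
# +
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split

X_tr, X_val, y_tr, y_val = train_test_split(X, y.ravel(), test_size=0.25, random_state=0)

def rbf_val_error(n_centres, sigma):
    rng = np.random.RandomState(0)
    centres = X_tr[rng.choice(len(X_tr), size=n_centres, replace=False)]
    clf = Perceptron(random_state=0)
    clf.fit(rbf_transform(X_tr, centres, sigma), y_tr)
    return np.mean(clf.predict(rbf_transform(X_val, centres, sigma)) != y_val)

sigmas = [1.0, 2.0, 5.0, 10.0]
plt.plot(sigmas, [rbf_val_error(50, s) for s in sigmas], marker='o')
plt.xlabel('sigma')
plt.ylabel('validation error')
# -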
# <font color='red'>**Write your justification here ...**</font> (as a *markdown* cell)
# ### 1.3 Kernel Perceptron
# Next, instead of directly computing a feature space transformation, we are going to use the kernel trick. Specifically, we are going to use the kernelised version of perceptron in combination with a few different kernels.
#
# *In this section, you cannot use any libraries other than `numpy` and `matplotlib`.*
#
# First, implement linear, polynomial and RBF kernels. The linear kernel is simply a dot product of its inputs, i.e., there is no feature space transformation. Polynomial and RBF kernels should be implemented as defined in the lecture slides.
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
# +
# Input:
# u,v - column vectors of the same dimensionality
#
# Output:
# v - a scalar
def linear_kernel(u, v):
## your code here
# Input:
# u,v - column vectors of the same dimensionality
# c,d - scalar parameters of the kernel as defined in lecture slides
#
# Output:
# v - a scalar
def polynomial_kernel(u, v, c=0, d=3):
## your code here
# Input:
# u,v - column vectors of the same dimensionality
# gamma - scalar parameter of the kernel as defined in lecture slides
#
# Output:
# v - a scalar
def rbf_kernel(u, v, gamma=1):
## your code here
# -
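# Below is an illustrative sketch of the three kernels, written with a `_sketch` suffix so it does not stand in for the functions you are asked to implement; the polynomial and RBF forms follow the common parameterisations $(u^\top v + c)^d$ and $\exp(-\gamma\,\lVert u-v \rVert^2)$, which may differ slightly from the lecture-slide definitions.
# +
def linear_kernel_sketch(u, v):
    # plain dot product: no feature-space transformation
    return np.dot(u, v)

def polynomial_kernel_sketch(u, v, c=0, d=3):
    return (np.dot(u, v) + c) ** d

def rbf_kernel_sketch(u, v, gamma=1):
    diff = u - v
    return np.exp(-gamma * np.dot(diff, diff))
# -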
# Kernel perceptron was a "green slides" topic, and you will not be asked about this method in the exam. Here, you are only asked to implement a simple prediction function following the provided equation. In kernel perceptron, the prediction for instance $\mathbf{x}$ is made based on the sign of
#
# $$w_0 + \sum_{i=1}^{n}\alpha_i y_i K(\mathbf{x}_i, \mathbf{x})$$
#
# Here $w_0$ is the bias term, $n$ is the number of training examples, $\alpha_i$ are learned weights, $\mathbf{x}_i$ and $y_i$ form the training dataset, and $K$ is the kernel.
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
# Input:
# x_test - (r x m) matrix with instances for which to predict labels
# X - (n x m) matrix with training instances in rows
# y - (n x 1) vector with labels
# alpha - (n x 1) vector with learned weights
# bias - scalar bias term
# kernel - a kernel function that follows the same prototype as each of the three kernels defined above
#
# Output:
# y_pred - (r x 1) vector of predicted labels
def kernel_ptron_predict(x_test, X, y, alpha, bias, kernel):
## your code here
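# For illustration, a direct translation of the prediction rule above could look like the sketch below (again suffixed so it does not replace your own implementation); it assumes `kernel` behaves like the kernel sketches above and that labels are in {-1, +1}.
# +
def kernel_ptron_predict_sketch(x_test, X, y, alpha, bias, kernel):
    x_test = np.atleast_2d(x_test)
    y = np.asarray(y).ravel()
    y_pred = np.zeros(len(x_test))
    for r, x in enumerate(x_test):
        # sign of w0 + sum_i alpha_i * y_i * K(x_i, x)
        score = bias + sum(alpha[i] * y[i] * kernel(X[i], x) for i in range(len(X)))
        y_pred[r] = 1 if score >= 0 else -1
    return y_pred
# -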
# The code for kernel perceptron training is provided below. You can treat this function as a black box, but we encourage you to understand the implementation.
# Input:
# X - (n x m) matrix with training instances in rows
# y - (n x 1) vector with labels
# kernel - a kernel function that follows the same prototype as each of the three kernels defined above
# epochs - scalar, number of epochs
#
# Output:
# alpha - (n x 1) vector with learned weights
# bias - scalar bias term
def kernel_ptron_train(X, y, kernel, epochs=100):
n, m = X.shape
alpha = np.zeros(n)
bias = 0
updates = None
for epoch in range(epochs):
print('epoch =', epoch, ', updates =', updates)
updates = 0
schedule = list(range(n))
np.random.shuffle(schedule)
for i in schedule:
y_pred = kernel_ptron_predict(X[i], X, y, alpha, bias, kernel)
if y_pred != y[i]:
alpha[i] += 1
bias += y[i]
updates += 1
if updates == 0:
break
return alpha, bias
# Now use the above functions to train the perceptron. Use heldout validation, and compute the validation error for this method using each of the three kernels. Write a paragraph or two analysing how the accuracy differs between the different kernels and choices of kernel parameters. Discuss the merits of a kernel approach versus the direct basis expansion approach used in the previous section.
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
# <font color='red'>**Provide your analysis here ...**</font> (as a *markdown* cell)
# ### 1.4 Dimensionality Reduction
# Yet another approach to working with complex data is to use a non-linear dimensionality reduction. To see how this might work, first apply a couple of dimensionality reduction methods and inspect the results.
# +
from sklearn import manifold
X = digits[:, 0:784]
y = np.squeeze(digits[:, 784:785])
# n_components refers to the number of dimensions after mapping
# n_neighbors is used for graph construction
X_iso = manifold.Isomap(n_neighbors=30, n_components=2).fit_transform(X)
# n_components refers to the number of dimensions after mapping
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0)
X_se = embedder.fit_transform(X)
f, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(X_iso[y==-1,0], X_iso[y==-1,1], "bo")
ax1.plot(X_iso[y==1,0], X_iso[y==1,1], "ro")
ax1.set_title('Isomap')
ax2.plot(X_se[y==-1,0], X_se[y==-1,1], "bo")
ax2.plot(X_se[y==1,0], X_se[y==1,1], "ro")
ax2.set_title('spectral')
# -
# In a few sentences, explain how a dimensionality reduction algorithm can be used for your binary classification task.
# <font color='red'>**Write your answer here ...**</font> (as a *markdown* cell)
# Implement such an approach and assess the result. For simplicity, we will assume that both training and test data are available ahead of time, and thus the datasets should be used together for dimensionality reduction, after which you can split off a test set for measuring generalisation error. *Hint: you do not have to reduce number of dimensions to two. You are welcome to use the sklearn library for this question.*
#
# <br />
#
# <font color='red'>**Write your code in the cell below ...**</font>
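# A hedged sketch of one such approach, reusing the Isomap embedding from the cell above: embed the whole dataset, then split off a test set and fit a perceptron in the reduced space. The number of components is an arbitrary placeholder.
# +
from sklearn.linear_model import Perceptron
from sklearn.model_selection import train_test_split

X_embedded = manifold.Isomap(n_neighbors=30, n_components=10).fit_transform(X)

X_tr, X_te, y_tr, y_te = train_test_split(X_embedded, y, test_size=0.25, random_state=0)
clf_embedded = Perceptron(random_state=0).fit(X_tr, y_tr)
print('Generalisation error:', np.mean(clf_embedded.predict(X_te) != y_te))
# -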
# In a few sentences, comment on the merits of the dimensionality reduction based approach compared to linear classification from Section 1.1 and basis expansion from Section 1.2.
# <font color='red'>**Write your answer here ...**</font> (as a *markdown* cell)
# ## 2. Kaggle Competition
# The final part of the project is a competition, on more challenging digit data sourced from natural scenes. This data is coloured, pixelated or otherwise blurry, and the digits are not perfectly centered. It is often difficult for humans to classify! The dataset is also considerably larger.
#
# Please sign up to the [COMP90051 Kaggle competition](https://inclass.kaggle.com/c/comp90051-2017) using your `student.unimelb.edu.au` email address. Then download the file `data.npz` from Kaggle. This is a compressed `numpy` data file containing three ndarray objects:
# - `train_X` training set, with 4096 input features (greyscale pixel values);
# - `train_Y` training labels (0-9)
# - `test_X` test set, with 4096 input features, as per above
#
# Each image is 64x64 pixels in size, which has been flattened into a vector of 4096 values. You should load the files using `np.load`, from which you can extract the three elements. You may need to transpose the images for display, as they were flattened in a different order. Each pixel has an intensity value between 0-255. For those using languages other than python, you may need to output these objects in another format, e.g., as a matlab matrix.
#
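# A minimal loading sketch based on the description above (array names as listed; the transpose for display is only needed if the image appears rotated):
# +
data = np.load('data.npz')
train_X, train_Y, test_X = data['train_X'], data['train_Y'], data['test_X']
print(train_X.shape, train_Y.shape, test_X.shape)

# each row is a flattened 64x64 image
plt.imshow(train_X[0].reshape(64, 64).T, cmap='gray')
print('label:', train_Y[0])
# -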
# Your job is to develop a *multiclass* classifier on this dataset. You can use whatever techniques you like, such as the perceptron code from above, or other methods such as *k*NN, logistic regression, neural networks, etc. You may want to compare several methods, or try an ensemble combination of systems. You are free to use any python libraries for this question. Note that some fancy machine learning algorithms can take several hours or days to train (we impose no time limits), so please start early to allow sufficient time. *Note that you may want to sample smaller training sets, if runtime is an issue, however this will degrade your accuracy. Sub-sampling is a sensible strategy when developing your code.*
#
# You may also want to do some basic image processing, however, as this is not part of the subject, we would suggest that you focus most of your efforts on the machine learning. For inspiration, please see [Yann LeCun's MNIST page](http://yann.lecun.com/exdb/mnist/), specifically the table of results and the listed papers. Note that your dataset is harder than MNIST, so your mileage may vary.
# ### 2.1 Making Submissions
# This will be set up as a *Kaggle in class* competition, in which you can upload your system predictions on the test set. You should format your predictions as a csv file, with the same number of lines as the test set, and each line comprising two numbers `id, class` where *id* is the instance number (increasing integers starting from 1) and *class* is an integer between 0-9, corresponding to your system prediction. E.g.,
# ```
# Id,Label
# 1,9
# 2,9
# 3,4
# 4,5
# 5,1
# ...```
# based on the first five predictions of the system being classes `9 9 4 5 1`. See the `sample_submission.csv` for an example file.
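# For example, a submission file in this format could be written with pandas as in the sketch below, assuming `test_X` has been loaded as above and `test_predictions` is a 1-D array holding your predicted digit classes for the test set.
# +
import pandas as pd

test_predictions = np.zeros(len(test_X), dtype=int)  # placeholder predictions
submission = pd.DataFrame({'Id': np.arange(1, len(test_predictions) + 1),
                           'Label': test_predictions})
submission.to_csv('submission.csv', index=False)
# -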
#
# Kaggle will report your accuracy on a public portion of the test set, and maintain a leaderboard showing the performance of you and your classmates. You will be allowed to upload up to four submissions each day. At the end of the competition, you should nominate your best submission, which will be scored on the private portion of the test set. The accuracy of your system (i.e., proportion of correctly classified examples) on the private test set will be used for grading your approach.
#
# **Marks will be assigned as follows**:
# - position in the class, where all students are ranked and then the ranks are linearly scaled to <br>0 marks (worst in class) - 4 marks (best in class)
# - absolute performance (4 marks), banded as follows (rounded to nearest integer):
# <br>below 80% = 0 marks; 80-89% = 1; 90-92% = 2; 93-94% = 3; above 95% = 4 marks
# Note that you are required to submit your code with this notebook, submitted to the LMS. Failure to provide your implementation may result in assigning zero marks for the competition part, irrespective of the competition standing. Your implementation should be able to exactly reproduce submitted final Kaggle entry, and match your description below.
# ### 2.2. Method Description
# Describe your approach, and justify each of the choices made within your approach. You should write a document with no more than 400 words, as a **PDF** file (not *docx* etc) with up to 2 pages of A4 (2 sides). Text must only appear on the first page, while the second page is for *figures and tables only*. Please use a font size of 11pt or higher. Please consider using `pdflatex` for the report, as it's considerably better for this purpose than wysiwyg document editors. You are encouraged to include empirical results, e.g., a table of results, graphs, or other figures to support your argument. *(this will contribute 9 marks; note that we are looking for clear presentation, sound reasoning, good evaluation and error analysis, as well as general ambition of approach.)*
| COMP90051 Statistical Machine Learning/project-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
data=pd.read_csv("data/database.csv")
data
# drop the trailing character and split the comma-separated grape string into a list per wine
data["grapes"]=[x[0:len(x)-1] for x in data.grapes]
data["grapes"]=[x.split(',') for x in data.grapes]
# explode the grape lists into one (country, grape) row per grape
rows=[]
_=data.apply(lambda row: [rows.append([row['country'], nn])
for nn in row.grapes], axis=1)
data_new=pd.DataFrame(rows)
data_new.rename(columns={0:"Country",1:"Grape"}, inplace=True)
dt=data_new.groupby(by=["Country","Grape"]).size().copy()
dt=pd.DataFrame(dt)
# +
dt.rename(columns={0:'nr'}, inplace=True)
dt
# -
dt.reset_index(inplace=True)
dt["Grape"]=[x.strip() for x in dt["Grape"]]
grapes=dt.groupby(by=['Country','Grape']).sum().copy()
grapes.reset_index(inplace=True)
grapes
grapes.to_csv("all_grapes.csv")
dat = dt.groupby(level=0).apply(lambda x:100 * x / float(x.sum()))
dat.reset_index(inplace=True)
new_dat=dat.sort_values(['Country','nr'],ascending=[True,False]).reset_index().copy()
new_dat
new_dat['Max'] = new_dat.groupby(['Country'])['nr'].transform(max)
df=new_dat[new_dat.nr == new_dat.Max].copy()
df.reset_index(inplace=True)
data=df[['Country','Grape','Max']].copy()
data
data.to_csv("grapes.csv")
| v1/GrapesCloropleth/Generate_All_Grapes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import panel as pn
import numpy as np
import holoviews as hv
pn.extension()
# -
# For a large variety of use cases we do not need complete control over the exact layout of each individual component on the page, as could be achieved with a [custom template](../../user_guide/Templates.ipynb); we just want to achieve a more polished look and feel. For these cases Panel ships with a number of default templates, which are defined by declaring four main content areas on the page that can be populated as desired:
#
# * **`header`**: The header area of the HTML page
# * **`sidebar`**: A collapsible sidebar
# * **`main`**: The main area of the application
# * **`modal`**: A modal area which can be opened and closed from Python
#
# These four areas behave very similarly to other Panel layout components and have list-like semantics. The `ReactTemplate` in particular is an exception to this rule, as its `main` area behaves like a Panel `GridSpec` object. Unlike regular layout components, however, the contents of these areas are fixed once rendered. If you need a dynamic layout you should therefore insert a regular Panel layout component (e.g. a `Column` or `Row`) and modify it in place once added to one of the content areas.
#
# Templates allow us to quickly and easily create web apps for displaying our data. Panel comes with a default Template, and includes multiple Templates that extend the default and add some customization for a better display.
#
# #### Parameters:
#
# In addition to the four different areas we can populate, the `ReactTemplate` declares a few parameters to configure the layout:
#
# * **`cols`** (dict): Number of columns in the grid for different display sizes (`default={'lg': 12, 'md': 10, 'sm': 6, 'xs': 4, 'xxs': 2}`)
# * **`breakpoints`** (dict): Sizes in pixels for various layouts (`default={'lg': 1200, 'md': 996, 'sm': 768, 'xs': 480, 'xxs': 0}`)
# * **`row_height`** (int, default=150): Height per row in the grid
# * **`dimensions`** (dict): Minimum/Maximum sizes of cells in grid units (`default={'minW': 0, 'maxW': 'Infinity', 'minH': 0, 'maxH': 'Infinity'}`)
# * **`prevent_collision`** (bool, default=False): Prevent collisions between grid items.
#
# These parameters control the responsive resizing in different layouts. The `ReactTemplate` also exposes the same parameters as other templates:
#
# * **`busy_indicator`** (BooleanIndicator): Visual indicator of application busy state.
# * **`header_background`** (str): Optional header background color override.
# * **`header_color`** (str): Optional header text color override.
# * **`logo`** (str): URI of logo to add to the header (if local file, logo is base64 encoded as URI).
# * **`site`** (str): Name of the site. Will be shown in the header. Default is '', i.e. not shown.
# * **`site_url`** (str): Url of the site and logo. Default is "/".
# * **`title`** (str): A title to show in the header.
# * **`theme`** (Theme): A Theme class (available in `panel.template.theme`)
#
# ________
# In this case we are using the `ReactTemplate`, built on [react-grid-layout](https://github.com/STRML/react-grid-layout), which provides a responsive, resizable, draggable grid layout. Here is an example of how you can set up a display using this template:
# +
react = pn.template.ReactTemplate(title='React Template')
pn.config.sizing_mode = 'stretch_both'
xs = np.linspace(0, np.pi)
freq = pn.widgets.FloatSlider(name="Frequency", start=0, end=10, value=2)
phase = pn.widgets.FloatSlider(name="Phase", start=0, end=np.pi)
@pn.depends(freq=freq, phase=phase)
def sine(freq, phase):
return hv.Curve((xs, np.sin(xs*freq+phase))).opts(
responsive=True, min_height=400)
@pn.depends(freq=freq, phase=phase)
def cosine(freq, phase):
return hv.Curve((xs, np.cos(xs*freq+phase))).opts(
responsive=True, min_height=400)
react.sidebar.append(freq)
react.sidebar.append(phase)
# Unlike other templates the `ReactTemplate.main` area acts like a GridSpec
react.main[:4, :6] = pn.Card(hv.DynamicMap(sine), title='Sine')
react.main[:4, 6:] = pn.Card(hv.DynamicMap(cosine), title='Cosine')
react.servable();
# -
# With `row_height=150` this will result in the two `Card` objects each filling 4 rows (600 pixels in total) and each taking up 6 columns, which resize responsively to fill the screen and reflow when working on a smaller screen. When hovering over the top-left corner of each card a draggable handle allows dragging the component around, while a resize handle shows up at the bottom-right corner.
# <h3><b>ReactTemplate with DefaultTheme</b></h3>
# <img src="../../assets/React.png" style="margin-left: auto; margin-right: auto; display: block;"></img>
# </br>
# <h3><b>ReactTemplate with DarkTheme</b></h3>
# <img src="../../assets/ReactDark.png" style="margin-left: auto; margin-right: auto; display: block;"></img>
# The app can be displayed within the notebook by using `.servable()`, or rendered in another tab by replacing it with `.show()`.
#
# Themes can be added using the optional keyword argument `theme`. Each template comes with a DarkTheme and a DefaultTheme, which can be set with `ReactTemplate(theme=DarkTheme)`. If no theme is set, the DefaultTheme will be applied.
#
# It should be noted that Templates may not render correctly in a notebook, and for the best performance they should ideally be deployed to a server.
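# As a small illustrative sketch (assuming the parameters and areas described above), the `theme` argument and the dynamic-layout pattern mentioned earlier could be combined like this:
# +
from panel.template.theme import DarkTheme

dark_react = pn.template.ReactTemplate(title='React Template', theme=DarkTheme)

# a regular layout component placed in a grid cell can still be modified in place later
dynamic_column = pn.Column('Initial content')
dark_react.main[:2, :6] = dynamic_column
dynamic_column.append('Content appended after being added to the template')

dark_react.servable();
# -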
| examples/reference/templates/React.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional Neural Networks: Application
#
# Welcome to Course 4's second assignment! In this notebook, you will:
#
# - Implement helper functions that you will use when implementing a TensorFlow model
# - Implement a fully functioning ConvNet using TensorFlow
#
# **After this assignment you will be able to:**
#
# - Build and train a ConvNet in TensorFlow for a classification problem
#
# We assume here that you are already familiar with TensorFlow. If you are not, please refer to the *TensorFlow Tutorial* of the third week of Course 2 ("*Improving deep neural networks*").
# ## 1.0 - TensorFlow model
#
# In the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call.
#
# As usual, we will start by loading in the packages.
# +
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
# %matplotlib inline
np.random.seed(1)
# -
# Run the next cell to load the "SIGNS" dataset you are going to use.
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5.
#
# <img src="images/SIGNS.png" style="width:800px;height:300px;">
#
# The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
# Example of a picture
index = 6
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
# In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it.
#
# To get started, let's examine the shapes of your data.
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
# ### 1.1 - Create placeholders
#
# TensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session.
#
# **Exercise**: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment. To do so, you could use "None" as the batch size, it will give you the flexibility to choose it later. Hence X should be of dimension **[None, n_H0, n_W0, n_C0]** and Y should be of dimension **[None, n_y]**. [Hint](https://www.tensorflow.org/api_docs/python/tf/placeholder).
# +
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_H0, n_W0, n_C0, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
"""
### START CODE HERE ### (≈2 lines)
X = tf.placeholder(tf.float32,[None, n_H0, n_W0, n_C0])
Y = tf.placeholder(tf.float32,[None, n_y])
### END CODE HERE ###
return X, Y
# -
X, Y = create_placeholders(64, 64, 3, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
# **Expected Output**
#
# <table>
# <tr>
# <td>
# X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32)
#
# </td>
# </tr>
# <tr>
# <td>
# Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)
#
# </td>
# </tr>
# </table>
# ### 1.2 - Initialize parameters
#
# You will initialize weights/filters $W1$ and $W2$ using `tf.contrib.layers.xavier_initializer(seed = 0)`. You don't need to worry about bias variables as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions. TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment.
#
# **Exercise:** Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder - to initialize a parameter $W$ of shape [1,2,3,4] in Tensorflow, use:
# ```python
# W = tf.get_variable("W", [1,2,3,4], initializer = ...)
# ```
# [More Info](https://www.tensorflow.org/api_docs/python/tf/get_variable).
# +
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
"""
Initializes weight parameters to build a neural network with tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 2 lines of code)
W1 = tf.get_variable("W1",[4, 4, 3, 8],initializer=tf.contrib.layers.xavier_initializer(seed=0))
W2 = tf.get_variable("W2",[2, 2, 8, 16],initializer=tf.contrib.layers.xavier_initializer(seed=0))
### END CODE HERE ###
parameters = {"W1": W1,
"W2": W2}
return parameters
# -
tf.reset_default_graph()
with tf.Session() as sess_test:
parameters = initialize_parameters()
init = tf.global_variables_initializer()
sess_test.run(init)
print("W1 = " + str(parameters["W1"].eval()[1,1,1]))
print("W2 = " + str(parameters["W2"].eval()[1,1,1]))
# **Expected Output:**
#
# <table>
#
# <tr>
# <td>
# W1 =
# </td>
# <td>
# [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394 <br>
# -0.06847463 0.05245192]
# </td>
# </tr>
#
# <tr>
# <td>
# W2 =
# </td>
# <td>
# [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058 <br>
# -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228 <br>
# -0.22779644 -0.1601823 -0.16117483 -0.10286498]
# </td>
# </tr>
#
# </table>
# ### 1.2 - Forward propagation
#
# In TensorFlow, there are built-in functions that carry out the convolution steps for you.
#
# - **tf.nn.conv2d(X,W1, strides = [1,s,s,1], padding = 'SAME'):** given an input $X$ and a group of filters $W1$, this function convolves $W1$'s filters on X. The third input ([1,s,s,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d)
#
# - **tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'):** given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool)
#
# - **tf.nn.relu(Z1):** computes the elementwise ReLU of Z1 (which can be any shape). You can read the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/nn/relu)
#
# - **tf.contrib.layers.flatten(P)**: given an input P, this function flattens each example into a 1D vector while maintaining the batch-size. It returns a flattened tensor with shape [batch_size, k]. You can read the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten)
#
# - **tf.contrib.layers.fully_connected(F, num_outputs):** given the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected)
#
# In the last function above (`tf.contrib.layers.fully_connected`), the fully connected layer automatically initializes weights in the graph and keeps on training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters.
#
#
# **Exercise**:
#
# Implement the `forward_propagation` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED`. You should use the functions above.
#
# In detail, we will use the following parameters for all the steps:
# - Conv2D: stride 1, padding is "SAME"
# - ReLU
# - Max pool: Use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME"
# - Conv2D: stride 1, padding is "SAME"
# - ReLU
# - Max pool: Use a 4 by 4 filter size and a 4 by 4 stride, padding is "SAME"
# - Flatten the previous output.
# - FULLYCONNECTED (FC) layer: Apply a fully connected layer without a non-linear activation function. Do not call the softmax here. This will result in 6 neurons in the output layer, which then get passed later to a softmax. In TensorFlow, the softmax and cost function are lumped together into a single function, which you'll call in a different function when computing the cost.
# +
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "W2"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
W2 = parameters['W2']
### START CODE HERE ###
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(X,W1,strides=[1,1,1,1],padding="SAME")
# RELU
A1 = tf.nn.relu(Z1)
# MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool(A1,ksize=[1,8,8,1],strides=[1,8,8,1],padding="SAME")
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1,W2,strides=[1,1,1,1],padding="SAME")
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool(A2,ksize=[1,4,4,1],strides=[1,4,4,1],padding="SAME")
# FLATTEN
P2 = tf.contrib.layers.flatten(P2)
# FULLY-CONNECTED without non-linear activation function (do not call softmax here).
# 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
Z3 = tf.contrib.layers.fully_connected(P2,num_outputs=6,activation_fn=None)
### END CODE HERE ###
return Z3
# +
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)})
print("Z3 = " + str(a))
# -
# **Expected Output**:
#
# <table>
# <td>
# Z3 =
# </td>
# <td>
# [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064] <br>
# [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]
# </td>
# </table>
# ### 1.3 - Compute cost
#
# Implement the compute cost function below. You might find these two functions helpful:
#
# - **tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y):** computes the softmax entropy loss. This function both computes the softmax activation function as well as the resulting loss. You can check the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits)
# - **tf.reduce_mean:** computes the mean of elements across dimensions of a tensor. Use this to sum the losses over all the examples to get the overall cost. You can check the full documentation [here.](https://www.tensorflow.org/api_docs/python/tf/reduce_mean)
#
# **Exercise**: Compute the cost below using the function above.
# +
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
### START CODE HERE ### (1 line of code)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y))
### END CODE HERE ###
return cost
# +
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)})
print("cost = " + str(a))
# -
# **Expected Output**:
#
# <table>
# <td>
# cost =
# </td>
#
# <td>
# 2.91034
# </td>
# </table>
# ## 1.4 Model
#
# Finally you will merge the helper functions you implemented above to build a model. You will train it on the SIGNS dataset.
#
# You have implemented `random_mini_batches()` in the Optimization programming assignment of course 2. Remember that this function returns a list of mini-batches.
#
# **Exercise**: Complete the function below.
#
# The model below should:
#
# - create placeholders
# - initialize parameters
# - forward propagate
# - compute the cost
# - create an optimizer
#
# Finally you will create a session and run a for loop for num_epochs, get the mini-batches, and then for each mini-batch you will optimize the function. [Hint for initializing the variables](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer)
# +
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
num_epochs = 100, minibatch_size = 64, print_cost = True):
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 64, 64, 3)
Y_train -- test set, of shape (None, n_y = 6)
X_test -- training set, of shape (None, 64, 64, 3)
Y_test -- test set, of shape (None, n_y = 6)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep results consistent (tensorflow seed)
seed = 3 # to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = [] # To keep track of the cost
# Create Placeholders of the correct shape
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
### START CODE HERE ### (1 line)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
### END CODE HERE ###
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X,Y).
### START CODE HERE ### (1 line)
_ , temp_cost = sess.run([optimizer,cost],feed_dict={X:minibatch_X,Y:minibatch_Y})
### END CODE HERE ###
minibatch_cost += temp_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy)
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
return train_accuracy, test_accuracy, parameters
# -
# Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code!
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
# **Expected output**: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease.
#
# <table>
# <tr>
# <td>
# **Cost after epoch 0 =**
# </td>
#
# <td>
# 1.917929
# </td>
# </tr>
# <tr>
# <td>
# **Cost after epoch 5 =**
# </td>
#
# <td>
# 1.506757
# </td>
# </tr>
# <tr>
# <td>
# **Train Accuracy =**
# </td>
#
# <td>
# 0.940741
# </td>
# </tr>
#
# <tr>
# <td>
# **Test Accuracy =**
# </td>
#
# <td>
# 0.783333
# </td>
# </tr>
# </table>
# Congratulations! You have finished the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or using regularization (as this model clearly has a high variance).
#
# Once again, here's a thumbs up for your work!
fname = "images/thumbs_up.jpg"
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64))
plt.imshow(my_image)
| Convolutional Neural Networks/Week 1/Convolution+model+-+Application+-+v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: guitarsounds
# language: python
# name: guitarsounds
# ---
import os
os.chdir('/Users/Olivier/anaconda3/envs/guitarsounds')
# %load_ext autoreload
# %autoreload 2
from guitarsounds.analysis import SoundPack, Sound, Signal
from timbral_models import timbral_extractor
import numpy as np
import matplotlib.pyplot as plt
test = Sound('soundfiles/Wood_Guitar/Wood_A0_2.wav').condition(return_self=True)
test.signal.save_wav('temp')
file = 'temp.wav'
timbre = timbral_extractor(file, verbose=False)
# +
timbre = {key:timbre[key] for key in timbre if key not in ['rough', 'reverb', 'hardness']}
values = list(timbre.values())
categories = list(timbre.keys())
N = len(values)
values += values[:1]
angles = [n / float(N) * 2 * np.pi for n in range(N)]
angles += angles[:1]
# make a square figure
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111, polar=True)
ax.set_yticks([])
ax.set_yticklabels([])
r = np.arange(0, 3.0, 0.01)
theta = 2*np.pi*r
ax.plot(angles, values, color='#ee8d18', lw=3)
ax.fill(angles, values, color='#ee8d18')
# set ticklabels location at 1.3 times the axes' radius
ax.set_xticks(angles[:-1])
ax.set_xticklabels(categories)
ax.xaxis.set_tick_params(pad=15)
plt.tight_layout()
plt.show()
# -
test1 = Sound('soundfiles/Wood_Guitar/Wood_D0_2.wav').condition(return_self=True)
test2 = Sound('soundfiles/flax_carbon/Carbon_D0_2.wav').condition(return_self=True)
test3 = Sound('soundfiles/Wood_Guitar/Wood_E1_1.wav').condition(return_self=True)
# +
fig, axs = plt.subplots(1,3, subplot_kw={'projection':'polar'})
axs = axs.reshape(-1)
for ax, sound in zip(axs, [test1, test2, test3]):
plt.sca(ax)
sound.signal.plot('timbre')
plt.tight_layout()
fig = plt.gcf()
display(fig)
# -
pack = SoundPack(test1, test2)
pack.compare_plot('timbre')
plt.show()
pack.plot('timbre')
plt.show()
test1.signal.plot('timbre')
plt.show()
| Notebooks/Dev/DEV_Timbre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# +
# %matplotlib inline
from ast import literal_eval
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from typing import Tuple
import numpy as np
from sklearn import preprocessing
from sklearn.metrics import roc_auc_score
from scipy.stats import norm
import warnings
from lob_data_utils import lob, db_result
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
# +
# gdf kernel parameters: amplitude r, standard deviation s, K kernels per order book snapshot
r = 0.01
s = 0.02
K = 50
gdf_columns = ['gdf_' + str(i) for i in range(0, K)]
# each kernel is (amplitude, centre, std); centres are spread symmetrically around the mid price
gdfs_r = r * np.ones(K)
gdfs_m = 0.1000 * np.hstack([np.arange(- K // 2, 0), np.arange(1, K // 2 + 1)])
gdfs_s = s * np.ones(K)
gdfs = np.vstack([gdfs_r, gdfs_m, gdfs_s]).T
def gdf_representation(buy_orders, sell_orders, gdf):
buy_price, buy_volume = buy_orders
sell_price, sell_volume = sell_orders
buy_gdf_y = gdf[0] * norm.pdf(buy_price, loc=gdf[1], scale=gdf[2])
sell_gdf_y = gdf[0] * norm.pdf(sell_price, loc=gdf[1], scale=gdf[2])
return np.clip(buy_volume, 0.0, buy_gdf_y).sum() + np.clip(sell_volume, 0.0, sell_gdf_y).sum()
# -
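# As a quick illustration (a sketch with made-up numbers, not taken from the dataset),
# `gdf_representation` maps one order book snapshot to a scalar feature: each Gaussian
# density filter caps the volume visible at every normalized price level and sums what remains.
# +
toy_bid_prices = np.array([-0.3, -0.2, -0.1])
toy_bid_volumes = np.array([0.05, 0.10, 0.20])
toy_ask_prices = np.array([0.1, 0.2, 0.3])
toy_ask_volumes = np.array([0.15, 0.10, 0.05])
# gdfs[25, :] is the filter centred at normalized price 0.1, so here it mostly "sees" the best ask
gdf_representation((toy_bid_prices, toy_bid_volumes),
                   (toy_ask_prices, toy_ask_volumes),
                   gdfs[25, :])
# -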
def transform_to_orders(df: pd.DataFrame) -> pd.DataFrame:
order_list = []
for idx, row in df.iterrows():
l_bid = [literal_eval(row.get('bid'))][0]
bid_dict = [{'price': p, 'volume': v} for p, v in l_bid]
d_bid = pd.DataFrame(bid_dict, columns=['price', 'volume'])
# d_bid = d_bid.sort_values(by='price', ascending=False) #.iloc[0:n]
# d_bid = d_bid.sort_values(by='price', ascending=True)
# d_bid.index = list(range(0, len(d_bid)))
# d_bid.reindex()
l_ask = [literal_eval(row.get('ask'))][0]
ask_dict = [{'price': p, 'volume': v} for p, v in l_ask]
d_ask = pd.DataFrame(ask_dict, columns=['price', 'volume'])
# d_ask = d_ask.sort_values(by='price', ascending=True)# .iloc[0:n]
mid_price = row['mid_price']
d_bid['volume_norm'] = d_bid['volume'] * d_bid['price']
d_ask['volume_norm'] = d_ask['volume'] * d_ask['price']
total_capital = d_bid['volume_norm'].sum() + d_ask['volume_norm'].sum()
d_bid['volume_norm'] /= total_capital
d_ask['volume_norm'] /= total_capital
d_bid['price_norm'] = 1000 * (d_bid['price'] - mid_price) / mid_price
d_ask['price_norm'] = 1000 * (d_ask['price'] - mid_price) / mid_price
new_row_dict = {}
for i in range(0, K):
p = K/(len(d_bid) + len(d_ask))
idx = int(np.floor(p*len(d_bid) - K//2 + i))
gdf_repr = gdf_representation((d_bid['price_norm'], d_bid['volume_norm']),
(d_ask['price_norm'], d_ask['volume_norm']),
gdfs[i, :])
new_row_dict['gdf_' + str(i)] = gdf_repr
new_row_dict['ask_vol' + str(i)] = d_ask.iloc[idx]['volume_norm']
new_row_dict['ask_p' + str(i)] = d_ask.iloc[idx]['price_norm']
new_row_dict['bid_vol' + str(i)] = d_bid.iloc[idx]['volume_norm']
new_row_dict['bid_p' + str(i)] = d_bid.iloc[idx]['price_norm']
new_row_dict['mid_price'] = row.get('mid_price')
new_row_dict['mid_price_indicator'] = row.get('mid_price_indicator')
order_list.append(new_row_dict)
order_df = pd.DataFrame(order_list)
return order_df
# +
data_length = 5050
dfs = {}
dfs_test = {}
dfs_cv = {}
stocks = ['9061', '9062', '9063', '9064', '9065']
for s in stocks:
d, d_cv, d_test = lob.load_prepared_data(s, data_dir='data/', cv=True, length=data_length)
dfs[s] = transform_to_orders(d)
dfs_cv[s] = transform_to_orders(d_cv)
# -
dfs['9061'][['gdf_1']].plot()
sns.heatmap(dfs['9061'][['mid_price'] + gdf_columns].corr())
def svm_classification(df):
clf = SVC(probability=True, C=10000)
X = df[gdf_columns]
print(X.shape)
y = df['mid_price_indicator'].values.reshape(-1, 1)
y[0] = 0
print(y.shape)
clf.fit(X, y)
return clf
clfs = {}
for s in stocks:
print('**************************************', s)
try:
clf = svm_classification(dfs[s])
clfs[s] = clf
predictions = clf.predict(dfs[s][gdf_columns])
print(s, roc_auc_score(predictions, dfs[s]['mid_price_indicator']))
except Exception as e:
print(e)
for s in stocks:
print('****************************************', s)
predictions_cv = clfs[s].predict(dfs_cv[s].loc[:, gdf_columns])
try:
print(s, roc_auc_score(predictions_cv, dfs_cv[s]['mid_price_indicator']))
except Exception as e:
print(s, e)
for s in stocks:
d, d_cv, d_test = lob.load_prepared_data(s, data_dir='data/', cv=True, length=data_length)
dfs_test[s] = transform_to_orders(d_test)
for s in stocks:
predictions_test = clfs[s].predict(dfs_test[s].loc[:, gdf_columns])
try:
print(s, roc_auc_score(predictions_test, dfs_test[s]['mid_price_indicator']))
except Exception as e:
print(s, e)
# # Step-by-step: GDF features for a single order book snapshot
s='9061'
d, d_cv, d_test = lob.load_prepared_data(s, data_dir='data/', cv=True, length=data_length)
d.head()
i = 0
print(len(d.iloc[i]['bid']))
print(len(d.iloc[i]['ask']))
print(d.iloc[i]['mid_price'])
# +
bid = [{'price': b[0], 'volume': b[1]} for b in literal_eval(d.iloc[i]['bid'])]
df_bid = pd.DataFrame(bid)
df_bid = df_bid.sort_values(by='price', ascending=False)#.iloc[0:20]
df_bid = df_bid.sort_values(by='price', ascending=True)
df_bid.index = list(range(0, len(df_bid)))
df_bid.reindex()
df_bid.head()
# -
ask = [{'price': b[0], 'volume': b[1]} for b in literal_eval(d.iloc[i]['ask'])]
df_ask = pd.DataFrame(ask)
df_ask = df_ask.sort_values(by='price', ascending=True)#.iloc[0:20]
df_ask.head()
# +
mid_price = d.iloc[i]['mid_price']
df_bid['volume_norm'] = df_bid['volume'] * df_bid['price']
df_ask['volume_norm'] = df_ask['volume'] * df_ask['price']
total_capital = df_bid['volume_norm'].sum() + df_ask['volume_norm'].sum()
df_bid['volume_norm'] /= total_capital
df_ask['volume_norm'] /= total_capital
df_bid['price_norm'] = 1000 * (df_bid['price'] - mid_price) / mid_price
df_ask['price_norm'] = 1000 * (df_ask['price'] - mid_price) / mid_price
# -
df_bid[['price_norm', 'volume_norm']]
gdf_representation((df_bid['price_norm'], df_bid['volume_norm']),
(df_ask['price_norm'], df_ask['volume_norm']), gdfs[0, :])
# +
import lobs_plots
# %matplotlib inline
import plotly.offline as py
import plotly.figure_factory as ff
import plotly.graph_objs as go
py.init_notebook_mode(connected=True)
from ipywidgets import interact, interactive, fixed, interact_manual, widgets
gdf_features = [gdf_representation((df_bid['price_norm'], df_bid['volume_norm']),
(df_ask['price_norm'], df_ask['volume_norm']), gdfs[j, :]) for j in range(0, K)]
lobs_plots.plot_gdf_features(gdf_features)
# +
buy_orders = np.array(list(zip(df_bid['price_norm'].values, df_bid['volume_norm'].values)))
sell_orders = np.array(list(zip(df_ask['price_norm'].values, df_ask['volume_norm'].values)))
print(len(buy_orders), len(sell_orders))
p = K/(len(buy_orders) + len(sell_orders))
ss = len(buy_orders) / (len(buy_orders) + len(sell_orders))
print(p*len(buy_orders))
lobs_plots.plot_lob_and_gdf(buy_orders, sell_orders, gdfs[13, :], K)
# -
| gaussian_filter/feature_15_gdf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://github.com/sdasadia/Oil-Price-Prediction/blob/master/Oil%20Price%20Forecast.ipynb
#
# https://github.com/sergeyivanov01/PHBS_MLF_2018/blob/master/BP2018.py
# +
# %matplotlib inline
import matplotlib
import seaborn as sns
import quandl
import math
import numpy as np
import scipy as sp
import pandas as pd
import sklearn.linear_model
import sklearn.metrics
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
plt.rcParams['figure.figsize'] = (20, 15)
plt.style.use('seaborn-darkgrid')
# -
# %store -r data_VNQ
X = data_VNQ.values
size = int(len(X) * 0.6)
# +
# def test_stationarity(timeseries):
# #Determing rolling statistics
# rolmean = ts_log.rolling(1).mean()
# rolstd = ts_log.rolling(1).std()
# #Plot rolling statistics:
# orig = plt.plot(ts, color='blue',label='Original')
# mean = plt.plot(rolmean, color='red', label='Rolling Mean')
# std = plt.plot(rolstd, color='black', label = 'Rolling Std')
# plt.legend(loc='best')
# plt.title('Rolling Mean & Standard Deviation')
# plt.show(block=False)
# #Perform Dickey-Fuller test:
# print ('Results of Dickey-Fuller Test:')
# dftest = adfuller(timeseries.iloc[:,0].values, autolag='AIC' )
# dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used'])
# for key,value in dftest[4].items():
# dfoutput['Critical Value (%s)'%key] = value
# print (dfoutput)
# +
#print(data)
# +
#ts = data
# -
train, test = X[0:size], X[size:len(X)]
# +
history = [x for x in train]
predictions = list()
for t in range(len(test)):
model = ARIMA(history, order=(2,1,1))
model_fit = model.fit(disp=0)
output = model_fit.forecast()
yhat = output[0]
predictions.append(yhat)
obs = test[t]
history.append(obs)
print('predicted=%f, expected=%f' % (yhat, obs))
# -
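# Note: `statsmodels.tsa.arima_model.ARIMA` was deprecated and later removed from
# statsmodels. The sketch below is illustrative only (not verified against the results
# above) and shows the same walk-forward loop with the replacement `ARIMA` class.
# +
from statsmodels.tsa.arima.model import ARIMA as ARIMA_new

history_new = list(np.ravel(train))
predictions_new = []
for obs_new in np.ravel(test):
    fit = ARIMA_new(history_new, order=(2, 1, 1)).fit()
    predictions_new.append(float(fit.forecast(steps=1)[0]))
    history_new.append(float(obs_new))
# -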
error_VNQ = math.sqrt(mean_squared_error(test, predictions))
# %store error_VNQ
print('Test RMSE: %.3f' % error_VNQ)
# plot
fig= plt.figure(figsize=(15,10))
plt.plot(test, label = 'VNQ Actual')
plt.plot(predictions, color='orange', label = 'VNQ Prediction')
plt.xlabel('Weeks')
plt.ylabel('VNQ Price')
plt.title('ARIMA Prediction')
plt.legend()
plt.show()
ARIMA_pred_VNQ = pd.DataFrame(predictions)
ARIMA_pred_VNQ.columns = ['ARIMA_pred_VNQ']
# %store ARIMA_pred_VNQ
| ARIMA_pred/6_ARIMA_10VNQ.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Importing the libraries
# +
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.factorization import KMeans
# -
# ## Ignore all GPUs
# tf k-means does not benefit from it.
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# ## Loading the MNIST images data
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
full_data_x = mnist.train.images
print('Data has %d datapoints' % len(full_data_x))
# ## Defining the hyperparameters
# Parameters
num_steps = 100 # Total steps to train
k = 50 # The number of clusters
num_classes = 10 # The 10 digits
num_features = full_data_x.shape[1] # Each image is 28x28 pixels
# ## Defining the placeholders for input data
# Input images
X = tf.placeholder(tf.float32, shape=[None, num_features])
# Labels (for assigning a label to a centroid and testing)
Y = tf.placeholder(tf.float32, shape=[None, num_classes])
# ## Initializing the KMeans algorithm and its scope
# K-Means Parameters
kmeans = KMeans(inputs=X, num_clusters=k, distance_metric='cosine')
# Build KMeans graph
with tf.name_scope('KMeans') as scope:
training_graph = kmeans.training_graph()
# ## Defining the average distance operation and its summary
# +
(all_scores, cluster_idx, scores, cluster_centers_initialized, init_op, train_op) = training_graph
cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple
avg_distance = tf.reduce_mean(scores)
distance_h = tf.summary.scalar('average_distance', avg_distance)
# Initialize the variables (i.e. assign their default value)
init_vars = tf.global_variables_initializer()
merged_summary_op = tf.summary.merge_all()
# -
# ## Starting a TF session and initializing summary data
# +
# Start TensorFlow session
session = tf.Session()
# Run the initializer
session.run(init_vars, feed_dict={X: full_data_x})
session.run(init_op, feed_dict={X: full_data_x})
summary_writer = tf.summary.FileWriter('logs', graph=session.graph)
# -
# ## Training the KMeans on input data and collecting the summary
# Training
for i in range(num_steps):
_, d, idx = session.run([train_op, avg_distance, cluster_idx], feed_dict={X: full_data_x})
summary_info = session.run(merged_summary_op, feed_dict={X: full_data_x})
summary_writer.add_summary(summary_info, i)
if i % 10 == 0:
print("Step %i, Avg Distance: %f" % (i+1, d))
# ## Assigning labels to clusters and defining the accuracy operation
# Assign a label to each centroid
# Count total number of labels per centroid, using the label of each training
# sample to their closest centroid (given by 'idx')
counts = np.zeros(shape=(k, num_classes))
for i in range(len(idx)):
counts[idx[i]] += mnist.train.labels[i]
# Assign the most frequent label to the centroid
labels_map = [np.argmax(c) for c in counts]
labels_map = tf.convert_to_tensor(labels_map)
# Evaluation ops
# Lookup: centroid_id -> label
cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx)
# Compute accuracy
correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# ## Using the accuracy operation to check accuracy of model on test data
# Test Model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", session.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))
# ## TensorFlow graph
# ![graph.png](graph.png)
| HW3/Question4/q4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from scipy.stats import norm
# ### Solution 1
# +
# initialize
n = 9
X_bar = 36
var = 3**2
# a)
alpha = 0.05
z = norm.ppf(1 - alpha/2)
se = z*np.sqrt(var/n)
print(f"{(1-alpha)*100}% Confidence interval: ({X_bar-se:.2f}, {X_bar+se:.2f})")
# b)
alpha = 0.01
z = norm.ppf(1 - alpha/2)
se = z*np.sqrt(var/n)
print(f"{(1-alpha)*100}% Confidence interval: ({X_bar-se:.2f}, {X_bar+se:.2f})")
| Solutions/Chapter3/3.6. Confidence interval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RQ 1 (Frequency)
#
# > How often does unsafe code appear explicitly in Rust crates?
# ## Setup
# !pip install plotly
# +
# import libraries
import itertools
import functools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import tree
import utils
import top_crates
from whitelists import *
from drawing import *
# Configure pyplot: enlarge plots
plt.rcParams['figure.figsize'] = [15, 8]
# Configure pandas: show all columns when printing a dataframe
pd.set_option('display.max_columns', None)
# Configure pandas: show more rows when printing a dataframe
pd.set_option('display.min_rows', 20)
# -
# Load tables
selected_builds = utils.load_selected_builds()
unsafe_blocks = utils.load_unsafe_blocks()
traits = utils.load_selected_traits()
traits_impls = utils.load_selected_impl_definitions()
function_definitions = utils.load_selected_function_definitions()
# Validation
assert len(selected_builds) > 20000
assert selected_builds.build.is_unique
# +
# Preprocessing
crates = pd.DataFrame({
"crate": selected_builds.crate,
"crate_hash": selected_builds.crate_hash,
"package": selected_builds.package.map(lambda x: str(x).lower().replace("-", "_")),
})
user_written_unsafe_blocks = unsafe_blocks[unsafe_blocks.check_mode == "UnsafeBlockUserProvided"]
unsafe_function_definitions = function_definitions[function_definitions.unsafety == "Unsafe"]
unsafe_traits = traits[traits.unsafety == "Unsafe"]
unsafe_traits_impls = traits_impls[traits_impls.unsafety == "Unsafe"]
# Validation
assert len(user_written_unsafe_blocks) > 0
# -
# ## Query 1
#
# > Table 1 shows in both absolute and relative numbers how many crates contain unsafe code, and which unsafe features they use (our first query)
#
# #### Results from the paper for comparison (Table 1)
#
# | Unsafe Feature| #crates | % |
# | ------------- |--------------|-------|
# | None | 24,360 | 76.4 |
# | Any | 7,507 | 23.6 |
# | Blocks | 6,414 | 20.1 |
# | Function Declarations| 4,287 | 13.5 |
# | Trait Implementations| 1,591 | 5.0 |
# | Trait Declarations| 280 | 0.9 |
# +
join_key = "crate_hash"
crates["has_unsafe_blocks"] = crates[join_key].isin(set(user_written_unsafe_blocks[join_key].unique()))
crates["has_unsafe_functions"] = crates[join_key].isin(set(unsafe_function_definitions[join_key].unique()))
crates["has_unsafe_traits"] = crates[join_key].isin(set(unsafe_traits[join_key].unique()))
crates["has_unsafe_trait_impls"] = crates[join_key].isin(set(unsafe_traits_impls[join_key].unique()))
feature_names = [
"has_unsafe_blocks",
"has_unsafe_functions",
"has_unsafe_traits",
"has_unsafe_trait_impls",
]
crates["has_unsafe"] = functools.reduce(lambda x, y: x | y, [crates[f] for f in feature_names])
feature_names.append("has_unsafe")
crates["has_no_unsafe"] = ~crates["has_unsafe"]
feature_names.append("has_no_unsafe")
print("Number of crates: {}".format(len(crates)))
feature_freq = crates[feature_names].apply(sum).sort_values(ascending=False)
feature_freq.plot.bar(subplots=True, color="blue", title="Features per crate")
print(feature_freq)
print(feature_freq / len(crates) * 100)
# -
# ## Query 2
#
# > while Fig. 2 shows the relative amount of statements in unsafe blocks and functions in both all crates and, for readability, crates that contain at least one unsafe statement (our second query).
#
# #### Results from the paper for comparison (Fig. 2)
#
# ![Fig. 2](pics/unsafe_proportion.png)
#
# Load tables
selected_function_sizes = utils.load_selected_function_sizes()
selected_build_sizes = utils.load_selected_build_sizes()
assert len(selected_build_sizes.query('statement_count == 0')) == 0
crates_with_unsafe_statements_count = len(selected_build_sizes.query('user_unsafe_statement_count > 0'))
print("The number of crates with at least one user written unsafe MIR statement:",
crates_with_unsafe_statements_count)
print("Percentage:", 100 * crates_with_unsafe_statements_count / len(crates))
selected_build_sizes["ratio"] = selected_build_sizes.apply(
lambda row: 100 * row['user_unsafe_statement_count']/row['statement_count'], axis=1)
build_sizes_with_unsafe = selected_build_sizes.query('user_unsafe_statement_count > 0')
selected_builds = utils.load_selected_builds()
unsafe_selected_build_hashes = set(selected_build_sizes['build_crate_hash'])
selected_build_hashes = set(selected_builds['crate_hash'])
assert unsafe_selected_build_hashes.issubset(selected_build_hashes)
safe_selected_build_hashes = selected_build_hashes - unsafe_selected_build_hashes
safe_selected_build_sizes = []
for (_, build) in selected_builds.iterrows():
if build['crate_hash'] in unsafe_selected_build_hashes:
continue
safe_selected_build_sizes.append({
'package_name': build['package'],
'package_version': build['version'],
'crate_name': build['crate'],
'build_crate_hash': build['crate_hash'],
'edition': build['edition'],
'statement_count': 1,
'unsafe_statement_count': 0,
'user_unsafe_statement_count': 0,
'ratio': 0.0,
})
all_selected_build_sizes = selected_build_sizes.append(safe_selected_build_sizes)
assert len(all_selected_build_sizes) == len(selected_build_sizes) + len(safe_selected_build_sizes)
assert len(selected_builds) == len(all_selected_build_sizes)
print('Proportion of all crates for which unsafe statement ratio is <= 10%:',
100 * len(all_selected_build_sizes.query('ratio <= 10'))/len(all_selected_build_sizes))
print('Proportion of crates with at least one unsafe statement for which unsafe statement ratio is <= 20%:',
100 * len(build_sizes_with_unsafe.query('ratio <= 20'))/len(build_sizes_with_unsafe))
ax = all_selected_build_sizes.ratio.plot.hist(
cumulative=True,
density=1,
bins=len(build_sizes_with_unsafe),
xlim=(0, 100),
ylim=(0, 1),
histtype="step",
linewidth=4,
fontsize=26,
)
ax2 = build_sizes_with_unsafe.ratio.plot.hist(
cumulative=True,
density=1,
bins=len(build_sizes_with_unsafe),
xlim=(0, 100),
ylim=(0, 1),
histtype="step",
linewidth=4,
fontsize=26,
)
ax.title.set_size(20)
ax.set_xlabel("Proportion of unsafe statements in a crate", fontsize=32)
ax.set_ylabel("Percentage of crates", fontsize=32)
ax.set_axisbelow(True)
ax.set_yticklabels(['{:,.0%}'.format(x) for x in ax.get_yticks()])
ax.set_xticklabels(['{:,.0%}'.format(x/100) for x in ax.get_xticks()])
ax.set_xticks(range(0, 100, 10), minor=True)
ax.set_xticks(range(0, 100, 20))
ax.grid(True, linestyle='dotted')
plt.show()
| reports/RQ1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Nonparametric tests
# Test | One-sample | Two-sample | Two-sample (paired samples)
# ------------- | ------------- | ------------- | -------------
# **Sign** | $\times$ | | $\times$
# **Rank** | $\times$ | $\times$ | $\times$
# **Permutation** | $\times$ | $\times$ | $\times$
# ## Mirrors as potential environmental enrichment for individually housed laboratory mice
# (Sherwin, 2004): 16 laboratory mice were housed in two-compartment cages, with a mirror hanging in one of the compartments. To determine whether the mice have any preference regarding mirrors, the proportion of time each mouse spent in each of its two compartments was measured.
# +
import numpy as np
import pandas as pd
import itertools
from scipy import stats
from statsmodels.stats.descriptivestats import sign_test
from statsmodels.stats.weightstats import zconfint
# -
# %pylab inline
# ### Loading the data
mouses_data = pd.read_csv('mirror_mouses.txt', header = None)
mouses_data.columns = ['proportion_of_time']
mouses_data
mouses_data.describe()
pylab.hist(mouses_data.proportion_of_time)
pylab.show()
# ## One-sample tests
print '95%% confidence interval for the median time: [%f, %f]' % zconfint(mouses_data)
# ### Sign test
# $H_0\colon$ the median proportion of time spent in the compartment with the mirror equals 0.5
#
# $H_1\colon$ the median proportion of time spent in the compartment with the mirror is not equal to 0.5
print "M: %d, p-value: %f" % sign_test(mouses_data, 0.5)
# ### Wilcoxon signed-rank test
m0 = 0.5
stats.wilcoxon(mouses_data.proportion_of_time - m0)
# ### Permutation test
# $H_0\colon$ the mean equals 0.5
#
# $H_1\colon$ the mean is not equal to 0.5
def permutation_t_stat_1sample(sample, mean):
t_stat = sum(map(lambda x: x - mean, sample))
return t_stat
permutation_t_stat_1sample(mouses_data.proportion_of_time, 0.5)
def permutation_zero_distr_1sample(sample, mean, max_permutations = None):
centered_sample = map(lambda x: x - mean, sample)
if max_permutations:
signs_array = set([tuple(x) for x in 2 * np.random.randint(2, size = (max_permutations,
len(sample))) - 1 ])
else:
signs_array = itertools.product([-1, 1], repeat = len(sample))
distr = [sum(centered_sample * np.array(signs)) for signs in signs_array]
return distr
pylab.hist(permutation_zero_distr_1sample(mouses_data.proportion_of_time, 0.5), bins = 15)
pylab.show()
def permutation_test(sample, mean, max_permutations = None, alternative = 'two-sided'):
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
t_stat = permutation_t_stat_1sample(sample, mean)
zero_distr = permutation_zero_distr_1sample(sample, mean, max_permutations)
if alternative == 'two-sided':
return sum([1. if abs(x) >= abs(t_stat) else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'less':
return sum([1. if x <= t_stat else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'greater':
return sum([1. if x >= t_stat else 0. for x in zero_distr]) / len(zero_distr)
print "p-value: %f" % permutation_test(mouses_data.proportion_of_time, 0.5)
print "p-value: %f" % permutation_test(mouses_data.proportion_of_time, 0.5, 10000)
| statistics/Одновыборочные непараметрические критерии stat.non_parametric_tests_1sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # The Multivariate Gaussian distribution
#
# The density of a multivariate Gaussian with mean vector $\mu$ and covariance matrix $\Sigma$ is given as
#
# \begin{align}
# \mathcal{N}(x; \mu, \Sigma) &= |2\pi \Sigma|^{-1/2} \exp\left( -\frac{1}{2} (x-\mu)^\top \Sigma^{-1} (x-\mu) \right) \\
# & = \exp\left(-\frac{1}{2} x^\top \Sigma^{-1} x + \mu^\top \Sigma^{-1} x - \frac{1}{2} \mu^\top \Sigma^{-1} \mu -\frac{1}{2}\log \det(2\pi \Sigma) \right) \\
# \end{align}
#
# Here, $|X|$ denotes the determinant of a square matrix.
#
# $\newcommand{\trace}{\mathop{Tr}}$
#
# \begin{align}
# {\cal N}(s; \mu, P) & = |2\pi P|^{-1/2} \exp\left(-\frac{1}2 (s-\mu)^\top P^{-1} (s-\mu) \right)
# \\
# & = \exp\left(
# -\frac{1}{2}s^\top P^{-1} s + \mu^\top P^{-1}s - \frac{1}{2}\mu^\top P^{-1}\mu - \frac{1}{2}\log|2\pi P|
# \right) \\
# \log {\cal N}(s; \mu, P) & = -\frac{1}{2}s^\top{P^{-1}}s + \mu^\top P^{-1}s + \text{ const} \\
# & = -\frac{1}{2}\trace {P^{-1}} s s^\top + \mu^\top P^{-1}s + \text{ const} \\
# \end{align}
#
# ## Special Cases
#
# To gain the intuition, we take a look to a few special cases
# ### Bivariate Gaussian
#
# #### Example 1: Identity covariance matrix
#
# $
# x = \left(\begin{array}{c} x_1 \\ x_2 \end{array} \right)
# $
#
# $
# \mu = \left(\begin{array}{c} 0 \\ 0 \end{array} \right)
# $
#
# $
# \Sigma = \left(\begin{array}{cc} 1& 0 \\ 0 & 1 \end{array} \right) = I_2
# $
#
# \begin{align}
# \mathcal{N}(x; \mu, \Sigma) &= |2\pi I_{2}|^{-1/2} \exp\left( -\frac{1}{2} x^\top x \right)
# = (2\pi)^{-1} \exp\left( -\frac{1}{2} \left( x_1^2 + x_2^2\right) \right) = (2\pi)^{-1/2} \exp\left( -\frac{1}{2} x_1^2 \right)(2\pi)^{-1/2} \exp\left( -\frac{1}{2} x_2^2 \right)\\
# & = \mathcal{N}(x_1; 0, 1)\, \mathcal{N}(x_2; 0, 1)
# \end{align}
#
# #### Example 2: Diagonal covariance
# $\newcommand{\diag}{\text{diag}}$
#
# $
# x = \left(\begin{array}{c} x_1 \\ x_2 \end{array} \right)
# $
#
# $
# \mu = \left(\begin{array}{c} \mu_1 \\ \mu_2 \end{array} \right)
# $
#
# $
# \Sigma = \left(\begin{array}{cc} s_1 & 0 \\ 0 & s_2 \end{array} \right) = \diag(s_1, s_2)
# $
#
# \begin{eqnarray}
# \mathcal{N}(x; \mu, \Sigma) &=& \left|2\pi \left(\begin{array}{cc} s_1 & 0 \\ 0 & s_2 \end{array} \right)\right|^{-1/2} \exp\left( -\frac{1}{2} \left(\begin{array}{c} x_1 - \mu_1 \\ x_2-\mu_2 \end{array} \right)^\top \left(\begin{array}{cc} 1/s_1 & 0 \\ 0 & 1/s_2 \end{array} \right) \left(\begin{array}{c} x_1 - \mu_1 \\ x_2-\mu_2 \end{array} \right) \right) \\
# &=& ((2\pi)^2 s_1 s_2 )^{-1/2} \exp\left( -\frac{1}{2} \left( \frac{(x_1-\mu_1)^2}{s_1} + \frac{(x_2-\mu_2)^2}{s_2}\right) \right) \\
# & = &\mathcal{N}(x_1; \mu_1, s_1) \mathcal{N}(x_2; \mu_2, s_2)
# \end{eqnarray}
#
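# A quick numerical check of this factorization (a sketch; the means, variances and
# evaluation point below are arbitrary illustrative numbers):
# +
import numpy as np
from scipy.stats import multivariate_normal, norm

mu1, mu2 = 1.0, -2.0
s1, s2 = 0.5, 2.0                      # variances
x_check = np.array([0.3, -1.2])

joint_pdf = multivariate_normal(mean=[mu1, mu2], cov=np.diag([s1, s2])).pdf(x_check)
product_pdf = norm(mu1, np.sqrt(s1)).pdf(x_check[0]) * norm(mu2, np.sqrt(s2)).pdf(x_check[1])
print(joint_pdf, product_pdf)          # the two values agree up to floating-point error
# -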
# #### Example 3:
# $
# x = \left(\begin{array}{c} x_1 \\ x_2 \end{array} \right)
# $
#
# $
# \mu = \left(\begin{array}{c} \mu_1 \\ \mu_2 \end{array} \right)
# $
#
# $
# \Sigma = \left(\begin{array}{cc} 1 & \rho \\ \rho & 1 \end{array} \right)
# $
# for $-1<\rho<1$.
#
# Need $K = \Sigma^{-1}$. When $|\Sigma| \neq 0$ we have $K\Sigma = I$.
#
# $
# \left(\begin{array}{cc} 1 & \rho \\ \rho & 1 \end{array} \right) \left(\begin{array}{cc} k_{11} & k_{12} \\ k_{21} & k_{22} \end{array} \right) = \left(\begin{array}{cc} 1& 0 \\ 0 & 1 \end{array} \right)
# $
# \begin{align}
# k_{11} &+ \rho k_{21} & & &=1 \\
# \rho k_{11} &+ k_{21} & & &=0 \\
# && k_{12} &+ \rho k_{22} &=0 \\
# && \rho k_{12} &+ k_{22} &=1 \\
# \end{align}
# Solving these equations leads to the solution
#
# $$
# \left(\begin{array}{cc} k_{11} & k_{12} \\ k_{21} & k_{22} \end{array} \right) = \frac{1}{1-\rho^2}\left(\begin{array}{cc} 1 & -\rho \\ -\rho & 1 \end{array} \right)
# $$
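# The derived inverse is easy to verify numerically (a sketch with an arbitrary value of $\rho$):
# +
import numpy as np

rho_check = 0.6
Sigma_check = np.array([[1.0, rho_check], [rho_check, 1.0]])
K_check = np.array([[1.0, -rho_check], [-rho_check, 1.0]]) / (1.0 - rho_check ** 2)
print(np.allclose(np.linalg.inv(Sigma_check), K_check))   # expected: True
# -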
# Plotting the Equal probability contours
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from notes_utilities import pnorm_ball_points
RHO = np.arange(-0.9,1,0.3)
plt.figure(figsize=(20,20/len(RHO)))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
for i,rho in enumerate(RHO):
plt.subplot(1,len(RHO),i+1)
plt.axis('equal')
ax = plt.gca()
ax.set_xlim(-4,4)
ax.set_ylim(-4,4)
S = np.mat([[1, rho],[rho,1]])
A = np.linalg.cholesky(S)
dx,dy = pnorm_ball_points(3*A)
plt.title(r'$\rho =$ '+str(rho if np.abs(rho)>1E-9 else 0), fontsize=16)
ln = plt.Line2D(dx,dy,markeredgecolor='k', linewidth=1, color='b')
ax.add_line(ln)
ax.set_axis_off()
#ax.set_visible(False)
plt.show()
# +
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
from IPython.display import clear_output, display, HTML
from matplotlib import rc
from notes_utilities import bmatrix, pnorm_ball_line
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
fig = plt.figure(figsize=(5,5))
S = np.array([[1,0],[0,1]])
dx,dy = pnorm_ball_points(S)
ln = plt.Line2D(dx,dy,markeredgecolor='k', linewidth=1, color='b')
dx,dy = pnorm_ball_points(np.eye(2))
ln2 = plt.Line2D(dx,dy,markeredgecolor='k', linewidth=1, color='k',linestyle=':')
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
ax = fig.gca()
ax.set_xlim((-4,4))
ax.set_ylim((-4,4))
txt = ax.text(-1,-3,'$\left(\right)$',fontsize=15)
ax.add_line(ln)
ax.add_line(ln2)
plt.close(fig)
def set_line(s_1, s_2, rho, p, a, q):
S = np.array([[s_1**2, rho*s_1*s_2],[rho*s_1*s_2, s_2**2]])
A = np.linalg.cholesky(S)
#S = A.dot(A.T)
dx,dy = pnorm_ball_points(A,p=p)
ln.set_xdata(dx)
ln.set_ydata(dy)
dx,dy = pnorm_ball_points(a*np.eye(2),p=q)
ln2.set_xdata(dx)
ln2.set_ydata(dy)
txt.set_text(bmatrix(S))
display(fig)
ax.set_axis_off()
interact(set_line, s_1=(0.1,2,0.01), s_2=(0.1, 2, 0.01), rho=(-0.99, 0.99, 0.01), p=(0.1,4,0.1), a=(0.2,10,0.1), q=(0.1,4,0.1))
# -
# %run plot_normballs.py
# %run matrix_norm_sliders.py
# Exercise:
#
# $
# x = \left(\begin{array}{c} x_1 \\ x_2 \end{array} \right)
# $
#
# $
# \mu = \left(\begin{array}{c} \mu_1 \\ \mu_2 \end{array} \right)
# $
#
# $
# \Sigma = \left(\begin{array}{cc} s_{11} & s_{12} \\ s_{12} & s_{22} \end{array} \right)
# $
#
#
# Need $K = \Sigma^{-1}$. When $|\Sigma| \neq 0$ we have $K\Sigma = I$.
#
# $
# \left(\begin{array}{cc} s_{11} & s_{12} \\ s_{12} & s_{22} \end{array} \right) \left(\begin{array}{cc} k_{11} & k_{12} \\ k_{21} & k_{22} \end{array} \right) = \left(\begin{array}{cc} 1& 0 \\ 0 & 1 \end{array} \right)
# $
#
# Derive the result
# $$
# K = \left(\begin{array}{cc} k_{11} & k_{12} \\ k_{21} & k_{22} \end{array} \right)
# $$
#
# Step 1: Verify
#
# $$
# \left(\begin{array}{cc} s_{11} & s_{12} \\ s_{21} & s_{22} \end{array} \right) = \left(\begin{array}{cc} 1 & -s_{12}/s_{22} \\ 0 & 1 \end{array} \right) \left(\begin{array}{cc} s_{11}-s_{12}^2/s_{22} & 0 \\ 0 & s_{22} \end{array} \right) \left(\begin{array}{cc} 1 & 0 \\ -s_{12}/s_{22} & 1 \end{array} \right)
# $$
#
# Step 2: Show that
# $$
# \left(\begin{array}{cc} 1 & a\\ 0 & 1 \end{array} \right)^{-1} = \left(\begin{array}{cc} 1 & -a\\ 0 & 1 \end{array} \right)
# $$
# and
# $$
# \left(\begin{array}{cc} 1 & 0\\ b & 1 \end{array} \right)^{-1} = \left(\begin{array}{cc} 1 & 0\\ -b & 1 \end{array} \right)
# $$
#
# Step 3: Using the fact $(A B)^{-1} = B^{-1} A^{-1}$ and $s_{12}=s_{21}$, show that and simplify
# $$
# \left(\begin{array}{cc} s_{11} & s_{12} \\ s_{21} & s_{22} \end{array} \right)^{-1} =
# \left(\begin{array}{cc} 1 & 0 \\ s_{12}/s_{22} & 1 \end{array} \right)
# \left(\begin{array}{cc} 1/(s_{11}-s_{12}^2/s_{22}) & 0 \\ 0 & 1/s_{22} \end{array} \right) \left(\begin{array}{cc} 1 & s_{12}/s_{22} \\ 0 & 1 \end{array} \right)
# $$
#
#
# ## Gaussian Processes Regression
#
#
# In Bayesian machine learning, a frequent problem encountered is the regression problem where we are given a pairs of inputs $x_i \in \mathbb{R}^N$ and associated noisy observations $y_i \in \mathbb{R}$. We assume the following model
#
# \begin{eqnarray*}
# y_i &\sim& {\cal N}(y_i; f(x_i), R)
# \end{eqnarray*}
#
# The interesting thing about a Gaussian process is that the function $f$ is not specified in close form, but we assume that the function values
# \begin{eqnarray*}
# f_i & = & f(x_i)
# \end{eqnarray*}
# are jointly Gaussian distributed as
# \begin{eqnarray*}
# \left(
# \begin{array}{c}
# f_1 \\
# \vdots \\
# f_L \\
# \end{array}
# \right) & = & f_{1:L} \sim {\cal N}(f_{1:L}; 0, \Sigma(x_{1:L}))
# \end{eqnarray*}
# Here, we define the entries of the covariance matrix $\Sigma(x_{1:L})$ as
# \begin{eqnarray*}
# \Sigma_{i,j} & = & K(x_i, x_j)
# \end{eqnarray*}
# for $i,j \in \{1, \dots, L\}$. Here, $K$ is a given covariance function. Now, if we wish to predict the value of $f$ for a new $x$, we simply form the following joint distribution:
# \begin{eqnarray*}
# \left(
# \begin{array}{c}
# f_1 \\
# f_2 \\
# \vdots \\
# f_L \\
# f \\
# \end{array}
# \right) & \sim & {\cal N}\left( \left(\begin{array}{c}
# 0 \\
# 0 \\
# \vdots \\
# 0 \\
# 0 \\
# \end{array}\right)
# , \left(\begin{array}{cccccc}
# K(x_1,x_1) & K(x_1,x_2) & \dots & K(x_1, x_L) & K(x_1, x) \\
# K(x_2,x_1) & K(x_2,x_2) & \dots & K(x_2, x_L) & K(x_2, x) \\
# \vdots &\\
# K(x_L,x_1) & K(x_L,x_2) & \dots & K(x_L, x_L) & K(x_L, x) \\
# K(x,x_1) & K(x,x_2) & \dots & K(x, x_L) & K(x, x) \\
# \end{array}\right) \right) \\
# \left(
# \begin{array}{c}
# f_{1:L} \\
# f
# \end{array}
# \right) & \sim & {\cal N}\left( \left(\begin{array}{c}
# \mathbf{0} \\
# 0 \\
# \end{array}\right)
# , \left(\begin{array}{cc}
# \Sigma(x_{1:L}) & k(x_{1:L}, x) \\
# k(x_{1:L}, x)^\top & K(x, x) \\
# \end{array}\right) \right) \\
# \end{eqnarray*}
#
# Here, $k(x_{1:L}, x)$ is a $L \times 1$ vector with entries $k_i$ where
#
# \begin{eqnarray*}
# k_i = K(x_i, x)
# \end{eqnarray*}
#
# Popular choices of covariance functions to generate smooth regression functions include a Bell shaped one
# \begin{eqnarray*}
# K_1(x_i, x_j) & = & \exp\left(-\frac{1}2 \| x_i - x_j \|^2 \right)
# \end{eqnarray*}
# and a Laplacian
# \begin{eqnarray*}
# K_2(x_i, x_j) & = & \exp\left(-\frac{1}2 \| x_i - x_j \| \right)
# \end{eqnarray*}
#
# where $\| x \| = \sqrt{x^\top x}$ is the Euclidian norm.
#
# ## Part 1
# Derive the expressions to compute the predictive density
# \begin{eqnarray*}
# p(\hat{y}| y_{1:L}, x_{1:L}, \hat{x})
# \end{eqnarray*}
#
#
# \begin{eqnarray*}
# p(y | y_{1:L}, x_{1:L}, x) &=& {\cal N}(y; m, S) \\
# m & = & \\
# S & = &
# \end{eqnarray*}
#
# ## Part 2
# Write a program to compute the mean and covariance of $p(\hat{y}| y_{1:L}, x_{1:L}, \hat{x})$ to generate a for the following data:
#
# x = [-2 -1 0 3.5 4]
# y = [4.1 0.9 2 12.3 15.8]
#
# Try different covariance functions $K_1$ and $K_2$ and observation noise covariances $R$ and comment on the nature of the approximation.
#
# ## Part 3
# Suppose we are using a covariance function parameterised by
# \begin{eqnarray*}
# K_\beta(x_i, x_j) & = & \exp\left(-\frac{1}\beta \| x_i - x_j \|^2 \right)
# \end{eqnarray*}
# Find the optimum regularisation parameter $\beta^*(R)$ as a function of observation noise variance via maximisation of the marginal likelihood, i.e.
# \begin{eqnarray*}
# \beta^* & = & \arg\max_{\beta} p(y_{1:L}| x_{1:L}, \beta, R)
# \end{eqnarray*}
# Generate a plot of $\beta^*(R)$ for $R = 0.01, 0.02, \dots, 1$ for the dataset given in Part 2.
#
# +
def cov_fun_bell(x1,x2,delta=1):
return np.exp(-0.5*np.abs(x1-x2)**2/delta)
def cov_fun_exp(x1,x2):
return np.exp(-0.5*np.abs(x1-x2))
def cov_fun(x1,x2):
return cov_fun_bell(x1,x2,delta=0.1)
R = 0.05
x = np.array([-2, -1, 0, 3.5, 4]);
y = np.array([4.1, 0.9, 2, 12.3, 15.8]);
Sig = cov_fun(x.reshape((len(x),1)),x.reshape((1,len(x)))) + R*np.eye(len(x))
SigI = np.linalg.inv(Sig)
xx = np.linspace(-10,10,100)
yy = np.zeros_like(xx)
ss = np.zeros_like(xx)
for i in range(len(xx)):
z = np.r_[x,xx[i]]
CrossSig = cov_fun(x,xx[i])
PriorSig = cov_fun(xx[i],xx[i]) + R
yy[i] = np.dot(np.dot(CrossSig, SigI),y)
ss[i] = PriorSig - np.dot(np.dot(CrossSig, SigI),CrossSig)
plt.plot(x,y,'or')
plt.plot(xx,yy,'b.')
plt.plot(xx,yy+3*np.sqrt(ss),'b:')
plt.plot(xx,yy-3*np.sqrt(ss),'b:')
plt.show()
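# Sketch for Part 3 (not a full solution; the beta grid is an illustrative choice and the
# noise variance R defined above is reused): the marginal likelihood of the data under the
# GP prior with covariance K_beta and observation noise R is the Gaussian N(y; 0, K_beta + R*I),
# so beta* can be found by maximising its log over a grid of beta values.
def cov_fun_beta(x1, x2, beta):
    return np.exp(-np.abs(x1 - x2) ** 2 / beta)

def log_marginal_likelihood(beta, R, x, y):
    C = cov_fun_beta(x.reshape(-1, 1), x.reshape(1, -1), beta) + R * np.eye(len(x))
    sign, logdet = np.linalg.slogdet(2 * np.pi * C)
    return -0.5 * y.dot(np.linalg.solve(C, y)) - 0.5 * logdet

betas = np.linspace(0.1, 20, 200)
lml = [log_marginal_likelihood(b, R, x, y) for b in betas]
beta_star = betas[np.argmax(lml)]
print('approximate beta* for R = %.2f: %.2f' % (R, beta_star))
plt.plot(betas, lml)
plt.xlabel('beta')
plt.ylabel('log marginal likelihood')
plt.show()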
| MultivariateGaussian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Floats - Internal Representation
# The ``float`` class can be used to represent real numbers.
help(float)
# The ``float`` class has a single constructor, which can take a number or a string and will attempt to convert it to a float.
float(10)
float(3.14)
float('0.1')
# However, strings that represent fractions cannot be converted to floats, unlike the Fraction class we saw earlier.
float('22/7')
# If you really want to get a float from a string such as ``'22/7'``, you could first create a ``Fraction``, then create a ``float`` from that:
from fractions import Fraction
float(Fraction('22/7'))
# Floats do not always have an exact representation:
print(0.1)
# Although this looks like ``0.1`` exactly, we need to reveal more digits after the decimal point to see what's going on:
format(0.1, '.25f')
# However, certain numbers can be represented exactly in a binary fraction expansion:
format(0.125, '.25f')
# This is because 0.125 is precisely 1/8, or 1/(2^3)
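# One way to see this concretely (a small illustrative check) is to ask the ``Fraction`` class for the exact value each float actually stores:
from fractions import Fraction
print(Fraction(0.125))
print(Fraction(0.1))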
| python-tuts/0-beginner/3-Numeric-Types/05 - Floats - Internal Representation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Analysis of data from fault-scarp model runs.
#
# Start by setting up arrays to hold the data.
# +
import numpy as np
N = 125
run_number = np.zeros(N)
hill_length = np.zeros(N)
dist_rate = np.zeros(N)
uplint = np.zeros(N)
max_ht = np.zeros(N)
mean_slope = np.zeros(N)
mean_ht = np.zeros(N)
# -
# Set up info on domain size and parameters.
domain_lengths = np.array([58, 103, 183, 325, 579])
disturbance_rates = 10.0 ** np.array([-4, -3.5, -3, -2.5, -2])
uplift_intervals = 10.0 ** np.array([2, 2.5, 3, 3.5, 4])
for i in range(N):
hill_length[i] = domain_lengths[i // 25]
dist_rate[i] = disturbance_rates[i % 5]
uplint[i] = uplift_intervals[(i // 5) % 5]
# Read data from file.
# +
import csv
i = -1
with open('./grain_hill_stats.csv', 'rb') as csvfile:
myreader = csv.reader(csvfile)
for row in myreader:
if i > -1:
run_number[i] = int(row[0])
max_ht[i] = float(row[1])
mean_ht[i] = float(row[2])
mean_slope[i] = float(row[3])
i += 1
csvfile.close()
# -
# Let's revisit the question of how to plot, now that I seem to have solved the "not run long enough" problem. If you take the primary variables, the scaling is something like
#
# $h = f( d, \tau, \lambda )$
#
# There is just one dimension here: time. So our normalization becomes simply:
#
# $h = f( d\tau, \lambda )$
#
# This suggests plotting $h$ versus $d\tau$ segregated by $\lambda$.
max_ht
# +
import matplotlib.pyplot as plt
# %matplotlib inline
dtau = dist_rate * uplint # Here's our dimensionless disturbance rate
syms = ['k+', 'k.', 'kv', 'k*', 'ko']
for i in range(5):
idx = (i * 25) + np.arange(25)
plt.loglog(dtau[idx], mean_ht[idx], syms[i])
plt.xlabel(r"Dimensionless disturbance rate, $d'$", {'fontsize' : 12})
plt.ylabel(r'Mean height, $h$', {'fontsize' : 12})
plt.legend([r'$\lambda = 58$', '$\lambda = 103$', '$\lambda = 183$', '$\lambda = 325$', '$\lambda = 579$'])
#plt.savefig('mean_ht_vs_dist_rate.pdf') # UNCOMMENT TO GENERATE FIGURE FILE
# -
# Try Scott's idea of normalizing by lambda
for i in range(5):
idx = (i * 25) + np.arange(25)
plt.loglog(dtau[idx], mean_ht[idx] / hill_length[idx], syms[i])
plt.xlabel(r"Dimensionless disturbance rate, $d'$", {'fontsize' : 12})
plt.ylabel(r'$h / \lambda$', {'fontsize' : 12})
plt.legend([r'$\lambda = 58$', '$\lambda = 103$', '$\lambda = 183$', '$\lambda = 325$', '$\lambda = 579$'])
plt.savefig('h_over_lam_vs_dist_rate.pdf')
# This (h vs d') is actually a fairly straightforward result. For any given hillslope width, there are three domains: (1) threshold, in which height is independent of disturbance or uplift rate; (2) linear, in which height is inversely proportional to $d\tau$, and (3) finite-size, where mean height is only one or two cells, and which is basically meaningless and can be ignored.
# Next, let's look at the effective diffusivity. One method is to start with mean hillslope height, $H_m$. Diffusion theory predicts that mean height should be given by:
#
# $H = \frac{U}{3D}L^2$
#
# Then simply invert this to solve for $D$:
#
# $D = \frac{U}{3H}L^2$
#
# In the particle model, $H$ in real length units is equal to height in cells, $h$, times scale of a cell, $\delta$. Similarly, $L = \lambda \delta / 2$, and $U = \delta / I_u$, where $I_u$ is uplift interval in cells/time (the factor of 2 in $\lambda$ comes from the fact that hillslope length is half the domain length). Substituting,
#
# $D = \frac{4}{3I_u h} \lambda^2 \delta^2$
#
# This of course requires defining cell size. We could also do it in terms of a disturbance rate, $d_{eff}$, equal to $D/\delta^2$,
#
# $d_{eff} = \frac{4}{3I_u h} \lambda^2$
#
# Ok, here's a neat thing: we can define a dimensionless effective diffusivity as follows:
#
# $D' = \frac{D}{d \delta^2} = \frac{4}{3 d I_u h} \lambda^2$
#
# This measures the actual diffusivity relative to the nominal value reflected by the disturbance rate. Here we'll plot it against slope gradient in both linear and log-log.
#
# +
hill_halflen = hill_length / 2.0
D_prime = (hill_halflen * hill_halflen) / (12 * uplint * mean_ht * dist_rate)
plt.plot(mean_slope, D_prime, 'k.')
plt.xlabel('Mean slope gradient')
plt.ylabel('Dimensionless diffusivity')
# -
syms = ['k+', 'k.', 'kv', 'k*', 'ko']
for i in range(0, 5):
idx = (i * 25) + np.arange(25)
plt.semilogy(mean_slope[idx], D_prime[idx], syms[i])
plt.xlabel('Mean slope gradient', {'fontsize' : 12})
plt.ylabel('Dimensionless diffusivity', {'fontsize' : 12})
plt.ylim([1.0e0, 1.0e5])
plt.legend(['L = 58', 'L = 103', 'L = 183', 'L = 325', 'L = 579'])
idx1 = np.where(max_ht > 4)[0]
idx2 = np.where(max_ht <= 4)[0]
plt.semilogy(mean_slope[idx1], D_prime[idx1], 'ko', mfc='none')
plt.semilogy(mean_slope[idx2], D_prime[idx2], '.', mfc='0.5')
plt.xlabel('Mean slope gradient', {'fontsize' : 12})
plt.ylabel(r"Dimensionless diffusivity, $D_e'$", {'fontsize' : 12})
plt.legend(['Mean height > 4 cells', 'Mean height <= 4 cells'])
plt.savefig('dimless_diff_vs_grad.pdf')
# Just for fun, let's try to isolate the portion of $D_e$ that doesn't contain Furbish et al.'s $\cos^2\theta$ factor. In other words, we'll plot against $\cos^2 \theta S$. Remember that $S=\tan \theta$, so $\theta = \tan^{-1} S$ and $\cos \theta = \cos\tan^{-1}S$.
theta = np.arctan(mean_slope)
cos_theta = np.cos(theta)
cos2_theta = cos_theta * cos_theta
cos2_theta_S = cos2_theta * mean_slope
idx1 = np.where(max_ht > 4)[0]
idx2 = np.where(max_ht <= 4)[0]
plt.semilogy(cos2_theta_S[idx1], D_prime[idx1], 'ko', mfc='none')
plt.semilogy(cos2_theta_S[idx2], D_prime[idx2], '.', mfc='0.5')
plt.xlabel(r'Mean slope gradient $\times \cos^2 \theta$', {'fontsize' : 12})
plt.ylabel(r"Dimensionless diffusivity, $D_e'$", {'fontsize' : 12})
plt.legend(['Mean height > 4 cells', 'Mean height <= 4 cells'])
plt.savefig('dimless_diff_vs_grad_cos2theta.pdf')
# Now let's try $D / (1 - (S/S_c)^2)$ and see if that collapses things...
Sc = np.tan(np.pi * 30.0 / 180.0)
de_with_denom = D_prime * (1.0 - (mean_slope / Sc) ** 2)
# +
plt.semilogy(mean_slope[idx1], D_prime[idx1], 'ko', mfc='none')
plt.semilogy(mean_slope[idx2], D_prime[idx2], '.', mfc='0.5')
plt.xlabel('Mean slope gradient', {'fontsize' : 12})
plt.ylabel(r"Dimensionless diffusivity, $D_e'$", {'fontsize' : 12})
plt.legend(['Mean height > 4 cells', 'Mean height <= 4 cells'])
# Now add analytical
slope = np.arange(0, 0.6, 0.05)
D_pred = 10.0 / (1.0 - (slope/Sc)**2)
plt.plot(slope, D_pred, 'r')
# -
# Version of the De-S plot with lower end zoomed in to find the approximate asymptote:
idx1 = np.where(max_ht > 4)[0]
idx2 = np.where(max_ht <= 4)[0]
plt.semilogy(mean_slope[idx1], D_prime[idx1], 'ko', mfc='none')
plt.semilogy(mean_slope[idx2], D_prime[idx2], '.', mfc='0.5')
plt.xlabel('Mean slope gradient', {'fontsize' : 12})
plt.ylabel(r"Dimensionless diffusivity, $D_e'$", {'fontsize' : 12})
plt.ylim(10, 100)
# ====================================================
# OLDER STUFF BELOW HERE
# Start with a plot of $D$ versus slope for given fixed values of everything but $I_u$.
halflen = hill_length / 2.0
D = (halflen * halflen) / (2.0 * uplint * max_ht)
print np.amin(max_ht)
import matplotlib.pyplot as plt
# %matplotlib inline
# +
idx = np.arange(0, 25, 5)
plt.semilogy(mean_slope[idx], D[idx], '.')
idx = np.arange(25, 50, 5)
plt.plot(mean_slope[idx], D[idx], '.')
idx = np.arange(50, 75, 5)
plt.plot(mean_slope[idx], D[idx], '.')
idx = np.arange(75, 100, 5)
plt.plot(mean_slope[idx], D[idx], '.')
idx = np.arange(100, 125, 5)
plt.plot(mean_slope[idx], D[idx], '.')
# +
idx = np.arange(25, 50, 5)
plt.plot(mean_slope[idx], D[idx], '.')
idx = np.arange(100, 125, 5)
plt.plot(mean_slope[idx], D[idx], '.')
# -
# Is there something we could do with integrated elevation? To reduce noise...
#
# $\int_0^L z(x) dx = A = \int_0^L \frac{U}{2D}(L^2 - x^2) dx$
#
# $A= \frac{U}{2D}L^3 - \frac{U}{6D}L^3$
#
# $A= \frac{U}{3D}L^3$
#
# $A/L = H_{mean} = \frac{U}{3D}L^2$
#
# Rearranging,
#
# $D = \frac{U}{3H_{mean}}L^2$
#
# $D/\delta^2 = \frac{1}{3 I_u h_{mean}} \lambda^2$
#
# This might be more stable, since it measures area (a cumulative metric).
# First, a little nondimensionalization. We have an outcome, mean height, $h_m$, that is a function of three inputs: disturbance rate, $d$, system length $\lambda$, and uplift interval, $I_u$. If we treat cells as a kind of dimension, our dimensions are: C, C/T, C, T/C. This implies two dimensionless parameters:
#
# $h_m / \lambda = f( d I_u )$
#
# So let's calculate these quantities:
hmp = mean_ht / hill_length
di = dist_rate * uplint
plt.plot(di, hmp, '.')
plt.loglog(di, hmp, '.')
# I guess that's kind of a collapse? Need to split apart by different parameters. But first, let's try the idea of an effective diffusion coefficient:
dd = (1.0 / (uplint * mean_ht)) * halflen * halflen
plt.plot(mean_slope, dd, '.')
# Ok, kind of a mess. Let's try holding everything but uplift interval constant.
var_uplint = np.arange(0, 25, 5) + 2
for i in range(5):
idx = (i * 25) + var_uplint
plt.plot(mean_slope[idx], dd[idx], '.')
plt.xlabel('Mean slope gradient')
plt.ylabel('Effective diffusivity')
plt.legend(['L = 58', 'L = 103', 'L = 183', 'L = 325', 'L = 579'])
var_uplint = np.arange(0, 25, 5) + 3
for i in range(5):
idx = (i * 25) + var_uplint
plt.plot(mean_slope[idx], dd[idx], '.')
plt.xlabel('Mean slope gradient')
plt.ylabel('Effective diffusivity')
plt.legend(['L = 58', 'L = 103', 'L = 183', 'L = 325', 'L = 579'])
var_uplint = np.arange(0, 25, 5) + 4
for i in range(5):
idx = (i * 25) + var_uplint
plt.plot(mean_slope[idx], dd[idx], '.')
plt.xlabel('Mean slope gradient')
plt.ylabel('Effective diffusivity')
plt.legend(['L = 58', 'L = 103', 'L = 183', 'L = 325', 'L = 579'])
for i in range(5):
idx = np.arange(5) + 100 + 5 * i
plt.loglog(di[idx], hmp[idx], '.')
plt.xlabel('d I_u')
plt.ylabel('H_m / L')
plt.grid('on')
plt.legend(['I_u = 100', 'I_u = 316', 'I_u = 1000', 'I_u = 3163', 'I_u = 10,000'])
hmp2 = (mean_ht + 0.5) / hill_length
for i in range(5):
idx = np.arange(5) + 0 + 5 * i
plt.loglog(di[idx], hmp2[idx], '.')
plt.xlabel('d I_u')
plt.ylabel('H_m / L')
plt.grid('on')
plt.legend(['I_u = 100', 'I_u = 316', 'I_u = 1000', 'I_u = 3163', 'I_u = 10,000'])
# They don't seem to segregate much by $I_u$. I suspect they segregate by $L$. So let's plot ALL the data, colored by $L$:
for i in range(5):
idx = i * 25 + np.arange(25)
plt.loglog(di[idx], hmp2[idx], '.')
plt.xlabel('d I_u')
plt.ylabel('H_m / L')
plt.grid('on')
plt.legend(['L = 58', 'L = 103', 'L = 183', 'L = 325', 'L = 579'])
# The above plot makes sense actually. Consider the end members:
#
# At low $d I_u$, you have angle-of-repose:
#
# $H_m = \tan (30^\circ) L/4$ (I think)
#
# or
#
# $H_m / L \approx 0.15$
#
# At high $d I_u$, we have the diffusive case:
#
# $H_m = \frac{U}{3D} L^2$, or
#
# $H_m / L = \frac{U}{3D} L$
#
# But wait a minute, that's backwards from what the plot shows. Could this be a finite-size effect? Let's suppose that finite-size effects mean that there's a minimum $H_m$ equal to $N$ times the size of one particle. Then,
#
# $H_m / L = N / L$
#
# which has the right direction. What would $N$ actually be? From reading the graph above, estimate that (using $L$ as half-length, so half of the above numbers in legend):
#
# For $L=29$, $N/L \approx 0.02$
#
# For $L=51.5$, $N/L \approx 0.015$
#
# For $L=91.5$, $N/L \approx 0.009$
#
# For $L=162.5$, $N/L \approx 0.0055$
#
# For $L=289.5$, $N/L \approx 0.004$
nl = np.array([0.02, 0.015, 0.009, 0.0055, 0.004])
l = np.array([29, 51.5, 91.5, 162.5, 289.5])
n = nl * l
n
1.0 / l
# Ok, so these are all hovering around 1 cell! So, that could well explain the scatter at the right side.
#
# This is starting to make more sense. The narrow window around $d I_u \approx 10$ represents the diffusive regime. To the left, the angle-of-repose regime. To the right, the finite-size-effect regime. For good reasons, the diffusive regime is biggest with the biggest $L$ (more particles, so finite-size effect doesn't show up until larger $d I_u$). Within the diffusive regime, points separate according to scale, reflecting the $H_m / L \propto L$ effect. So, if we took points with varying $L$ but identical $d I_u$, ...
idx = np.where(np.logical_and(di>9.0, di<11.0))[0]
hmd = mean_ht[idx]
ld = hill_length[idx]
plt.plot(ld, hmd/ld, '.')
idx2 = np.where(np.logical_and(di>0.9, di<1.1))[0]
plt.plot(hill_length[idx2], mean_ht[idx2]/hill_length[idx2], 'o')
plt.grid('on')
plt.xlabel('L')
plt.ylabel('Hm/L')
plt.plot([0, 600.0], [0.5774/4.0, 0.5774/4.0])
plt.plot([0.0, 600.0], [0.0, 0.02 * 0.1/3.0 * 600.0])
# In the above plot, diffusive behavior is indicated by a slope of 1:1, whereas angle-of-repose is indicated by a flat trend. One thing this says is that, for a given $d I_u$, a longer slope is more likely to be influenced by the angle of repose. That makes sense I think...?
plt.loglog(ld, hmd, '*')
plt.grid('on')
# Now, what if it's better to consider disturbance rate, $d$, to be in square cells / time? That is, when dimensionalized, to be $L^2/T$ rather than $L/T$? Let's see what happens when we do it this way:
d2 = dist_rate * uplint / hill_length
plt.loglog(hmp, d2, '.')
# Not so great ...
#
#
# Let's try another idea, based again on dimensional analysis. Start with dimensional quantities $U$ (uplift rate), $L$ (length), $H$ (mean height), and $\delta$ (cell size). Nondimensionalize in a somewhat surprising way:
#
# $\frac{UL^2}{Hd\delta^2} = f( d\delta / U, L/\delta )$
#
# This is actually a dimensionless diffusivity: diffusivity relative to disturbance intensity.
#
# Now translate back: $H=h\delta$, $U=\delta / I_u$, and $L=\lambda \delta$:
#
# $\frac{\lambda^2}{hdI_u} = f( d I_u, \lambda )$
#
# So what happens if we plot thus?
diff_nd = halflen * halflen / (uplint * mean_ht * dist_rate)
diu = dist_rate * uplint
for i in range(5):
idx = (i * 25) + np.arange(25)
plt.loglog(diu[idx], diff_nd[idx], '+')
plt.grid('on')
plt.xlabel('Dimensionless disturbance rate (d I_u)')
plt.ylabel('Dimensionless diffusivity (l^2/hdI_u)')
plt.legend(['\lambda= 58', '\lambda = 103', '\lambda = 183', '\lambda = 325', '\lambda = 579'])
# Now THAT'S a collapse. Good. Interpretation: as we go left to right, we go from faster uplift or lower disturbance to slower uplift or faster disturbance. That means the relief goes from high to low. At high relief, we get angle-of-repose behavior, for which the effective diffusivity increases with relief---hence, diffusivity decreases with increasing $dI_u$. Then we get to a realm that is presumably the diffusive regime, where the curve flattens out. This represents a constant diffusivity. Finally, we get to the far right side, where you hit finite-size effects: there will be a hill at least one particle high on average no matter how high $d I_u$, so diffusivity appears to drop again.
#
# There's a one-to-one relation between $D'$ and $\lambda$, at least in the steep regime. This reflects simple scaling. In the steep regime, $H = S_c L / 4$. By definition $D' = (U / 3H\delta^2 d) L^2$, or $H = (U / 3D'\delta^2 d) L^2$. Substituting,
#
# $(U / 3D'\delta^2 d) L^2 = S_c L / 4$
#
# $D' = 4 U L / 3 \delta^2 d S_c$
#
# in other words, we expect $D' \propto L$ in this regime. (If we translate back, this writes as
#
# $D' = 4 \lambda / 3 I_u d S_c$
#
# Voila!
#
# Ok, but why does the scaling between $D'$ and $\lambda$ continue in the diffusive regime? My guess is as follows. To relate disturbance rate, $d$, to diffusivity, $D$, consider that disturbance acts over depth $\delta$ and length $L$. Therefore, one might scale diffusivity as follows:
#
# $D \propto dL\delta \propto d \lambda$
#
# By that argument, $D$ should be proportional to $\lambda$. Another way to say this is that in order to preserve constant $D$, $d$ should be treated as a scale-dependent parameter: $d \propto D/\lambda$.
#
# A further thought: can we define diffusivity more carefully? One approach would be
#
# (frequency of disturbance events per unit time per unit length, $F$) [1/LT]
#
# x
#
# (cross-sectional area disturbed, $A$)
#
# x
#
# (characteristic displacement length,$\Lambda$)
#
# For the first, take the expected number of events across the whole in unit time and divide by the length of the slope:
#
# $F = \lambda d / L = ...$
#
# hmm, this isn't going where I thought...
#
| ModelInputsAndRunScripts/DataAnalysis/analysis_of_grain_hill_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# !pip install pymongo
# !pip install pymongo[srv]
# !pip install dnspython
# !pip install tweepy
# !pip install twitter
import pymongo
from pymongo import MongoClient
import json
import tweepy
import twitter
from pprint import pprint
import configparser
import pandas as pd
# +
config = configparser.ConfigParser()
config.read('config.ini')
CONSUMER_KEY = config['mytwitter']['api_key']
CONSUMER_SECRET = config['mytwitter']['api_secrete']
OAUTH_TOKEN = config['mytwitter']['access_token']
OATH_TOKEN_SECRET = config['mytwitter']['access_secrete']
mongod_connect = config['mymongo']['connection']
# -
client = MongoClient(mongod_connect)
db = client.lab9 # use or create a database named lab9
tweet_collection = db.tweet_collection #use or create a collection named tweet_collection
tweet_collection.create_index([("id", pymongo.ASCENDING)],unique = True) # make sure the collected tweets are unique
rest_auth = twitter.oauth.OAuth(OAUTH_TOKEN,OATH_TOKEN_SECRET,CONSUMER_KEY,CONSUMER_SECRET)
rest_api = twitter.Twitter(auth=rest_auth)
count = 100 #number of returned tweets, default and max is 100
#geocode = "38.4392897,-78.9412224,50mi" # define the location, in Harrisonburg, VA
q = "covid19" #define the keyword; collect tweets containing "covid19"
search_results = rest_api.search.tweets( count=count,q=q) #you can use both q and geocode
statuses = search_results["statuses"]
since_id_new = statuses[-1]['id']
for statuse in statuses:
try:
tweet_collection.insert_one(statuse)
pprint(statuse['created_at'])# print the date of the collected tweets
except:
pass
since_id_old = 0
while(since_id_new != since_id_old):
since_id_old = since_id_new
search_results = rest_api.search.tweets( count=count,q=q,
max_id= since_id_new)
statuses = search_results["statuses"]
since_id_new = statuses[-1]['id']
for statuse in statuses:
try:
tweet_collection.insert_one(statuse)
pprint(statuse['created_at']) # print the date of the collected tweets
except:
pass
# +
print(tweet_collection.estimated_document_count())# number of tweets collected
user_cursor = tweet_collection.distinct("user.id")
print (len(user_cursor)) # number of unique Twitter users
# -
tweet_collection.create_index([("text", pymongo.TEXT)], name='text_index', default_language='english') # create a text index
tweet_cursor = tweet_collection.find({"$text": {"$search": "vaccine"}}) # return tweets containing "vaccine"
for document in tweet_cursor[0:10]: # display the first 10 tweets from the query
try:
print ('----')
# pprint (document) # use pprint to print the entire tweet document
print ('name:', document["user"]["name"]) # user name
print ('text:', document["text"]) # tweets
except:
print ("***error in encoding")
pass
tweet_cursor = tweet_collection.find({"$text": {"$search": "vaccine"}}) # return tweets containing "vaccine"
tweet_df = pd.DataFrame(list(tweet_cursor ))
tweet_df[:10] #display the first 10 tweets
tweet_df["favorite_count"].hist() # create a histogram show the favorite count
| lab9.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="8LOf4mvV2nlf" outputId="d6b51881-042b-4315-ea98-f922d5e01f96"
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
import pandas as pd
import tensorflow as tf
# Import our input dataset
diabetes_df = pd.read_csv('https://2u-data-curriculum-team.s3.amazonaws.com/dataviz-classroom/v1.1/21-Deep-Learning/diabetes.csv')
diabetes_df.head()
# + id="6mnUA9Jz2nlp"
# Remove diabetes outcome target from features data
y = diabetes_df.Outcome.values
X = diabetes_df.drop(columns="Outcome").values
# Split training/test datasets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, stratify=y)
# + id="reOMnWTW2nlq"
# Preprocess numerical data for neural network
# Create a StandardScaler instance
scaler = StandardScaler()
# Fit the StandardScaler
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="gJP8qIlU2nlq" outputId="96c0759d-3ac4-4cb1-c3b2-bd2f50ac50f0"
# Define the deep learning model
nn_model = tf.keras.models.Sequential()
nn_model.add(tf.keras.layers.Dense(units=16, activation="relu", input_dim=8))
nn_model.add(tf.keras.layers.Dense(units=16, activation="relu"))
nn_model.add(tf.keras.layers.Dense(units=1, activation="sigmoid"))
# Compile the Sequential model together and customize metrics
nn_model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# Train the model
fit_model = nn_model.fit(X_train_scaled, y_train, epochs=50)
# Evaluate the model using the test data
model_loss, model_accuracy = nn_model.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# + id="mUleMxww2nlr"
| 01-Lesson-Plans/21-Deep-Learning/2/Activities/07-Stu_DetectingDiabetes/Solved/DetectingDiabetes.ipynb |
# # Beyond linear separation in classification
#
# As we saw in the regression section, the linear classification model
# expects the data to be linearly separable. When this assumption does not
# hold, the model is not expressive enough to properly fit the data.
# Therefore, we need to apply the same tricks as in regression: feature
# augmentation (potentially using expert-knowledge) or using a
# kernel-based method.
#
# We will provide examples where we will use a kernel support vector machine
# to perform classification on some toy-datasets where it is impossible to
# find a perfect linear separation.
#
# We will generate a first dataset where the data are represented as two
# interlaced half circles. This dataset is generated using the function
# [`sklearn.datasets.make_moons`](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html).
# +
import numpy as np
import pandas as pd
from sklearn.datasets import make_moons
feature_names = ["Feature #0", "Features #1"]
target_name = "class"
X, y = make_moons(n_samples=100, noise=0.13, random_state=42)
# We store both the data and target in a dataframe to ease plotting
moons = pd.DataFrame(np.concatenate([X, y[:, np.newaxis]], axis=1),
columns=feature_names + [target_name])
data_moons, target_moons = moons[feature_names], moons[target_name]
# -
# Since the dataset contains only two features, we can make a scatter plot to
# have a look at it.
# +
import matplotlib.pyplot as plt
import seaborn as sns
sns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],
hue=target_moons, palette=["tab:red", "tab:blue"])
_ = plt.title("Illustration of the moons dataset")
# -
# From the intuitions that we got by studying linear models, it should be
# obvious that a linear classifier will not be able to find a perfect decision
# function to separate the two classes.
#
# Let's see what the decision boundary of such a linear classifier looks like.
# We will create a predictive model by standardizing the dataset followed by
# a linear support vector machine classifier.
import sklearn
sklearn.set_config(display="diagram")
# +
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
linear_model = make_pipeline(StandardScaler(), SVC(kernel="linear"))
linear_model.fit(data_moons, target_moons)
# -
# <div class="admonition warning alert alert-danger">
# <p class="first admonition-title" style="font-weight: bold;">Warning</p>
# <p class="last">Be aware that we fit and will check the boundary decision of the classifier
# on the same dataset without splitting the dataset into a training set and a
# testing set. While this is a bad practice, we use it for the sake of
# simplicity to depict the model behavior. Always use cross-validation when
# you want to assess the generalization performance of a machine-learning model.</p>
# </div>
# Let's check the decision boundary of such a linear model on this dataset.
# +
from helpers.plotting import DecisionBoundaryDisplay
DecisionBoundaryDisplay.from_estimator(
linear_model, data_moons, response_method="predict", cmap="RdBu", alpha=0.5
)
sns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],
hue=target_moons, palette=["tab:red", "tab:blue"])
_ = plt.title("Decision boundary of a linear model")
# -
# As expected, a linear decision boundary is not flexible enough to split the
# two classes.
#
# To push this example to the limit, we will create another dataset where
# samples of a class will be surrounded by samples from the other class.
# +
from sklearn.datasets import make_gaussian_quantiles
feature_names = ["Feature #0", "Features #1"]
target_name = "class"
X, y = make_gaussian_quantiles(
n_samples=100, n_features=2, n_classes=2, random_state=42)
gauss = pd.DataFrame(np.concatenate([X, y[:, np.newaxis]], axis=1),
columns=feature_names + [target_name])
data_gauss, target_gauss = gauss[feature_names], gauss[target_name]
# -
ax = sns.scatterplot(data=gauss, x=feature_names[0], y=feature_names[1],
hue=target_gauss, palette=["tab:red", "tab:blue"])
_ = plt.title("Illustration of the Gaussian quantiles dataset")
# Here, it is even more obvious that a linear decision function is not
# adapted to the problem. We can check what decision function a linear support
# vector machine will find.
linear_model.fit(data_gauss, target_gauss)
DecisionBoundaryDisplay.from_estimator(
linear_model, data_gauss, response_method="predict", cmap="RdBu", alpha=0.5
)
sns.scatterplot(data=gauss, x=feature_names[0], y=feature_names[1],
hue=target_gauss, palette=["tab:red", "tab:blue"])
_ = plt.title("Decision boundary of a linear model")
# As expected, a linear separation cannot be used to separate the classes
# properly: the model will under-fit as it will make errors even on
# the training set.
#
# In the section about linear regression, we saw that we could use several
# tricks to make a linear model more flexible by augmenting features or
# using a kernel. Here, we will use the latter solution by using a radial basis
# function (RBF) kernel together with a support vector machine classifier.
#
# We will repeat the two previous experiments and check the obtained decision
# function.
kernel_model = make_pipeline(StandardScaler(), SVC(kernel="rbf", gamma=5))
kernel_model.fit(data_moons, target_moons)
DecisionBoundaryDisplay.from_estimator(
kernel_model, data_moons, response_method="predict", cmap="RdBu", alpha=0.5
)
sns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],
hue=target_moons, palette=["tab:red", "tab:blue"])
_ = plt.title("Decision boundary with a model using an RBF kernel")
# We see that the decision boundary is no longer a straight line. Indeed,
# an area is defined around the red samples, and we can expect this
# classifier to be able to generalize to unseen data.
#
# Let's check the decision function on the second dataset.
kernel_model.fit(data_gauss, target_gauss)
DecisionBoundaryDisplay.from_estimator(
kernel_model, data_gauss, response_method="predict", cmap="RdBu", alpha=0.5
)
ax = sns.scatterplot(data=gauss, x=feature_names[0], y=feature_names[1],
hue=target_gauss, palette=["tab:red", "tab:blue"])
_ = plt.title("Decision boundary with a model using an RBF kernel")
# We observe something similar to the previous case. The decision function
# is more flexible and does not underfit anymore.
#
# Thus, kernel trick or feature expansion are the tricks to make a linear
# classifier more expressive, exactly as we saw in regression.
#
# Keep in mind that adding flexibility to a model can also risk increasing
# overfitting by making the decision function sensitive to individual
# (possibly noisy) data points of the training set. Here we can observe that
# the decision functions remain smooth enough to preserve good generalization.
# If you are curious, you can try to repeat the above experiment with
# `gamma=100` and look at the decision functions.
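# The cell below sketches that experiment on the moons dataset (the same code applies to the
# Gaussian quantiles data by swapping the datasets); the resulting boundary is typically much
# more irregular, hugging individual training points.
# +
flexible_model = make_pipeline(StandardScaler(), SVC(kernel="rbf", gamma=100))
flexible_model.fit(data_moons, target_moons)

DecisionBoundaryDisplay.from_estimator(
    flexible_model, data_moons, response_method="predict", cmap="RdBu", alpha=0.5
)
sns.scatterplot(data=moons, x=feature_names[0], y=feature_names[1],
                hue=target_moons, palette=["tab:red", "tab:blue"])
_ = plt.title("Decision boundary with an RBF kernel and gamma=100")
# -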
| notebooks/logistic_regression_non_linear.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Read data on regolith cover and thickness extracted from facet simulations, and plot the results
import csv
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
filename = 'regolith_analysis20180910.csv'
# +
# Count number of lines in file
with open(filename) as f:
    num_lines = len(f.readlines())
# Create data arrays
dist_param = np.zeros(num_lines - 1) # skip 1 header line
weath_param = np.zeros(num_lines - 1)
reg_cover_proportion = np.zeros(num_lines - 1)
reg_thickness = np.zeros(num_lines - 1)
# Read data
with open(filename, 'r') as csvfile:
myreader = csv.reader(csvfile)
i = 0
for row in myreader:
print(','.join(row) + '\n')
if i >= 1:
dist_param[i-1] = row[1]
weath_param[i-1] = row[2]
reg_cover_proportion[i-1] = row[3]
reg_thickness[i-1] = row[9]
i += 1
# -
tau = 500.0 # average interval between one-cell slip events (corresponds to numerical model interval of 866 yr)
dprime = dist_param * tau
wprime = weath_param * tau
data = {'d': dist_param, 'dprime': dprime, 'w': weath_param, 'wprime': wprime, 'cover': reg_cover_proportion}
df = pd.DataFrame(data)
df = df.sort_values(by=['dprime', 'wprime'])
df
reg_cover_proportion = df['cover'].values.reshape((4, 31))
wprime = df['wprime'].values.reshape((4, 31))
dprime = df['dprime'].values.reshape((4, 31))
wprime
# +
psyms = ['k.', 'k+', 'k^', 'k*']
# Plot the weathering + disturbance runs
for d in range(4):
plt.semilogx(wprime[d,:], reg_cover_proportion[d,:], psyms[d])
# Labels and legend
plt.xlabel(r"Dimensionless weathering rate parameter, $w'$", fontsize=14)
plt.ylabel('Fractional regolith cover', fontsize=14)
plt.legend([r"$d'= 10^{-1}$", r"$d'= 10^0$", r"$d'= 10^1$", r"$d'= 10^2$", r"$d' \rightarrow \infty$", r"$\theta = 60^\circ - 360 w' / \pi$", r"$\theta = 30^\circ$"], fontsize=14)
plt.savefig('reg_cover_vs_wprime.pdf')
| DataAnalysis/analyze_and_plot_regolith_cover.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualization of Edition Differences
#
# ![screenshot of the visualizations](./img/header.png)
# +
import os
import sys
import shutil
from datetime import datetime
import math
from germansentiment import SentimentModel
import xml.etree.ElementTree as ET
import pandas as pd
# -
# In order to run this notebook, you will have to download the provided [sample dataset](https://doi.org/10.5281/zenodo.4992787) and extract it to a folder whose path you will then pass to the variable `baseDir` in the next cell.
#
# Alternatively, you can go the full way and download the files specified [here](https://github.com/elektrobohemian/StabiHacks/blob/master/sbbget/diverse_ill_spielbuch.txt) with the [sbbget.py](https://github.com/elektrobohemian/StabiHacks/blob/master/sbbget/sbbget.py) script that is part of the [StabiHacks repository](https://github.com/elektrobohemian/StabiHacks) which offers various utilities to deal with metadata and content provided by the Berlin State Library/Staatsbibliothek zu Berlin.
# +
# path to the sbbget temporary result files, e.g. "../sbbget/sbbget_downloads/download_temp" (the base path under which ALTO files are stored)
baseDir="/Users/david/src/python/StabiHacks/sbbget/sbbget_downloads.div_spielebuecher/download_temp/"
# path of the analysis results
analysisPath="./analysis/"
# verbose output
verbose=True
# utility function concatenating a given string with the current time
def printLog(text):
now = str(datetime.now())
print("[" + now + "]\t" + text)
# forces to output the result of the print command immediately, see: http://stackoverflow.com/questions/230751/how-to-flush-output-of-python-print
sys.stdout.flush()
# creates temporary and analyses directories
def createSupplementaryDirectories():
if not os.path.exists(analysisPath):
if verbose:
print("Creating " + analysisPath)
os.mkdir(analysisPath)
createSupplementaryDirectories()
# -
# ## METS/MODS Processing
#
# The next cell contains the logic to parse a METS/MODS XML file and save its contents to a dataframe.
# +
# XML namespace of MODS
modsNamespace = "{http://www.loc.gov/mods/v3}"
def parseOriginInfo(child):
"""
Parses an originInfo node and its children
:param child: The originInfo child in the element tree.
:return: A dict with the parsed information or None if the originInfo is invalid.
"""
discardNode = True
result = dict()
result["publisher"] = ""
# check if we can directly process the node
if "eventType" in child.attrib:
if child.attrib["eventType"] == "publication":
discardNode = False
else:
# we have to check if the originInfo contains and edition node with "[Electronic ed.]" to discard the node
children = child.getchildren()
hasEdition = False
for c in children:
if c.tag == modsNamespace + "edition":
hasEdition = True
if c.text == "[Electronic ed.]":
discardNode = True
else:
discardNode = False
if not hasEdition:
discardNode = False
if discardNode:
return None
else:
for c in child.getchildren():
cleanedTag = c.tag.replace(modsNamespace, "")
if cleanedTag == "place":
result["place"] = c.find("{http://www.loc.gov/mods/v3}placeTerm").text.strip()
if cleanedTag == "publisher":
result["publisher"] = c.text.strip()
# check for the most important date (see https://www.loc.gov/standards/mods/userguide/origininfo.html)
if "keyDate" in c.attrib:
result["date"] = c.text.strip()
return result
def parseTitleInfo(child):
result = dict()
result["title"]=""
result["subTitle"]=""
for c in child.getchildren():
cleanedTag = c.tag.replace(modsNamespace, "")
result[cleanedTag]=c.text.strip()
return result
def parseLanguage(child):
result = dict()
result["language"]=""
for c in child.getchildren():
cleanedTag = c.tag.replace(modsNamespace, "")
if cleanedTag=="languageTerm":
result["language"]=c.text.strip()
return result
def parseName(child):
result=dict()
role=""
name=""
for c in child.getchildren():
cleanedTag = c.tag.replace(modsNamespace, "")
if cleanedTag=="role":
for c2 in c.getchildren():
ct=c2.tag.replace(modsNamespace, "")
if ct=="roleTerm":
role=c2.text.strip()
elif cleanedTag=="displayForm":
name=c.text.strip()
result[role]=name
return result
def parseAccessCondition(child):
result = dict()
result["access"]=child.text.strip()
return result
def processMETSMODS(currentPPN, metsModsPath):
"""
Processes a given METS/MODS file.
:param currentPPN: the current PPN
:param metsModsPath: path to the METS/MODS file
:return: A dataframe with the parsing results.
"""
# parse the METS/MODS file
tree = ET.parse(metsModsPath)
root = tree.getroot()
# only process possibly interesting nodes, i.e.,
nodesOfInterest = ["originInfo", "titleInfo", "language", "name", "accessCondition"]
# stores result dicts created by various parsing function (see below)
resultDicts=[]
# master dictionary, later used for the creation of a dataframe
masterDict={'publisher':"",'place':"",'date':"",'title':"",'subTitle':"",'language':"",'aut':"",'rcp':"",'fnd':"",'access':"",'altoPaths':""}
# find all mods:mods nodes
for modsNode in root.iter(modsNamespace + 'mods'):
for child in modsNode:
# strip the namespace
cleanedTag = child.tag.replace(modsNamespace, "")
#print(cleanedTag)
#print(child)
if cleanedTag in nodesOfInterest:
if cleanedTag == "originInfo":
r = parseOriginInfo(child)
if r:
resultDicts.append(r)
elif cleanedTag=="titleInfo":
r = parseTitleInfo(child)
if r:
resultDicts.append(r)
elif cleanedTag=="language":
r = parseLanguage(child)
if r:
resultDicts.append(r)
elif cleanedTag=="name":
r = parseName(child)
if r:
resultDicts.append(r)
elif cleanedTag=="accessCondition":
r = parseAccessCondition(child)
if r:
resultDicts.append(r)
        # we are only interested in the first occurring mods:mods node
break
resultDicts.append(r)
# copy results to the master dictionary
for result in resultDicts:
for key in result:
masterDict[key]=[result[key]]
masterDict["ppn"]=[currentPPN]
return pd.DataFrame(data=masterDict)
# -
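# A minimal sketch of how `processMETSMODS` can be called for a single book; the PPN below is a
# hypothetical placeholder and has to be replaced with one of the directories found under `baseDir`.
# +
example_ppn = "PPN123456789"  # placeholder PPN, replace with a real one from your download folder
example_metsmods = baseDir + example_ppn + "/__metsmods/" + example_ppn + ".xml"
if os.path.exists(example_metsmods):
    example_df = processMETSMODS(example_ppn, example_metsmods)
    print(example_df.T)
# -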
# ## Sentiment Analysis
#
# The following cell is based on https://github.com/oliverguhr/german-sentiment-lib. The small fix in line 28 has been offered as a pull request to the original author.
# +
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from typing import List
import torch
import re
class SentimentModel_dazFix():
def __init__(self, model_name: str = "oliverguhr/german-sentiment-bert"):
if torch.cuda.is_available():
self.device = 'cuda'
else:
self.device = 'cpu'
self.model = AutoModelForSequenceClassification.from_pretrained(model_name)
self.model = self.model.to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(model_name)
self.clean_chars = re.compile(r'[^A-Za-züöäÖÜÄß ]', re.MULTILINE)
self.clean_http_urls = re.compile(r'https*\S+', re.MULTILINE)
self.clean_at_mentions = re.compile(r'@\S+', re.MULTILINE)
def predict_sentiment(self, texts: List[str])-> List[str]:
texts = [self.clean_text(text) for text in texts]
# Add special tokens takes care of adding [CLS], [SEP], <s>... tokens in the right way for each model.
# daz: last two parameters added to limit maximum number of tokens in case of long strings and to prevent crashes
# such as:
#"Token indices sequence length is longer than the specified maximum sequence length for this model (603 > 512).
# Running this sequence through the model will result in indexing errors"
input_ids = self.tokenizer.batch_encode_plus(texts,padding=True, add_special_tokens=True,truncation=True,max_length=512)
input_ids = torch.tensor(input_ids["input_ids"])
input_ids = input_ids.to(self.device)
with torch.no_grad():
logits = self.model(input_ids)
label_ids = torch.argmax(logits[0], axis=1)
labels = [self.model.config.id2label[label_id] for label_id in label_ids.tolist()]
return labels
def replace_numbers(self,text: str) -> str:
return text.replace("0"," null").replace("1"," eins").replace("2"," zwei")\
.replace("3"," drei").replace("4"," vier").replace("5"," fünf") \
.replace("6"," sechs").replace("7"," sieben").replace("8"," acht") \
.replace("9"," neun")
def clean_text(self,text: str)-> str:
text = text.replace("\n", " ")
text = self.clean_http_urls.sub('',text)
text = self.clean_at_mentions.sub('',text)
text = self.replace_numbers(text)
text = self.clean_chars.sub('', text) # use only text chars
text = ' '.join(text.split()) # substitute multiple whitespace with single whitespace
text = text.strip().lower()
return text
# -
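# A minimal usage sketch of the patched class: `predict_sentiment` takes a list of strings and
# returns one label per text (e.g. `['positive']`). Note that instantiating the class downloads
# the `oliverguhr/german-sentiment-bert` weights on first use.
# +
demo_model = SentimentModel_dazFix()
demo_model.predict_sentiment(["Das Buch ist wunderbar illustriert."])
# -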
# ## Preparation of the Visualization
#
# The next cell will read in all images, their accompanying metadata from METS/MODS XML files and the associated fulltext data. A sentiment analysis will be carried out on the fulltexts. The sentiment analysis assumes German texts.
# Finally, records are created for each image.
# +
jpgFilePaths = dict()
ppnDirs=[]
rows=[]
startTime = str(datetime.now())
printLog("Loading sentiment model...")
model = SentimentModel_dazFix()
printLog("Fetching files...")
# check all subdirectories starting with PPN as each PPN stands for a different medium
for x in os.listdir(baseDir):
if x.startswith("PPN"):
ppnDirs.append(x)
# browse all directories below sbbGetBasePath and search for *_FULLTEXT directories
# and associate each with its PPN
for ppn in ppnDirs:
printLog("Processing files for PPN: "+ppn)
ppnRecord=dict()
# create a "surrounding" PPN records for all images belonging to a book
metsModsPath=baseDir+ppn+"/__metsmods/"+ppn+".xml"
    r=processMETSMODS(ppn,metsModsPath)
ppnRecord["_place"]=r["place"].values[0]
ppnRecord["_title"]=r["title"].values[0]
ppnRecord["_publisher"]=r["publisher"].values[0]
ppnRecord["_date"]=r["date"].values[0]
# iterate over all subdirectories of a PPN (=book)
for dirpath, dirnames, files in os.walk(baseDir+ppn):
for name in files:
# image directories have the _TIFF suffix no matter whether they contain JPEGs or other files...
if dirpath.endswith("_TIFF"):
record=dict()
# all relevant data is joined into the keywords field as Vikus will use this field for filtering
record["keywords"]=",".join((ppn,ppnRecord["_place"],ppnRecord["_title"],ppnRecord["_publisher"]+" (Verlag)",ppnRecord["_date"]))
result=[]
# if we find no OCR data, we will save a placeholder text instead
description="Keine OCR-Ergebnisse vorhanden."
# if we found a image directory, only add JPEG files
if name.endswith(".jpg") or name.endswith(".JPG"):
if not ppn in jpgFilePaths:
jpgFilePaths[ppn]=[]
fullJPGPath=os.path.join(dirpath, name)
jpgFilePaths[ppn].append(fullJPGPath)
# get the raw fulltext (following the directory creation rules of sbbget.py; see above)
rawTextPath=dirpath.replace("_TIFF","_FULLTEXT")+"/"
t=rawTextPath.split("FILE_")[1].split("_FULLTEXT")
txtFile=t[0].zfill(8)+"_raw.txt"
rawTextPath+=txtFile
if os.path.exists(rawTextPath):
fileHandler = open(rawTextPath,mode='r')
fulltext = fileHandler.read()
if len(fulltext)>800:
description=fulltext[:800]+"[...]"
else:
description=fulltext
fileHandler.close()
# sentiment analysis of the raw OCR fulltext
result = model.predict_sentiment([fulltext])
# get the physical page number of the current image
txtFilePath=ppn+".txt"
with open(os.path.join(dirpath, txtFilePath)) as txtFile:
dest=""
for row in txtFile:
logPage=row.split()[1]
dest=analysisPath+ppn+"_"+logPage+".jpg"
record["id"]=ppn+"_"+logPage
record["year"]=math.ceil(int(logPage.split("_")[1])/10)*10
record["_realpage"]=int(logPage.split("_")[1])
if result:
record["_sentiment"]=result[0]
record["keywords"]+=","+result[0]+ " (Sentiment)"
record["_description"]=description
#print("Copy from %s to %s"%(fullJPGPath,dest))
# copy the found files to a new location with new unique names as required by Vikus
shutil.copy(fullJPGPath,dest)
# "join" the current record with its surrounding PPN metadata
record.update(ppnRecord)
rows.append(record)
sum=0
for ppn in jpgFilePaths:
for f in jpgFilePaths[ppn]:
sum+=1
printLog("Found %i images."%sum)
endTime = str(datetime.now())
print("Started at:\t%s\nEnded at:\t%s" % (startTime, endTime))
# -
# In the next cell, a dataframe is created from the records. The resulting dataframe is then saved in a CSV file readable by Vikus.
df=pd.DataFrame.from_dict(rows)
df.to_csv(analysisPath+"edition_vis.csv",sep=",")
df
# The resulting CSV file can be used directly with a Vikus viewer instance. Details on how to obtain and configure Vikus can be found in a [separate repository](https://github.com/cpietsch/vikus-viewer).
#
# The config files for this visualization are available in the [vikus_config](./vikus_config/) subdirectory. These files have to be stored along with all data in the [vikus_deploy](./vikus_deploy/) subdirectory.
#
# Please note that the sample configuration assumes the Vikus sprites and thumbnails created by the [script](https://github.com/cpietsch/vikus-viewer-script) are placed under `./data/edition_vis_kids`.
| visualize_editions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Computational Astrophysics
# ## 01. Fundamentals of Python
#
# ---
# ## <NAME>
#
# Observatorio Astronómico Nacional\
# Facultad de Ciencias\
# Universidad Nacional de Colombia
#
# ---
# ### About this notebook
#
# In this notebook we present some of the fundamentals of `python` coding.
#
# ---
# ### Hello World!
print('Hello World!')
# ---
# ### Importing Modules and Output Formatting
# +
import math
x = math.pi
print(f'An approximate value of pi is {x:.5f}')
# -
print('An approximate value of pi is {:.5f}'.format(x))
print('An approximate value of pi is %5.5f'%x)
# Note the output formatting expression
#
# `%[width][.precision]type`
#
# `[width]` : Minimum total field width, in characters (including the decimal point)\
# `[.precision]` : Digits in the decimal part of the number
#
# `type` : <br>
# d (Integer)\
# f (float)\
# e (Scientific Notation)\
# g (same as 'f' unless the exponent is less than -4 or at least as large as the precision, in which case 'e' is used)
#
# <br>
# Complete information about output formatting can be found at
#
# https://docs.python.org/3/tutorial/inputoutput.html
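#
# As a quick demonstration, the cell below applies each of these conversion types to the same (arbitrary) example value:
# +
value = 9213.456789
print('d: %d' % value)       # integer conversion truncates to 9213
print('f: %10.3f' % value)   # fixed point, minimum width 10, 3 decimals
print('e: %.3e' % value)     # scientific notation
print('g: %g' % value)       # general format: picks 'f' or 'e' depending on the exponent
# -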
#
# ---
# ### Simple functions
# +
import math as m
x = m.pi / 2.0
math.sin(x)
# -
m.cos(x)
x = m.pi / 2
m.cos(x)
a = 2.0
b = 3.0
c = a + b
c
a/b
m.sqrt(5*c)
m.exp(10.)
m.log(22026.47)
# ---
# ### Defining and manipulating arrays with `numpy`
# 1-dimensional arrays
# +
import numpy as np
a = np.array([0., 2., 4., 6. ])
a
# -
b = np.arange(10)
b
c = np.zeros(10,float)
c
d = np.ones(10)
d
e = np.linspace(1., 20., 9)
e
f = np.random.random(10)
f
# n-dimensional arrays
a = np.array([[1,2,3], [4,5,6], [7,8,9]])
a
a[0]
a[1]
a[1,1]
a[1,:]
a[:,1]
a[:]
np.array([np.arange(3),np.arange(3),np.arange(3)])
np.zeros([3,3],float)
np.ones([4,3])
np.random.random([3,4])
# Attributes of an array
a = np.array([[1,2,3], [4,5,6], [7,8,9]])
a
np.ndim(a)
a.shape
a.max()
a.min()
a.sum()
a[0,:].sum()
a[:,1].sum()
a.mean()
# ---
# ### Simple Operations with Arrays
# +
import numpy as np
a = np.random.random([3,3])
b = np.random.random([3,3])
a, b
# -
c = a + 2
c
d = a + b
d
e = a * b # Elementwise product
e
f = a @ b # matrix product
f
f = a.dot(b) # matrix product (alternative)
f
g = a / b
g
10*np.sin(c)
np.exp(d)
np.log10(f)
# Changing the shape of an array
a = np.random.random([3,4])
a.shape
a
a.T # Transpose
a.T.shape
a.ravel() #flattens the array
a.reshape(6,2)
| 01._Fundamentals/presentation/Fundamentals01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import feats
import constants
import transactions
import os
import pickle
import numpy as np
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.callbacks import ModelCheckpoint
from keras.preprocessing.sequence import pad_sequences
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import logging
# -
import pdb
class AucComputer(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
y_pred = self.model.predict(self.validation_data[0], batch_size=2048)
logs['val_auc'] = roc_auc_score(self.validation_data[1], y_pred)
print('epoch {}, val auc {}'.format(epoch, logs['val_auc']))
# - Leak Problem
# +
with open(constants.FEAT_DATA_DIR + 'up_airr_sym.pkl', 'rb') as f:
up_airr_sym = pickle.load(f)
tle = transactions.TransLogExtractor(constants.RAW_DATA_DIR, constants.FEAT_DATA_DIR)
train_orders = tle.get_orders()
uid_train = train_orders[train_orders.eval_set == 'train'][['user_id']].drop_duplicates()
uid_test = train_orders[train_orders.eval_set == 'test'][['user_id']].drop_duplicates()
del train_orders
up_airr_sym_train = up_airr_sym[up_airr_sym.user_id.isin(uid_train.user_id)]
up_airr_sym_test = up_airr_sym[up_airr_sym.user_id.isin(uid_test.user_id)]
up_airr_sym_train = pd.merge(up_airr_sym_train, tle.craft_label(),
on=['user_id','product_id'], how='left')
up_airr_sym_train.label.fillna(0, inplace=True)
# -
up_airr_sym_train = shuffle(up_airr_sym_train)
up_airr_sym_train['len'] = up_airr_sym_train.up_airr_sym.apply(len)
# %%time
max_seq_len = 99
X = pad_sequences(up_airr_sym_train.up_airr_sym.values, maxlen=max_seq_len)
y = up_airr_sym_train.label.values
sym_set_size = 480
embed_vec_len = 32
hidden_units = 256
def embed_lstm(sym_set_size, embed_vec_len, max_seq_len, hidden_units):
model = Sequential()
model.add(Embedding(sym_set_size, embed_vec_len, input_length=max_seq_len))
model.add(LSTM(hidden_units, return_sequences = True))
model.add(LSTM(hidden_units))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
model = embed_lstm(sym_set_size, embed_vec_len, max_seq_len, hidden_units)
model.summary()
filepath="./__lstm_cache__/" + "lstm-symbol-{epoch:02d}-{val_loss:.4f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
auc_computer = AucComputer()
callbacks_list = [checkpoint, auc_computer]
model.fit(X_train, y_train,
batch_size=2048,
epochs=100,
validation_split=0.02,
callbacks=callbacks_list,
class_weight={0:1, 1:1})
# %%time
y_pred = model.predict(X_test, batch_size=4028)
print('test auc {}'.format(roc_auc_score(y_test, y_pred)))
| lstm/LSTM Symbol order & interval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overfitting II
#
# Last time, we saw a theoretical example of *overfitting*, in which we fit a machine learning model that perfectly fit the data it saw, but performed extremely poorly on fresh, unseen data. In this lecture, we'll observe overfitting in a more practical context, using the Titanic data set again. We'll then begin to study *validation* techniques for finding models with "just the right amount" of flexibility.
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
# +
# assumes that you have run the function retrieve_data()
# from "Introduction to ML in Practice" in ML_3.ipynb
titanic = pd.read_csv("data.csv")
titanic
# -
# Recall that we diagnosed overfitting by testing our model against some new data. In this case, we don't have any more data. So, what we can do instead is *hold out* some data that we won't let our model see at first. This holdout data is called the *validation* or *testing* data, depending on the use to which we put it. In contrast, the data that we allow our model to see is called the *training* data. `sklearn` provides a convenient function for partitioning our data into training and holdout sets called `train_test_split`. The default and generally most useful behavior is to randomly select rows of the data frame to be in each set.
# +
from sklearn.model_selection import train_test_split
np.random.seed(1234)
train, test = train_test_split(titanic, test_size = 0.3) # hold out 30% of the data
train.shape, test.shape
# -
# Now we have two data frames. As you may recall from a previous lecture, we need to do some data cleaning, and split them into predictor variables `X` and target variables `y`.
# +
from sklearn import preprocessing
def prep_titanic_data(data_df):
df = data_df.copy()
# convert male/female to 1/0
le = preprocessing.LabelEncoder()
df['Sex'] = le.fit_transform(df['Sex'])
# don't need name column
df = df.drop(['Name'], axis = 1)
# split into X and y
X = df.drop(['Survived'], axis = 1)
y = df['Survived']
return(X, y)
# -
X_train, y_train = prep_titanic_data(train)
X_test, y_test = prep_titanic_data(test)
# Now we're able to train our model on the `train` data, and then evaluate its performance on the `test` data. This will help us to diagnose and avoid overfitting.
#
# Let's try using the decision tree classifier again. As you may remember, the `DecisionTreeClassifier()` class takes an argument `max_depth` that governs how many layers of decisions the tree is allowed to make. Larger `max_depth` values correspond to more complicated trees. In this way, `max_depth` is a model complexity parameter, similar to the `degree` when we did polynomial regression.
#
# For example, with a small `max_depth`, the model scores on the training and validation data are relatively close.
# +
from sklearn import tree
T = tree.DecisionTreeClassifier(max_depth = 3)
T.fit(X_train, y_train)
T.score(X_train, y_train), T.score(X_test, y_test)
# -
# On the other hand, if we use a much higher `max_depth`, we can achieve a substantially better score on the training data, but our performance on the test data has not improved by much, and might even suffer.
# +
T = tree.DecisionTreeClassifier(max_depth = 20)
T.fit(X_train, y_train)
T.score(X_train, y_train), T.score(X_test, y_test)
# -
# That looks like overfitting! The model achieves a near-perfect score on the training data, but a much lower one on the test data.
# +
fig, ax = plt.subplots(1, figsize = (10, 7))
for d in range(1, 30):
T = tree.DecisionTreeClassifier(max_depth = d)
T.fit(X_train, y_train)
ax.scatter(d, T.score(X_train, y_train), color = "black")
ax.scatter(d, T.score(X_test, y_test), color = "firebrick")
ax.set(xlabel = "Complexity (depth)", ylabel = "Performance (score)")
# -
# Observe that the training score (black) always increases, while the test score (red) tops out around 83\% and then even begins to trail off slightly. It looks like the optimal depth might be around 5-7 or so, but there's some random noise that can prevent us from being able to determine exactly what the optimal depth is.
#
# Increasing performance on the training set combined with decreasing performance on the test set is the trademark of overfitting.
#
# This noise reflects the fact that we took a single, random subset of the data for testing. In a more systematic experiment, we would draw many different subsets of the data for each value of depth and average over them. This is what *cross-validation* does, and we'll talk about it in the next lecture.
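# As a rough preview of that idea (a sketch using `cross_val_score` with 5 folds; the next lecture may set things up differently), we can average the score over several splits for each depth:
# +
from sklearn.model_selection import cross_val_score

X_all, y_all = prep_titanic_data(titanic)

cv_scores = []
for d in range(1, 30):
    T = tree.DecisionTreeClassifier(max_depth = d)
    # average the score over 5 different train/validation splits
    cv_scores.append(cross_val_score(T, X_all, y_all, cv = 5).mean())

best_depth = np.argmax(cv_scores) + 1
best_depth, cv_scores[best_depth - 1]
# -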
| content/ML/ML_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table>
# <tr>
# <th><img src="img/rqc.png" href="http://www.rqc.ru/" width="150px"></th>
# <th colspan="3" align="left" style="font-weight:normal">
# <p>• <NAME> <a href="mailto:<EMAIL>"><EMAIL></a>, <NAME> <a href="mailto:<EMAIL>"><EMAIL></a>
# <p>• Based on <a href="https://www.nature.com/articles/s41567-018-0048-5">Neural-network quantum state tomography</a> paper</p>
# <p>• The latest version of this notebook is available <a href="https://github.com/RQC-QApp/NNQST">here</a></p>
# </th>
# </tr>
# </table>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# %load_ext autoreload
# %autoreload 2
from nnqst import rbm_qst, utils, paper_functions, generators,\
state_operations, state_representations, fidelities
# -
# ## RBM Quantum State Tomography
# +
num_units = 3
num_hidd = 5
num_samples = 3000
states = generators.get_all_states(num_units, "tuple")
phases_sampled = 0.1 * generators.random_phases(len(states))
# phases_sampled = generators.random_phases(len(states))
# phases_sampled = np.array([2.62022653e-01, 4.52593227e-01, 7.18638172e-05, 1.89961158e-01])
# phases_sampled = [1.4, 0.9, 0.3, 2.7]
# Filling dicts.
amplitudes, phases = {}, {}
for i in range(len(states)):
amplitudes[states[i]] = 1. / np.sqrt(len(states))
phases[states[i]] = phases_sampled[i]
print('> amplitudes:', amplitudes)
print("> phases:", phases)
print(' ')
dataset = generators.generate_Isinglike_dataset(num_units, states, amplitudes, phases, num_samples)
psi_true = dict()
for state in states:
psi_true[state] = amplitudes[state] * np.exp(1j * phases[state])
print('> psi_true: ', psi_true)
print(' ')
print('> dataset:', dataset)
# -
# ## (1/2) Amplitudes.
# +
dataset_Z = generators.generate_dataset(states, ['I' * num_units], amplitudes, phases, num_samples)
quantum_basis = states
r_qst = rbm_qst.RBM_QST(quantum_basis, num_units, num_hidd)
r_qst.train_amplitudes(dataset_Z, max_epochs=1000, learning_rate=0.9, debug=True, precise=True)
# -
plt.plot(r_qst.objectives)
plt.title('Objective function - Ksi - RBM_QST')
plt.show()
# Fidelity.
fid, sampled_from_RBM = fidelities.fidelity_RBM(r_qst, psi_true, num_samples=2000)
fid
sampled_from_RBM
utils.plot_histogram(sampled_from_RBM)
print(r_qst.weights_lambda)
# ## (2/2) Phases.
# +
basis_set = generators.generate_Isinglike_basis_set(num_units)
print('> basis_set:', basis_set)
print('> dataset:', dataset)
# -
r_qst.train_phases(dataset, basis_set=basis_set, max_epochs=1000,
learning_rate=0.05, debug=True, precise=True)
plt.plot(r_qst.objectives)
plt.title('Objective function - Ksi - RBM_QST')
plt.show()
# +
# Fidelity.
fid = fidelities.fidelity_RBM_PRECISE(r_qst, psi_true)
print('precise:', fid)
fid, sampled_from_RBM = fidelities.fidelity_RBM(r_qst, psi_true, num_samples=2000)
print('sampled:', fid)
# -
r_qst.weights_mu
# Phases
psi_rbm_lst = list(utils.psi_RBM(r_qst).values())
psi_true_lst = list(psi_true)
print(psi_true_lst)
phases_RBM = np.log(psi_rbm_lst).imag
phases_true = np.array(list(phases.values()))
print(phases_RBM%np.pi)
print(phases_true%np.pi)
print(np.array(phases_true - phases_RBM)%(2*np.pi))
phases_true
radii = np.ones(8)
fig = plt.figure(figsize=(8,8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
ax.bar(phases_true - phases_true[0], radii, width=0.02, bottom=0.0)
ax.bar(phases_RBM - phases_RBM[0], radii * 0.9, width=0.02, bottom=0.0)
plt.show()
| main_karazeev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Simple Autoencoder
#
# We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.
#
# <img src='notebook_ims/autoencoder_1.png' />
#
# ### Compressed Representation
#
# A compressed representation can be great for saving and sharing any kind of data in a way that is more efficient than storing raw data. In practice, the compressed representation often holds key information about an input image and we can use it for denoising images or other kinds of reconstruction and transformation!
#
# <img src='notebook_ims/denoising.png' width=60%/>
#
# In this notebook, we'll build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.
# +
import torch
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# load the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# +
# Create training and test dataloaders
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, num_workers=num_workers)
# -
# ### Visualize the Data
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# get one image from the batch
img = np.squeeze(images[0])
fig = plt.figure(figsize = (5,5))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
# -
# ---
# ## Linear Autoencoder
#
# We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building a simple autoencoder. The encoder and decoder will each be made of a **small stack of linear layers**. The units that connect the encoder and decoder will be the _compressed representation_.
#
# Since the images are normalized between 0 and 1, we need to use a **sigmoid activation on the output layer** to get values that match this input value range.
#
# <img src='notebook_ims/simple_autoencoder.png' width=50% />
#
#
# #### TODO: Build the graph for the autoencoder in the cell below.
# > The input images will be flattened into 784 length vectors. The targets are the same as the inputs.
# > The encoder and decoder will be made of two linear layers, each.
# > The depth dimensions should change as follows: 784 inputs > 128 > **encoding_dim** > 128 > 784 outputs.
# > All layers will have ReLu activations applied except for the final output layer, which has a sigmoid activation.
#
# **The compressed representation should be a vector with dimension `encoding_dim=32`.**
# +
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Autoencoder(nn.Module):
def __init__(self, encoding_dim):
super(Autoencoder, self).__init__()
## encoder ##
self.encoder1 = nn.Linear(784, 128)
self.encoder2 = nn.Linear(128, encoding_dim)
## decoder ##
self.decoder1 = nn.Linear(encoding_dim, 128)
self.decoder2 = nn.Linear(128, 784)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
# define feedforward behavior
# and scale the *output* layer with a sigmoid activation function
x = F.relu(self.encoder1(x))
x = F.relu(self.encoder2(x))
x = F.relu(self.decoder1(x))
x = self.sigmoid(self.decoder2(x))
return x
# initialize the NN
encoding_dim = 32
model = Autoencoder(encoding_dim)
print(model)
# -
# ---
# ## Training
#
# Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss and the test loss afterwards.
#
# We are not concerned with labels in this case, just images, which we can get from the `train_loader`. Because we're comparing pixel values in input and output images, it will be best to use a loss that is meant for a regression task. Regression is all about comparing _quantities_ rather than probabilistic values. So, in this case, I'll use `MSELoss`. And compare output images and input images as follows:
# ```
# loss = criterion(outputs, images)
# ```
#
# Otherwise, this is pretty straightforward training with PyTorch. We flatten our images, pass them into the autoencoder, and record the training loss as we go.
# +
# specify loss function
criterion = nn.MSELoss()
# specify loss function
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# +
# number of epochs to train the model
n_epochs = 20
for epoch in range(1, n_epochs+1):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data in train_loader:
# _ stands in for labels, here
images, _ = data
# flatten images
images = images.view(images.size(0), -1)
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
outputs = model(images)
# calculate the loss
loss = criterion(outputs, images)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*images.size(0)
# print avg training statistics
train_loss = train_loss/len(train_loader)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch,
train_loss
))
# -
# ## Checking out the results
#
# Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.
# +
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
images_flatten = images.view(images.size(0), -1)
# get sample outputs
output = model(images_flatten)
# prep images for display
images = images.numpy()
# output is resized into a batch of images
output = output.view(batch_size, 1, 28, 28)
# use detach when it's an output that requires_grad
output = output.detach().numpy()
# plot the first ten input images and then reconstructed images
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(25,4))
# input images on top row, reconstructions on bottom
for images, row in zip([images, output], axes):
for img, ax in zip(images, row):
ax.imshow(np.squeeze(img), cmap='gray')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# -
# ## Up Next
#
# We're dealing with images here, so we can (usually) get better performance using convolution layers. So, next we'll build a better autoencoder with convolutional layers.
| part-3/autoencoder/linear-autoencoder/Simple_Autoencoder_Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:exptt]
# language: python
# name: conda-env-exptt-py
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from urllib.request import urlopen,urlretrieve
from PIL import Image
from tqdm import tqdm_notebook
# %matplotlib inline
from sklearn.utils import shuffle
import cv2
from resnets_utils import *
from keras.models import load_model
from sklearn.datasets import load_files
from keras.utils import np_utils
from glob import glob
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential,Model,load_model
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D,GlobalAveragePooling2D
from keras.callbacks import TensorBoard,ReduceLROnPlateau,ModelCheckpoint
# -
# ### Load the data set
# +
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
# -
img_height,img_width = 64,64
num_classes = 6
#If imagenet weights are being loaded,
#input must have a static square shape (one of (128, 128), (160, 160), (192, 192), or (224, 224))
base_model = applications.resnet50.ResNet50(weights= None, include_top=False, input_shape= (img_height,img_width,3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.7)(x)
predictions = Dense(num_classes, activation= 'softmax')(x)
model = Model(inputs = base_model.input, outputs = predictions)
from keras.optimizers import SGD, Adam
# sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
adam = Adam(lr=0.0001)
model.compile(optimizer= adam, loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs = 100, batch_size = 64)
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
model.summary()
| resnet_keras/Residual_Network_Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.9 64-bit (''iguanas_os_dev'': venv)'
# name: python3
# ---
# # Classification Metrics Example
# Classification metrics are used to calculate the performance of binary predictors based on a binary target. They are used extensively in other Iguanas modules. This example shows how they can be applied and how to create your own.
# ## Requirements
# To run, you'll need the following:
#
# * A dataset containing binary predictor columns and a binary target column.
# ----
# ## Import packages
# +
from iguanas.metrics.classification import Precision, Recall, FScore, Revenue
import pandas as pd
import numpy as np
from typing import Union
# -
# ## Create data
# Let's create some dummy predictor columns and a binary target column. For this example, let's assume the dummy predictor columns represent rules that have been applied to a dataset.
# +
np.random.seed(0)
y_pred = pd.Series(np.random.randint(0, 2, 1000), name = 'A')
y_preds = pd.DataFrame(np.random.randint(0, 2, (1000, 5)), columns=[i for i in 'ABCDE'])
y = pd.Series(np.random.randint(0, 2, 1000), name = 'label')
amounts = pd.Series(np.random.randint(0, 1000, 1000), name = 'amounts')
# -
# ----
# ## Apply optimisation functions
# There are currently four classification metrics available:
#
# * Precision score
# * Recall score
# * Fbeta score
# * Revenue
#
# **Note that the *FScore*, *Precision* and *Recall* classes are ~100 times faster on larger datasets compared to the same functions from Sklearn's *metrics* module. They also work with Koalas DataFrames, whereas the Sklearn functions do not.**
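#
# For reference (standard definitions, not specific to Iguanas), the first three metrics are defined in terms of true positives (TP), false positives (FP) and false negatives (FN) as:
#
# $$\text{Precision} = \frac{TP}{TP + FP}, \qquad \text{Recall} = \frac{TP}{TP + FN}, \qquad F_\beta = (1 + \beta^2)\,\frac{\text{Precision} \cdot \text{Recall}}{\beta^2 \cdot \text{Precision} + \text{Recall}}$$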
# ### Instantiate class and run fit method
# We can run the `fit` method to calculate the optimisation metric for each column in the dataset.
# #### Precision score
precision = Precision()
# Single predictor
rule_precision = precision.fit(y_preds=y_pred, y_true=y, sample_weight=None)
# Multiple predictors
rule_precisions = precision.fit(y_preds=y_preds, y_true=y, sample_weight=None)
# #### Recall score
recall = Recall()
# Single predictor
rule_recall = recall.fit(y_preds=y_pred, y_true=y, sample_weight=None)
# Multiple predictors
rule_recalls = recall.fit(y_preds=y_preds, y_true=y, sample_weight=None)
# #### Fbeta score (beta=1)
f1 = FScore(beta=1)
# Single predictor
rule_f1 = f1.fit(y_preds=y_pred, y_true=y, sample_weight=None)
# Multiple predictors
rule_f1s = f1.fit(y_preds=y_preds, y_true=y, sample_weight=None)
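# As a quick sanity check (a sketch using Sklearn directly, not part of the Iguanas API), the single-predictor Fbeta value above can be compared with `sklearn.metrics.fbeta_score`:
# +
from sklearn.metrics import fbeta_score

sklearn_f1 = fbeta_score(y, y_pred, beta=1)
rule_f1, sklearn_f1  # the two values should agree up to floating-point error
# -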
# #### Revenue
rev = Revenue(y_type='Fraud', chargeback_multiplier=2)
# Single predictor
rule_rev = rev.fit(y_preds=y_pred, y_true=y, sample_weight=amounts)
# Multiple predictors
rule_revs = rev.fit(y_preds=y_preds, y_true=y, sample_weight=amounts)
# ### Outputs
# The `fit` method returns the optimisation metric defined by the class:
rule_precision, rule_precisions
rule_recall, rule_recalls
rule_f1, rule_f1s
rule_rev, rule_revs
# The `fit` method can be fed into various Iguanas modules as an argument (wherever the `metric` parameter appears). For example, in the `RuleGeneratorOpt` module, you can set the metric used to optimise the rules using this methodology.
# ----
# ## Creating your own optimisation function
# Say we want to create a class which calculates the Positive likelihood ratio (TP rate/FP rate).
# The main class structure involves having a `fit` method which takes three arguments - the binary predictor(s), the binary target and any event-specific weights to apply. This method should return a single numeric value for a single predictor (or an array of values, one per column, when a DataFrame of predictors is passed).
class PositiveLikelihoodRatio:
def fit(self,
y_preds: Union[pd.Series, pd.DataFrame],
y_true: pd.Series,
sample_weight: pd.Series) -> float:
def _calc_plr(y_true, y_preds):
# Calculate TPR
tpr = (y_true * y_preds).sum() / y_true.sum()
# Calculate FPR
fpr = ((1 - y_true) * y_preds).sum()/(1 - y_true).sum()
return 0 if tpr == 0 or fpr == 0 else tpr/fpr
if y_preds.ndim == 1:
return _calc_plr(y_true, y_preds)
else:
plrs = np.empty(y_preds.shape[1])
for i, col in enumerate(y_preds.columns):
plrs[i] = _calc_plr(y_true, y_preds[col])
return plrs
# We can then apply the `fit` method to the dataset to check it works:
# + tags=[]
plr = PositiveLikelihoodRatio()
# Single predictor
rule_plr = plr.fit(y_preds=y_pred, y_true=y, sample_weight=None)
# Multiple predictors
rule_plrs = plr.fit(y_preds=y_preds, y_true=y, sample_weight=None)
# -
rule_plr, rule_plrs
# Finally, after instantiating the class, we can feed the `fit` method to a relevant Iguanas module (for example, we can feed the `fit` method to the `metric` parameter in the `BayesianOptimiser` class so that rules are generated which maximise the Positive Likelihood Ratio).
# ----
| iguanas/metrics/examples/classification_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# # The Atoms of Computation
# -
#
# Programming a quantum computer is now something that anyone can do in the comfort of their own home.
#
# But what to create? What is a quantum program anyway? In fact, what is a quantum computer?
#
#
# These questions can be answered by making comparisons to standard digital computers. Unfortunately, most people don’t actually understand how digital computers work either. In this article, we’ll look at the basic principles behind these devices. To help us transition over to quantum computing later on, we’ll do it using the same tools as we'll use for quantum.
# + [markdown] tags=["contents"]
# ## Contents
#
# 1. [Splitting information into bits](#bits)
# 2. [Computation as a Diagram](#diagram)
# 3. [Your First Quantum Circuit](#first-circuit)
# 4. [Example: Adder Circuit](#adder)
# 4.1 [Encoding an Input](#encoding)
# 4.2 [Remembering how to Add](#remembering-add)
# 4.3 [Adding with Qiskit](#adding-qiskit)
# -
# Below is some Python code we'll need to run if we want to use the code in this page:
from qiskit import QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram
# ## 1. Splitting information into bits <a id="bits"></a>
# The first thing we need to know about is the idea of bits. These are designed to be the world’s simplest alphabet. With only two characters, 0 and 1, we can represent any piece of information.
#
# One example is numbers. You are probably used to representing a number through a string of the ten digits 0, 1, 2, 3, 4, 5, 6, 7, 8, and 9. In this string of digits, each digit represents how many times the number contains a certain power of ten. For example, when we write 9213, we mean
#
#
#
# $$ 9000 + 200 + 10 + 3 $$
#
#
#
# or, expressed in a way that emphasizes the powers of ten
#
#
#
# $$ (9\times10^3) + (2\times10^2) + (1\times10^1) + (3\times10^0) $$
#
#
#
# Though we usually use this system based on the number 10, we can just as easily use one based on any other number. The binary number system, for example, is based on the number two. This means using the two characters 0 and 1 to express numbers as multiples of powers of two. For example, 9213 becomes 10001111111101, since
#
#
#
# $$ 9213 = (1 \times 2^{13}) + (0 \times 2^{12}) + (0 \times 2^{11})+ (0 \times 2^{10}) +(1 \times 2^9) + (1 \times 2^8) + (1 \times 2^7) \\\\ \,\,\, + (1 \times 2^6) + (1 \times 2^5) + (1 \times 2^4) + (1 \times 2^3) + (1 \times 2^2) + (0 \times 2^1) + (1 \times 2^0) $$
#
#
#
# In this we are expressing numbers as multiples of 2, 4, 8, 16, 32, etc. instead of 10, 100, 1000, etc.
# <a id="binary_widget"></a>
from qiskit_textbook.widgets import binary_widget
binary_widget(nbits=5)
# These strings of bits, known as binary strings, can be used to represent more than just numbers. For example, there is a way to represent any text using bits. For any letter, number, or punctuation mark you want to use, you can find a corresponding string of at most eight bits using [this table](https://www.ibm.com/support/knowledgecenter/en/ssw_aix_72/com.ibm.aix.networkcomm/conversion_table.htm). Though these are quite arbitrary, this is a widely agreed-upon standard. In fact, it's what was used to transmit this article to you through the internet.
#
# This is how all information is represented in computers. Whether numbers, letters, images, or sound, it all exists in the form of binary strings.
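#
# If you want to see such binary strings for yourself, Python's built-in `format` function can produce them (a small aside, using the number 9213 from above and the standard ASCII code for the letter A):
format(9213, 'b'), format(ord('A'), '08b')  # '10001111111101' and the 8-bit pattern for 'A'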
#
# Like our standard digital computers, quantum computers are based on this same basic idea. The main difference is that they use *qubits*, an extension of the bit to quantum mechanics. In the rest of this textbook, we will explore what qubits are, what they can do, and how they do it. In this section, however, we are not talking about quantum at all. So, we just use qubits as if they were bits.
# ### Quick Exercises
# 1. Think of a number and try to write it down in binary.
# 2. If you have $n$ bits, how many different states can they be in?
# ## 2. Computation as a diagram <a id="diagram"></a>
#
# Whether we are using qubits or bits, we need to manipulate them in order to turn the inputs we have into the outputs we need. For the simplest programs with very few bits, it is useful to represent this process in a diagram known as a *circuit diagram*. These have inputs on the left, outputs on the right, and operations represented by arcane symbols in between. These operations are called 'gates', mostly for historical reasons.
#
# Here's an example of what a circuit looks like for standard, bit-based computers. You aren't expected to understand what it does. It should simply give you an idea of what these circuits look like.
#
# ![image1](images/classical_circuit.png)
#
# For quantum computers, we use the same basic idea but have different conventions for how to represent inputs, outputs, and the symbols used for operations. Here is the quantum circuit that represents the same process as above.
#
# ![image2](images/quantum_circuit.png)
#
# In the rest of this section, we will explain how to build circuits. At the end, you'll know how to create the circuit above, what it does, and why it is useful.
# ## 3. Your first quantum circuit <a id="first-circuit"></a>
# In a circuit, we typically need to do three jobs: First, encode the input, then do some actual computation, and finally extract an output. For your first quantum circuit, we'll focus on the last of these jobs. We start by creating a circuit with eight qubits and eight outputs.
n = 8
n_q = n
n_b = n
qc_output = QuantumCircuit(n_q,n_b)
# This circuit, which we have called `qc_output`, is created by Qiskit using `QuantumCircuit`. The number `n_q` defines the number of qubits in the circuit. With `n_b` we define the number of output bits we will extract from the circuit at the end.
#
# The extraction of outputs in a quantum circuit is done using an operation called `measure`. Each measurement tells a specific qubit to give an output to a specific output bit. The following code adds a `measure` operation to each of our eight qubits. The qubits and bits are both labelled by the numbers from 0 to 7 (because that’s how programmers like to do things). The command `qc.measure(j,j)` adds a measurement to our circuit `qc` that tells qubit `j` to write an output to bit `j`.
for j in range(n):
qc_output.measure(j,j)
# Now that our circuit has something in it, let's take a look at it.
qc_output.draw()
# Qubits are always initialized to give the output ```0```. Since we don't do anything to our qubits in the circuit above, this is exactly the result we'll get when we measure them. We can see this by running the circuit many times and plotting the results in a histogram. We will find that the result is always ```00000000```: a ```0``` from each qubit.
counts = execute(qc_output,Aer.get_backend('qasm_simulator')).result().get_counts()
plot_histogram(counts)
# The reason for running many times and showing the result as a histogram is because quantum computers may have some randomness in their results. In this case, since we aren’t doing anything quantum, we get just the ```00000000``` result with certainty.
#
# Note that this result comes from a quantum simulator, which is a standard computer calculating what an ideal quantum computer would do. Simulations are only possible for small numbers of qubits (~30 qubits), but they are nevertheless a very useful tool when designing your first quantum circuits. To run on a real device you simply need to replace ```Aer.get_backend('qasm_simulator')``` with the backend object of the device you want to use.
# ## 4. Example: Creating an Adder Circuit <a id="adder"></a>
# ### 4.1 Encoding an input <a id="encoding"></a>
#
# Now let's look at how to encode a different binary string as an input. For this, we need what is known as a NOT gate. This is the most basic operation that you can do in a computer. It simply flips the bit value: ```0``` becomes ```1``` and ```1``` becomes ```0```. For qubits, it is an operation called ```x``` that does the job of the NOT.
#
# Below we create a new circuit dedicated to the job of encoding and call it `qc_encode`. For now, we only specify the number of qubits.
# +
qc_encode = QuantumCircuit(n)
qc_encode.x(7)
qc_encode.draw()
# -
# Extracting results can be done using the circuit we have from before: `qc_output`. Adding the two circuits using `qc_encode + qc_output` creates a new circuit with everything needed to extract an output added at the end.
qc = qc_encode + qc_output
qc.draw()
# Now we can run the combined circuit and look at the results.
counts = execute(qc,Aer.get_backend('qasm_simulator')).result().get_counts()
plot_histogram(counts)
# Now our computer outputs the string ```10000000``` instead.
#
# The bit we flipped, which comes from qubit 7, lives on the far left of the string. This is because Qiskit numbers the bits in a string from right to left. Some prefer to number their bits the other way around, but Qiskit's system certainly has its advantages when we are using the bits to represent numbers. Specifically, it means that qubit 7 is telling us about how many $2^7$s we have in our number. So by flipping this bit, we’ve now written the number 128 in our simple 8-bit computer.
#
# Now try out writing another number for yourself. You could do your age, for example. Just use a search engine to find out what the number looks like in binary (if it includes a ‘0b’, just ignore it), and then add some 0s to the left side if you are younger than 64.
# +
qc_encode = QuantumCircuit(n)
qc_encode.x(1)
qc_encode.x(5)
qc_encode.draw()
# -
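# If you'd rather let Python do the conversion for you, here is a small helper (an illustrative sketch, not part of the original text; the name `encode_number` is ours) that encodes any number that fits on our eight qubits.
# +
def encode_number(number, n_qubits=n):
    """Return a circuit that writes `number` onto `n_qubits` qubits in binary."""
    qc_enc = QuantumCircuit(n_qubits)
    for position, bit in enumerate(reversed(format(number, 'b'))):
        if bit == '1':
            qc_enc.x(position)  # qubit j holds the 2^j digit of the number
    return qc_enc

qc_encode = encode_number(34)  # 34 = 2 + 32, i.e. the x(1) and x(5) used above
qc_encode.draw()
# -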
# Now we know how to encode information in a computer. The next step is to process it: To take an input that we have encoded, and turn it into an output that we need.
# ### 4.2 Remembering how to add <a id="remembering-add"></a>
# To look at turning inputs into outputs, we need a problem to solve. Let’s do some basic maths. In primary school, you will have learned how to take large mathematical problems and break them down into manageable pieces. For example, how would you go about solving the following?
#
# ```
# 9213
# + 1854
# = ????
# ```
#
# One way is to do it digit by digit, from right to left. So we start with 3+4
# ```
# 9213
# + 1854
# = ???7
# ```
#
# And then 1+5
# ```
# 9213
# + 1854
# = ??67
# ```
#
# Then we have 2+8=10. Since this is a two digit answer, we need to carry the one over to the next column.
#
# ```
# 9213
# + 1854
# = ?067
# ¹
# ```
#
# Finally we have 9+1+1=11, and get our answer
#
# ```
# 9213
# + 1854
# = 11067
# ¹
# ```
#
# This may just be simple addition, but it demonstrates the principles behind all algorithms. Whether the algorithm is designed to solve mathematical problems or process text or images, we always break big tasks down into small and simple steps.
#
# To run on a computer, algorithms need to be compiled down to the smallest and simplest steps possible. To see what these look like, let’s do the above addition problem again but in binary.
#
#
# ```
# 10001111111101
# + 00011100111110
#
# = ??????????????
# ```
#
# Note that the second number has a bunch of extra 0s on the left. This just serves to make the two strings the same length.
#
# Our first task is to do the 1+0 for the column on the right. In binary, as in any number system, the answer is 1. We get the same result for the 0+1 of the second column.
#
# ```
# 10001111111101
# + 00011100111110
#
# = ????????????11
# ```
#
# Next, we have 1+1. As you’ll surely be aware, 1+1=2. In binary, the number 2 is written ```10```, and so requires two bits. This means that we need to carry the 1, just as we would for the number 10 in decimal.
#
# ```
# 10001111111101
# + 00011100111110
# = ???????????011
# ¹
# ```
#
# The next column now requires us to calculate ```1+1+1```. This means adding three numbers together, so things are getting complicated for our computer. But we can still compile it down to simpler operations, and do it in a way that only ever requires us to add two bits together. For this, we can start with just the first two 1s.
#
# ```
# 1
# + 1
# = 10
# ```
#
# Now we need to add this ```10``` to the final ```1``` , which can be done using our usual method of going through the columns.
#
# ```
# 10
# + 01
# = 11
# ```
#
# The final answer is ```11``` (also known as 3).
#
# Now we can get back to the rest of the problem. With the answer of ```11```, we have another carry bit.
#
# ```
# 10001111111101
# + 00011100111110
# = ??????????1011
# ¹¹
# ```
#
# So now we have another 1+1+1 to do. But we already know how to do that, so it’s not a big deal.
#
# In fact, everything left so far is something we already know how to do. This is because, if you break everything down into adding just two bits, there are only four possible things you’ll ever need to calculate. Here are the four basic sums (we’ll write all the answers with two bits to be consistent).
#
# ```
# 0+0 = 00 (in decimal, this is 0+0=0)
# 0+1 = 01 (in decimal, this is 0+1=1)
# 1+0 = 01 (in decimal, this is 1+0=1)
# 1+1 = 10 (in decimal, this is 1+1=2)
# ```
#
# This is called a *half adder*. If our computer can implement this, and if it can chain many of them together, it can add anything.
# ### 4.3 Adding with Qiskit <a id="adding-qiskit"></a>
# Let's make our own half adder using Qiskit. This will include a part of the circuit that encodes the input, a part that executes the algorithm, and a part that extracts the result. The first part will need to be changed whenever we want to use a new input, but the rest will always remain the same.
# ![half adder implemented on a quantum circuit](images/half-adder.svg)
#
# The two bits we want to add are encoded in the qubits 0 and 1. The above example encodes a ```1``` in both these qubits, and so it seeks to find the solution of ```1+1```. The result will be a string of two bits, which we will read out from the qubits 2 and 3. All that remains is to fill in the actual program, which lives in the blank space in the middle.
#
# The dashed lines in the image are just to distinguish the different parts of the circuit (although they can have more interesting uses too). They are made by using the `barrier` command.
#
# The basic operations of computing are known as logic gates. We’ve already used the NOT gate, but this is not enough to make our half adder. We could only use it to manually write out the answers. Since we want the computer to do the actual computing for us, we’ll need some more powerful gates.
#
# To see what we need, let’s take another look at what our half adder needs to do.
#
# ```
# 0+0 = 00
# 0+1 = 01
# 1+0 = 01
# 1+1 = 10
# ```
#
# The rightmost bit in all four of these answers is completely determined by whether the two bits we are adding are the same or different. So for ```0+0``` and ```1+1```, where the two bits are equal, the rightmost bit of the answer comes out ```0```. For ```0+1``` and ```1+0```, where we are adding different bit values, the rightmost bit is ```1```.
#
# To get this part of our solution correct, we need something that can figure out whether two bits are different or not. Traditionally, in the study of digital computation, this is called an XOR gate.
#
# | Input 1 | Input 2 | XOR Output |
# |:-------:|:-------:|:------:|
# | 0 | 0 | 0 |
# | 0 | 1 | 1 |
# | 1 | 0 | 1 |
# | 1 | 1 | 0 |
#
# In quantum computers, the job of the XOR gate is done by the controlled-NOT gate. Since that's quite a long name, we usually just call it the CNOT. In Qiskit its name is ```cx```, which is even shorter. In circuit diagrams, it is drawn as in the image below.
qc_cnot = QuantumCircuit(2)
qc_cnot.cx(0,1)
qc_cnot.draw()
# This is applied to a pair of qubits. One acts as the control qubit (this is the one with the little dot). The other acts as the *target qubit* (with the big circle).
#
# There are multiple ways to explain the effect of the CNOT. One is to say that it looks at its two input bits to see whether they are the same or different. Next, it overwrites the target qubit with the answer. The target becomes ```0``` if they are the same, and ```1``` if they are different.
#
# <img src="images/cnot_xor.svg">
#
# Another way of explaining the CNOT is to say that it does a NOT on the target if the control is ```1```, and does nothing otherwise. This explanation is just as valid as the previous one (in fact, it’s the one that gives the gate its name).
#
# Try the CNOT out for yourself by trying each of the possible inputs. For example, here's a circuit that tests the CNOT with the input ```01```.
qc = QuantumCircuit(2,2)
qc.x(0)
qc.cx(0,1)
qc.measure(0,0)
qc.measure(1,1)
qc.draw()
# If you execute this circuit, you’ll find that the output is ```11```. We can think of this happening because of either of the following reasons.
#
# - The CNOT calculates whether the input values are different and finds that they are, which means that it wants to output ```1```. It does this by writing over the state of qubit 1 (which, remember, is on the left of the bit string), turning ```01``` into ```11```.
#
# - The CNOT sees that qubit 0 is in state ```1```, and so applies a NOT to qubit 1. This flips the ```0``` of qubit 1 into a ```1```, and so turns ```01``` into ```11```.
#
# Here is a table showing all the possible inputs and corresponding outputs of the CNOT gate:
#
# | Input (q1 q0) | Output (q1 q0) |
# |:-------------:|:--------------:|
# | 00 | 00 |
# | 01 | 11 |
# | 10 | 10 |
# | 11 | 01 |
#
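# As a quick check (an added aside, not part of the original text), we can run the CNOT on all four possible inputs and reproduce the table above.
# +
for input_str in ['00', '01', '10', '11']:
    qc_test = QuantumCircuit(2, 2)
    # prepare the input; remember that qubit 0 is the rightmost bit of the string
    if input_str[1] == '1':
        qc_test.x(0)
    if input_str[0] == '1':
        qc_test.x(1)
    qc_test.cx(0, 1)
    qc_test.measure(0, 0)
    qc_test.measure(1, 1)
    counts = execute(qc_test, Aer.get_backend('qasm_simulator')).result().get_counts()
    print(input_str, '->', list(counts)[0])
# -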
# For our half adder, we don’t want to overwrite one of our inputs. Instead, we want to write the result on a different pair of qubits. For this, we can use two CNOTs.
# +
qc_ha = QuantumCircuit(4,2)
# encode inputs in qubits 0 and 1
qc_ha.x(0) # For a=0, remove this line. For a=1, leave it.
qc_ha.x(1) # For b=0, remove this line. For b=1, leave it.
qc_ha.barrier()
# use cnots to write the XOR of the inputs on qubit 2
qc_ha.cx(0,2)
qc_ha.cx(1,2)
qc_ha.barrier()
# extract outputs
qc_ha.measure(2,0) # extract XOR value
qc_ha.measure(3,1)
qc_ha.draw()
# -
# We are now halfway to a fully working half adder. We just have the other bit of the output left to do: the one that will live on qubit 3.
#
# If you look again at the four possible sums, you’ll notice that there is only one case for which this is ```1``` instead of ```0```: ```1+1```=```10```. It happens only when both the bits we are adding are ```1```.
#
# To calculate this part of the output, we could just get our computer to look at whether both of the inputs are ```1```. If they are — and only if they are — we need to do a NOT gate on qubit 3. That will flip it to the required value of ```1``` for this case only, giving us the output we need.
#
# For this, we need a new gate: like a CNOT but controlled on two qubits instead of just one. This will perform a NOT on the target qubit only when both controls are in state ```1```. This new gate is called the *Toffoli*. For those of you who are familiar with Boolean logic gates, it is basically an AND gate.
#
# In Qiskit, the Toffoli is represented with the `ccx` command.
# +
qc_ha = QuantumCircuit(4,2)
# encode inputs in qubits 0 and 1
qc_ha.x(0) # For a=0, remove this line. For a=1, leave it.
qc_ha.x(1) # For b=0, remove this line. For b=1, leave it.
qc_ha.barrier()
# use cnots to write the XOR of the inputs on qubit 2
qc_ha.cx(0,2)
qc_ha.cx(1,2)
# use ccx to write the AND of the inputs on qubit 3
qc_ha.ccx(0,1,3)
qc_ha.barrier()
# extract outputs
qc_ha.measure(2,0) # extract XOR value
qc_ha.measure(3,1) # extract AND value
qc_ha.draw()
# -
# In this example, we are calculating ```1+1```, because the two input bits are both ```1```. Let's see what we get.
counts = execute(qc_ha,Aer.get_backend('qasm_simulator')).result().get_counts()
plot_histogram(counts)
# The result is ```10```, which is the binary representation of the number 2. We have built a computer that can solve the famous mathematical problem of 1+1!
#
# Now you can try it out with the other three possible inputs, and show that our algorithm gives the right results for those too.
#
# The half adder contains everything you need for addition. With the NOT, CNOT, and Toffoli gates, we can create programs that add any set of numbers of any size.
#
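# As an optional extra (a sketch we've added, not from the original text), here is one standard way to chain these gates into a *full adder*, which adds two bits plus a carry-in. Qubits 0 and 1 hold the inputs, qubit 2 holds the carry-in, and the sum and carry-out are read from qubits 2 and 3.
# +
qc_fa = QuantumCircuit(4, 2)
# encode the inputs: a=1, b=1, carry-in=1
qc_fa.x(0)
qc_fa.x(1)
qc_fa.x(2)
qc_fa.barrier()
qc_fa.ccx(0, 1, 3)   # carry-out is set if a AND b
qc_fa.cx(0, 1)       # qubit 1 now holds a XOR b
qc_fa.ccx(1, 2, 3)   # carry-out is also set if (a XOR b) AND carry-in
qc_fa.cx(1, 2)       # qubit 2 now holds the sum a XOR b XOR carry-in
qc_fa.barrier()
qc_fa.measure(2, 0)  # extract the sum
qc_fa.measure(3, 1)  # extract the carry-out
counts = execute(qc_fa, Aer.get_backend('qasm_simulator')).result().get_counts()
plot_histogram(counts)  # 1+1+1 gives '11', i.e. 3
# -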
# These three gates are enough to do everything else in computing too. In fact, we can even do without the CNOT. Additionally, the NOT gate is only really needed to create bits with value ```1```. The Toffoli gate is essentially the atom of mathematics. It is the simplest element, from which every other problem-solving technique can be compiled.
#
# As we'll see, in quantum computing we split the atom.
import qiskit
qiskit.__qiskit_version__
| content/ch-states/atoms-computation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="hTDiNfQkNGs9"
# + [markdown] id="LdMtX0E3NU-V"
# Apple Stock
# Introduction:
# We are going to use Apple's stock price.
#
# Step 1. Import the necessary libraries
# + id="RRnlprlJNYpl"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="h9TmucTBN1f1"
from google.colab import drive
drive.mount('/content/gdrive')
# + id="kO42BGh0N_95"
from google.colab import drive
drive.mount('/content/drive')
# + id="_7iQoqOdNxsc"
import os
os.chdir("/content/drive/MyDrive/colab")
print("Directory changed")
# + id="bhlJER9cNhJ1"
apple = pd.read_csv('data/appl_1980_2014.csv')
apple.head()
# + [markdown] id="ygbW6y52ORWY"
# Step 4. Check out the type of the columns
# + id="oaR-a1nQOUD3"
apple.dtypes
# + [markdown] id="PU3dQSobOY3Z"
# Step 5. Transform the Date column as a datetime type
# + id="_ir2TjyTOcaF"
apple.Date = pd.to_datetime(apple.Date)
apple
# + [markdown] id="0cvvck-KOrPf"
# Step 6. Set the date as the index
# + id="Kcy6PYoBOt4X"
apple = apple.set_index('Date')
apple
# + [markdown] id="DVP0yJkePGNL"
# Step 7. Is there any duplicate dates?
# + id="Wimds0-aPJxn"
apple.index.nunique() == apple.shape[0]
# + [markdown] id="KyvL3JipPZ_N"
# Step 8. Oops... it seems the index starts from the most recent date. Make the first entry the oldest date.
# + id="4wAESAkyXaq4"
apple.index.is_unique
# + id="s1GSoNbcPd-Z"
apple = apple.sort_index(ascending=True)
apple.head()
# + [markdown] id="rFjVc9fsXhcN"
# Step 9. Get the last business day of each month
# + id="SEdmSI7_XlG4"
apple.resample('BM').mean()
# + [markdown] id="aO7W8AcnXs6P"
# Step 10. What is the difference in days between the first day and the last day?
# + id="O-47xxNLXvul"
(apple.index.max() - apple.index.min()).days
# + [markdown] id="wypzqvhxX53x"
# Step 11. How many months of data do we have?
# + id="OwCog1iCX9Vw"
len(apple.resample('BM').mean())
# + [markdown] id="fRY2FqxoYDVk"
# Step 12. Plot the 'Adj Close' value. Set the size of the figure to 13.5 x 9 inches
# + id="A3vMRYPOYGEu"
plt.figure(figsize = (13.5, 9))
apple['Adj Close'].plot()
| Apple_Stock.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sparsity and L1 Regularization
# Learning objectives:
# - Calculate the size of a model
# - Increase sparsity with L1 regularization in order to reduce the model size
#
# One good way to reduce complexity is to use a regularization function that drives weights to exactly zero. For linear models, a weight of zero is equivalent to not using the corresponding feature at all. Besides helping to avoid overfitting, the resulting model is also more efficient.
#
# L1 regularization is one way to increase sparsity.
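# As a tiny illustration (an added aside with made-up numbers, not part of the original exercise): minimizing (w - 1)^2 + c*|w| snaps w to exactly zero once c is large enough, whereas the L2 penalty c*w^2 only shrinks w towards zero.
# +
import numpy as np
w_grid = np.linspace(-2, 2, 2001)
for c in [0.5, 1.0, 2.5]:
    w_l1 = w_grid[np.argmin((w_grid - 1)**2 + c * np.abs(w_grid))]   # L1-penalized solution
    w_l2 = w_grid[np.argmin((w_grid - 1)**2 + c * w_grid**2)]        # L2-penalized solution
    print("c=%.1f  L1 solution w=%.2f   L2 solution w=%.2f" % (c, w_l1, w_l2))
# -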
# ### Setup
# Load the California housing dataset.
# +
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_df = pd.read_csv("https://download.mlcc.google.cn/mledu-datasets/california_housing_train.csv",
sep=',')
california_housing_df = california_housing_df.reindex(np.random.permutation(california_housing_df.index))
# +
# Preprocess features
def preprocess_features(california_housing_df):
    """Prepares input features from the California housing DataFrame and adds synthetic features.
    Args:
        california_housing_df: a DataFrame containing the California housing data
    Returns:
        A DataFrame containing the processed features
    """
selected_features = california_housing_df[["latitude",
"longitude",
"housing_median_age",
"total_rooms",
"total_bedrooms",
"population",
"households",
"median_income"]]
processed_features = selected_features.copy()
    # Create a synthetic feature
processed_features["rooms_per_person"] = (california_housing_df["total_rooms"] / california_housing_df["population"])
return processed_features
# Preprocess the targets: add a binary label (is the house value high?) as the target
def preprocess_targets(california_housing_df):
    """Prepares target features (i.e. labels) from the California housing DataFrame.
    Args:
        california_housing_df: a DataFrame containing the California housing data
    Returns:
        A DataFrame containing the target labels
    """
    # Classification threshold
value_threshold = 265000
output_targets = pd.DataFrame()
    # Use astype to convert the boolean values to binary (0/1)
output_targets["median_house_value_is_high"] = (california_housing_df["median_house_value"] > value_threshold).astype(float)
return output_targets
# +
# Choose the first 12000 (out of 17000) examples for training
training_examples = preprocess_features(california_housing_df.head(12000))
training_targets = preprocess_targets(california_housing_df.head(12000))
# Choose the last 5000 examples for validation
validation_examples = preprocess_features(california_housing_df.tail(5000))
validation_targets = preprocess_targets(california_housing_df.tail(5000))
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
# -
def my_input_fn(features, targets, batch_size=1,shuffle=True, num_epochs=None):
"""使用多个特征训练一个线性回归器
Args:
features: 特征的DataFrame
targets: 目标的DataFrame
batch_size: 传递给模型的批大小
shuffle: 是否打乱数据
num_epochs: 数据重复的epochs数
Returns:
下一批数据元组(features, labels)
"""
    # Convert the pandas data into a dict of numpy arrays
features = {key:np.array(value) for key,value in dict(features).items()}
    # Construct the dataset
ds = Dataset.from_tensor_slices((features, targets))
ds = ds.batch(batch_size).repeat(num_epochs)
    # Shuffle the data if requested
if shuffle:
ds = ds.shuffle(10000)
    # Return the next batch of data
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
# Compute bucket boundaries from quantiles, so that each bucket contains an equal number of elements
def get_quantile_based_boundaries(feature_values, num_buckets):
boundaries = np.arange(1.0, num_buckets) / num_buckets
quantiles = feature_values.quantile(boundaries)
return [quantiles[q] for q in quantiles.keys()]
def construct_bucketized_feature_columns():
"""构建TensorFlow特征列
Returns:
特征列集合
"""
households = tf.feature_column.numeric_column("households")
longitude = tf.feature_column.numeric_column("longitude")
latitude = tf. feature_column.numeric_column("latitude")
total_rooms = tf.feature_column.numeric_column("total_rooms")
total_bedrooms = tf.feature_column.numeric_column("total_bedrooms")
population = tf.feature_column.numeric_column("population")
housing_median_age = tf.feature_column.numeric_column("housing_median_age")
median_income = tf.feature_column.numeric_column("median_income")
rooms_per_person = tf.feature_column.numeric_column("rooms_per_person")
bucketized_households = tf.feature_column.bucketized_column(
households, boundaries=get_quantile_based_boundaries(
training_examples["households"], 10))
bucketized_longitude = tf.feature_column.bucketized_column(
longitude, boundaries=get_quantile_based_boundaries(
training_examples["longitude"], 50))
bucketized_latitude = tf.feature_column.bucketized_column(
latitude, boundaries=get_quantile_based_boundaries(
training_examples["latitude"], 50))
bucketized_total_rooms = tf.feature_column.bucketized_column(
total_rooms,boundaries=get_quantile_based_boundaries(
training_examples["total_rooms"], 10))
bucketized_total_bedrooms = tf.feature_column.bucketized_column(
total_bedrooms, boundaries=get_quantile_based_boundaries(
training_examples["total_bedrooms"], 10))
bucketized_population = tf.feature_column.bucketized_column(
population, boundaries=get_quantile_based_boundaries(
training_examples["population"], 10))
bucketized_housing_median_age = tf.feature_column.bucketized_column(
housing_median_age, boundaries=get_quantile_based_boundaries(
training_examples["housing_median_age"], 10))
bucketized_median_income = tf.feature_column.bucketized_column(
median_income, boundaries=get_quantile_based_boundaries(
training_examples["median_income"], 10))
bucketized_rooms_per_person = tf.feature_column.bucketized_column(
rooms_per_person, boundaries=get_quantile_based_boundaries(
training_examples["rooms_per_person"], 10))
long_x_lat = tf.feature_column.crossed_column(
set([bucketized_longitude, bucketized_latitude]), hash_bucket_size=1000)
feature_columns = set([
long_x_lat,
bucketized_longitude,
bucketized_latitude,
bucketized_total_rooms,
bucketized_total_bedrooms,
bucketized_housing_median_age,
bucketized_households,
bucketized_median_income,
bucketized_rooms_per_person])
return feature_columns
# ### Calculate the model size
# To calculate the model size, we count the number of non-zero parameters. The helper function below does this using the Estimator API.
def model_size(estimator):
variables = estimator.get_variable_names()
size = 0
for variable in variables:
        # Exclude variables with these names
if not any(x in variable for x in ['global_step',
'centered_bias_weight',
'bias_weight',
'Ftrl']):
size += np.count_nonzero(estimator.get_variable_value(variable))
return size
# ### Reduce the model size
# Our team now needs us to build a highly accurate logistic regression model that can tell whether housing in a given city block is costly.
#
# The team requires the model to have **no more than 600 parameters** and a **log loss on the test set below 0.35**; otherwise the model cannot be released.
# #### Task 1. Find a suitable regularization coefficient.
# Find an L1 regularization strength that satisfies the requirements above.
def train_linear_classifier_model(
learning_rate,
regularization_strength,
steps,
batch_size,
feature_columns,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""使用多个特征训练一个线性回归模型
"""
periods = 10
steps_per_period = steps / periods
    # Define the optimizer with L1 regularization
my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate,
l1_regularization_strength=regularization_strength)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    # Create a linear classifier object
linear_regressor = tf.estimator.LinearClassifier(feature_columns=feature_columns,
optimizer=my_optimizer)
    # Create the input functions
training_input_fn = lambda: my_input_fn(training_examples,training_targets["median_house_value_is_high"], batch_size=batch_size)
predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets["median_house_value_is_high"], num_epochs=1, shuffle=False)
predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets["median_house_value_is_high"], num_epochs=1, shuffle=False)
    # Train the model, printing the loss after each period
print("Start training...")
print("Log loss (on training data): ")
training_rmse = []
validation_rmse = []
for period in range(0, periods):
linear_regressor.train(input_fn=training_input_fn, steps=steps_per_period)
        # Compute predictions
training_probabilities = linear_regressor.predict(input_fn=predict_training_input_fn)
training_probabilities = np.array([item["probabilities"] for item in training_probabilities])
validation_probabilities = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_probabilities = np.array([item["probabilities"] for item in validation_probabilities])
        # Compute the training and validation loss
training_log_loss = metrics.log_loss(training_targets, training_probabilities)
validation_log_loss = metrics.log_loss(validation_targets, validation_probabilities)
        # Print the result for this period
print("period %02d : %.2f" % (period, training_log_loss))
training_rmse.append(training_log_loss)
validation_rmse.append(validation_log_loss)
print("Model training finished!")
    # Plot the loss over the training periods
plt.ylabel("Log Loss")
plt.xlabel("Periods")
plt.title("Log Loss via Periods")
plt.tight_layout()
plt.plot(training_rmse, label="training")
    plt.plot(validation_rmse, label="validation")
plt.legend()
return linear_regressor
# A regularization strength of 0.1 should be enough.
# Note the trade-off here: the stronger the regularization, the smaller the model we get, but it can hurt the classification loss.
linear_classifier = train_linear_classifier_model(
learning_rate=0.1,
regularization_strength=0.25,
steps=600,
batch_size=100,
feature_columns=construct_bucketized_feature_columns(),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
print("Model size:", model_size(linear_classifier))
| code/l1_regularization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"checksum": "cebdb0cf7dac64a3ae23ccb88f6fda3b", "grade": false, "grade_id": "cell-fa48e7f1b94baa5b", "locked": true, "schema_version": 1, "solution": false}
# # Assignment 1
# For this assignment you are welcome to use other regex resources such as regex "cheat sheets" you find on the web. Feel free to share good resources with your peers in slack!
#
#
# + [markdown] deletable=false editable=false nbgrader={"checksum": "d17f561e3c6c08092810b982d085f5be", "grade": false, "grade_id": "cell-d4da7eb9acee2a6d", "locked": true, "schema_version": 1, "solution": false}
# Before you start working on the problems, here is a small example to help you understand how to write your own answers. In short, the solution should be written within the function body given, and the final result should be returned. Then the autograder will try to call the function and validate your returned result accordingly.
# + deletable=false editable=false nbgrader={"checksum": "7eeb5e7d0f0e0137caed9f3b5cb925b1", "grade": false, "grade_id": "cell-4a96535829224b3f", "locked": true, "schema_version": 1, "solution": false}
def example_word_count():
# This example question requires counting words in the example_string below.
example_string = "Amy is 5 years old"
# YOUR CODE HERE.
# You should write your solution here, and return your result, you can comment out or delete the
# NotImplementedError below.
result = example_string.split(" ")
return len(result)
#raise NotImplementedError()
# -
# ## Part A
#
# Find a list of all of the names in the following string using regex.
# + deletable=false nbgrader={"checksum": "29bc8c161c0e246c1e3ef4820cc164f7", "grade": false, "grade_id": "names", "locked": false, "schema_version": 1, "solution": true}
import re
def names():
simple_string = """Amy is 5 years old, and her sister Mary is 2 years old.
Ruth and Peter, their parents, have 3 kids."""
# YOUR CODE HERE
name_list = re.findall("[A-Z][a-z]+", simple_string)
return name_list
#raise NotImplementedError()
names()
# -
def names():
simple_string = """Amy is 5 years old, and her sister Mary is 2 years old.
Ruth and Peter, their parents, have 3 kids."""
# YOUR CODE HERE
name_list = re.findall(r"[A-Z][a-z]+", simple_string)
return name_list
#raise NotImplementedError()
names()
# + deletable=false editable=false nbgrader={"checksum": "ed5c09ac57f7d98130d5abc557f6d6c4", "grade": true, "grade_id": "correct_names", "locked": false, "points": 1, "schema_version": 1, "solution": false}
assert len(names()) == 4, "There are four names in the simple_string"
# -
# Play around
s = 'ABCAC'
print(re.search("A", s))          # a Match object, since 'A' occurs in s
print(re.match("A", s) == True)   # False: a Match object is never equal to True
print(bool(re.match("A", s)))     # True: cast the Match object to bool instead
len(re.split('A', s))             # splitting 'ABCAC' on 'A' yields 3 pieces
# Play Around
s = 'ACBCAC'
print(re.findall("^AC", s))    # ['AC']: '^' anchors the pattern to the start of the string
print(re.findall("[^AC]", s))  # ['B']: inside a character class, '^' means negation
print(re.findall("^[AC]", s))  # ['A']: anchored match of a single character from the class
# + [markdown] deletable=false editable=false nbgrader={"checksum": "77b3d100c47e9e41d98f82dfeb7eba9c", "grade": false, "grade_id": "cell-ed64e3464ddd7ba7", "locked": true, "schema_version": 1, "solution": false}
# ## Part B
#
# The dataset file in [assets/grades.txt](assets/grades.txt) contains a line separated list of people with their grade in
# a class. Create a regex to generate a list of just those students who received a B in the course.
# -
# + deletable=false nbgrader={"checksum": "e977a1df674e9fa684e6d172aec92824", "grade": false, "grade_id": "grades", "locked": false, "schema_version": 1, "solution": true}
import re
def grades():
with open ("assets/grades.txt", "r") as file:
grades = file.read()
# YOUR CODE HERE
grade_b = []
for item in re.findall("[\w ]*\:\sB\s?\n?",grades):
#print(re.split("[\:]",item)[0])
grade_b.append(re.split("[\:]",item)[0])
return grade_b
#raise NotImplementedError()
grades()
# -
import re
def student_grades():
with open ("assets/grades.txt", "r") as file:
grades = file.read()
# YOUR CODE HERE
grade_b = []
for item in re.findall("[\w ]*\:\sB\s?\n?",grades):
#print(re.split("[\:]",item)[0])
grade_b.append(re.split("[\:]",item)[0])
return grade_b
#raise NotImplementedError()
student_grades()
# + deletable=false editable=false nbgrader={"checksum": "e0bcc452d60fc45259e58d3116d25477", "grade": true, "grade_id": "correct_grades", "locked": false, "points": 1, "schema_version": 1, "solution": false}
assert len(grades()) == 16
# + [markdown] deletable=false editable=false nbgrader={"checksum": "36e3e2a3a3e29fa7b823d22476392320", "grade": false, "grade_id": "cell-e253518e37d33f0c", "locked": true, "schema_version": 1, "solution": false}
# ## Part C
#
# Consider the standard web log file in [assets/logdata.txt](assets/logdata.txt). This file records the access a user makes when visiting a web page (like this one!). Each line of the log has the following items:
# * a host (e.g., '192.168.127.12')
# * a user_name (e.g., 'feest6811' **note: sometimes the user name is missing! In this case, use '-' as the value for the username.**)
# * the time a request was made (e.g., '21/Jun/2019:15:45:24 -0700')
# * the post request type (e.g., 'POST /incentivize HTTP/1.1' **note: not everything is a POST!**)
#
# Your task is to convert this into a list of dictionaries, where each dictionary looks like the following:
# ```
# example_dict = {"host":"192.168.127.12",
# "user_name":"feest6811",
# "time":"21/Jun/2019:15:45:24 -0700",
# "request":"POST /incentivize HTTP/1.1"}
# ```
# -
with open("assets/logdata.txt", "r") as file:
logdata = file.read()
len(re.findall("\]\s\"[A-Z]{3,6}\s\/[\d\w+\-%]+\/?[\d\w\-+%]*\/?[\d\w\-+%]*\/?[\d\w\-+%]*\s?HTTP\/[\d]\.[\d]", logdata))
# + deletable=false nbgrader={"checksum": "c04017e59e48b2f4c77bf425ed84b356", "grade": false, "grade_id": "logs", "locked": false, "schema_version": 1, "solution": true}
import re
def logs():
with open("assets/logdata.txt", "r") as file:
logdata = file.read()
# YOUR CODE HERE
list_of_dict = []
pattern="""
(?P<host>[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}\.[\d]{1,3}) #the host name
(\s\-\s) #an indicator prior to the user name
(?P<user_name>[a-z]+[\d]{4}|\-) # user name
(\s\[) #separator for user name and time
(?P<time>\d{2}\/Jun\/\d{4}\:\d{2}\:\d{2}\:\d{2}\s-0700) #the time
(\]\s\") #separator for time and request
(?P<request>[A-Z]{3,6}\s\/[\d\w+\-%]+\/?[\d\w\-+%]*\/?[\d\w\-+%]*\/?[\d\w\-+%]*\s?HTTP\/[\d]\.[\d]) #the request
(\"\s[\d]+\s[\d]+)"""
for item in re.finditer(pattern,logdata,re.VERBOSE):
list_of_dict.append(item.groupdict())
return list_of_dict
#raise NotImplementedError()
logs()
# -
len(logs())
# + deletable=false editable=false nbgrader={"checksum": "1fd5f2cca190d37c667fb189352540d3", "grade": true, "grade_id": "cell-correct_logs", "locked": true, "points": 1, "schema_version": 1, "solution": false}
assert len(logs()) == 979
one_item={'host': '192.168.127.12',
'user_name': 'feest6811',
'time': '21/Jun/2019:15:45:24 -0700',
'request': 'POST /incentivize HTTP/1.1'}
assert one_item in logs(), "Sorry, this item should be in the log results, check your formating"
# -
# ## Testing Uncommon Strings
test_string = chr(92) + chr(110)
print(len(test_string), test_string)
test_string2 = "\n"
print(len(test_string2), test_string2)
# You will see a blank line after the "1"
# We turn out using another backslash character to escape the special meaning of the original backslash character
test_string3 = "\\n"
print(len(test_string3), test_string3)
print("\\")
print("I am \\nit")
test_string = "I am \\nit"
re.findall(r"\\", test_string)
test_string2 = r"\capturethis\ some other stuff"
re.findall(r'\\(.*)\\', test_string2)
| SIADS_505/assignment1/assignment1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import uproot3
import uproot
import numpy as np
import pandas
import matplotlib.pyplot as plt
evt_max = 1000
evtn_max = 1000
events_20 = uproot3.open("20DM_Mar19_1000.root")["ttrr"]
events_30 = uproot3.open("30DM_Mar19_1000.root")["ttrr"]
events_40 = uproot3.open("40DM_Mar19_1000.root")["ttrr"]
events_50 = uproot3.open("50DM_Mar19_1000.root")["ttrr"]
events_60 = uproot3.open("60DM_Mar19_1000.root")["ttrr"]
events_70 = uproot3.open("70DM_Mar19_1000.root")["ttrr"]
events_80 = uproot3.open("80DM_Mar19_1000.root")["ttrr"]
events_ah = uproot3.open("a_H_Mar19_1000.root")["ttrr"]
events_al = uproot3.open("a_L_Mar19_1000.root")["ttrr"]
events_ph = uproot3.open("p_H_Mar19_1000.root")["ttrr"]
events_pl = uproot3.open("p_L_Mar19_1000.root")["ttrr"]
events_gah = uproot3.open("ga_H_Mar19_1000.root")["ttrr"]
events_gal = uproot3.open("ga_L_Mar19_1000.root")["ttrr"]
#events.keys()
#events.show()
t2t_20 = events_20.pandas.df("m_psd", entrystop=evt_max)
npe_20 = events_20.pandas.df("m_npe", entrystop=evt_max)
t2t_30 = events_30.pandas.df("m_psd", entrystop=evt_max)
npe_30 = events_30.pandas.df("m_npe", entrystop=evt_max)
t2t_40 = events_40.pandas.df("m_psd", entrystop=evt_max)
npe_40 = events_40.pandas.df("m_npe", entrystop=evt_max)
t2t_50 = events_50.pandas.df("m_psd", entrystop=evt_max)
npe_50 = events_50.pandas.df("m_npe", entrystop=evt_max)
t2t_60 = events_60.pandas.df("m_psd", entrystop=evt_max)
npe_60 = events_60.pandas.df("m_npe", entrystop=evt_max)
t2t_70 = events_70.pandas.df("m_psd", entrystop=evt_max)
npe_70 = events_70.pandas.df("m_npe", entrystop=evt_max)
t2t_80 = events_80.pandas.df("m_psd", entrystop=evt_max)
npe_80 = events_80.pandas.df("m_npe", entrystop=evt_max)
t2t_ah = events_ah.pandas.df("m_psd", entrystop=evtn_max)
npe_ah = events_ah.pandas.df("m_npe", entrystop=evtn_max)
t2t_al = events_al.pandas.df("m_psd", entrystop=evtn_max)
npe_al = events_al.pandas.df("m_npe", entrystop=evtn_max)
t2t_ph = events_ph.pandas.df("m_psd", entrystop=evtn_max)
npe_ph = events_ph.pandas.df("m_npe", entrystop=evtn_max)
t2t_pl = events_pl.pandas.df("m_psd", entrystop=evtn_max)
npe_pl = events_pl.pandas.df("m_npe", entrystop=evtn_max)
t2t_gah = events_gah.pandas.df("m_psd", entrystop=evtn_max)
npe_gah = events_gah.pandas.df("m_npe", entrystop=evtn_max)
t2t_gal = events_gal.pandas.df("m_psd", entrystop=evtn_max)
npe_gal = events_gal.pandas.df("m_npe", entrystop=evtn_max)
t_cut = 50  # PSD time-bin cut used below (also set in the next cell)
print(t2t_70[t_cut::100])
print(npe_70[:1178]/2300)
# +
t_cut = 50
end = 200000
plt.figure(0,figsize=(8,6))
plt.scatter(t2t_gah[t_cut::100],npe_gah[:]/2300,s=1,c='black',label='NC')
plt.scatter(t2t_20[t_cut::100],npe_20[:]/2300,s=1,c='blue',label='20 MeV')
plt.scatter(t2t_30[t_cut::100],npe_30[:]/2300,s=1,c='green',label='30 MeV')
plt.scatter(t2t_40[t_cut::100],npe_40[:]/2300,s=1,c='purple',label='40 MeV')
plt.scatter(t2t_50[t_cut::100],npe_50[:]/2300,s=1,c='brown',label='50 MeV')
plt.scatter(t2t_60[t_cut::100],npe_60[:]/2300,s=1,c='cyan',label='60 MeV')
#plt.scatter(t2t_70[t_cut::100],npe_70[:1178]/2300,s=1,c='tomato',label='70 MeV')
plt.scatter(t2t_80[t_cut::100],npe_80[:]/2300,s=1,c='red',label='80 MeV')
plt.title('PSD - FN cut test')
plt.ylim(2, 80)
plt.xlim(.0, .12)
#plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
plt.legend(loc='best')
plt.xlabel('PSD T2T ratio')
plt.ylabel('Visible energy in MeV')
plt.grid(b=None, which='major', axis='both')
# +
time = np.linspace(0,1000,99)
evt_id = 50
psd = []
for i in range(len(t2t_80.m_psd[evt_id][:])-1):
new = t2t_80.m_psd[evt_id][i]-t2t_80.m_psd[evt_id][i+1]
neww = new*npe_80.m_npe[evt_id]
psd.append(neww)
plt.figure(0,figsize=(8,6))
plt.step(time,psd,c='black',label='FastN')
plt.title('PSD - FN cut test')
#plt.ylim(2, 80)
#plt.xlim(.0, .12)
#plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
plt.legend(loc='best')
plt.xlabel('Time')
plt.ylabel('npe per time bin')
plt.yscale('log')
plt.grid(b=None, which='major', axis='both')
# +
evt_num = 1000
new_t2t_20 = []
new_t2t_30 = []
new_t2t_40 = []
new_t2t_50 = []
new_t2t_60 = []
new_t2t_70 = []
new_t2t_80 = []
new_t2t_gah = []
def new_twindow(evt_num, t2t, npe, new_t2t, start, tailcut, end):
    """Recompute each event's tail-to-total PSD ratio for a custom time window:
    the tail is summed over bins [tailcut, end), the total over [start, end)."""
for j in range(evt_num):
psd = []
for i in range(start,end):
new = (t2t.m_psd[j][i]-t2t.m_psd[j][i+1])*npe.m_npe[j]
psd.append(new)
tail = np.sum(psd[tailcut-start:end-start])
total = np.sum(psd[:])
new_t2t.append(tail/total)
#new_twindow(evt_num, t2t_30, npe_30, new_t2t_30, 20, 25, 80)
#new_twindow(evt_num, t2t_80, npe_80, new_t2t_80, 20, 50, 80)
#new_twindow(evt_num, t2t_gah, npe_gah, new_t2t_gah, 20, 50, 80)
#print(new_t2t_30)
# +
import pandas as pd
import numpy as np
T100_800 = {}
df = pd.DataFrame(T100_800)
Mass = list(range(20, 90, 10))
Time = list(range(50, 1050, 50))
# each DM mass uses its own (t2t, npe) sample, so that column 'DM<mass>_<time>' is built from the matching dataset
dm_samples = {20: (t2t_20, npe_20), 30: (t2t_30, npe_30), 40: (t2t_40, npe_40),
              50: (t2t_50, npe_50), 60: (t2t_60, npe_60), 70: (t2t_70, npe_70),
              80: (t2t_80, npe_80)}
for j in range(0, 7):
    print(Mass[j])
    t2t_j, npe_j = dm_samples[Mass[j]]
    for i in range(0, 12):
        print(i)
        new_ratio = []
        new_twindow(min(1000, len(npe_j)), t2t_j, npe_j, new_ratio, 20, 25 + i*5, 99)
        df['DM' + str(Mass[j]) + '_' + str(Time[i+4])] = new_ratio
print(df)
# -
df.to_csv("DM_200_900.csv",index=False)
plt.figure(0,figsize=(8,6))
plt.scatter(new_t2t_20[:evt_num],npe_20[:evt_num],s=3,c='blue',label='DM 20')
plt.scatter(new_t2t_80[:evt_num],npe_80[:evt_num],s=3,c='red',label='DM 80')
plt.scatter(new_t2t_gah[:evt_num],npe_gah[:evt_num],s=3,c='black',label='NC')
plt.title('PSD - FN cut test')
#plt.ylim(2, 80)
#plt.xlim(.0, .12)
#plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
plt.legend(loc='best')
plt.xlabel('PSD T2T ratio')
plt.ylabel('Visible energy in MeV')
plt.grid(b=None, which='major', axis='both')
| HW5/HW5_files/HW5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dl
# language: python
# name: dl
# ---
# + [markdown] id="ffuHOST6CsLm"
# # Generating names with recurrent neural networks
#
# In this programming assignment you'll find yourself delving into the heart (and other intestines) of recurrent neural networks on a class of toy problems.
#
# Struggling to find a name for a variable? Let's see how you'd come up with a name for your son/daughter. Surely no human has expertise in what makes a good child name, so let us train an RNN instead.
# + id="WulV-Skdzc8Y" language="bash"
#
# shred -u setup_colab.py
#
# wget https://raw.githubusercontent.com/hse-aml/intro-to-dl-pytorch/main/utils/setup_colab.py -O setup_colab.py
# + id="NBDyjj2ezc8Y"
import setup_colab
setup_colab.setup_week05()
# + id="dloEnPemCsLt"
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="zOVCrUnHQmJT"
# ### Fill in your Coursera token and email
# To successfully submit your answers to our grader, please fill in your Coursera submission token and email.
# + id="UAeDUQxwQnUa"
import grading
grader = grading.Grader(
assignment_key="<KEY>",
all_parts=["pttMO", "uly0D", "mf20L", "zwTu9"]
)
# + id="hnklRR5-QnXO"
# token expires every 30 min
COURSERA_TOKEN = "### YOUR TOKEN HERE ###"
COURSERA_EMAIL = "### YOUR EMAIL HERE ###"
# + [markdown] id="mTT2uW5xCsLu"
# # Load data
# The dataset contains ~8k names from different cultures, all in latin transcript.
#
# This notebook has been designed so as to allow you to quickly swap names for something similar: deep learning article titles, IKEA furniture, pokemon names, etc.
# + id="dhFyOX6PCsLv"
start_token = " " # so that the network knows that we're generating a first token
# this is the token for padding,
# we will add fake pad token at the end of names
# to make them of equal size for further batching
pad_token = "#"
with open("names.txt") as f:
names = f.read()[:-1].split('\n')
names = [start_token + name for name in names]
# + id="Kf43mc6CCsLv"
print('number of samples:', len(names))
for x in names[::1000]:
print(x)
names[::1000];
# + id="72rNxCG9CsLv"
MAX_LENGTH = max(map(len, names))
print("max length:", MAX_LENGTH)
plt.title('Sequence length distribution')
plt.hist(list(map(len, names)), bins=25);
# + [markdown] id="T3VEZIuHCsLv"
# # Text processing
#
# First we need to collect a "vocabulary" of all unique tokens i.e. unique characters. We can then encode inputs as a sequence of character ids.
# + id="CLCx5pkcCsLw"
### YOUR CODE HERE: all unique characters in the dataset ###
tokens = set(''.join(names))
tokens.add(pad_token) # adding pad_token
print(list(tokens))
num_tokens = len(tokens)
print ('\nnum_tokens = ', num_tokens)
assert 50 < num_tokens < 60, "Names should contain within 50 and 60 unique tokens depending on encoding"
# + [markdown] id="61QZczNkCsLw"
# ### Cast everything from symbols into identifiers
#
# Instead of symbols we'll feed our recurrent neural network with ids of characters from our dictionary.
#
# To create such dictionary, let's assign `token_to_id`
# + id="9AxRKAp0CsLx"
### YOUR CODE HERE: create a dictionary of {symbol -> its index in tokens}
token_to_id = {t: i for i,t in enumerate(tokens)}
print(token_to_id)
# notice the special characters along the way
assert len(tokens) == len(token_to_id), "dictionaries must have same size"
# + id="vJ4tU0V-CsLx"
def to_matrix(lines, max_len=None, pad=token_to_id[pad_token], dtype='int32', batch_first = True):
"""Casts a list of names into rnn-digestable matrix"""
max_len = max_len or max(map(len, lines))
lines_ix = np.zeros([len(lines), max_len], dtype) + pad # make sure it is padded
for i in range(len(lines)):
line_ix = [token_to_id[c] for c in lines[i]]
lines_ix[i, :len(line_ix)] = line_ix
if not batch_first: # convert [batch, time] into [time, batch]
lines_ix = np.transpose(lines_ix)
return lines_ix
# + id="kyBkrX0BCsLx"
# Example: cast 4 random names to padded matrices (so that we can easily batch them)
print('\n'.join(names[::2000]))
print('\n', to_matrix(names[::2000]))
# + [markdown] id="rn0in_1ICsLx"
# # Defining a recurrent neural network
#
# We can rewrite recurrent neural network as a consecutive application of dense layer to input $x_t$ and previous rnn state $h_t$. This is exactly what we're gonna do now.
# <img src="images/rnn.png" width=600>
#
# Since we're training a language model, there should also be:
# * An embedding layer that converts character id x_t to a vector.
# * An output layer that predicts the probabilities of the next character based on h_t+1
# + id="Fq_BI6hpUPqS"
import os
import torch, torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # you can change to `cuda`
# os.path('./')
# + id="Yq0HH3UfCsLy"
class CharRNNCell(pl.LightningModule):
"""
Implement the scheme above as torch module
"""
def __init__(self, num_tokens=len(tokens), embedding_size=16, rnn_num_units=64):
super().__init__()
self.num_units = rnn_num_units
self.num_tokens = num_tokens
self.embedding_size = embedding_size
self.rnn_num_units = rnn_num_units
self.embedding = nn.Embedding(num_tokens, embedding_size)
self.rnn_update = nn.Linear(embedding_size + rnn_num_units, rnn_num_units)
self.rnn_to_logits = nn.Linear(rnn_num_units, num_tokens)
def forward(self, x, h_prev):
"""
This method computes h_next(x, h_prev) and log P(x_next | h_next)
We'll call it repeatedly to produce the whole sequence.
:param x: batch of character ids, int64[batch_size]
:param h_prev: previous rnn hidden states, float32 matrix [batch, rnn_num_units]
"""
# get vector embedding of x
### YOUR CODE HERE ###
# x: torch.Size([batch])
x_emb = self.embedding(x) # torch.Size([batch, embedding_size])
# compute next hidden state using self.rnn_update
# hint: use torch.cat(..., dim=...) for concatenation
### YOUR CODE HERE ###
# h_prev.shape: torch.Size([batch, rnn_num_units])
        emb_rnn = torch.cat((x_emb, h_prev), 1)  # torch.Size([batch, embedding_size + rnn_num_units])
        h_next = torch.tanh(self.rnn_update(emb_rnn))  # concat -> linear -> tanh
        assert h_next.size() == h_prev.size()  # torch.Size([batch, rnn_num_units])
        # compute logits for the next character from the new hidden state
        logits = self.rnn_to_logits(h_next)  # torch.Size([batch, num_tokens])
        return h_next, logits
def initial_state(self, batch_size):
""" return rnn state before it processes first input (aka h0) """
return torch.zeros(batch_size, self.num_units)
    # NOTE: the LightningModule hooks below are not used in this notebook; training is done with the manual loop further down
    def general_step(self, batch, batch_idx, mode):
# batch: data, label, lengths
data = batch['data']
label = batch['label']
lengths = batch['lengths']
out = self.forward(data)
loss_fn = nn.BCELoss(reduction='mean')
loss = loss_fn(out, label)
return loss
def general_end(self, outputs, mode):
avg_loss = torch.stack([x[mode+'_loss'] for x in outputs]).mean()
return avg_loss
def training_step(self, batch, batch_idx):
loss = self.general_step(batch, batch_idx, 'train')
return {'loss': loss}
def configure_optimizers(self):
optim = torch.optim.Adam(self.parameters())
return optim
# + [markdown] id="RukLWQHqCsLy"
# # RNN: loop
#
# Once `rnn_one_step` is ready, let's apply it in a loop over name characters to get predictions -- we will generate names character by character starting with start_token:
#
# <img src="images/char-nn.png" width=600>
# + id="JetmBlR3CsLy"
def rnn_loop(char_rnn, batch_ix):
"""
Computes logits_seq(next_character) for all time-steps in batch_ix
:param batch_ix: an int32 matrix of shape [batch, time], output of to_matrix(lines)
"""
    batch_ix = batch_ix.to(DEVICE)
batch_size, max_length = batch_ix.size()
hid_state = char_rnn.initial_state(batch_size).to(DEVICE)
logits_seq = []
for x_t in batch_ix.transpose(0,1):
hid_state, logits = char_rnn(x_t, hid_state) # <-- here we call your one-step code
logits_seq.append(logits)
# logits_seq[0].shape: torch.Size([batch_size, logits_seq])
return torch.stack(logits_seq, dim=1) # torch.Size([batch_size=5, max_len=8, len_tokens:56])
# + [markdown] id="EQmar3Z6vzWA"
# Check that the output of rnn_loop has the right format:
# + id="pq_nvp_hCsLz"
batch_ix = to_matrix(names[:5])
batch_ix = torch.tensor(batch_ix, device=DEVICE, dtype=torch.int64) # torch.Size({batch_size, max_len})
logits_seq = rnn_loop(CharRNNCell().to(DEVICE), batch_ix)
# + id="sIE8z_NPvdJs"
## GRADED PART, DO NOT CHANGE!
grader.set_answer("mf20L", tuple(logits_seq.size()))
# + [markdown] id="xs6EJtCyCsL0"
# ## Training
# We train our char-rnn exactly the same way we train any deep learning model, the only difference is that this time we sample strings.
#
# To compute the loss in a vectorized manner, we can take `batch_ix[:, 1:]` -- a matrix of token ids shifted 1 step to the left so the i-th element is actually the "next token" for the i-th prediction.
# + id="n9MFAe_bCsL1"
from IPython.display import clear_output
from random import sample
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # you can change to `cuda`
char_rnn = CharRNNCell().to(DEVICE)
opt = torch.optim.Adam(char_rnn.parameters())
criterion = nn.CrossEntropyLoss()
history = []
# + id="NfylXTtzCsL1"
for i in range(1000):
    # for simplicity we sample a random batch of names on every step
batch_ix = to_matrix(sample(names, 32))
batch_ix = torch.tensor(batch_ix, device=DEVICE, dtype=torch.int64) #torch.Size([batch_size, max_len])
# clear gradients
opt.zero_grad()
# do forward pass
logits_seq = rnn_loop(char_rnn, batch_ix).to(DEVICE)# torch.Size([batch_size, max_len, logits_seq])
    # make shifted versions of batch and predictions to compute the loss:
    # the prediction made after reading token t is compared with token t+1
    predictions_logits = logits_seq[:, :-1].moveaxis(-1, 1)  # [batch, num_tokens, time-1]
    actual_next_tokens = batch_ix[:, 1:]
    # compute loss
    loss = criterion(predictions_logits, actual_next_tokens)
history.append(loss.item())
# train with backprop ### YOUR CODE HERE ###
loss.backward()
opt.step()
assert np.mean(history[:10]) > np.mean(history[-10:]), "RNN didn't converge."
# + id="_HVv8Wjn6EHm"
## GRADED PART, DO NOT CHANGE!
grader.set_answer("zwTu9", int(np.mean(history[:10]) > np.mean(history[-10:])))
# + [markdown] id="Y5-v3rfx4TSp"
# Here we computed loss over all symbols including pad tokens at the end of each name. In practice it would be better to exclude all pad tokens except one for each sequence. We need our model to be able to generate one pad token at the end of the sequence to mark the end of the sequence, but there is no need to generate all next pad tokens (we use them just for
# convenient data representation).
#
# The `ignore_index` parameter of `CrossEntropyLoss` allows us to do exactly that.
#
#
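# For example, a padding-aware criterion could be constructed like this (a small added sketch, not used in the training loop above):
criterion_no_pad = nn.CrossEntropyLoss(ignore_index=token_to_id[pad_token])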
# + [markdown] id="MVXLPYYECsL1"
# ## RNN: sampling
# Once we've trained our network a bit, let's get to actually generating stuff. All we need is the single rnn step function you have defined in char_rnn.forward.
# + id="FwI8hhSxCsL1"
def generate_sample(char_rnn, seed_phrase=' ', max_length=MAX_LENGTH, temperature=1.0):
'''
The function generates text given a start phrase.
:param seed_phrase: prefix characters. The RNN is asked to continue the phrase
:param max_length: maximum output length, including seed_phrase
:param temperature: coefficient for sampling. Higher temperature produces more chaotic outputs,
smaller temperature converges to the single most likely output
'''
x_sequence = [token_to_id[token] for token in seed_phrase]
x_sequence = torch.tensor([x_sequence], dtype=torch.int64)
hid_state = char_rnn.initial_state(batch_size=1)
#feed the seed phrase, if any
for i in range(len(seed_phrase) - 1):
hid_state, _ = char_rnn(x_sequence[:, i], hid_state)
#start generating
for _ in range(max_length - len(seed_phrase)):
char_rnn.to('cpu')
hid_state, logits_next = char_rnn(x_sequence[:, -1], hid_state)
p_next = F.softmax(logits_next / temperature, dim=-1).data.numpy()[0]
# sample next token and push it back into x_sequence
next_ix = np.random.choice(num_tokens,p=p_next)
next_ix = torch.tensor([[next_ix]], dtype=torch.int64)
x_sequence = torch.cat([x_sequence, next_ix], dim=1)
return ''.join([list(tokens)[ix] for ix in x_sequence.data.numpy()[0]])
# + id="vHdkvurmCsL2"
for _ in range(10):
print(generate_sample(char_rnn))
# + id="2RnK9FZACsL2"
for _ in range(10):
print(generate_sample(char_rnn, seed_phrase=' Trump'))
# + [markdown] id="FbukAsjoCsL8"
# ## More hight-level implementation
# What we just did is a manual low-level implementation of RNN. While it's cool, we guess you won't like the idea of re-writing it from scratch on every occasion.
#
# As you might have guessed, torch has a solution for this. To be more specific, there are two options:
#
# `nn.RNNCell(emb_size, rnn_num_units)` - implements a single step of RNN just like you did. Basically concat-linear-tanh
# `nn.RNN(emb_size, rnn_num_units)` - implements the whole rnn_loop for you.
# There's also `nn.LSTMCell` vs `nn.LSTM`, `nn.GRUCell` vs `nn.GRU`, etc. etc.
#
# In this example we'll rewrite the char_rnn and rnn_loop using high-level rnn API.
# + id="UacxOUHMCsL9"
class CharRNNLoop(nn.Module):
def __init__(self, num_tokens=num_tokens, emb_size=16, rnn_num_units=64):
super(self.__class__, self).__init__()
self.emb = nn.Embedding(num_tokens, emb_size)
self.rnn = nn.RNN(emb_size, rnn_num_units, batch_first=True)
self.hid_to_logits = nn.Linear(rnn_num_units, num_tokens)
def forward(self, x):
"""
Computes log P(next_character) for all time-steps in x
:param x: an int32 matrix of shape [batch, time], output of to_matrix(lines)
:output next_logp: a float32 tensor [batch, time, dictionary_size]
"""
        h_seq, _ = self.rnn(self.emb(x))         # h_seq: [batch, time, rnn_num_units]
        next_logits = self.hid_to_logits(h_seq)  # [batch, time, num_tokens]
        return next_logits
# + [markdown] id="jouqgGYkCG3z"
# Train the model using the same training code and check that it works very similar to our hand-written RNN.
# + id="NMQSqcfRBPLD"
model = CharRNNLoop().to(DEVICE)
opt = torch.optim.Adam(model.parameters())
history_high = [] # put the history in this variable for grading
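# + [markdown]
# Below is a minimal training loop for `CharRNNLoop` (our sketch, mirroring the manual loop above); it populates `history_high` so that the convergence check in the next cell passes.
# +
for i in range(1000):
    batch_ix = to_matrix(sample(names, 32))
    batch_ix = torch.tensor(batch_ix, device=DEVICE, dtype=torch.int64)

    logits_seq = model(batch_ix)  # [batch, time, num_tokens]

    # same shifted loss as before: the prediction made after reading token t is compared with token t+1
    loss = criterion(logits_seq[:, :-1].moveaxis(-1, 1), batch_ix[:, 1:])

    opt.zero_grad()
    loss.backward()
    opt.step()
    history_high.append(loss.item())
# -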
# + id="RiK3feiNBdTk"
# history_high is populated by the training loop in the cell above
assert np.mean(history_high[:10]) > np.mean(history_high[-10:]), "RNN didn't converge."
# + id="HpWe48ahB0VE"
## GRADED PART, DO NOT CHANGE!
grader.set_answer("pttMO", int(np.mean(history_high[:10]) > np.mean(history_high[-10:])))
grader.set_answer("uly0D", len(set([generate_sample(char_rnn, ' Sad') for _ in range(25)])))
# + id="HOFdJpvqzc8i"
# grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)
# + [markdown] id="HnOnFHU-CsL9"
# Here's another example with LSTM
# + id="j3sn0jpUCsL9"
import torch, torch.nn as nn
import torch.nn.functional as F
class CharLSTMCell(nn.Module):
"""
Implements something like CharRNNCell, but with LSTM
"""
def __init__(self, num_tokens=len(tokens), embedding_size=16, rnn_num_units=64):
super().__init__()
self.num_units = rnn_num_units
self.emb = nn.Embedding(num_tokens, embedding_size)
self.lstm = nn.LSTMCell(embedding_size, rnn_num_units)
self.rnn_to_logits = nn.Linear(rnn_num_units, num_tokens)
def forward(self, x, prev_state):
(prev_h, prev_c) = prev_state
(next_h, next_c) = self.lstm(self.emb(x), (prev_h, prev_c))
logits = self.rnn_to_logits(next_h)
return (next_h, next_c), logits
def initial_state(self, batch_size):
""" LSTM has two state variables, cell and hid """
return torch.zeros(batch_size, self.num_units), torch.zeros(batch_size, self.num_units)
char_lstm = CharLSTMCell()
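# Quick shape check (a hedged addition, not part of the original assignment):
# run one step of the untrained cell on a dummy batch of four start tokens.
dummy_x = torch.zeros(4, dtype=torch.int64)
(h1, c1), step_logits = char_lstm(dummy_x, char_lstm.initial_state(batch_size=4))
print(h1.shape, c1.shape, step_logits.shape)  # -> [4, 64], [4, 64], [4, num_tokens]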
# + [markdown] id="pMRUeIzNCsL8"
# # Try it out!
#
# __Disclaimer:__ This part of the assignment is entirely optional. You won't receive bonus points for it. However, it's a fun thing to do. Please share your results on the course forums.
#
# You've just implemented a recurrent language model that can be tasked with generating any kind of sequence, so there's plenty of data you can try it on:
#
# * Novels/poems/songs of your favorite author
# * News titles/clickbait titles
# * Source code of Linux or Tensorflow
# * Molecules in [smiles](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) format
# * Melody in notes/chords format
# * IKEA catalog titles
# * Pokemon names
# * Cards from Magic, the Gathering / Hearthstone
#
# If you're willing to give it a try, here's what you wanna look at:
# * Current data format is a sequence of lines, so a novel can be formatted as a list of sentences. Alternatively, you can change data preprocessing altogether.
# * While some datasets are readily available, others can only be scraped from the web. Try `Selenium` or `Scrapy` for that.
# * Make sure MAX_LENGTH is adjusted for longer datasets.
# * More complex tasks require larger RNN architecture, try more neurons or several layers. It would also require more training iterations.
# * Long-term dependencies in music, novels or molecules are better handled with LSTM or GRU
#
# __Good hunting!__
| Informatics/Deep Learning/1. Intro to DL - HSE/Week_05/week05_generating_names_with_rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Roboschool simulations of physical robotics with Amazon SageMaker
#
# ---
# ## Introduction
#
# Roboschool is an [open source](https://github.com/openai/roboschool/tree/master/roboschool) physics simulator that is commonly used to train RL policies for simulated robotic systems. Roboschool provides 3D visualization of physical systems with multiple joints in contact with each other and their environment.
#
# This notebook will show how to install Roboschool into the SageMaker RL container, and train pre-built robotics applications that are included with Roboschool.
# ## Pick which Roboschool problem to solve
#
# Roboschool defines a [variety](https://github.com/openai/roboschool/blob/master/roboschool/__init__.py) of Gym environments that correspond to different robotics problems. Here we're highlighting a few of them at varying levels of difficulty:
#
# - **Reacher (easy)** - a very simple robot with just 2 joints reaches for a target
# - **Hopper (medium)** - a simple robot with one leg and a foot learns to hop down a track
# - **Humanoid (difficult)** - a complex 3D robot with two arms, two legs, etc. learns to balance without falling over and then to run on a track
#
# The simpler problems train faster with less computational resources. The more complex problems are more fun.
# Uncomment the problem to work on
roboschool_problem = 'reacher'
#roboschool_problem = 'hopper'
#roboschool_problem = 'humanoid'
# ## Pre-requisites
#
# ### Imports
#
# To get started, we'll import the Python libraries we need and set up the environment with a few prerequisites for permissions and configurations.
import sagemaker
import boto3
import sys
import os
import glob
import re
import subprocess
from IPython.display import HTML
import time
from time import gmtime, strftime
sys.path.append("common")
from misc import get_execution_role, wait_for_s3_object
from docker_utils import build_and_push_docker_image
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
# ### Setup S3 bucket
#
# Set up the linkage and authentication to the S3 bucket that you want to use for checkpoints and metadata.
sage_session = sagemaker.session.Session()
s3_bucket = sage_session.default_bucket()
s3_output_path = 's3://{}/'.format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
# ### Define Variables
#
# We define variables such as the job prefix for the training jobs and, when bringing your own container (BYOC), the image path for the container.
# create a descriptive job name
job_name_prefix = 'rl-roboschool-'+roboschool_problem
# ### Configure where training happens
#
# You can run your RL training jobs on a SageMaker notebook instance or on a local notebook instance. In both of these scenarios, you can run the following in either local or SageMaker modes. The local mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`.
# +
# run in local_mode on this machine, or as a SageMaker TrainingJob?
local_mode = False
if local_mode:
instance_type = 'local'
else:
# If on SageMaker, pick the instance type
instance_type = "ml.c5.2xlarge"
# -
# ### Create an IAM role
#
# Either get the execution role when running from a SageMaker notebook instance with `role = sagemaker.get_execution_role()` or, when running from a local notebook instance, use the utils method `role = get_execution_role()` to create an execution role.
# +
try:
role = sagemaker.get_execution_role()
except:
role = get_execution_role()
print("Using IAM role arn: {}".format(role))
# -
# ### Install docker for `local` mode
#
# In order to work in `local` mode, you need to have docker installed. When running from your local machine, please make sure that you have docker and docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script to install the dependencies.
#
# Note: you can only run a single local notebook at a time.
# only run from SageMaker notebook instance
if local_mode:
# !/bin/bash ./common/setup.sh
# ## Build docker container
#
# We must build a custom docker container with Roboschool installed. This takes care of everything:
#
# 1. Fetching base container image
# 2. Installing Roboschool and its dependencies
# 3. Uploading the new container image to ECR
#
# This step can take a long time if you are running on a machine with a slow internet connection. If your notebook instance is in SageMaker or EC2 it should take 3-10 minutes depending on the instance type.
#
# +
# %%time
cpu_or_gpu = 'gpu' if instance_type.startswith('ml.p') else 'cpu'
repository_short_name = "sagemaker-roboschool-ray-%s" % cpu_or_gpu
docker_build_args = {
'CPU_OR_GPU': cpu_or_gpu,
'AWS_REGION': boto3.Session().region_name,
}
custom_image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args)
print("Using ECR image %s" % custom_image_name)
# -
# ## Write the Training Code
#
# The training code for each problem lives in the `src/` directory, in a file named `train-<roboschool_problem>.py`. The cell below prints the script for the problem selected above.
# !pygmentize src/train-{roboschool_problem}.py
# ## Train the RL model using the Python SDK Script mode
#
# If you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.
#
# 1. Specify the source directory where the environment, presets and training code is uploaded.
# 2. Specify the entry point as the training code
# 3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.
# 4. Define the training parameters such as the instance count, instance type, S3 path for the output and base job name.
# 5. Specify the hyperparameters for the RL agent algorithm. The RLCOACH_PRESET or the RLRAY_PRESET can be used to specify the RL agent algorithm you want to use.
# 6. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.
# +
# %%time
metric_definitions = RLEstimator.default_metric_definitions(RLToolkit.RAY)
estimator = RLEstimator(entry_point="train-%s.py" % roboschool_problem,
source_dir='src',
dependencies=["common/sagemaker_rl"],
image_name=custom_image_name,
role=role,
train_instance_type=instance_type,
train_instance_count=1,
output_path=s3_output_path,
base_job_name=job_name_prefix,
metric_definitions=metric_definitions,
hyperparameters={
# Attention scientists! You can override any Ray algorithm parameter here:
#"rl.training.config.horizon": 5000,
#"rl.training.config.num_sgd_iter": 10,
}
)
estimator.fit(wait=local_mode)
job_name = estimator.latest_training_job.job_name
print("Training job: %s" % job_name)
# -
# ## Visualization
#
# RL training can take a long time, so while it's running there are a variety of ways we can track the progress of the training job. Some intermediate output gets saved to S3 during training, so we'll set up to capture that.
# +
print("Job name: {}".format(job_name))
s3_url = "s3://{}/{}".format(s3_bucket,job_name)
if local_mode:
output_tar_key = "{}/output.tar.gz".format(job_name)
else:
output_tar_key = "{}/output/output.tar.gz".format(job_name)
intermediate_folder_key = "{}/output/intermediate/".format(job_name)
output_url = "s3://{}/{}".format(s3_bucket, output_tar_key)
intermediate_url = "s3://{}/{}".format(s3_bucket, intermediate_folder_key)
print("S3 job path: {}".format(s3_url))
print("Output.tar.gz location: {}".format(output_url))
print("Intermediate folder path: {}".format(intermediate_url))
tmp_dir = "/tmp/{}".format(job_name)
os.system("mkdir {}".format(tmp_dir))
print("Create local folder {}".format(tmp_dir))
# -
# ### Fetch videos of training rollouts
# Videos of certain rollouts get written to S3 during training. Here we fetch the last 10 videos from S3, and render the last one.
recent_videos = wait_for_s3_object(
s3_bucket, intermediate_folder_key, tmp_dir,
fetch_only=(lambda obj: obj.key.endswith(".mp4") and obj.size>0),
limit=10, training_job_name=job_name)
last_video = sorted(recent_videos)[-1] # Pick which video to watch
os.system("mkdir -p ./src/tmp_render/ && cp {} ./src/tmp_render/last_video.mp4".format(last_video))
HTML('<video src="./src/tmp_render/last_video.mp4" controls autoplay></video>')
# ### Plot metrics for training job
# We can see the reward metric of the training as it's running, using the algorithm metrics that are recorded in CloudWatch. We can plot this to see the performance of the model over time.
# +
# %matplotlib inline
from sagemaker.analytics import TrainingJobAnalytics
df = TrainingJobAnalytics(job_name, ['episode_reward_mean']).dataframe()
num_metrics = len(df)
if num_metrics == 0:
print("No algorithm metrics found in CloudWatch")
else:
plt = df.plot(x='timestamp', y='value', figsize=(12,5), legend=True, style='b-')
plt.set_ylabel('Mean reward per episode')
plt.set_xlabel('Training time (s)')
# -
# ### Monitor training progress
# You can repeatedly run the visualization cells to get the latest videos or see the latest metrics as the training job proceeds.
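# A hedged, optional addition: when running as a SageMaker TrainingJob (i.e. `local_mode = False` and `wait=False` in `estimator.fit` above), you can tail the job's CloudWatch logs and block until it finishes:
#
# ```python
# sage_session.logs_for_job(job_name, wait=True)
# ```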
| reinforcement_learning/rl_roboschool_ray/rl_roboschool_ray.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Additional training functions
# [`train`](/train.html#train) provides a number of extension methods that are added to [`Learner`](/basic_train.html#Learner) (see below for a list and details), along with three simple callbacks:
#
# - [`ShowGraph`](/train.html#ShowGraph)
# - [`GradientClipping`](/train.html#GradientClipping)
# - [`BnFreeze`](/train.html#BnFreeze)
# + hide_input=true
from fastai.gen_doc.nbdoc import *
from fastai.train import *
from fastai.vision import *
# -
# ## [`Learner`](/basic_train.html#Learner) extension methods
# These methods are automatically added to all [`Learner`](/basic_train.html#Learner) objects created after importing this module. They provide convenient access to a number of callbacks, without requiring them to be manually created.
# + hide_input=true
show_doc(fit_one_cycle)
# + hide_input=true
show_doc(one_cycle_scheduler)
# -
# See [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) for details.
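# A typical call looks like the sketch below (`data` here stands for a DataBunch such as the MNIST sample created further down this page):
#
# ```python
# learn = cnn_learner(data, models.resnet18, metrics=accuracy)
# learn.fit_one_cycle(1, max_lr=1e-3)
# ```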
# + hide_input=true
show_doc(lr_find)
# -
# See [`LRFinder`](/callbacks.lr_finder.html#LRFinder) for details.
# + hide_input=true
show_doc(to_fp16)
# -
# See [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) for details.
# + hide_input=true
show_doc(to_fp32)
# + hide_input=true
show_doc(mixup)
# + hide_input=true
show_doc(ClassificationInterpretation)
# -
# See [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback) for more details.
# ## Additional callbacks
# We'll show examples below using our MNIST sample. As usual, the `on_something` methods are called directly by the fastai library; there is no need to call them yourself.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
# + hide_input=true
show_doc(ShowGraph, title_level=3)
# -
# ```python
# learn = cnn_learner(data, models.resnet18, metrics=accuracy, callback_fns=ShowGraph)
# learn.fit(3)
# ```
# ![Training graph](imgs/train_graph.gif)
# + hide_input=true
show_doc(ShowGraph.on_epoch_end)
# + hide_input=true
show_doc(GradientClipping)
# -
learn = cnn_learner(data, models.resnet18, metrics=accuracy,
callback_fns=partial(GradientClipping, clip=0.1))
learn.fit(1)
# + hide_input=true
show_doc(GradientClipping.on_backward_end)
# + hide_input=true
show_doc(BnFreeze)
# -
# For batchnorm layers where `requires_grad==False`, you generally don't want to update their moving average statistics, in order to avoid the model's statistics getting out of sync with its pre-trained weights. You can add this callback to automate this freezing of statistics (internally, it calls `eval` on these layers).
learn = cnn_learner(data, models.resnet18, metrics=accuracy, callback_fns=BnFreeze)
learn.fit(1)
# + hide_input=true
show_doc(BnFreeze.on_epoch_begin)
# -
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
# ## New Methods - Please document or move to the undocumented section
# + hide_input=true
show_doc(ClassificationInterpretation.plot_top_losses)
# -
#
# + hide_input=true
show_doc(ClassificationInterpretation.from_learner)
# -
#
# + hide_input=true
show_doc(ClassificationInterpretation.top_losses)
# -
#
# + hide_input=true
show_doc(ClassificationInterpretation.confusion_matrix)
# -
#
# + hide_input=true
show_doc(ClassificationInterpretation.most_confused)
# -
#
# + hide_input=true
show_doc(ClassificationInterpretation.plot_confusion_matrix)
# -
#
# + hide_input=true
show_doc(ClassificationInterpretation.plot_multi_top_losses)
# -
#
| docs_src/train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Train the Machine Learning Model
# <NAME>, ThoughtWorks, June 19, 2020
# ---
# ## Install dependencies
import sys
# !{sys.executable} -m pip install sagemaker -U
# !{sys.executable} -m pip install sagemaker-experiments
# ---
# ## Import Libraries
# +
import pandas as pd
import numpy as np
from time import strftime, gmtime
import sys, os, json
import joblib
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor
from sklearn import metrics
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.s3 import S3Uploader, S3Downloader
from smexperiments.experiment import Experiment
from smexperiments.trial import Trial
from sagemaker.sklearn.estimator import SKLearn
# -
# ---
# ## Definitions
# +
train_filename = 'final_train.csv'
test_filename = 'final_validate.csv'
train_dir = 'train'
test_dir = 'test'
local_data_dir = 'CD4ML-AWS-Serverless/data'
s3_prefix = 'demandforecast'
model_name = 'decision_tree'
seed = 8675309
# -
# ---
# ## Define the Machine Learning Model and Parameters
# +
model_parameters = {
"random_forest": {
"n_estimators": 10,
"max_features": 0.5
},
"adaboost": {
"n_estimators": 100
},
"gradient_boosting": {
"n_estimators": 200,
"max_depth": 4
},
"decision_tree": {
"criterion": 'mse'
}
}
def get_model_class(model_name):
model_classes = {
'random_forest': RandomForestRegressor,
'adaboost': AdaBoostRegressor,
'gradient_boosting': GradientBoostingRegressor,
'decision_tree': DecisionTreeRegressor
}
return model_classes[model_name]
# -
# ---
# ## Open S3 Session and define bucket
sess = boto3.Session()
account_id = sess.client('sts', region_name=sess.region_name).get_caller_identity()["Account"]
bucket = 'sagemaker-studio-{}-{}'.format(sess.region_name, account_id)
# ---
# ## Load training and validation data from S3 and store it locally
def load_data():
S3Downloader.download('s3://{}/{}/{}/{}'.format(bucket, s3_prefix, train_dir, train_filename),
'{}/{}'.format(local_data_dir, train_dir))
S3Downloader.download('s3://{}/{}/{}/{}'.format(bucket, s3_prefix, test_dir, test_filename),
'{}/{}'.format(local_data_dir, test_dir))
train = pd.read_csv('{}/{}/{}'.format(local_data_dir, train_dir, train_filename), engine='python')
validate = pd.read_csv('{}/{}/{}'.format(local_data_dir, test_dir, test_filename), engine='python')
return train, validate
train, validate = load_data()
train.head()
validate.head()
# ---
# ## Train the model
def train_model(train, model_name='decision_tree', seed=None):
train_dropped = train.drop('unit_sales', axis=1)
target = train['unit_sales']
model_class = get_model_class(model_name)
params = model_parameters[model_name]
print("Training %s model" % model_name)
clf = model_class(random_state=seed, **params)
trained_model = clf.fit(train_dropped, target)
return trained_model, params
model, params = train_model(train, model_name, seed)
# ---
# ## Validate the trained model
# +
def make_predictions(model, validate):
validate_dropped = validate.drop('unit_sales', axis=1).fillna(-1)
validate_preds = model.predict(validate_dropped)
return validate_preds
def write_predictions_and_score(model_name, evaluation_metrics, model):
path = '{}/models/{}/'.format(local_data_dir, model_name)
filename = 'model.pkl'
if not os.path.exists(path):
os.makedirs(path)
print("Writing to {}".format(path + filename))
joblib.dump(model, path + filename)
path = '{}/results/{}/'.format(local_data_dir, model_name)
filename = 'metrics.json'
print("Writing to {}".format(path + filename))
if not os.path.exists(path):
os.makedirs(path)
with open(path + filename, 'w+') as score_file:
json.dump(evaluation_metrics, score_file)
def eval_nwrmsle(predictions, targets, weights):
if type(predictions) == list:
predictions = np.array([np.nan if x < 0 else x for x in predictions])
elif type(predictions) == pd.Series:
predictions[predictions < 0] = np.nan
targetsf = targets.astype(float)
targetsf[targets < 0] = np.nan
weights = 1 + 0.25 * weights
log_square_errors = (np.log(predictions + 1) - np.log(targetsf + 1)) ** 2
return(np.sqrt(np.sum(weights * log_square_errors) / np.sum(weights)))
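# For reference (derived directly from the code above), eval_nwrmsle computes the
# normalized weighted root mean squared logarithmic error:
#   NWRMSLE = sqrt( sum_i w_i * (ln(p_i + 1) - ln(y_i + 1))^2 / sum_i w_i )
# with weights w_i = 1 + 0.25 * perishable_i, so errors on perishable items count more.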
# +
print("Making prediction on validation data")
validation_predictions = make_predictions(model, validate)
print("Calculating metrics")
evaluation_metrics = {
'nwrmsle' : eval_nwrmsle(validation_predictions, validate['unit_sales'].values, validate['perishable'].values),
'r2_score': metrics.r2_score(y_true=validate['unit_sales'].values, y_pred=validation_predictions)
}
# track.log_metrics(evaluation_metrics)
write_predictions_and_score(model_name, evaluation_metrics, model)
print("Evaluation done with metrics {}.".format(json.dumps(evaluation_metrics)))
# -
# ---
# # SageMaker Training
# +
sagemaker_session = sagemaker.Session()
# Get a SageMaker-compatible role used by this Notebook Instance.
role = get_execution_role()
# +
# train_input = sagemaker_session.upload_data(local_data_dir, key_prefix="{}/{}".format(s3_prefix, local_data_dir))
s3_data='s3://{}/{}/{}'.format(bucket, s3_prefix, train_dir)
s3_input_train = sagemaker.s3_input(s3_data, content_type='csv')
print ("s3_data = " + s3_data)
# +
script_path = './data/scikitmodel.py'
sklearn = SKLearn(
entry_point=script_path,
train_instance_type="ml.c4.xlarge",
role=role,
sagemaker_session=sagemaker_session,
hyperparameters={"criterion": 'mse'})
# -
sklearn.fit({'train': s3_input_train})
# + active=""
# Environment variables:
#
# SM_HOSTS=["algo-1"]
# SM_NETWORK_INTERFACE_NAME=eth0
# SM_HPS={"criterion":"mse"}
# SM_USER_ENTRY_POINT=scikitmodel.py
# SM_FRAMEWORK_PARAMS={}
# SM_RESOURCE_CONFIG={"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"}
# SM_INPUT_DATA_CONFIG={"train":{"ContentType":"csv","RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}}
# SM_OUTPUT_DATA_DIR=/opt/ml/output/data
# SM_CHANNELS=["train"]
# SM_CURRENT_HOST=algo-1
# SM_MODULE_NAME=scikitmodel
# SM_LOG_LEVEL=20
# SM_FRAMEWORK_MODULE=sagemaker_sklearn_container.training:main
# SM_INPUT_DIR=/opt/ml/input
# SM_INPUT_CONFIG_DIR=/opt/ml/input/config
# SM_OUTPUT_DIR=/opt/ml/output
# SM_NUM_CPUS=4
# SM_NUM_GPUS=0
# SM_MODEL_DIR=/opt/ml/model
# SM_MODULE_DIR=s3://sagemaker-us-east-1-261586618408/sagemaker-scikit-learn-2020-06-24-10-30-56-394/source/sourcedir.tar.gz
# SM_TRAINING_ENV={"additional_framework_parameters":{},"channel_input_dirs":{"train":"/opt/ml/input/data/train"},"current_host":"algo-1","framework_module":"sagemaker_sklearn_container.training:main","hosts":["algo-1"],"hyperparameters":{"criterion":"mse"},"input_config_dir":"/opt/ml/input/config","input_data_config":{"train":{"ContentType":"csv","RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}},"input_dir":"/opt/ml/input","is_master":true,"job_name":"sagemaker-scikit-learn-2020-06-24-10-30-56-394","log_level":20,"master_hostname":"algo-1","model_dir":"/opt/ml/model","module_dir":"s3://sagemaker-us-east-1-261586618408/sagemaker-scikit-learn-2020-06-24-10-30-56-394/source/sourcedir.tar.gz","module_name":"scikitmodel","network_interface_name":"eth0","num_cpus":4,"num_gpus":0,"output_data_dir":"/opt/ml/output/data","output_dir":"/opt/ml/output","output_intermediate_dir":"/opt/ml/output/intermediate","resource_config":{"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"},"user_entry_point":"scikitmodel.py"}
# SM_USER_ARGS=["--criterion","mse"]
# SM_OUTPUT_INTERMEDIATE_DIR=/opt/ml/output/intermediate
# SM_CHANNEL_TRAIN=/opt/ml/input/data/train
# SM_HP_CRITERION=mse
# PYTHONPATH=/miniconda3/bin:/miniconda3/lib/python37.zip:/miniconda3/lib/python3.7:/miniconda3/lib/python3.7/lib-dynload:/miniconda3/lib/python3.7/site-packages
#
| .ipynb_checkpoints/TrainModelSM-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Self-Driving Car Engineer Nanodegree
#
# ## Deep Learning
#
# ## Project: Build a Traffic Sign Recognition Classifier
#
# In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
#
# > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
#
# In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
#
# The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
#
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by double-clicking the cell to enter edit mode.
# ---
# ## Step 0: Load The Data
# +
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = '../data/train.p'
validation_file= '../data/valid.p'
testing_file = '../data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
assert(len(X_train) == len(y_train))
assert(len(X_valid) == len(y_valid))
assert(len(X_test) == len(y_test))
# -
# ---
#
# ## Step 1: Dataset Summary & Exploration
#
# The pickled data is a dictionary with 4 key/value pairs:
#
# - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
# - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
# - `'sizes'` is a list containing tuples, (width, height), representing the original width and height of the image.
# - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
#
# Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
# ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
# +
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = len(X_train)
# TODO: Number of validation examples
n_validation = len(X_valid)
# TODO: Number of testing examples.
n_test = len(X_test)
# TODO: What's the shape of a traffic sign image?
image_shape = X_train[0].shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(set(y_train))  # 43 traffic sign classes
print("Number of validation examples =", n_validation)
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
# -
# ### Include an exploratory visualization of the dataset
# Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
#
# The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
#
# **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
# +
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import random
import numpy as np
# Visualizations will be shown in the notebook.
# %matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image)
print(y_train[index])
# -
# histogram our data with numpy
def histogram(y_train, n_classes):
n, bins = np.histogram(y_train, n_classes)
# get the center of hist
width = 0.5*(bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, n, align='center', width=width)
plt.show()
histogram(y_train, n_classes)
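# The exploration prompt above asks whether the class distribution is the same
# across splits; the same helper can be reused for the validation and test labels.
histogram(y_valid, n_classes)
histogram(y_test, n_classes)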
# ----
#
# ## Step 2: Design and Test a Model Architecture
#
# Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
#
# The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
#
# With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
#
# There are various aspects to consider when thinking about this problem:
#
# - Neural network architecture (is the network over or underfitting?)
# - Play around preprocessing techniques (normalization, rgb to grayscale, etc)
# - Number of examples per label (some have more than others).
# - Generate fake data.
#
# Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper, but it's good practice to try to read papers like these.
# ### Pre-process the Data Set (normalization, grayscale, etc.)
# Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
#
# Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
#
# Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
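# For example, the quick approximate normalization mentioned above could be written as
# follows (a sketch only; the preprocessing cells below use a grayscale conversion instead):
#
# ```python
# X_train_norm = (X_train.astype(np.float32) - 128) / 128
# ```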
# +
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
from sklearn.utils import shuffle
X_train = np.concatenate([X_train, X_train])
y_train = np.concatenate([y_train, y_train])
print(X_train.shape)
print(y_train.shape)
X_train_grey = np.sum(X_train/3, axis=3, keepdims=True)
X_valid_grey = np.sum(X_valid/3, axis=3, keepdims=True)
X_test_grey = np.sum(X_test/3, axis=3, keepdims=True)
X_train_grey, y_train = shuffle(X_train_grey, y_train)
print(X_train_grey.shape)
histogram(y_train, n_classes)
# -
# ## Setup TensorFlow
# The `EPOCH` and `BATCH_SIZE` values affect the training speed and model accuracy.
#
# You do not need to modify this section.
# +
import tensorflow as tf
EPOCHS = 100
BATCH_SIZE = 128
# -
# ### Model Architecture
# +
### Define your architecture here.
### Feel free to use as many code cells as needed.
from tensorflow.contrib.layers import flatten
def conv2d(x, W, b, strides=1, padd='VALID', name='default'):
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding=padd)
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x, name=name)
def maxpool(input_x, k_size=2, strides_size=2, padd ='VALID', name='default'):
ksize = [1, k_size, k_size, 1]
strides = [1, strides_size, strides_size, 1]
return tf.nn.max_pool(input_x, ksize, strides, padding=padd, name=name)
def fc_layer(input_x, W, b):
return tf.add(tf.matmul(input_x, W), b)
# Store layers weight & bias
def LeNet(x):
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
mu = 0
sigma = 0.1
weights = {
'wc1': tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma)),
'wc2': tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma)),
'wd1': tf.Variable(tf.truncated_normal(shape=(400, 200), mean = mu, stddev = sigma)),
'wd2': tf.Variable(tf.truncated_normal(shape=(200, 120), mean = mu, stddev = sigma)),
'wd3': tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma)),
'out': tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))}
biases = {
'bc1': tf.Variable(tf.zeros(6)),
'bc2': tf.Variable(tf.zeros(16)),
'bd1': tf.Variable(tf.zeros(200)),
'bd2': tf.Variable(tf.zeros(120)),
'bd3': tf.Variable(tf.zeros(84)),
'out': tf.Variable(tf.zeros(n_classes))
}
# Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
# Activation.
# Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = conv2d(x, weights['wc1'], biases['bc1'], name='conv1')
conv1 = maxpool(conv1, name='max1')
#print (conv1.shape)
# Layer 2: Convolutional. Output = 10x10x16.
# Activation.
# Pooling. Input = 10x10x16. Output = 5x5x16.
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], name='conv2')
conv2 = maxpool(conv2, name='max2')
#print (conv2.shape)
# TODO: Flatten. Input = 5x5x16. Output = 400.
flat_op = tf.contrib.layers.flatten(conv2)
# Layer 3: Fully Connected. Input = 400. Output = 200.
fc1 = fc_layer(flat_op, weights['wd1'], biases['bd1'])
# Activation.
fc1 = tf.nn.relu(fc1)
# Dropout
fc1 = tf.nn.dropout(fc1, keep_prob)
# Layer 4: Fully Connected. Input = 200. Output = 120.
fc2 = fc_layer(fc1, weights['wd2'], biases['bd2'])
# Activation.
fc2 = tf.nn.relu(fc2)
# Dropout
fc2 = tf.nn.dropout(fc2, keep_prob)
    # Layer 5: Fully Connected. Input = 120. Output = 84.
fc3 = fc_layer(fc2, weights['wd3'], biases['bd3'])
# Activation.
fc3 = tf.nn.relu(fc3)
# Layer 6: Fully Connected. Input = 84. Output = 43.
logits = fc_layer(fc3, weights['out'], biases['out'])
return logits
# -
# ## Features and Labels
# `x` is a placeholder for a batch of input images.
# `y` is a placeholder for a batch of output labels.
#
#
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32)
one_hot_y = tf.one_hot(y, n_classes)
# ### Train, Validate and Test the Model
# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
# sets implies underfitting. A high accuracy on the training set but a low accuracy on the validation set implies overfitting.
# +
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
# -
# # Train
# +
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# -
# ## Model Evaluation
# Evaluate how well the loss and accuracy of the model for a given dataset.
#
# +
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
# -
#
#
# ## Train the Model
# Run the training data through the training pipeline to train the model.
#
# Before each epoch, shuffle the training set.
#
# After each epoch, measure the loss and accuracy of the validation set.
#
# Save the model after training.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train_grey)
print("Training...")
print()
for i in range(EPOCHS):
X_train_grey, y_train = shuffle(X_train_grey, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train_grey[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
validation_accuracy = evaluate(X_valid_grey, y_valid)
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
saver.save(sess, './lenet')
print("Model saved")
# ## EVALUATE
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(X_test_grey, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
training_accuracy = evaluate(X_train_grey, y_train)
print("Training Accuracy = {:.3f}".format(training_accuracy))
# ---
#
# ## Step 3: Test a Model on New Images
#
# To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
#
# You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
# ### Load and Output the Images
# #### If you disable the GPU while calculating this section, please use the commented-out img_labels
# +
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import glob
saved_image_color = []
images = glob.glob('test_images_from_internet/*')
print(images)
for idx, fname in enumerate(images):
img = cv2.imread(fname)
saved_image_color.append(img)
plt.imshow(img)
plt.show()
saved_image_color = np.asarray(saved_image_color)
saved_images = np.sum(saved_image_color/3, axis=3, keepdims=True)
#img_labels = [31, 25 , 18, 3, 11, 17, 35]
img_labels = [3, 11, 18, 17, 25, 35, 31]
# -
# ### Predict the Sign Type for Each Image
# +
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(saved_images, img_labels)
print("Test Accuracy = {:.3f}".format(test_accuracy))
# -
# ### Analyze Performance
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
test_accuracy *= 100
("Test Accuracy = {:.3f} %".format(test_accuracy))
# ### Output Top 5 Softmax Probabilities For Each Image Found on the Web
# For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
#
# The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
#
# `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
#
# Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
#
# ```
# # (5, 6) array
# a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
# 0.12789202],
# [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
# 0.15899337],
# [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
# 0.23892179],
# [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
# 0.16505091],
# [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
# 0.09155967]])
# ```
#
# Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
#
# ```
# TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
# [ 0.28086119, 0.27569815, 0.18063401],
# [ 0.26076848, 0.23892179, 0.23664738],
# [ 0.29198961, 0.26234032, 0.16505091],
# [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
# [0, 1, 4],
# [0, 5, 1],
# [1, 3, 5],
# [1, 4, 3]], dtype=int32))
# ```
#
# Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
# +
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
softmax_logits = tf.nn.softmax(logits)
top_k = tf.nn.top_k(softmax_logits, k=5)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, tf.train.latest_checkpoint('.'))
sess_softmax_logits = sess.run(softmax_logits, feed_dict={x: saved_images, keep_prob: 1.0})
sess_top_k = sess.run(top_k, feed_dict={x: saved_images, keep_prob: 1.0})
print("softmax logits = /n {0}".format(sess_softmax_logits))
print()
print("top k 5 predictions = /n {0}".format(sess_top_k))
# -
# ### Project Writeup
#
# Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
# ---
#
# ## Step 4 (Optional): Visualize the Neural Network's State with Test Images
#
# This section is not required to complete, but it acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device, they are often referred to as a black box. We can better understand what the weights of a neural network look like by plotting their feature maps. After successfully training your neural network, you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimulus image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
#
# Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimulus image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process. For instance, if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for its second convolutional layer, you could enter conv2 as the tf_activation variable.
#
# For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
#
# <figure>
# <img src="visualize_cnn.png" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above)</p>
# </figcaption>
# </figure>
# <p></p>
#
# +
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
    # with size, normalization, etc. if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        if activation_min != -1 and activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
# -
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
conv1_s = sess.graph.get_tensor_by_name('conv1:0')
outputFeatureMap(saved_images, conv1_s, plt_num=1)
conv1_s = sess.graph.get_tensor_by_name('max1:0')
outputFeatureMap(saved_images, conv1_s, plt_num=2)
conv2_s = sess.graph.get_tensor_by_name('conv2:0')
outputFeatureMap(saved_images, conv2_s, plt_num=3)
conv2_s = sess.graph.get_tensor_by_name('max2:0')
outputFeatureMap(saved_images, conv2_s, plt_num=4)
| Traffic_Sign_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] editable=true
# # Part I. ETL Pipeline for Pre-Processing the Files
# + [markdown] editable=true
# #### Import Python packages
# + editable=true
# Import Python packages
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
# + [markdown] editable=true
# #### Creating list of filepaths to process original event csv data files
# + editable=true
# checking your current working directory
print(os.getcwd())
# Get your current folder and subfolder event data
filepath = os.getcwd() + '/event_data'
# Create a for loop to create a list of files and collect each filepath
for root, dirs, files in os.walk(filepath):
# Skip reading of checkpoints
if 'checkpoints' not in root:
# join the file path and roots with the subdirectories using glob
file_path_list = glob.glob(os.path.join(root,'*.csv'))
# print(file_path_list)
# + [markdown] editable=true
# #### Processing the files to create the data file csv that will be used for Apache Cassandra tables
# + editable=true
# initiating an empty list of rows that will be generated from each file
full_data_rows_list = []
# for every filepath in the file path list
for f in file_path_list:
# reading csv file
with open(f, 'r', encoding = 'utf8', newline='') as csvfile:
# creating a csv reader object
csvreader = csv.reader(csvfile)
next(csvreader)
# extracting each data row one by one and append it
for line in csvreader:
#print(line)
full_data_rows_list.append(line)
# uncomment the code below if you would like to get total number of rows
#print(len(full_data_rows_list))
# uncomment the code below if you would like to check to see what the list of event data rows will look like
#print(full_data_rows_list)
# creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \
# Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)
with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
writer = csv.writer(f, dialect='myDialect')
writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
'level','location','sessionId','song','userId'])
for row in full_data_rows_list:
if (row[0] == ''):
continue
writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))
# + editable=true
# check the number of rows in your csv file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
print(sum(1 for line in f))
# + [markdown] editable=true
# # Part II. Create Apache Cassandra tables and queries.
#
# ## Located within the Workspace directory is the event_datafile_new.csv which contains the following columns:
# - artist
# - firstName of user
# - gender of user
# - item number in session
# - last name of user
# - length of the song
# - level (paid or free song)
# - location of the user
# - sessionId
# - song title
# - userId
#
# The image below is a screenshot of how the denormalized data should appear in the <font color=red>**event_datafile_new.csv**</font> after the code above is run:<br>
#
# <img src="images/image_event_datafile_new.jpg">
# + [markdown] editable=true
# #### Creating a Cluster
# + editable=true
# Make a connection to a Cassandra instance on your local machine
from cassandra.cluster import Cluster
try:
cluster = Cluster(['127.0.0.1'])
# To establish connection and begin executing queries, need a session
session = cluster.connect()
except Exception as e:
print(e)
# + [markdown] editable=true
# #### Create Keyspace
# + editable=true
# Create a Keyspace
try:
session.execute("""
CREATE KEYSPACE IF NOT EXISTS udacity
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"""
)
except Exception as e:
print(e)
# + [markdown] editable=true
# #### Set Keyspace
# + editable=true
# Set KEYSPACE to the keyspace specified above
try:
session.set_keyspace('udacity')
except Exception as e:
print(e)
# + [markdown] editable=true
# #### Truncate Printout String for Easy Reading
# + editable=true
# Truncate printout string
def truncate_printout(feature, feature_spacing):
if len(feature) > feature_spacing:
feature = feature[:feature_spacing - 10] + '...'
return feature
# + [markdown] editable=true
# ### Create tables and query
#
# #### 1. Create a query to return the artist, song title and song's length in the music app history that was heard during a specific sessionId and itemInSession (e.g. sessionId = 338 and itemInSession = 4)
#
# ##### Expected output: "Name of the artist, title of the song and length of the track"
# ##### Based on: "sessionId and itemInSession"
#
# ##### Composite Key: "sessionid, itemInSession"
# + editable=true
# Create table to query artist, song title and song's length for each sessionid and itemInSession.
query = "CREATE TABLE IF NOT EXISTS music_library_by_session "
query = query + "(sessionid int, itemInSession int, artist text, song text, length float, PRIMARY KEY (sessionid, itemInSession))"
try:
session.execute(query)
except Exception as e:
print(e)
# + editable=true
# Read in CSV and populate table for each input.
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
csvreader = csv.reader(f)
next(csvreader) # skip header
for line in csvreader:
# Populate table with sessionid, itemInSession, artist, song, length
query = "INSERT INTO music_library_by_session (sessionid, iteminsession, artist, song, length) "
query = query + "VALUES (%s, %s, %s, %s, %s)"
# Assign corresponding column elements.
session.execute(query, (int(line[8]), int(line[3]), line[0], line[9], float(line[5])))
# + editable=true
# Test to verify data populated.
query = "select artist, song, length from music_library_by_session WHERE sessionid=338 and iteminsession=4"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# Print out test results
artist_spacing = 30
song_spacing = 30
length_spacing = 10
template = '{0:' + str(artist_spacing) + '}| {1:' + str(song_spacing) + '}| {2:' + str(length_spacing) + '}'
print(template.format('Artist Name', 'Song Title', 'Song Length'))
print('-' * (artist_spacing + song_spacing + length_spacing))
for row in rows:
artist = truncate_printout(row.artist, artist_spacing)
song = truncate_printout(row.song, song_spacing)
print(template.format(artist, song, row.length))
# + [markdown] editable=true
# #### 2. Create a query to return the name of the artist, the song (sorted by itemInSession) and the user (first and last name) for a specific userid and sessionid (e.g. userid = 10, sessionid = 182)
#
# ##### Expected output: "Name of the artist, title of the song (sorted by itemInSession) and user (first and last name)"
# ##### Based on: "userid and sessionid"
#
# ##### Composite Key: "userid, sessionid"
# ##### Clustering Key: "itemInSession" (added for uniqueness and sorting)
# + editable=true
# Create table to query artist, song title and user first and last name for each userid and sessionid.
query = "CREATE TABLE IF NOT EXISTS music_library_by_user_session "
query = query + "(userid int, sessionid int, itemInSession int, artist text, song text, firstName text, lastName text, PRIMARY KEY ((userid, sessionid), itemInSession))"
try:
session.execute(query)
except Exception as e:
print(e)
# + editable=true
# Read in CSV and populate table for each input.
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
csvreader = csv.reader(f)
next(csvreader) # skip header
for line in csvreader:
        # Populate table with userid, sessionid, itemInSession, artist, song, firstName, lastName
query = "INSERT INTO music_library_by_user_session (userid, sessionid, itemInSession, artist, song, firstName, lastName) "
query = query + "VALUES (%s, %s, %s, %s, %s, %s, %s)"
# Assign corresponding column elements.
session.execute(query, (int(line[10]), int(line[8]), int(line[3]), line[0], line[9], line[1], line[4]))
# + editable=true
# Test to verify data populated.
query = "select itemInSession, artist, song, firstName, lastName from music_library_by_user_session WHERE userid=10 and sessionid=182"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# Print out test results
iteminsession_spacing = 10
artist_spacing = 30
song_spacing = 30
firstname_spacing = 20
lastname_spacing = 20
template = '{0:' + str(iteminsession_spacing) + '}| {1:' + str(artist_spacing) + '}| {2:' + str(song_spacing) + '}| {3:' + str(firstname_spacing) + '}| {4:' + str(lastname_spacing) + '}'
print(template.format('Item', 'Artist Name', 'Song Title', 'User First Name', 'User Last Name' ))
print('-' * (iteminsession_spacing + artist_spacing + song_spacing + firstname_spacing + lastname_spacing))
for row in rows:
artist = truncate_printout(row.artist, artist_spacing)
song = truncate_printout(row.song, song_spacing)
    first_name = truncate_printout(row.firstname, firstname_spacing)
    last_name = truncate_printout(row.lastname, lastname_spacing)
    print(template.format(str(row.iteminsession), artist, song, first_name, last_name))
# + [markdown] editable=true
# #### 3. Create query to return every user name (first and last) (sorted by userid) who listened to a specific song (e.g. song ='All Hands Against His Own')
#
# ##### Expected output: "user (first and last name)"
# ##### Based on: "title of song"
#
# ##### Partition Key: "song"
# ##### Clustering Key: "userid" (added for uniqueness and sorting)
# + editable=true
# Create table to query user first and last name for each song.
query = "CREATE TABLE IF NOT EXISTS music_library_by_songs_played "
query = query + "(song text, userid int, firstName text, lastName text, PRIMARY KEY ((song), userid))"
try:
session.execute(query)
except Exception as e:
print(e)
# + editable=true
# Read in CSV and populate table for each input.
file = 'event_datafile_new.csv'
with open(file, encoding = 'utf8') as f:
csvreader = csv.reader(f)
next(csvreader) # skip header
for line in csvreader:
        # Populate table with song, userid, firstName, lastName
query = "INSERT INTO music_library_by_songs_played (song, userid, firstName, lastName) "
query = query + "VALUES (%s, %s, %s, %s)"
# Assign corresponding column elements.
session.execute(query, (line[9], int(line[10]), line[1], line[4]))
# + editable=true
# Test to verify data populated.
query = "select userid, firstName, lastName from music_library_by_songs_played WHERE song='All Hands Against His Own'"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# Print out test results
userid_spacing = 10
firstname_spacing = 20
lastname_spacing = 20
template = '{0:' + str(userid_spacing) + '} | {1:' + str(firstname_spacing) + '} | {2:' + str(lastname_spacing) + '}'
print(template.format('User Id', 'User First Name', 'User Last Name' ))
print('-' * (userid_spacing + firstname_spacing + lastname_spacing))
for row in rows:
    first_name = truncate_printout(row.firstname, firstname_spacing)
    last_name = truncate_printout(row.lastname, lastname_spacing)
    print(template.format(str(row.userid), first_name, last_name))
# + [markdown] editable=true
# #### Drop the tables before closing out the sessions
# + editable=true
# Drop tables created
query = "drop table if exists music_library_by_session"
try:
rows = session.execute(query)
except Exception as e:
print(e)
query = "drop table if exists music_library_by_user_session"
try:
rows = session.execute(query)
except Exception as e:
print(e)
query = "drop table if exists music_library_by_songs_played"
try:
rows = session.execute(query)
except Exception as e:
print(e)
# + [markdown] editable=true
# #### Close the session and cluster connection
# + editable=true
session.shutdown()
cluster.shutdown()
# + editable=true
| data_engineering_nanodegree/project_1b_data_modeling_cassandra/Project_1B_ Project_Template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/bundickm/Daily-Project-Euler/blob/main/12_Highly_Divisible_Triangle_Numbers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iCwn7RuqU2U1"
# The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number would be $1 + 2 + 3 + 4 + 5 + 6 + 7 = 28$. The first ten terms would be:
#
# $1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...$
#
# Let us list the factors of the first seven triangle numbers:
#
# 1: 1
#
# 3: 1, 3
#
# 6: 1, 2, 3, 6
#
# 10: 1, 2, 5, 10
#
# 15: 1, 3, 5, 15
#
# 21: 1, 3, 7, 21
#
# 28: 1, 2, 4, 7, 14, 28
#
# We can see that 28 is the first triangle number to have over five divisors.
#
# What is the value of the first triangle number to have over five hundred divisors?
# + id="kr-XimrpU1ts"
def find_triangle_num_with_x_divisors(x):
    """Return the first triangle number with more than x divisors."""
    n = 1
    while True:
        triangle_num = n * (n + 1) // 2
        # Count divisors in pairs (i, triangle_num // i) up to the square root.
        divisors = 0
        for i in range(1, int(triangle_num**.5) + 1):
            if triangle_num % i == 0:
                divisors += 2
                if i * i == triangle_num:
                    divisors -= 1  # don't double-count the square root of a perfect square
        if divisors > x:
            return triangle_num
        n += 1
# + colab={"base_uri": "https://localhost:8080/"} id="hvVWoHYcVaMH" outputId="e67bd905-41c9-4038-a8c6-c0efb9a457e2"
print(find_triangle_num_with_x_divisors(500))
# %timeit find_triangle_num_with_x_divisors(500)
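# An optional, faster variant (an illustrative sketch added for comparison, not part of the original solution): since $n$ and $n+1$ are coprime, the divisor count of the $n$-th triangle number factors as $d(n/2) \cdot d(n+1)$ for even $n$ and $d(n) \cdot d((n+1)/2)$ for odd $n$, so we only ever count divisors of numbers near $n$ rather than near $n^2/2$.
# +
def count_divisors(m):
    # Count divisors of m by pairing i with m // i up to the square root.
    divisors = 0
    i = 1
    while i * i <= m:
        if m % i == 0:
            divisors += 1 if i * i == m else 2
        i += 1
    return divisors

def find_triangle_num_fast(x):
    n = 1
    while True:
        if n % 2 == 0:
            num_divisors = count_divisors(n // 2) * count_divisors(n + 1)
        else:
            num_divisors = count_divisors(n) * count_divisors((n + 1) // 2)
        if num_divisors > x:
            return n * (n + 1) // 2
        n += 1

print(find_triangle_num_fast(500))
# -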
# + id="qheb-pO1WXlp"
| 12_Highly_Divisible_Triangle_Numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Deep Recurrent Neural Networks
#
# :label:`sec_deep_rnn`
#
# So far, we have only discussed recurrent neural networks with a single unidirectional hidden layer.
# In them, the specific functional form in which latent variables and observations interact is rather arbitrary.
# This is not a big problem as long as the interaction types can be modeled with enough flexibility.
# With a single layer, however, this can be quite challenging.
# In the case of linear models, we fixed this problem by adding more layers.
# Within recurrent neural networks this is a bit trickier, since we first need to decide how to add more layers
# and where to add the extra nonlinearity.
#
# In fact, we can stack multiple layers of recurrent neural networks on top of each other;
# combining several simple layers yields a flexible mechanism.
# In particular, data might be relevant at different levels of the stack.
# For instance, we might want to keep high-level data about financial market conditions
# (bear market or bull market) available,
# whereas at a lower level we only record shorter-term temporal dynamics.
#
# :numref:`fig_deep_rnn` illustrates a deep recurrent neural network with $L$ hidden layers.
# Each hidden state is continuously passed to both the next time step of the current layer and the current time step of the next layer.
#
# ![Architecture of a deep recurrent neural network](../img/deep-rnn.svg)
# :label:`fig_deep_rnn`
#
# ## Functional Dependencies
#
# We can formalize the functional dependencies within the deep architecture
# of $L$ hidden layers depicted in :numref:`fig_deep_rnn`.
# The following discussion focuses primarily on the classical recurrent neural network model,
# but it applies to other sequence models, too.
#
# Suppose that we have a minibatch input
# $\mathbf{X}_t \in \mathbb{R}^{n \times d}$
# (number of examples: $n$, number of inputs per example: $d$) at time step $t$.
# At the same time step, let the hidden state of the $l^\mathrm{th}$ hidden layer ($l=1,\ldots,L$)
# be $\mathbf{H}_t^{(l)} \in \mathbb{R}^{n \times h}$
# (number of hidden units: $h$),
# and the output layer variable be $\mathbf{O}_t \in \mathbb{R}^{n \times q}$
# (number of outputs: $q$).
# Setting $\mathbf{H}_t^{(0)} = \mathbf{X}_t$,
# the hidden state of the $l$-th hidden layer, which uses the activation function $\phi_l$, is computed as:
#
# $$\mathbf{H}_t^{(l)} = \phi_l(\mathbf{H}_t^{(l-1)} \mathbf{W}_{xh}^{(l)} + \mathbf{H}_{t-1}^{(l)} \mathbf{W}_{hh}^{(l)} + \mathbf{b}_h^{(l)}),$$
# :eqlabel:`eq_deep_rnn_H`
#
# where the weights $\mathbf{W}_{xh}^{(l)} \in \mathbb{R}^{h \times h}$ and
# $\mathbf{W}_{hh}^{(l)} \in \mathbb{R}^{h \times h}$, together with
# the bias $\mathbf{b}_h^{(l)} \in \mathbb{R}^{1 \times h}$,
# are the model parameters of the $l$-th hidden layer.
#
# In the end, the calculation of the output layer is only based on the final hidden state of the $L$-th hidden layer:
#
# $$\mathbf{O}_t = \mathbf{H}_t^{(L)} \mathbf{W}_{hq} + \mathbf{b}_q,$$
#
# where the weight $\mathbf{W}_{hq} \in \mathbb{R}^{h \times q}$ and the bias $\mathbf{b}_q \in \mathbb{R}^{1 \times q}$ are the model parameters of the output layer.
#
# Just as with multilayer perceptrons, the number of hidden layers $L$ and the number of hidden units $h$ are hyperparameters;
# in other words, they can be tuned by us.
# In addition, we can easily obtain a deep gated recurrent neural network or a deep LSTM network
# by replacing the hidden state computation in :eqref:`eq_deep_rnn_H`
# with that of a gated recurrent unit or a long short-term memory network.
#
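# Before turning to the concise implementation, the following is a minimal sketch (added for illustration, not part of the original text) of the layer-wise recurrence in :eqref:`eq_deep_rnn_H` for a single time step. The shapes, the choice of $\tanh$ as $\phi_l$ and the random initialization are assumptions made purely to show the flow of data between layers.
# +
from mxnet import np, npx

npx.set_np()

n, d, h, L = 2, 5, 4, 3                        # batch size, inputs, hidden units, layers
X_t = np.random.normal(size=(n, d))            # minibatch input at the current time step
H_prev = [np.zeros((n, h)) for _ in range(L)]  # H_{t-1}^{(l)} for every layer l

# One (W_xh, W_hh, b_h) triple per layer; the first layer maps from the d-dimensional input.
params = []
for l in range(L):
    in_dim = d if l == 0 else h
    params.append((np.random.normal(scale=0.01, size=(in_dim, h)),
                   np.random.normal(scale=0.01, size=(h, h)),
                   np.zeros(h)))

H = X_t            # H_t^{(0)} = X_t
H_t = []
for l, (W_xh, W_hh, b_h) in enumerate(params):
    H = np.tanh(np.dot(H, W_xh) + np.dot(H_prev[l], W_hh) + b_h)
    H_t.append(H)  # the hidden state of layer l feeds layer l + 1
# The output layer would use only H_t[-1], i.e. H_t^{(L)}.
# -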
# ## Concise Implementation
#
# Fortunately, many of the logistical details required to implement multiple layers of a recurrent neural network are readily available in high-level APIs.
# To keep things simple, we only illustrate the implementation using such built-in functionality.
# Let us take a long short-term memory network model as an example.
# The code is very similar to the one we used previously in :numref:`sec_lstm`;
# in fact, the only difference is that we specify the number of layers explicitly
# rather than using the default of a single layer.
# As usual, we begin by loading the dataset.
#
# + origin_pos=1 tab=["mxnet"]
from mxnet import npx
from mxnet.gluon import rnn
from d2l import mxnet as d2l
npx.set_np()
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
# + [markdown] origin_pos=3
# Architectural decisions such as the choice of hyperparameters are very similar to those of :numref:`sec_lstm`.
# Since we have distinct tokens, we pick the same number for both input and output, namely `vocab_size`.
# The number of hidden units is still $256$.
# The only difference is that we now (**set the number of hidden layers via the value of `num_layers`**).
#
# + origin_pos=4 tab=["mxnet"]
vocab_size, num_hiddens, num_layers = len(vocab), 256, 2
device = d2l.try_gpu()
lstm_layer = rnn.LSTM(num_hiddens, num_layers)
model = d2l.RNNModel(lstm_layer, len(vocab))
# + [markdown] origin_pos=6
# ## [**Training**] and Prediction
#
# Since we instantiate two layers with the long short-term memory network model, training is slowed down considerably.
#
# + origin_pos=7 tab=["mxnet"]
num_epochs, lr = 500, 2
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, device)
# + [markdown] origin_pos=8
# ## Summary
#
# * In deep recurrent neural networks, the hidden state information is passed to the next time step of the current layer and to the current time step of the next layer.
# * There are many different flavors of deep recurrent neural networks,
#   such as long short-term memory networks, gated recurrent units, or classical recurrent neural networks.
#   These models are all covered by the high-level APIs of deep learning frameworks.
# * Overall, deep recurrent neural networks require a considerable amount of tuning (such as the learning rate and clipping)
#   to ensure proper convergence, and the model initialization also needs to be done with care.
#
# ## Exercises
#
# 1. Based on the single-layer implementation we discussed in :numref:`sec_rnn_scratch`,
#    try to implement a two-layer recurrent neural network from scratch.
# 1. In the model trained in this section, compare the accuracy and training speed after replacing the long short-term memory network with a gated recurrent unit.
# 1. If you increase the training data, how low can you bring the perplexity?
# 1. When modeling text, can you combine source data from different authors? What are the pros and cons of doing so?
#
# + [markdown] origin_pos=9 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/2771)
#
| d2l/mxnet/chapter_recurrent-modern/deep-rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.0 64-bit (''anaconda3'': virtualenv)'
# metadata:
# interpreter:
# hash: 639da732ab091020ab968f361f5d8817f716db792fde42dd09c3e2e290128d87
# name: 'Python 3.7.0 64-bit (''anaconda3'': virtualenv)'
# ---
# # Bioenergy: tidy table for all sectors
#
# Aggregate different variables in each sector representing energy consumption (or generation) to provide one consumption variable per sector (electricity, heating and cooling, transport).
#
# Then provide an aggregate consumption variable across all sectors.
# +
import os
import datetime
import pandas as pd
csv_input_dir = 'output'
csv_output_dir = datetime.datetime.today().strftime('%Y-%m-%d')
if not os.path.exists(csv_output_dir):
os.mkdir(csv_output_dir)
selected_codes = ['TOTAL', 'RA000', 'R5110-5150_W6000RI', 'R5200', 'R5300', 'W6210']
bio_codes = ['R5110-5150_W6000RI', 'R5200', 'R5300', 'W6210']
# +
# Standard international energy product classification (SIEC)
# https://dd.eionet.europa.eu
# Download the siec.csv
# Dictionary from siec codes to human readable labels
siec_url = 'http://dd.eionet.europa.eu/vocabulary/eurostat/siec/csv'
siec = pd.read_csv(siec_url)
fuels_dict = {k: v for k, v in zip(siec['Notation'], siec['Label'])}
# -
# ## Three tables from all sectors
el = pd.read_csv(os.path.join(os.path.abspath(csv_input_dir), 'shares_electricity_tidy.csv'), decimal=',')
hc = pd.read_csv(os.path.join(os.path.abspath(csv_input_dir), 'shares_heat_tidy.csv'), decimal=',')
tr = pd.read_csv(os.path.join(os.path.abspath(csv_input_dir), 'shares_transport_tidy.csv'), decimal=',')
# +
# Mask for bioenergy, renewables and total consumption
el_fuels = el['siec'].str.fullmatch('|'.join(selected_codes))
el_ktoe = el['unit'].str.fullmatch('GWH')
hc_fuels = hc['siec'].str.fullmatch('|'.join(selected_codes))
hc_ktoe = hc['unit'].str.fullmatch('KTOE')
tr_fuels = tr['siec'].str.fullmatch('|'.join(selected_codes))
tr_ktoe = tr['unit'].str.fullmatch('KTOE')
# +
# There was a mistake in conversion to ktoe in the previous version
el = el[el_fuels & el_ktoe].loc[:, ['geo', 'year', 'siec', 'unit', 'GFC_ELC', 'GEP_RED']]
# Have one variable with consumption across fuels
el['electricity_consumption'] = el['GEP_RED']
el.loc[el['siec'] == 'TOTAL', 'electricity_consumption'] = el.loc[el['siec'] == 'TOTAL', 'GFC_ELC']
# Convert from GWh to ktoe
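# (1 GWh = 3.6 TJ and 1 ktoe = 41.868 TJ, hence the factor 3.6 / 41.868 below)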
el.loc[:, ['GFC_ELC', 'GEP_RED', 'electricity_consumption']] = el.loc[:, ['GFC_ELC', 'GEP_RED', 'electricity_consumption']] * 3.6 / 41.868
el['unit'] = 'KTOE'
# -
el
# +
hc = hc[hc_fuels & hc_ktoe].loc[:, ['geo', 'year', 'siec', 'unit', 'GHP_RED', 'FC_IND_OTH_E', 'GFC_HEAT_CL_E']]
# Aggregate final consumption by adding together GHP_RED and FC_IND_OTH_E
hc['heat_consumption'] = hc.loc[:, ['GHP_RED', 'FC_IND_OTH_E']].sum(axis=1)
# Have one variable with consumption across fuels, adding also GFC_HEAT_CL_E
hc.loc[hc['siec'] == 'TOTAL', 'heat_consumption'] = hc.loc[hc['siec'] == 'TOTAL', 'GFC_HEAT_CL_E']
hc.loc[hc['siec'] == 'RA000', 'heat_consumption'] = hc.loc[hc['siec'] == 'RA000', 'GFC_HEAT_CL_E']
# -
hc.head(7)
# +
tr = tr[tr_fuels & tr_ktoe].loc[:, ['geo', 'year', 'siec', 'unit', 'FC_TRA_OTH_E_RED', 'FC_TRA_RAIL_E_RED', 'FC_TRA_ROAD_E_RED', 'GFC_TRA_E_NMULTI', 'GFC_TRA_E_MULTI']]
# Aggregate final consumption in road, rail and other transport by adding together FC_TRA_OTH_E_RED, FC_TRA_RAIL_E_RED, FC_TRA_ROAD_E_RED
tr['transport_consumption'] = tr.loc[:, ['FC_TRA_OTH_E_RED', 'FC_TRA_RAIL_E_RED', 'FC_TRA_ROAD_E_RED']].sum(axis=1)
# Have one variable with consumption across fuels, adding also GFC_TRA_E_NMULTI
tr.loc[tr['siec'] == 'TOTAL', 'transport_consumption'] = tr.loc[tr['siec'] == 'TOTAL', 'GFC_TRA_E_NMULTI']
tr.loc[tr['siec'] == 'RA000', 'transport_consumption'] = tr.loc[tr['siec'] == 'RA000', 'GFC_TRA_E_NMULTI']
# -
tr
# ## Merge tables
df1 = el.set_index(['geo', 'year', 'siec', 'unit'])
df2 = hc.set_index(['geo', 'year', 'siec', 'unit'])
df3 = tr.set_index(['geo', 'year', 'siec', 'unit'])
# el + hc
df4 = pd.merge(df1, df2, left_index=True, right_index=True, how='outer')
# (el + hc) + tr
df5 = df4.merge(df3, left_index=True, right_index=True, how='outer')
df5
# +
# Aggregate table in ktoe
df5.reset_index(inplace=True)
# Add human readable fuel label
df5['fuel'] = [fuels_dict[code] for code in df5['siec']]
df5 = df5.set_index(['geo', 'year', 'siec', 'fuel', 'unit'])
# -
# Add aggregate consumption across sectors
df5['all_sectors_consumption'] = df5.loc[:,['electricity_consumption', 'heat_consumption', 'transport_consumption']].sum(axis=1)
df5.to_csv(os.path.join(os.path.abspath(csv_output_dir), 'selected_fuels_across_sectors_ktoe.csv'), decimal=',')
# Aggregate table in TJ
df = df5 * 41.868
df.reset_index(inplace=True)
df['unit'] = 'TJ'
df.to_csv(os.path.join(os.path.abspath(csv_output_dir), 'selected_fuels_across_sectors_tj.csv'), decimal=',')
# Pivot tables with consumption for fuel and year in selected countries
for country in ['CZ', 'AT', 'DK', 'NL', 'PL', 'SK']:
bio = df['siec'].str.fullmatch('|'.join(bio_codes))
geo = df['geo'] == country
geodf = df.loc[bio & geo, ['geo', 'year', 'siec', 'fuel', 'unit', 'all_sectors_consumption']]
geodf = geodf.pivot_table(values='all_sectors_consumption', index='year', columns='fuel')
country_label = country.lower()
geodf.to_csv(os.path.join(os.path.abspath(csv_output_dir), f'{country_label}_selected_fuels_all_sectors_tj.csv'), decimal=',')
# Table dropping original eurostat variable columns in ktoe
df5.loc[:, ['electricity_consumption', 'heat_consumption', 'transport_consumption', 'all_sectors_consumption']].to_csv(os.path.join(os.path.abspath(csv_output_dir), 'selected_fuels_computed_consumption_ktoe.csv'), decimal=',')
df5
# +
# Table dropping original eurostat variable columns in TJ
df6 = df.copy()
df6 = df6.set_index(['geo', 'year', 'siec', 'fuel', 'unit'])
df6.loc[:, ['electricity_consumption', 'heat_consumption', 'transport_consumption', 'all_sectors_consumption']].to_csv(os.path.join(os.path.abspath(csv_output_dir), 'selected_fuels_computed_consumption_tj.csv'), decimal=',')
# -
df6
# Pivot tables with consumption for fuel and year in selected countries
for country in ['CZ', 'AT', 'DK', 'NL', 'PL', 'SK']:
bio = df['siec'].str.fullmatch('|'.join(bio_codes))
geo = df['geo'] == country
geodf = df.loc[bio & geo, ['geo', 'year', 'siec', 'fuel', 'unit', 'all_sectors_consumption']]
geodf = geodf.pivot_table(values='all_sectors_consumption', index='year', columns='fuel')
geodf = geodf.reindex(columns=['Primary solid biofuels', 'Renewable municipal waste', 'Biogases', 'Liquid biofuels'])
geodf.plot.area(title=f'Bioenergy consumption in {country} by fuel', ylabel='final consumption (terajoules)')
| eu/tidy/fuels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
# ### Create our car classifier
car_classifier = cv2.CascadeClassifier('Haarcascades/haarcascade_car.xml')
# ### Initiate video capture for video file
cap = cv2.VideoCapture('Videos/Vehicles.mp4')
# +
# Loop once video is successfully loaded
while cap.isOpened():
#time.sleep(.05)
    # Read the next frame; stop when the video ends
    ret, frame = cap.read()
    if not ret:
        break
# gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Pass frame to our car classifier
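    # detectMultiScale args: 1.4 is the image-pyramid scale factor, 2 the minimum number of neighbouring detections required to keep a box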
cars = car_classifier.detectMultiScale(frame, 1.4, 2)
    # Extract bounding boxes for any cars identified
for (x,y,w,h) in cars:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 255), 2)
cv2.imshow('Car Detection', frame)
if cv2.waitKey(1) == 27: # 27 is the Esc button
break
cap.release()
cv2.destroyAllWindows()
# -
| 15_Car_Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="static/pybofractal.png" alt="Pybonacci" style="width: 200px;"/>
# <img src="static/cacheme_logo.png" alt="CAChemE" style="width: 300px;"/>
# # The Transport Problem
#
# > **Note:** Adapted from https://github.com/Pyomo/PyomoGallery, see LICENSE.BSD
#
# ## Summary
#
# The goal of the Transport Problem is to select the quantities of a homogeneous good to ship from several production plants to several punctiform markets so as to minimise the transportation costs.
#
# It is the default tutorial for the GAMS language, and the GAMS equivalent code is inserted as single-dash comments. The original GAMS code needs a slightly different ordering of the commands and is available at http://www.gams.com/mccarl/trnsport.gms.
#
# ## Problem Statement
#
# The Transport Problem can be formulated mathematically as a linear programming problem using the following model.
#
# ### Sets
#
# $I$ = set of canning plants <br />
# $J$ = set of markets
#
# ### Parameters
#
# $a_i$ = capacity of plant $i$ in cases, $\forall i \in I$ <br />
# $b_j$ = demand at market $j$ in cases, $\forall j \in J$ <br />
# $d_{i,j}$ = distance in thousands of miles, $\forall i \in I, \forall j \in J$ <br />
# $f$ = freight in dollars per case per thousand miles <br />
# $c_{i,j}$ = transport cost in thousands of dollars per case
#
# $c_{i,j}$ is obtained exogenously to the optimisation problem as $c_{i,j} = f \cdot d_{i,j}$, $\forall i \in I, \forall j \in J$
#
# ### Variables
# $x_{i,j}$ = shipment quantities in cases <br />
# z = total transportation costs in thousands of dollars
#
# ### Objective
#
# Minimize the total cost of the shipments: <br />
# $\min_{x} z = \sum_{i \in I} \sum_{j \in J} c_{i,j} x_{i,j}$
#
# ### Constraints
#
#
# Observe supply limit at plant i: <br />
# $\sum_{j \in J} x_{i,j} \leq a_{i}$, $\forall i \in I$
#
# Satisfy demand at market j: <br />
# $\sum_{i \in I} x_{i,j} \geq b_{j}$, $\forall j \in J$
#
# Non-negative transportation quantities <br />
# $x_{i,j} \geq 0$, $\forall i \in I, \forall j \in J$
# ## Pyomo Formulation
# ### Creation of the Model
#
# In pyomo everything is an object. The various components of the model (sets, parameters, variables, constraints, objective..) are all attributes of the main model object while being objects themselves.
#
# There are two types of models in pyomo: a `ConcreteModel` is one where all the data is defined at model creation, and it is the type we are going to use in this tutorial. Pyomo, however, also supports an `AbstractModel`, where the model structure is generated first and particular instances of the model are then created from a particular set of data.
#
# The first thing to do in the script is to load the pyomo library and create a new `ConcreteModel` object. We have little imagination here, and we call our model "model". You can give it whatever name you want. However, if you give your model another name, you also need to create a `model` object at the end of your script:
# +
# Import of the pyomo module
from pyomo.environ import *
# Creation of a Concrete Model
model = ConcreteModel()
# -
# ### Set Definitions
#
# Sets are created as attribute objects of the main model object, and all the information is given as parameters in the constructor function. Specifically, we pass to the constructor the initial elements of the set and a documentation string to keep track of what our set represents:
## Define sets ##
# Sets
# i canning plants / seattle, san-diego /
# j markets / new-york, chicago, topeka / ;
model.i = Set(initialize=['seattle','san-diego'], doc='Canning plants')
model.j = Set(initialize=['new-york','chicago', 'topeka'], doc='Markets')
# ### Parameters
#
# Parameter objects are created specifying the sets over which they are defined and are initialised with either a python dictionary or a scalar:
## Define parameters ##
# Parameters
# a(i) capacity of plant i in cases
# / seattle 350
# san-diego 600 /
# b(j) demand at market j in cases
# / new-york 325
# chicago 300
# topeka 275 / ;
model.a = Param(model.i, initialize={'seattle':350,'san-diego':600}, doc='Capacity of plant i in cases')
model.b = Param(model.j, initialize={'new-york':325,'chicago':300,'topeka':275}, doc='Demand at market j in cases')
# Table d(i,j) distance in thousands of miles
# new-york chicago topeka
# seattle 2.5 1.7 1.8
# san-diego 2.5 1.8 1.4 ;
dtab = {
('seattle', 'new-york') : 2.5,
('seattle', 'chicago') : 1.7,
('seattle', 'topeka') : 1.8,
('san-diego','new-york') : 2.5,
('san-diego','chicago') : 1.8,
('san-diego','topeka') : 1.4,
}
model.d = Param(model.i, model.j, initialize=dtab, doc='Distance in thousands of miles')
# Scalar f freight in dollars per case per thousand miles /90/ ;
model.f = Param(initialize=90, doc='Freight in dollars per case per thousand miles')
# A third, powerful way to initialize a parameter is using a user-defined function.
#
# This function will be automatically called by pyomo for every possible (i,j) pair. In this case pyomo will actually call `c_init()` six times in order to initialize the `model.c` parameter.
# +
# Parameter c(i,j) transport cost in thousands of dollars per case ;
# c(i,j) = f * d(i,j) / 1000 ;
def c_init(model, i, j):
return model.f * model.d[i,j] / 1000
model.c = Param(model.i, model.j, initialize=c_init, doc='Transport cost in thousands of dollar per case')
# -
# ### Variables
#
# Similar to parameters, variables are created specifying their domain(s). For variables we can also specify the upper/lower bounds in the constructor.
#
# Differently from GAMS, we don't need to define the variable that is on the left hand side of the objective function.
## Define variables ##
# Variables
# x(i,j) shipment quantities in cases
# z total transportation costs in thousands of dollars ;
# Positive Variable x ;
model.x = Var(model.i, model.j, bounds=(0.0,None), doc='Shipment quantities in case')
# ### Constraints
#
# At this point, it should not be a surprise that constraints are again defined as model objects, with the required information passed as parameters to the constructor function.
# +
## Define constraints ##
# supply(i) observe supply limit at plant i
# supply(i) .. sum (j, x(i,j)) =l= a(i)
def supply_rule(model, i):
return sum(model.x[i,j] for j in model.j) <= model.a[i]
model.supply = Constraint(model.i, rule=supply_rule, doc='Observe supply limit at plant i')
# demand(j) satisfy demand at market j ;
# demand(j) .. sum(i, x(i,j)) =g= b(j);
def demand_rule(model, j):
return sum(model.x[i,j] for i in model.i) >= model.b[j]
model.demand = Constraint(model.j, rule=demand_rule, doc='Satisfy demand at market j')
# -
# The above code takes advantage of [list comprehensions](https://docs.python.org/2/tutorial/datastructures.html#list-comprehensions), a powerful feature of the python language that provides a concise way to loop over a list. If we take the supply_rule as example, this is actually called two times by pyomo (once for each of the elements of i). Without list comprehensions we would have had to write our function using a for loop, like:
def supply_rule(model, i):
supply = 0.0
for j in model.j:
supply += model.x[i,j]
return supply <= model.a[i]
# Using list comprehension is however quicker to code and more readable.
# ### Objective and Solving
#
# The definition of the objective is similar to that of the constraints, except that most solvers require a scalar objective function (hence a single function) and that we can specify the sense (direction) of the optimisation.
# +
## Define Objective and solve ##
# cost define objective function
# cost .. z =e= sum((i,j), c(i,j)*x(i,j)) ;
# Model transport /all/ ;
# Solve transport using lp minimizing z ;
def objective_rule(model):
return sum(model.c[i,j]*model.x[i,j] for i in model.i for j in model.j)
model.objective = Objective(rule=objective_rule, sense=minimize, doc='Define objective function')
# -
# As we are here looping over two distinct sets, we can see how list comprehension really simplifies the code. The objective function could have been written without list comprehension as:
def objective_rule(model):
obj = 0.0
for ki in model.i:
for kj in model.j:
obj += model.c[ki,kj]*model.x[ki,kj]
return obj
# ### Retrieving the Output
#
# We use the `pyomo_postprocess()` function to retrieve the output and do something with it. For example, we could display solution values (see below), plot a graph with [matplotlib](http://matplotlib.org/) or save it in a csv file.
#
# This function is called by pyomo after the solver has finished.
## Display of the output ##
# Display x.l, x.m ;
def pyomo_postprocess(options=None, instance=None, results=None):
model.x.display()
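# As a small illustration (an addition to the original tutorial), the optimal shipment quantities can also be collected into a plain Python dictionary after the solve, e.g. to post-process them with pandas or dump them to a CSV file; only the standard `.value` attribute of the variable is used here.
def solution_to_dict(m):
    # Optimal shipment quantity for every (plant, market) pair.
    return {(i, j): m.x[i, j].value for i in m.i for j in m.j}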
# We can print model structure information with `model.pprint()` (“pprint” stands for “pretty print”).
# Results are also by default saved in a `results.json` file or, if PyYAML is installed in the system, in `results.yml`.
#
# ### Editing and Running the Script
#
# Differently from GAMS, you can use whatever editor environment you wish to write a pyomo script. If you don't need debugging features, a simple text editor like Notepad++ (on Windows) or gedit or kate (on Linux) will suffice, as they already have syntax highlighting for Python.
#
# If you want advanced features and debugging capabilities you can use a dedicated Python IDE, like e.g. Spyder.
#
# You will normally run the script as `pyomo solve --solver=glpk transport.py`. You can stream solver-specific output by adding the option `--stream-output`. If you want to run the script as `python transport.py`, add the following lines at the end:
# +
# This emulates what the pyomo command-line tools does
from pyomo.opt import SolverFactory
import pyomo.environ
opt = SolverFactory("glpk")
results = opt.solve(model)
# sends results to stdout
results.write()
print("\nDisplaying Solution\n" + '-'*60)
pyomo_postprocess(None, None, results)
# -
# Finally, if you are very lazy and want to run the script with just `./transport.py` (and you are in Linux) add the following lines at the top:
# +
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -
# ## Complete script
#
# Here is the complete script:
# !cat transport.py
# ## Solutions
# Running the model lead to the following output:
# !pyomo solve --solver=glpk transport.py
# By default, the optimization results are stored in the file `results.json`:
# !cat results.json
# This solution shows that the minimum transport cost is attained when 300 cases are sent from the Seattle plant to the Chicago market, 50 cases from Seattle to New York, and 275 cases each from the San Diego plant to the New York and Topeka markets.
#
# The total transport costs will be $153,675.
# ## References
#
# * Original problem formulation:
# - Dantzig, <NAME>, Chapter 3.3. In Linear Programming and Extensions. Princeton University Press, Princeton, New Jersey, 1963.
# * GAMS implementation:
# - <NAME>, Chapter 2: A GAMS Tutorial. In GAMS: A User's Guide. The Scientific Press, Redwood City, California, 1988.
# * Pyomo translation: <NAME>
| 04_TransportProblem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Convert the [MIS faculty directory](https://www.terry.uga.edu/directory/mis/) to Markdown
#
# *You don't need to convert the whole table, just a few entries.*
# | Image | Name | Title | Type | Phone | Email | Office |
# | ----------------------------------------------------------------------------------------------------------------------- | -------------------- | ---------------- | ----------- | ------------ | ------------------------------------------- | ----------------------------------------- |
# | ![](http://directory.domain-account.com/_resources/images/directory/faculty/Karen%20Aguar.jpg) | <NAME> | Lecturer | Faculty | . | [<EMAIL>](mailto:<EMAIL>) | 630 South Lumpkin Street C428 Benson Hall |
# | ![](http://directory.domain-account.com/_resources/images/directory/faculty/JanineAronson_29143-014_portrait_large.jpg) | <NAME> | Professor | Faculty | 706-542-0991 | [<EMAIL>](mailto:<EMAIL>) | 630 South Lumpkin Street C418 Benson Hall |
# | ![](http://directory.domain-account.com/_resources/images/directory/students/eric%20bogert_portrait_large.jpg) | <NAME> | Doctoral Student | PhD Student | . | [<EMAIL>](mailto:<EMAIL>) | 620 South Lumpkin Street B423 Amos Hall |
#
# Another table
# | Name | Title | Type | Phone | Email | Office |
# | -------------- | --------- | ------- | ---------- | -------- | ---------- |
# | [<NAME>](https://www.terry.uga.edu/directory/mis/karen-aguar) | Lecturer | Faculty | . | [<EMAIL>](mailto:<EMAIL>) | 630 S. Lumpkin St. C428 Benson Hall |
# | [<NAME>](https://www.terry.uga.edu/directory/mis/parham-amiri) | Doctoral Student | PhD Student | . | [<EMAIL>](mailto:<EMAIL>) | . |
| 02-workout-solution_markdown_language.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_mxnet_p36
# language: python
# name: conda_mxnet_p36
# ---
# # Amazon SageMaker Object Detection using the Image and JSON format
#
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 3. [Data Preparation](#Data-Preparation)
# 1. [Download data](#Download-Data)
# 2. [Prepare Dataset](#Prepare-dataset)
# 3. [Upload to S3](#Upload-to-S3)
# 4. [Training](#Training)
# 5. [Hosting](#Hosting)
# 6. [Inference](#Inference)
# ## Introduction
#
# Object detection is the process of identifying and localizing objects in an image. A typical object detection solution takes in an image as input and provides a bounding box on the image where an object of interest is, along with identifying what object the box encapsulates. But before we have this solution, we need to acquire and process a training dataset, create and set up a training job so that the algorithm can learn from the dataset, and then host the algorithm as an endpoint to which we can supply the query image.
#
# This notebook is an end-to-end example introducing the Amazon SageMaker Object Detection algorithm. In this demo, we will demonstrate how to train and to host an object detection model on the [COCO dataset](http://cocodataset.org/) using the Single Shot multibox Detector ([SSD](https://arxiv.org/abs/1512.02325)) algorithm. In doing so, we will also demonstrate how to construct a training dataset using the JSON format as this is the format that the training job will consume. We also allow the RecordIO format, which is illustrated in the [RecordIO Notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_recordio_format.ipynb). We will also demonstrate how to host and validate this trained model.
# ## Setup
#
# To train the Object Detection algorithm on Amazon SageMaker, we need to set up and authenticate the use of AWS services. To begin with, we need an AWS account role with SageMaker access. This role, which is used to give SageMaker access to your data in S3, will automatically be obtained from the role used to start the notebook.
# +
# %%time
import sagemaker
from sagemaker import get_execution_role
role = get_execution_role()
print(role)
sess = sagemaker.Session()
# -
# We also need the S3 bucket that you want to use for training and for storing the trained model artifacts. In this notebook, we require a custom bucket that exists, so as to keep the naming clean. You can also end up using the default bucket that SageMaker comes with.
# + tags=["parameters"]
bucket = '<your_s3_bucket_name_here>' # custom bucket name.
# bucket = sess.default_bucket()
prefix = 'DEMO-ObjectDetection'
# +
from sagemaker.amazon.amazon_estimator import get_image_uri
training_image = get_image_uri(sess.boto_region_name, 'object-detection', repo_version="latest")
print (training_image)
# -
# ## Data Preparation
# [MS COCO](http://cocodataset.org/#download) is a large-scale dataset for multiple computer vision tasks, including object detection, segmentation, and captioning. In this notebook, we will use the object detection annotations. Since COCO is a relatively large dataset, we will only use the validation set from 2017 and split it into training and validation sets. The 2017 validation set contains 5000 images with objects from 80 categories.
#
# ### Dataset License
# The annotations in this dataset belong to the COCO Consortium and are licensed under a Creative Commons Attribution 4.0 License. The COCO Consortium does not own the copyright of the images. Use of the images must abide by the Flickr Terms of Use. The users of the images accept full responsibility for the use of the dataset, including but not limited to the use of any copies of copyrighted images that they may create from the dataset. Before you use this data for any purpose other than this example, you should understand the data license, described at http://cocodataset.org/#termsofuse.
# ### Download data
# Let us download the 2017 validation datasets from COCO and then unpack them.
# +
import os
import urllib.request
def download(url):
filename = url.split("/")[-1]
if not os.path.exists(filename):
urllib.request.urlretrieve(url, filename)
# MSCOCO validation image files
download('http://images.cocodataset.org/zips/val2017.zip')
download('http://images.cocodataset.org/annotations/annotations_trainval2017.zip')
# + language="bash"
# unzip -qo val2017.zip
# unzip -qo annotations_trainval2017.zip
# rm val2017.zip annotations_trainval2017.zip
# -
# Before using this dataset, we need to perform some data cleaning. The algorithm expects the dataset in a particular JSON format. The COCO dataset, while containing annotations in JSON, does not follow our specifications. We will use this as an opportunity to introduce our JSON format by performing this conversion. To begin with, we create appropriate directories for the training images, the validation images, and the annotation files for both.
# + language="bash"
# #Create folders to store the data and annotation files
# mkdir generated train train_annotation validation validation_annotation
# -
# ### Prepare dataset
#
# Next, we should convert the annotation file from the COCO dataset into json annotation files. We will require one annotation for each image.
#
# The Amazon SageMaker Object Detection algorithm expects labels to be indexed from `0`. It also expects labels to be unique, successive and not skip any integers. For instance, if there are ten classes, the algorithm expects the labels to be exactly the set `[0,1,2,3,4,5,6,7,8,9]`.
#
# Unfortunately, in the COCO validation set the labels do not satisfy this requirement: some indices are skipped and the labels start from `1`. We therefore need a mapper that converts this index system to our requirement. Let us create a generic mapper that could also be used for other datasets that might have non-contiguous or even string labels. All we need is a dictionary that provides a key-value mapping from an original label to the label that we require. Consider the following method that returns such a dictionary for the COCO validation dataset.
# +
import json
import logging
def get_coco_mapper():
original_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 84, 85, 86, 87, 88, 89, 90]
iter_counter = 0
COCO = {}
for orig in original_list:
COCO[orig] = iter_counter
iter_counter += 1
return COCO
# -
# Let us use this dictionary to create a look-up method, and let us do so in a way that any dictionary could be used to create such a method.
# +
def get_mapper_fn(map):
def mapper(in_category):
return map[in_category]
return mapper
fix_index_mapping = get_mapper_fn(get_coco_mapper())
# -
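# As a quick sanity check (an illustrative addition, not part of the original notebook): COCO category id 1 should map to 0 and the last id, 90, to 79.
print(fix_index_mapping(1), fix_index_mapping(90))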
# The method `fix_index_mapping` is essentially a look-up method, which we can use to convert labels. Let us now iterate over every annotation in the COCO dataset and prepare our data. Note how the keywords are created and a structure is established. For more information on the JSON format details, refer to the [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/object-detection.html).
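# For orientation, a single generated annotation file will have roughly the following shape (the file name and numeric values below are made up purely for illustration):
# +
example_annotation = {
    'file': '000000000139.jpg',
    'image_size': [{'width': 640, 'height': 426, 'depth': 3}],
    'annotations': [{'class_id': 0, 'top': 50, 'left': 30, 'width': 120, 'height': 200}],
    'categories': [{'class_id': 1, 'name': 'person'}]
}
# -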
file_name = './annotations/instances_val2017.json'
with open(file_name) as f:
js = json.load(f)
images = js['images']
categories = js['categories']
annotations = js['annotations']
for i in images:
jsonFile = i['file_name']
jsonFile = jsonFile.split('.')[0]+'.json'
line = {}
line['file'] = i['file_name']
line['image_size'] = [{
'width':int(i['width']),
'height':int(i['height']),
'depth':3
}]
line['annotations'] = []
line['categories'] = []
for j in annotations:
if j['image_id'] == i['id'] and len(j['bbox']) > 0:
line['annotations'].append({
'class_id':int(fix_index_mapping(j['category_id'])),
'top':int(j['bbox'][1]),
'left':int(j['bbox'][0]),
'width':int(j['bbox'][2]),
'height':int(j['bbox'][3])
})
class_name = ''
for k in categories:
if int(j['category_id']) == k['id']:
class_name = str(k['name'])
            assert class_name != ''  # every annotation must map to a known category
line['categories'].append({
'class_id':int(j['category_id']),
'name':class_name
})
if line['annotations']:
with open(os.path.join('generated', jsonFile),'w') as p:
json.dump(line,p)
# +
import os
import json
jsons = os.listdir('generated')
print ('There are {} images have annotation files'.format(len(jsons)))
# -
# After removing the images without annotations, we have 4952 annotated images. Let us split this dataset and create our training and validation datasets, with which our algorithm will train. To do so, we will simply split the dataset into training and validation data and move them to their respective folders.
# +
import shutil
train_jsons = jsons[:4452]
val_jsons = jsons[4452:]
#Moving training files to the training folders
for i in train_jsons:
image_file = './val2017/'+i.split('.')[0]+'.jpg'
shutil.move(image_file, './train/')
shutil.move('./generated/'+i, './train_annotation/')
#Moving validation files to the validation folders
for i in val_jsons:
image_file = './val2017/'+i.split('.')[0]+'.jpg'
shutil.move(image_file, './validation/')
shutil.move('./generated/'+i, './validation_annotation/')
# -
# ### Upload to S3
# The next step in this process is to upload the data to the S3 bucket, from which the algorithm can read and use it. We do this using multiple channels. Channels are simply directories in the bucket that differentiate between training and validation data. Let us simply call these directories `train` and `validation`. We will therefore require four channels: two for the data and two for the annotations, the annotation ones named with the suffix `_annotation`.
# +
# %%time
train_channel = prefix + '/train'
validation_channel = prefix + '/validation'
train_annotation_channel = prefix + '/train_annotation'
validation_annotation_channel = prefix + '/validation_annotation'
sess.upload_data(path='train', bucket=bucket, key_prefix=train_channel)
sess.upload_data(path='validation', bucket=bucket, key_prefix=validation_channel)
sess.upload_data(path='train_annotation', bucket=bucket, key_prefix=train_annotation_channel)
sess.upload_data(path='validation_annotation', bucket=bucket, key_prefix=validation_annotation_channel)
s3_train_data = 's3://{}/{}'.format(bucket, train_channel)
s3_validation_data = 's3://{}/{}'.format(bucket, validation_channel)
s3_train_annotation = 's3://{}/{}'.format(bucket, train_annotation_channel)
s3_validation_annotation = 's3://{}/{}'.format(bucket, validation_annotation_channel)
# -
# Next, we need to set up an output location in S3, where the model artifacts will be stored. These artifacts are the output of the algorithm's training job.
s3_output_location = 's3://{}/{}/output'.format(bucket, prefix)
# ## Training
# Now that we are done with all the setup that is needed, we are ready to train our object detector. To begin, let us create a ``sagemaker.estimator.Estimator`` object. This estimator will launch the training job.
od_model = sagemaker.estimator.Estimator(training_image,
role,
train_instance_count=1,
train_instance_type='ml.p3.2xlarge',
train_volume_size = 50,
train_max_run = 360000,
input_mode = 'File',
output_path=s3_output_location,
sagemaker_session=sess)
# The object detection algorithm at its core is the [Single-Shot Multi-Box detection algorithm (SSD)](https://arxiv.org/abs/1512.02325). This algorithm uses a `base_network`, which is typically a [VGG](https://arxiv.org/abs/1409.1556) or a [ResNet](https://arxiv.org/abs/1512.03385). The Amazon SageMaker object detection algorithm currently supports VGG-16 and ResNet-50. It also has a lot of hyperparameter options that help configure the training job. The next step in our training is to set up these hyperparameters and data channels for the model. Consider the following example definition of hyperparameters. See the SageMaker Object Detection [documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/object-detection.html) for more details on the hyperparameters.
#
# One of the hyperparameters here, for instance, is `epochs`. This defines how many passes over the dataset we iterate and determines the training time of the algorithm. For the sake of demonstration, let us run only `30` epochs.
od_model.set_hyperparameters(base_network='resnet-50',
use_pretrained_model=1,
num_classes=80,
mini_batch_size=16,
epochs=30,
learning_rate=0.001,
lr_scheduler_step='10',
lr_scheduler_factor=0.1,
optimizer='sgd',
momentum=0.9,
weight_decay=0.0005,
overlap_threshold=0.5,
nms_threshold=0.45,
image_shape=512,
label_width=600,
num_training_samples=4452)
# Now that the hyperparameters are set up, let us prepare the handshake between our data channels and the algorithm. To do this, we need to create `sagemaker.session.s3_input` objects from our data channels. These objects are then put in a simple dictionary, which the algorithm consumes. Notice that here we use a `content_type` of `image/jpeg` for both the image channels and the annotation channels, and that, unlike with the [RecordIO format](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_recordio_format.ipynb), we use four channels here.
# +
train_data = sagemaker.session.s3_input(s3_train_data, distribution='FullyReplicated',
content_type='image/jpeg', s3_data_type='S3Prefix')
validation_data = sagemaker.session.s3_input(s3_validation_data, distribution='FullyReplicated',
content_type='image/jpeg', s3_data_type='S3Prefix')
train_annotation = sagemaker.session.s3_input(s3_train_annotation, distribution='FullyReplicated',
content_type='image/jpeg', s3_data_type='S3Prefix')
validation_annotation = sagemaker.session.s3_input(s3_validation_annotation, distribution='FullyReplicated',
content_type='image/jpeg', s3_data_type='S3Prefix')
data_channels = {'train': train_data, 'validation': validation_data,
'train_annotation': train_annotation, 'validation_annotation':validation_annotation}
# -
# We have our `Estimator` object, we have set the hyperparameters for this object and we have our data channels linked with the algorithm. The only remaining thing to do is to train the algorithm, which the following cell does. Training involves a few steps. Firstly, the instances that we requested while creating the `Estimator` object are provisioned and set up with the appropriate libraries. Then, the data from our channels are downloaded into the instance. Once this is done, the training job begins. The provisioning and data downloading will take time, depending on the size of the data, so it might be a few minutes before we start getting training logs. The logs will also print out the Mean Average Precision (mAP) on the validation data, among other losses, for every run over the dataset, i.e., one epoch. This metric is a proxy for the quality of the model.
#
# Once the job has finished a "Job complete" message will be printed. The trained model can be found in the S3 bucket that was setup as `output_path` in the estimator.
od_model.fit(inputs=data_channels, logs=True)
# ## Hosting
# Once the training is done, we can deploy the trained model as an Amazon SageMaker real-time hosted endpoint. This will allow us to make predictions (or inference) from the model. Note that we don't have to host on the same instance (or type of instance) that we used to train. Training is a prolonged and compute-heavy job with compute and memory requirements that hosting typically does not have, so we can choose any type of instance we want to host the model. In our case we chose the `ml.p3.2xlarge` instance to train, but we host the model on the less expensive CPU instance `ml.m4.xlarge`. The endpoint deployment can be accomplished as follows:
object_detector = od_model.deploy(initial_instance_count = 1,
instance_type = 'ml.m4.xlarge')
# ## Inference
# Now that the trained model is deployed at an endpoint that is up and running, we can use this endpoint for inference. To do this, let us download an image from [PEXELS](https://www.pexels.com/) which the algorithm has not seen so far.
# +
# !wget -O test.jpg https://images.pexels.com/photos/980382/pexels-photo-980382.jpeg
file_name = 'test.jpg'
with open(file_name, 'rb') as image:
f = image.read()
b = bytearray(f)
ne = open('n.txt','wb')
ne.write(b)
# -
# Let us use our endpoint to try to detect objects within this image. Since the image is `jpeg`, we use the appropriate `content_type` to run the prediction job. The endpoint returns a JSON file that we can simply load and peek into.
# +
import json
object_detector.content_type = 'image/jpeg'
results = object_detector.predict(b)
detections = json.loads(results)
print (detections)
# -
# The results are in a format that is similar to the input .lst file (see the [RecordIO Notebook](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_recordio_format.ipynb) for more details on the .lst file definition), with the addition of a confidence score for each detected object. The format of the output can be represented as `[class_index, confidence_score, xmin, ymin, xmax, ymax]`. Typically, we don't consider low-confidence predictions.
#
# We have provided an additional script to easily visualize the detection outputs. You can visualize the high-confidence predictions with bounding boxes by filtering out low-confidence detections using the script below:
def visualize_detection(img_file, dets, classes=[], thresh=0.6):
"""
visualize detections in one image
Parameters:
----------
    img_file : str
        path to the image file to plot detections on
dets : numpy.array
ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...])
each row is one object
classes : tuple or list of str
class names
thresh : float
score threshold
"""
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img=mpimg.imread(img_file)
plt.imshow(img)
height = img.shape[0]
width = img.shape[1]
colors = dict()
for det in dets:
(klass, score, x0, y0, x1, y1) = det
if score < thresh:
continue
cls_id = int(klass)
if cls_id not in colors:
colors[cls_id] = (random.random(), random.random(), random.random())
xmin = int(x0 * width)
ymin = int(y0 * height)
xmax = int(x1 * width)
ymax = int(y1 * height)
rect = plt.Rectangle((xmin, ymin), xmax - xmin,
ymax - ymin, fill=False,
edgecolor=colors[cls_id],
linewidth=3.5)
plt.gca().add_patch(rect)
class_name = str(cls_id)
if classes and len(classes) > cls_id:
class_name = classes[cls_id]
plt.gca().text(xmin, ymin - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor=colors[cls_id], alpha=0.5),
fontsize=12, color='white')
plt.show()
# For the sake of this notebook, we used a small portion of the COCO dataset for training and trained the model with only a few (30) epochs. This implies that the results might not be optimal. To achieve better detection results, you can try to use more data from the COCO dataset and train the model for more epochs. Tuning the hyperparameters, such as `mini_batch_size`, `learning_rate`, and `optimizer`, also helps to get a better detector.
# +
object_categories = ['person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus', 'train', 'truck', 'boat',
'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
'hot dog', 'pizza', 'donut', 'cake', 'chair', 'sofa', 'pottedplant', 'bed', 'diningtable',
'toilet', 'tvmonitor', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven',
'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
'toothbrush']
# Setting a threshold 0.20 will only plot detection results that have a confidence score greater than 0.20.
threshold = 0.20
# Visualize the detections.
visualize_detection(file_name, detections['prediction'], object_categories, threshold)
# -
# ## Delete the Endpoint
# Having an endpoint running will incur some costs. Therefore as a clean-up job, we should delete the endpoint.
sagemaker.Session().delete_endpoint(object_detector.endpoint)
| introduction_to_amazon_algorithms/object_detection_pascalvoc_coco/object_detection_image_json_format.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
from sklearn.datasets import make_blobs, make_moons
from sklearn.preprocessing import minmax_scale
from project_helpers import shuffle_neighbors
# %matplotlib inline
# data = np.random.laplace(size=(200, 2))
# data, color = make_blobs(n_samples=1000, centers=2, random_state=12, cluster_std=2.0)
data, color = make_moons(n_samples=300, noise=0.15, random_state=12)
data = minmax_scale(data)
data = np.array([data[:,0], data[:,1], color]).T
fig = plt.figure()
scatter = plt.scatter(data[:, 0], data[:, 1], c=data[:,2])
# +
# https://matplotlib.org/examples/animation/moviewriter.html
IMWriter = manimation.writers['imagemagick']  # look up the ImageMagick writer class in the registry
writer = IMWriter(fps=10, metadata={'title':'Moon Test'})
d = data
fig = plt.figure()
scatter = plt.scatter(d[:, 0], d[:, 1], c=d[:,2])
plt.title('Iteration 0')
margin = 0.2
plt.xlim(d[:,0].min() - margin, d[:,0].max() + margin)
plt.ylim(d[:,1].min() - margin, d[:,1].max() + margin)
with writer.saving(fig, "images/moon_scatter.gif", 100):
for i in range(1, 101):
d = shuffle_neighbors(d)
d = d.copy()
# d[:,0] += np.random.laplace(scale=0.02,size=d[:,0].shape)
# d[:,1] += np.random.laplace(scale=0.02,size=d[:,1].shape)
if (i % 1) == 0:
scatter.remove()
scatter = plt.scatter(d[:, 0], d[:, 1], c=d[:,2])
plt.title('Iteration {}'.format(i))
writer.grab_frame()
# -
| jupyter/NearestNeighborsNoiseAnimation.ipynb |