text_prompt (string, lengths 168 to 30.3k) | code_prompt (string, lengths 67 to 124k) |
---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data
Step2: Plotting the data
|
<ASSISTANT_TASK:>
Python Code:
from datetime import datetime, timedelta
import cartopy.crs as ccrs
import pandas as pd
from metpy.cbook import get_test_data
import metpy.plots as mpplots
data = pd.read_csv(get_test_data('SFC_obs.csv', as_file_obj=False),
infer_datetime_format=True, parse_dates=['valid'])
# Plotting the Observations using a 15 minute time window for surface observations
obs = mpplots.PlotObs()
obs.data = data
obs.time = datetime(1993, 3, 12, 13)
obs.time_window = timedelta(minutes=15)
obs.level = None
obs.fields = ['tmpf', 'dwpf', 'emsl', 'cloud_cover', 'wxsym']
obs.locations = ['NW', 'SW', 'NE', 'C', 'W']
obs.colors = ['red', 'green', 'black', 'black', 'blue']
obs.formats = [None, None, lambda v: format(10 * v, '.0f')[-3:], 'sky_cover',
'current_weather']
obs.vector_field = ('uwind', 'vwind')
obs.reduce_points = 1
# Add map features for the particular panel
panel = mpplots.MapPanel()
panel.layout = (1, 1, 1)
panel.area = 'ga'
panel.projection = ccrs.PlateCarree()
panel.layers = ['coastline', 'borders', 'states']
panel.plots = [obs]
# Collecting panels for complete figure
pc = mpplots.PanelContainer()
pc.size = (10, 10)
pc.panels = [panel]
# Showing the results
pc.show()
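# Optional, hedged extra step (not in the original notebook): the declarative
# interface can also write the rendered figure to disk; the filename below is a
# placeholder.
pc.save('surface_obs.png')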
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It's also possible to specify additional regressors. For this you need to additionally specify
Step2: So, the only things that we need to specify for our model are the onset and the stimulus type, i.e. column 0 and column 5 or 7. Those we can get with the command
Step3: Before we can use the onsets, we first need to split them into the two conditions
Step4: The last thing we now need to do is to put this into a Bunch object and we're done
|
<ASSISTANT_TASK:>
Python Code:
from nipype.interfaces.base import Bunch
conditions = ['faces', 'houses', 'scrambled pix']
onsets = [[0, 30, 60, 90],
[10, 40, 70, 100],
[20, 50, 80, 110]]
durations = [[3], [3], [3]]
subject_info = Bunch(conditions=conditions,
onsets=onsets,
durations=durations)
!cat /data/ds102/sub-01/func/sub-01_task-flanker_run-1_events.tsv
import numpy as np
filename = '/data/ds102/sub-01/func/sub-01_task-flanker_run-1_events.tsv'
trailinfo = np.genfromtxt(filename, delimiter='\t', dtype=None, skip_header=1)
trailinfo = [[t[0], t[7]] for t in trailinfo]
trailinfo
onset1 = []
onset2 = []
for t in trailinfo:
if 'incongruent' in t[1]:
onset2.append(t[0])
else:
onset1.append(t[0])
print onset1
print onset2
from nipype.interfaces.base import Bunch
conditions = ['congruent', 'incongruent']
onsets = [onset1, onset2]
durations = [[2], [2]]
subject_info = Bunch(conditions=conditions,
onsets=onsets,
durations=durations)
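# Hedged sketch of the "additional regressors" step mentioned in the description:
# a Bunch can also carry nuisance regressors via the 'regressor_names' and
# 'regressors' fields expected by SpecifyModel. The regressor values below are
# made-up placeholders (in practice, one value per functional volume is needed).
subject_info_with_regressors = Bunch(conditions=conditions,
                                     onsets=onsets,
                                     durations=durations,
                                     regressor_names=['linear_trend'],
                                     regressors=[[0.0, 1.0, 2.0, 3.0]])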
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List Comprehensions
Step2: You can do the same thing with
Step3: You can include if statements
Step4: You can do the same thing with
Step5: Now to observations
Step6: Observing Night
Step7: But, you get to observe in Hawaii
Step8: Plot the objects
Step9: Observing Constraints
Step10: Let us add another constraint
Step11: Additional Constraints
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from astropy.table import QTable
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
import pytz
from astroplan import Observer, FixedTarget
import warnings
warnings.filterwarnings('ignore', category=Warning)
squares = [] # create a blank list
for x in range(10): # for loop over 0 -> 9
squares.append(x**2) # calculate x**2 for each x, add to end of list
squares
squares = [x**2 for x in range(10)]
squares
even_squares = []
for x in range(10):
if (x % 2 == 0):
even_squares.append(x**2)
even_squares
even_squares = [x**2 for x in range(10) if (x % 2 == 0)]
even_squares
target_table = QTable.read('ObjectList.csv', format='ascii.csv')
target_table
targets = [FixedTarget(coord=SkyCoord(ra = RA*u.hourangle, dec = DEC*u.deg), name=Name)
for Name, RA, DEC in target_table]
targets
observe_date = Time("2018-01-01", format='iso')
my_timezone = pytz.timezone('US/Hawaii')
my_location = Observer.at_site('gemini_north')
observe_start = my_location.sun_set_time(observe_date, which='nearest')
observe_end = my_location.sun_rise_time(observe_date, which='next')
print("Observing starts at {0.iso} UTC".format(observe_start))
print("Observing ends at {0.iso} UTC".format(observe_end))
print("Observing starts at {0} local".format(observe_start.to_datetime(my_timezone)))
print("Observing ends at {0} local".format(observe_end.to_datetime(my_timezone)))
# A complete list of built-in observatories can be found by:
#EarthLocation.get_site_names()
observing_length = (observe_end - observe_start).to(u.h)
print("You can observe for {0:.1f} tonight".format(observing_length))
observing_range = [observe_start, observe_end]
%matplotlib inline
import matplotlib.pyplot as plt
from astroplan import time_grid_from_range
from astroplan.plots import plot_sky, plot_airmass
time_grid = time_grid_from_range(observing_range)
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,10)
fig.tight_layout()
for my_object in targets:
ax = plot_sky(my_object, my_location, time_grid)
ax.legend(loc=0,shadow=True);
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,5)
fig.tight_layout()
for my_object in targets:
ax = plot_airmass(my_object, my_location, time_grid)
ax.legend(loc=0,shadow=True);
from astroplan import AltitudeConstraint, AirmassConstraint
from astroplan import observability_table
constraints = [AltitudeConstraint(20*u.deg, 80*u.deg)]
observing_table = observability_table(constraints, my_location, targets, time_range=observing_range)
print(observing_table)
constraints.append(AirmassConstraint(2))
observing_table = observability_table(constraints, my_location, targets, time_range=observing_range)
print(observing_table)
from astroplan import moon_illumination
moon_illumination(observe_start)
from astroplan import MoonSeparationConstraint
constraints.append(MoonSeparationConstraint(45*u.deg))
observing_table = observability_table(constraints, my_location, targets, time_range=observing_range)
print(observing_table)
fig,ax = plt.subplots(1,1)
fig.set_size_inches(10,5)
fig.tight_layout()
for i, my_object in enumerate(targets):
if observing_table['ever observable'][i]:
ax = plot_airmass(my_object, my_location, time_grid)
ax.legend(loc=0,shadow=True);
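# Hedged extra example (not in the original notebook): astroplan also ships an
# AtNightConstraint; appending it and re-running the observability table would
# additionally restrict targets to astronomical night.
from astroplan import AtNightConstraint
night_constraints = constraints + [AtNightConstraint.twilight_astronomical()]
observing_table_night = observability_table(night_constraints, my_location, targets,
                                            time_range=observing_range)
print(observing_table_night)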
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Aggregation Operators
Step2: For $match we use the same syntax that we use for read operations
Step3: group operators
|
<ASSISTANT_TASK:>
Python Code:
import pprint
def get_client():
from pymongo import MongoClient
return MongoClient('mongodb://localhost:27017/')
def get_collection():
return get_client().examples.twitter
collection = get_collection()
def aggregate_and_show(collection, query, limit = True):
_query = query[:]
if limit:
_query.append({"$limit": 5})
result = collection.aggregate(_query)
pprint.pprint(list(r for r in result))
query = [
{"$group": {"_id": "$user.screen_name",
"count": {"$sum": 1}}},
{"$sort": {"count": -1}}
]
aggregate_and_show(collection, query)
query = [
{"$match": {"user.friends_count": {"$gt": 0},
"user.followers_count": {"$gt": 0}}},
{"$project": {"ratio": {"$divide": ["$user.followers_count",
"$user.friends_count"]},
"screen_name": "$user.screen_name"}},
{"$sort": {"ratio": -1}}
]
aggregate_and_show(collection, query)
query = [
{"$unwind": "$entities.user_mentions"},
{"$group": {"_id": "$user.screen_name",
"count": {"$sum": 1}}},
{"$sort": {"count": -1}}
]
aggregate_and_show(collection, query)
#get unique hashtags by user
query = [
{"$unwind": "$entities.hashtags"},
{"$group": {"_id": "$user.screen_name",
"unique_hashtags": {
"$addToSet": "$entities.hashtags.text"
}}},
{"$sort": {"_id": -1}}
]
aggregate_and_show(collection, query)
# find number of unique user mentions
query = [
{"$unwind": "$entities.user_mentions"},
{"$group": {
"_id": "$user.screen_name",
"mset": {
"$addToSet": "$entities.user_mentions.screen_name"
}
}},
{"$unwind": "$mset"},
{"$group": {"_id": "$_id", "count": {"$sum": 1}}},
{"$sort": {"count": -1}}
]
aggregate_and_show(collection, query)
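# Hedged extra example of other $group accumulators ($avg and $max are standard
# MongoDB aggregation operators): average and maximum follower counts per user.
query = [
    {"$group": {"_id": "$user.screen_name",
                "avg_followers": {"$avg": "$user.followers_count"},
                "max_followers": {"$max": "$user.followers_count"}}},
    {"$sort": {"avg_followers": -1}}
]
aggregate_and_show(collection, query)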
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.1) Dealing with Missing Values – Imputation
Step2: A couple of imputation methods were tried in the original Notebook
Step3: 2.2) Dealing with Confidence Labels
Step4: The original Notebook tried a couple of methods of incorporating the confidence labels into the model
Step5: 2.3) Dealing with Class Imbalance
Step6: 3. Pre-Processing
Step7: 3.2) Principal Component Analysis (PCA)
Step8: The cell below will plot how much more of the variance in the data set is explained as the number of principal components included is increased.
Step9: The graph above suggests that the maximum number of principal components should not exceed 300, as less and less variance is explained as the number of principal components included increases beyond 300. For the optimisation, the optimal number of principal components was initially assumed to be 230.
Step10: 4. Model Selection
Step11: The cell below will plot two heat-maps side by side
Step12: The cells below will plot a Validation Curves for Gamma.
Step13: The cells below will plot the Learning Curve.
Step14: Finding Best Number of Principal Components
Step15: Making Predictions
Step16: The following cell will apply the same pre-processing applied to the training data to the test data.
Step17: The following cells will produce predictions on the test data using the final model.
|
<ASSISTANT_TASK:>
Python Code:
#Import Relevant Modules and Packages
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA
from scipy import stats
from sklearn.feature_selection import VarianceThreshold
#see all rows of dataframe
#pd.set_option('display.max_rows', 500)
#Load the complete training data set
training_data = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Training Data Set.csv", header=0, index_col=0)
#Observe the original training data
training_data.head()
#quantify class counts of original training data
training_data.prediction.value_counts()
#Load additional training data
add_training_data = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Additional Training Data Set .csv", header=0, index_col=0)
#observe additional training data
add_training_data
#quantify class counts of additional training data
add_training_data.prediction.value_counts()
#find number of NAs for each column for additional training data
add_training_data.isnull().sum()
#concatenate original training data with additional training data
full_training_data_inc = pd.concat([training_data, add_training_data])
#observe concatenated training data
full_training_data_inc
#imputation via KNN
from fancyimpute import KNN
knn_trial = full_training_data_inc
knn_trial
complete_knn = KNN(k=3).complete(knn_trial)
#convert imputed matrix back to dataframe for visualisation and convert 'prediction' dtype to int
complete_knn_df = pd.DataFrame(complete_knn, index=full_training_data_inc.index, columns=full_training_data_inc.columns)
full_training_data = complete_knn_df
full_training_data.prediction = full_training_data.prediction.astype('int')
full_training_data
#quantify class counts for full training data
full_training_data.prediction.value_counts()
#Load confidence annotations
confidence_labels = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Annotation Confidence .csv", header=0, index_col=0)
#quantify confidence labels (how many are 1, how many are 0.66)
print(confidence_labels.confidence.value_counts())
#observe confidence annotations
confidence_labels
#adding confidence of label column to imputed full training data set
full_train_wcl = pd.merge(full_training_data, confidence_labels, left_index=True, right_index=True)
full_train_wcl
#only keep data instance with confidence label = 1
conf_full_train = full_train_wcl.loc[full_train_wcl['confidence'] == 1]
conf_full_train
#quantify class counts
conf_full_train.prediction.value_counts()
#convert full training data dataframe with confidence instances only to matrix
conf_ft_matrix = conf_full_train.as_matrix(columns=None)
conf_ft_matrix
conf_ft_matrix.shape
#splitting full training data with confidence into inputs and outputs
conf_ft_inputs = conf_ft_matrix[:,0:4608]
print(conf_ft_inputs.shape)
conf_ft_outputs = conf_ft_matrix[:,4608]
print(conf_ft_outputs.shape)
from imblearn.over_sampling import SMOTE
from collections import Counter
#fit over-sampling to training data inputs and outputs
over_sampler = SMOTE(ratio='auto', k_neighbors=5, kind='regular', random_state=0)
over_sampler.fit(conf_ft_inputs, conf_ft_outputs)
#create new inputs and outputs with correct class proportions
resampled_x, resampled_y = over_sampler.fit_sample(conf_ft_inputs, conf_ft_outputs)
#quantify original class proportions prior to over-sampling
Counter(conf_ft_outputs)
#quantify class proportions after over-sampling
Counter(resampled_y)
#assign newly sampled input and outputs to old variable name used for inputs and outputs before
#over-sampling
conf_ft_inputs = resampled_x
conf_ft_outputs = resampled_y
print(Counter(conf_ft_outputs))
#standardise the full training data with confidence labels 1 only
scaler_2 = preprocessing.StandardScaler().fit(conf_ft_inputs)
std_conf_ft_in = scaler_2.transform(conf_ft_inputs)
std_conf_ft_in
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
#preprocessing: PCA (feature construction). High number of pcs chosen to plot a graph
#showing how much more variance is explained as pc number increases
pca_2 = PCA(n_components=700, random_state=0)
std_conf_ft_in_pca = pca_2.fit_transform(std_conf_ft_in)
#quantify amount of variance explained by principal components
print("Total Variance Explained by PCs (%): ", np.sum(pca_2.explained_variance_ratio_))
#calculate a list of cumulative sums for amount of variance explained
cumulative_variance = np.cumsum(pca_2.explained_variance_ratio_)
len(cumulative_variance)
#add 0 to the beginning of the list, otherwise list starts with variance explained by 1 pc
cumulative_variance = np.insert(cumulative_variance, 0, 0)
#define range of pcs
pcs_4_var_exp = np.arange(0,701,1)
len(pcs_4_var_exp)
fig_1 = plt.figure(figsize=(7,4))
plt.title('Number of PCs and Change In Variance Explained')
plt.xlabel('Number of PCs')
plt.ylabel('Variance Explained (%)')
plt.plot(pcs_4_var_exp, cumulative_variance, 'x-', color="r")
plt.show()
#preprocessing: PCA (feature construction)
pca_2 = PCA(n_components=230, random_state=0)
std_conf_ft_in_pca = pca_2.fit_transform(std_conf_ft_in)
#quantify ratio of variance explained by principal components
print("Total Variance Explained by PCs (%): ", np.sum(pca_2.explained_variance_ratio_))
#this cell takes around 7 minutes to run
#parameter optimisation with Exhaustive Grid Search, with class weight
original_c_range = np.arange(0.85, 1.01, 0.01)
gamma_range = np.arange(0.00001, 0.00023, 0.00002)
#define parameter ranges to test
param_grid = [{'C': original_c_range, 'gamma': gamma_range, 'kernel': ['rbf'],
'class_weight':[{0:1.33, 1:1}]}]
#define model to do parameter search on
svr = SVC()
clf = GridSearchCV(svr, param_grid, scoring='accuracy', cv=5,)
clf.fit(std_conf_ft_in_pca, conf_ft_outputs)
#create dictionary of results
results_dict = clf.cv_results_
#convert the results into a dataframe
df_results = pd.DataFrame.from_dict(results_dict)
df_results
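# Small addition (not in the original notebook): GridSearchCV also exposes the best
# parameter combination and its cross-validated score directly.
print("Best parameters:", clf.best_params_)
print("Best cross-validated accuracy: %.4f" % clf.best_score_)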
#Draw heatmap of the validation accuracy as a function of gamma and C
fig = plt.figure(figsize=(10, 10))
ix=fig.add_subplot(1,2,1)
val_scores = clf.cv_results_['mean_test_score'].reshape(len(original_c_range),len(gamma_range))
val_scores
ax = sns.heatmap(val_scores, linewidths=0.5, square=True, cmap='PuBuGn',
xticklabels=gamma_range, yticklabels=original_c_range, cbar_kws={'shrink':0.5})
ax.invert_yaxis()
plt.yticks(rotation=0, fontsize=10)
plt.xticks(rotation= 70,fontsize=10)
plt.xlabel('Gamma', fontsize=15)
plt.ylabel('C', fontsize=15)
plt.title('Validation Accuracy', fontsize=15)
#Draw heatmap of the validation accuracy as a function of gamma and C
ix=fig.add_subplot(1,2,2)
train_scores = clf.cv_results_['mean_train_score'].reshape(len(original_c_range),len(gamma_range))
train_scores
#plt.figure(figsize=(6, 6))
ax_1 = sns.heatmap(train_scores, linewidths=0.5, square=True, cmap='PuBuGn',
xticklabels=gamma_range, yticklabels=original_c_range, cbar_kws={'shrink':0.5})
ax_1.invert_yaxis()
plt.yticks(rotation=0, fontsize=10)
plt.xticks(rotation= 70,fontsize=10)
plt.xlabel('Gamma', fontsize=15)
plt.ylabel('C', fontsize=15)
plt.title('Training Accuracy', fontsize=15)
plt.show()
#import module/library
from sklearn.model_selection import validation_curve
import matplotlib.pyplot as plt
%matplotlib inline
#specifying gamma parameter range to plot for validation curve
param_range = gamma_range
param_range
#calculating train and validation scores
train_scores, valid_scores = validation_curve(SVC(C=0.92, kernel='rbf', class_weight={0:1.33, 1:1}), std_conf_ft_in_pca, conf_ft_outputs, param_name='gamma',param_range=param_range,scoring='accuracy')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
valid_scores_mean = np.mean(valid_scores, axis=1)
valid_scores_std = np.std(valid_scores, axis=1)
#plotting validation curve
plt.title('Gamma Validation Curve for SVM With RBF Kernel | C=0.92')
plt.xlabel('Gamma')
plt.ylabel('Score')
plt.xticks(rotation=70)
plt.ylim(0.8,1.0)
plt.xlim(0.0001,0.00021)
plt.xticks(param_range)
lw=2
plt.plot(param_range, train_scores_mean, 'o-',label="Training Score", color='darkorange', lw=lw)
plt.fill_between(param_range, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std, alpha=0.2, color='darkorange', lw=lw)
plt.plot(param_range, valid_scores_mean, 'o-',label="Testing Score", color='navy', lw=lw)
plt.fill_between(param_range, valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, alpha=0.2, color='navy', lw=lw)
plt.legend(loc='best')
plt.show()
#import module/library
from sklearn.model_selection import learning_curve
#define training data size increments
td_size = np.arange(0.1, 1.1, 0.1)
#calculating train and validation scores
train_sizes, train_scores, valid_scores = learning_curve(SVC(C=0.92, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1}), std_conf_ft_in_pca, conf_ft_outputs, train_sizes=td_size ,scoring='accuracy')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
valid_scores_mean = np.mean(valid_scores, axis=1)
valid_scores_std = np.std(valid_scores, axis=1)
#plotting learning curve
fig = plt.figure(figsize=(5,5))
plt.title('Learning Curve with SVM with RBF Kernel| C=0.92 & Gamma = 0.00011', fontsize=9)
plt.xlabel('Train Data Size')
plt.ylabel('Score')
plt.ylim(0.8,1)
lw=2
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training Score")
plt.fill_between(train_sizes, train_scores_mean-train_scores_std, train_scores_mean+train_scores_std, alpha=0.2, color='red', lw=lw)
plt.plot(train_sizes, valid_scores_mean, 'o-', color="g",label="Testing Score")
plt.fill_between(train_sizes, valid_scores_mean-valid_scores_std, valid_scores_mean+valid_scores_std, alpha=0.2, color='green', lw=lw)
plt.legend(loc='best')
plt.show()
#this cell may take several minutes to run
#plot how the number of PC's changes the test accuracy
no_pcs = np.arange(20, 310, 10)
compute_average_of_5 = []
for t in range(0,5):
pcs_accuracy_change = []
for i in no_pcs:
dummy_inputs = std_conf_ft_in
dummy_outputs = conf_ft_outputs
pca_dummy = PCA(n_components=i,)
pca_dummy.fit(dummy_inputs)
dummy_inputs_pca = pca_dummy.transform(dummy_inputs)
dummy_model = SVC(C=0.92, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1})
dummy_model.fit(dummy_inputs_pca, dummy_outputs,)
dummy_scores = cross_val_score(dummy_model, dummy_inputs_pca, dummy_outputs, cv=3, scoring='accuracy')
mean_cv = dummy_scores.mean()
pcs_accuracy_change.append(mean_cv)
print (len(pcs_accuracy_change))
compute_average_of_5.append(pcs_accuracy_change)
#calculate position specific average for the five trials
from __future__ import division
average_acc_4_pcs = [sum(e)/len(e) for e in zip(*compute_average_of_5)]
plt.title('Number of PCs and Change In Accuracy')
plt.xlabel('Number of PCs')
plt.ylabel('Accuracy (%)')
plt.plot(no_pcs, average_acc_4_pcs, 'o-', color="r")
plt.show()
#Load the complete training data set
test_data = pd.read_csv("/Users/Max/Desktop/Max's Folder/Uni Work/Data Science MSc/Machine Learning/ML Kaggle Competition /Data Sets/Testing Data Set.csv", header=0, index_col=0)
##Observe the test data
test_data
#turn test dataframe into matrix
test_data_matrix = test_data.as_matrix(columns=None)
test_data_matrix.shape
#pre-process test data in same way as train data
scaled_test = scaler_2.transform(test_data_matrix)
transformed_test = pca_2.transform(scaled_test)
transformed_test.shape
#define and fit final model with best parameters from grid search
final_model = SVC(C=0.92, cache_size=1000, kernel='rbf', gamma=0.00011, class_weight={0:1.33, 1:1})
final_model.fit(std_conf_ft_in_pca, conf_ft_outputs)
#make test data predictions
predictions = final_model.predict(transformed_test)
#create dictionary for outputs matched with ID
to_export = {'ID': np.arange(1, 4201, 1), 'prediction': predictions}
to_export
#convert to dataframe
final_predictions = pd.DataFrame.from_dict(to_export)
final_predictions
#convert prediction column float type entries to integers
final_predictions = final_predictions.astype('int')
final_predictions
#check properties of predictions: class balance should be 42.86(1):57.14(0)
#i.e. should predict 2400 Class 0 instances, and 1800 Class 1 instances
final_predictions.prediction.value_counts()
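# Hedged final step (not in the original notebook): write the predictions to a CSV
# file for submission; 'final_predictions.csv' is a placeholder path.
final_predictions.to_csv('final_predictions.csv', index=False)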
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We need to create metabolites as well. If we were using an existing model, we could use get_by_id to get the appropriate Metabolite objects instead.
Step2: Adding metabolites to a reaction requires using a dictionary of the metabolites and their stoichiometric coefficients. A group of metabolites can be added all at once, or they can be added one at a time.
Step3: The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9)
Step4: At this point in time, the model is still empty
Step5: We will add the reaction to the model, which will also add all associated metabolites and genes
Step6: We can iterate through the model objects to observe the contents
|
<ASSISTANT_TASK:>
Python Code:
from cobra import Model, Reaction, Metabolite
# Best practice: SBML compliant IDs
cobra_model = Model('example_cobra_model')
reaction = Reaction('3OAS140')
reaction.name = '3 oxoacyl acyl carrier protein synthase n C140 '
reaction.subsystem = 'Cell Envelope Biosynthesis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
reaction.objective_coefficient = 0. # this is the default
ACP_c = Metabolite('ACP_c',
formula='C11H21N2O7PRS',
name='acyl-carrier-protein',
compartment='c')
omrsACP_c = Metabolite('3omrsACP_c',
formula='C25H45N2O9PRS',
name='3-Oxotetradecanoyl-acyl-carrier-protein',
compartment='c')
co2_c = Metabolite('co2_c',
formula='CO2',
name='CO2',
compartment='c')
malACP_c = Metabolite('malACP_c',
formula='C14H22N2O10PRS',
name='Malonyl-acyl-carrier-protein',
compartment='c')
h_c = Metabolite('h_c',
formula='H',
name='H',
compartment='c')
ddcaACP_c = Metabolite('ddcaACP_c',
formula='C23H43N2O8PRS',
name='Dodecanoyl-ACP-n-C120ACP',
compartment='c')
reaction.add_metabolites({malACP_c: -1.0,
h_c: -1.0,
ddcaACP_c: -1.0,
co2_c: 1.0,
ACP_c: 1.0,
omrsACP_c: 1.0})
reaction.reaction # This gives a string representation of the reaction
reaction.gene_reaction_rule = '( STM2378 or STM1197 )'
reaction.genes
print('%i reactions in initial model' % len(cobra_model.reactions))
print('%i metabolites in initial model' % len(cobra_model.metabolites))
print('%i genes in initial model' % len(cobra_model.genes))
cobra_model.add_reaction(reaction)
# Now there are things in the model
print('%i reaction in model' % len(cobra_model.reactions))
print('%i metabolites in model' % len(cobra_model.metabolites))
print('%i genes in model' % len(cobra_model.genes))
# Iterate through the objects in the model
print("Reactions")
print("---------")
for x in cobra_model.reactions:
print("%s : %s" % (x.id, x.reaction))
print("Metabolites")
print("-----------")
for x in cobra_model.metabolites:
print('%s : %s' % (x.id, x.formula))
print("Genes")
print("-----")
for x in cobra_model.genes:
reactions_list_str = "{" + ", ".join((i.id for i in x.reactions)) + "}"
print("%s is associated with reactions: %s" % (x.id, reactions_list_str))
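# Hedged illustration of the get_by_id lookup mentioned in the description: once the
# reaction has been added, model components can be retrieved by their identifiers.
print(cobra_model.metabolites.get_by_id('ACP_c').name)
print(cobra_model.reactions.get_by_id('3OAS140').reaction)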
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 2: Convert the restricted stock units (RSUs) an employee has in a company to their current cash value.
Step2: Let's take a look at some values and see how we can produce certain outputs.
Step3: Generators
Step4: Let's try Creating
|
<ASSISTANT_TASK:>
Python Code:
# Store even numbers from 0 to 20
even_lst = [num for num in range(21) if num % 2 == 0]
print(even_lst)
cash_value = 20
rsu_dict = {"Max":20, "Willie":13, "Joanna":14}
lst = [rsu_dict[name]*cash_value for name in rsu_dict]
print(lst)
my_dict = {"Ross":19, "Bernie":13, "Micah":15}
cash_value = 20
# [19*20, 13*20, 15*20]
cash_lst = [my_dict[key]*20 for key in my_dict]
print(cash_lst)
rows = 'ABC'
cols = '123'
vowels = ('a', 'e', 'i', 'o', 'u')
sentence = 'cogito ergo sum'
words = sentence.split()
# Produce [A3, B2, C1]
number_letter_lst = [rows[element]+cols[2-element] for element in range(3)]
print(number_letter_lst)
# Produce [A1, B1, C1, A2, B2, C2, A3, B3, C3]
letter_number_lst = [r+c for c in cols for r in rows]
print(letter_number_lst)
x = [s1 + ' x ' + s2
for s1 in (rows[i]+cols[i] for i in range(3))
for s2 in (rows[2-j]+cols[j] for j in range(3))]
print(x)
# Simply accessing rows and cols in a comprehensions [A1, A2, A3, B1, B2, B3, C1, C2, C3]
# Non-Pythonic
lst = []
for r in rows:
for c in cols:
lst.append(r+c)
# Pythonic
lst = [r+c for r in rows for c in cols]
print(lst)
# let's figure this list out with normal syntax
lst = []
for r in (rows[i]+cols[i] for i in range(3)):
for c in (rows[2-i]+cols[i] for i in range(3)):
lst.append(r + 'x' + c)
print(lst)
# shortened
crossed_list = [x + " x " + y for x in (rows[i]+cols[i] for i in range(3)) for y in (rows[2-i]+cols[i] for i in range(3))]
print(crossed_list)
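# A short hedged illustration of the "Generators" step: replacing a list
# comprehension's square brackets with parentheses gives a generator expression,
# which produces values lazily instead of building the whole list up front.
squares_gen = (x**2 for x in range(10))
print(next(squares_gen))  # 0
print(next(squares_gen))  # 1
print(list(squares_gen))  # the remaining values: 4, 9, 16, ...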
x = sorted(words, key=lambda x: len(x))
print(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Common plots
Step2: Example
|
<ASSISTANT_TASK:>
Python Code:
import numpy
import matplotlib.pyplot as plt
from IPython.display import Audio
import librosa
import librosa.display
import mir_eval.display
import mir_eval.sonify
# Note: y, sr, S, ref_beats and est_beats are assumed computed in earlier cells of
# the original notebook (audio signal, mel spectrogram, reference/estimated beats).
librosa.display.specshow(S, x_axis='time', y_axis='mel')
mir_eval.display.events(ref_beats, color='w', alpha=0.8, linewidth=3)
mir_eval.display.events(est_beats, color='c', alpha=0.8, linewidth=3, linestyle='--')
y_harm, y_perc = librosa.effects.hpss(y, margin=8)
plt.figure(figsize=(12, 4))
mir_eval.display.separation([y_perc, y_harm], sr, labels=['percussive', 'harmonic'])
plt.legend()
# The original cell was truncated here; a hedged completion: sonify the chord
# annotations (chord_labels and chord_intervals are assumed defined) and mix them
# with the original signal for playback.
Audio(data=numpy.vstack([
    mir_eval.sonify.chords(chord_labels, chord_intervals, sr, length=len(y)),
    y]), rate=sr)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Corpus acquisition.
Step2: You can try with any other categories. Take into account that the behavior of topic modelling algorithms may depend on the amount of documents available for the analysis. Select a category with at least 100 articles. You can browse the wikipedia category tree here, https
Step3: Now, we have stored the whole text collection in two lists
Step4: 2. Corpus Processing
Step5: 2.2. Stemming vs Lemmatization
Step6: Alternatively, we can apply lemmatization. For English texts, we can use the lemmatizer from NLTK, which is based on WordNet. If you have not used wordnet before, you will likely need to download it from nltk
Step7: Task
Step8: One of the advantages of the lemmatizer method is that the result of lemmatization is still a true word, which is more advisable for the presentation of text processing results.
Step9: In the second step, let us create a numerical version of our corpus using the doc2bow method. In general, D.doc2bow(token_list) transforms any list of tokens into a list of tuples (token_id, n), one for each token in token_list, where token_id is the token identifier (according to dictionary D) and n is the number of occurrences of such token in token_list.
Step10: At this point, it is good to make sure to understand what has happened. In corpus_clean we had a list of token lists. With it, we have constructed a Dictionary, D, which assign an integer identifier to each token in the corpus.
Step11: Note that we can interpret each element of corpus_bow as a sparse_vector. For example, a list of tuples
Step12: and a bow representation of a corpus with
Step13: Before starting with the semantic analyisis, it is interesting to observe the token distribution for the given corpus.
Step14: ids_sorted is a list of all token ids, sorted by decreasing number of occurrences in the whole corpus. For instance, the most frequent term is
Step15: which appears
Step16: In the following we plot the most frequent terms in the corpus.
Step17: Exercise
Step18: Exercise
Step19: 3. Semantic Analysis
Step20: From now on, tfidf can be used to convert any vector from the old representation (bow integer counts) to the new one (TfIdf real-valued weights)
Step21: Or to apply a transformation to a whole corpus
Step22: 3.1. Latent Semantic Indexing (LSI)
Step23: From LSI, we can check both the topic-tokens matrix and the document-topics matrix.
Step24: However, a more useful representation of topics is as a list of tuples (token, value). This is provided by the show_topic method.
Step25: LSI approximates any document as a linear combination of the topic vectors. We can compute the topic weights for any input corpus entered as input to the lsi model.
Step26: Task
Step27: 3.2. Latent Dirichlet Allocation (LDA)
Step28: 3.2.2. LDA using python lda library
Step29: Document-topic distribution
Step30: It allows incremental updates
Step31: Now, we need a dataset to feed the Count_Vectorizer object, by joining all tokens in corpus_clean in a single string, using a space ' ' as separator.
Step32: Now we are ready to compute the token counts.
Step33: Now we can apply the LDA algorithm.
Step34: Task
Step35: Exercise
|
<ASSISTANT_TASK:>
Python Code:
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# import pylab
# Required imports
from wikitools import wiki
from wikitools import category
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import gensim
import lda
import lda.datasets
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from test_helper import Test
site = wiki.Wiki("https://en.wikipedia.org/w/api.php")
# Select a category with a reasonable number of articles (>100)
# cat = "Economics"
cat = "Pseudoscience"
print cat
# Loading category data. This may take a while
print "Loading category data. This may take a while..."
cat_data = category.Category(site, cat)
corpus_titles = []
corpus_text = []
for n, page in enumerate(cat_data.getAllMembersGen()):
print "\r Loading article {0}".format(n + 1),
corpus_titles.append(page.title)
corpus_text.append(page.getWikiText())
n_art = len(corpus_titles)
print "\nLoaded " + str(n_art) + " articles from category " + cat
# n = 5
# print corpus_titles[n]
# print corpus_text[n]
# You can comment this if the package is already available.
nltk.download("punkt")
nltk.download("stopwords")
stopwords_en = stopwords.words('english')
corpus_clean = []
for n, art in enumerate(corpus_text):
print "\rProcessing article {0} out of {1}".format(n + 1, n_art),
# This is to make sure that all characters have the appropriate encoding.
art = art.decode('utf-8')
# Tokenize each text entry.
# scode: tokens = <FILL IN>
token_list = word_tokenize(art)
# Convert all tokens in token_list to lowercase, remove non alfanumeric tokens and stem.
# Store the result in a new token list, clean_tokens.
# scode: filtered_tokens = <FILL IN>
filtered_tokens = [token.lower() for token in token_list if token.isalnum()]
# Remove all tokens in the stopwords list and append the result to corpus_clean
# scode: clean_tokens = <FILL IN>
clean_tokens = [token for token in filtered_tokens if token not in stopwords_en]
# scode: <FILL IN>
corpus_clean.append(clean_tokens)
print "\nLet's check the first tokens from document 0 after processing:"
print corpus_clean[0][0:30]
Test.assertTrue(len(corpus_clean) == n_art, 'List corpus_clean does not contain the expected number of articles')
Test.assertTrue(len([c for c in corpus_clean[0] if c in stopwords_en])==0, 'Stopwords have not been removed')
# Select stemmer.
stemmer = nltk.stem.SnowballStemmer('english')
corpus_stemmed = []
for n, token_list in enumerate(corpus_clean):
print "\rStemming article {0} out of {1}".format(n + 1, n_art),
# Convert all tokens in token_list to lowercase, remove non alfanumeric tokens and stem.
# Store the result in a new token list, clean_tokens.
# scode: stemmed_tokens = <FILL IN>
stemmed_tokens = [stemmer.stem(token) for token in token_list]
# Add art to the stemmed corpus
# scode: <FILL IN>
corpus_stemmed.append(stemmed_tokens)
print "\nLet's check the first tokens from document 0 after stemming:"
print corpus_stemmed[0][0:30]
Test.assertTrue((len([c for c in corpus_stemmed[0] if c!=stemmer.stem(c)]) < 0.1*len(corpus_stemmed[0])),
'It seems that stemming has not been applied properly')
# You can comment this if the package is already available.
nltk.download("wordnet")
wnl = WordNetLemmatizer()
# Select stemmer.
corpus_lemmat = []
for n, token_list in enumerate(corpus_clean):
print "\rLemmatizing article {0} out of {1}".format(n + 1, n_art),
# scode: lemmat_tokens = <FILL IN>
lemmat_tokens = [wnl.lemmatize(token) for token in token_list]
# Add art to the stemmed corpus
# scode: <FILL IN>
corpus_lemmat.append(lemmat_tokens)
print "\nLet's check the first tokens from document 0 after stemming:"
print corpus_lemmat[0][0:30]
# Create dictionary of tokens
D = gensim.corpora.Dictionary(corpus_clean)
n_tokens = len(D)
print "The dictionary contains {0} tokens".format(n_tokens)
print "First tokens in the dictionary: "
for n in range(10):
print str(n) + ": " + D[n]
# Transform token lists into sparse vectors on the D-space
# scode: corpus_bow = <FILL IN>
corpus_bow = [D.doc2bow(doc) for doc in corpus_clean]
Test.assertTrue(len(corpus_bow)==n_art, 'corpus_bow has not the appropriate size')
print "Original article (after cleaning): "
print corpus_clean[0][0:30]
print "Sparse vector representation (first 30 components):"
print corpus_bow[0][0:30]
print "The first component, {0} from document 0, states that token 0 ({1}) appears {2} times".format(
corpus_bow[0][0], D[0], corpus_bow[0][0][1])
print "{0} tokens".format(len(D))
print "{0} Wikipedia articles".format(len(corpus_bow))
# SORTED TOKEN FREQUENCIES (I):
# Create a "flat" corpus with all tuples in a single list
corpus_bow_flat = [item for sublist in corpus_bow for item in sublist]
# Initialize a numpy array that we will use to count tokens.
# token_count[n] should store the number of ocurrences of the n-th token, D[n]
token_count = np.zeros(n_tokens)
# Count the number of occurrences of each token.
for x in corpus_bow_flat:
# Update the proper element in token_count
# scode: <FILL IN>
token_count[x[0]] += x[1]
# Sort by decreasing number of occurences
ids_sorted = np.argsort(- token_count)
tf_sorted = token_count[ids_sorted]
print D[ids_sorted[0]]
print "{0} times in the whole corpus".format(tf_sorted[0])
# SORTED TOKEN FREQUENCIES (II):
plt.rcdefaults()
# Example data
n_bins = 25
hot_tokens = [D[i] for i in ids_sorted[n_bins-1::-1]]
y_pos = np.arange(len(hot_tokens))
z = tf_sorted[n_bins-1::-1]/n_art
plt.barh(y_pos, z, align='center', alpha=0.4)
plt.yticks(y_pos, hot_tokens)
plt.xlabel('Average number of occurrences per article')
plt.title('Token distribution')
plt.show()
display()
# SORTED TOKEN FREQUENCIES:
# Example data
plt.semilogy(tf_sorted)
plt.xlabel('Average number of occurrences per article')
plt.title('Token distribution')
plt.show()
display()
# scode: <WRITE YOUR CODE HERE>
# Example data
cold_tokens = [D[i] for i in ids_sorted if token_count[i]==1]  # use per-token counts, not rank-sorted counts
print "There are {0} cold tokens, which represent {1}% of the total number of tokens in the dictionary".format(
len(cold_tokens), float(len(cold_tokens))/n_tokens*100)
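# Hedged follow-up (not in the original notebook): rare tokens such as these are
# often pruned with Dictionary.filter_extremes. A deep copy is used so the original
# dictionary and the bow corpus built from it stay untouched.
import copy
D_filtered = copy.deepcopy(D)
D_filtered.filter_extremes(no_below=2, no_above=1.0, keep_n=None)
print "Tokens kept after pruning singletons: {0} (out of {1})".format(len(D_filtered), n_tokens)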
# scode: <WRITE YOUR CODE HERE>
# SORTED TOKEN FREQUENCIES (I):
# Count the number of occurrences of each token.
token_count2 = np.zeros(n_tokens)
for x in corpus_bow_flat:
token_count2[x[0]] += (x[1]>0)
# Sort by decreasing number of occurences
ids_sorted2 = np.argsort(- token_count2)
tf_sorted2 = token_count2[ids_sorted2]
# SORTED TOKEN FREQUENCIES (II):
# Example data
n_bins = 25
hot_tokens2 = [D[i] for i in ids_sorted2[n_bins-1::-1]]
y_pos2 = np.arange(len(hot_tokens2))
z2 = tf_sorted2[n_bins-1::-1]/n_art
plt.figure()
plt.barh(y_pos2, z2, align='center', alpha=0.4)
plt.yticks(y_pos2, hot_tokens2)
plt.xlabel('Average number of occurrences per article')
plt.title('Token distribution')
plt.show()
display()
tfidf = gensim.models.TfidfModel(corpus_bow)
doc_bow = [(0, 1), (1, 1)]
tfidf[doc_bow]
corpus_tfidf = tfidf[corpus_bow]
print corpus_tfidf[0][0:5]
# Initialize an LSI transformation
n_topics = 5
# scode: lsi = <FILL IN>
lsi = gensim.models.LsiModel(corpus_tfidf, id2word=D, num_topics=n_topics)
lsi.show_topics(num_topics=-1, num_words=10, log=False, formatted=True)
# SORTED TOKEN FREQUENCIES (II):
plt.rcdefaults()
n_bins = 25
# Example data
y_pos = range(n_bins-1, -1, -1)
# pylab.rcParams['figure.figsize'] = 16, 8 # Set figure size
plt.figure(figsize=(16, 8))
for i in range(n_topics):
### Plot top 25 tokens for topic i
# Read i-thtopic
# scode: <FILL IN>
topic_i = lsi.show_topic(i, topn=n_bins)
tokens = [t[0] for t in topic_i]
weights = [t[1] for t in topic_i]
# Plot
# scode: <FILL IN>
plt.subplot(1, n_topics, i+1)
plt.barh(y_pos, weights, align='center', alpha=0.4)
plt.yticks(y_pos, tokens)
plt.xlabel('Top {0} topic weights'.format(n_bins))
plt.title('Topic {0}'.format(i))
plt.show()
display()
# On real corpora, target dimensionality of
# 200–500 is recommended as a “golden standard”
# Create a double wrapper over the original
# corpus bow tfidf fold-in-lsi
corpus_lsi = lsi[corpus_tfidf]
print corpus_lsi[0]
# Extract weights from corpus_lsi
# scode weight0 = <FILL IN>
weight0 = [doc[0][1] if doc != [] else -np.inf for doc in corpus_lsi]
# Locate the maximum positive weight
nmax = np.argmax(weight0)
print nmax
print weight0[nmax]
print corpus_lsi[nmax]
# Get topic 0
# scode: topic_0 = <FILL IN>
topic_0 = lsi.show_topic(0, topn=n_bins)
# Compute a list of tuples (token, wordcount) for all tokens in topic_0, where wordcount is the number of
# occurences of the token in the article.
# scode: token_counts = <FILL IN>
token_counts = [(t[0], corpus_clean[nmax].count(t[0])) for t in topic_0]
print "Topic 0 is:"
print topic_0
print "Token counts:"
print token_counts
ldag = gensim.models.ldamodel.LdaModel(
corpus=corpus_tfidf, id2word=D, num_topics=10, update_every=1, passes=10)
ldag.print_topics()
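# Hedged extra step (not in the original notebook): the trained gensim LDA model can
# be applied to a single document to obtain its topic mixture as (topic, prob) pairs.
print ldag[corpus_tfidf[0]]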
# For testing LDA, you can use the reuters dataset
# X = lda.datasets.load_reuters()
# vocab = lda.datasets.load_reuters_vocab()
# titles = lda.datasets.load_reuters_titles()
X = np.int32(np.zeros((n_art, n_tokens)))
for n, art in enumerate(corpus_bow):
for t in art:
X[n, t[0]] = t[1]
print X.shape
print X.sum()
vocab = D.values()
titles = corpus_titles
# Default parameters:
# model = lda.LDA(n_topics, n_iter=2000, alpha=0.1, eta=0.01, random_state=None, refresh=10)
model = lda.LDA(n_topics=10, n_iter=1500, random_state=1)
model.fit(X) # model.fit_transform(X) is also available
topic_word = model.topic_word_ # model.components_ also works
# Show topics...
n_top_words = 8
for i, topic_dist in enumerate(topic_word):
topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]
print('Topic {}: {}'.format(i, ' '.join(topic_words)))
doc_topic = model.doc_topic_
for i in range(10):
print("{} (top topic: {})".format(titles[i], doc_topic[i].argmax()))
# This is to apply the model to a new doc(s)
# doc_topic_test = model.transform(X_test)
# for title, topics in zip(titles_test, doc_topic_test):
# print("{} (top topic: {})".format(title, topics.argmax()))
# Adapted from an example in sklearn site
# http://scikit-learn.org/dev/auto_examples/applications/topics_extraction_with_nmf_lda.html
# You can try also with the dataset provided by sklearn in
# from sklearn.datasets import fetch_20newsgroups
# dataset = fetch_20newsgroups(shuffle=True, random_state=1,
# remove=('headers', 'footers', 'quotes'))
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
print("Loading dataset...")
# scode: data_samples = <FILL IN>
print "*".join(['This', 'is', 'an', 'example'])
data_samples = [" ".join(c) for c in corpus_clean]
print 'Document 0:'
print data_samples[0][0:200], '...'
# Use tf (raw term count) features for LDA.
print("Extracting tf features for LDA...")
n_features = 1000
n_samples = 2000
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
print tf[0][0][0]
print("Fitting LDA models with tf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
# scode: lda = <FILL IN>
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method='online', learning_offset=50., random_state=0)
# doc_topic_prior= 1.0/n_topics, topic_word_prior= 1.0/n_topics)
t0 = time()
corpus_lda = lda.fit_transform(tf)
print corpus_lda[10]/np.sum(corpus_lda[10])
print("done in %0.3fs." % (time() - t0))
print corpus_titles[10]
# print corpus_text[10]
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, 20)
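# Hedged extra check (not in the original notebook): scikit-learn's LDA can report
# the perplexity of the fitted model on the term-count matrix (lower is better).
print("Model perplexity: %.1f" % lda.perplexity(tf))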
topics = lda.components_
topic_probs = [t/np.sum(t) for t in topics]
#print topic_probs[0]
print -np.sort(-topic_probs[0])
# SORTED TOKEN FREQUENCIES (II):
plt.rcdefaults()
n_bins = 50
# Example data
y_pos = range(n_bins-1, -1, -1)
# pylab.rcParams['figure.figsize'] = 16, 8 # Set figure size
plt.figure(figsize=(16, 8))
for i in range(n_topics):
### Plot top 25 tokens for topic i
# Read i-thtopic
# scode: <FILL IN>
topic_i = topic_probs[i]
rank = np.argsort(- topic_i)[0:n_bins]
tokens = [tf_feature_names[r] for r in rank]
weights = [topic_i[r] for r in rank]
# Plot
# scode: <FILL IN>
plt.subplot(1, n_topics, i+1)
plt.barh(y_pos, weights, align='center', alpha=0.4)
plt.yticks(y_pos, tokens)
plt.xlabel('Top {0} topic weights'.format(n_bins))
plt.title('Topic {0}'.format(i))
plt.show()
display()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some Functions for working with Inception-v3
Step2: Putting it to use!
Step3: How about more than one image
|
<ASSISTANT_TASK:>
Python Code:
import os
import tensorflow as tf
# import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
import pandas as pd
# Create the TensorFlow graph
def create_graph():
model_dir = './CNW_Wildlife_Identification/imagenet'
with gfile.FastGFile(os.path.join(
model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Start a TensorFlow Session and choose our "tensor" for feature extraction
# ('pool_3:0' is the last layer before classification)
def setup():
create_graph() # Only needs to run the first time
with tf.Session() as sess:
# Get the last feature layer (preclassification) from inception-v3
next_to_last_tensor = sess.graph.get_tensor_by_name('pool_3:0')
s = sess
t = next_to_last_tensor
return s,t
# Get the actual features!
def get_features(image, session, tensor):
if not gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = gfile.FastGFile(image, 'rb').read()
predictions = session.run(tensor,
{'DecodeJpeg/contents:0': image_data})
features = np.squeeze(predictions)
return features.reshape(1,-1)
session, tensor = setup()
features = get_features('CNW_Wildlife_Identification/data/first_sample/EK000026-2.JPG',
session, tensor)
print features
directory = 'CNW_Wildlife_Identification/data/first_sample'
image_list = []
for p, dirs, files in os.walk(directory):
for ff in files:
if ff[-4:].lower() == '.jpg':
image_list.append(p+'/'+ff)
image_list
# In practice this is only run once - so setup() is included as part of this function
def get_features_repeatedly(image_list):
'''take list of image file paths
return numpy array of features
'''
create_graph()
with tf.Session() as session:
# Get the last feature layer (preclassification) from inception-v3
tensor = session.graph.get_tensor_by_name('pool_3:0')
nb_features = 2048
features = np.empty((len(image_list),nb_features))
for ind, image in enumerate(image_list):
# if (ind%50 == 0):
# print('Processing %s...' % (image))
print('Processing %s...' % (image))
if not gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = gfile.FastGFile(image, 'rb').read()
predictions = session.run(tensor,
{'DecodeJpeg/contents:0': image_data})
features[ind,:] = np.squeeze(predictions)
return features
lotsafeatures = get_features_repeatedly(image_list)
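# Hedged follow-up (not in the original notebook): pandas was imported above but not
# used; the extracted features can be kept in a DataFrame indexed by image path and
# written to disk ('inception_features.csv' is a placeholder filename).
features_df = pd.DataFrame(lotsafeatures, index=image_list)
features_df.to_csv('inception_features.csv')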
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Backpropagation
Step2: Gradient Descent
Step3: An implementation of a Deep Neural Network
Step4: Backpropagation
Step5: Solving the ODE
Step6: Using neural network
Step7: Using a deep neural network
|
<ASSISTANT_TASK:>
Python Code:
# Autograd will be used for later, so the numpy wrapper for Autograd must be imported
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
def neural_network(params, x):
# Find the weights (including and biases) for the hidden and output layer.
# Assume that params is a list of parameters for each layer.
# The biases are the first element for each array in params,
# and the weights are the remaning elements in each array in params.
w_hidden = params[0]
w_output = params[1]
# Assumes input x being an one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
## Hidden layer:
# Add a row of ones to include bias
x_input = np.concatenate((np.ones((1,num_values)), x_input ), axis = 0)
z_hidden = np.matmul(w_hidden, x_input)
x_hidden = sigmoid(z_hidden)
## Output layer:
# Include bias:
x_hidden = np.concatenate((np.ones((1,num_values)), x_hidden ), axis = 0)
z_output = np.matmul(w_output, x_hidden)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial(x,params, g0 = 10):
return g0 + x*neural_network(params,x)
# The right side of the ODE:
def g(x, g_trial, gamma = 2):
return -gamma*g_trial
# The cost function:
def cost_function(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum
def solve_ode_neural_network(x, num_neurons_hidden, num_iter, lmb):
## Set up initial weights and biases
# For the hidden layer
p0 = npr.randn(num_neurons_hidden, 2 )
# For the output layer
p1 = npr.randn(1, num_neurons_hidden + 1 ) # +1 since bias is included
P = [p0, p1]
print('Initial cost: %g'%cost_function(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consists now of two arrays;
# one for the gradient w.r.t P_hidden and
# one for the gradient w.r.t P_output
cost_grad = cost_function_grad(P, x)
P[0] = P[0] - lmb * cost_grad[0]
P[1] = P[1] - lmb * cost_grad[1]
print('Final cost: %g'%cost_function(P, x))
return P
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being an one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referencing to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P; find the correct weigths and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial_deep(x,params, g0 = 10):
return g0 + x*deep_neural_network(params,x)
# The same cost function as for the neural network, but calls deep_neural_network instead.
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(deep_neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad consists now of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
def g_analytic(x, gamma = 2, g0 = 10):
return g0*np.exp(-gamma*x)
npr.seed(15)
## Decide the values of arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = 10
num_iter = 10000
lmb = 0.001
P = solve_ode_neural_network(x, num_hidden_neurons, num_iter, lmb)
res = g_trial(x,P)
res_analytical = g_analytic(x)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
npr.seed(15)
## Decide the values of arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = np.array([10,10])
num_iter = 10000
lmb = 0.001
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
res = g_trial_deep(x,P)
res_analytical = g_analytic(x)
plt.figure(figsize=(10,10))
plt.title('Performance of a deep neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','dnn'])
plt.ylabel('g(x)')
plt.show()
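# Hedged extra check (not in the original notebook): quantify how far the deep
# network solution is from the analytical one with the maximum absolute error.
max_error = np.max(np.abs(res[0, :] - res_analytical))
print('Maximum absolute error of the deep network solution: %g' % max_error)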
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sample selection
Step2: 1. Google form analysis
Step3: formatted version for nice display
Step4: 1.1 complete sample
Step5: 1.2 Per temporality
Step6: 1.2.2 answered only after
Step7: 1.2.3 answered both before and after
Step8: 1.2.4 pretest vs posttest
Step9: 1.3 Per demography
Step10: 1.3.2 French speakers
Step11: 1.3.3 Female
Step12: 1.3.4 Male
Step13: 1.3.5 biologists
Step14: broad
Step15: 1.3.6 gamers
Step16: broad
Step17: 1.4 answered only after
Step18: plotCorrelationMatrix( _binarizedMatrix, _title='Questions\' Correlations', _abs=False, _clustered=False, _questionNumbers=False )
Step19: thisClustermap.ax_heatmap.annotate(overlay)
Step20: thisClustermap, overlay = plotCorrelationMatrix(
Step21: thisClustermap, overlay = plotCorrelationMatrix(
Step22: 2. Game sessions
Step23: 3. Per session and per user analysis
Step24: completed vs played time
Step25: x = allScores.copy()
Step26: linear regression
Step27: linear regression 2
Step28: linear regression 3
Step29: data = pd.DataFrame(index = range(0, len(xReshaped)), data = xReshaped, columns = ['score'])
Step30: Completed vs non-completed
Step31: Score increase
Step32: Correlations between durations and score on questions
Step33: max chapter vs scores
Step34: 5. Game map
Step35: Sessions (filtered)
Step36: Sessions of dev IDs
Step37: Unique platforms
Step38: Checkpoints passed / furthest checkpoint (unfiltered)
Step39: Session starts
Step40: Duration
Step41: Duration plot
Step42: Phase 1 vs Phase 2 comparison
Step43: Played time on critical checkpoints
|
<ASSISTANT_TASK:>
Python Code:
%run "../Functions/8. RM-GF correlations.ipynb"
%run "../Functions/Plot.ipynb"
### Online 1.52.2
#gfdf = gfdfWebgl1522PretestPosttestUniqueProfilesVolunteers.copy()
#rmdf = rmdfWebgl1522PretestPosttestUniqueProfilesVolunteers.copy()
### Playtest
#gfdf = gfdfPlaytestTotalPretestPosttestUniqueProfilesVolunteers.copy()
#gfdf = gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
#gfdf = gfdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers.copy()
#rmdf = rmdfPlaytestTotalPretestPosttestUniqueProfilesVolunteers.copy()
#rmdf = rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
#rmdf = rmdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers.copy()
### Online 1.60
#gfdf = gfdfWebgl160PretestPosttestUniqueProfilesVolunteers.copy()
#rmdf = rmdfWebgl160PretestPosttestUniqueProfilesVolunteers.copy()
# For quicker allData switching.
gfdf = gfdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
rmdf = rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
allData = allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.copy()
# For quicker allData switching.
#gfdf = gfdfWebgl1522Timed.copy()
#rmdf = rmdfWebgl1522Timed.copy()
print("sample: gform")
print("surveys: %s" % len(gform))
print("unique users: %s" % getUniqueUserCount(gform))
print("RM before: %s" % len(gform[gform[QTemporality] == answerTemporalities[0]]))
print("GF before: %s" % len(getGFormBefores(gform)))
print("RM after: %s" % len(gform[gform[QTemporality] == answerTemporalities[1]]))
print("GF after: %s" % len(getGFormAfters(gform)))
print("unique biologists: %s" % getUniqueUserCount(getSurveysOfBiologists(gform)))
print("unique gamers: %s" % getUniqueUserCount(getSurveysOfGamers(gform)))
print("unique perfect users: %s" % getUniqueUserCount(getSurveysOfUsersWhoAnsweredBoth(gform)))
print("unique perfect users: %s" % getPerfectPretestPostestPairsCount(gform))
print("sample: gfdf")
print("surveys: %s" % len(gfdf))
print("unique users: %s" % getUniqueUserCount(gfdf))
print("RM before: %s" % len(gfdf[gfdf[QTemporality] == answerTemporalities[0]]))
print("GF before: %s" % len(getGFormBefores(gfdf)))
print("RM after: %s" % len(gfdf[gfdf[QTemporality] == answerTemporalities[1]]))
print("GF after: %s" % len(getGFormAfters(gfdf)))
print("unique biologists: %s" % getUniqueUserCount(getSurveysOfBiologists(gfdf)))
print("unique gamers: %s" % getUniqueUserCount(getSurveysOfGamers(gfdf)))
print("unique perfect users: %s" % getUniqueUserCount(getSurveysOfUsersWhoAnsweredBoth(gfdf)))
print("unique perfect users: %s" % getPerfectPretestPostestPairsCount(gfdf))
print("category | count")
print("--- | ---")
print("sample | gform")
print("surveys | %s" % len(gform))
print("unique users | %s" % getUniqueUserCount(gform))
print("RM before | %s" % len(gform[gform[QTemporality] == answerTemporalities[0]]))
print("GF before | %s" % len(getGFormBefores(gform)))
print("RM after | %s" % len(gform[gform[QTemporality] == answerTemporalities[1]]))
print("GF after | %s" % len(getGFormAfters(gform)))
print("unique biologists | %s" % getUniqueUserCount(getSurveysOfBiologists(gform)))
print("unique gamers | %s" % getUniqueUserCount(getSurveysOfGamers(gform)))
print("unique perfect users | %s" % getUniqueUserCount(getSurveysOfUsersWhoAnsweredBoth(gform)))
print("unique perfect users | %s" % getPerfectPretestPostestPairsCount(gform))
print()
#print("(" + str(pd.to_datetime('today').date()) + ")")
print("("+dataFilesNamesStem+")")
print("category | count")
print("--- | ---")
print("sample | gfdf")
print("surveys | %s" % len(gfdf))
print("unique users | %s" % getUniqueUserCount(gfdf))
print("RM before | %s" % len(gfdf[gfdf[QTemporality] == answerTemporalities[0]]))
print("GF before | %s" % len(getGFormBefores(gfdf)))
print("RM after | %s" % len(gfdf[gfdf[QTemporality] == answerTemporalities[1]]))
print("GF after | %s" % len(getGFormAfters(gfdf)))
print("unique biologists | %s" % getUniqueUserCount(getSurveysOfBiologists(gfdf)))
print("unique gamers | %s" % getUniqueUserCount(getSurveysOfGamers(gfdf)))
print("unique perfect users | %s" % getUniqueUserCount(getSurveysOfUsersWhoAnsweredBoth(gfdf)))
print("unique perfect users | %s" % getPerfectPretestPostestPairsCount(gfdf))
print()
#print("(" + str(pd.to_datetime('today').date()) + ")")
print("("+dataFilesNamesStem+")")
#plotSamples(getDemographicSamples(gfdf))
#plotSamples(getTemporalitySamples(gfdf))
gf_befores = getGFormBefores(gfdf)
rm_befores = getRMBefores(gfdf)
gfrm_befores = getRMBefores(getGFormBefores(gfdf))
(gf_befores[QUserId] == rm_befores[QUserId]).all()
#plotSamples(getDemographicSamples(gf_befores))
gf_afters = getGFormAfters(gfdf)
rm_afters = getRMAfters(gfdf)
gfrm_afters = getRMAfters(getGFormBefores(gfdf))
(gf_afters[QUserId] == rm_afters[QUserId]).all()
#plotSamples(getDemographicSamples(gf_afters))
gf_both = getSurveysOfUsersWhoAnsweredBoth(gfdf, gfMode = True, rmMode = False)
rm_both = getSurveysOfUsersWhoAnsweredBoth(gfdf, gfMode = False, rmMode = True)
gfrm_both = getSurveysOfUsersWhoAnsweredBoth(gfdf, gfMode = True, rmMode = True)
#plotSamples(getDemographicSamples(gf_both))
#plotSamples(getDemographicSamples(rm_both))
#plotSamples(getDemographicSamples(gfrm_both))
matrixToDisplay = plotBasicStats(
gfdf,
horizontalPlot=False,
sortedAlong="",
figsize=(12,20),
title = 'percentages of correct answers',
annot=True,
annot_kws={"size": 13},
font_scale=1.3,
);
matrixToDisplay = plotBasicStats(
gfdf,
title = 'percentages of correct answers (sorted)',
sortedAlong="progression",
horizontalPlot=False,
figsize=(12,20),
annot=True,
annot_kws={"size": 13},
font_scale=1.3,
);
if False:
#barIndices = matrixToDisplay.index
barIndices = scientificQuestions
matrixToDisplaySubset = matrixToDisplay.loc[scientificQuestions,:]
pretestBars = matrixToDisplaySubset.loc[barIndices, 'pretest']
posttestBars = matrixToDisplaySubset.loc[barIndices, 'posttest']
plt.bar(np.arange(len(barIndices)), pretestBars)
if False:
# data to plot
#barIndices = matrixToDisplay.index
barIndices = scientificQuestions
matrixToDisplaySubset = matrixToDisplay.loc[scientificQuestions,:]
pretestBars = matrixToDisplaySubset.loc[barIndices, 'pretest']
posttestBars = matrixToDisplaySubset.loc[barIndices, 'posttest']
n_groups = len(barIndices)
# create plot
fig, ax = plt.subplots(figsize=(15,7))
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index, pretestBars, bar_width,
alpha=opacity,
color='b',
label='pretest')
rects2 = plt.bar(index + bar_width, posttestBars, bar_width,
alpha=opacity,
color='g',
label='posttest')
plt.xlabel('questions')
plt.ylabel('percentage correct')
#plt.title('Percentage correct - pretest and posttest')
#plt.xticks(index + bar_width, barIndices, rotation='vertical')
plt.xticks(index + bar_width, questionCategories, rotation='vertical')
plt.legend()
plt.tight_layout()
plt.show()
barIndicesHardcoded = [
'Device: PCONS:GFP:RBS:TER XXX', 'Device: PBAD:GFP:RBS:TER XXX',
'Device: AMPR:RBS:PCONS:TER XXX', 'Device: GFP:RBS:PCONS:TER XXX',
'Device: RBS:PCONS:AMPR:TER XXX', 'Device: RBS:PCONS:FLHDC:TER XXX',
'Function - game: CDS', 'Function: PR', 'Function - biology: CDS',
'Example: CDS', 'Function: Plasmid', 'Function: TER', 'Function: RBS',
'Name: Operator XXX', 'Name: RBS', 'Name: CDS', 'Name: PR',
'Name: Plasmid', 'Name: TER',
'Device: PCONS:RBS:FLHDC:TER', 'Device: PBAD:RBS:ARA:TER', 'Device: PBAD:RBS:GFP:TER',
'Unequip the movement device: effect',
'BioBricks and devices composition', 'Green fluorescence',
'Ampicillin antibiotic', 'Genotype and phenotype',]
if True:
# data to plot
#barIndices = matrixToDisplay.index
barIndices = scientificQuestions
matrixToDisplaySubset = matrixToDisplay.loc[scientificQuestions,:]
matrixToDisplaySubset['qCategory'] = questionCategories
matrixToDisplaySubset = matrixToDisplaySubset.sort_values(by=['qCategory', 'posttest'])
barIndices = matrixToDisplaySubset.index
barIndices = barIndicesHardcoded
matrixToDisplaySubset = matrixToDisplaySubset.loc[barIndices,:]
barIndices = matrixToDisplaySubset.index
pretestBars = matrixToDisplaySubset.loc[barIndices, 'pretest']
posttestBars = matrixToDisplaySubset.loc[barIndices, 'posttest']
n_groups = len(barIndices)
# create plot
fig, ax = plt.subplots(figsize=(15,7))
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.8
rects1 = plt.bar(index, pretestBars, bar_width,
alpha=opacity,
color='b',
label='pretest')
rects2 = plt.bar(index + bar_width, posttestBars, bar_width,
alpha=opacity,
color='g',
label='posttest')
#plt.xlabel('questions')
plt.ylabel('Correct answers')
#plt.title('Percentage correct - pretest and posttest')
#plt.xticks(index + bar_width, barIndices.map(questionCategoriesDictionary), rotation='vertical')
plt.xticks(index + bar_width, barIndices, rotation='vertical')
#plt.xticks(index + bar_width, questionCategories, rotation='vertical')
plt.legend()
plt.tight_layout()
plt.show()
if True:
# data to plot
#barIndices = matrixToDisplay.index
barIndices = scientificQuestions
matrixToDisplaySubset = matrixToDisplay.loc[scientificQuestions,:]
matrixToDisplaySubset['qCategory'] = questionCategories
matrixToDisplaySubset = matrixToDisplaySubset.sort_values(by=['qCategory', 'posttest'])
barIndices = matrixToDisplaySubset.index
for qCategory in set(questionCategories):
questionsOfThatCategory = [k for k, v in questionCategoriesDictionary.items() if v == qCategory]
barIndices = questionsOfThatCategory
bars = matrixToDisplaySubset.loc[barIndices,:].sort_values(by=['posttest'], ascending=False)
barIndices = bars.index
pretestBars = bars['pretest']
posttestBars = bars['posttest']
n_groups = len(barIndices)
# create plot
fig, ax = plt.subplots(figsize=(12, n_groups*2))
plt.xlim(0,85)
index = np.arange(len(questionsOfThatCategory))
bar_width = 0.35
opacity = 0.8
rects1 = plt.barh(index + bar_width, pretestBars, bar_width,
alpha=opacity,
color='b',
label='pretest')
rects2 = plt.barh(index, posttestBars, bar_width,
alpha=opacity,
color='g',
label='posttest')
plt.xlabel('correct answers (%)')
#plt.yticks(index + bar_width, barIndices)
plt.legend()
plt.tight_layout()
plt.show()
fig.savefig("score pretest posttest h big nolabel " + qCategory)
#matrixToDisplay.to_csv("../../data/sortedPrePostProgression.csv")
#matrixToDisplay.T
cohortEN = gfdf[gfdf[QLanguage] == enLanguageID]
#plotSamples(getTemporalitySamples(cohortEN))
cohortFR = gfdf[gfdf[QLanguage] == frLanguageID]
#plotSamples(getTemporalitySamples(cohortFR))
cohortF = gfdf[gfdf[QGender] == 'Female']
#plotSamples(getTemporalitySamples(cohortF))
cohortM = gfdf[gfdf[QGender] == 'Male']
#plotSamples(getTemporalitySamples(cohortM))
cohortBioS = getSurveysOfBiologists(gfdf)
#plotSamples(getTemporalitySamples(cohortBioS))
cohortBioB = getSurveysOfBiologists(gfdf, False)
#plotSamples(getTemporalitySamples(cohortBioB))
cohortGamS = getSurveysOfGamers(gfdf)
#plotSamples(getTemporalitySamples(cohortGamS))
cohortGamB = getSurveysOfGamers(gfdf, False)
#plotSamples(getTemporalitySamples(cohortGamB))
#T-tests between pretest and posttest scores among some player groups
plotBasicStats(gfdf, horizontalPlot=True, sortedAlong="progression", figsize=(20,4));
plotBasicStats(cohortF, horizontalPlot=True, sortedAlong="progression", figsize=(20,4));
plotBasicStats(cohortM, horizontalPlot=True, sortedAlong="progression", figsize=(20,4));
plotBasicStats(cohortGamB, horizontalPlot=True, sortedAlong="progression", figsize=(20,4));
sciBinarizedBefore = getAllBinarized(getRMBefores(gfdf))
#sciBinarizedBefore = getAllBinarized(getGFBefores())
#plotCorrelationMatrix( _binarizedMatrix, _title='Questions\' Correlations', _abs=False, _clustered=False, _questionNumbers=False ):
plotCorrelationMatrix(
sciBinarizedBefore,
_abs=False,
_clustered=False,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_title='Correlations on survey questions before',
)
sciBinarizedAfter = getAllBinarized(getRMAfters(gfdf))
#plotCorrelationMatrix( _binarizedMatrix, _title='Questions\' Correlations', _abs=False, _clustered=False, _questionNumbers=False ):
plotCorrelationMatrix(
sciBinarizedAfter,
_abs=False,
_clustered=False,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_title='Correlations on survey questions after',
)
#plotCorrelationMatrix( _binarizedMatrix, _title='Questions\' Correlations', _abs=False, _clustered=False, _questionNumbers=False ):
thisClustermap, overlay = plotCorrelationMatrix(
sciBinarizedAfter,
_abs=False,
_clustered=True,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_metric='correlation'
)
allQuestions = correctAnswers + demographicAnswers
allBinarized = getAllBinarized(gfdf, _source = allQuestions)
allBinarizedBefore = getAllBinarized(getRMBefores(gfdf), _source = allQuestions)
allBinarizedAfter = getAllBinarized(getRMAfters(gfdf), _source = allQuestions)
#plotCorrelationMatrix( _binarizedMatrix, _title='Questions\' Correlations', _abs=False, _clustered=False, _questionNumbers=False ):
plotCorrelationMatrix(
allBinarized,
_abs=True,
_clustered=False,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_title='Correlation of all answers',
)
#plotCorrelationMatrix( _binarizedMatrix, _title='Questions\' Correlations', _abs=False, _clustered=False, _questionNumbers=False ):
plotCorrelationMatrix(
allBinarizedBefore,
_abs=False,
_clustered=False,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_title='Correlations on all questions before',
)
plotCorrelationMatrix(
allBinarizedAfter,
_abs=False,
_clustered=False,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_title='Correlation of all answers after',
)
allBinarizedAfterSub = allBinarizedAfter.copy()
allBinarizedAfterSub = allBinarizedAfterSub.loc[:,['Age'] + scientificQuestions]
plotCorrelationMatrix(
allBinarizedAfterSub,
_abs=False,
_clustered=False,
_questionNumbers=True,
_annot = True,
_figsize = (20,20),
_title='Correlation of all answers after',
)
#startDate = minimum152Date
#endDate = maximum152Date
startDate = rmdf['userTime'].min().date() - datetime.timedelta(days=1)
endDate = rmdf['userTime'].max().date() + datetime.timedelta(days=1)
valuesPerDay = rmdf['userTime'].map(lambda t: t.date()).value_counts().sort_index()
plotPerDay(valuesPerDay, title='RedMetrics events', startDate=startDate, endDate=endDate)
valuesPerDay[pd.to_datetime('2017-09-01', utc=True).date():pd.to_datetime('2017-09-30', utc=True).date()]
valuesPerDay = rmdf[rmdf['type'] == 'start']['userTime'].map(lambda t: t.date()).value_counts().sort_index()
plotPerDay(valuesPerDay, title='sessions', startDate=startDate, endDate=endDate)
valuesPerDay[pd.to_datetime('2017-09-01', utc=True).date():pd.to_datetime('2017-09-30', utc=True).date()]
valuesPerDay = rmdf.groupby('userId').agg({ "userTime": np.min })['userTime'].map(lambda t: t.date()).value_counts().sort_index()
plotPerDay(valuesPerDay, title='game users', startDate=startDate, endDate=endDate)
valuesPerDay[pd.to_datetime('2017-09-01', utc=True).date():pd.to_datetime('2017-09-30', utc=True).date()]
valuesPerDay = gfdf.groupby(localplayerguidkey).agg({ QTimestamp: np.min })[QTimestamp].map(lambda t: t.date()).value_counts().sort_index()
plotPerDay(valuesPerDay, title='survey answers', startDate=startDate, endDate=endDate)
valuesPerDay[pd.to_datetime('2017-09-01', utc=True).date():pd.to_datetime('2017-09-30', utc=True).date()]
beforesPerDay = gfdf[gfdf[QTemporality] == answerTemporalities[0]].groupby(localplayerguidkey).agg({ QTimestamp: np.min })[QTimestamp].map(lambda t: t.date()).value_counts().sort_index()
aftersPerDay = gfdf[gfdf[QTemporality] == answerTemporalities[1]].groupby(localplayerguidkey).agg({ QTimestamp: np.min })[QTimestamp].map(lambda t: t.date()).value_counts().sort_index()
undefinedPerDay = gfdf[gfdf[QTemporality] == answerTemporalities[2]].groupby(localplayerguidkey).agg({ QTimestamp: np.min })[QTimestamp].map(lambda t: t.date()).value_counts().sort_index()
plotPerDay(beforesPerDay, title='survey befores', startDate=startDate, endDate=endDate)
plotPerDay(aftersPerDay, title='survey afters', startDate=startDate, endDate=endDate)
plotPerDay(undefinedPerDay, title='survey undefined', startDate=startDate, endDate=endDate)
#pretests = gform[gform[QTemporality] == answerTemporalities[0]]
#pretests[pretests[QBBFunctionPlasmid] == ]
binarized = sciBinarizedBefore
intermediaryNumerator = getCrossCorrectAnswers(binarized).round().astype(int)*100
percentagesCrossCorrect = (intermediaryNumerator / binarized.shape[0]).round().astype(int)
totalPerQuestion = np.dot(np.ones(binarized.shape[0]), binarized)
sciBinarizedBefore.columns[totalPerQuestion == 0]
getPercentageCrossCorrect(sciBinarizedBefore, figsize=(40,40))
getPercentageCrossCorrect(sciBinarizedAfter, figsize=(40,40))
len(gfdf), len(getAllResponders(gfdf))
matrixToDisplay = plotBasicStats(gfdf, horizontalPlot=True, sortedAlong="progression", figsize=(20,4));
subjectCount = allData.shape[1]
measuredPretest = 100*allData.loc[pretestScientificQuestions,:].sum(axis='columns')/subjectCount
measuredPretest.index = scientificQuestions
measuredPosttest = 100*allData.loc[posttestScientificQuestions,:].sum(axis='columns')/subjectCount
measuredPosttest.index = scientificQuestions
measuredDelta2 = (measuredPosttest - measuredPretest)
measuredDelta2 = pd.DataFrame(measuredDelta2.round().astype(int))
measuredDelta2.columns = ["measuredDelta2"]
measuredDelta2 = measuredDelta2.sort_values(by = "measuredDelta2", ascending = True).T
_fig = plt.figure(figsize=(20,2))
_ax1 = plt.subplot(111)
_ax1.set_title("measuredDelta2")
sns.heatmap(
measuredDelta2,
ax=_ax1,
cmap=plt.cm.jet,
square=True,
annot=True,
fmt='d',
vmin=0,
vmax=100,
)
#(matrixToDisplay.loc['progression',scientificQuestions] - measuredDelta2.loc['measuredDelta2',scientificQuestions])
testDF = pd.DataFrame(columns=[
'pretest1', 'posttest1', 'measuredDelta',
'pretest2', 'posttest2', 'matrixToDisplay'], data = 0, index= scientificQuestions)
testDF['pretest1'] = measuredPretest
testDF['posttest1'] = measuredPosttest
testDF['measuredDelta'] = measuredDelta2.T['measuredDelta2']
testDF['pretest2'] = matrixToDisplay.T['pretest'][scientificQuestions]
testDF['posttest2'] = matrixToDisplay.T['posttest'][scientificQuestions]
testDF['matrixToDisplay'] = matrixToDisplay.T['progression'][scientificQuestions]
testDF = testDF.round().astype(int)
#testDF
measuredDelta = allData.loc[deltaScientificQuestions,:].sum(axis='columns')
measuredDelta.mean(), measuredDelta.median()
#measuredDelta.sort_values()
#pretestData = getAllUserVectorData( gfdf[gfdf[QTemporality] == answerTemporalities[0]], _source = correctAnswers )
#posttestData = getAllUserVectorData( gfdf[gfdf[QTemporality] == answerTemporalities[1]], _source = correctAnswers )
plotAllUserVectorDataCorrelationMatrix(
allData.T,
_abs=False,
_figsize = (40,40),
_clustered=False
)
demographicCriteria = demographicQuestions.copy()
plotAllUserVectorDataCorrelationMatrix(
allData.T,
_abs=False,
_figsize = (20,20),
_clustered=False,
columnSubset=[]\
+ completionTimesCriteria
+ totalTimesCriteria
+ pretestScientificQuestions
#+ posttestScientificQuestions
#+ deltaScientificQuestions
+ overallScoreCriteria
#+ demographicCriteria
)
#completers = rmdf[rmdf['type'] == 'complete'][QUserId]
#nonCompleter = rmdf[~rmdf[QUserId].isin(completers)][QUserId].iloc[0]
#getUserDataVector(nonCompleter)#.loc[14,:]
#allData.shape
#allData.index
data = pd.DataFrame(index=allData.columns, columns=["time", "posttestScore", "deltaScore","completed"])
for userId in data.index:
data.loc[userId, "time"] = getPlayedTimeUser(userId, _rmDF = rmdf)['tutorial']['totalSpentTime'].total_seconds()
data.loc[userId, "posttestScore"] = allData.loc['scoreposttest', userId]
data.loc[userId, "pretestScore"] = allData.loc['scorepretest', userId]
data.loc[userId, "deltaScore"] = allData.loc['scoredelta', userId]
data.loc[userId, "completed"] = allData.loc['complete', userId]
data.shape
x = data["posttestScore"]
x2 = data[data["completed"]==1]["posttestScore"]
y = data["time"]
y2 = data[data["completed"]==1]["time"]
plt.figure(figsize=(12, 4))
ax1 = plt.subplot(121)
plt.scatter(x, y)#, c='blue', alpha=0.5)
plt.scatter(x2, y2)#, c='red', alpha=0.5)
plt.xlabel('score')
plt.ylabel('time')
plt.title("time against score, n=" + str(len(x)))
#ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax2 = plt.subplot(122)
plt.scatter(y, x)
plt.scatter(y2, x2)
plt.xlabel('time')
plt.ylabel('score')
plt.title("score against time, n=" + str(len(x)))
ax2.legend(loc='center left', bbox_to_anchor=(-1.2, 0.9), labels =["unfinished games","completed games"])
plt.show()
x = data["posttestScore"].astype(float)
x2 = data[data["completed"]==1]["posttestScore"].astype(float)
y = data["time"].astype(float)
y2 = data[data["completed"]==1]["time"].astype(float)
# Get the linear models
lm_original = np.polyfit(x, y, 1)
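# Illustrative note: for a degree-1 fit, np.polyfit returns [slope, intercept],
# i.e. here the slope is in seconds of play time per additional score point.
print('fitted slope: %g s per score point, intercept: %g s' % (lm_original[0], lm_original[1]))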
# calculate the y values based on the co-efficients from the model
r_x, r_y = zip(*((i, i*lm_original[0] + lm_original[1]) for i in x))
# Put in to a data frame, to keep is all nice
lm_original_plot = pd.DataFrame({
'scores' : r_x,
'times' : r_y
})
lm_original_plot = lm_original_plot.drop_duplicates()
lm_original_plot = lm_original_plot.sort_values(by="scores")
lm_original_plot = lm_original_plot.drop(lm_original_plot.index[1:-1])
plt.figure(figsize=(6, 4))
ax = plt.subplot(111)
plt.scatter(x, y)
plt.scatter(x2, y2)
# Plot the original data and model
#lm_original_plot.plot(kind='line', color='Red', x='scores', y='times', ax=ax)
plt.plot('scores', 'times', data=lm_original_plot, color='Red')
plt.xlabel('score')
plt.ylabel('time')
plt.show()
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
x = data["posttestScore"].astype(float)
x2 = data[data["completed"]==1]["posttestScore"].astype(float)
y = data["time"].astype(float)
y2 = data[data["completed"]==1]["time"].astype(float)
xReshaped = x.values.reshape(-1, 1)
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(xReshaped, y)
# Make predictions using the testing set
pred = regr.predict(xReshaped)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(y, pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y, pred))
# Plot outputs
plt.scatter(x, y, color='black')
plt.plot(x, pred, color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
regr.intercept_,regr.coef_
sns.regplot(x=x, y=y, color="b")
plt.scatter(x2, y2, color='red')
plt.xlabel("score")
plt.ylabel("time played")
#import patsy
import statsmodels.formula.api as smf
data2 = data.astype(float)
### STATSMODELS ###
timeScoreformula = 'time ~ posttestScore'
# create a fitted model
lm1 = smf.ols(formula=timeScoreformula, data=data2).fit()
# print the coefficients
#lm1.params
#lm1.summary()
# print the confidence intervals for the model coefficients
lm1.conf_int()
# print the p-values for the model coefficients
# Represents the probability that the coefficient is actually zero
lm1.pvalues
# print the R-squared value for the model
lm1.rsquared
### STATSMODELS ###
timeScoreformula = 'time ~ posttestScore'
lm1 = smf.ols(formula=timeScoreformula, data=data2).fit()
lm2 = smf.ols(formula=timeScoreformula, data=data2[data2["completed"] == 0]).fit()
lm3 = smf.ols(formula=timeScoreformula, data=data2[data2["completed"] == 1]).fit()
lm1.rsquared,lm2.rsquared,lm3.rsquared
data['deltaScoreRate'] = data['deltaScore']/data['pretestScore']
meanDelta = data['deltaScore'].mean()
meanPretest = data['pretestScore'].mean()
meanDelta/meanPretest
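# Illustrative: the same ratio expressed as a percentage of the mean pretest score.
print("mean relative score increase: %.1f%%" % (100 * meanDelta / meanPretest))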
overallScoreCriteria = ["scorepretest", "scoreposttest", "scoredelta",]
stemTimesCriteria = ["ch" + "{0:0=2d}".format(i) for i in range(0,15)]
completionTimesCriteria = [st + "completion" for st in stemTimesCriteria] + ["completionTime"]
totalTimesCriteria = [st + "total" for st in stemTimesCriteria] + ["totalTime"]
allData2 = allData.T.rename(str,axis="columns")
allData3 = allData2[allData2['ch00completion'] < pd.Timedelta.max.total_seconds()]
len(allData3)
sectionNb = '01'
criterionLabel = 'ch' + sectionNb + 'completion'
sectionName = 'tutorial.Checkpoint' + sectionNb
# inspect sessions with an uninitialized (near-infinite) completion time for this checkpoint
allData2[allData2[criterionLabel]>9e+09]
testUserId = allData2[allData2[criterionLabel]>9e+09].index[0]
#rmdf or rmdfConcat
_rmdf = rmdfConcat
_rmdf[(_rmdf[QUserId] == testUserId) \
& (_rmdf['type'] == 'reach') \
& (_rmdf['section'] == 'tutorial.Checkpoint' + sectionNb) \
].loc[:, ['section', 'userTime']]
testUserId
_rmdf[(_rmdf[QUserId] == testUserId)]
gfdf[gfdf[QUserId] == testUserId]
#chosenPrefix = answerTemporalities[0]
chosenPrefix = answerTemporalities[1]
#chosenPrefix = "delta"
#warning: not the same as displayed columns, see lower
chosenCriteria = [chosenPrefix + " " + q for q in scientificQuestions] + overallScoreCriteria
durationsScoresCorrelations = pd.DataFrame(index=completionTimesCriteria+totalTimesCriteria, columns=chosenCriteria, data=np.nan)
durationsScoresCorrelations = durationsScoresCorrelations.rename(str, axis='rows')
annotationMatrix = np.empty(shape=[durationsScoresCorrelations.shape[0], 1], dtype=int)
#annotationMatrix2D = np.empty(durationsScoresCorrelations.shape, dtype=str)
allData2 = allData.T.rename(str,axis="columns")
for i in range(len(durationsScoresCorrelations.index)):
checkpoint = durationsScoresCorrelations.index[i]
allData3 = allData2[allData2[checkpoint] < pd.Timedelta.max.total_seconds()]
annotationMatrix[i] = len(allData3)
for q in durationsScoresCorrelations.columns:
corr = np.corrcoef(allData3[checkpoint], allData3[q])
if corr[0,0] < 0:
print("[" + checkpoint + ";" + q + "]:" + str(corr[0,0]))
#if pd.isnull(corr[0,1]):
# print("[" + checkpoint + ";" + q + "] null")
durationsScoresCorrelations.loc[checkpoint, q] = corr[0,1]
_fig, (_a0, _a1) = plt.subplots(1,2, gridspec_kw = {'width_ratios':[50, 1]}, figsize=(15,10))
#_a0.set_title("correlations between times and " + chosenPrefix + " scores")
_a0.set_title("correlations between times and scores")
durationsScoresCorrelations.columns = [q for q in scientificQuestions] + ["pretest score", "posttest score", "score increase",]
sns.heatmap(durationsScoresCorrelations, ax=_a0, cmap=plt.cm.jet, square=True, vmin=-1, vmax=1,
# annot=True,
# annot=annotationMatrix2D
#cbar_kws= {'panchor':(0.0, 0.0)}
)
_a1.set_title("")
sns.heatmap(annotationMatrix, ax=_a1, annot=annotationMatrix)
_fig.tight_layout()
#chosenPrefix = answerTemporalities[0]
#chosenPrefix = answerTemporalities[1]
#chosenPrefix = "delta"
#warning: not the same as displayed columns, see lower
#questions1 = [QAge,QGender]
#questions1 = [QEnjoyed]
#questions2 = [
# QCuriosityBiology,QCuriositySyntheticBiology,QCuriosityVideoGames,
# QCuriosityEngineering,
## QPlayed,
# QAge,QGender,
# QInterestVideoGames,
# QInterestBiology,QStudiedBiology,QPlayVideoGames,
## QHeardSynBioOrBioBricks,
## QVolunteer,
# QEnjoyed]
#questions2 = [
# QCuriosityBiology,
# QCuriositySyntheticBiology,
# QCuriosityVideoGames,
# QCuriosityEngineering,
# QPlayed,
#QAge,
#QGender,
# QInterestVideoGames,
# QInterestBiology,
# QStudiedBiology,
# QPlayVideoGames,
# QHeardSynBioOrBioBricks,
# QVolunteer,
# QEnjoyed #use only posttest value
# ]
questions2 = [
QCuriosityEngineering,
QCuriosityBiology,
QCuriositySyntheticBiology,
QInterestBiology,
QStudiedBiology,
QCuriosityVideoGames,
QInterestVideoGames,
QPlayVideoGames,
# QPlayed,
QAge,
QGender,
# QHeardSynBioOrBioBricks,
# QVolunteer,
# QEnjoyed #use only posttest value
]
#chosenCriteria1 = completionTimesCriteria+totalTimesCriteria
#chosenCriteria1 = ["posttest " + q for q in scientificQuestions] + overallScoreCriteria
chosenCriteria1 = overallScoreCriteria
#chosenCriteria1 = ["pretest " + q for q in questions1]
#chosenCriteria1 = ["posttest " + q for q in questions1]
#chosenCriteria2 = ["posttest " + q for q in questions2]
#chosenCriteria2 = ["pretest " + q for q in questions2] + ["posttest " + QEnjoyed]
#chosenCriteria2 = ["posttest " + q for q in scientificQuestions] + overallScoreCriteria
chosenCriteria2 = ["pretest " + q for q in questions2]
#chosenCriteria2 = ["maxChapter"]
criteriaScoresCorrelations = pd.DataFrame(index=chosenCriteria1, columns=chosenCriteria2, data=np.nan)
criteriaScoresCorrelations = criteriaScoresCorrelations.rename(str, axis='rows')
annotationMatrix = np.empty(shape=[criteriaScoresCorrelations.shape[0], 1], dtype=int)
#annotationMatrix2D = np.empty(durationsScoresCorrelations.shape, dtype=str)
#allData2 = allData.T.rename(str,axis="columns")
#allData2 = allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers
allData2 = allNumericDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers
allData2 = allData2.T.rename(str,axis="columns")
for i in range(len(criteriaScoresCorrelations.index)):
criterion1i = criteriaScoresCorrelations.index[i]
allData3 = allData2
if criterion1i in completionTimesCriteria:
allData3 = allData2[allData2[criterion1i] < pd.Timedelta.max.total_seconds()]
annotationMatrix[i] = len(allData3)
for criterion2j in criteriaScoresCorrelations.columns:
corr = np.corrcoef(allData3[criterion1i], allData3[criterion2j])
if corr[0,0] < 0:
print("[" + criterion1i + ";" + criterion2j + "]:" + str(corr[0,0]))
#if pd.isnull(corr[0,1]):
# print("[" + criterion1i + ";" + criterion2j + "] null")
criteriaScoresCorrelations.loc[criterion1i, criterion2j] = corr[0,1]
#index 1
#criteriaScoresCorrelations.index = scientificQuestions + ["pretest score", "posttest score", "score increase"]
#criteriaScoresCorrelations.index = questions1
criteriaScoresCorrelations.index = ["pretest score", "posttest score", "score increase"]
#columns 2
criteriaScoresCorrelations.columns = questions2
#criteriaScoresCorrelations.columns = questions2 + [QEnjoyed]
#criteriaScoresCorrelations.columns = scientificQuestions + ["pretest score", "posttest score", "score increase"]
#criteriaScoresCorrelations.columns = ["max. checkpoint"]
# (10,20) big
# (12,5) small
#_fig, (_a0) = plt.subplots(1,1, figsize=(10,18))
_fig, (_a0) = plt.subplots(1,1, figsize=(6,10))
#_fig, (_a0, _a1) = plt.subplots(
# 1,2, figsize=(5,25), gridspec_kw = {'width_ratios':[15, 1]})
# 2,1, figsize=(17,12), gridspec_kw = {'height_ratios':[30, 1]})
#sns.set(font_scale=1)
#sns.set(font_scale=1.3)
sns.set(font_scale=1.7)
data = criteriaScoresCorrelations.T
#_a0.set_title("correlations between times and demographic criteria")
#_a0.set_title("correlations between scores and demographic criteria")
#_a0.set_title("correlations between (age, gender) and (curiosity, interest, practice, enjoyment)")
#_a0.set_title("correlations between enjoyment and age, gender, curiosity, interest, practice, enjoyment")
#plt.title("correlations between enjoyment and age, gender, curiosity, interest, practice")
#_a0.set_title("correlations between times and scores")
#_a0.set_title("correlations between scores and maximum checkpoint reached")
_a0.set_anchor('C')
sns.heatmap(data, ax=_a0,
#cmap=plt.cm.jet,
cmap="RdBu_r",
square=True, vmin=-1, vmax=1,
annot=True,
# cbar = False,
# annot=annotationMatrix2D
#cbar_kws= {'panchor':(0.0, 0.0)}
#cbar_kws = dict(use_gridspec=False,location="right"),
annot_kws={"size": 13},
#annot_kws={"size": 13},
)
#_a1.set_anchor('C')
#data = annotationMatrix.T
#sns.heatmap(data, ax=_a1, annot=data, square=True,
# cbar = False,xticklabels=False,yticklabels=False,annot_kws={"size": 12})
_fig.tight_layout()
_fig, (_a1) = plt.subplots(1,1, figsize=(10,5))
_a1.set_anchor('C')
#data = pd.Series(data=annotationMatrix.flatten(), index=completionTimesCriteria+totalTimesCriteria)
data = annotationMatrix.T
sns.heatmap(data,
ax=_a1,
annot=data,
square=True,
cbar = False,
#xticklabels=False,
xticklabels=completionTimesCriteria+totalTimesCriteria,
yticklabels=False,
#yticklabels=completionTimesCriteria+totalTimesCriteria,
annot_kws={"size": 12})
_fig.tight_layout()
i = 0
checkpoint = durationsScoresCorrelations.index[i]
print(checkpoint + ": " + str(len(allData2[allData2[checkpoint] < pd.Timedelta.max.total_seconds()])))
testUserId = gfdf[QUserId].unique()[12]
getCheckpointsTotalTimesUser(testUserId, rmdf)
#timedSectionnedEvents.to_csv("ch4.csv", encoding=csvEncoding)
#getAllResponders(gfdf), _source = correctAnswers, _rmDF = rmdf
#testUserId = "4731525f-62dd-4128-ab56-3991b403e17e"
#getUserDataVector(testUserId,_source = correctAnswers, _rmDF = rmdf)
# delta or posttest?
# posttest: values 0, 1 managed in plotCorrectedAnswerPerMaxCheckpoint
# delta can't work: values 0, 1 and -1 not managed in plotCorrectedAnswerPerMaxCheckpoint
chosenPrefix = "posttest"
chosenQuestions = [chosenPrefix + " " + q for q in scientificQuestions]
criteria = ["maxChapter","complete"] + chosenQuestions + overallScoreCriteria
#data = allBinaryDataPlaytestPhase1PretestPosttestUniqueProfiles.loc[criteria,:]
data = allBinaryDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.loc[criteria,:]
#data = allBinaryDataPlaytestPhase2PretestPosttestUniqueProfiles.loc[criteria,:]
#data = allBinaryDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers.loc[criteria,:]
#data = allNumericDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.loc[criteria,:]
criterion1 = chosenPrefix + ' Function - biology: CDS'
criterion2 = chosenPrefix + ' Device: RBS:PCONS:FLHDC:TER XXX'
xIndex = 'maxChapter'
dataT = data.T
def getCheckpointThreshold(bars0, bars1, thresholdRatio = .9):
totalCount = np.sum(bars0) + np.sum(bars1)
cumulative0 = np.cumsum(list(reversed(bars0)))
cumulative1 = np.cumsum(list(reversed(bars1)))
result = 0
#np.argmax(cumulative1>=thresholdCount)
for i in range(len(cumulative1)):
thresholdCount = np.floor(thresholdRatio * (cumulative0[i] + cumulative1[i]))
if cumulative1[i] < thresholdCount:
result = 15-i
break
return result
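# Minimal illustration of getCheckpointThreshold on made-up counts (purely
# synthetic numbers, not from the dataset): bars0/bars1 are incorrect/correct
# answer counts per max checkpoint; the function appears to return the lowest
# max checkpoint from which the cumulative correct-answer ratio stays >= the threshold.
_bars0 = [5, 4, 3, 2, 1] + [0] * 10
_bars1 = [0, 1, 2, 3, 4] + [5] * 10
print(getCheckpointThreshold(_bars0, _bars1, thresholdRatio=0.9))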
from matplotlib import rc
import pandas as pd
# stacked horizontal bar plot; cf df.plot.barh?
def plotCorrectedAnswerPerMaxCheckpoint(dataT, criterion, saveFig=False, plotFig=True, thresholdRatio=0.9):
# y-axis in bold
rc('font', weight='bold')
# Values of each group
bars0 = [len(dataT[(dataT[criterion]==0) & (dataT[xIndex]==maxChapterValue)]) for maxChapterValue in range(15)]
bars1 = [len(dataT[(dataT[criterion]==1) & (dataT[xIndex]==maxChapterValue)]) for maxChapterValue in range(15)]
if plotFig:
# Heights of bars1 + bars2 (TO DO better)
bars = [bars0[i] + bars1[i] for i in range(len(bars0))]
# The position of the bars on the x-axis
r = [i for i in range(15)]
# Names of group and bar width
names = [i for i in range(15)]
barWidth = 1
fig, ax = plt.subplots(1,1, figsize=(10,6))
# Create red bars
ax.bar(r, bars0, color='#cc0c28', edgecolor='white', width=barWidth)
        # Create green bars (middle) on top of the first (red) ones
ax.bar(r, bars1, bottom=bars0, color='#557f2d', edgecolor='white', width=barWidth)
# Custom X axis
plt.xticks(r, names, fontweight='bold')
plt.xlabel("max. checkpoint")
plt.ylabel("count")
plt.title("Answers to question '" + criterion + "' against max. checkpoint, n=" + str(len(dataT.index)))
ax.legend(["incorrect", "correct"],
bbox_to_anchor=(0.7, 0.7),
# loc="upper center",
)
# Show graphic
plt.show()
if saveFig:
#correctedAnswersPerMaxCheckpoint
questionTitle = "cAPMC-'" + criterion.replace(" ", "_").replace(":", "") + "'"
try:
fig.savefig(questionTitle)
except:
print("- savefig failed for " + questionTitle)
return [bars0, bars1, getCheckpointThreshold(bars0, bars1, thresholdRatio)]
[bars0, bars1, threshold] = plotCorrectedAnswerPerMaxCheckpoint(dataT, criterion2, saveFig=False, plotFig=True)
threshold
getCheckpointThreshold(bars0, bars1, thresholdRatio = 1)
np.cumsum(bars1)
thresholdsCheckpoints = pd.Series(index = chosenQuestions, data = 15, name = "thresholdsCheckpoints")
for criterion in chosenQuestions:
[bars0, bars1, threshold] = plotCorrectedAnswerPerMaxCheckpoint(
dataT,
criterion,
saveFig=False,
plotFig=False,
thresholdRatio=0.8
)
thresholdsCheckpoints[criterion] = threshold
thresholdsCheckpoints
thresholdsCheckpoints
def plotCheckpointsFromThreshold(dataT, criterion, saveFig=False):
xs = []
ys = []
for x in np.linspace(0.5,1,11):
[bars0, bars1, thresholdCheckpoint] = plotCorrectedAnswerPerMaxCheckpoint(
dataT,
criterion,
saveFig=False,
plotFig=False,
thresholdRatio=x
)
xs += [x]
ys += [thresholdCheckpoint]
#print("x=" + str(x) +": " + str(thresholdCheckpoint))
fig = plt.figure(figsize=(12, 4))
ax1 = plt.subplot(111)
plt.plot(xs, ys)
plt.ylim((-0.5, 14.5))
plt.xlabel('threshold')
plt.ylabel('checkpoint')
plt.title("Checkpoint against threshold, for question '" + criterion + "'")
plt.show()
if saveFig:
#correctedAnswersPerMaxCheckpoint
questionTitle = "cFT-'" + criterion.replace(" ", "_").replace(":", "") + "'"
try:
fig.savefig(questionTitle)
except:
print("- savefig failed for " + questionTitle)
return ys
ys = plotCheckpointsFromThreshold(dataT, criterion2)
def getMostFrequentThreshold(ys):
result = [x for x in ys if ((x != 15) & (x != 0))]
if len(result) == 0:
return Counter(ys).most_common(1)[0]
else:
return Counter(result).most_common(1)[0]
from collections import Counter
thresholdsCheckpoints2 = pd.DataFrame(index = chosenQuestions, columns = ['threshold', 'count'], data = 15)
for criterion in chosenQuestions:
ys = plotCheckpointsFromThreshold(dataT, criterion, saveFig=False)
thresholdsCheckpoints2.loc[criterion, 'threshold'] = getMostFrequentThreshold(ys)[0]
thresholdsCheckpoints2.loc[criterion, 'count'] = getMostFrequentThreshold(ys)[1]
thresholdsCheckpoints2
#for criterion in criteria:
criterion = 'scoreposttest'
x = data.loc["maxChapter",:].values
y = data.loc[criterion,:].values
plt.figure(figsize=(6, 6))
ax1 = plt.subplot(111)
plt.scatter(x, y)#, c='blue', alpha=0.5)
plt.xlabel('max. checkpoint')
plt.ylabel("posttest score")
plt.title("Posttest score against max. checkpoint, n=" + str(len(x)))
plt.show()
sns.regplot(x=x, y=y, color="b", x_estimator=np.mean)
plt.xlabel("max. checkpoint")
plt.ylabel("posttest score")
plt.title("Posttest score against max. checkpoint, n=" + str(len(x)))
#import patsy
import statsmodels.formula.api as smf
dataT = data.T.astype(float)
### STATSMODELS ###
scoreCheckpointformula = criterion + ' ~ maxChapter'
# create a fitted model
lm1 = smf.ols(formula=scoreCheckpointformula, data=dataT).fit()
# print the coefficients
#lm1.params
#lm1.summary()
# print the confidence intervals for the model coefficients
lm1.conf_int()
# print the p-values for the model coefficients
# Represents the probability that the coefficient is actually zero
lm1.pvalues
# print the R-squared value for the model
lm1.rsquared
from scipy import optimize
#x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ,11, 12, 13, 14, 15], dtype=float)
#y = np.array([5, 7, 9, 11, 13, 15, 28.92, 42.81, 56.7, 70.59, 84.47, 98.36, 112.25, 126.14, 140.03])
def piecewise_linear(x, x0, y0, k1, k2):
    # Two-segment ("broken-stick") linear model: the segments meet at the
    # breakpoint (x0, y0); k1 is the slope below x0 and k2 the slope above it.
    return np.piecewise(x, [x < x0], [lambda x: k1*x + y0 - k1*x0, lambda x: k2*x + y0 - k2*x0])
p , e = optimize.curve_fit(piecewise_linear, x, y)
xd = np.linspace(0, 14, 100)
plt.plot(x, y, "o")
plt.plot(xd, piecewise_linear(xd, *p))
plt.xlabel("max. checkpoint")
plt.ylabel("posttest score")
# piecewise regression
plt.title("Posttest score against max. checkpoint, segmented regression")
#players = rmdf.loc[:, playerFilteringColumns]
players = safeGetNormalizedRedMetricsCSV( rmdf )
players.shape
#players = players.dropna(how='any')
#players.head(1)
#rmdf.head(1)
players.shape[0]
#players = players[~players['userId'].isin(excludedIDs)];
#players.shape[0]
sessionscount = players["sessionId"].nunique()
sessionscount
uniqueplayers = players['userId']
uniqueplayers = uniqueplayers.unique()
uniqueplayers.shape[0]
#uniqueplayers
uniqueplatforms = players['customData.platform'].unique()
uniqueplatforms
checkpoints = rmdf.loc[:, ['type', 'section', 'sessionId']]
checkpoints = checkpoints[checkpoints['type']=='reach'].loc[:,['section','sessionId']]
checkpoints = checkpoints[checkpoints['section'].str.startswith('tutorial', na=False)]
checkpoints = checkpoints.groupby("sessionId")
checkpoints = checkpoints.max()
#len(checkpoints)
checkpoints.head()
maxCheckpointTable = pd.DataFrame({"maxCheckpoint" : checkpoints.values.flatten()})
maxCheckpointCounts = maxCheckpointTable["maxCheckpoint"].value_counts()
maxCheckpointCounts['Start'] = None
maxCheckpointCounts = maxCheckpointCounts.sort_index()
print('\nmaxCheckpointCounts=\n{0}'.format(str(maxCheckpointCounts)))
maxCheckpointCountsTable = pd.DataFrame({"maxCheckpoint" : maxCheckpointCounts.values})
maxCheckpointCountsTableCount = maxCheckpointCountsTable.sum(0)[0]
maxCheckpointCountsTableCount
checkpoints.count()
maxCheckpointCountsTable.head()
maxCheckpointCountsTable.describe()
genericTreatment( maxCheckpointCountsTable, "best checkpoint reached", "game sessions", 0, maxCheckpointCountsTableCount, False, True )
#starts = rmdf.loc[:, checkpointsRelevantColumns]
#starts = checkpoints[checkpoints['type']=='start'].loc[:,['playerId']]
#starts = checkpoints[checkpoints['section'].str.startswith('tutorial', na=False)]
#starts = checkpoints.groupby("playerId")
#starts = checkpoints.max()
#starts.head()
startTutorial1Count = sessionscount
neverReachedGameSessionCount = startTutorial1Count - maxCheckpointCountsTableCount
fullMaxCheckpointCounts = maxCheckpointCounts
fullMaxCheckpointCounts['Start'] = neverReachedGameSessionCount
fullMaxCheckpointCountsTable = pd.DataFrame({"fullMaxCheckpoint" : fullMaxCheckpointCounts.values})
genericTreatment( fullMaxCheckpointCountsTable, "best checkpoint reached", "game sessions", 0, startTutorial1Count, False, True )
print('\nfullMaxCheckpointCountsTable=\n{0}'.format(fullMaxCheckpointCountsTable))
fullMaxCheckpointCountsTable.describe()
durations = players.groupby("sessionId").agg({ "serverTime": [ np.min, np.max ] })
durations["duration"] = pd.to_datetime(durations["serverTime"]["amax"]) - pd.to_datetime(durations["serverTime"]["amin"])
durations["duration"] = durations["duration"].map(lambda x: np.timedelta64(x, 's'))
durations = durations.sort_values(by=['duration'], ascending=[False])
durations.head()
type(durations)
#durations.loc[:,'duration']
#durations = durations[4:]
durations["duration_seconds"] = durations["duration"].map(lambda x: pd.Timedelta(x).seconds)
maxDuration = np.max(durations["duration_seconds"])
durations["duration_rank"] = durations["duration_seconds"].rank(ascending=False)
ax = durations.plot(x="duration_rank", y="duration_seconds")
plt.xlabel("game session")
plt.ylabel("time played (s)")
#plt.legend('')
ax.legend_.remove()
plt.xlim(0, sessionscount)
plt.ylim(0, maxDuration)
durations["duration_seconds"].describe()
#durations.head()
getCompletedRate(rmdfPlaytestTotalPretestPosttestUniqueProfilesVolunteers),\
getCompletedRate(rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers),\
getCompletedRate(rmdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers),\
rmdfPlaytestPhase1PretestPosttestUniqueProfilesVolunteers[QUserId].nunique(),\
rmdfPlaytestPhase2PretestPosttestUniqueProfilesVolunteers[QUserId].nunique()
getCompletedRate(rmdfWebgl1522Timed),\
getCompletedRate(rmdfWebgl160Timed)
### Scores
scoresPhase1 = allDataPlaytestPhase1PretestPosttestUniqueProfiles.loc['scoreposttest',:]
scoresPhase2 = allDataPlaytestPhase2PretestPosttestUniqueProfiles.loc['scoreposttest',:]
ttest = ttest_ind(scoresPhase1, scoresPhase2)
ttest
print("t test: statistic=" + repr(ttest.statistic) + " pvalue=" + repr(ttest.pvalue))
scoresPhase1 = allDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.loc['scoreposttest',:]
scoresPhase2 = allDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers.loc['scoreposttest',:]
ttest = ttest_ind(scoresPhase1, scoresPhase2)
ttest
print("t test: statistic=" + repr(ttest.statistic) + " pvalue=" + repr(ttest.pvalue))
nbs = ["{0:0=2d}".format(i) for i in range(0,15)]
completions = ['ch' + nb + 'completion' for nb in nbs]
totals = ['ch' + nb + 'total' for nb in nbs]
timeLabels = ['totalTime', 'completionTime'] + completions + totals
for timeLabel in timeLabels:
timesPhase1 = allDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.loc[timeLabel,:]
timesPhase2 = allDataPlaytestPhase2PretestPosttestUniqueProfilesVolunteers.loc[timeLabel,:]
ttest = ttest_ind(timesPhase1, timesPhase2)
ttest
print(timeLabel + " t test: statistic=" + repr(ttest.statistic) + " pvalue=" + repr(ttest.pvalue))
allDataPlaytestPhase1PretestPosttestUniqueProfilesVolunteers.index.tolist()
getRecordPlayer(rmdf1522, gform)
getRecordPlayer(rmdf160, gform)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: View data schema and size
Step2: Preview data (alternate way)
Step4: Load sample into Pandas dataframe
Step5: Explore dataframe
Step6: The Python variable trips is now a Pandas DataFrame. The .head() function above prints the first 5 rows of a DataFrame.
Step7: Distribution analysis
Step8: It appears that trips are being charged substantial fares despite having 0 distance.
Step9: It appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50).
Step10: Looking at the samples above, we can see that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. In any case tips are discretionary and should not be included in our fare estimation tool.
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import bigquery
PROJECT = !gcloud config get-value project
PROJECT = PROJECT[0]
%env PROJECT=$PROJECT
%%bigquery --project $PROJECT
#standardSQL
SELECT
*
FROM
`nyc-tlc.yellow.trips`
WHERE
RAND() < .0000001 -- sample a small fraction of the data
%%bigquery --project $PROJECT
#standardSQL
SELECT
*
FROM
`nyc-tlc.yellow.trips`
WHERE
-- repeatable 1/5000th sample
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 5000)) = 1
bq = bigquery.Client(project=PROJECT)
query_string = """
#standardSQL
SELECT
    *
FROM
    `nyc-tlc.yellow.trips`
WHERE
    -- repeatable 1/5000th sample
    ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 5000)) = 1
"""
trips = bq.query(query_string).to_dataframe()
print(type(trips))
trips.head()
trips.describe()
# first 10 rows with trip_distance == 0
trips[trips["trip_distance"] == 0][:10]
%matplotlib inline
trips.plot(x="trip_distance", y="fare_amount", kind="scatter")
# first 10 rows with toll_amount > 0
trips[trips["tolls_amount"] > 0][:10]
%%bigquery --project $PROJECT
#standardSQL
SELECT
(tolls_amount + fare_amount) AS fare_amount, -- label
pickup_datetime,
pickup_longitude,
pickup_latitude,
dropoff_longitude,
dropoff_latitude
FROM
`nyc-tlc.yellow.trips`
WHERE
-- Clean Data
trip_distance > 0
AND passenger_count > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
-- repeatable 1/5000th sample
AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 5000)) = 1
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TensorFlow 2.0 quickstart for beginners
Step2: Load and prepare the MNIST dataset. Convert the samples from integers to floating-point numbers
Step3: Build the tf.keras.Sequential model by stacking layers. Choose an optimizer and loss function for training the model
Step4: Train and evaluate the model
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Install TensorFlow
import tensorflow as tf
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
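# Quick shape check (illustrative): 60,000 training and 10,000 test images, 28x28 pixels each.
print(x_train.shape, x_test.shape)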
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5)
model.evaluate(x_test, y_test, verbose=2)
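# Illustrative extra step (not part of the original guide): look at the class
# probabilities the trained model assigns to the first test image.
import numpy as np
probs = model.predict(x_test[:1])
print("predicted digit:", np.argmax(probs[0]), "with probability %.3f" % np.max(probs[0]))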
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Load a shapefile that represents the river network
Step2: Alright, let's see what fields we read in with this shapefile
Step3: Great! Looks like we have length (reach length), upstream drainage area (drainage area), x and y verticies of each link/reach (x and y of polyline), and bed elevation (topographic elevation).
Step4: Our network consists of 29 links between 30 nodes. In the plot above, X and Y represent the plan-view coordinates of the node locations.
Step5: We must distinguish between topographic elevation (the top surface of the bed sediment) and bedrock elevation (the surface of the river in the absence of modeled sediment).
Step6: In order to track sediment motion, we classify parcels as either active (representing mobile surface sediment) or inactive (immobile subsurface) during each timestep. The active parcels are the most recent parcels to arrive in the link. During a timestep, active parcels are transported downstream (increasing their location_in_link, which is a normalized value ranging from 0 to 1) according to a sediment transport formula.
Step7: In addition to the required parcel attributes listed above, you can designate optional parcel characteristics, depending on your needs. For example
Step8: We now collect the arrays into a dictionary of variables, some of which will be tracked through time (["item_id", "time"]), and others of which will remain constant through time
Step9: With all of the required attributes collected, we can create the parcels DataRecord. Often, parcels will eventually transport off of the downstream-most link. To track these parcels, we have designated a "dummy_element" here, which has index value -2.
Step10: 3. Run the NetworkSedimentTransporter
Step11: Before running the NST, we need to determine flow direction on the grid (upstream and downstream for each link). To do so, we initialize and run a Landlab flow director component
Step12: Then, we initialize the network sediment transporter
Step13: Now we are ready to run the model forward in time
Step14: 4. Plot the model results
Step15: Non-network plotting
Step16: We can also plot individual parcel characteristics. The plot below shows the total transport distance of each parcel through the whole model run as a function of the parcel's grain size (during the final timestep).
Step17: The plot below is an example of accessing variables associated with the grid (grid.at_link.X, or grid.at_node.X), as well as a variable associated with this instance of NetworkModelGrid (nmg.X)
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings("ignore")
import os
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
from landlab.components import FlowDirectorSteepest, NetworkSedimentTransporter
from landlab.data_record import DataRecord
from landlab.grid.network import NetworkModelGrid
from landlab.plot import graph
from landlab.io import read_shapefile
from landlab import ExampleData
from landlab.plot import plot_network_and_parcels
%matplotlib inline
datadir = ExampleData("io/shapefile", case="methow").base
shp_file = datadir / "MethowSubBasin.shp"
points_shapefile = datadir / "MethowSubBasin_Nodes_4.shp"
grid = read_shapefile(
shp_file,
points_shapefile=points_shapefile,
node_fields=["usarea_km2", "Elev_m"],
link_fields=["usarea_km2", "Length_m"],
link_field_conversion={
"usarea_km2": "drainage_area",
"Slope": "channel_slope",
"Length_m": "reach_length",
},
node_field_conversion={
"usarea_km2": "drainage_area",
"Elev_m": "topographic__elevation",
},
threshold=0.01,
)
grid.at_link.keys()
grid.at_node.keys()
graph.plot_graph(grid, at="node,link")
grid.number_of_links
grid.number_of_nodes
grid.at_node["bedrock__elevation"] = grid.at_node["topographic__elevation"].copy()
grid.at_link["channel_width"] = 1 * np.ones(grid.number_of_links) # m
grid.at_link["flow_depth"] = 0.5 * np.ones(grid.number_of_links) # m
# element_id is the link on which the parcel begins.
element_id = np.repeat(np.arange(grid.number_of_links), 50)
element_id = np.expand_dims(element_id, axis=1)
volume = 1 * np.ones(np.shape(element_id)) # (m3)
active_layer = np.ones(np.shape(element_id)) # 1= active, 0 = inactive
density = 2650 * np.ones(np.size(element_id)) # (kg/m3)
abrasion_rate = 0 * np.ones(np.size(element_id)) # (mass loss /m)
# Lognormal GSD
medianD = 0.15 # m
mu = np.log(medianD)
sigma = np.log(2)  # sigma = ln(D84/D50); here we assume D84 = 2*D50
np.random.seed(0)
D = np.random.lognormal(
mu, sigma, np.shape(element_id)
) # (m) the diameter of grains in each parcel
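# Illustrative check: the sampled median grain size should be close to medianD.
print("sampled median D: %.3f m (target %.2f m)" % (np.median(D), medianD))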
time_arrival_in_link = np.random.rand(np.size(element_id), 1)
location_in_link = np.random.rand(np.size(element_id), 1)
lithology = ["quartzite"] * np.size(element_id)
variables = {
"abrasion_rate": (["item_id"], abrasion_rate),
"density": (["item_id"], density),
"lithology": (["item_id"], lithology),
"time_arrival_in_link": (["item_id", "time"], time_arrival_in_link),
"active_layer": (["item_id", "time"], active_layer),
"location_in_link": (["item_id", "time"], location_in_link),
"D": (["item_id", "time"], D),
"volume": (["item_id", "time"], volume),
}
items = {"grid_element": "link", "element_id": element_id}
parcels = DataRecord(
grid,
items=items,
time=[0.0],
data_vars=variables,
dummy_elements={"link": [NetworkSedimentTransporter.OUT_OF_NETWORK]},
)
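# Quick look at what was just created (illustrative): element_id was built with
# 50 parcels per link, so we expect 50 * number_of_links parcels in total.
print("number of parcels:", element_id.size, "on", grid.number_of_links, "links")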
timesteps = 10 # total number of timesteps
dt = 60 * 60 * 24 * 2 # length of timestep (seconds)
fd = FlowDirectorSteepest(grid, "topographic__elevation")
fd.run_one_step()
nst = NetworkSedimentTransporter(
grid,
parcels,
fd,
bed_porosity=0.3,
g=9.81,
fluid_density=1000,
transport_method="WilcockCrowe",
)
for t in range(0, (timesteps * dt), dt):
nst.run_one_step(dt)
print("Model time: ", t / (60 * 60 * 24), "days passed")
timestep_of_interest = 6
originating_link = 27
# filter the parcels to calculate total volumes of only the parcels that originated in the chosen link
parcelfilter = np.zeros_like(parcels.dataset.element_id, dtype=bool)
parcelfilter[:, timestep_of_interest] = (
parcels.dataset.element_id[:, 0] == originating_link
)
vol_orig_link = parcels.calc_aggregate_value(
xr.Dataset.sum, "volume", at="link", filter_array=parcelfilter, fill_value=0.0
)
fig = plot_network_and_parcels(
grid,
parcels,
link_attribute=vol_orig_link,
link_attribute_title="Vol of sed originating on link x",
network_linewidth=5,
parcel_alpha=0,
)
parcel_vol_on_grid = parcels.dataset["volume"].values
parcel_vol_on_grid[parcels.dataset["element_id"].values == -2] = 0
# plt.figure(figsize=(8,6))
plt.plot(
np.asarray(parcels.time_coordinates) / (60 * 60 * 24),
np.sum(parcel_vol_on_grid, axis=0),
"-",
linewidth=3,
alpha=0.5,
)
plt.ylabel("Total volume of parcels on grid $[m^3]$")
plt.xlabel("Time [days]")
plt.show()
plt.loglog(parcels.dataset.D[:, -1], nst._distance_traveled_cumulative, ".")
plt.xlabel("Parcel grain size (m)")
plt.ylabel("Cumulative parcel travel distance (m)")
# Note: some of the smallest grain travel distances can exceed the length of the
# grid by "overshooting" during a single timestep of high transport rate
plt.plot(grid.at_link["channel_slope"], nst.d_mean_active, ".")
plt.xlabel("Channel slope (m/m)")
plt.ylabel("Mean grain size of active layer (m)")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating Synthetic Data
Step2: Next, similar to the sparse linear inverse demo, we create Bernoulli-Gaussian data.
Step3: Now, we create a random transform $A$ and output $z_1 = Az_0 + w$.
Step4: Representing the GLM as a Multi-Layer Network
Step5: We next create a list of message handlers, one for each of the two unknown variables, z0 and z1. Again, we will just use the simple message handlers.
Step6: Running the Multi-Layer VAMP Solver
Step7: We now run the solver by calling the solve() method. For a small problem like this, this should be close to instantaneous.
Step8: The VAMP solver estimate is the field zhat. Since there are two variables, zhat is a list. We extract the estimate zhat0=zhat[0] for z0. Then, we plot the first column (icol=0) of the true data and estimate.
Step10: Since the probit measurement model is invariant to the scaling of vector z0, we measure errors via a debiased normalized MSE computed with the following function.
Step11: We can then measure the debiased normalized MSE of the estimate.
Step12: Finally, we can plot the actual and predicted debiased MSE as a function of the iteration number. When the solver was constructed, we passed the argument hist_list=['zhat', 'zhatvar'], which told it to store the estimate zhat and the predicted error variance zhatvar at each iteration. We can recover these values from solver.hist_dict, the history dictionary, and then plot the predicted and actual MSE for each of the two variables; the two match well. Note that the iterations are indexed as "half-iterations" since each iteration takes two passes.
|
<ASSISTANT_TASK:>
Python Code:
# Add the vampyre path to the system path
import os
import sys
vp_path = os.path.abspath('../../')
if not vp_path in sys.path:
sys.path.append(vp_path)
import vampyre as vp
# Load the other packages
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# Parameters
nz0 = 512 # number of components per column of z0
nz1 = 4096 # number of components per column of z1
ncol = 10 # number of columns
# Compute the shapes
zshape0 = (nz0,ncol) # Shape of z0 matrix
zshape1 = (nz1,ncol) # Shape of z1 matrix
Ashape = (nz1,nz0) # Shape of A matrix
# Paramaeters
sparse_rat = 0.1 # sparsity ratio
zmean_act = 0 # mean for the active components
zvar_act = 1 # variance for the active components
snr = 30 # SNR in dB
# Generate the random input
z0 = np.random.normal(zmean_act, np.sqrt(zvar_act), zshape0)
u = np.random.uniform(0, 1, zshape0) < sparse_rat
z0 = z0*u
# Random transform
b = np.zeros(zshape1)
A = np.random.normal(0, 1/np.sqrt(nz0), Ashape)
Az0 = A.dot(z0)
# Add noise
wvar = np.mean(np.abs(Az0)**2)*np.power(10, -0.1*snr)
z1 = Az0 + np.random.normal(0,np.sqrt(wvar), zshape1)
# Quantize
thresh = 0
y = (z1 > thresh)
# Create estimator for the input prior
map_est = False
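# Bernoulli-Gaussian prior: a mixture of a Gaussian (active entries, weight sparse_rat) and a point mass at zero (inactive entries)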
est0_gauss = vp.estim.GaussEst(zmean_act,zvar_act,zshape0,map_est=map_est)
est0_dis = vp.estim.DiscreteEst(0,1,zshape0)
est_in = vp.estim.MixEst([est0_gauss,est0_dis],[sparse_rat,1-sparse_rat])
# Estimator for the output
est_out = vp.estim.HardThreshEst(y,zshape1,thresh=thresh)
# Estimator for the linear transform
Aop = vp.trans.MatrixLT(A,zshape0)
est_lin = vp.estim.LinEstimTwo(Aop,b,wvar)
# Put all estimators in a list
est_list = [est_in,est_lin,est_out]
# Create the message handlers
damp=1
msg_hdl0 = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape0,damp=damp)
msg_hdl1 = vp.estim.MsgHdlSimp(map_est=map_est, shape=zshape1,damp=damp)
# Put the handlers in a list
msg_hdl_list = [msg_hdl0,msg_hdl1]
nit = 10 # number of iterations
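# hist_list asks the solver to record the estimate and its predicted variance at every half-iteration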
solver = vp.solver.MLVamp(est_list,msg_hdl_list,comp_cost=True,\
hist_list=['zhat','zhatvar'],nit=nit)
solver.solve()
zhat = solver.zhat
zhat0 = zhat[0]
icol = 0 # column to plot
t = np.array(range(nz0))
plt.plot(t,z0[:,icol])
plt.plot(t,zhat0[:,icol])
plt.axis([0,nz0,-3,3])
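# (optional addition, not in the original) label the two overlaid curves
plt.legend(['true z0', 'VAMP estimate'])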
def debias_mse(zhat,ztrue):
    """
    If zhat and ztrue are 1D vectors, the function computes the *debiased normalized MSE* defined as:
        dmse_lin = min_c ||ztrue - c*zhat||^2 / ||ztrue||^2 = 1 - |zhat'*ztrue|^2 / (||ztrue||^2 * ||zhat||^2)
    The function returns the value in dB:  dmse = 10*log10(dmse_lin)
    If zhat and ztrue are matrices, dmse_lin is computed for each column and then averaged over the columns.
    """
zcorr = np.abs(np.sum(zhat.conj()*ztrue,axis=0))**2
zhatpow = np.sum(np.abs(zhat)**2,axis=0)
zpow = np.sum(np.abs(ztrue)**2,axis=0)
tol = 1e-8
if np.any(zhatpow < tol) or np.any(zpow < tol):
dmse = 0
else:
dmse = 10*np.log10(np.mean(1 - zcorr/zhatpow/zpow))
return dmse
ztrue = [z0,z1]
zhat = solver.zhat
nvar = len(ztrue)
dmse = np.zeros(nvar)
for i in range(nvar):
zhati = zhat[i]
ztruei = ztrue[i]
dmse[i] = debias_mse(zhati,ztruei)
print("z{0:d} d-MSE: {1:7.2f}".format(i, dmse[i]))
# Compute the MSE as a function of the iteration
zhat_hist = solver.hist_dict['zhat']
zvar_hist = solver.hist_dict['zhatvar']
nit = len(zhat_hist)
mse_act = np.zeros((nit,nvar))
mse_pred = np.zeros((nit,nvar))
for ivar in range(nvar):
zpowi = np.mean(np.abs(ztrue[ivar])**2, axis=0)
for it in range(nit):
zhati = zhat_hist[it][ivar]
zhatvari = zvar_hist[it][ivar]
mse_act[it,ivar] = debias_mse(zhati,ztrue[ivar])
mse_pred[it,ivar] = 10*np.log10(np.mean(zhatvari/zpowi))
for ivar in range(nvar):
plt.subplot(1,nvar,ivar+1)
plt.plot(range(nit), mse_act[:,ivar], 'o-', linewidth=2)
plt.plot(range(nit), mse_pred[:,ivar], 's', linewidth=1)
plt.xlabel('Half iteration')
if (ivar == 0):
plt.ylabel('Normalized MSE (dB)')
plt.legend(['Actual', 'Predicted'])
plt.title("z{0:d}".format(ivar))
plt.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prerequisites
Step2: Visualizing the MRI
Step3: Getting our MRI voxel EEG locations to head (and MRI surface RAS) coords
Step4: We can then get our transformation from the MRI coordinate frame (where our digitized EEG electrode positions are defined) to the head coordinate frame.
Step5: Let's apply this digitization to our dataset, and in the process restrict the raw data to the EEG (and stim) channels.
Step6: Now we can do standard sensor-space operations like making joint plots of the evoked data.
Step7: Getting a source estimate
Step8: Now we can actually compute the forward solution.
Step9: Finally let's compute the inverse and apply it
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD Style.
import os.path as op
import nibabel
from nilearn.plotting import plot_glass_brain
import numpy as np
import mne
from mne.channels import compute_native_head_t, read_custom_montage
from mne.viz import plot_alignment
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
bem_dir = op.join(subjects_dir, 'sample', 'bem')
fname_bem = op.join(bem_dir, 'sample-5120-5120-5120-bem-sol.fif')
fname_src = op.join(bem_dir, 'sample-oct-6-src.fif')
misc_path = mne.datasets.misc.data_path()
fname_T1_electrodes = op.join(misc_path, 'sample_eeg_mri', 'T1_electrodes.mgz')
fname_mon = op.join(misc_path, 'sample_eeg_mri', 'sample_mri_montage.elc')
img = nibabel.load(fname_T1_electrodes) # original subject MRI w/EEG
ras_mni_t = mne.transforms.read_ras_mni_t('sample', subjects_dir) # from FS
mni_affine = np.dot(ras_mni_t['trans'], img.affine) # vox->ras->MNI
img_mni = nibabel.Nifti1Image(img.dataobj, mni_affine) # now in MNI coords!
plot_glass_brain(img_mni, cmap='hot_black_bone', threshold=0., black_bg=True,
resampling_interpolation='nearest', colorbar=True)
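# coord_frame='mri' declares these electrode positions in FreeSurfer surface RAS (MRI) coordinates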
dig_montage = read_custom_montage(fname_mon, head_size=None, coord_frame='mri')
dig_montage.plot()
trans = compute_native_head_t(dig_montage)
print(trans) # should be mri->head, as the "native" space here is MRI
raw = mne.io.read_raw_fif(fname_raw)
raw.pick_types(meg=False, eeg=True, stim=True, exclude=()).load_data()
raw.set_montage(dig_montage)
raw.plot_sensors(show_names=True)
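# MNE's inverse methods require an average EEG reference, added here as a projector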
raw.set_eeg_reference(projection=True)
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events)
cov = mne.compute_covariance(epochs, tmax=0.)
evoked = epochs['1'].average() # trigger 1 in auditory/left
evoked.plot_joint()
fig = plot_alignment(
evoked.info, trans=trans, show_axes=True, surfaces='head-dense',
subject='sample', subjects_dir=subjects_dir)
fwd = mne.make_forward_solution(
evoked.info, trans=trans, src=fname_src, bem=fname_bem, verbose=True)
inv = mne.minimum_norm.make_inverse_operator(
evoked.info, fwd, cov, verbose=True)
stc = mne.minimum_norm.apply_inverse(evoked, inv)
brain = stc.plot(subjects_dir=subjects_dir, initial_time=0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Process MEG data
Step2: Compute the evoked response
Step3: Inverse modeling
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import (make_inverse_operator, apply_inverse,
write_inverse_operator)
mne.set_log_level('WARNING')
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_r=1) # event trigger and conditions
tmin = -0.2 # start of each epoch (200ms before the trigger)
tmax = 0.5 # end of each epoch (500ms after the trigger)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
exclude='bads')
baseline = (None, 0) # means from the first instant to t = 0
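# peak-to-peak rejection thresholds: gradiometers (T/m), magnetometers (T), EOG (V)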
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=baseline, reject=reject)
# compute regularized noise covariance
noise_cov = mne.compute_covariance(
epochs, tmax=0., method=['shrunk', 'empirical'])
fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info)
evoked = epochs.average()
evoked.plot()
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Restrict forward solution as necessary for MEG
fwd = mne.pick_types_forward(fwd, meg=True, eeg=False)
# make an M/EEG, MEG-only, and EEG-only inverse operators
info = evoked.info
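# loose=0.2 relaxes the source orientation constraint; depth=0.8 applies depth weighting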
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
loose=0.2, depth=0.8)
write_inverse_operator('sample_audvis-meg-oct-6-inv.fif',
inverse_operator)
# Compute inverse solution
method = "dSPM"
snr = 3.
lambda2 = 1. / snr ** 2
stc = apply_inverse(evoked, inverse_operator, lambda2,
method=method, pick_ori=None)
# visualize
subjects_dir = data_path + '/subjects'
brain = stc.plot(surface='inflated', hemi='rh', subjects_dir=subjects_dir)
brain.set_data_time_index(45)
brain.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
brain.show_view('lateral')
# morph data to average brain
stc_fsaverage = stc.morph(subject_to='fsaverage', subjects_dir=subjects_dir)
brain_fsaverage = stc_fsaverage.plot(surface='inflated', hemi='rh',
subjects_dir=subjects_dir)
brain_fsaverage.set_data_time_index(45)
brain_fsaverage.scale_data_colormap(fmin=8, fmid=12, fmax=15, transparent=True)
brain_fsaverage.show_view('lateral')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Clickable Surface
Step2: Design our own texture
Step3: Lines
Step4: Camera
Step6: Parametric Functions
|
<ASSISTANT_TASK:>
Python Code:
# Assumed setup (not shown in the original): import pythreejs and the display helpers;
# make_text and height_texture are helper functions from the original example notebook.
from pythreejs import *
from IPython.display import display
from ipywidgets import HTML, Text
from traitlets import link
import numpy as np
ball = Mesh(geometry=SphereGeometry(radius=1), material=LambertMaterial(color='red'), position=[2,1,0])
scene = Scene(children=[ball, AmbientLight(color=0x777777), make_text('Hello World!', height=.6)])
c = PerspectiveCamera(position=[0,5,5], up=[0,0,1], children=[DirectionalLight(color='white',
position=[3,5,1],
intensity=0.5)])
renderer = Renderer(camera=c, scene = scene, controls=[OrbitControls(controlling=c)])
display(renderer)
ball.geometry.radius=0.5
import time, math
ball.material.color = 0x4400dd
for i in range(1,150,2):
ball.geometry.radius=i/100.
ball.material.color +=0x000300
ball.position = [math.cos(i/10.), math.sin(i/50.), i/100.]
time.sleep(.05)
nx,ny=(20,20)
xmax=1
x = np.linspace(-xmax,xmax,nx)
y = np.linspace(-xmax,xmax,ny)
xx, yy = np.meshgrid(x,y)
z = xx**2-yy**2
#z[6,1] = float('nan')
surf_g = SurfaceGeometry(z=list(z[::-1].flat),
width=2*xmax,
height=2*xmax,
width_segments=nx-1,
height_segments=ny-1)
surf = Mesh(geometry=surf_g, material=LambertMaterial(map=height_texture(z[::-1], 'YlGnBu_r')))
surfgrid = SurfaceGrid(geometry=surf_g, material=LineBasicMaterial(color='black'))
hover_point = Mesh(geometry=SphereGeometry(radius=0.05), material=LambertMaterial(color='hotpink'))
scene = Scene(children=[surf, surfgrid, hover_point, AmbientLight(color=0x777777)])
c = PerspectiveCamera(position=[0,3,3], up=[0,0,1],
children=[DirectionalLight(color='white', position=[3,5,1], intensity=0.6)])
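# pickers report the 3D point under the cursor: 'dblclick' drops a marker, 'mousemove' tracks the hover position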
click_picker = Picker(root=surf, event='dblclick')
hover_picker = Picker(root=surf, event='mousemove')
renderer = Renderer(camera=c, scene = scene, controls=[OrbitControls(controlling=c), click_picker, hover_picker])
def f(name, value):
print("Clicked on %s"%value)
point = Mesh(geometry=SphereGeometry(radius=0.05),
material=LambertMaterial(color='red'),
position=value)
scene.children = list(scene.children)+[point]
click_picker.on_trait_change(f, 'point')
link((hover_point, 'position'), (hover_picker, 'point'))
h = HTML()
def g(name, value):
h.value="Green point at (%.3f, %.3f, %.3f)"%tuple(value)
g(None, hover_point.position)
hover_picker.on_trait_change(g, 'point')
display(h)
display(renderer)
# when we change the z values of the geometry, we need to also change the height map
surf_g.z = list((-z[::-1]).flat)
surf.material.map = height_texture(-z[::-1])
import numpy as np
from scipy import ndimage
import matplotlib
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
jet = matplotlib.cm.get_cmap('jet')
np.random.seed(int(1)) # start random number generator
n = int(5) # starting points
size = int(32) # size of image
im = np.zeros((size,size)) # create zero image
points = size*np.random.random((2, n**2)) # locations of seed values
im[(points[0]).astype(np.int), (points[1]).astype(np.int)] = size # seed high values
im = ndimage.gaussian_filter(im, sigma=size/(float(4)*n)) # smooth high values into surrounding areas
im *= 1/np.max(im)  # rescale to be in the range [0,1]
rgba_im = img_as_ubyte(jet(im)) # convert the values to rgba image using the jet colormap
rgba_list = list(rgba_im.flat) # make a flat list
t = DataTexture(data=rgba_list, format='RGBAFormat', width=size, height=size)
geometry = SphereGeometry()#TorusKnotGeometry(radius=2, radialSegments=200)
material = LambertMaterial(map=t)
myobject = Mesh(geometry=geometry, material=material)
c = PerspectiveCamera(position=[0,3,3], fov=40, children=[DirectionalLight(color=0xffffff, position=[3,5,1], intensity=0.5)])
scene = Scene(children=[myobject, AmbientLight(color=0x777777)])
renderer = Renderer(camera=c, scene = scene, controls=[OrbitControls(controlling=c)])
display(renderer)
# On windows, linewidth of the material has no effect
size = 4
linesgeom = PlainGeometry(vertices=[[0,0,0],[size,0,0],[0,0,0],[0,size,0],[0,0,0],[0,0,size]],
colors = ['red', 'red', 'green', 'green', 'white', 'orange'])
lines = Line(geometry=linesgeom,
material=LineBasicMaterial( linewidth=5, vertexColors='VertexColors'),
type='LinePieces')
scene = Scene(children=[lines, DirectionalLight(color=0xccaabb, position=[0,10,0]),AmbientLight(color=0xcccccc)])
c = PerspectiveCamera(position=[0,10,10])
renderer = Renderer(camera=c, scene = scene, controls=[OrbitControls(controlling=c)])
display(renderer)
geometry = SphereGeometry(radius=4)
t = ImageTexture(imageuri="")
material = LambertMaterial(color='white', map=t)
sphere = Mesh(geometry=geometry, material=material)
point = Mesh(geometry=SphereGeometry(radius=.1),
material=LambertMaterial(color='red'))
c = PerspectiveCamera(position=[0,10,10], fov=40, children=[DirectionalLight(color='white',
position=[3,5,1],
intensity=0.5)])
scene = Scene(children=[sphere, point, AmbientLight(color=0x777777)])
p=Picker(event='mousemove', root=sphere)
renderer = Renderer(camera=c, scene = scene, controls=[OrbitControls(controlling=c), p])
coords = Text()
display(coords)
display(renderer)
#dlink((p,'point'), (point, 'position'), (coords, 'value'))
#
#camera=WebCamera()
#display(camera)
#display(Link(widgets=[[camera, 'imageurl'], [t, 'imageuri']]))
f = """
function f(origu, origv) {
    // scale u and v to the ranges I want: [0, 2*pi]
    var u = 2*Math.PI*origu;
    var v = 2*Math.PI*origv;
    var x = Math.sin(u);
    var y = Math.cos(v);
    var z = Math.cos(u+v);
    return new THREE.Vector3(x, y, z);
}
"""
surf_g = ParametricGeometry(func=f);
surf = Mesh(geometry=surf_g,material=LambertMaterial(color='green', side ='FrontSide'))
surf2 = Mesh(geometry=surf_g,material=LambertMaterial(color='yellow', side ='BackSide'))
scene = Scene(children=[surf, surf2, AmbientLight(color=0x777777)])
c = PerspectiveCamera(position=[5,5,3], up=[0,0,1],children=[DirectionalLight(color='white', position=[3,5,1], intensity=0.6)])
renderer = Renderer(camera=c,scene = scene,controls=[OrbitControls(controlling=c)])
display(renderer)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DataFrames
Step2: Exercise 1
Step3: Print value counts for <tt>prglngth</tt> and compare to results published in the codebook
Step4: Compute the mean birthweight.
Step5: Create a new column named <tt>totalwgt_kg</tt> that contains birth weight in kilograms. Compute its mean. Remember that when you create a new column, you have to use dictionary syntax, not dot notation.
Step6: One important note
Step7: Use a boolean Series to select the records for the pregnancies that ended in live birth.
Step8: Count the number of live births with <tt>birthwgt_lb</tt> between 0 and 5 pounds (including both). The result should be 1125.
Step9: Count the number of live births with <tt>birthwgt_lb</tt> between 9 and 95 pounds (including both). The result should be 798
Step10: Use <tt>birthord</tt> to select the records for first babies and others. How many are there of each?
Step11: Compute the mean weight for first babies and others.
Step12: Compute the mean <tt>prglngth</tt> for first babies and others. Compute the difference in means, expressed in hours.
Step13: Exercise 2
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import pandas as pd
%matplotlib inline
import nsfg
df = nsfg.ReadFemPreg()
df.head()
pregordr = df['pregordr']
pregordr[2:5]
birthord_counts = df.birthord.value_counts().sort_index()
birthord_counts
birthord_counts.plot(kind='bar')
df['prglngth_cut'] = pd.cut(df.prglngth,bins=[0,13,26,50])
df.prglngth_cut.value_counts().sort_index()
df.totalwgt_lb.mean()
df['totalwgt_kg'] = 0.45359237 * df.totalwgt_lb
df.totalwgt_kg.mean()
lve_birth = df.outcome == 1
lve_birth.tail()
live = df[df.outcome == 1]
len(live)
len(live[(0<=live.birthwgt_lb) & (live.birthwgt_lb<=5)])
len(live[(9<=live.birthwgt_lb) & (live.birthwgt_lb<95)])
firsts = df[df.birthord==1]
others = df[df.birthord>1]
len(firsts), len(others)
firsts.totalwgt_lb.mean(), others.totalwgt_lb.mean()
firsts.prglngth.mean(), others.prglngth.mean()
import thinkstats2
resp = thinkstats2.ReadStataDct('2002FemResp.dct').ReadFixedWidth('2002FemResp.dat.gz', compression='gzip')
preg = nsfg.ReadFemPreg()
preg_map = nsfg.MakePregMap(preg)
for index, pregnum in resp.pregnum.iteritems():
caseid = resp.caseid[index]
indices = preg_map[caseid]
# check that pregnum from the respondent file equals
# the number of records in the pregnancy file
if len(indices) != pregnum:
print(caseid, len(indices), pregnum)
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sensor de tacte
Step2: Sensor de llum
Step3: Sensor de so (micròfon)
Step4: Sensor ultrasònic
Step5: <img src="img/interrupt.png" align="right">
Step6: És el moment de fer nous programes amb els sensors, però abans cal desconnectar el robot d'esta pàgina.
|
<ASSISTANT_TASK:>
Python Code:
from functions import connect, touch, light, sound, ultrasonic, disconnect
connect(12)
touch() # Per a executar repetidament, useu Ctrl + Enter
light() # Per a executar repetidament, useu Ctrl + Enter
sound() # Per a executar repetidament, useu Ctrl + Enter
ultrasonic() # Per a executar repetidament, useu Ctrl + Enter
from functions import test_sensors
test_sensors()
disconnect()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading in data sets
Step2: Layers
Step3: Feature columns
|
<ASSISTANT_TASK:>
Python Code:
# import and check version
import tensorflow as tf
# tf can be really verbose
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
# a small sanity check, does tf seem to work ok?
hello = tf.constant('Hello TF!')
sess = tf.Session()
print(sess.run(hello))
sess.close()
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)
z = x + y
r = tf.random_normal([10, 2])
dataset = tf.data.Dataset.from_tensor_slices(r)
iterator = dataset.make_initializable_iterator()
next_row = iterator.get_next()
with tf.Session() as sess:
sess.run(iterator.initializer)
while True:
try:
data = sess.run(next_row)
print(data)
print(sess.run(z, feed_dict={x: data[0], y: data[1]}))
except tf.errors.OutOfRangeError:
break
x = tf.placeholder(tf.float32, shape=[None, 3])
y = tf.layers.dense(inputs=x, units=1)
with tf.Session() as sess:
try:
print(sess.run(y, {x: [[1, 2, 3], [4, 5, 6]]}))
except tf.errors.FailedPreconditionError as fpe:
print(fpe.message)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(y, {x: [[1, 2, 3], [4, 5, 6]]}))
y = tf.layers.dense(inputs=x, units=2, activation=tf.nn.tanh)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(y, {x: [[1, 2, 3], [4, 5, 6]]}))
features = {
'sales' : [[5], [10], [8], [9]],
'department': ['sports', 'sports', 'gardening', 'gardening']
}
# numeric values are simple
sales_column = tf.feature_column.numeric_column('sales')
columns = {
sales_column
}
inputs = tf.feature_column.input_layer(features, columns)
# categories are harders, as NNs only accept dense numeric values
categorical_department_column = tf.feature_column.categorical_column_with_vocabulary_list(
'department', ['sports', 'gardening'])
columns = {
sales_column,
categorical_department_column
}
# we can decide if we want the category to be encoded as embedding or multi-hot
try:
inputs = tf.feature_column.input_layer(features, columns)
except ValueError as ve:
print(ve)
multi_hot_department_column = tf.feature_column.indicator_column(categorical_department_column)
columns = {
sales_column,
multi_hot_department_column
}
inputs = tf.feature_column.input_layer(features, columns)
# feature columns also need initialization
var_init = tf.global_variables_initializer()
table_init = tf.tables_initializer()
with tf.Session() as sess:
sess.run((var_init, table_init))
# first two are departments last entry is just sales as is
print(sess.run(inputs))
# multi (one in our case) hot encoding of departments
columns = {
multi_hot_department_column
}
inputs = tf.feature_column.input_layer(features, columns)
var_init = tf.global_variables_initializer()
table_init = tf.tables_initializer()
with tf.Session() as sess:
sess.run((var_init, table_init))
print(sess.run(inputs))
# alternative, embedding in three dimensions
embedding_department_column = tf.feature_column.embedding_column(categorical_department_column, dimension=3)
columns = {
embedding_department_column
}
inputs = tf.feature_column.input_layer(features, columns)
var_init = tf.global_variables_initializer()
table_init = tf.tables_initializer()
with tf.Session() as sess:
sess.run((var_init, table_init))
print(sess.run(inputs))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If we want to use our shiny python scripts, we'll need to import them too.
Step2: If we want a quick man-page style summary, we can call help(spy). We can also call help(spy.<func>) for more information on the function <func>. Since the data that we're looking at is in a different directory, let's specify that now.
Step3: Reading Full Files
Step4: We have q (potential vorticity, this is a QG example). Let's read and plot our initial q field.
Step5: What did that just do? Let's break it down and look at the inputs.
Step6: Note that this is ordered as (Nx,Ny). If we had wanted MATLAB style ordering (Ny,Nx), we could have used the optional argument ordering = 'matlab' (useful for some plotting). The default is ordering = 'natural' (useful because it feels right).
Step7: Perfect, now we have our grid vectors. We can also load in grid information, such as the domain size and limits. This is stored in a class that has a method called display. These are illustrated below.
Step8: Plotting
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Tells the system to plot in-line, only necessary for iPython notebooks,
# not regular command-line python
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import time
# Now that we have our packages, we need data. The file 'make_2d_data.py' will
# generate a sample data set. Let's run that now (may take a minute)
execfile('make_2d_data.py')
import spinspy as spy
import matpy as mp
help(spy.set_path)
print('=======')
print('=======')
print('=======')
help(spy)
spy.set_path('Data/2d')
ls Data/2d
q = spy.reader('q', 0, [0,-1], [0,-1])
q.shape
x,y = spy.get_grid()
print('The shape of x is {0} and the shape of y is {1}'.format(x.shape,y.shape))
data = spy.get_params()
print(data.Nx,data.Ny,data.Nz)
print('---')
data.display()
q = spy.reader('q', 10)
plt.figure(1)
t0 = time.clock()
plt.contour(x,y,q.T) # Transpose for plotting order
t1 = time.clock()
print('Time to plot: {0:1.2e}'.format(t1-t0))
plt.title('q');
plt.xlabel('x');
plt.ylabel('y');
plt.figure(2)
t0 = time.clock()
plt.pcolor(x,y,q.transpose()); # Regrettably, the default colormap is still jet...
t1 = time.clock()
print('Time to plot: {0:1.2e}'.format(t1-t0))
plt.colorbar()
plt.title('Height Field');
plt.xlabel('x');
plt.ylabel('y');
plt.figure(3)
t0 = time.clock()
plt.pcolormesh(x,y,q.T, cmap='darkjet') # So we made darkjet!
t1 = time.clock()
print('Time to plot: {0:1.2e}'.format(t1-t0))
plt.colorbar()
plt.title('Height Field')
plt.xlabel('x')
plt.ylabel('y')
plt.axis('tight')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Expected
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import cm, colors
import numpy as np
from ipywidgets import interact
plt.rcParams['figure.figsize'] = 16, 8
import sys
sys.path.append('../..')
from crocodile.simulate import *
from crocodile.synthesis import *
from util.visualize import *
N = 500; max_uv=25000
max_lm = 1/512/16; step_lm = 1/32768
bounds_lm = (-max_lm,max_lm,step_lm)
extent_lm = (-max_lm,max_lm,-max_lm,max_lm)
extent_uv = extent=(-max_uv,max_uv,-max_uv,max_uv)
def draw_uv(ax, s_split):
ax.set_xlabel('u [$\lambda$]'); ax.set_ylabel('v [$\lambda$]')
ax.set_title("Interference pattern")
return ax.imshow(s_split.real, norm=colors.Normalize(vmin=-1, vmax=1), extent=extent_uv, origin='lower')
def draw_sky(ax, ls, ms):
ax.set_title("Sky (around phase centre)")
ax.set_xlabel('l [$1$]'); ax.set_ylabel('m [$1$]')
ax.plot(ls, ms, 'ro')
ax.set_xlim([-max_lm, max_lm])
ax.set_ylim([-max_lm, max_lm])
def show_pattern_2(l1,m1, l2,m2):
# Generate uvw coordinates
v,u = max_uv*2*coordinates2(N)
w = np.full(u.shape, 0, dtype=float)
uvw = np.concatenate(np.transpose((u,v,w)))
# Simulate for a full uv plane
s = simulate_point(uvw, l1, m1)
s += simulate_point(uvw, l2, m2)
fig = plt.figure()
draw_sky(fig.add_subplot(121), [l1,l2], [m1,m2])
im = draw_uv(fig.add_subplot(122), np.array(np.split(s, N)))
fig.colorbar(im,shrink=.4,pad=0.025)
plt.show()
interact(show_pattern_2,l1=bounds_lm, m1=bounds_lm,l2=bounds_lm, m2=bounds_lm);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Model
Step3: Training
Step4: Evaluation
|
<ASSISTANT_TASK:>
Python Code:
BERT_MODEL = 'bert-base-uncased'
BATCH_SIZE = 16 if "base" in BERT_MODEL else 2
GRADIENT_ACCUMULATION_STEPS = 1 if "base" in BERT_MODEL else 8
MAX_SEQ_LENGTH = 100
PREFIX = "junkfood_but"
import sys
sys.path.append('../')
import ndjson
import glob
from quillnlp.models.bert.preprocessing import preprocess, create_label_vocabulary
train_file = f"../data/interim/{PREFIX}_train_withprompt_diverse200.ndjson"
synth_files = glob.glob(f"../data/interim/{PREFIX}_train_withprompt_*.ndjson")
dev_file = f"../data/interim/{PREFIX}_dev_withprompt.ndjson"
test_file = f"../data/interim/{PREFIX}_test_withprompt.ndjson"
with open(train_file) as i:
train_data = ndjson.load(i)
synth_data = []
for f in synth_files:
if "allsynth" in f:
continue
with open(f) as i:
synth_data += ndjson.load(i)
with open(dev_file) as i:
dev_data = ndjson.load(i)
with open(test_file) as i:
test_data = ndjson.load(i)
label2idx = create_label_vocabulary(train_data)
idx2label = {v:k for k,v in label2idx.items()}
target_names = [idx2label[s] for s in range(len(idx2label))]
train_dataloader = preprocess(train_data, BERT_MODEL, label2idx, MAX_SEQ_LENGTH, BATCH_SIZE)
dev_dataloader = preprocess(dev_data, BERT_MODEL, label2idx, MAX_SEQ_LENGTH, BATCH_SIZE)
test_dataloader = preprocess(test_data, BERT_MODEL, label2idx, MAX_SEQ_LENGTH, BATCH_SIZE, shuffle=False)
import torch
from quillnlp.models.bert.models import get_bert_classifier
device = "cuda" if torch.cuda.is_available() else "cpu"
model = get_bert_classifier(BERT_MODEL, len(label2idx), device=device)
from quillnlp.models.bert.train import train
output_model_file = train(model, train_dataloader, dev_dataloader, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS, device)
from quillnlp.models.bert.train import evaluate
from sklearn.metrics import precision_recall_fscore_support, classification_report
print("Loading model from", output_model_file)
device="cpu"
model = get_bert_classifier(BERT_MODEL, len(label2idx), model_file=output_model_file, device=device)
model.eval()
_, test_correct, test_predicted = evaluate(model, test_dataloader, device)
print("Test performance:", precision_recall_fscore_support(test_correct, test_predicted, average="micro"))
print(classification_report(test_correct, test_predicted, target_names=target_names))
c = 0
for item, predicted, correct in zip(test_data, test_predicted, test_correct):
assert item["label"] == idx2label[correct]
c += (item["label"] == idx2label[predicted])
print("{}#{}#{}".format(item["text"], idx2label[correct], idx2label[predicted]))
print(c)
print(c/len(test_data))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
# TODO: Implement Function
x = [[source_vocab_to_int.get(word, 0) for word in sentence.split()] \
for sentence in source_text.split('\n')]
y = [[target_vocab_to_int.get(word, 0) for word in sentence.split()] \
for sentence in target_text.split('\n')]
source_id_text = []
target_id_text = []
for i in range(len(x)):
n1 = len(x[i])
n2 = len(y[i])
source_id_text.append(x[i])
target_id_text.append(y[i] + [target_vocab_to_int['<EOS>']])
return (source_id_text, target_id_text)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
# TODO: Implement Function
input_text = tf.placeholder(tf.int32,[None, None], name="input")
target_text = tf.placeholder(tf.int32,[None, None], name="targets")
learning_rate = tf.placeholder(tf.float32, name="learning_rate")
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
return input_text, target_text, learning_rate, keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
# TODO: Implement Function
enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
enc_cell_drop = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)
_, enc_state = tf.nn.dynamic_rnn(enc_cell_drop, rnn_inputs, dtype=tf.float32)
return enc_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
# TODO: Implement Function
train_dec_fm = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
train_seq2seq, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, train_dec_fm, \
dec_embed_input, sequence_length, scope=decoding_scope)
train_drop = tf.nn.dropout(train_seq2seq, keep_prob)
train_logits = output_fn(train_drop)
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
# TODO: Implement Function
infer_decoder_fn = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size)
inference_seq2seq, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, infer_decoder_fn, scope=decoding_scope)
inference_logits = tf.nn.dropout(inference_seq2seq, keep_prob)
return inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size)] * num_layers)
dec_cell_drop = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
# Output Layer
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size,\
None, scope=decoding_scope)
with tf.variable_scope("decoding") as decoding_scope:
train_logits = decoding_layer_train(encoder_state, dec_cell_drop, dec_embed_input,\
sequence_length, decoding_scope, output_fn, keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
infer_logits = decoding_layer_infer(encoder_state, dec_cell_drop, dec_embeddings,\
target_vocab_to_int['<GO>'],target_vocab_to_int['<EOS>'], sequence_length,\
vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
encoder_state = encoding_layer(embed_input, rnn_size, num_layers, keep_prob)
processed_target_data = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, processed_target_data)
train_logits, infer_logits = decoding_layer(dec_embed_input, dec_embeddings, encoder_state, target_vocab_size,\
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, infer_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 20
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 512
# Number of Layers
num_layers = 1
# Embedding Size
encoding_embedding_size = 256
decoding_embedding_size = 256
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.7
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1]), (0,0)],
'constant')
return np.mean(np.equal(target, np.argmax(logits, 2)))
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
# TODO: Implement Function
words = sentence.split(" ")
word_ids = []
for word in words:
word = word.lower()
if word in vocab_to_int:
word_id = vocab_to_int[word]
else:
word_id = vocab_to_int['<UNK>']
word_ids.append(word_id)
return word_ids
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem set #2
Step2: Problem set #3
Step3: Problem set #4
Step4: Problem set #5
Step5: Specifying a field other than name, area or elevation for the sort parameter should fail silently, defaulting to sorting alphabetically. Expected output
Step6: Paste your code
|
<ASSISTANT_TASK:>
Python Code:
import requests
data = requests.get('http://localhost:5000/lakes').json()
print(len(data), "lakes")
for item in data[:10]:
print(item['name'], "- elevation:", item['elevation'], "m / area:", item['area'], "km^2 / type:", item['type'])
import requests
data = requests.get('http://localhost:5000/lakes?type=salt').json()
avg_area = sum([x['area'] for x in data if x['area'] is not None]) / len(data)
avg_elev = sum([x['elevation'] for x in data if x['elevation'] is not None]) / len(data)
print("average area:", int(avg_area))
print("average elevation:", int(avg_elev))
import requests
data = requests.get('http://localhost:5000/lakes?sort=elevation').json()
for item in [x['name'] for x in data if x['elevation'] is not None][:15]:
print("*", item)
import requests
data = requests.get('http://localhost:5000/lakes?sort=area&type=caldera').json()
for item in data:
print("*", item['name'])
import requests
data = requests.get('http://localhost:5000/lakes', params={'type': "' OR true; --"}).json()
data
import requests
data = requests.get('http://localhost:5000/lakes', params={'sort': "florb"}).json()
[x['name'] for x in data[:5]]
import pg8000
import decimal
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/lakes')
def give_lakes():
conn = pg8000.connect(database = 'mondial', user = 'rebeccaschuetz')
cursor = conn.cursor()
sort = request.args.get('sort', 'name')
type_param = request.args.get('type', None)
# to get rid of not valid type_params:
cursor.execute('SELECT name, elevation, area, type FROM lake WHERE type = %s LIMIT 1', [type_param])
if not cursor.fetchone():
lakes_list = []
if type_param:
if sort == 'elevation' or sort == 'area':
cursor.execute('SELECT name, elevation, area, type FROM lake WHERE type = %s ORDER BY ' + sort + ' desc', [type_param])
else:
sort = 'name'
cursor.execute('SELECT name, elevation, area, type FROM lake WHERE type = %s ORDER BY ' + sort, [type_param])
else:
if sort == 'elevation' or sort == 'area':
cursor.execute('SELECT name, elevation, area, type FROM lake ORDER BY ' + sort + ' desc')
else:
sort = 'name'
cursor.execute('SELECT name, elevation, area, type FROM lake ORDER BY ' + sort)
lakes_list = []
for item in cursor.fetchall():
def decimal_to_int(x):
if isinstance(x, decimal.Decimal):
return int(x)
else:
return None
# elevation = item[1]
# if elevation:
# elevation = int(elevation)
# area = item[2]
# if area:
# area = int(area)
# lakes_dict = {'name': item[0],
# 'elevation': elevation,
# 'area': area,
# 'type': item[3]}
lakes_dict = {'name': item[0],
'elevation': decimal_to_int(item[1]),
'area': decimal_to_int(item[2]),
'type': item[3]}
lakes_list.append(lakes_dict)
for dictionary in lakes_list:
print(dictionary)
return jsonify(lakes_list)
app.run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the Demo
Step2: Video
Step3: Audio
Step4: Advanced options
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
from ipywidgets import interact
import ipywidgets as widgets
from common import common_vcu_demo_camera_encode_streamout
import os
from ipywidgets import HBox, VBox, Text, Layout
video_capture_device=widgets.Text(value='',
placeholder='"/dev/video1"',
description='Camera Dev Id:',
style={'description_width': 'initial'},
#layout=Layout(width='35%', height='30px'),
disabled=False)
address_path=widgets.Text(value='',
placeholder='192.168.1.101 ',
description='Client IP:',
disabled=False)
HBox([video_capture_device, address_path])
codec_type=widgets.RadioButtons(
options=['avc', 'hevc'],
description='Codec Type:',
disabled=False)
video_size=widgets.RadioButtons(
options=['640x480', '1280x720', '1920x1080', '3840x2160'],
description='Resolution:',
description_tooltip='To select the values, please refer USB Camera Capabilities section',
disabled=False)
sink_name=widgets.RadioButtons(
options=['none', 'fakevideosink'],
description='Video Sink:',
disabled=False)
HBox([codec_type, video_size, sink_name])
device_id=Text(value='',
placeholder='(optional) "hw:1"',
description='Input Dev:',
description_tooltip='To select the values, please refer Determine Audio Device Names section',
disabled=False)
device_id
audio_sink={'none':['none'], 'aac':['auto','alsasink','pulsesink'],'vorbis':['auto','alsasink','pulsesink']}
audio_src={'none':['none'], 'aac':['auto','alsasrc','pulseaudiosrc'],'vorbis':['auto','alsasrc','pulseaudiosrc']}
#val=sorted(audio_sink, key = lambda k: (-len(audio_sink[k]), k))
def print_audio_sink(AudioSink):
pass
def print_audio_src(AudioSrc):
pass
def select_audio_sink(AudioCodec):
audio_sinkW.options = audio_sink[AudioCodec]
audio_srcW.options = audio_src[AudioCodec]
audio_codecW = widgets.RadioButtons(options=sorted(audio_sink.keys(), key=lambda k: len(audio_sink[k])), description='Audio Codec:')
init = audio_codecW.value
audio_sinkW = widgets.RadioButtons(options=audio_sink[init], description='Audio Sink:')
audio_srcW = widgets.RadioButtons(options=audio_src[init], description='Audio Src:')
#j = widgets.interactive(print_audio_sink, AudioSink=audio_sinkW)
k = widgets.interactive(print_audio_src, AudioSrc=audio_srcW)
i = widgets.interactive(select_audio_sink, AudioCodec=audio_codecW)
HBox([i, k])
frame_rate=widgets.Text(value='',
placeholder='(optional) 15, 30, 60',
description='Frame Rate:',
disabled=False)
bit_rate=widgets.Text(value='',
placeholder='(optional) 1000, 20000',
description='Bit Rate(Kbps):',
style={'description_width': 'initial'},
disabled=False)
gop_length=widgets.Text(value='',
placeholder='(optional) 30, 60',
description='Gop Length',
disabled=False)
display(HBox([bit_rate, frame_rate, gop_length]))
no_of_frames=Text(value='',
placeholder='(optional) 1000, 2000',
description=r'<p>Frame Nos:</p>',
#layout=Layout(width='25%', height='30px'),
disabled=False)
output_path=widgets.Text(value='',
placeholder='(optional)',
description='Output Path:',
disabled=False)
periodicity_idr=widgets.Text(value='',
placeholder='(optional) 30, 40, 50',
description='Periodicity Idr:',
style={'description_width': 'initial'},
#layout=Layout(width='35%', height='30px'),
disabled=False)
#entropy_buffers
#output_path
#gop_length
HBox([periodicity_idr, no_of_frames, output_path])
#entropy_buffers
show_fps=widgets.Checkbox(
value=False,
description='show-fps',
#style={'description_width': 'initial'},
disabled=False)
compressed_mode=widgets.Checkbox(
value=False,
description='compressed-mode',
disabled=False)
HBox([compressed_mode, show_fps])
from IPython.display import clear_output
from IPython.display import Javascript
def run_all(ev):
display(Javascript('IPython.notebook.execute_cells_below()'))
def clear_op(event):
clear_output(wait=True)
return
button1 = widgets.Button(
description='Clear Output',
style= {'button_color':'lightgreen'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '300px'}
)
button2 = widgets.Button(
description='',
style= {'button_color':'white'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '83px'}
)
button1.on_click(run_all)
button1.on_click(clear_op)
def start_demo(event):
#clear_output(wait=True)
arg = [];
arg = common_vcu_demo_camera_encode_streamout.cmd_line_args_generator(device_id.value, video_capture_device.value, video_size.value, codec_type.value, audio_codecW.value, frame_rate.value, output_path.value, no_of_frames.value, bit_rate.value, show_fps.value, audio_srcW.value, periodicity_idr.value, gop_length.value, compressed_mode.value, sink_name.value, address_path.value);
#!sh vcu-demo-camera-encode-decode-display.sh $arg > logs.txt 2>&1
!sh vcu-demo-camera-encode-streamout.sh $arg
return
button = widgets.Button(
description='click to start camera-encode-streamout demo',
style= {'button_color':'lightgreen'},
#style= {'button_color':'lightgreen', 'description_width': 'initial'},
layout={'width': '300px'}
)
button.on_click(start_demo)
HBox([button, button2, button1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We first try to solve a complicated problem
Step2: Lets transform the left hand side in teh same way
Step3: We can simplify this as well and just take the derivative directly
Step4: This was a round about way to do something simple, but we can compare them directly to make sure
Step5: Or we can plot it
Step6: Can make a substitution to evaluate the function
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sympy
sympy.init_printing() # pretty printing
# must define symbolic variables before us
x, y = sympy.symbols('x y', real=True)
y = x**2*sympy.sqrt(3*x-2)/(x+1)**2
y # This causes it to output formatted nicely.
# Taking the natural log of this:
a = sympy.ln(x**2*sympy.sqrt(3*x-2)/(x+1)**2)
a # Keep in mind that the left hand side is ln(y)
# Taking its derivative with respect to x
# So, we took the log of the right side, then the derivative
sympy.diff(a,x)
# That is kind of messy. Let the computer simplify it
sympy.simplify(sympy.diff(a,x))
# We could have done this all at once
sympy.simplify(sympy.diff(sympy.ln(y),x))
sympy.simplify(sympy.diff(sympy.ln(y), x)*y)
sympy.simplify(sympy.diff(y, x))
sympy.simplify(sympy.diff(y, x)) == sympy.simplify(sympy.diff(sympy.ln(y),x)*y)
sympy.plot((sympy.diff(y, x), (x, 0.2, 10)), (y, (x, 0.5, 10)))
# To change colors
# show = False delays the plot until we can set all of the parameters
# legend turns on the legend and uses the labels we have later.
p = sympy.plot((sympy.diff(y,x),(x,0.2,10)), (y, (x,0.5,10)), show = False, legend = True)
p[0].line_color = 'blue'
p[0].label = '$\\frac{dy}{dx}$'
p[1].line_color = 'green'
p[1].label = '$y$'
p.show()
from sympy.abc import alpha
y.subs(x,alpha)
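# Extra (sketch): substituting a number rather than a symbol evaluates the function
# at that point, and evalf() forces a floating-point value.
y.subs(x, 2)
y.subs(x, 2).evalf()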
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Start
Step2: Continue
|
<ASSISTANT_TASK:>
Python Code:
#!pip install graphistry -q
#!pip install OTXv2 -q
import graphistry
import pandas as pd
from OTXv2 import OTXv2, IndicatorTypes
from gotx import G_OTX
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
otx = OTXv2("MY_KEY")
g_otx = G_OTX(graphistry, otx)
lockergoga_pulses = otx.search_pulses('LockerGoga').get('results')
lockergoga_pulses_df = g_otx.pulses_to_df(lockergoga_pulses)
lockergoga_indicators_df = g_otx.pulses_to_indicators_df(lockergoga_pulses)
g = g_otx.indicatormap(lockergoga_pulses_df, lockergoga_indicators_df)
g.plot()
ip_pulses = otx.get_indicator_details_by_section(IndicatorTypes.IPv4, lockergoga_indicators_df[lockergoga_indicators_df['indicator_type'] == 'IPv4'].values[0][0])
ip_pulses_df = g_otx.indicator_details_by_section_to_pulses_df(ip_pulses)
ip_indicators_df = g_otx.indicator_details_by_section_to_indicators_df(ip_pulses)
g_otx.indicatormap(ip_pulses_df, ip_indicators_df).plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Topic purpose
Step2: Probability theory and Bayesian inference
Step3: Integration over the evidence
Step4: A simple mixture model
Step5: Practical
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sklearn.datasets, sklearn.linear_model, sklearn.neighbors
import sklearn.manifold, sklearn.cluster
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os, time
import scipy.io.wavfile, scipy.signal, scipy.stats
import pymc as mc
import cv2
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (18.0, 10.0)
%%javascript
IPython.OutputArea.auto_scroll_threshold = 9999;
def joint_marginal(cov):
# create an independent 2D normal distribution
x,y = np.meshgrid(np.linspace(-3,3,50), np.linspace(-3,3,50))
pos = np.empty(x.shape + (2,))
pos[:,:,0] = x
pos[:,:,1] = y
joint_pdf = scipy.stats.multivariate_normal.pdf(pos, [0,0], cov)
fig = plt.figure()
# plot the joint
ax = fig.add_subplot(2,2,1)
ax.axis('equal')
plt.title("Joint p(x,y)")
ax.pcolor(x,y,joint_pdf, cmap='viridis')
# plot the marginals
ax = fig.add_subplot(2,2,3)
ax.axis('equal')
plt.title("Marginal $p(x) = \int\ p(x,y) dy$")
ax.plot(x[0,:], np.sum(joint_pdf, axis=0))
ax = fig.add_subplot(2,2,2)
ax.axis('equal')
plt.title("Marginal $p(y) = \int\ p(x,y) dx$")
ax.plot(np.sum(joint_pdf, axis=1), x[0,:])
    # plot p(y|x)
ax = fig.add_subplot(2,2,4)
ax.axis('equal')
plt.title("Conditional $p(x|y) = \\frac{p(x,y)}{p(x)}$")
marginal = np.tile(np.sum(joint_pdf, axis=0), (joint_pdf.shape[0],1))
ax.pcolor(x,y,joint_pdf/marginal, cmap='viridis')
joint_marginal([[1,0],[0.5,1]])
def prior_posterior():
mean = 0
std = 1
prior = scipy.stats.norm(mean,std)
evidence = scipy.stats.norm(1, 0.1)
xs = np.linspace(-5,5,200)
plt.plot(xs, prior.pdf(xs), label="Prior")
for i in range(10):
# this is **not** inference! just a visual example!
mean = 0.8*mean + 0.2*1
std = 0.8*std + 0.2*0.05
v = evidence.rvs()
plt.plot([v,v],[0,1], 'c', alpha=0.7)
plt.plot(xs, scipy.stats.norm(mean,std).pdf(xs), 'k:', alpha=0.5)
plt.plot([v,v],[0,1], 'c', alpha=0.7, label="Observations")
plt.plot(xs, scipy.stats.norm(mean,std).pdf(xs), 'g', label="Posterior")
plt.legend()
prior_posterior()
### Bayesian Linear Regression with pymc
### We use Monte Carlo sampling to estimate the distribution of a linear function with a normally
### distributed error, given some observed data.
### Vaguely based on: http://matpalm.com/blog/2012/12/27/dead_simple_pymc/ and http://sabermetricinsights.blogspot.co.uk/2014/05/bayesian-linear-regression-with-pymc.html
## Utility function to plot the graph of a PyMC model
def show_dag(model):
dag = mc.graph.dag(model)
dag.write("graph.png",format="png")
from IPython.display import Image
i = Image(filename='graph.png')
return i
## generate data with a known distribution
## this will be our "observed" data
x = np.sort(np.random.uniform(0,20, (50,)))
m = 2
c = 15
# Add on some measurement noise, with std. dev. 3.0
epsilon = np.random.normal(0, 3, x.shape)
y = m * x + c + epsilon
plt.plot(x,y, '.', label="Datapoints")
plt.plot(x, m*x+c, '--', lw=3, label="True")
plt.legend()
plt.xlabel("x")
plt.xlabel("y")
## Now, set up the PyMC model
## specify the prior distribution of the unknown line function variables
## Here, we assume a normal distribution over m and c
m_unknown = mc.Normal('m', 0, 0.01)
c_unknown = mc.Normal('c', 0, 0.001)
## specify a prior over the precision (inverse variance) of the error term
# precision = 1/variance
## Here we specify a uniform distribution from 0.001 to 10.0
precision = mc.Uniform('precision', lower=0.001, upper=10.0)
# specify the observed input variable
# we use a normal distribution, but this has no effect -- the values are fixed and the paramters
# never updated; this is just a way of transforming x into a variable pymc can work with
# (it's really a hack)
x_obs = mc.Normal("x_obs", 0, 1, value=x, observed=True)
@mc.deterministic(plot=False)
def line(m=m_unknown, c=c_unknown, x=x_obs):
return x*m+c
# specify the observed output variable (note: using tau instead of sigma means we use the precision parameterisation)
y_obs = mc.Normal('y_obs', mu=line, tau=precision, value=y, observed=True)
model = mc.Model([m_unknown, c_unknown, precision, x_obs, y_obs])
# display the graphical model
show_dag(model)
# sample from the distribution
mcmc = mc.MCMC(model)
mcmc.sample(iter=10000)
## plot histograms of possible parameter values
plt.figure()
plt.hist(mcmc.trace("m")[:], normed=True, bins=30)
plt.title("Estimate of m")
plt.figure()
plt.hist(mcmc.trace("c")[:], normed=True, bins=30)
plt.title("Estimate of c")
plt.figure()
plt.hist(np.sqrt(1.0/mcmc.trace("precision")[:]), normed=True, bins=30)
plt.title("Estimate of epsilon std.dev.")
plt.figure()
## now plot overlaid samples from the linear function
ms = mcmc.trace("m")[:]
cs = mcmc.trace("c")[:]
plt.title("Sampled fits")
plt.plot(x, y, '.', label="Observed")
plt.plot(x, x*m+c, '--', label="True")
xf = np.linspace(-20,40,200)
for m,c in zip(ms[::20], cs[::20]):
plt.plot(xf, xf*m+c, 'r-', alpha=0.005)
plt.legend()
plt.xlim(-20,40)
plt.ylim(-40,80)
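## Extra (sketch): summarise the posteriors numerically as well, with means and
## 95% credible intervals taken straight from the MCMC traces.
for name in ["m", "c"]:
    trace = mcmc.trace(name)[:]
    low, high = np.percentile(trace, [2.5, 97.5])
    print("%s: mean = %.3f, 95%% interval = (%.3f, %.3f)" % (name, np.mean(trace), low, high))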
## Adapted from the example given at
## http://stackoverflow.com/questions/18987697/how-to-model-a-mixture-of-3-normals-in-pymc
n = 3
ndata = 500
## A Dirichlet model specifies the distribution over categories
## All 1 means that every category is equally likely
dd = mc.Dirichlet('dd', theta=(1,)*n)
## This variable "selects" the category (i.e. the normal distribution)
## to use. The Dirichlet distribution sets the prior over the categories.
category = mc.Categorical('category', p=dd, size=ndata)
## Now we set our priors the precision and mean of each normal distribution
## Note the use of "size" to generate a **vector** of variables (i.e. one for each category)
## We expect the precision of each normal to be Gamma distributed (this mainly forces it to be positive!)
precs = mc.Gamma('precs', alpha=0.1, beta=0.1, size=n)
## And the means of the normal to be normally distributed, with a precision of 0.001 (i.e. std. dev 1000)
means = mc.Normal('means', 0, 0.001, size=n)
## These deterministic functions link the means of the observed distribution to the categories
## They just select one of the elements of the mean/precision vector, given the current value of category
## The input variables must be specified in the parameters, so that PyMC knows which variables to pass to it
@mc.deterministic
def mean(category=category, means=means):
return means[category]
@mc.deterministic
def prec(category=category, precs=precs):
return precs[category]
## Generate synthetic mixture-of-normals data, with means at -50,0,+50, and std. dev of 1
v = np.random.randint( 0, n, ndata)
data = (v==0)*(np.random.normal(50,5,ndata)) + (v==1)*(np.random.normal(-50,5,ndata)) + (v==2)*np.random.normal(0,5,ndata)
## Plot the original data
plt.hist(data, bins=50)
## Now we specify the variable we observe -- which is normally distributed, *but*
## we don't know the mean or precision. Instead, we pass the **functions** mean() and prec()
## which will be used at each sampling step.
## We specify the observed values of this node, and tell PyMC these are observed
## This is all that is needed to specify the model
obs = mc.Normal('obs', mean, prec, value=data, observed = True)
## Now we just bundle all the variables together for PyMC
model = mc.Model({'dd': dd,
'category': category,
'precs': precs,
'means': means,
'obs': obs})
def show_dag(model):
dag = mc.graph.dag(model)
dag.write("graph.png",format="png")
from IPython.display import Image
i = Image(filename='graph.png')
return i
show_dag(model)
mcmc = mc.MCMC(model)
## Now we tell the sampler what method to use
## Metropolis works well, but we must tell PyMC to use a specific
## discrete sampler for the category variable to get good results in a reasonable time
mcmc.use_step_method(mc.AdaptiveMetropolis, model.means)
mcmc.use_step_method(mc.AdaptiveMetropolis, model.precs)
mcmc.use_step_method(mc.DiscreteMetropolis, model.category) ## this step is key!
mcmc.use_step_method(mc.AdaptiveMetropolis, model.dd)
## Run the sampler
mcmc.sample(iter=125000, burn=2000)
plt.figure()
plt.hist(mcmc.trace('means').gettrace()[:], normed=True)
plt.title("Estimated means")
plt.legend(['Component 1', 'Component 2', 'Component 3'])
plt.figure()
## show the result in terms of std. dev. (i.e sqrt(1.0/precision))
plt.title("Estimated std. dev")
plt.hist(np.sqrt(1.0/mcmc.trace('precs').gettrace()[:]), normed=True)
plt.legend(['Component 1', 'Component 2', 'Component 3'])
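## Extra (sketch): the sampled category assignments also give an estimate of the
## mixture weights, which can be compared with the proportions used to generate the data.
category_trace = mcmc.trace('category').gettrace() # shape: (samples, ndata)
est_weights = [np.mean(category_trace == k) for k in range(n)]
print("Estimated weights: %s" % est_weights)
print("True proportions : %s" % [np.mean(v == k) for k in range(n)])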
## the variables in the model should go in the list passed to Model
model = mc.Model([])
## see the graphical representation of the model
show_dag(model)
## Construct a sampler
mcmc = mc.MCMC(model)
## Sample from the result; you should try changing the number of iterations
mcmc.sample(iter=10000)
## Use the trace methods from pymc to explore the distribution of values
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below are plots of the Dirichlet distribution, where the two horizontal axes are coordinates on the simplex plane and the vertical axis corresponds to the value of the probability density.
Step2: The case ${\alpha_k}=1$:
Step3: The case ${\alpha_k}=0.1$:
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from scipy.stats import dirichlet
import matplotlib.tri as tri
from matplotlib import cm
corners = np.array([[0, 0], [1, 0], [0.5, 0.75**0.5]])
triangle = tri.Triangulation(corners[:, 0], corners[:, 1])
refiner = tri.UniformTriRefiner(triangle)
trimesh = refiner.refine_triangulation(subdiv=4)
plt.figure(figsize=(8, 4))
for (i, mesh) in enumerate((triangle, trimesh)):
plt.subplot(1, 2, i+ 1)
plt.triplot(mesh)
plt.axis('off')
plt.axis('equal')
# Mid-points of triangle sides opposite of each corner
midpoints = [(corners[(i + 1) % 3] + corners[(i + 2) % 3]) / 2.0 \
for i in range(3)]
def xy2bc(xy, tol=1.e-3):
'''Converts 2D Cartesian coordinates to barycentric.'''
s = [(corners[i] - midpoints[i]).dot(xy - midpoints[i]) / 0.75 \
for i in range(3)]
return np.clip(s, tol, 1.0 - tol)
def dirichlet_pdf(x, al):
    from operator import mul
    from math import gamma
    from functools import reduce  # reduce is not a builtin on Python 3
    alpha = np.array(al)
    coef = gamma(np.sum(alpha)) / reduce(mul, [gamma(a) for a in alpha])
    return coef * reduce(mul, [xx ** (aa - 1) for (xx, aa) in zip(x, alpha)])
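# Quick sanity check (sketch): the dirichlet object imported from scipy.stats above
# implements the same density, so the two should agree at any point on the simplex.
print(dirichlet_pdf([0.2, 0.3, 0.5], [10, 10, 10]))
print(dirichlet.pdf([0.2, 0.3, 0.5], [10, 10, 10]))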
def draw_dirichlet(alpha, nlevels=200, subdiv=8, **kwargs):
refiner = tri.UniformTriRefiner(triangle)
trimesh = refiner.refine_triangulation(subdiv=subdiv)
pvals = [dirichlet_pdf(xy2bc(xy) , alpha) for xy in zip(trimesh.x, trimesh.y)]
fig = plt.figure(figsize=(10,8))
ax = fig.gca(projection='3d')
ax.plot_trisurf(trimesh.x, trimesh.y, pvals, cmap=cm.jet, linewidth=0.01)
plt.axis('equal')
plt.show()
draw_dirichlet([10,10,10])
draw_dirichlet([1,1,1])
draw_dirichlet([0.1,0.1,0.1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q1. Concatenate x1 and x2.
Step2: Q2. Repeat x three time element-wise.
Step3: Q3-1. Capitalize the first letter of x element-wise.<br/>
Step4: Q4. Make the length of each element 20 and the string centered / left-justified / right-justified with paddings of _.
Step5: Q5. Encode x in cp500 and decode it again.
Step6: Q6. Insert a space between characters of x.
Step7: Q7-1. Remove the leading and trailing whitespaces of x element-wise.<br/>
Step8: Q8. Split the element of x with spaces.
Step9: Q9. Split the element of x to multiple lines.
Step10: Q10. Make x a numeric string of 4 digits with zeros on its left.
Step11: Q11. Replace "John" with "Jim" in x.
Step12: Comparison
Step13: Q13. Return x1 != x2, element-wise.
Step14: String information
Step15: Q15. Count the lowest index of "l" in x, element-wise.
Step16: Q16-1. Check if each element of x is composed of digits only.<br/>
Step17: Q17. Check if each element of x starts with "hi".
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
author = "kyubyong. https://github.com/Kyubyong/numpy_exercises"
np.__version__
x1 = np.array(['Hello', 'Say'], dtype=np.str)
x2 = np.array([' world', ' something'], dtype=np.str)
out = np.char.add(x1, x2)
print(out)
x = np.array(['Hello ', 'Say '], dtype=np.str)
out = np.char.multiply(x, 3)
print(out)
x = np.array(['heLLo woRLd', 'Say sOmething'], dtype=np.str)
capitalized = np.char.capitalize(x)
lowered = np.char.lower(x)
uppered = np.char.upper(x)
swapcased = np.char.swapcase(x)
titlecased = np.char.title(x)
print("capitalized =", capitalized)
print("lowered =", lowered)
print("uppered =", uppered)
print("swapcased =", swapcased)
print("titlecased =", titlecased)
x = np.array(['hello world', 'say something'], dtype=np.str)
centered = np.char.center(x, 20, fillchar='_')
left = np.char.ljust(x, 20, fillchar='_')
right = np.char.rjust(x, 20, fillchar='_')
print("centered =", centered)
print("left =", left)
print("right =", right)
x = np.array(['hello world', 'say something'], dtype=np.str)
encoded = np.char.encode(x, 'cp500')
decoded = np.char.decode(encoded,'cp500')
print("encoded =", encoded)
print("decoded =", decoded)
x = np.array(['hello world', 'say something'], dtype=np.str)
out = np.char.join(" ", x)
print(out)
x = np.array([' hello world ', '\tsay something\n'], dtype=np.str)
stripped = np.char.strip(x)
lstripped = np.char.lstrip(x)
rstripped = np.char.rstrip(x)
print("stripped =", stripped)
print("lstripped =", lstripped)
print("rstripped =", rstripped)
x = np.array(['Hello my name is John'], dtype=np.str)
out = np.char.split(x)
print(out)
x = np.array(['Hello\nmy name is John'], dtype=np.str)
out = np.char.splitlines(x)
print(out)
x = np.array(['34'], dtype=np.str)
out = np.char.zfill(x, 4)
print(out)
x = np.array(['Hello nmy name is John'], dtype=np.str)
out = np.char.replace(x, "John", "Jim")
print(out)
x1 = np.array(['Hello', 'my', 'name', 'is', 'John'], dtype=np.str)
x2 = np.array(['Hello', 'my', 'name', 'is', 'Jim'], dtype=np.str)
out = np.char.equal(x1, x2)
print(out)
x1 = np.array(['Hello', 'my', 'name', 'is', 'John'], dtype=np.str)
x2 = np.array(['Hello', 'my', 'name', 'is', 'Jim'], dtype=np.str)
out = np.char.not_equal(x1, x2)
print(out)
x = np.array(['Hello', 'my', 'name', 'is', 'Lily'], dtype=np.str)
out = np.char.count(x, "l")
print(out)
x = np.array(['Hello', 'my', 'name', 'is', 'Lily'], dtype=np.str)
out = np.char.find(x, "l")
print(out)
# compare
# print(np.char.index(x, "l"))
# => This raises an error!
x = np.array(['Hello', 'I', 'am', '20', 'years', 'old'], dtype=np.str)
out1 = np.char.isdigit(x)
out2 = np.char.islower(x)
out3 = np.char.isupper(x)
print("Digits only =", out1)
print("Lower cases only =", out2)
print("Upper cases only =", out3)
x = np.array(['he', 'his', 'him', 'his'], dtype=np.str)
out = np.char.startswith(x, "hi")
print(out)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's fetch the raw data of $CO_2$ measurements at Mauna Loa from the noaa.gov website
Step2: As explained in the file co2_mm_mlo.txt, the average column contains the raw monthly means of the $CO_2$ measurements, and some months are missing. The trend column subtracts a "seasonal cycle" computed over a 7-year window, and its missing values are then linearly interpolated. The interpolated column then contains the trend value plus the average seasonal cycle, so average and interpolated hold the same values except for the missing months, which are "intelligently" filled in. We should do this analysis ourselves in the notebook directly from the average data only, but for now let's reuse this analysis.
Step3: Let's get numbers for $N$ (year 2010) and $N_0$ (year 1980) by reading it off the $y$-axis
Step4: Temperature changes
Step5: Warming over the past 30 years using satellite measurements (see a separate notebook for this calculation)
Step6: We'll use the satellite measurements, as arguably they have fewer systematic errors.
Step7: Calculation of temperature sensitivity
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import urllib.request
# Only execute this if you want to regenerate the downloaded file
open("data/co2_mm_mlo.txt", "wb").write(urllib.request.urlopen("ftp://ftp.cmdl.noaa.gov/ccg/co2/trends/co2_mm_mlo.txt").read())
D = loadtxt("data/co2_mm_mlo.txt")
years = D[:, 2]
average = D[:, 3]
interpolated = D[:, 4]
trend = D[:, 5]
plot(years, interpolated, "r-", lw=1.5, label="monthly average")
plot(years, trend, "k-", label="trend")
xlabel("Year")
ylabel("$CO_2$ concentration [ppm]")
title("Atmospheric $CO_2$ concentrations at Mauna Loa")
legend(loc="upper left");
idx1980 = sum(years < 1980)
idx2010 = sum(years < 2010)
N0 = trend[idx1980]
N = trend[idx2010]
print("N0 = %.2f ppm (year %.3f)" % (N0, years[idx1980]))
print("N = %.2f ppm (year %.3f)" % (N, years[idx2010]))
dTdt = 0.24995728742972512 # C / decade
dTdt = 0.13764588789937693 # C / decade
dT = dTdt * 3 # 3 decades
dT
from math import log
deltaT = dT * log(2) / log(1.0*N/N0)
print("∆T = ", deltaT, "C")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now you are ready to run the main part of the code. Click on the cell below and then type Shift+Enter. If you do not get an error, you should receive a message indicating that a new file has been written to your output folder.
|
<ASSISTANT_TASK:>
Python Code:
##### Configuration #####
# Configure the filename
filename = "file.txt"
# Configure the path to input directory
input_path = "C:\Users\USERNAME\Desktop\we1s-test\input"
# input_path = "/Users/USERNAME/Desktop/we1s-test/input"
# Configure the path to output directory
output_path = "C:\Users\USERNAME\Desktop\we1s-test\output"
# output_path = "/Users/USERNAME/Desktop/we1s-test/output"
##### End of Configuration #####
# Import the os package to manage file paths
import os
# Create input and out file paths
input_file = os.path.join(input_path, filename)
output_file = os.path.join(output_path, filename)
# Open the input file and read it
f = open(input_file)
text = f.read()
f.close()
print("The input file says: " + text)
# Convert the text to lower case
text = text.lower()
# Open the output file for writing and save the new text to it
output_file = os.path.join(output_path, "file2.txt")
f = open(output_file, "w")
f.write(text)
f.close()
print("I've just written a new file called 'file2.txt' to your output folder. Check it out!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go over the columns
Step2: Now that we've imported the data, let's take a look at which fields are available for each dataset.
Step3: Now that we know what fields we have access to, let's see what this data looks like when we run it through Pipeline.
Step4: Here, you'll notice that each security is mapped to the corresponding value, so you could grab any security to get what you need.
|
<ASSISTANT_TASK:>
Python Code:
# For use in Quantopian Research, exploring interactively
from quantopian.interactive.data.quandl import cboe_vix as dataset
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
dataset.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
dataset.count()
# Let's see what the data looks like. We'll grab the first three rows.
dataset[:3]
# Plotting this DataFrame since 2007
df = odo(dataset, pd.DataFrame)
df.head(5)
# So we can plot it, we'll set the index as the `asof_date`
df['asof_date'] = pd.to_datetime(df['asof_date'])
df = df.set_index(['asof_date'])
df.head(5)
import matplotlib.pyplot as plt
df.vix_open.plot(label=str(dataset))
plt.ylabel(str(dataset))
plt.legend()
plt.title("Graphing %s since %s" % (str(dataset), min(df.index)))
# Import necessary Pipeline modules
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# Import the datasets available
from quantopian.pipeline.data.quandl import cboe_vix
print "Here are the list of available fields per dataset:"
print "---------------------------------------------------\n"
def _print_fields(dataset):
print "Dataset: %s\n" % dataset.__name__
print "Fields:"
for field in list(dataset.columns):
print "%s - %s" % (field.name, field.dtype)
print "\n"
_print_fields(cboe_vix)
print "---------------------------------------------------\n"
pipe = Pipeline()
pipe.add(cboe_vix.vix_open.latest, 'open_vix')
# Setting some basic liquidity strings (just for good habit)
dollar_volume = AverageDollarVolume(window_length=20)
top_1000_most_liquid = dollar_volume.rank(ascending=False) < 1000
pipe.set_screen(top_1000_most_liquid & cboe_vix.vix_open.latest.notnan())
# The show_graph() method of pipeline objects produces a graph to show how it is being calculated.
pipe.show_graph(format='png')
# run_pipeline will show the output of your pipeline
pipe_output = run_pipeline(pipe, start_date='2013-11-01', end_date='2013-11-25')
pipe_output
# This section is only importable in the backtester
from quantopian.algorithm import attach_pipeline, pipeline_output
# General pipeline imports
from quantopian.pipeline import Pipeline
from quantopian.pipeline.factors import AverageDollarVolume
# For use in your algorithms via the pipeline API
from quantopian.pipeline.data.quandl import cboe_vix
def make_pipeline():
# Create our pipeline
pipe = Pipeline()
# Screen out penny stocks and low liquidity securities.
dollar_volume = AverageDollarVolume(window_length=20)
is_liquid = dollar_volume.rank(ascending=False) < 1000
# Create the mask that we will use for our percentile methods.
base_universe = (is_liquid)
# Add the datasets available
pipe.add(cboe_vix.vix_open.latest, 'vix_open')
# Set our pipeline screens
pipe.set_screen(is_liquid)
return pipe
def initialize(context):
attach_pipeline(make_pipeline(), "pipeline")
def before_trading_start(context, data):
results = pipeline_output('pipeline')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tutorial - How to work with the OpenEnergy Platform (OEP)
Step2: 1. Select data
Step3: <div class="alert alert-block alert-info">
Step4: 3. Make calculations
Step5: Sum the installed Capacity by fuels and add the Unit MW to a new column.
|
<ASSISTANT_TASK:>
Python Code:
__copyright__ = "Zentrum für nachhaltige Energiesysteme Flensburg"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "wolfbunke"
import requests
import pandas as pd
from IPython.core.display import HTML
# oedb
oep_url= 'http://oep.iks.cs.ovgu.de/'
# token
your_token = ''
# select powerplant data
schema = 'supply'
table = 'ego_dp_conv_powerplant'
where = 'version=v0.2.10'
conv_powerplants = requests.get(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/?where='+where, )
conv_powerplants.status_code
df_pp = pd.DataFrame(conv_powerplants.json())
df_pp.info()
results = df_pp[['capacity','fuel']].groupby('fuel').sum()
results['units'] = 'MW'
results
# Write DataFrame as csv
results.to_csv('Conventional_powerplants_germany.csv',
               sep=',',
               float_format='%.3f',
               decimal='.',
               date_format='%Y-%m-%d',
               encoding='utf-8')
# Write the results as xlsx file
writer = pd.ExcelWriter('Conventional_powerplants_germany.xlsx', engine='xlsxwriter')
# write results of installed Capacity by fuels
results.to_excel(writer, sheet_name='Installed Capacities by fuel')  # keep the fuel index as a column
# write orgininal data in second sheet
df_pp.to_excel(writer, index=False, sheet_name='Conventional Powerplants')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: It's an xml file, so we'll need the xml.etree parser, and pandas so that we can import into a dataframe
Step2: Now, the reference answers are in the second daughter node of the tree. We can extract these and store them in a dictionary. To distinguish between reference answer tokens and student response tokens, I'm going to append each token in the reference answers with <code>_RA</code>, and each of the tokens in a student response with <code>_SR</code>.
Step3: Next, we need to extract each of the student responses. These are in the third daughter node
Step4: OK, that seems to work OK. Now, let's define a function that takes a filename, and returns the list of token dictionaries
Step5: We now have a function which takes a filename and returns a list of tokenised student responses and reference answers
Step6: So next we need to be able to build a document frequency dictionary from a list of tokenised documents.
Step7: Next, define a function which takes a list of tokens and a document frequency dictionary, and returns a dictionary of the tf.idf values for each of the tokens in the list. Note
Step8: Finally, we want to convert the outputs for all of the responses into a dataframe.
Step9: Cool, that seems to work. Now just need to do it for the complete set of files. Just use beetle/train/core for the time being.
Step10: Use os.walk to get the files
Step11: We can now do the same as before, but this time using all the files to construct the final dataframe. We also need a series containing the accuracy measures.
Step12: And finish by exporting to a csv file
|
<ASSISTANT_TASK:>
Python Code:
filename='semeval2013-task7/semeval2013-Task7-5way/beetle/train/Core/FaultFinding-BULB_C_VOLTAGE_EXPLAIN_WHY1.xml'
import pandas as pd
from xml.etree import ElementTree as ET
tree=ET.parse(filename)
r=tree.getroot()
from string import punctuation
def to_tokens(textIn):
'''Convert the input textIn to a list of tokens'''
tokens_ls=[t.lower().strip(punctuation) for t in textIn.split()]
# remove any empty tokens
return [t for t in tokens_ls if t]
str='"Help!" yelped the banana, who was obviously scared out of his skin.'
print(str)
print(to_tokens(str))
refAnswers_dict={refAnswer.attrib['id']:[t+'_RA' for t in to_tokens(refAnswer.text)]
for refAnswer in r[1]}
refAnswers_dict
print(r[2][0].text)
r[2][0].attrib
responses_ls=[]
for (i, studentResponse) in enumerate(r[2]):
if 'answerMatch' in studentResponse.attrib:
matchTokens_ls=refAnswers_dict[studentResponse.attrib['answerMatch']]
else:
matchTokens_ls=[]
responses_ls.append({'accuracy':studentResponse.attrib['accuracy'],
'text':studentResponse.text,
'tokens':[t+'_SR' for t in to_tokens(studentResponse.text)] + matchTokens_ls})
responses_ls[36]
def extract_token_dictionaries(filenameIn):
# Localise the to_tokens function
def to_tokens_local(textIn):
'''Convert the input textIn to a list of tokens'''
tokens_ls=[t.lower().strip(punctuation) for t in textIn.split()]
# remove any empty tokens
return [t for t in tokens_ls if t]
tree=ET.parse(filenameIn)
root=tree.getroot()
refAnswers_dict={refAnswer.attrib['id']:[t+'_RA' for t in to_tokens_local(refAnswer.text)]
for refAnswer in root[1]}
responsesOut_ls=[]
for (i, studentResponse) in enumerate(root[2]):
if 'answerMatch' in studentResponse.attrib:
matchTokens_ls=refAnswers_dict[studentResponse.attrib['answerMatch']]
else:
matchTokens_ls=[]
responsesOut_ls.append({'accuracy':studentResponse.attrib['accuracy'],
'text':studentResponse.text,
'tokens':[t+'_SR' for t in to_tokens_local(studentResponse.text)] \
+ matchTokens_ls})
return responsesOut_ls
extract_token_dictionaries(filename)[:2]
def document_frequencies(listOfTokenLists):
# Build the dictionary of all tokens used:
token_set=set()
for tokenList in listOfTokenLists:
token_set=token_set.union(set(tokenList))
# Then return the document frequency counts for each token
return {t:len([l for l in listOfTokenLists if t in l])
for t in token_set}
tokenLists_ls=[x['tokens'] for x in extract_token_dictionaries(filename)]
document_frequencies(tokenLists_ls)
from collections import Counter
def get_tfidf(tokens_ls, docFreq_dict):
tf_dict=Counter(tokens_ls)
return {t:tf_dict[t]/docFreq_dict[t] for t in tf_dict if t in docFreq_dict}
get_tfidf('the cat sat on the mat'.split(), {'cat':2, 'the':1})
# Extract the data from the file:
tokenDictionaries_ls=extract_token_dictionaries(filename)
# Build the lists of responses:
tokenLists_ls=[x['tokens'] for x in extract_token_dictionaries(filename)]
# Build the document frequency dict
docFreq_dict=document_frequencies(tokenLists_ls)
# Create the tf.idf for each response:
tfidf_ls=[get_tfidf(tokens_ls, docFreq_dict) for tokens_ls in tokenLists_ls]
# Now, create a dataframe which is indexed by the token dictionary:
trainingText_df=pd.DataFrame(index=docFreq_dict.keys())
# Use the index of responses in the list as column headers:
for (i, tokens_ls) in enumerate(tfidf_ls):
trainingText_df[i]=pd.Series(tokens_ls, index=trainingText_df.index)
# Finally, transpose, and replace the NaNs with 0:
trainingText_df.fillna(0).T
!ls semeval2013-task7/semeval2013-Task7-5way/beetle/train/Core/
import os
tokenDictionaries_ls=[]
# glob would have been easier...
for (root, dirs, files) in os.walk('semeval2013-task7/semeval2013-Task7-5way/beetle/train/Core/'):
for filename in files:
if filename[-4:]=='.xml':
tokenDictionaries_ls.extend(extract_token_dictionaries(os.path.join(root, filename)))
# Now we've extracted the information from all the files. We can now construct the dataframe
# in the same way as before:
# Build the lists of responses:
tokenLists_ls=[x['tokens'] for x in tokenDictionaries_ls]
# Build the document frequency dict
docFreq_dict=document_frequencies(tokenLists_ls)
# Now, create a dataframe which is indexed by the tokens
# in the token frequency dictionary:
trainingText_df=pd.DataFrame(index=docFreq_dict.keys())
# Populate the dataframe with the tf.idf for each response. Also,
# create a dictionary of the accuracy values while we're at it.
accuracy_dict={}
for (i, response_dict) in enumerate(tokenDictionaries_ls):
trainingText_df[i]=pd.Series(get_tfidf(response_dict['tokens'], docFreq_dict),
index=trainingText_df.index)
accuracy_dict[i]=response_dict['accuracy']
# Finally, transpose, and replace the NaNs with 0:
trainingText_df=trainingText_df.fillna(0).T
# Also, to make it easier to store in a single csv file, let's put the accuracy
# values in a column called "accuracy_txt":
trainingText_df['accuracy_txt']=pd.Series(accuracy_dict)
# And to have a final column containing a numerical equivalent of the
# accuracy_txt column (called accuracy_num ):
labels_dict={label:i for (i, label) in enumerate(set(trainingText_df['accuracy_txt']))}
trainingText_df['accuracy_num']=[labels_dict[l] for l in trainingText_df['accuracy_txt']]
trainingText_df.head()
trainingText_df.to_csv('beetleTrainingData.csv', index=False)
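# Optional follow-up (sketch, assuming scikit-learn is available): the exported
# features can be fed straight into a simple classifier of the accuracy label.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
X=trainingText_df.drop(['accuracy_txt','accuracy_num'], axis=1).values
y=trainingText_df['accuracy_num'].values
print(cross_val_score(LogisticRegression(), X, y, cv=5).mean())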
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There you see! Even a 3% ethernet frame loss translates to a 12% packet loss for a jumbo ping test. This is the same as what we observed. Does the math agree?
|
<ASSISTANT_TASK:>
Python Code:
import random
import numpy as np
sampleCount = 100000 # total events in our simulation
p = 0.03 # ethernet frame loss probability
grpSize = 4 # packet count per datagram, 5000 bytes = 4 packets
grpEventCount = int(sampleCount/grpSize) # datagram count
# generate random packets with p% packet loss
events = np.random.choice([0,1],
size=sampleCount,
p=[p,1-p])
# group discrete packets into a datagram
grpEvents = events.reshape(grpEventCount,grpSize)
# function to determine datagram loss
def checkFailure(grpEvent):
    return (np.count_nonzero(grpEvent) < grpSize) # True if any of the grpSize packets in the datagram was lost
# count the result
failCount = 0
for grpEvent in grpEvents:
failCount += checkFailure(grpEvent)
print("The probability of a group failure is {:.2f}%".format(failCount/len(grpEvents)*100))
1- (1-p)**4
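# Closing check (sketch): the analytic value above should agree with the simulated
# group-failure rate to within Monte Carlo noise.
analytic = 1 - (1-p)**grpSize
print("Analytic: {:.2f}%, simulated: {:.2f}%".format(analytic*100, failCount/len(grpEvents)*100))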
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load image
Step2: Detect Edges
Step3: View Edges
|
<ASSISTANT_TASK:>
Python Code:
# Load image
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Load image as greyscale
image_gray = cv2.imread('images/plane_256x256.jpg', cv2.IMREAD_GRAYSCALE)
# Calculate median intensity
median_intensity = np.median(image_gray)
# Set thresholds to be one standard deviation above and below median intensity
lower_threshold = int(max(0, (1.0 - 0.33) * median_intensity))
upper_threshold = int(min(255, (1.0 + 0.33) * median_intensity))
# Apply canny edge detector
image_canny = cv2.Canny(image_gray, lower_threshold, upper_threshold)
# Show image
plt.imshow(image_canny, cmap='gray'), plt.axis("off")
plt.show()
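# Extra experiment (sketch): the 0.33 used above is just a common rule of thumb;
# varying it shows how the threshold band controls how many edge pixels survive.
for sigma in [0.10, 0.33, 0.66]:
    low = int(max(0, (1.0 - sigma) * median_intensity))
    high = int(min(255, (1.0 + sigma) * median_intensity))
    edges = cv2.Canny(image_gray, low, high)
    print("sigma=%.2f -> thresholds (%d, %d), edge pixels: %d" % (sigma, low, high, np.count_nonzero(edges)))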
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup the matplotlib environment to make the plots look pretty.
Step2: Introduction
Step3: Analyse the ratio
Step4: Not very legible, right? But some things can be observed in the above figure anyway. First of all, there's a "dip" in the number of generated fragments around $6.5\times 10^4$. This was caused by the fact that the number of generated fragments exceeding a certain characteristic length $L_c$, $N$, is given by a power law
Step5: The more fragments were generated in a collision, the fewer fragments a given collision gave rise to in the final population. This seems counter-intuitive because large collisions are expected to contribute more to the long-term growth of the debris population than others, which generate fewer fragments [2]. However, these plots do not contradict this thesis about the reasons for the growth of the number of debris because they do not show which collisions will drive the predicted growth of the number of objects in orbit. Rather, they show which collisions are likely to result in many follow-on collisions that will amplify the number of fragments that the collisions generate, thus fuelling the "Kessler syndrome" [3]. They do not necessarily say that the number of resulting fragments will be large in absolute terms.
Step6: Most collisions had a ratio of less than $2.0$. Only
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot
assert matplotlib.__version__>='1.5.1'
import numpy
assert numpy.__version__>='1.10.4'
# Show the plots inside the notebook.
%matplotlib inline
# Make the figures high-resolution.
%config InlineBackend.figure_format='retina'
# Various font sizes.
ticksFontSize=18
labelsFontSizeSmall=20
labelsFontSize=30
titleFontSize=34
legendFontSize=14
matplotlib.rc('xtick', labelsize=ticksFontSize)
matplotlib.rc('ytick', labelsize=ticksFontSize)
# Colourmaps.
cm=matplotlib.pyplot.cm.get_cmap('viridis')
import urllib2, numpy
from __future__ import print_function
# All collisions.
lines=urllib2.urlopen('https://raw.githubusercontent.com/AleksanderLidtke/\
AnalyseCollisionFragments/master/AllColGenerated').read(856393*25) # no. lines * no. chars per line
allColGen=numpy.array(lines.split('\n')[1:-1],dtype=numpy.float64) # Skip the header and the last empty line
lines=urllib2.urlopen('https://raw.githubusercontent.com/AleksanderLidtke/\
AnalyseCollisionFragments/master/AllColResulting').read(856393*25)
allColRes=numpy.array(lines.split('\n')[1:-1],dtype=numpy.float64)
assert allColGen.shape==allColRes.shape
print("Read data for {} collisions.".format(allColGen.size))
# Catastrophic collisions (a subset of all collisions).
lines=urllib2.urlopen('https://raw.githubusercontent.com/AleksanderLidtke/\
AnalyseCollisionFragments/master/CatColGenerated').read(500227*25) # Fewer lines for the subset of all collisions.
catColGen=numpy.array(lines.split('\n')[1:-1],dtype=numpy.float64)
lines=urllib2.urlopen('https://raw.githubusercontent.com/AleksanderLidtke/\
AnalyseCollisionFragments/master/CatColResulting').read(500227*25)
catColRes=numpy.array(lines.split('\n')[1:-1],dtype=numpy.float64)
assert catColGen.shape==catColRes.shape
print("Read data for {} catastrophic collisions.".format(catColGen.size))
# Compute the ratios.
allRatios=allColRes/allColGen
catRatios=catColRes/catColGen
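# Illustration (sketch): the power law mentioned in the text is often quoted in the
# NASA standard breakup model form, N(>Lc) = 0.1 * M**0.75 * Lc**(-1.71) for
# catastrophic collisions (M = combined mass in kg, Lc in metres). The constants here
# are an assumption for illustration only, not taken from the analysed data.
def fragmentsLargerThan(Lc, M):
    return 0.1*(M**0.75)*(Lc**(-1.71))
print("e.g. M = 1000 kg gives {:.0f} fragments of at least 10 cm".format(fragmentsLargerThan(0.1, 1000.0)))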
# Plot.
fig=matplotlib.pyplot.figure(figsize=(12,8))
ax=fig.gca()
matplotlib.pyplot.grid(linewidth=1)
ax.set_xlabel(r"$Time\ (s)$",fontsize=labelsFontSize)
ax.set_ylabel(r"$Response\ (-)$",fontsize=labelsFontSize)
ax.set_xlim(0,7)
ax.set_ylim(-2,2)
ax.plot(allColGen,allRatios,alpha=1.0,label=r"$All\ collisions$",marker='o',c='k',markersize=1,mew=0,lw=0)
ax.plot(catColGen,catRatios,alpha=1.0,label=r"$Catastrophic$",marker='x',c='r',markersize=1,mew=2,lw=0)
ax.set_xlabel(r"$No.\ generated\ fragments\ \geq10\ cm$",fontsize=labelsFontSize)
ax.set_ylabel(r"$Resulting-to-generated\ ratio$",fontsize=labelsFontSize)
ax.set_xlim(0,12000)
ax.set_ylim(0,10)
ax.ticklabel_format(axis='x', style='sci', scilimits=(-2,-1))
ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5)
matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,top=0.95,bottom=0.1)
box=ax.get_position()
ax.set_position([box.x0+box.width*0.0,box.y0+box.height*0.05,box.width*0.99,box.height*0.88])
ax.legend(bbox_to_anchor=(0.5,1.14),loc='upper center',prop={'size':legendFontSize},fancybox=True,\
shadow=True,ncol=3)
fig.show()
bins=numpy.arange(0,allColGen.max(),500)
means=numpy.zeros(bins.size-1)
medians=numpy.zeros(bins.size-1)
meansCat=numpy.zeros(bins.size-1)
mediansCat=numpy.zeros(bins.size-1)
for i in range(bins.size-1):
means[i]=numpy.mean(allRatios[(allColGen>=bins[i]) & (allColGen<bins[i+1])])
medians[i]=numpy.median(allRatios[(allColGen>=bins[i]) & (allColGen<bins[i+1])])
meansCat[i]=numpy.mean(catRatios[(catColGen>=bins[i]) & (catColGen<bins[i+1])])
mediansCat[i]=numpy.median(catRatios[(catColGen>=bins[i]) & (catColGen<bins[i+1])])
# Plot.
fig=matplotlib.pyplot.figure(figsize=(14,8))
ax=fig.gca()
matplotlib.pyplot.grid(linewidth=2)
ax.plot(bins[:-1],means,alpha=1.0,label=r"$Mean,\ all$",marker=None,c='k',lw=3,ls='--')
ax.plot(bins[:-1],medians,alpha=1.0,label=r"$Median,\ all$",marker=None,c='k',lw=3,ls=':')
ax.plot(bins[:-1],meansCat,alpha=1.0,label=r"$Mean,\ catastrophic$",marker=None,c='r',lw=3,ls='--')
ax.plot(bins[:-1],mediansCat,alpha=1.0,label=r"$Median,\ catastrophic$",marker=None,c='r',lw=3,ls=':')
ax.set_xlabel(r"$No.\ generated\ fragments\ \geq10\ cm$",fontsize=labelsFontSize)
ax.set_ylabel(r"$Resulting-to-generated\ ratio$",fontsize=labelsFontSize)
ax.set_xlim(0,12000)
ax.set_ylim(0,1)
ax.ticklabel_format(axis='x', style='sci', scilimits=(-2,-1))
ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5)
matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,top=0.92,bottom=0.1)
box=ax.get_position()
ax.set_position([box.x0+box.width*0.0,box.y0+box.height*0.05,box.width*0.99,box.height*0.88])
ax.legend(bbox_to_anchor=(0.5,1.18),loc='upper center',prop={'size':legendFontSize},fancybox=True,\
shadow=True,ncol=2)
fig.show()
ratioBins=numpy.linspace(0,2,100)
# Get colours for every bin of the number of generated fragments.
cNorm=matplotlib.colors.Normalize(vmin=0, vmax=bins.size-1)
scalarMap=matplotlib.cm.ScalarMappable(norm=cNorm,cmap=cm)
histColours=[]
for i in range(0,bins.size-1):
histColours.append(scalarMap.to_rgba(i))
# Plot the histograms.
fig=matplotlib.pyplot.figure(figsize=(14,8))
ax=fig.gca()
matplotlib.pyplot.grid(linewidth=2)
ax.set_xlabel(r"$Resulting-to-generated\ ratio$",fontsize=labelsFontSize)
ax.set_ylabel(r"$Fraction\ of\ collisions$",fontsize=labelsFontSize)
for i in range(bins.size-1):
ax.hist(allRatios[(allColGen>=bins[i]) & (allColGen<bins[i+1])],\
ratioBins,normed=1,cumulative=1,histtype='step',ls='solid',\
color=histColours[i],label=r"${}-{},\ all$".format(bins[i],bins[i+1]))
ax.hist(catRatios[(catColGen>=bins[i]) & (catColGen<bins[i+1])],\
ratioBins,normed=1,cumulative=1,histtype='step',ls='dashed',\
color=histColours[i],label=r"${}-{},\ cat$".format(bins[i],bins[i+1]))
ax.set_xlim(0,2)
ax.ticklabel_format(axis='y', style='sci', scilimits=(-2,-1))
ax.tick_params(axis='both',reset=False,which='both',length=5,width=1.5)
matplotlib.pyplot.subplots_adjust(left=0.1,right=0.95,top=0.92,bottom=0.1)
box=ax.get_position()
ax.set_position([box.x0+box.width*0.0,box.y0+box.height*0.05,box.width*0.99,box.height*0.6])
ax.legend(bbox_to_anchor=(0.5,1.8),loc='upper center',prop={'size':legendFontSize},fancybox=True,\
shadow=True,ncol=5)
fig.show()
numpy.sum(allRatios>=2.0)/float(allRatios.size)*100
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First backward difference
Step2: First centered difference
Step3: Second forward difference
Step4: Second backward difference
Step5: Second centered difference
|
<ASSISTANT_TASK:>
Python Code:
def g(x):
resultado = - 0.1*x**4 - 0.15*x**3 - 0.5*x**2 - 0.25*x + 1.2
return resultado
def fx_adelante(f,x,h):
derivada = (f(x+h) - f(x))/h
return derivada
print('f\'(0.5) =', fx_adelante(g,0.5,0.25))
def fx_atras(f,x,h):
derivada = (f(x) - f(x-h))/h
return derivada
print('f\'(0.5) =', fx_atras(g,0.5,0.25))
def fx_centrada(f,x,h):
derivada = (fx_adelante(f,x,h) + fx_atras(f,x,h))/2
return derivada
print('f\'(0.5) =', fx_centrada(g,0.5,0.25))
def fxx_adelante(f,x,h):
derivada = (f(x+2*h) - 2*f(x+h) + f(x))/h**2
return derivada
print('f\'\'(0.5) =', fxx_adelante(g,0.5,0.25))
def fxx_atras(f,x,h):
derivada = (f(x) - 2*f(x-h) + f(x-2*h))/h**2
return derivada
print('f\'\'(0.5) =', fxx_atras(g,0.5,0.25))
def fxx_centrada(f,x,h):
    derivada = (f(x+h) - 2*f(x) + f(x-h))/h**2
    return derivada
print('f\'\'(0.5) =', fxx_centrada(g,0.5,0.25))
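# Analytic check (sketch): differentiating g by hand gives
# g'(x) = -0.4x^3 - 0.45x^2 - x - 0.25 and g''(x) = -1.2x^2 - 0.9x - 1,
# so the finite-difference estimates above can be compared with the exact values.
def gx_exacta(x):
    return -0.4*x**3 - 0.45*x**2 - x - 0.25
def gxx_exacta(x):
    return -1.2*x**2 - 0.9*x - 1.0
print('exact f\'(0.5) =', gx_exacta(0.5))
print('exact f\'\'(0.5) =', gxx_exacta(0.5))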
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'awi', 'awi-cm-1-0-hr', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Custom training
Step2: The Iris classification problem
Step3: Explore the data
Step4: From the first five rows of the data, notice the following.
Step5: Each label is associated with a string name such as "setosa", but machine learning typically relies on numeric values. Map the labels as follows.
Step6: Create a tf.data.Dataset
Step7: The make_csv_dataset function returns a tf.data.Dataset of (features, label) pairs, where features is a dictionary object:
Step8: Notice that values of similar features are grouped together, i.e. batched. Each example row's fields are appended to the corresponding feature array. Adjust batch_size to set the number of examples stored in these feature arrays.
Step10: To simplify the model-building step, create a function that repackages the features dictionary into a single array of shape (batch_size, num_features).
Step11: Then use the tf.data.Dataset.map method to pack the features of each (features, label) pair into the training dataset.
Step12: The features element of the dataset is now an array of shape (batch_size, num_features). Let's look at the first five examples.
Step13: Select the type of model
Step14: The activation function determines the output of each layer. These non-linearities are important; a model without activation functions would be equivalent to a single layer. There are many available activation functions, but ReLU is commonly used for hidden layers.
Step15: Each example returns a logit for each class.
Step16: tf.argmax returns the class with the largest predicted probability, but since the model hasn't been trained yet, these are not good predictions.
Step17: Train the model
Step18: Use the tf.GradientTape context to calculate the gradients used to optimize the model. See the eager execution guide for more details.
Step19: Create an optimizer
Step20: Use these values to compute a single optimization step.
Step21: Training loop
Step22: Visualize the loss function over time
Step23: Evaluate the model's effectiveness
Step24: Evaluate the model on the test dataset
Step25: We can see that the model predicted correctly on the last batch.
Step26: Use the trained model to make predictions
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import matplotlib.pyplot as plt
import tensorflow.compat.v1 as tf
print("텐서플로 버전: {}".format(tf.__version__))
print("즉시 실행: {}".format(tf.executing_eagerly()))
train_dataset_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv"
train_dataset_fp = tf.keras.utils.get_file(fname=os.path.basename(train_dataset_url),
origin=train_dataset_url)
print("데이터셋이 복사된 위치: {}".format(train_dataset_fp))
!head -n5 {train_dataset_fp}
# Column order in the CSV file
column_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
feature_names = column_names[:-1]
label_name = column_names[-1]
print("특성: {}".format(feature_names))
print("레이블: {}".format(label_name))
class_names = ['Iris setosa', 'Iris versicolor', 'Iris virginica']
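# Illustrative only (assumed from the ordering of class_names above): the integer
# labels in the CSV map to species names as described in the text.
label_to_name = dict(enumerate(class_names))  # {0: 'Iris setosa', 1: 'Iris versicolor', 2: 'Iris virginica'}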
batch_size = 32
train_dataset = tf.contrib.data.make_csv_dataset(
train_dataset_fp,
batch_size,
column_names=column_names,
label_name=label_name,
num_epochs=1)
features, labels = next(iter(train_dataset))
features
plt.scatter(features['petal_length'].numpy(),
features['sepal_length'].numpy(),
c=labels.numpy(),
cmap='viridis')
plt.xlabel("petal length")
plt.ylabel("sepal length");
def pack_features_vector(features, labels):
# Pack the features into a single array.
features = tf.stack(list(features.values()), axis=1)
return features, labels
train_dataset = train_dataset.map(pack_features_vector)
features, labels = next(iter(train_dataset))
print(features[:5])
model = tf.keras.Sequential([
tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)), # input shape required
tf.keras.layers.Dense(10, activation=tf.nn.relu),
tf.keras.layers.Dense(3)
])
predictions = model(features)
predictions[:5]
tf.nn.softmax(predictions[:5])
print("예측: {}".format(tf.argmax(predictions, axis=1)))
print(" 레이블: {}".format(labels))
def loss(model, x, y):
y_ = model(x)
return tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
l = loss(model, features, labels)
print("손실 테스트: {}".format(l))
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
global_step = tf.contrib.eager.Variable(0)
loss_value, grads = grad(model, features, labels)
print("단계: {}, 초기 손실: {}".format(global_step.numpy(),
loss_value.numpy()))
optimizer.apply_gradients(zip(grads, model.trainable_variables), global_step)
print("단계: {}, 손실: {}".format(global_step.numpy(),
loss(model, features, labels).numpy()))
## Note: Rerunning this cell reuses the same model variables.
from tensorflow import contrib
tfe = contrib.eager
# Keep results for plotting.
train_loss_results = []
train_accuracy_results = []
num_epochs = 201
for epoch in range(num_epochs):
epoch_loss_avg = tfe.metrics.Mean()
epoch_accuracy = tfe.metrics.Accuracy()
# Training loop - using batches of 32.
for x, y in train_dataset:
# Optimize the model.
loss_value, grads = grad(model, x, y)
optimizer.apply_gradients(zip(grads, model.trainable_variables),
global_step)
# Track progress.
epoch_loss_avg(loss_value) # 현재 배치 손실을 추가합니다.
# Compare the predicted label to the actual label.
epoch_accuracy(tf.argmax(model(x), axis=1, output_type=tf.int32), y)
# End of epoch.
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
if epoch % 50 == 0:
print("에포크 {:03d}: 손실: {:.3f}, 정확도: {:.3%}".format(epoch,
epoch_loss_avg.result(),
epoch_accuracy.result()))
fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
fig.suptitle('Training Metrics')
axes[0].set_ylabel("loss", fontsize=14)
axes[0].plot(train_loss_results)
axes[1].set_ylabel("Accuracy", fontsize=14)
axes[1].set_xlabel("epoch", fontsize=14)
axes[1].plot(train_accuracy_results);
test_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv"
test_fp = tf.keras.utils.get_file(fname=os.path.basename(test_url),
origin=test_url)
test_dataset = tf.contrib.data.make_csv_dataset(
test_fp,
batch_size,
column_names=column_names,
label_name='species',
num_epochs=1,
shuffle=False)
test_dataset = test_dataset.map(pack_features_vector)
test_accuracy = tfe.metrics.Accuracy()
for (x, y) in test_dataset:
logits = model(x)
prediction = tf.argmax(logits, axis=1, output_type=tf.int32)
test_accuracy(prediction, y)
print("테스트 세트 정확도: {:.3%}".format(test_accuracy.result()))
tf.stack([y,prediction],axis=1)
predict_dataset = tf.convert_to_tensor([
[5.1, 3.3, 1.7, 0.5,],
[5.9, 3.0, 4.2, 1.5,],
[6.9, 3.1, 5.4, 2.1]
])
predictions = model(predict_dataset)
for i, logits in enumerate(predictions):
class_idx = tf.argmax(logits).numpy()
p = tf.nn.softmax(logits)[class_idx]
name = class_names[class_idx]
print("예 {} 예측: {} ({:4.1f}%)".format(i, name, 100*p))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's set up REBOUNDx and add radiation_forces. We also have to set the speed of light in the units we want to use.
Step2: By default, the radiation_forces effect assumes the particle at index 0 is the source of the radiation. If you'd like to use a different one, or it's possible that the radiation source might move to a different index (e.g. with a custom merger routine), you can add a radiation_source flag to the appropriate particle like this
Step3: Here we show how to add two dust grains to the simulation in different ways. Let's first initialize their orbits. In both cases we use the orbital elements of Saturn's irregular satellite Phoebe, which the dust grains will inherit upon release (Tamayo et al. 2011). Since the dust grains don't interact with one another, putting them on top of each other is OK.
Step4: Now we add the grains' physical properties. In order for particles to feel radiation forces, we have to set their beta parameter. $\beta$ is the ratio of the radiation force to the gravitational force from the star (Burns et al. 1979). One can either set it directly
Step5: or we can calculate it from more fundamental parameters. REBOUNDx has a convenience function that takes the gravitational constant, speed of light, radiation source's mass and luminosity, and then the grain's physical radius, bulk density, and radiation pressure coefficient Q_pr (Burns et al. 1979; equals 1 in the limit that the grain size is >> the radiation's wavelength).
Step6: Now let's run for 100 years (about 3 Saturn orbits), and look at how the eccentricity varies over a Saturn year
|
<ASSISTANT_TASK:>
Python Code:
import rebound
import reboundx
import numpy as np
sim = rebound.Simulation()
sim.G = 6.674e-11 # SI units
sim.dt = 1.e4 # Initial timestep in sec.
sim.N_active = 2 # Make it so dust particles don't interact with one another gravitationally
sim.add(m=1.99e30, hash="Sun") # add Sun with mass in kg
sim.add(m=5.68e26, a=1.43e12, e=0.056, pomega = 0., f=0., hash="Saturn") # Add Saturn at pericenter
ps = sim.particles
rebx = reboundx.Extras(sim)
rf = rebx.load_force("radiation_forces")
rebx.add_force(rf)
rf.params["c"] = 3.e8
ps["Sun"].params["radiation_source"] = 1
a = 1.3e10 # in meters
e = 0.16
inc = 175*np.pi/180.
Omega = 0. # longitude of node
omega = 0. # argument of pericenter
f = 0. # true anomaly
# Add two dust grains with the same orbit
sim.add(primary=ps["Saturn"], a=a, e=e, inc=inc, Omega=Omega, omega=omega, f=f, hash="p1")
sim.add(primary=ps["Saturn"], a=a, e=e, inc=inc, Omega=Omega, omega=omega, f=f, hash="p2")
ps["p1"].params["beta"] = 0.01
grain_radius = 1.e-5 # grain radius in m
density = 1000. # kg/m^3 = 1g/cc
Q_pr = 1.
luminosity = 3.85e26 # Watts
ps["p2"].params["beta"] = rebx.rad_calc_beta(sim.G, rf.params["c"], ps[0].m, luminosity, grain_radius, density, Q_pr)
print("Particle 2's beta parameter = {0}".format(ps["p2"].params["beta"]))
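# Optional cross-check (a sketch added here, not part of the original example):
# Burns et al. (1979) give beta = 3*L*Q_pr / (16*pi*G*M*c*rho*s) for a spherical
# grain of radius s and bulk density rho, which should agree with rad_calc_beta.
beta_manual = 3.*luminosity*Q_pr/(16.*np.pi*sim.G*ps[0].m*rf.params["c"]*density*grain_radius)
print("Manually computed beta = {0}".format(beta_manual))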
yr = 365*24*3600 # s
Noutput = 1000
times = np.linspace(0,100.*yr, Noutput)
e1, e2 = np.zeros(Noutput), np.zeros(Noutput)
sim.move_to_com() # move to center of mass frame first
for i, time in enumerate(times):
sim.integrate(time)
e1[i] = ps["p1"].calculate_orbit(primary=ps["Saturn"]).e
e2[i] = ps["p2"].calculate_orbit(primary=ps["Saturn"]).e
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(15,5))
ax.plot(times/yr, e1, label=r"$\beta$={0:.1e}".format(ps["p1"].params["beta"]))
ax.plot(times/yr, e2, label=r"$\beta$={0:.1e}".format(ps["p2"].params["beta"]))
ax.set_xlabel('Time (yrs)', fontsize=24)
ax.set_ylabel('Eccentricity', fontsize=24)
plt.legend(fontsize=24)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quantization aware training with the Keras example
Step2: Train a model for MNIST without quantization aware training
Step3: Clone and fine-tune the pre-trained model with quantization aware training
Step4: Train and evaluate the model against the baseline
Step5: In this example, there is minimal to no loss in test accuracy after quantization aware training, compared to the baseline.
Step6: Create a quantized model for the TFLite backend
Step7: See the persistence of accuracy from TF to TFLite
Step8: Evaluate the quantized model and verify that the accuracy from TensorFlow persists to the TFLite backend.
Step9: See that the model is 4x smaller from quantization
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! pip install -q tensorflow
! pip install -q tensorflow-model-optimization
import tempfile
import os
import tensorflow as tf
from tensorflow import keras
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture.
model = keras.Sequential([
keras.layers.InputLayer(input_shape=(28, 28)),
keras.layers.Reshape(target_shape=(28, 28, 1)),
keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Flatten(),
keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=1,
validation_split=0.1,
)
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
# q_aware stands for for quantization aware.
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
q_aware_model.summary()
train_images_subset = train_images[0:1000] # out of 60000
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
batch_size=500, epochs=1, validation_split=0.1)
_, baseline_model_accuracy = model.evaluate(
test_images, test_labels, verbose=0)
_, q_aware_model_accuracy = q_aware_model.evaluate(
test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
print('Quant test accuracy:', q_aware_model_accuracy)
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()
import numpy as np
def evaluate_model(interpreter):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the "test" dataset.
prediction_digits = []
for i, test_image in enumerate(test_images):
if i % 1000 == 0:
print('Evaluated on {n} results so far.'.format(n=i))
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output()[0])
prediction_digits.append(digit)
print('\n')
# Compare prediction results with ground truth labels to calculate accuracy.
prediction_digits = np.array(prediction_digits)
accuracy = (prediction_digits == test_labels).mean()
return accuracy
interpreter = tf.lite.Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
test_accuracy = evaluate_model(interpreter)
print('Quant TFLite test_accuracy:', test_accuracy)
print('Quant TF test accuracy:', q_aware_model_accuracy)
# Create float TFLite model.
float_converter = tf.lite.TFLiteConverter.from_keras_model(model)
float_tflite_model = float_converter.convert()
# Measure sizes of models.
_, float_file = tempfile.mkstemp('.tflite')
_, quant_file = tempfile.mkstemp('.tflite')
with open(quant_file, 'wb') as f:
f.write(quantized_tflite_model)
with open(float_file, 'wb') as f:
f.write(float_tflite_model)
print("Float model in Mb:", os.path.getsize(float_file) / float(2**20))
print("Quantized model in Mb:", os.path.getsize(quant_file) / float(2**20))
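# Optional sanity check (a small sketch, not part of the original tutorial):
# quantization stores weights as 8-bit integers instead of 32-bit floats,
# so the quantized file is expected to be roughly 4x smaller.
ratio = os.path.getsize(float_file) / float(os.path.getsize(quant_file))
print("Compression ratio (float / quantized): {:.2f}x".format(ratio))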
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Introduction
Step2: After being fitted, the model can then be used to predict new values
Step3: The SVM's decision function depends on some subset of the training data, called the support vectors. Some properties of these support vectors can be found in the attributes support_vectors_, support_ and n_support_
Step4: 3.2.1 Example
Step5: Now, we select two classes and two attributes.
Step6: 3.2.2. Data normalization
Step7: Now, we can normalize training and test data. Observe in the code that the same transformation should be applied to training and test data. This is the reason why normalization with the test data is done using the means and the variances computed with the training set.
Step8: The following code generates a scatter plot of the normalized training data.
Step9: In order to apply the gradient descent rule, we need to define two methods
Step10: We can test the behavior of the gradient descent method by fitting a logistic regression model with ${\bf z}({\bf x}) = (1, {\bf x}^\intercal)^\intercal$.
Step11: 3.2.3. Free parameters
Step12: 3.2.5. Polynomial Logistic Regression
|
<ASSISTANT_TASK:>
Python Code:
# To visualize plots in the notebook
%matplotlib inline
# Imported libraries
import csv
import random
#import matplotlib
import matplotlib.pyplot as plt
#import pylab
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import PolynomialFeatures
#from sklearn import linear_model
from sklearn import svm
X = [[0, 0], [1, 1]]
y = [0, 1]
clf = svm.SVC()
clf.fit(X, y)
clf.predict([[2., 2.]])
# get support vectors
print clf.support_vectors_
# get indices of support vectors
print clf.support_
# get number of support vectors for each class
print clf.n_support_
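# A small additional illustration (assumed, not in the original notebook): the
# signed distance of a point to the separating hyperplane is exposed through
# the decision function.
print clf.decision_function([[2., 2.]])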
# Adapted from a notebook by Jason Brownlee
def loadDataset(filename, split):
xTrain = []
cTrain = []
xTest = []
cTest = []
with open(filename, 'rb') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for i in range(len(dataset)-1):
for y in range(4):
dataset[i][y] = float(dataset[i][y])
item = dataset[i]
if random.random() < split:
xTrain.append(item[0:4])
cTrain.append(item[4])
else:
xTest.append(item[0:4])
cTest.append(item[4])
return xTrain, cTrain, xTest, cTest
with open('iris.data', 'rb') as csvfile:
lines = csv.reader(csvfile)
xTrain_all, cTrain_all, xTest_all, cTest_all = loadDataset('iris.data', 0.66)
nTrain_all = len(xTrain_all)
nTest_all = len(xTest_all)
print 'Train: ' + str(nTrain_all)
print 'Test: ' + str(nTest_all)
# Select attributes
i = 0 # Try 0,1,2,3
j = 1 # Try 0,1,2,3 with j!=i
# Select two classes
c0 = 'Iris-versicolor'
c1 = 'Iris-virginica'
# Select two coordinates
ind = [i, j]
# Take training test
X_tr = np.array([[xTrain_all[n][i] for i in ind] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1])
C_tr = [cTrain_all[n] for n in range(nTrain_all)
if cTrain_all[n]==c0 or cTrain_all[n]==c1]
Y_tr = np.array([int(c==c1) for c in C_tr])
n_tr = len(X_tr)
# Take test set
X_tst = np.array([[xTest_all[n][i] for i in ind] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1])
C_tst = [cTest_all[n] for n in range(nTest_all)
if cTest_all[n]==c0 or cTest_all[n]==c1]
Y_tst = np.array([int(c==c1) for c in C_tst])
n_tst = len(X_tst)
def normalize(X, mx=None, sx=None):
# Compute means and standard deviations
if mx is None:
mx = np.mean(X, axis=0)
if sx is None:
sx = np.std(X, axis=0)
# Normalize
X0 = (X-mx)/sx
return X0, mx, sx
# Normalize data
Xn_tr, mx, sx = normalize(X_tr)
Xn_tst, mx, sx = normalize(X_tst, mx, sx)
# Separate components of x into different arrays (just for the plots)
x0c0 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==0]
x1c0 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==0]
x0c1 = [Xn_tr[n][0] for n in range(n_tr) if Y_tr[n]==1]
x1c1 = [Xn_tr[n][1] for n in range(n_tr) if Y_tr[n]==1]
# Scatterplot.
labels = {'Iris-setosa': 'Setosa',
'Iris-versicolor': 'Versicolor',
'Iris-virginica': 'Virginica'}
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
def logistic(x):
    # Logistic (sigmoid) function used by logregFit and logregPredict below.
    return 1.0 / (1.0 + np.exp(-x))

def logregFit(Z_tr, Y_tr, rho, n_it):
# Data dimension
n_dim = Z_tr.shape[1]
# Initialize variables
nll_tr = np.zeros(n_it)
pe_tr = np.zeros(n_it)
w = np.random.randn(n_dim,1)
# Running the gradient descent algorithm
for n in range(n_it):
# Compute posterior probabilities for weight w
p1_tr = logistic(np.dot(Z_tr, w))
p0_tr = logistic(-np.dot(Z_tr, w))
# Compute negative log-likelihood
nll_tr[n] = - np.dot(Y_tr.T, np.log(p1_tr)) - np.dot((1-Y_tr).T, np.log(p0_tr))
# Update weights
w += rho*np.dot(Z_tr.T, Y_tr - p1_tr)
return w, nll_tr
def logregPredict(Z, w):
# Compute posterior probability of class 1 for weights w.
p = logistic(np.dot(Z, w))
# Class
D = [int(round(pn)) for pn in p]
return p, D
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 200 # Number of iterations
# Compute Z's
Z_tr = np.c_[np.ones(n_tr), Xn_tr]
Z_tst = np.c_[np.ones(n_tst), Xn_tst]
n_dim = Z_tr.shape[1]
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The optimal weights are:"
print w
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
# Create a regtangular grid.
x_min, x_max = Xn_tr[:, 0].min(), Xn_tr[:, 0].max()
y_min, y_max = Xn_tr[:, 1].min(), Xn_tr[:, 1].max()
dx = x_max - x_min
dy = y_max - y_min
h = dy /400
xx, yy = np.meshgrid(np.arange(x_min - 0.1 * dx, x_max + 0.1 * dx, h),
np.arange(y_min - 0.1 * dx, y_max + 0.1 * dy, h))
X_grid = np.array([xx.ravel(), yy.ravel()]).T
# Compute Z's
Z_grid = np.c_[np.ones(X_grid.shape[0]), X_grid]
# Compute the classifier output for all samples in the grid.
pp, dd = logregPredict(Z_grid, w)
# Put the result into a color plot
plt.plot(x0c0, x1c0,'r.', label=labels[c0])
plt.plot(x0c1, x1c1,'g+', label=labels[c1])
plt.xlabel('$x_' + str(ind[0]) + '$')
plt.ylabel('$x_' + str(ind[1]) + '$')
plt.legend(loc='best')
plt.axis('equal')
pp = pp.reshape(xx.shape)
plt.contourf(xx, yy, pp, cmap=plt.cm.copper)
# Parameters of the algorithms
rho = float(1)/50 # Learning step
n_it = 500 # Number of iterations
g = 5 # Degree of polynomial
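# PolynomialFeatures(degree=g) maps each sample (x1, x2) to all monomials up to
# degree g, e.g. for g=2: [1, x1, x2, x1^2, x1*x2, x2^2], so the logistic
# regression below stays linear in these expanded features.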
# Compute Z_tr
poly = PolynomialFeatures(degree=g)
Z_tr = poly.fit_transform(Xn_tr)
# Normalize columns (this is useful to make algorithms more stable).
Zn, mz, sz = normalize(Z_tr[:,1:])
Z_tr = np.concatenate((np.ones((n_tr,1)), Zn), axis=1)
# Compute Z_tst
Z_tst = poly.fit_transform(Xn_tst)
Zn, mz, sz = normalize(Z_tst[:,1:], mz, sz)
Z_tst = np.concatenate((np.ones((n_tst,1)), Zn), axis=1)
# Convert target arrays to column vectors
Y_tr2 = Y_tr[np.newaxis].T
Y_tst2 = Y_tst[np.newaxis].T
# Running the gradient descent algorithm
w, nll_tr = logregFit(Z_tr, Y_tr2, rho, n_it)
# Classify training and test data
p_tr, D_tr = logregPredict(Z_tr, w)
p_tst, D_tst = logregPredict(Z_tst, w)
# Compute error rates
E_tr = D_tr!=Y_tr
E_tst = D_tst!=Y_tst
# Error rates
pe_tr = float(sum(E_tr)) / n_tr
pe_tst = float(sum(E_tst)) / n_tst
# NLL plot.
plt.plot(range(n_it), nll_tr,'b.:', label='Train')
plt.xlabel('Iteration')
plt.ylabel('Negative Log-Likelihood')
plt.legend()
print "The optimal weights are:"
print w
print "The final error rates are:"
print "- Training: " + str(pe_tr)
print "- Test: " + str(pe_tst)
print "The NLL after training is " + str(nll_tr[len(nll_tr)-1])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Working with tff's ClientData.
Step2: Manipulating a ClientData object
Step3: Inspecting the first dataset can tell us what type of examples are in the ClientData.
Step5: Note that the dataset yields collections.OrderedDict objects that have pixels and label keys, where pixels is a tensor with shape [28, 28]. Suppose we wish to flatten our inputs out to shape [784]. One possible way we can do this would be to apply a pre-processing function to our ClientData object.
Step7: We may want in addition to perform some more complex (and possibly stateful) preprocessing, for example shuffling.
Step8: Interfacing with a tff.Computation
Step9: Before we begin working with this IterativeProcess, one comment on the semantics of ClientData is in order. A ClientData object represents the entirety of the population available for federated training, which in general is not available to the execution environment of a production FL system and is specific to simulation. ClientData indeed gives the user the capacity to bypass federated computing entirely and simply train a server-side model as usual via ClientData.create_tf_dataset_from_all_clients.
Step10: If we take this route, however, we will be unable to trivially move to multimachine simulation. The datasets we construct in the local TensorFlow runtime can capture state from the surrounding python environment, and fail in serialization or deserialization when they attempt to reference state which is no longer available to them. This can manifest for example in the inscrutable error from TensorFlow's tensor_util.cc
Step11: We could invoke dataset_computation and receive an eager dataset in the Python runtime, but the real power of this approach is exercised when we compose with an iterative process or another computation to avoid materializing these datasets in the global eager runtime at all. TFF provides a helper function tff.simulation.compose_dataset_computation_with_iterative_process which can be used to do exactly this.
Step12: Both this tff.templates.IterativeProcesses and the one above run the same way; but former accepts preprocessed client datasets, and the latter accepts strings representing client ids, handling both dataset construction and preprocessing in its body--in fact state can be passed between the two.
Step13: Scaling to large numbers of clients
Step14: In fact, this is effectively what the helper we used is doing under the hood (plus performing appropriate type checking and manipulation). We could even have expressed the same logic slightly differently, by serializing preprocess_and_shuffle into a tff.Computation, and decomposing the federated_map into one step which constructs un-preprocessed datasets and another which runs preprocess_and_shuffle at each client.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title Set up open-source environment
#@test {"skip": true}
# tensorflow_federated_nightly also bring in tf_nightly, which
# can causes a duplicate tensorboard install, leading to errors.
!pip uninstall --yes tensorboard tb-nightly
!pip install --quiet --upgrade tensorflow_federated
!pip install --quiet --upgrade nest_asyncio
import nest_asyncio
nest_asyncio.apply()
#@title Import packages
import collections
import time
import tensorflow as tf
import tensorflow_federated as tff
client_data, _ = tff.simulation.datasets.emnist.load_data()
first_client_id = client_data.client_ids[0]
first_client_dataset = client_data.create_tf_dataset_for_client(
first_client_id)
print(first_client_dataset.element_spec)
# This information is also available as a `ClientData` property:
assert client_data.element_type_structure == first_client_dataset.element_spec
def preprocess_dataset(dataset):
  """Create batches of 5 examples, and limit to 3 batches."""
def map_fn(input):
return collections.OrderedDict(
x=tf.reshape(input['pixels'], shape=(-1, 784)),
y=tf.cast(tf.reshape(input['label'], shape=(-1, 1)), tf.int64),
)
return dataset.batch(5).map(
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE).take(5)
preprocessed_client_data = client_data.preprocess(preprocess_dataset)
# Notice that we have both reshaped and renamed the elements of the ordered dict.
first_client_dataset = preprocessed_client_data.create_tf_dataset_for_client(
first_client_id)
print(first_client_dataset.element_spec)
def preprocess_and_shuffle(dataset):
  """Applies `preprocess_dataset` above and shuffles the result."""
preprocessed = preprocess_dataset(dataset)
return preprocessed.shuffle(buffer_size=5)
preprocessed_and_shuffled = client_data.preprocess(preprocess_and_shuffle)
# The type signature will remain the same, but the batches will be shuffled.
first_client_dataset = preprocessed_and_shuffled.create_tf_dataset_for_client(
first_client_id)
print(first_client_dataset.element_spec)
def model_fn():
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
])
return tff.learning.from_keras_model(
model,
# Note: input spec is the _batched_ shape, and includes the
# label tensor which will be passed to the loss function. This model is
# therefore configured to accept data _after_ it has been preprocessed.
input_spec=collections.OrderedDict(
x=tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
y=tf.TensorSpec(shape=[None, 1], dtype=tf.int64)),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
trainer = tff.learning.build_federated_averaging_process(
model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.01))
selected_client_ids = preprocessed_and_shuffled.client_ids[:10]
preprocessed_data_for_clients = [
preprocessed_and_shuffled.create_tf_dataset_for_client(
selected_client_ids[i]) for i in range(10)
]
state = trainer.initialize()
for _ in range(5):
t1 = time.time()
state, metrics = trainer.next(state, preprocessed_data_for_clients)
t2 = time.time()
print('loss {}, round time {}'.format(metrics['train']['loss'], t2 - t1))
print('dataset computation without preprocessing:')
print(client_data.dataset_computation.type_signature)
print('\n')
print('dataset computation with preprocessing:')
print(preprocessed_and_shuffled.dataset_computation.type_signature)
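# compose_dataset_computation_with_iterative_process wraps the trainer so that
# its next() accepts raw client ids; dataset construction and preprocessing then
# run inside the TFF computation rather than in the local Python runtime.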
trainer_accepting_ids = tff.simulation.compose_dataset_computation_with_iterative_process(
preprocessed_and_shuffled.dataset_computation, trainer)
for _ in range(5):
t1 = time.time()
state, metrics = trainer_accepting_ids.next(state, selected_client_ids)
t2 = time.time()
print('loss {}, round time {}'.format(metrics['train']['loss'], t2 - t1))
selected_clients_type = tff.FederatedType(preprocessed_and_shuffled.dataset_computation.type_signature.parameter, tff.CLIENTS)
@tff.federated_computation(trainer.next.type_signature.parameter[0], selected_clients_type)
def new_next(server_state, selected_clients):
preprocessed_data = tff.federated_map(preprocessed_and_shuffled.dataset_computation, selected_clients)
return trainer.next(server_state, preprocessed_data)
manual_trainer_with_preprocessing = tff.templates.IterativeProcess(initialize_fn=trainer.initialize, next_fn=new_next)
print(trainer_accepting_ids.next.type_signature)
print(manual_trainer_with_preprocessing.next.type_signature)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Randomly generate the row and column indices of the cells to set to NaN
Step2: Check for missing values
Step3: Drop missing values
Step4: Fill in missing values
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import pandas_datareader.data as web  # package for fetching time-series data such as stock prices from the web
from datetime import datetime
ticker = ['AMZN','FB','INTU']
start = '2012-01-01'#datetime(2016,1,1)
end = '2013-12-31'#datetime(2016,12,31)
df = web.DataReader(ticker,'google',start,end)['Close',:,:]
df2 = df.copy()
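# Randomly pick 50 (row, column) positions and overwrite those cells with NaN,
# so the frame contains artificial missing values to work with.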
rand_row = np.random.randint(1,len(df),50)
rand_col = np.random.randint(1,len(df.columns),50)
for i,j in zip(rand_row,rand_col):
df.iloc[i,j] = np.nan
df
df.isnull()
df.notnull()
df.count()
df.dropna()
df.dropna(axis=0)
df.dropna(axis=1)
df.dropna(axis=0,how='any')
df.dropna(axis=1,how='all')
for i in rand_row:
df.iloc[i] = np.nan
df
df.dropna(axis=0,how='all')
df.fillna(0)
df.fillna({'FB':0,'INTU':100})
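# method='ffill'/'pad' propagates the last valid observation forward, while
# 'bfill'/'backfill' fills from the next valid one; 'limit' caps how many
# consecutive NaNs are filled, and axis=1 fills along rows instead of columns.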
df.fillna(method='ffill')
df.fillna(method='bfill')
df.fillna(method='backfill',limit=70)
df.fillna(method='ffill',axis=1)
df.fillna(method='pad',inplace=True)
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: the original reaclib source
Step2: evaluate the rate at a given temperature (in K)
Step3: a human readable string describing the rate, and the nuclei involved
Step4: get the temperature sensitivity about some reference T
Step5: plot the rate's temperature dependence
Step6: the form of the rate with density and composition weighting -- this is what appears in a dY/dt equation
Step7: output a python function that can evaluate the rate (T-dependence part)
Step8: working with a group of rates
Step9: print an overview of the network described by this rate collection
Step10: show a network diagram
Step11: write a function containing the ODEs that evolve this network
|
<ASSISTANT_TASK:>
Python Code:
r = reaclib.Rate("reaclib-rates/c13-pg-n14-nacr")
print(r.original_source)
r.eval(1.e9)
print(r)
print(r.reactants)
print(r.products)
print(r.get_rate_exponent(2.e7))
r.plot()
print(r.ydot_string())
print(r.function_string())
files = ["c12-pg-n13-ls09",
"c13-pg-n14-nacr",
"n13--c13-wc12",
"n13-pg-o14-lg06",
"n14-pg-o15-im05",
"n15-pa-c12-nacr",
"o14--n14-wc12",
"o15--n15-wc12"]
rc = reaclib.RateCollection(files)
print(rc)
rc.print_network_overview()
rc.plot()
rc.make_network("test.py")
# %load test.py
import numpy as np
import reaclib
ip = 0
ihe4 = 1
ic12 = 2
ic13 = 3
in13 = 4
in14 = 5
in15 = 6
io14 = 7
io15 = 8
nnuc = 9
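# A holds the mass number of each nuclide, indexed by the species constants above.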
A = np.zeros((nnuc), dtype=np.int32)
A[ip] = 1
A[ihe4] = 4
A[ic12] = 12
A[ic13] = 13
A[in13] = 13
A[in14] = 14
A[in15] = 15
A[io14] = 14
A[io15] = 15
def o15_n15(tf):
# o15 --> n15
rate = 0.0
# wc12w
rate += np.exp( -5.17053)
return rate
def n15_pa_c12(tf):
# p + n15 --> he4 + c12
rate = 0.0
# nacrn
rate += np.exp( 27.4764 + -15.253*tf.T913i + 1.59318*tf.T913
+ 2.4479*tf.T9 + -2.19708*tf.T953 + -0.666667*tf.lnT9)
# nacrr
rate += np.exp( -6.57522 + -1.1638*tf.T9i + 22.7105*tf.T913
+ -2.90707*tf.T9 + 0.205754*tf.T953 + -1.5*tf.lnT9)
# nacrr
rate += np.exp( 20.8972 + -7.406*tf.T9i
+ -1.5*tf.lnT9)
# nacrr
rate += np.exp( -4.87347 + -2.02117*tf.T9i + 30.8497*tf.T913
+ -8.50433*tf.T9 + -1.54426*tf.T953 + -1.5*tf.lnT9)
return rate
def c13_pg_n14(tf):
# p + c13 --> n14
rate = 0.0
# nacrn
rate += np.exp( 18.5155 + -13.72*tf.T913i + -0.450018*tf.T913
+ 3.70823*tf.T9 + -1.70545*tf.T953 + -0.666667*tf.lnT9)
# nacrr
rate += np.exp( 13.9637 + -5.78147*tf.T9i + -0.196703*tf.T913
+ 0.142126*tf.T9 + -0.0238912*tf.T953 + -1.5*tf.lnT9)
# nacrr
rate += np.exp( 15.1825 + -13.5543*tf.T9i
+ -1.5*tf.lnT9)
return rate
def c12_pg_n13(tf):
# p + c12 --> n13
rate = 0.0
# ls09n
rate += np.exp( 17.1482 + -13.692*tf.T913i + -0.230881*tf.T913
+ 4.44362*tf.T9 + -3.15898*tf.T953 + -0.666667*tf.lnT9)
# ls09r
rate += np.exp( 17.5428 + -3.77849*tf.T9i + -5.10735*tf.T913i + -2.24111*tf.T913
+ 0.148883*tf.T9 + -1.5*tf.lnT9)
return rate
def n13_pg_o14(tf):
# p + n13 --> o14
rate = 0.0
# lg06n
rate += np.exp( 18.1356 + -15.1676*tf.T913i + 0.0955166*tf.T913
+ 3.0659*tf.T9 + -0.507339*tf.T953 + -0.666667*tf.lnT9)
# lg06r
rate += np.exp( 10.9971 + -6.12602*tf.T9i + 1.57122*tf.T913i
+ -1.5*tf.lnT9)
return rate
def n14_pg_o15(tf):
# p + n14 --> o15
rate = 0.0
# im05n
rate += np.exp( 17.01 + -15.193*tf.T913i + -0.161954*tf.T913
+ -7.52123*tf.T9 + -0.987565*tf.T953 + -0.666667*tf.lnT9)
# im05r
rate += np.exp( 6.73578 + -4.891*tf.T9i
+ 0.0682*tf.lnT9)
# im05r
rate += np.exp( 7.65444 + -2.998*tf.T9i
+ -1.5*tf.lnT9)
# im05n
rate += np.exp( 20.1169 + -15.193*tf.T913i + -4.63975*tf.T913
+ 9.73458*tf.T9 + -9.55051*tf.T953 + 0.333333*tf.lnT9)
return rate
def o14_n14(tf):
# o14 --> n14
rate = 0.0
# wc12w
rate += np.exp( -4.62354)
return rate
def n13_c13(tf):
# n13 --> c13
rate = 0.0
# wc12w
rate += np.exp( -6.7601)
return rate
def rhs(t, Y, rho, T):
tf = reaclib.Tfactors(T)
lambda_o15_n15 = o15_n15(tf)
lambda_n15_pa_c12 = n15_pa_c12(tf)
lambda_c13_pg_n14 = c13_pg_n14(tf)
lambda_c12_pg_n13 = c12_pg_n13(tf)
lambda_n13_pg_o14 = n13_pg_o14(tf)
lambda_n14_pg_o15 = n14_pg_o15(tf)
lambda_o14_n14 = o14_n14(tf)
lambda_n13_c13 = n13_c13(tf)
dYdt = np.zeros((nnuc), dtype=np.float64)
dYdt[ip] = (
-rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
-rho*Y[ic13]*Y[ip]*lambda_c13_pg_n14
-rho*Y[ip]*Y[ic12]*lambda_c12_pg_n13
-rho*Y[ip]*Y[in13]*lambda_n13_pg_o14
-rho*Y[ip]*Y[in14]*lambda_n14_pg_o15
)
dYdt[ihe4] = (
+rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
)
dYdt[ic12] = (
-rho*Y[ip]*Y[ic12]*lambda_c12_pg_n13
+rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
)
dYdt[ic13] = (
-rho*Y[ic13]*Y[ip]*lambda_c13_pg_n14
+Y[in13]*lambda_n13_c13
)
dYdt[in13] = (
-rho*Y[ip]*Y[in13]*lambda_n13_pg_o14
-Y[in13]*lambda_n13_c13
+rho*Y[ip]*Y[ic12]*lambda_c12_pg_n13
)
dYdt[in14] = (
-rho*Y[ip]*Y[in14]*lambda_n14_pg_o15
+rho*Y[ic13]*Y[ip]*lambda_c13_pg_n14
+Y[io14]*lambda_o14_n14
)
dYdt[in15] = (
-rho*Y[ip]*Y[in15]*lambda_n15_pa_c12
+Y[io15]*lambda_o15_n15
)
dYdt[io14] = (
-Y[io14]*lambda_o14_n14
+rho*Y[ip]*Y[in13]*lambda_n13_pg_o14
)
dYdt[io15] = (
-Y[io15]*lambda_o15_n15
+rho*Y[ip]*Y[in14]*lambda_n14_pg_o15
)
return dYdt
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Contacted cones per bipolar cell (Figure 3B)
Step2: Contacted bipolar cells per cone (Figure 3D)
Step3: Total number of contacted BC per cone
Step4: Alternative S-cone selection
Step5: Contacted cones per bipolar cell (alternative S-cone selection, supp. figure 5B)
Step6: Contacted bipolar cells per cone (alternative S-cone selection, supp. figure 5C)
Step7: Contacts between rod bipolar cells and cones
Step8: Color selectivity of rod bipolar cells (Figure 5D)
Step9: Contacted rod bipolar cells per cone (Figure 5E)
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.stats import itemfreq
import matplotlib
import matplotlib.pyplot as plt
from scipy.io import loadmat
import pandas as pd
import seaborn as sns
from sklearn import cross_validation
from sklearn import svm
from sklearn.decomposition import PCA
%matplotlib inline
matplotlib.rc('font',**{'family':'sans-serif','sans-serif':['Arial']})
matplotlib.rcParams.update({'mathtext.default': 'regular'})
matplotlib.rcParams.update({'font.size': 14})
sns.set_style("whitegrid")
BC_ids=np.loadtxt('data/BC_IDs_new').astype(int)
contact_summary=pd.read_pickle('data/cone_contact_predictions')
cut_off_cones=np.array([73,72,71,74,201,202,211,203,88,204,142,34,4,30,199])+2000
cones_outside_cbc9=np.array([193,209,200,198,197,199,73,72,71,74,69,67,66,211,86,88,87,120,85,204,84,207,128,114,126,127,125,142,130,104,106,175,135])+2000
BC_excluded=np.array([691,709,827,836])
blue_cones=np.array([2006,2007,2009,2024,2028,2029])
all_cones=np.unique(contact_summary['cone'].as_matrix())
green_cones=all_cones[(np.in1d(all_cones,blue_cones,invert=True))&(np.in1d(all_cones,cones_outside_cbc9,invert=True))]
true_contacts_green=contact_summary.ix[(contact_summary['prediction']==1)&(contact_summary['cone_type']=='green'),:4]
true_contacts_blue=contact_summary.ix[(contact_summary['prediction']==1)&(contact_summary['cone_type']=='blue'),:4]
true_contacts=contact_summary.ix[(contact_summary['prediction']==1)]
stat_bc_contacts_green=pd.DataFrame(BC_ids[(BC_ids[:,4]>=58)&(BC_ids[:,4]<=71)&np.in1d(BC_ids[:,0],BC_excluded,invert=True)][:,[0,4]],columns=['cell','type'])
stat_bc_contacts_blue=stat_bc_contacts_green.copy()
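# For every bipolar cell, count how many M-cones (resp. S-cones) it contacts by
# tallying its occurrences in the predicted-contact tables built above.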
green_contact_freq_type=itemfreq(true_contacts_green['cell'].as_matrix())
for i in range(stat_bc_contacts_green.shape[0]):
stat_bc_contacts_green.loc[i,'count']=0
try:
stat_bc_contacts_green.ix[i,'count']=green_contact_freq_type[green_contact_freq_type[:,0]==stat_bc_contacts_green.ix[i,'cell'],1]
except ValueError:
continue
blue_contact_freq_type=itemfreq(true_contacts_blue['cell'].as_matrix())
for i in range(stat_bc_contacts_blue.shape[0]):
stat_bc_contacts_blue.loc[i,'count']=0
try:
stat_bc_contacts_blue.ix[i,'count']=blue_contact_freq_type[blue_contact_freq_type[:,0]==stat_bc_contacts_blue.ix[i,'cell'],1]
except ValueError:
continue
stat_bc_contacts_all=pd.DataFrame(BC_ids[(BC_ids[:,4]>=58)&(BC_ids[:,4]<=71)&np.in1d(BC_ids[:,0],BC_excluded,invert=True)][:,[0,4]],columns=['cell','type'])
all_contact_freq_type=itemfreq(true_contacts['cell'].as_matrix())
for i in range(stat_bc_contacts_all.shape[0]):
stat_bc_contacts_all.loc[i,'count']=0
try:
stat_bc_contacts_all.ix[i,'count']=all_contact_freq_type[all_contact_freq_type[:,0]==stat_bc_contacts_all.ix[i,'cell'],1]
except ValueError:
continue
for i in range(stat_bc_contacts_all.shape[0]):
stat_bc_contacts_all.loc[i,'count_rounded']=np.clip((stat_bc_contacts_all.ix[i,'count']+1)//2,0,4)
stat_cone_contacts_all=pd.DataFrame(np.concatenate((np.tile(all_cones,14).reshape(-1,1),np.repeat(np.arange(58,72),all_cones.shape[0]).reshape(-1,1)),axis=1),columns=['cone','type'])
for i in range(stat_cone_contacts_all.shape[0]):
stat_cone_contacts_all.loc[i,'count']=np.sum((true_contacts['cone']==stat_cone_contacts_all.ix[i,'cone'])&\
(true_contacts['type']==stat_cone_contacts_all.ix[i,'type']))
stat_cone_contacts_green=pd.DataFrame(np.concatenate((np.tile(green_cones,14).reshape(-1,1),np.repeat(np.arange(58,72),green_cones.shape[0]).reshape(-1,1)),axis=1),columns=['cone','type'])
stat_cone_contacts_blue=pd.DataFrame(np.concatenate((np.tile(blue_cones,14).reshape(-1,1),np.repeat(np.arange(58,72),blue_cones.shape[0]).reshape(-1,1)),axis=1),columns=['cone','type'])
for i in range(stat_cone_contacts_green.shape[0]):
stat_cone_contacts_green.loc[i,'count']=np.sum((true_contacts_green['cone']==stat_cone_contacts_green.ix[i,'cone'])&\
(true_contacts_green['type']==stat_cone_contacts_green.ix[i,'type']))
for i in range(stat_cone_contacts_blue.shape[0]):
stat_cone_contacts_blue.loc[i,'count']=np.sum((true_contacts_blue['cone']==stat_cone_contacts_blue.ix[i,'cone'])&\
(true_contacts_blue['type']==stat_cone_contacts_blue.ix[i,'type']))
stat_cone_contacts_joined=pd.concat({'S-cones': stat_cone_contacts_blue, 'M-cones': stat_cone_contacts_green})
stat_cone_contacts_joined=stat_cone_contacts_joined.reset_index().drop('level_1',axis=1).rename(columns={'level_0':'cone_type'})
stat_bc_contacts_joined=pd.concat({'S-cones': stat_bc_contacts_blue, 'M-cones': stat_bc_contacts_green})
stat_bc_contacts_joined=stat_bc_contacts_joined.reset_index().drop('level_1',axis=1).rename(columns={'level_0':'cone_type'})
stat_cone_contacts_joined_no_rbc=stat_cone_contacts_joined[stat_cone_contacts_joined['type']!=71]
stat_bc_contacts_joined_no_rbc=stat_bc_contacts_joined[stat_bc_contacts_joined['type']!=71]
labels = ['1','2','3A','3B','4','5T','5O','5I','X','6','7','8','9']
sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4})
with matplotlib.rc_context({"lines.linewidth": 0.7}):
plt.figure(figsize=(8/2.54,4/2.54))
ax=sns.barplot(x='type',y='count',data=stat_bc_contacts_joined_no_rbc,hue='cone_type',order=np.arange(58,71),hue_order=['S-cones','M-cones'],ci=95)
ax.set_xticklabels(labels)
ax.set(ylabel='# cones',ylim=(0,10),xlabel='BC types')
ax.spines['left'].set_position(('outward',3))
legend=plt.legend(loc='upper left')
legend.get_title().set_fontsize(8)
sns.despine()
# plt.savefig('figures/joined_contacts_per_bc.svg',bbox_inches='tight',dpi=300)
plt.show()
labels = ['1','2','3A','3B','4','5T','5O','5I','X','6','7','8','9']
sns.set(font='Arial',context='paper',style='white',rc={"xtick.major.size": 0, "ytick.major.size": 4})
with matplotlib.rc_context({"lines.linewidth": 0.7}):
plt.figure(figsize=(8/2.54,4/2.54))
ax=sns.barplot(x='type',y='count',data=stat_cone_contacts_joined_no_rbc,hue='cone_type',order=np.arange(58,71),hue_order=['S-cones','M-cones'],ci=95)
ax.set_xticklabels(labels)
ax.set(ylabel='# BCs',xlabel='BC types',ylim=(0,3.5),yticks=[0,1,2,3])
ax.spines['left'].set_position(('outward',3))
legend=plt.legend(bbox_to_anchor=(0.95, 1))
legend.get_title().set_fontsize(8)
sns.despine()
# plt.savefig('figures/joined_contacts_per_cone.svg',bbox_inches='tight',dpi=300)
plt.show()
cone_means=np.loadtxt('data/cone_means_rot.txt')
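# Restrict to 'central' cones: those at least 500 units away from the minimum and
# maximum of the two position columns, to avoid cells at the edge of the volume.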
central_cones=cone_means[(cone_means[:,2]>np.min(cone_means[:,2])+500)&(cone_means[:,2]<np.max(cone_means[:,2])-500)&\
(cone_means[:,3]>np.min(cone_means[:,3])+500)&(cone_means[:,3]<np.max(cone_means[:,3])-500),0]
print('Central S-cones:',np.sum(np.in1d(central_cones,blue_cones)))
print('Central M-cones:',np.sum(np.in1d(central_cones,green_cones)))
stat_overall_cone_contacts=pd.concat({'S cones':pd.DataFrame(blue_cones,columns=['cone']),'M cones':pd.DataFrame(green_cones,columns=['cone'])})
stat_overall_cone_contacts=stat_overall_cone_contacts.reset_index().drop('level_1',axis=1).rename(columns={'level_0':'cone_type'})
for i in range(stat_overall_cone_contacts.shape[0]):
stat_overall_cone_contacts.loc[i,'count']=np.sum(true_contacts['cone']==stat_overall_cone_contacts.ix[i,'cone'])
for i in range(stat_overall_cone_contacts.shape[0]):
stat_overall_cone_contacts.loc[i,'points']=np.sum(contact_summary[(contact_summary['cone']==stat_overall_cone_contacts.ix[i,'cone'])\
&contact_summary['prediction']==1]['count'])
stat_overall_cone_contacts_central=stat_overall_cone_contacts[np.in1d(stat_overall_cone_contacts['cone'],central_cones)]
stat_overall_cone_contacts=pd.concat({'S cones':pd.DataFrame(blue_cones,columns=['cone']),'M cones':pd.DataFrame(green_cones,columns=['cone'])})
stat_overall_cone_contacts=stat_overall_cone_contacts.reset_index().drop('level_1',axis=1).rename(columns={'level_0':'cone_type'})
for i in range(stat_overall_cone_contacts.shape[0]):
stat_overall_cone_contacts.loc[i,'count']=np.sum(true_contacts['cone']==stat_overall_cone_contacts.ix[i,'cone'])
stat_overall_cone_contacts.loc[i,'count_on']=np.sum(true_contacts[true_contacts['type']>62]['cone']==stat_overall_cone_contacts.ix[i,'cone'])
stat_overall_cone_contacts.loc[i,'count_off']=np.sum(true_contacts[true_contacts['type']<63]['cone']==stat_overall_cone_contacts.ix[i,'cone'])
for i in range(stat_overall_cone_contacts.shape[0]):
stat_overall_cone_contacts.loc[i,'points']=np.sum(contact_summary[(contact_summary['cone']==stat_overall_cone_contacts.ix[i,'cone'])\
&contact_summary['prediction']==1]['count'])
stat_overall_cone_contacts.loc[i,'points_on']=np.sum(contact_summary[(contact_summary['cone']==stat_overall_cone_contacts.ix[i,'cone'])\
&(contact_summary['prediction']==1)&(contact_summary['type']>62)]['count'])
stat_overall_cone_contacts.loc[i,'points_off']=np.sum(contact_summary[(contact_summary['cone']==stat_overall_cone_contacts.ix[i,'cone'])\
&(contact_summary['prediction']==1)&(contact_summary['type']<63)]['count'])
stat_overall_cone_contacts_central=stat_overall_cone_contacts[np.in1d(stat_overall_cone_contacts['cone'],central_cones)]
print('Average number of contacted BC per M-cone (center):',\
np.mean(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='M cones']['count']),\
np.std(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='M cones']['count'])\
/np.sqrt(np.sum(stat_overall_cone_contacts_central['cone_type']=='M cones')-1))
print('Average number of contacted BC per S-cone (center):',\
np.mean(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='S cones']['count']),\
np.std(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='S cones']['count'])\
/np.sqrt(np.sum(stat_overall_cone_contacts_central['cone_type']=='S cones')-1))
print('Average number of contact points per M-cone (center):',\
np.mean(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='M cones']['points']),\
np.std(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='M cones']['points'])\
/np.sqrt(np.sum(stat_overall_cone_contacts_central['cone_type']=='M cones')-1))
print('Average number of contact points per S-cone (center):',\
np.mean(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='S cones']['points']),\
np.std(stat_overall_cone_contacts_central[stat_overall_cone_contacts_central['cone_type']=='S cones']['points'])\
/np.sqrt(np.sum(stat_overall_cone_contacts_central['cone_type']=='S cones')-1))
blue_cones_alt=np.array([2007,2006,2010,2029,2061,2009,2031,2014,2024,2023,2100,2028,2017,2090])
green_cones_alt=all_cones[(np.in1d(all_cones,blue_cones_alt,invert=True))&(np.in1d(all_cones,cones_outside_cbc9,invert=True))]
contact_summary_alt=contact_summary.copy()
for i in range(contact_summary.shape[0]):
if contact_summary_alt.ix[i,'cone'] in blue_cones_alt:
contact_summary_alt.loc[i,'cone_type']='S-cone'
elif contact_summary_alt.ix[i,'cone'] in green_cones_alt:
contact_summary_alt.loc[i,'cone_type']='M-cone'
else:
contact_summary_alt.loc[i,'cone_type']='outside'
true_contacts_green_alt=contact_summary_alt.ix[(contact_summary_alt['prediction']==1)&(contact_summary_alt['cone_type']=='M-cone'),:4]
true_contacts_blue_alt=contact_summary_alt.ix[(contact_summary_alt['prediction']==1)&(contact_summary_alt['cone_type']=='S-cone'),:4]
stat_bc_contacts_green_alt=pd.DataFrame(BC_ids[(BC_ids[:,4]>=58)&(BC_ids[:,4]<=71)&np.in1d(BC_ids[:,0],BC_excluded,invert=True)][:,[0,4]],columns=['cell','type'])
stat_bc_contacts_blue_alt=stat_bc_contacts_green_alt.copy()
green_contact_freq_type_alt=itemfreq(true_contacts_green_alt['cell'].as_matrix())
for i in range(stat_bc_contacts_green_alt.shape[0]):
stat_bc_contacts_green_alt.loc[i,'count']=0
try:
stat_bc_contacts_green_alt.ix[i,'count']=green_contact_freq_type_alt[green_contact_freq_type_alt[:,0]==stat_bc_contacts_green_alt.ix[i,'cell'],1]
except ValueError:
continue
blue_contact_freq_type_alt=itemfreq(true_contacts_blue_alt['cell'].as_matrix())
for i in range(stat_bc_contacts_blue_alt.shape[0]):
stat_bc_contacts_blue_alt.loc[i,'count']=0
try:
stat_bc_contacts_blue_alt.ix[i,'count']=blue_contact_freq_type_alt[blue_contact_freq_type_alt[:,0]==stat_bc_contacts_blue_alt.ix[i,'cell'],1]
except ValueError:
continue
stat_cone_contacts_green_alt=pd.DataFrame(np.concatenate((np.tile(green_cones_alt,14).reshape(-1,1),np.repeat(np.arange(58,72),green_cones_alt.shape[0]).reshape(-1,1)),axis=1),columns=['cone','type'])
stat_cone_contacts_blue_alt=pd.DataFrame(np.concatenate((np.tile(blue_cones_alt,14).reshape(-1,1),np.repeat(np.arange(58,72),blue_cones_alt.shape[0]).reshape(-1,1)),axis=1),columns=['cone','type'])
for i in range(stat_cone_contacts_green_alt.shape[0]):
stat_cone_contacts_green_alt.loc[i,'count']=np.sum((true_contacts_green_alt['cone']==stat_cone_contacts_green_alt.ix[i,'cone'])&\
(true_contacts_green_alt['type']==stat_cone_contacts_green_alt.ix[i,'type']))
for i in range(stat_cone_contacts_blue_alt.shape[0]):
stat_cone_contacts_blue_alt.loc[i,'count']=np.sum((true_contacts_blue_alt['cone']==stat_cone_contacts_blue_alt.ix[i,'cone'])&\
(true_contacts_blue_alt['type']==stat_cone_contacts_blue_alt.ix[i,'type']))
stat_cone_contacts_joined_alt=pd.concat({'S-cones': stat_cone_contacts_blue_alt, 'M-cones': stat_cone_contacts_green_alt})
stat_cone_contacts_joined_alt=stat_cone_contacts_joined_alt.reset_index().drop('level_1',axis=1).rename(columns={'level_0':'cone_type'})
stat_bc_contacts_joined_alt=pd.concat({'S-cones': stat_bc_contacts_blue_alt, 'M-cones': stat_bc_contacts_green_alt})
stat_bc_contacts_joined_alt=stat_bc_contacts_joined_alt.reset_index().drop('level_1',axis=1).rename(columns={'level_0':'cone_type'})
stat_cone_contacts_joined_no_rbc_alt=stat_cone_contacts_joined_alt[stat_cone_contacts_joined_alt['type']!=71]
stat_bc_contacts_joined_no_rbc_alt=stat_bc_contacts_joined_alt[stat_bc_contacts_joined_alt['type']!=71]
#alternativ blue cone selection
labels = ['1','2','3A','3B','4','5T','5O','5I','X','6','7','8','9']
sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4})
with matplotlib.rc_context({"lines.linewidth": 0.7}):
plt.figure(figsize=(8/2.54,4/2.54))
ax=sns.barplot(x='type',y='count',data=stat_bc_contacts_joined_no_rbc_alt,hue='cone_type',order=np.arange(58,71),hue_order=['S-cones','M-cones'],ci=95)
ax.set_xticklabels(labels)
ax.set(ylabel='# cones',ylim=(0,10),xlabel='BC types')
ax.spines['left'].set_position(('outward',3))
legend=plt.legend(loc='upper left')
legend.get_title().set_fontsize(8)
sns.despine()
# plt.savefig('figures/joined_contacts_per_bc_alternative.svg',bbox_inches='tight',dpi=300)
plt.show()
#alternativ blue cone selection
labels = ['1','2','3A','3B','4','5T','5O','5I','X','6','7','8','9']
sns.set(font='Arial',context='paper',style='white',rc={"xtick.major.size": 0, "ytick.major.size": 4})
with matplotlib.rc_context({"lines.linewidth": 0.7}):
plt.figure(figsize=(8/2.54,4/2.54))
ax=sns.barplot(x='type',y='count',data=stat_cone_contacts_joined_no_rbc_alt,hue='cone_type',order=np.arange(58,71),hue_order=['S-cones','M-cones'],ci=95)
ax.set_xticklabels(labels)
ax.set(ylabel='# BCs',xlabel='BC types',ylim=(0,3.5),yticks=[0,1,2,3])
ax.spines['left'].set_position(('outward',3))
legend=plt.legend(bbox_to_anchor=(0.95, 1))
legend.get_title().set_fontsize(8)
sns.despine()
# plt.savefig('figures/joined_contacts_per_cone_alternative.svg',bbox_inches='tight',dpi=300)
plt.show()
plt.figure(figsize=(2.5/2.54,3/2.54))
sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4})
ax=sns.countplot(x='count',data=stat_bc_contacts_all[stat_bc_contacts_all['type']==71],order=np.arange(0,5),color='grey')
ncount=len(stat_bc_contacts_all[stat_bc_contacts_all['type']==71]['cell'])
ax.set(xlabel='Contacted cones',ylabel='# RBCs',yticks=([0,20,40,60]))
ax2=ax.twinx()
ax2.set(ylim=([0,ax.get_ylim()[1]/ncount*100]),yticks=[0,10,20,30,40],yticklabels=['0','10','20','30','40'],ylabel='Fraction [%]')
ax.spines['left'].set_position(('outward',3))
ax.spines['right'].set_position(('outward',3))
ax2.spines['left'].set_position(('outward',3))
ax2.spines['right'].set_position(('outward',3))
sns.despine(right=False)
# plt.savefig('figures/rbc_contacts_per_bc.svg',bbox_inches='tight',dpi=300)
plt.show()
print('RBC - M cone contacts:',np.sum(stat_bc_contacts_joined[(stat_bc_contacts_joined['type']==71)&(stat_bc_contacts_joined['cone_type']=='M-cones')]['count']))
print('RBC - S cone contacts:',np.sum(stat_bc_contacts_joined[(stat_bc_contacts_joined['type']==71)&(stat_bc_contacts_joined['cone_type']=='S-cones')]['count']))
print('RBC contacts per M cone:',np.mean(stat_cone_contacts_joined[(stat_cone_contacts_joined['type']==71)&(stat_cone_contacts_joined['cone_type']=='M-cones')]['count']))
print('RBC contacts per S cone:',np.mean(stat_cone_contacts_joined[(stat_cone_contacts_joined['type']==71)&(stat_cone_contacts_joined['cone_type']=='S-cones')]['count']))
sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4})
with matplotlib.rc_context({"lines.linewidth": 0.7}):
plt.figure(figsize=(2.5/2.54,3/2.54))
ax=sns.pointplot(x='cone_type',y='count',data=stat_cone_contacts_joined[(stat_cone_contacts_joined['type']==71)],order=['S-cones','M-cones'],ci=95,\
linestyles='',markers='s',palette=sns.color_palette(),scale=1.5)
ax.set(ylabel='# RBCs',ylim=(0,2),xlabel='Cone types',xticklabels=['S','M'])
ax.spines['left'].set_position(('outward',3))
sns.despine()
# plt.savefig('figures/rbc_color_selectivity.svg',bbox_inches='tight',dpi=300)
plt.show()
plt.figure(figsize=(2.5/2.54,3/2.54))
sns.set(font='Arial',style='white',context='paper',rc={"xtick.major.size": 0, "ytick.major.size": 4})
ax=sns.countplot(x='count',data=stat_cone_contacts_all[stat_cone_contacts_all['type']==71],order=np.arange(0,4),color='grey')
ncount=len(stat_cone_contacts_all[stat_cone_contacts_all['type']==71]['cone'])
ax.set(xlabel='Contacted RBCs',ylabel='# cones',yticks=([0,20,40,60]))
ax2=ax.twinx()
ax2.set(ylim=([0,ax.get_ylim()[1]/ncount*100]),yticks=[0,10,20,30,40],yticklabels=['0','10','20','30','40'],ylabel='Fraction [%]')
ax.spines['left'].set_position(('outward',3))
ax.spines['right'].set_position(('outward',3))
ax2.spines['left'].set_position(('outward',3))
ax2.spines['right'].set_position(('outward',3))
sns.despine(right=False)
# plt.savefig('figures/rbc_contacts_per_cone.svg',bbox_inches='tight',dpi=300)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download a file of DeepVariant examples from Google Cloud Storage to the local file storage of this notebook. DeepVariant examples are stored in TFRecord format.
Step2: Show one DeepVariant example
Step3: You can also save the images as .png files
Step4: This creates 10 PNG images labeled with locus IDs and truth labels.
Step5: Flexible, modular functions
|
<ASSISTANT_TASK:>
Python Code:
# Nucleus is the only dependency we need to install. We need at least
# nucleus 0.5.0, since this was the first release of Nucleus that includes the
# visualization utilities we are using here.
%%capture
! pip install google-nucleus==0.5.1
# Nucleus may show an error about failing to build a wheel, but it still
# installs successfully.
import tensorflow as tf
from nucleus.util import vis
print(tf.__version__)
!gsutil -q cp gs://deepvariant/datalab-testdata/make_examples_datalab.tfrecord.gz /tmp/make_examples_colab.tfrecord.gz
examples_path = '/tmp/make_examples_colab.tfrecord.gz'
dataset = tf.data.TFRecordDataset(examples_path, compression_type="GZIP")
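# Each record is a serialized tf.train.Example holding the pileup image tensor
# together with the variant call and truth label used below.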
# Take a single example and show the deepvariant pileup image for it.
for e in dataset.take(1):
example = tf.train.Example()
example.ParseFromString(e.numpy())
# For example, in chr20:19503712_T_C, T is the reference allele and C is the
# alternate allele that is proposed in this pileup image.
print("Locus ID with alt:", vis.locus_id_with_alt(example))
print("Draw the channels:")
vis.draw_deepvariant_pileup(example)
print("Truth label:", vis.label_from_example(example))
for e in dataset.take(10):
example = tf.train.Example()
example.ParseFromString(e.numpy())
# For example, in chr20:19503712_T_C, T is the reference allele and C is the
# alternate allele that is proposed in this pileup image.
filename = 'pileup_{}_truth={}.png'.format(vis.locus_id_with_alt(example), vis.label_from_example(example))
vis.draw_deepvariant_pileup(example, path=filename, show=False)
! ls
for e in dataset.take(1):
example = tf.train.Example()
example.ParseFromString(e.numpy())
print("Extracting the variant:")
print(vis.variant_from_example(example))
# Get the pileup image as an array.
arr = vis.get_image_array_from_example(example)
print("\nShape of the pileup numpy array:", arr.shape)
# Split that array into channels.
channels = vis.split_3d_array_into_channels(arr)
print("\nNumber of channels:", len(channels))
# Visualize the numpy array as an image.
print("\nShowing just the first channel:")
vis.array_to_png(channels[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
|
<ASSISTANT_TASK:>
Python Code:
import cobra
from cobra.flux_analysis import pfba
import pandas as pd # for nice tables
pd.set_option('display.max_colwidth', -1)
from utils import show_map
import escher
map_loc = './maps/e_coli_core.Core metabolism.json' # the escher map used below
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
M = cobra.io.load_json_model('models/e_coli_core.json')
model = M.copy() # this way we can edit model but leave M unaltered
b = show_map([],map_loc)
b.display_in_notebook()
sol = model.optimize()
b = show_map(sol,map_loc)
b.display_in_notebook()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ... or explicitly using
Step2: Properties
Step3: As a convenience, some of the most common properties can also be set when the axes are created
Step4: And the same properties can be used with the
Step5: Scale
Step6: Note that Toyplot handles negative values correctly, and provides sensible results for values near zero by rendering them using a small linear region around the origin.
Step7: Of course, you are free to specify any base you like, using the tuple notation
|
<ASSISTANT_TASK:>
Python Code:
import numpy
y = numpy.linspace(0, 1, 20) ** 2
import toyplot
canvas, axes, mark = toyplot.plot(y, width=300)
canvas = toyplot.Canvas(width=300)
axes = canvas.axes()
axes.plot(y);
x = numpy.linspace(0, 2 * numpy.pi)
y = numpy.sin(x)
import toyplot.locator
canvas = toyplot.Canvas(width=600, height=300)
axes = canvas.axes()
axes.label.text = "Trigonometry 101"
axes.x.label.text = "x"
axes.y.label.text = "sin(x)"
axes.x.ticks.show = True
axes.x.ticks.locator = toyplot.locator.Explicit(
[0, numpy.pi / 2, numpy.pi, 3 * numpy.pi / 2, 2 * numpy.pi],
["0", u"\u03c0 / 2", u"\u03c0", u"3 \u03c0 / 2", u"2 \u03c0"])
mark = axes.plot(x, y)
x = numpy.linspace(0, 10, 100)
y = 40 + x ** 2
canvas = toyplot.Canvas(300, 300)
axes = canvas.axes(label="Toyplot Users", xlabel="Days", ylabel="Users")
mark = axes.plot(x, y)
toyplot.plot(x, y, label="Toyplot Users", xlabel="Days", ylabel="Users", ymin=0, width=300);
x = numpy.linspace(-1000, 1000)
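# Plot the same data under all four combinations of linear/log axis scales; the
# log axes still handle zero and negative values via a small linear region
# around the origin.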
canvas = toyplot.Canvas(width=700)
axes = canvas.axes(grid=(2, 2, 0, 0), xscale="linear", yscale="linear")
axes.plot(x, x, marker="o")
axes = canvas.axes(grid=(2, 2, 0, 1), xscale="log", yscale="linear")
axes.plot(x, x, marker="o")
axes = canvas.axes(grid=(2, 2, 1, 0), xscale="linear", yscale="log")
axes.plot(x, x, marker="o")
axes = canvas.axes(grid=(2, 2, 1, 1), xscale="log", yscale="log")
axes.plot(x, x, marker="o");
canvas = toyplot.Canvas(width=700)
axes = canvas.axes(grid=(2,2,0), xscale="log")
axes.plot(x, x)
axes = canvas.axes(grid=(2,2,1), xscale="log10")
axes.plot(x, x)
axes = canvas.axes(grid=(2,2,2), xscale=("log", 10))
axes.plot(x, x);
toyplot.plot(x, x, xscale=("log", 4), width=400);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset
Step2: Check the data real quick
Step3: Preparing the data for training
Step4: Long short-term memory (LSTM) RNNs
Step5: Attach the gradients
Step6: Softmax Activation
Step7: Cross-entropy loss function
Step8: Averaging the loss over the sequence
Step9: Optimizer
Step11: Define the model
Step12: Test and visualize predictions
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import mxnet as mx
from mxnet import nd, autograd
import numpy as np
from collections import defaultdict
mx.random.seed(1)
# ctx = mx.gpu(0)
ctx = mx.cpu(0)
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from datetime import datetime
# import mpld3
sns.set_style('whitegrid')
#sns.set_context('notebook')
sns.set_context('poster')
# Make inline plots vector graphics instead of raster graphics
from IPython.display import set_matplotlib_formats
#set_matplotlib_formats('pdf', 'svg')
set_matplotlib_formats('pdf', 'png')
SEQ_LENGTH = 100 + 1 # needs to be at least the seq_length for training + 1 because of the time shift between inputs and labels
NUM_SAMPLES_TRAINING = 5000 + 1
NUM_SAMPLES_TESTING = 100 + 1
CREATE_DATA_SETS = False # True if you don't have the data files or re-create them
def gimme_one_random_number():
return nd.random_uniform(low=0, high=1, shape=(1,1)).asnumpy()[0][0]
def create_one_time_series(seq_length=10):
freq = (gimme_one_random_number()*0.5) + 0.1 # 0.1 to 0.6
ampl = gimme_one_random_number() + 0.5 # 0.5 to 1.5
x = np.sin(np.arange(0, seq_length) * freq) * ampl
return x
def create_batch_time_series(seq_length=10, num_samples=4):
column_labels = ['t'+str(i) for i in range(0, seq_length)]
df = pd.DataFrame(create_one_time_series(seq_length=seq_length)).transpose()
df.columns = column_labels
df.index = ['s'+str(0)]
for i in range(1, num_samples):
more_df = pd.DataFrame(create_one_time_series(seq_length=seq_length)).transpose()
more_df.columns = column_labels
more_df.index = ['s'+str(i)]
df = pd.concat([df, more_df], axis=0)
return df # returns a dataframe of shape (num_samples, seq_length)
# Create some time-series
# uncomment below to force predictible random numbers
# mx.random.seed(1)
if CREATE_DATA_SETS:
data_train = create_batch_time_series(seq_length=SEQ_LENGTH, num_samples=NUM_SAMPLES_TRAINING)
data_test = create_batch_time_series(seq_length=SEQ_LENGTH, num_samples=NUM_SAMPLES_TESTING)
# Write data to csv
data_train.to_csv("../data/timeseries/train.csv")
data_test.to_csv("../data/timeseries/test.csv")
else:
data_train = pd.read_csv("../data/timeseries/train.csv", index_col=0)
data_test = pd.read_csv("../data/timeseries/test.csv", index_col=0)
# num_sampling_points = min(SEQ_LENGTH, 400)
# (data_train.sample(4).transpose().iloc[range(0, SEQ_LENGTH, SEQ_LENGTH//num_sampling_points)]).plot()
# print(data_train.loc[:,data_train.columns[:-1]]) # inputs
# print(data_train.loc[:,data_train.columns[1:]]) # outputs (i.e. inputs shift by +1)
batch_size = 64
batch_size_test = 1
seq_length = 16
num_batches_train = data_train.shape[0] // batch_size
num_batches_test = data_test.shape[0] // batch_size_test
num_features = 1 # we do 1D time series for now, this is like vocab_size = 1 for characters
# inputs are from t0 to t_seq_length - 1. because the last point is kept for the output ("label") of the penultimate point
data_train_inputs = data_train.loc[:,data_train.columns[:-1]]
data_train_labels = data_train.loc[:,data_train.columns[1:]]
data_test_inputs = data_test.loc[:,data_test.columns[:-1]]
data_test_labels = data_test.loc[:,data_test.columns[1:]]
train_data_inputs = nd.array(data_train_inputs.values).reshape((num_batches_train, batch_size, seq_length, num_features))
train_data_labels = nd.array(data_train_labels.values).reshape((num_batches_train, batch_size, seq_length, num_features))
test_data_inputs = nd.array(data_test_inputs.values).reshape((num_batches_test, batch_size_test, seq_length, num_features))
test_data_labels = nd.array(data_test_labels.values).reshape((num_batches_test, batch_size_test, seq_length, num_features))
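# Swap the batch and time axes: arrays become (num_batches, seq_length, batch_size,
# num_features), so iterating over one batch yields, at each step, the values of a
# single time stamp across all sequences in that batch.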
train_data_inputs = nd.swapaxes(train_data_inputs, 1, 2)
train_data_labels = nd.swapaxes(train_data_labels, 1, 2)
test_data_inputs = nd.swapaxes(test_data_inputs, 1, 2)
test_data_labels = nd.swapaxes(test_data_labels, 1, 2)
print('num_samples_training={0} | num_batches_train={1} | batch_size={2} | seq_length={3}'.format(NUM_SAMPLES_TRAINING, num_batches_train, batch_size, seq_length))
print('train_data_inputs shape: ', train_data_inputs.shape)
print('train_data_labels shape: ', train_data_labels.shape)
# print(data_train_inputs.values)
# print(train_data_inputs[0]) # see what one batch looks like
num_inputs = num_features # for a 1D time series, this is just a scalar equal to 1.0
num_outputs = num_features # same comment
num_hidden_units = [8, 8] # num of hidden units in each hidden LSTM layer
num_hidden_layers = len(num_hidden_units) # num of hidden LSTM layers
num_units_layers = [num_features] + num_hidden_units
########################
# Weights connecting the inputs to the hidden layer
########################
Wxg, Wxi, Wxf, Wxo, Whg, Whi, Whf, Who, bg, bi, bf, bo = {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}
for i_layer in range(1, num_hidden_layers+1):
num_inputs = num_units_layers[i_layer-1]
num_hidden_units = num_units_layers[i_layer]
Wxg[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
Wxi[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
Wxf[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
Wxo[i_layer] = nd.random_normal(shape=(num_inputs,num_hidden_units), ctx=ctx) * .01
########################
# Recurrent weights connecting the hidden layer across time steps
########################
Whg[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
Whi[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
Whf[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
Who[i_layer] = nd.random_normal(shape=(num_hidden_units, num_hidden_units), ctx=ctx) * .01
########################
# Bias vector for hidden layer
########################
bg[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
bi[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
bf[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
bo[i_layer] = nd.random_normal(shape=num_hidden_units, ctx=ctx) * .01
########################
# Weights to the output nodes
########################
Why = nd.random_normal(shape=(num_units_layers[-1], num_outputs), ctx=ctx) * .01
by = nd.random_normal(shape=num_outputs, ctx=ctx) * .01
params = []
for i_layer in range(1, num_hidden_layers+1):
params += [Wxg[i_layer], Wxi[i_layer], Wxf[i_layer], Wxo[i_layer], Whg[i_layer], Whi[i_layer], Whf[i_layer], Who[i_layer], bg[i_layer], bi[i_layer], bf[i_layer], bo[i_layer]]
params += [Why, by] # add the output layer
for param in params:
param.attach_grad()
def softmax(y_linear, temperature=1.0):
lin = (y_linear-nd.max(y_linear)) / temperature
exp = nd.exp(lin)
partition = nd.sum(exp, axis=0, exclude=True).reshape((-1,1))
return exp / partition
def cross_entropy(yhat, y):
return - nd.mean(nd.sum(y * nd.log(yhat), axis=0, exclude=True))
def rmse(yhat, y):
return nd.mean(nd.sqrt(nd.sum(nd.power(y - yhat, 2), axis=0, exclude=True)))
def average_ce_loss(outputs, labels):
assert(len(outputs) == len(labels))
total_loss = 0.
for (output, label) in zip(outputs,labels):
total_loss = total_loss + cross_entropy(output, label)
return total_loss / len(outputs)
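# Note: the softmax / cross-entropy helpers above are not used by the training
# loop below, which minimizes the RMSE-based loss -- the natural choice for
# real-valued time-series outputs.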
def average_rmse_loss(outputs, labels):
assert(len(outputs) == len(labels))
total_loss = 0.
for (output, label) in zip(outputs,labels):
total_loss = total_loss + rmse(output, label)
return total_loss / len(outputs)
def SGD(params, learning_rate):
for param in params:
# print('grrrrr: ', param.grad)
param[:] = param - learning_rate * param.grad
def adam(params, learning_rate, M , R, index_adam_call, beta1, beta2, eps):
k = -1
for param in params:
k += 1
M[k] = beta1 * M[k] + (1. - beta1) * param.grad
R[k] = beta2 * R[k] + (1. - beta2) * (param.grad)**2
# bias correction since we initilized M & R to zeros, they're biased toward zero on the first few iterations
m_k_hat = M[k] / (1. - beta1**(index_adam_call))
r_k_hat = R[k] / (1. - beta2**(index_adam_call))
        if((np.isnan(M[k].asnumpy())).any() or (np.isnan(R[k].asnumpy())).any()):
            # print('GRRRRRR  ', M, K)
            raise RuntimeError('NaN encountered in the Adam moment estimates')
# print('grrrrr: ', param.grad)
param[:] = param - learning_rate * m_k_hat / (nd.sqrt(r_k_hat) + eps)
# print('m_k_hat r_k_hat', m_k_hat, r_k_hat)
return params, M, R
# def adam(params, learning_rate, M, R, index_iteration, beta1=0.9, beta2=0.999, eps=1e-8):
# for k, param in enumerate(params):
# if k==0:
# print('batch_iteration {}: {}'.format(index_iteration, param))
# M[k] = beta1 * M[k] + (1. - beta1) * param.grad
# R[k] = beta2 * R[k] + (1. - beta2) * (param.grad)**2
# m_k_hat = M[k] / (1. - beta1**(index_iteration))
# r_k_hat = R[k] / (1. - beta2**(index_iteration))
# param[:] = param - learning_rate * m_k_hat / (nd.sqrt(r_k_hat) + eps)
# # print(beta1, beta2, M, R)
# if k==0:
# print('batch_iteration {}: {}'.format(index_iteration, param.grad))
# for k, param in enumerate(params):
# print('batch_iteration {}: {}'.format(index_iteration, param))
# return M, R
def single_lstm_unit_calcs(X, c, Wxg, h, Whg, bg, Wxi, Whi, bi, Wxf, Whf, bf, Wxo, Who, bo):
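    # Standard LSTM cell: g is the candidate cell update, i the input gate,
    # f the forget gate and o the output gate; c is the cell state carried
    # across time steps and h the hidden state passed on to the next layer.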
g = nd.tanh(nd.dot(X, Wxg) + nd.dot(h, Whg) + bg)
i = nd.sigmoid(nd.dot(X, Wxi) + nd.dot(h, Whi) + bi)
f = nd.sigmoid(nd.dot(X, Wxf) + nd.dot(h, Whf) + bf)
o = nd.sigmoid(nd.dot(X, Wxo) + nd.dot(h, Who) + bo)
#######################
c = f * c + i * g
h = o * nd.tanh(c)
return c, h
def deep_lstm_rnn(inputs, h, c, temperature=1.0):
    """h: dict of nd.arrays, each key is the index of a hidden layer (from 1 to whatever).
    Index 0, if any, is the input layer."""
outputs = []
# inputs is one BATCH of sequences so its shape is number_of_seq, seq_length, features_dim
# (latter is 1 for a time series, vocab_size for a character, n for a n different times series)
for X in inputs:
# X is batch of one time stamp. E.g. if each batch has 37 sequences, then the first value of X will be a set of the 37 first values of each of the 37 sequences
# that means each iteration on X corresponds to one time stamp, but it is done in batches of different sequences
h[0] = X # the first hidden layer takes the input X as input
for i_layer in range(1, num_hidden_layers+1):
# lstm units now have the 2 following inputs:
# i) h_t from the previous layer (equivalent to the input X for a non-deep lstm net),
# ii) h_t-1 from the current layer (same as for non-deep lstm nets)
c[i_layer], h[i_layer] = single_lstm_unit_calcs(h[i_layer-1], c[i_layer], Wxg[i_layer], h[i_layer], Whg[i_layer], bg[i_layer], Wxi[i_layer], Whi[i_layer], bi[i_layer], Wxf[i_layer], Whf[i_layer], bf[i_layer], Wxo[i_layer], Who[i_layer], bo[i_layer])
yhat_linear = nd.dot(h[num_hidden_layers], Why) + by
# yhat is a batch of several values of the same time stamp
# this is basically the prediction of the sequence, which overlaps most of the input sequence, plus one point (character or value)
# yhat = softmax(yhat_linear, temperature=temperature)
# yhat = nd.sigmoid(yhat_linear)
# yhat = nd.tanh(yhat_linear)
yhat = yhat_linear # we cant use a 1.0-bounded activation function since amplitudes can be greater than 1.0
outputs.append(yhat) # outputs has same shape as inputs, i.e. a list of batches of data points.
# print('some shapes... yhat outputs', yhat.shape, len(outputs) )
return (outputs, h, c)
def test_prediction(one_input_seq, one_label_seq, temperature=1.0):
#####################################
# Set the initial state of the hidden representation ($h_0$) to the zero vector
##################################### # some better initialization needed??
h, c = {}, {}
for i_layer in range(1, num_hidden_layers+1):
h[i_layer] = nd.zeros(shape=(batch_size_test, num_units_layers[i_layer]), ctx=ctx)
c[i_layer] = nd.zeros(shape=(batch_size_test, num_units_layers[i_layer]), ctx=ctx)
outputs, h, c = deep_lstm_rnn(one_input_seq, h, c, temperature=temperature)
loss = rmse(outputs[-1][0], one_label_seq)
return outputs[-1][0].asnumpy()[-1], one_label_seq.asnumpy()[-1], loss.asnumpy()[-1], outputs, one_label_seq
def check_prediction(index):
o, label, loss, outputs, labels = test_prediction(test_data_inputs[index], test_data_labels[index], temperature=1.0)
prediction = round(o, 3)
true_label = round(label, 3)
outputs = [float(i.asnumpy().flatten()) for i in outputs]
true_labels = list(test_data_labels[index].asnumpy().flatten())
# print(outputs, '\n----\n', true_labels)
df = pd.DataFrame([outputs, true_labels]).transpose()
df.columns = ['predicted', 'true']
# print(df)
rel_error = round(100. * (prediction / true_label - 1.0), 2)
# print('\nprediction = {0} | actual_value = {1} | rel_error = {2}'.format(prediction, true_label, rel_error))
return df
epochs = 48 # at some point, some nans appear in M, R matrices of Adam. TODO investigate why
moving_loss = 0.
learning_rate = 0.001 # 0.1 works for a [8, 8] after about 70 epochs of 32-sized batches
# Adam Optimizer stuff
beta1 = .9
beta2 = .999
index_adam_call = 0
# M & R arrays to keep track of momenta in adam optimizer. params is a list that contains all ndarrays of parameters
M = {k: nd.zeros_like(v) for k, v in enumerate(params)}
R = {k: nd.zeros_like(v) for k, v in enumerate(params)}
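# --- Hedged sketch (not part of the original code) ---------------------------
# adam() is defined earlier in the notebook; the bias-corrected update it is
# called with below is expected to look roughly like this (illustrative only):
def adam_sketch(params, lr, M, R, t, beta1, beta2, eps):
    for k, p in enumerate(params):
        g = p.grad
        M[k] = beta1 * M[k] + (1. - beta1) * g          # 1st-moment estimate
        R[k] = beta2 * R[k] + (1. - beta2) * g * g      # 2nd-moment estimate
        m_hat = M[k] / (1. - beta1 ** t)                # bias corrections
        r_hat = R[k] / (1. - beta2 ** t)
        p[:] = p - lr * m_hat / (nd.sqrt(r_hat) + eps)  # in-place parameter step
    return params, M, R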
df_moving_loss = pd.DataFrame(columns=['Loss', 'Error'])
df_moving_loss.index.name = 'Epoch'
# needed to update plots on the fly
%matplotlib notebook
fig, axes_fig1 = plt.subplots(1,1, figsize=(6,3))
fig2, axes_fig2 = plt.subplots(1,1, figsize=(6,3))
for e in range(epochs):
############################
# Attenuate the learning rate by a factor of 2 every 80 epochs
############################
if ((e+1) % 80 == 0):
learning_rate = learning_rate / 2.0 # TODO check if its ok to adjust learning_rate when using Adam Optimizer
h, c = {}, {}
for i_layer in range(1, num_hidden_layers+1):
h[i_layer] = nd.zeros(shape=(batch_size, num_units_layers[i_layer]), ctx=ctx)
c[i_layer] = nd.zeros(shape=(batch_size, num_units_layers[i_layer]), ctx=ctx)
for i in range(num_batches_train):
data_one_hot = train_data_inputs[i]
label_one_hot = train_data_labels[i]
with autograd.record():
outputs, h, c = deep_lstm_rnn(data_one_hot, h, c)
loss = average_rmse_loss(outputs, label_one_hot)
loss.backward()
# SGD(params, learning_rate)
index_adam_call += 1 # needed for bias correction in Adam optimizer
params, M, R = adam(params, learning_rate, M, R, index_adam_call, beta1, beta2, 1e-8)
##########################
# Keep a moving average of the losses
##########################
if (i == 0) and (e == 0):
moving_loss = nd.mean(loss).asscalar()
else:
moving_loss = .99 * moving_loss + .01 * nd.mean(loss).asscalar()
df_moving_loss.loc[e] = round(moving_loss, 4)
############################
# Predictions and plots
############################
data_prediction_df = check_prediction(index=e)
axes_fig1.clear()
data_prediction_df.plot(ax=axes_fig1)
fig.canvas.draw()
prediction = round(data_prediction_df.tail(1)['predicted'].values.flatten()[-1], 3)
true_label = round(data_prediction_df.tail(1)['true'].values.flatten()[-1], 3)
rel_error = round(100. * np.abs(prediction / true_label - 1.0), 2)
print("Epoch = {0} | Loss = {1} | Prediction = {2} True = {3} Error = {4}".format(e, moving_loss, prediction, true_label, rel_error ))
axes_fig2.clear()
if e == 0:
moving_rel_error = rel_error
else:
moving_rel_error = .9 * moving_rel_error + .1 * rel_error
df_moving_loss.loc[e, ['Error']] = moving_rel_error
axes_loss_plot = df_moving_loss.plot(ax=axes_fig2, secondary_y='Loss', color=['r','b'])
axes_loss_plot.right_ax.grid(False)
# axes_loss_plot.right_ax.set_yscale('log')
fig2.canvas.draw()
%matplotlib inline
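# --- Hedged sketch (not part of the original notebook) ------------------------
# One common way to chase the NaN issue flagged in the TODO above is to clip
# gradients before the Adam update; a minimal helper that could be called right
# after loss.backward() (clip_value is an arbitrary illustrative choice):
def clip_gradients_sketch(params, clip_value=1.0):
    for p in params:
        p.grad[:] = nd.clip(p.grad, -clip_value, clip_value)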
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An example is shown below with its output
Step2: Compact Approach
Step3: More Examples
Step4: Calculating Compound Amount
Step5: Compact Approach
Step6: Calculating Compound Mass
Step7: Identifying Elements in Compounds
Step8: Calculating Stoichiometry Coefficients
Step9: We can determine the coefficients for a list of elements using the auxi.tools.chemistry.stoichiometry.stoichiometry_coefficients function
Step10: Calculating Element Mass Fractions
Step11: Converting Compounds
|
<ASSISTANT_TASK:>
Python Code:
from auxi.tools.chemistry import stoichiometry
molarmass_FeO = stoichiometry.molar_mass("FeO")
molarmass_CO2 = stoichiometry.molar_mass("CO2")
molarmass_FeCr2O4 = stoichiometry.molar_mass("FeCr2O4")
print("Molar mass of FeO :", molarmass_FeO, "kg/kmol")
print("Molar mass of CO2 :", molarmass_CO2, "kg/kmol")
print("Molar mass of FeCr2O4:", molarmass_FeCr2O4, "kg/kmol")
from auxi.tools.chemistry.stoichiometry import molar_mass as mm
molarmass_FeO = mm("FeO")
print("Molar mass of FeO :", molarmass_FeO, "kg/kmol")
print("Molar mass of CO2 :", mm("CO2"), "kg/kmol")
print("Molar mass of FeCr2O4:", mm("FeCr2O4"), "kg/kmol")
from auxi.tools.chemistry.stoichiometry import molar_mass as mm
def print_molar_mass(compound):
print("Molar mass of", compound, "is", mm(compound), "kg/kmol.")
print_molar_mass("FeO1.5")
print_molar_mass("Fe2O3")
print_molar_mass("FeOTiO2")
print_molar_mass("FeTiO3")
print_molar_mass("Fe2(CO3)3")
print_molar_mass("Fe2C3O9")
print_molar_mass("H2O")
print_molar_mass("H")
print_molar_mass("He")
print_molar_mass("Au")
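# --- Quick consistency check (not part of the original example) ---------------
# A compound's molar mass is just the element molar masses weighted by their
# stoichiometry coefficients, e.g. for Fe2O3 (using the mm alias imported above):
print("2*M(Fe) + 3*M(O) =", 2 * mm("Fe") + 3 * mm("O"), "kg/kmol")
print("mm('Fe2O3')      =", mm("Fe2O3"), "kg/kmol")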
from auxi.tools.chemistry import stoichiometry
m_FeO = 10.0
n_FeO = stoichiometry.amount("FeO", m_FeO)
print("There is", n_FeO, "kmol of FeO in", m_FeO , "kg of the compound.")
m_CO2 = 12.3
n_CO2 = stoichiometry.amount("CO2", m_CO2)
print("There is", n_CO2, "kmol of CO2 in", m_CO2 , "kg of the compound.")
m_FeCr2O4 = 453.0
n_FeCr2O4 = stoichiometry.amount("FeCr2O4", m_FeCr2O4)
print("There is", n_FeCr2O4, "kmol of FeCr2O4 in",
m_FeCr2O4 , "kg of the compound.")
from auxi.tools.chemistry.stoichiometry import amount
m_FeO = 10.0
n_FeO = amount("FeO", m_FeO)
print("There is", n_FeO, "kmol of FeO in", m_FeO , "kg of the compound.")
m_CO2 = 12.3
n_CO2 = amount("CO2", m_CO2)
print("There is", n_CO2, "kmol of CO2 in", m_CO2 , "kg of the compound.")
m_FeCr2O4 = 453.0
n_FeCr2O4 = amount("FeCr2O4", m_FeCr2O4)
print("There is", n_FeCr2O4, "kmol of FeCr2O4 in",
m_FeCr2O4 , "kg of the compound.")
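# --- Quick consistency check (not part of the original example) ---------------
# amount() should simply be the mass divided by the compound's molar mass:
from auxi.tools.chemistry.stoichiometry import molar_mass as mm
print("10.0 kg of FeO / molar mass =", 10.0 / mm("FeO"),
      "kmol (compare with n_FeO above)")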
from auxi.tools.chemistry import stoichiometry
n_FeO = 10.0
m_FeO = stoichiometry.mass("FeO", n_FeO)
print("There is", m_FeO, "kg of FeO in", n_FeO , "kmol of the compound.")
n_CO2 = 12.3
m_CO2 = stoichiometry.mass("CO2", n_CO2)
print("There is", m_CO2, "kg of CO2 in", n_CO2 , "kmol of the compound.")
n_FeCr2O4 = 453.0
m_FeCr2O4 = stoichiometry.mass("FeCr2O4", n_FeCr2O4)
print("There is", m_FeCr2O4, "kg of FeCr2O4 in",
n_FeCr2O4 , "kmol of the compound.")
from auxi.tools.chemistry import stoichiometry
elements_Fe2O3 = stoichiometry.elements(["Fe2O3"])
print("Fe2O3 contains these elements:", elements_Fe2O3)
elements_CO2 = stoichiometry.elements(["CO2"])
print("CO2 contains these elements:", elements_CO2)
elements_Fe2Cr2O4 = stoichiometry.elements(["Fe2Cr2O4"])
print("Fe2Cr2O4 contains these elements:", elements_Fe2Cr2O4)
elements_Al2S3O12 = stoichiometry.elements(["Al2(SO4)3"])
print("Al2(SO4)3 contains these elements:", elements_Al2S3O12)
elements_all = stoichiometry.elements(["Fe2O3", "CO2", "Fe2Cr2O4", "Al2(SO4)3"])
print("Fe2O3, CO2, Fe2Cr2O4 and Al2(SO4)3 contain these elements:",
elements_all)
from auxi.tools.chemistry import stoichiometry
coeff_Fe2O3_Fe = stoichiometry.stoichiometry_coefficient("Fe2O3", "Fe")
print("Stoichiometry coefficient of Fe in Fe2O3:", coeff_Fe2O3_Fe)
coeff_Fe2O3_O = stoichiometry.stoichiometry_coefficient("Fe2O3", "O")
print("Stoichiometry coefficient of O in Fe2O3:", coeff_Fe2O3_O)
coeff_Fe2O3_C = stoichiometry.stoichiometry_coefficient("Fe2O3", "C")
print("Stoichiometry coefficient of C in Fe2O3:", coeff_Fe2O3_C)
from auxi.tools.chemistry import stoichiometry
elements = ["Fe", "O", "C", "Ar"]
st_Fe2O3 = stoichiometry.stoichiometry_coefficients("Fe2O3", elements)
print("Stoichiometry coefficient of", elements, "in Fe2O3:",
st_Fe2O3)
elements = ["Al", "Ca", "Fe", "Si", "O", "C", "H"]
st_Lawsonite = stoichiometry.stoichiometry_coefficients("CaAl2Si2O7O2H2H2O",
elements)
print("Stoichiometry coefficient of", elements,
"in Lawsonite (CaAl2(Si2O7)(OH)2·H2O):", st_Lawsonite)
from auxi.tools.chemistry import stoichiometry
y_Fe2O3_Fe = stoichiometry.element_mass_fraction("Fe2O3", "Fe")
print("Mass fraction of Fe in Fe2O3:", y_Fe2O3_Fe)
y_Fe2O3_O = stoichiometry.element_mass_fraction("Fe2O3", "O")
print("Mass fraction of O in Fe2O3:", y_Fe2O3_O)
y_Fe2O3_C = stoichiometry.element_mass_fraction("Fe2O3", "C")
print("Mass fraction of C in Fe2O3:", y_Fe2O3_C)
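# --- Quick consistency check (not part of the original example) ---------------
# The mass fraction of Fe in Fe2O3 should equal 2*M(Fe) / M(Fe2O3):
from auxi.tools.chemistry.stoichiometry import molar_mass as mm
print("2*M(Fe)/M(Fe2O3) =", 2.0 * mm("Fe") / mm("Fe2O3"),
      "(compare with y_Fe2O3_Fe above)")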
from auxi.tools.chemistry import stoichiometry
elements = ["Fe", "O", "C", "Ar"]
y_Fe2O3 = stoichiometry.element_mass_fractions("Fe2O3", elements)
print("Mass fractions of", elements, "in Fe2O3:", y_Fe2O3)
elements = ["Al", "Ca", "Fe", "Si", "O", "C", "H"]
y_Lawsonite = stoichiometry.element_mass_fractions("CaAl2Si2O7O2H2H2O", elements)
print("Mass fractions of", elements,
"in Lawsonite (CaAl2(Si2O7)(OH)2·H2O):",
y_Lawsonite)
from auxi.tools.chemistry import stoichiometry
m_Fe2O3 = 10.0
m_Fe = stoichiometry.convert_compound(m_Fe2O3, "Fe2O3", "Fe", "Fe")
print("From", m_Fe2O3, "kg of Fe2O3,", m_Fe ,
"kg of Fe can be produced.")
m_Fe = 10.0
m_Fe2O3 = stoichiometry.convert_compound(m_Fe, "Fe", "Fe2O3", "Fe")
print("When", m_Fe, "kg of Fe is oxidised completely,", m_Fe2O3 ,
"kg of Fe2O3 will be produced.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generating data
Step2: Model specification
Step3: For the sunspot model, the posterior distribution of α is concentrated between 0.6 and 1.2, indicating that we should expect more components to contribute non-negligible amounts to the mixture than for the Old Faithful waiting time model.
Step4: Indeed, we see that between ten and fifteen mixture components have appreciable posterior expected weight.
Step5: We now calculate and plot the fitted density estimate.
Step6: Again, we can decompose the posterior expected density into weighted mixture densities.
|
<ASSISTANT_TASK:>
Python Code:
# pymc3.distributions.DensityDist?
import matplotlib.pyplot as plt
import matplotlib as mpl
from pymc3 import Model, Normal, Slice
from pymc3 import sample
from pymc3 import traceplot
from pymc3.distributions import Interpolated
from theano import as_op
import theano.tensor as tt
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
import numpy as np
import pymc3 as pm
import scipy as sp
import seaborn as sns
from statsmodels.datasets import get_rdataset
from theano import tensor as tt
%matplotlib inline
%load_ext version_information
%version_information pymc3, statsmodels, pandas
sunspot_df = get_rdataset('sunspot.year', cache=True).data
sunspot_df.head()
sunspot_df.plot(x='time')
SEED = 8675309 # from random.org
np.random.seed(SEED)
K = 50
N = sunspot_df.shape[0]
def stick_breaking(beta):
portion_remaining = tt.concatenate([[1], tt.extra_ops.cumprod(1 - beta)[:-1]])
return beta * portion_remaining
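# --- Hedged illustration (not part of the original model) ---------------------
# A NumPy-only version of the same stick-breaking construction: draw K
# Beta(1, alpha) fractions, break off a piece of the remaining stick each time,
# and check that the resulting weights sum to (almost) 1.
def stick_breaking_np(beta_draws):
    remaining = np.concatenate([[1.0], np.cumprod(1.0 - beta_draws)[:-1]])
    return beta_draws * remaining

w_demo = stick_breaking_np(np.random.beta(1.0, 1.0, size=K))
print("demo weights sum to", w_demo.sum())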
with pm.Model() as model:
alpha = pm.Gamma('alpha', 1., 1.)
beta = pm.Beta('beta', 1, alpha, shape=K)
w = pm.Deterministic('w', stick_breaking(beta))
mu = pm.Uniform('mu', 0., 300., shape=K)
obs = pm.Mixture('obs', w, pm.Poisson.dist(mu), observed=sunspot_df['sunspot.year'])
with model:
step = pm.Metropolis()
trace = pm.sample(10000, step=step, tune=90000, random_seed=SEED, njobs=6)
pm.traceplot(trace, varnames=['alpha']);
fig, ax = plt.subplots(figsize=(8, 6))
plot_w = np.arange(K) + 1
ax.bar(plot_w - 0.5, trace['w'].mean(axis=0), width=1., lw=0);
ax.set_xlim(0.5, K);
ax.set_xlabel('Component');
ax.set_ylabel('Posterior expected mixture weight');
x_plot = np.arange(250)
post_pmf_contribs = sp.stats.poisson.pmf(np.atleast_3d(x_plot),
trace['mu'][:, np.newaxis, :])
post_pmfs = (trace['w'][:, np.newaxis, :] * post_pmf_contribs).sum(axis=-1)
post_pmf_low, post_pmf_high = np.percentile(post_pmfs, [2.5, 97.5], axis=0)
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(sunspot_df['sunspot.year'].values, bins=40, normed=True, lw=0, alpha=0.75);
ax.fill_between(x_plot, post_pmf_low, post_pmf_high,
color='gray', alpha=0.45)
ax.plot(x_plot, post_pmfs[0],
c='gray', label='Posterior sample densities');
ax.plot(x_plot, post_pmfs[::200].T, c='gray');
ax.plot(x_plot, post_pmfs.mean(axis=0),
c='k', label='Posterior expected density');
ax.set_xlabel('Yearly sunspot count');
ax.set_yticklabels([]);
ax.legend(loc=1);
fig, ax = plt.subplots(figsize=(8, 6))
ax.hist(sunspot_df['sunspot.year'].values, bins=40, normed=True, lw=0, alpha=0.75);
ax.plot(x_plot, post_pmfs.mean(axis=0),
c='k', label='Posterior expected density');
ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pmf_contribs).mean(axis=0)[:, 0],
'--', c='k', label='Posterior expected\nmixture components\n(weighted)');
ax.plot(x_plot, (trace['w'][:, np.newaxis, :] * post_pmf_contribs).mean(axis=0),
'--', c='k');
ax.set_xlabel('Yearly sunspot count');
ax.set_yticklabels([]);
ax.legend(loc=1);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tokens
Step2: 21 tokens extracted, which include words and punctuation.
Step3: And we can apply it to an entire book, "The Prince" by Machiavelli, which we used last time.
Step4: As mentioned above, the NLTK tokeniser works in a more sophisticated way than just splitting on spaces, which is why we get more tokens this time.
Step5: As you can see, it does not split blindly after each full stop but checks whether the stop is part of an acronym (U.S.) or a number (0.99).
Step6: Most common tokens
Step7: The comma is the most common token
Step8: We can also remove any capital letters before tokenising
Step9: Now we have removed the punctuation and the capital letters, but the most common token is "the", which is not a very meaningful word ...
Step10: This would work but would also leave out tokens such as I and you, which are actually meaningful.
Step11: Now we have excluded words such as the, but we can further improve the list by grouping semantically similar words, such as singular and plural forms.
Step12: Stemming
Step13: And now we apply one of the NLTK stemmers, the Porter stemmer
Step14: As you see, all 5 different words have been reduced to the same stem and would now be treated as the same lexical token.
Step15: Now the word princ is counted 281 times, exactly like the sum of prince and princes.
Step16: Lemma
Step17: We tell the lemmatiser that the words are nouns. In this case it maps words such as list (singular noun) and lists (plural noun) to the same lemma, but leaves the other words as they are.
Step18: We get a different result if we say that the words are verbs.
Step19: It works with different adjectives; it doesn't look only at prefixes and suffixes.
Step20: Yes, the lemma now is prince.
Step21: The NLTK function pos_tag() will tag each token with the estimated PoS.
Step22: Which are the most common PoS tags in The Prince?
Step24: It's not nouns (NN) but prepositions and subordinating conjunctions (IN).
|
<ASSISTANT_TASK:>
Python Code:
sampleText1 = "The Elephant's 4 legs: THE Pub! You can't believe it or can you, the believer?"
sampleText2 = "Pierre Vinken, 61 years old, will join the board as a nonexecutive director Nov. 29."
import nltk
s1Tokens = nltk.word_tokenize(sampleText1)
s1Tokens
len(s1Tokens)
s2Tokens = nltk.word_tokenize(sampleText2)
s2Tokens
# If you would like to work with the raw text you can use 'bookRaw'
with open('../datasets/ThePrince.txt', 'r') as f:
bookRaw = f.read()
bookTokens = nltk.word_tokenize(bookRaw)
bookText = nltk.Text(bookTokens) # special format
nBookTokens= len(bookTokens) # or alternatively len(bookText)
print ("*** Analysing book ***")
print ("The book is {} chars long".format (len(bookRaw)))
print ("The book has {} tokens".format (nBookTokens))
text1 = "This is the first sentence. A liter of milk in the U.S. costs $0.99. Is this the third sentence? Yes, it is!"
sentences = nltk.sent_tokenize(text1)
len(sentences)
sentences
sentences = nltk.sent_tokenize(bookRaw) # extract sentences
nSent = len(sentences)
print ("The book has {} sentences".format (nSent))
print ("and each sentence has in average {} tokens".format (nBookTokens / nSent))
def get_top_words(tokens):
# Calculate frequency distribution
fdist = nltk.FreqDist(tokens)
return fdist.most_common()
topBook = get_top_words(bookTokens)
# Output top 20 words
topBook[:20]
topWords = [(freq, word) for (word,freq) in topBook if word.isalpha() and freq > 400]
topWords
def preprocessText(text, lowercase=True):
if lowercase:
tokens = nltk.word_tokenize(text.lower())
else:
tokens = nltk.word_tokenize(text)
return [word for word in tokens if word.isalpha()]
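# --- Quick check (not part of the original example) ---------------------------
# Applying the helper to the short sample sentence from the start of this
# section keeps only the lower-cased, purely alphabetic tokens:
preprocessText(sampleText1)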
bookWords = preprocessText(bookRaw)
topBook = get_top_words(bookWords)
# Output top 20 words
topBook[:20]
print ("*** Analysing book ***")
print ("The text has now {} words (tokens)".format (len(bookWords)))
meaningfulWords = [word for (word,freq) in topBook if len(word) > 5 and freq > 80]
sorted(meaningfulWords)
from nltk.corpus import stopwords
stopwordsEN = set(stopwords.words('english')) # english language
betterWords = [w for w in bookWords if w not in stopwordsEN]
topBook = get_top_words(betterWords)
# Output top 20 words
topBook[:20]
'princes' in betterWords
betterWords.count("prince") + betterWords.count("princes")
input1 = "List listed lists listing listings"
words1 = input1.lower().split(' ')
words1
porter = nltk.PorterStemmer()
[porter.stem(t) for t in words1]
stemmedWords = [porter.stem(w) for w in betterWords]
topBook = get_top_words(stemmedWords)
topBook[:20] # Output top 20 words
from nltk.stem.snowball import SnowballStemmer
stemmerIT = SnowballStemmer("italian")
inputIT = "Io ho tre mele gialle, tu hai una mela gialla e due pere verdi"
wordsIT = inputIT.split(' ')
[stemmerIT.stem(w) for w in wordsIT]
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
words1
[lemmatizer.lemmatize(w, 'n') for w in words1] # n = nouns
[lemmatizer.lemmatize(w, 'v') for w in words1] # v = verbs
words2 = ['good', 'better']
[porter.stem(w) for w in words2]
[lemmatizer.lemmatize(w, 'a') for w in words2]
lemmatisedWords = [lemmatizer.lemmatize(w, 'n') for w in betterWords]
topBook = get_top_words(lemmatisedWords)
topBook[:20] # Output top 20 words
text1 = "Children shouldn't drink a sugary drink before bed."
tokensT1 = nltk.word_tokenize(text1)
nltk.pos_tag(tokensT1)
nltk.help.upenn_tagset('RB')
tokensAndPos = nltk.pos_tag(bookTokens)
posList = [thePOS for (word, thePOS) in tokensAndPos]
fdistPos = nltk.FreqDist(posList)
fdistPos.most_common(5)
nltk.help.upenn_tagset('IN')
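# --- Hedged sketch (not part of the original example) -------------------------
# The lemmatiser works best when told the part of speech, so a common pattern is
# to map the Penn Treebank tags returned by pos_tag() onto WordNet PoS tags.
# penn_to_wordnet() is a hypothetical helper, not an NLTK function.
from nltk.corpus import wordnet
def penn_to_wordnet(penn_tag):
    if penn_tag.startswith('J'):
        return wordnet.ADJ
    elif penn_tag.startswith('V'):
        return wordnet.VERB
    elif penn_tag.startswith('R'):
        return wordnet.ADV
    return wordnet.NOUN  # reasonable default
tagged = nltk.pos_tag(nltk.word_tokenize("The princes were better rulers"))
[lemmatizer.lemmatize(w.lower(), penn_to_wordnet(t)) for (w, t) in tagged]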
# Parsing sentence structure
text2 = nltk.word_tokenize("Alice loves Bob")
grammar = nltk.CFG.fromstring("""
S -> NP VP
VP -> V NP
NP -> 'Alice' | 'Bob'
V -> 'loves'
""")
parser = nltk.ChartParser(grammar)
trees = parser.parse_all(text2)
for tree in trees:
print(tree)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Description
Step7: 1.4. Land Atmosphere Flux Exchanges
Step8: 1.5. Atmospheric Coupling Treatment
Step9: 1.6. Land Cover
Step10: 1.7. Land Cover Change
Step11: 1.8. Tiling
Step12: 2. Key Properties --> Conservation Properties
Step13: 2.2. Water
Step14: 2.3. Carbon
Step15: 3. Key Properties --> Timestepping Framework
Step16: 3.2. Time Step
Step17: 3.3. Timestepping Method
Step18: 4. Key Properties --> Software Properties
Step19: 4.2. Code Version
Step20: 4.3. Code Languages
Step21: 5. Grid
Step22: 6. Grid --> Horizontal
Step23: 6.2. Matches Atmosphere Grid
Step24: 7. Grid --> Vertical
Step25: 7.2. Total Depth
Step26: 8. Soil
Step27: 8.2. Heat Water Coupling
Step28: 8.3. Number Of Soil layers
Step29: 8.4. Prognostic Variables
Step30: 9. Soil --> Soil Map
Step31: 9.2. Structure
Step32: 9.3. Texture
Step33: 9.4. Organic Matter
Step34: 9.5. Albedo
Step35: 9.6. Water Table
Step36: 9.7. Continuously Varying Soil Depth
Step37: 9.8. Soil Depth
Step38: 10. Soil --> Snow Free Albedo
Step39: 10.2. Functions
Step40: 10.3. Direct Diffuse
Step41: 10.4. Number Of Wavelength Bands
Step42: 11. Soil --> Hydrology
Step43: 11.2. Time Step
Step44: 11.3. Tiling
Step45: 11.4. Vertical Discretisation
Step46: 11.5. Number Of Ground Water Layers
Step47: 11.6. Lateral Connectivity
Step48: 11.7. Method
Step49: 12. Soil --> Hydrology --> Freezing
Step50: 12.2. Ice Storage Method
Step51: 12.3. Permafrost
Step52: 13. Soil --> Hydrology --> Drainage
Step53: 13.2. Types
Step54: 14. Soil --> Heat Treatment
Step55: 14.2. Time Step
Step56: 14.3. Tiling
Step57: 14.4. Vertical Discretisation
Step58: 14.5. Heat Storage
Step59: 14.6. Processes
Step60: 15. Snow
Step61: 15.2. Tiling
Step62: 15.3. Number Of Snow Layers
Step63: 15.4. Density
Step64: 15.5. Water Equivalent
Step65: 15.6. Heat Content
Step66: 15.7. Temperature
Step67: 15.8. Liquid Water Content
Step68: 15.9. Snow Cover Fractions
Step69: 15.10. Processes
Step70: 15.11. Prognostic Variables
Step71: 16. Snow --> Snow Albedo
Step72: 16.2. Functions
Step73: 17. Vegetation
Step74: 17.2. Time Step
Step75: 17.3. Dynamic Vegetation
Step76: 17.4. Tiling
Step77: 17.5. Vegetation Representation
Step78: 17.6. Vegetation Types
Step79: 17.7. Biome Types
Step80: 17.8. Vegetation Time Variation
Step81: 17.9. Vegetation Map
Step82: 17.10. Interception
Step83: 17.11. Phenology
Step84: 17.12. Phenology Description
Step85: 17.13. Leaf Area Index
Step86: 17.14. Leaf Area Index Description
Step87: 17.15. Biomass
Step88: 17.16. Biomass Description
Step89: 17.17. Biogeography
Step90: 17.18. Biogeography Description
Step91: 17.19. Stomatal Resistance
Step92: 17.20. Stomatal Resistance Description
Step93: 17.21. Prognostic Variables
Step94: 18. Energy Balance
Step95: 18.2. Tiling
Step96: 18.3. Number Of Surface Temperatures
Step97: 18.4. Evaporation
Step98: 18.5. Processes
Step99: 19. Carbon Cycle
Step100: 19.2. Tiling
Step101: 19.3. Time Step
Step102: 19.4. Anthropogenic Carbon
Step103: 19.5. Prognostic Variables
Step104: 20. Carbon Cycle --> Vegetation
Step105: 20.2. Carbon Pools
Step106: 20.3. Forest Stand Dynamics
Step107: 21. Carbon Cycle --> Vegetation --> Photosynthesis
Step108: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
Step109: 22.2. Growth Respiration
Step110: 23. Carbon Cycle --> Vegetation --> Allocation
Step111: 23.2. Allocation Bins
Step112: 23.3. Allocation Fractions
Step113: 24. Carbon Cycle --> Vegetation --> Phenology
Step114: 25. Carbon Cycle --> Vegetation --> Mortality
Step115: 26. Carbon Cycle --> Litter
Step116: 26.2. Carbon Pools
Step117: 26.3. Decomposition
Step118: 26.4. Method
Step119: 27. Carbon Cycle --> Soil
Step120: 27.2. Carbon Pools
Step121: 27.3. Decomposition
Step122: 27.4. Method
Step123: 28. Carbon Cycle --> Permafrost Carbon
Step124: 28.2. Emitted Greenhouse Gases
Step125: 28.3. Decomposition
Step126: 28.4. Impact On Soil Properties
Step127: 29. Nitrogen Cycle
Step128: 29.2. Tiling
Step129: 29.3. Time Step
Step130: 29.4. Prognostic Variables
Step131: 30. River Routing
Step132: 30.2. Tiling
Step133: 30.3. Time Step
Step134: 30.4. Grid Inherited From Land Surface
Step135: 30.5. Grid Description
Step136: 30.6. Number Of Reservoirs
Step137: 30.7. Water Re Evaporation
Step138: 30.8. Coupled To Atmosphere
Step139: 30.9. Coupled To Land
Step140: 30.10. Quantities Exchanged With Atmosphere
Step141: 30.11. Basin Flow Direction Map
Step142: 30.12. Flooding
Step143: 30.13. Prognostic Variables
Step144: 31. River Routing --> Oceanic Discharge
Step145: 31.2. Quantities Transported
Step146: 32. Lakes
Step147: 32.2. Coupling With Rivers
Step148: 32.3. Time Step
Step149: 32.4. Quantities Exchanged With Rivers
Step150: 32.5. Vertical Grid
Step151: 32.6. Prognostic Variables
Step152: 33. Lakes --> Method
Step153: 33.2. Albedo
Step154: 33.3. Dynamics
Step155: 33.4. Dynamic Lake Extent
Step156: 33.5. Endorheic Basins
Step157: 34. Lakes --> Wetlands
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inm', 'inm-cm5-h', 'land')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
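# --- Illustrative note (not part of the original template) --------------------
# Every "TODO" cell below follows the same two-step pattern: select a property
# with DOC.set_id(...) and then record a value with DOC.set_value(...), e.g.
# (the value shown is a placeholder, not a real model description):
# DOC.set_id('cmip6.land.key_properties.model_name')
# DOC.set_value("<your land-surface model name>")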
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If the above cell ran without error, we're ready to go!
Step2: Download public 1000 Genomes data
Step3: Importing data from VCF
Step4: Next we read the written file, assigning the variable mt (for matrix table).
Step5: Getting to know our data
Step6: Alternatively
Step7: Here is how to peek at the first few sample IDs
Step8: To look at the first few genotype calls, we can use entries along with select and take. The take method collects the first n rows into a list. Alternatively, we can use the show method, which prints the first n rows to the console in a table format.
Step9: Adding column fields
Step10: A good way to peek at the structure of a Table is to look at its schema.
Step11: To peek at the first few values, use the show method
Step12: Now we'll use this table to add sample annotations to our dataset, storing the annotations in column fields in our MatrixTable. First, we'll print the existing column schema
Step13: We use the annotate_cols method to join the table with the MatrixTable containing our dataset.
Step14: Query functions and the Hail Expression Language
Step15: stats is an aggregation function that produces some useful statistics about numeric collections. We can use this to see the distribution of the CaffeineConsumption phenotype.
Step16: However, these metrics aren't perfectly representative of the samples in our dataset. Here's why
Step17: Since there are fewer samples in our dataset than in the full thousand genomes cohort, we need to look at annotations on the dataset. We can use aggregate_cols to get the metrics for only the samples in our dataset.
Step18: The functionality demonstrated in the last few cells isn't anything especially new
Step19: We can list the counts in descending order using Python's Counter class.
Step20: It's nice to see that we can actually uncover something biological from this small dataset
Step21: Quality Control
Step22: Plotting the QC metrics is a good place to start.
Step23: Often, these metrics are correlated.
Step24: Removing outliers from the dataset will generally improve association results. We can make arbitrary cutoffs and use them to filter
Step25: Next is genotype QC. It's a good idea to filter out genotypes where the reads aren't where they should be
Step26: Variant QC is a bit more of the same
Step27: These statistics actually look pretty good
Step28: These filters removed about 15% of sites (we started with a bit over 10,000). This is NOT representative of most sequencing datasets! We have already downsampled the full thousand genomes dataset to include more common variants than we'd expect by chance.
Step29: Looking at the bottom of the above printout, you can see the linear regression adds new row fields for the beta, standard error, t-statistic, and p-value.
Step30: This doesn't look like much of a skyline. Let's check whether our GWAS was well controlled using a Q-Q (quantile-quantile) plot.
Step31: Confounded!
Step32: Now that we've got principal components per sample, we may as well plot them! Human history exerts a strong effect in genetic datasets. Even with a 50MB sequencing dataset, we can recover the major human populations.
Step33: Now we can rerun our linear regression, controlling for sample sex and the first few principal components. We'll do this with input variable the number of alternate alleles as before, and again with input variable the genotype dosage derived from the PL field.
Step34: We'll first make a Q-Q plot to assess inflation...
Step35: That's more like it! This shape is indicative of a well-controlled (but not especially well-powered) study. And now for the Manhattan plot
Step36: We have found a caffeine consumption locus! Now simply apply Hail's Nature paper function to publish the result.
Step37: What if we want to group by minor allele frequency bin and hair color, and calculate the mean GQ?
Step38: We've shown that it's easy to aggregate by a couple of arbitrary statistics. These specific examples may not provide especially useful pieces of information, but the same pattern can be used to detect effects of rare variation
|
<ASSISTANT_TASK:>
Python Code:
import hail as hl
hl.init()
from hail.plot import show
from pprint import pprint
hl.plot.output_notebook()
hl.utils.get_1kg('data/')
hl.import_vcf('data/1kg.vcf.bgz').write('data/1kg.mt', overwrite=True)
mt = hl.read_matrix_table('data/1kg.mt')
mt.rows().select().show(5)
mt.row_key.show(5)
mt.s.show(5)
mt.entry.take(5)
table = (hl.import_table('data/1kg_annotations.txt', impute=True)
.key_by('Sample'))
table.describe()
table.show(width=100)
print(mt.col.dtype)
mt = mt.annotate_cols(pheno = table[mt.s])
mt.col.describe()
pprint(table.aggregate(hl.agg.counter(table.SuperPopulation)))
pprint(table.aggregate(hl.agg.stats(table.CaffeineConsumption)))
table.count()
mt.count_cols()
mt.aggregate_cols(hl.agg.counter(mt.pheno.SuperPopulation))
pprint(mt.aggregate_cols(hl.agg.stats(mt.pheno.CaffeineConsumption)))
snp_counts = mt.aggregate_rows(hl.agg.counter(hl.Struct(ref=mt.alleles[0], alt=mt.alleles[1])))
pprint(snp_counts)
from collections import Counter
counts = Counter(snp_counts)
counts.most_common()
p = hl.plot.histogram(mt.DP, range=(0,30), bins=30, title='DP Histogram', legend='DP')
show(p)
mt.col.describe()
mt = hl.sample_qc(mt)
mt.col.describe()
p = hl.plot.histogram(mt.sample_qc.call_rate, range=(.88,1), legend='Call Rate')
show(p)
p = hl.plot.histogram(mt.sample_qc.gq_stats.mean, range=(10,70), legend='Mean Sample GQ')
show(p)
p = hl.plot.scatter(mt.sample_qc.dp_stats.mean, mt.sample_qc.call_rate, xlabel='Mean DP', ylabel='Call Rate')
show(p)
mt = mt.filter_cols((mt.sample_qc.dp_stats.mean >= 4) & (mt.sample_qc.call_rate >= 0.97))
print('After filter, %d/284 samples remain.' % mt.count_cols())
ab = mt.AD[1] / hl.sum(mt.AD)
filter_condition_ab = ((mt.GT.is_hom_ref() & (ab <= 0.1)) |
(mt.GT.is_het() & (ab >= 0.25) & (ab <= 0.75)) |
(mt.GT.is_hom_var() & (ab >= 0.9)))
fraction_filtered = mt.aggregate_entries(hl.agg.fraction(~filter_condition_ab))
print(f'Filtering {fraction_filtered * 100:.2f}% entries out of downstream analysis.')
mt = mt.filter_entries(filter_condition_ab)
mt = hl.variant_qc(mt)
mt.row.describe()
mt = mt.filter_rows(mt.variant_qc.AF[1] > 0.01)
mt = mt.filter_rows(mt.variant_qc.p_value_hwe > 1e-6)
print('Samples: %d Variants: %d' % (mt.count_cols(), mt.count_rows()))
gwas = hl.linear_regression_rows(y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0])
gwas.row.describe()
p = hl.plot.manhattan(gwas.p_value)
show(p)
p = hl.plot.qq(gwas.p_value)
show(p)
eigenvalues, pcs, _ = hl.hwe_normalized_pca(mt.GT)
pprint(eigenvalues)
pcs.show(5, width=100)
mt = mt.annotate_cols(scores = pcs[mt.s].scores)
p = hl.plot.scatter(mt.scores[0],
mt.scores[1],
label=mt.pheno.SuperPopulation,
title='PCA', xlabel='PC1', ylabel='PC2')
show(p)
gwas = hl.linear_regression_rows(
y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0, mt.pheno.isFemale, mt.scores[0], mt.scores[1], mt.scores[2]])
p = hl.plot.qq(gwas.p_value)
show(p)
p = hl.plot.manhattan(gwas.p_value)
show(p)
entries = mt.entries()
results = (entries.group_by(pop = entries.pheno.SuperPopulation, chromosome = entries.locus.contig)
.aggregate(n_het = hl.agg.count_where(entries.GT.is_het())))
results.show()
entries = entries.annotate(maf_bin = hl.cond(entries.info.AF[0]<0.01, "< 1%",
hl.cond(entries.info.AF[0]<0.05, "1%-5%", ">5%")))
results2 = (entries.group_by(af_bin = entries.maf_bin, purple_hair = entries.pheno.PurpleHair)
.aggregate(mean_gq = hl.agg.stats(entries.GQ).mean,
mean_dp = hl.agg.stats(entries.DP).mean))
results2.show()
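# The same group-by/aggregate pattern extends directly to rare variation; for
# example (a small sketch reusing the `maf_bin` annotation defined above), we
# can count heterozygous calls restricted to the rarest bin per population:
rare = entries.filter(entries.maf_bin == "< 1%")
(rare.group_by(pop = rare.pheno.SuperPopulation)
     .aggregate(n_het = hl.agg.count_where(rare.GT.is_het()))).show()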
table = hl.import_table('data/1kg_annotations.txt', impute=True).key_by('Sample')
mt = hl.read_matrix_table('data/1kg.mt')
mt = mt.annotate_cols(pheno = table[mt.s])
mt = hl.sample_qc(mt)
mt = mt.filter_cols((mt.sample_qc.dp_stats.mean >= 4) & (mt.sample_qc.call_rate >= 0.97))
ab = mt.AD[1] / hl.sum(mt.AD)
filter_condition_ab = ((mt.GT.is_hom_ref() & (ab <= 0.1)) |
(mt.GT.is_het() & (ab >= 0.25) & (ab <= 0.75)) |
(mt.GT.is_hom_var() & (ab >= 0.9)))
mt = mt.filter_entries(filter_condition_ab)
mt = hl.variant_qc(mt)
mt = mt.filter_rows(mt.variant_qc.AF[1] > 0.01)
eigenvalues, pcs, _ = hl.hwe_normalized_pca(mt.GT)
mt = mt.annotate_cols(scores = pcs[mt.s].scores)
gwas = hl.linear_regression_rows(
y=mt.pheno.CaffeineConsumption,
x=mt.GT.n_alt_alleles(),
covariates=[1.0, mt.pheno.isFemale, mt.scores[0], mt.scores[1], mt.scores[2]])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Initializing and configuring the MovieLens Environment
Step2: 3. Initializing the Agent
Step3: 4. Define and link the evaluation metrics
Step4: 5. Initialize & configure the Replay Buffer
Step5: Now we have a replay buffer, but we also need something to fill it with. A common practice is to have
Step7: 6. Setup and Train the Model
Step8: Now we have all the components ready to start training the model. Here is the process for Training the model
Step9: Note
Step10: One last task before starting the training
Step11: <img src='./assets/example_tensorboard.png'>
|
<ASSISTANT_TASK:>
Python Code:
!pip install --quiet --upgrade --force-reinstall tensorflow==2.4 tensorflow_probability==0.12.1 tensorflow-io==0.17.0 --use-feature=2020-resolver
!pip install tf_agents==0.7.1 --quiet gast==0.3.3 --upgrade --use-feature=2020-resolver
import functools
import os
from absl import app
from absl import flags
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.agents import dropout_thompson_sampling_agent as dropout_ts_agent
from tf_agents.bandits.agents import lin_ucb_agent
from tf_agents.bandits.agents import linear_thompson_sampling_agent as lin_ts_agent
from tf_agents.bandits.agents import neural_epsilon_greedy_agent as eps_greedy_agent
from tf_agents.bandits.agents.examples.v2 import trainer
from tf_agents.bandits.environments import environment_utilities
#from tf_agents.bandits.environments import movielens_per_arm_py_environment
from tf_agents.bandits.environments import movielens_py_environment
from tf_agents.metrics import tf_metrics
from tf_agents.bandits.metrics import tf_metrics as tf_bandit_metrics
from tf_agents.bandits.networks import global_and_arm_feature_network
from tf_agents.environments import tf_py_environment
from tf_agents.networks import q_network
from tf_agents.drivers import dynamic_step_driver
from tf_agents.eval import metric_utils
from tf_agents.policies import policy_saver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import time_step as ts
# If there are version / incompatibility errors, make sure you restarted the kernel and use !pip freeze in a new cell to check whether the correct TF and tf_agents version had been installed.
# Create target directory if it doesn't exist
from datetime import date
today = date.today()
fdate = date.today().strftime('%d_%m_%Y')
root_path = os.getcwd()
log_path = "{}/{}".format(root_path, fdate)
if not os.path.exists(log_path):
os.mkdir(log_path)
print("Directory {} Created".format(fdate))
else:
print("Directory {} already exists".format(fdate))
print("Full path is {}".format(log_path))
# initialize the movielens pyenvironment with default parameters
NUM_ACTIONS = None # take this as 20
RANK_K = None # take rank as 20
BATCH_SIZE = None # take batch size as 8
data_path = "gs://ta-reinforecement-learning/dataset/movielens.data" # specify the path to the movielens.data OR get it from the GCS bucket
#TODO: replace the data path if needed
env = movielens_py_environment.MovieLensPyEnvironment(
data_path, RANK_K, BATCH_SIZE, num_movies=NUM_ACTIONS)
environment = tf_py_environment.TFPyEnvironment(env)
# Replace these values by reading the above instructions carefully
EPSILON = 0
LAYERS = None
LR = 0
DROPOUT_RATE = 0
# Initialize the Qnetwork
network = q_network.QNetwork(
input_tensor_spec=environment.time_step_spec().observation,
action_spec=environment.action_spec(),
fc_layer_params=LAYERS)
# Creating a neuron Epsilon greedy agent with an optimizer,
# Epsilon exploration value, learning & dropout rate
# Replace all the `None` values with the required values
agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
time_step_spec=None,# get the spec/format of the environment
action_spec=None, # get the spec/format of the environment
reward_network=None, #q network goes here
    optimizer=None,  # start with an Adam optimizer and a learning rate of .002
    epsilon=EPSILON)  # we recommend an exploration value of 1%
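# A possible completion of the constructor above (a commented sketch, not the
# only valid answer to the exercise): the specs come from `environment`, the
# reward network is the `network` built earlier, and the optimizer is Adam
# with the recommended learning rate of 0.002.
# agent = eps_greedy_agent.NeuralEpsilonGreedyAgent(
#     time_step_spec=environment.time_step_spec(),
#     action_spec=environment.action_spec(),
#     reward_network=network,
#     optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.002),
#     epsilon=EPSILON)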
# Making functions for computing optimal reward/action and attaching the env variable to it using partial functions, so it doesnt need to be passed with every invocation
optimal_reward_fn = functools.partial(
environment_utilities.compute_optimal_reward_with_movielens_environment,
environment=environment)
optimal_action_fn = functools.partial(
environment_utilities.compute_optimal_action_with_movielens_environment,
environment=environment)
# Initilializing the regret and suboptimal arms metric using the optimal reward and action functions
regret_metric = tf_bandit_metrics.RegretMetric(optimal_reward_fn)
suboptimal_arms_metric = tf_bandit_metrics.SuboptimalArmsMetric(
optimal_action_fn)
step_metric = tf_metrics.EnvironmentSteps()
metrics = [tf_metrics.NumberOfEpisodes(), #equivalent to number of steps in bandits problem
regret_metric, # measures regret
suboptimal_arms_metric, # number of times the suboptimal arms are pulled
tf_metrics.AverageReturnMetric(batch_size=environment.batch_size) # the average return
]
#TODO
STEPS_PER_LOOP = None
# TFUniformReplayBuffer is the most commonly used replay buffer in TF-Agents. Use 'tf_uniform_replay_buffer.TFUniformReplayBuffer' to create one.
buf = None
#TODO: set up the replay observer as a list that captures both the metrics and the step metric, and gives the driver access to the function that loads data into the buffer
replay_observer = None
driver = dynamic_step_driver.DynamicStepDriver(
env=None,
policy=None,
num_steps=STEPS_PER_LOOP * environment.batch_size,
observers=None)
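# One possible wiring of the buffer, observer list and driver (a commented
# sketch; using `agent.policy.trajectory_spec` as the data spec and
# `agent.collect_policy` for collection are assumptions, not prescribed values):
# buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(
#     data_spec=agent.policy.trajectory_spec,
#     batch_size=BATCH_SIZE,
#     max_length=STEPS_PER_LOOP)
# replay_observer = [buf.add_batch, step_metric] + metrics
# driver = dynamic_step_driver.DynamicStepDriver(
#     env=environment,
#     policy=agent.collect_policy,
#     num_steps=STEPS_PER_LOOP * environment.batch_size,
#     observers=replay_observer)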
AGENT_CHECKPOINT_NAME = 'agent'
STEP_CHECKPOINT_NAME = 'step'
CHECKPOINT_FILE_PREFIX = 'ckpt'
def restore_and_get_checkpoint_manager(root_dir, agent, metrics, step_metric):
    """Restores from `root_dir` and returns a function that writes checkpoints."""
trackable_objects = {metric.name: metric for metric in metrics}
trackable_objects[AGENT_CHECKPOINT_NAME] = agent
trackable_objects[STEP_CHECKPOINT_NAME] = step_metric
checkpoint = tf.train.Checkpoint(**trackable_objects)
checkpoint_manager = tf.train.CheckpointManager(checkpoint=checkpoint,
directory=root_dir,
max_to_keep=5)
latest = checkpoint_manager.latest_checkpoint
if latest is not None:
print('Restoring checkpoint from %s.', latest)
checkpoint.restore(latest)
print('Successfully restored to step %s.', step_metric.result())
else:
print('Did not find a pre-existing checkpoint. '
'Starting from scratch.')
return checkpoint_manager
checkpoint_manager = restore_and_get_checkpoint_manager(
log_path, agent, metrics, step_metric)
saver = policy_saver.PolicySaver(agent.policy)
summary_writer = tf.summary.create_file_writer(log_path)
summary_writer.set_as_default()
#TODO
# Replace `None` with the above given values
AGENT_ALPHA = None
TRAINING_LOOPS = None
## TRAINING
#TOFINISH: define number of training loops and write the training function
TRAINING_LOOPS = None # We recommend doing 15k loops
import warnings
warnings.filterwarnings('ignore')
for _ in range(TRAINING_LOOPS):
    # step 1: We first use the DynamicStepDriver instance to collect experience
    # (trajectories) from the environment and fill up the replay buffer.
    # step 2: We then extract all the stored experience from the replay buffer by
    # specifying the same batch size and num_steps that we initialized the driver with.
    # We extract it as a tf.data.Dataset instance.
    # step 3: We then iterate on the tf.data.Dataset; the first sample we draw
    # actually has all the data (batch_size * num_time_steps).
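    # A commented sketch of the missing steps 1-3 (assumes `buf`, `driver` and
    # BATCH_SIZE were filled in as in the earlier TODOs):
    # driver.run()                                             # step 1
    # dataset = buf.as_dataset(sample_batch_size=BATCH_SIZE,   # step 2
    #                          num_steps=STEPS_PER_LOOP,
    #                          single_deterministic_pass=True)
    # experience, unused_info = next(iter(dataset))            # step 3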
# step 4: The agent then trains on the acquired experience
train_loss = agent.train(experience).loss
# step 5: the replay buffer is cleared to make space for new data
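    # buf.clear()  # sketch for step 5, assuming `buf` from the earlier TODO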
# step 6: Log the metrics and store them on disk
metric_utils.log_metrics(metrics)
for metric in metrics:
metric.tf_summaries(train_step=step_metric.result())
# step 7: Save the Agent ( via checkpoints) as well as the policy
checkpoint_manager.save()
saver.save(os.path.join("./", 'policy_%d' % step_metric.result()))
print("tensorboard dev upload --logdir {} --name \"(optional) My latest experiment\" --description \"(optional) Agent trained\"".format(log_path))
import numpy as np
feature = np.reshape(environment._observe()[0], (1,20))
feature.shape
## Inference
step = ts.TimeStep(
tf.constant(
ts.StepType.FIRST, dtype=tf.int32, shape=[1],
name='step_type'),
tf.constant(0.0, dtype=tf.float32, shape=[1], name='reward'),
tf.constant(1.0, dtype=tf.float32, shape=[1], name='discount'),
tf.constant(feature,
dtype=tf.float64, shape=[1, 20],
name='observation'))
agent.policy.action(step).action.numpy()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a date index from the date column
Step2: resample()
Step3: You need to call resample with the rule parameter, then you need to call some sort of aggregation function. This is because, due to resampling, we need some sort of mathematical rule to join the rows by (mean, sum, count, etc.).
Step5: Custom Resampling
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
# Grab data
# Faster alternative
# df = pd.read_csv('time_data/walmart_stock.csv',index_col='Date')
df = pd.read_csv('time_data/walmart_stock.csv')
df.head()
df['Date'] = df['Date'].apply(pd.to_datetime)
df.head()
df.set_index('Date',
inplace = True)
df.head()
# Our index
df.index
# Yearly Means
df.resample(rule = 'A').mean()
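# The same pattern works with any resampling rule; for example, quarterly
# means (a small illustrative example):
df.resample(rule='Q').mean()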
def first_day(entry):
    """Returns the first instance of the period, regardless of sampling rate."""
return entry[0]
df.resample(rule = 'A').apply(first_day)
df['Close'].resample('A').mean().plot(kind = 'bar')
plt.title('Yearly Mean Close Price for Walmart')
df['Open'].resample('M').max().plot(kind = 'bar',
figsize = (16, 6))
plt.title('Monthly Max Opening Price for Walmart')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: 1.2 Training the ML algorithm
Step 1
Step6: Step 2
Step9: Step 3
Step12: Step 4
Step13: Exercise
|
<ASSISTANT_TASK:>
Python Code:
import csv
sentiment_csv = ""
def feature_extractor(word):
last_l = word[-1]
first_l = word[0]
return { 'first_letter' : first_l, 'last_letter' : last_l}
def main():
i_word = input("Enter the word ").lower()
features = feature_extractor(i_word)
print(features)
main()
import csv
def feature_extractor(word):
    """Extract the features for a given word and return a dictionary of the features."""
start_letter = word[0]
last_letter = word[-1]
return {'start_letter' : start_letter,'last_letter' : last_letter}
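# For example, feature_extractor("great") returns
# {'start_letter': 'g', 'last_letter': 't'}.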
def ML_train(sentiment_corpus):
    """Create a feature set from the corpus given to it."""
feature_set = []
with open(sentiment_corpus,'rt',encoding = 'utf-8') as sentobj:
sentiment_handle = csv.reader(sentobj)
for sentiment in sentiment_handle:
new_row = []
new_row.append(feature_extractor(sentiment[0])) #get the dictionary of features for a word
if int(sentiment[1]) >= 0: # Club the sentiment values (-5 to + 5) to just positive or negative
new_row.append('positive')
else:
new_row.append('negative')
feature_set.append(new_row)
print(feature_set)
def main():
sentiment_csv = "C:/Users/kmpoo/Dropbox/HEC/Teaching/Python for PhD Mar 2018/python4phd/Session 3/Sent/word_sentiment.csv"
ML_train(sentiment_csv)
main()
import csv
import random
def feature_extractor(word):
    """Extract the features for a given word and return a dictionary of the features."""
start_letter = word[0]
last_letter = word[-1]
return {'start_letter' : start_letter,'last_letter' : last_letter}
def ML_train(sentiment_corpus):
    """Create a feature set from the corpus given to it and split it into training and testing sets."""
feature_set = []
with open(sentiment_corpus,'rt',encoding = 'utf-8') as sentobj:
sentiment_handle = csv.reader(sentobj)
for sentiment in sentiment_handle:
new_row = []
new_row.append(feature_extractor(sentiment[0])) #get the dictionary of features for a word
if int(sentiment[1]) >= 0: # Club the sentiment values (-5 to + 5) to just positive or negative
new_row.append('positive')
else:
new_row.append('negative')
feature_set.append(new_row)
random.shuffle(feature_set)
# We need to shuffle the features since the word_sentiment.csv had words arranged in alphabetical order
train_set = feature_set[:1500] #the first 1500 words becomes our training set
test_set = feature_set[1500:]
print(len(test_set))
def main():
sentiment_csv = "C:/Users/kmpoo/Dropbox/HEC/Teaching/Python for PhD Mar 2018/python4phd/Session 3/Sent/word_sentiment.csv"
ML_train(sentiment_csv)
main()
import csv
import random
import nltk
def feature_extractor(word):
    """Extract the features for a given word and return a dictionary of the features."""
start_letter = word[0]
last_letter = word[-1]
return {'start_letter' : start_letter,'last_letter' : last_letter}
def ML_train(sentiment_corpus):
    """Create a feature set from the corpus given to it and split it into training and testing sets.
    Train the classifier using the naive Bayes model and return the classifier.
    """
feature_set = []
with open(sentiment_corpus,'rt',encoding = 'utf-8') as sentobj:
sentiment_handle = csv.reader(sentobj)
for sentiment in sentiment_handle:
new_row = []
new_row.append(feature_extractor(sentiment[0])) #get the dictionary of features for a word
if int(sentiment[1]) >= 0: # Club the sentiment values (-5 to + 5) to just positive or negative
new_row.append('positive')
else:
new_row.append('negative')
feature_set.append(new_row)
random.shuffle(feature_set)
# We need to shuffle the features since the word_sentiment.csv had words arranged in alphabetical order
train_set = feature_set[:1500] #the first 1500 words becomes our training set
test_set = feature_set[1500:]
classifier = nltk.NaiveBayesClassifier.train(train_set)
# Note: to create the classifier we need to provide a dictonary of features and the label ONLY
return classifier
def main():
sentiment_csv = "C:/Users/kmpoo/Dropbox/HEC/Teaching/Python for PhD Mar 2018/python4phd/Session 3/Sent/word_sentiment.csv"
classifier = ML_train(sentiment_csv)
input_word = input('Enter a word ').lower()
sentiment = classifier.classify(feature_extractor(input_word))
print('Sentiment of word "', input_word,'" is : ',sentiment)
main()
import csv
import random
import nltk
def feature_extractor(word):
    """Extract the features for a given word and return a dictionary of the features."""
start_letter = word[0]
last_letter = word[-1]
return {'start_letter' : start_letter,'last_letter' : last_letter}
def ML_train(sentiment_corpus):
    """Create a feature set from the corpus given to it and split it into training and testing sets.
    Train the classifier using the naive Bayes model and return the classifier.
    """
feature_set = []
with open(sentiment_corpus,'rt',encoding = 'utf-8') as sentobj:
sentiment_handle = csv.reader(sentobj)
for sentiment in sentiment_handle:
new_row = []
new_row.append(feature_extractor(sentiment[0])) #get the dictionary of features for a word
if int(sentiment[1]) >= 0: # Club the sentiment values (-5 to + 5) to just positive or negative
new_row.append('positive')
else:
new_row.append('negative')
feature_set.append(new_row)
random.shuffle(feature_set)
# We need to shuffle the features since the word_sentiment.csv had words arranged in alphabetical order
train_set = feature_set[:1500] #the first 1500 words becomes our training set
test_set = feature_set[1500:]
classifier = nltk.NaiveBayesClassifier.train(train_set)
# Note: to create the classifier we need to provide a dictonary of features and the label ONLY
print('Test accuracy of the classifier = ',nltk.classify.accuracy(classifier, test_set))
print(classifier.show_most_informative_features())
return classifier
def main():
sentiment_csv = "C:/Users/kmpoo/Dropbox/HEC/Teaching/Python for PhD Mar 2018/python4phd/Session 3/Sent/word_sentiment.csv"
classifier = ML_train(sentiment_csv)
input_word = input('Enter a word ').lower()
sentiment = classifier.classify(feature_extractor(input_word))
print('Sentiment of word "', input_word,'" is : ',sentiment)
main()
#Enter code here
#
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training data
Step2: Network architecture
Step3: Connect the layers
Step4: Load parameters and build Theano graph
Step5: <img src="theano_graph.svg">
Step6: Sampling from the model
Step7: Probability of a text
Step8: Most likely phrases from a bag of words
Step9: Least likely phrases
Step10: Morphology
Step11: Structure
|
<ASSISTANT_TASK:>
Python Code:
# Load training file to get vocabulary
text_file = 'biblia.txt' # input file
with codecs.open(text_file, 'r', 'utf-8') as f:
data = f.read()
chars = list(set(data))
vocab_size = len(chars)
char_to_ix = {ch: i for i, ch in enumerate(chars)}
ix_to_char = {i: ch for i, ch in enumerate(chars)}
print "Total number of chars:", len(data)
print "Vocabulary size:", vocab_size
print data[21000:22000]
# Define the model structure
embedding_size = 256 # number of hidden units per layer
# Input
lookup = LookupTable(length=vocab_size, dim=embedding_size)
# Layer 1
fork1 = Fork(output_names=['linear1', 'gates1'],
input_dim=embedding_size, output_dims=[embedding_size, embedding_size * 2])
fork1.name = 'fork1'
grnn1 = GatedRecurrent(dim=embedding_size)
grnn1.name = 'grnn1'
# Layer 2
fork2 = Fork(output_names=['linear2', 'gates2'],
input_dim=embedding_size, output_dims=[embedding_size, embedding_size * 2])
fork2.name = 'fork2'
grnn2 = GatedRecurrent(dim=embedding_size)
grnn2.name = 'grnn2'
# Softmax layer
hidden_to_output = Linear(name='hidden_to_output', input_dim=embedding_size,
output_dim=vocab_size)
softmax = NDimensionalSoftmax()
# Propagate x until top brick to get y_hat predictions
x = tensor.imatrix('features') # input
y = tensor.imatrix('targets') # output
embedding = lookup.apply(x)
linear1, gates1 = fork1.apply(embedding)
h1 = grnn1.apply(linear1, gates1)
h1.name = 'h1'
linear2, gates2 = fork2.apply(h1)
h2 = grnn2.apply(linear2, gates2)
h2.name = 'h2'
linear3 = hidden_to_output.apply(h2)
linear3.name = 'linear3'
y_hat = softmax.apply(linear3, extra_ndim=1)
y_hat.name = 'y_hat'
# COST
cost = softmax.categorical_cross_entropy(y, linear3, extra_ndim=1).mean()
cost.name = 'cost'
model = Model(cost)
# Load model parameters from a file
with open('grnn_best.tar') as model_file:
model_params = model.get_parameter_dict().keys()
param_vals = {k:v for k,v in load_parameters(model_file).iteritems() if k in model_params}
model.set_parameter_values(param_vals)
# Define Theano graph
y, x = model.inputs
softmax = NDimensionalSoftmax()
linear_output = [v for v in model.variables if v.name == 'linear3'][0]
y_hat = softmax.apply(linear_output, extra_ndim=1)
predict = theano.function([x], y_hat)
#theano.printing.pydotprint(predict, outfile="theano_graph.svg", format = 'svg', var_with_name_simple=True)
#take activations of last element
activations = [h1[-1].flatten(), h2[-1].flatten()]
initial_states = [grnn1.parameters[-1], grnn2.parameters[-1]]
states_as_params = [tensor.vector(dtype=initial.dtype) for initial in initial_states]
#Get prob. distribution of the last element in the last seq of the batch
fprop = theano.function([x] + states_as_params, activations + [y_hat[-1, -1, :]], givens=zip(initial_states, states_as_params))
def sample(x_curr, states_values, fprop, temperature=1.0):
'''
Propagate x_curr sequence and sample next element according to
temperature sampling.
Return: sampled element and a list of the hidden activations produced by fprop.
'''
activations = fprop(x_curr, *states_values)
probs = activations.pop().astype('float64')
probs = probs / probs.sum()
if numpy.random.binomial(1, temperature) == 1:
sample = numpy.random.multinomial(1, probs).nonzero()[0][0]
else:
sample = probs.argmax()
return sample, activations, probs[sample]
def init_params(primetext=u''):
if not primetext or len(primetext) == 0:
primetext = ix_to_char[numpy.random.randint(vocab_size)]
primetext = ''.join([ch for ch in primetext if ch in char_to_ix.keys()])
if len(primetext) == 0:
raise Exception('primetext characters are not in the vocabulary')
x_curr = numpy.expand_dims(
numpy.array([char_to_ix[ch] for ch in primetext], dtype='uint8'), axis=1)
states_values = [initial.get_value() for initial in initial_states]
return x_curr, states_values
def stochastic_sampling(length, primetext=u'', temperature=1.0):
x_curr, states_values = init_params(primetext)
sys.stdout.write('Starting sampling\n' + primetext)
for _ in range(length):
idx, states_values, probs = sample(x_curr, states_values, fprop, temperature)
sys.stdout.write(ix_to_char[idx])
x_curr = [[idx]]
sys.stdout.write('\n')
def beam_sampling(length, primetext=u'', beam_size=5, temperature=1.0):
x_curr, states_values = init_params(primetext)
inputs = [x_curr] * beam_size
states = [states_values] * beam_size
logprobs = numpy.zeros((beam_size, 1))
seqs = numpy.zeros((length+x_curr.shape[0], beam_size))
seqs[0:x_curr.shape[0], :] = numpy.repeat(x_curr, beam_size, axis=1)
for k in range(length):
probs = numpy.zeros((beam_size,beam_size))
indices = numpy.zeros((beam_size,beam_size), dtype='int32')
hstates = numpy.empty((beam_size,beam_size), dtype=list)
for i in range(beam_size):
for j in range(beam_size):
indices[i][j], hstates[i][j], probs[i][j] = sample(inputs[i], states[i], fprop, temperature)
probs = numpy.log(probs) + logprobs
best_idx = probs.argmax(axis=1)
inputs = [[[idx]] for idx in indices[range(beam_size), best_idx]]
states = [hs for hs in hstates[range(beam_size), best_idx]]
logprobs = probs[range(beam_size), best_idx].reshape((beam_size, 1))
seqs[k +x_curr.shape[0], :] = numpy.array(inputs).flatten()
return logprobs.flatten(), numpy.array(seqs).squeeze()
logprobs, seqs = beam_sampling(100, primetext=u'blanco ', beam_size = 7, temperature = 1.0)
for i in logprobs.flatten().argsort()[::-1]:
print 'log P(s) = {0:3.3f}. Sample: '.format(logprobs.flatten()[i]) + u''.join([ix_to_char[ix] for ix in numpy.array(seqs).squeeze()[:,i]])
print '~' * 50
stochastic_sampling(3000, primetext=u'El sentido de la vida es', temperature=0.3)
# Function to calculate the probability of a text
def log_likelihood(text):
text = ''.join([ch for ch in text if ch in char_to_ix])
x_curr = numpy.expand_dims(numpy.array([char_to_ix[ch] for ch in text], dtype='uint8'), axis=1)
probs = predict(x_curr).squeeze()
return sum([numpy.log(probs[i,c]) for i,c in enumerate(x_curr[1:].flatten())])
log_likelihood("buscad, y hallaréis")
log_likelihood("this is a test")
from itertools import permutations
bow = [' ', 'hombre', 'ama', 'a', 'el']
perms = [' '.join(perm) for perm in permutations(bow)]
for p, t in sorted([(-log_likelihood(text),text) for text in perms])[:20]:
print p, t
perms = [' '.join(perm) for perm in permutations(bow)]
for p, t in sorted([(-log_likelihood(text),text) for text in perms])[-20:]:
print p, t
from itertools import permutations
from random import shuffle
text = list(u'mnpu')
perms = [''.join(perm) for perm in permutations(text)]
for p, t in sorted([(-log_likelihood(text),text) for text in perms])[:5]:
print p, t
print "------------------"
for p, t in sorted([(-log_likelihood(text),text) for text in perms])[-5:]:
print p, t
print stochastic_sampling(400, u"(Lc. ", temperature = 0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: THINGS TO REMEMBER
Step2: Using Python's binom function we can plot the binomial distribution function for this case.
Step3: $\textbf{FIGURE 1.}$ Binomial distribution for the example.
Step4: $\textbf{FIGURE 2.}$ Galaxies in the deep field.
Step5: If we assume that the distribution determined in the previous step is a Poisson distribution, we can state things such as
Step6: Let us now compare the obtained distribution with the corresponding Poisson distribution
Step7: $\textbf{FIGURE 3.}$ Ideal Poisson distribution compared with the one generated by the data.
|
<ASSISTANT_TASK:>
Python Code:
dado = np.array([5, 3, 3, 2, 5, 1, 2, 3, 6, 2, 1, 3, 6, 6, 2, 2, 5, 6, 4, 2, 1, 3, 4, 2, 2, 5, 3, 3,
2, 2, 2, 1, 6, 2, 2, 6, 1, 3, 3, 3, 4, 4, 6, 6, 1, 2, 2, 6, 1, 4, 2, 5, 3, 6, 6, 3,
5, 2, 2, 4, 2, 2, 4, 4, 3, 3, 1, 2, 6, 1, 3, 3, 5, 4, 6, 6, 4, 2, 5, 6, 1, 4, 5, 4, 3, 5,
4, 1, 4, 6, 6, 6, 3, 1, 5, 6, 4, 3, 4, 6, 3, 5, 2, 6, 3, 6, 1, 4, 3, 4, 1])
suma = np.array([8, 5, 6, 5, 8, 4, 12, 4, 11, 6, 4, 6, 7, 6, 4, 3, 8, 8, 4, 6, 8, 12, 3, 8, 5, 7, 9, 9,
7, 6, 4, 8, 6, 3, 7, 6, 9, 12, 6, 11, 5, 9, 8, 5, 10, 12, 4, 11, 7, 10, 8, 8, 9, 7, 7, 5])
prob = 10./36 # probability that the sum is below 6
#prob = 6./21 # probability that the sum is below 6
#np.where(suma[0:8]<6)
mediaS = suma.size*prob # mean of the binomial distribution
devS = np.sqrt(suma.size*prob*(1.-prob)) # standard deviation of the binomial distribution
real = np.where(suma<6) # where returns the info in a one-position tuple that holds the array
real = real[0] # extract the information from position one of the tuple and store it in real
duda = 16 # x, number of successes whose probability we want to know
Prob = 0 # probability of getting a number of successes less than or equal to duda
for cont in range(0,duda):
Prob = Prob + (math.factorial(suma.size)/(math.factorial(cont)*math.factorial(suma.size - cont))) \
*prob**cont*(1.-prob)**(suma.size-cont)
print('La probabilidad de que la suma sea inferior a 6 es %.2f' % prob)
print('Número total de pruebas igual a %d' % suma.size)
print('Suma promedio igual a %.1f' %mediaS)
print('Desviación estándar de la suma = %.1f' % devS)
print('Número de veces que suma menos de 6 en la muestra es %.1f' % real.size)
print('La probabilidad de que el número de éxitos en una muestra de %d sea \
inferior o igual a %d, donde el éxito es que la suma sea inferior a 6, es %.4f' %(suma.size,duda,Prob))
n = suma.size
p = prob
x = np.arange(0,30)
histB = stats.binom.pmf(x, n, p)
plt.figure(1)
plt.rcParams['figure.figsize'] = 20, 6 # to adjust the figure size
plt.plot(x, histB, 'bo', ms=8, label='Distribucion binomial')
plt.xlabel('Numero de exitos')
plt.ylabel('Probabilidad')
ProbB = np.sum(histB[0:duda])
print('Probabilidad de que en solo %d ocasiones la suma sea inferior a 6 es %.4f' %(duda,ProbB))
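# The same cumulative probability can be read directly from the binomial CDF
# (sanity check): summing the PMF over k = 0..duda-1 equals cdf(duda - 1).
print('Check with the binomial CDF: %.4f' % stats.binom.cdf(duda - 1, n, p))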
Ima = misc.imread('HDF-bw.jpg') # Read the image as an 8-bit grayscale matrix
plt.rcParams['figure.figsize'] = 20, 6 # to adjust the figure size
Imab = Ima[100:500,100:700,1] # The original image had three channels (RGB); pick one channel and crop it
plt.figure(2)
plt.imshow(Imab, cmap='gray')
plt.rcParams['figure.figsize'] = 18, 15 # to adjust the figure size
fil, col = Imab.shape # number of rows and columns of the image
numlado = 10 # Number of sub-images per side
contar = 1
plt.figure(5)
for enfil in range(1,numlado+1):
for encol in range(1,numlado+1):
plt.subplot(numlado,numlado,contar)
plt.imshow(Imab[(enfil-1)*np.int(fil/numlado):enfil*np.int(fil/numlado), \
(encol-1)*np.int(col/numlado):encol*np.int(col/numlado)],cmap='gray')
frame1 = plt.gca()
frame1.axes.get_yaxis().set_visible(False)
frame1.axes.get_xaxis().set_visible(False)
contar = contar + 1
# For the 7x7 sub-image case, gal holds the number of galaxies counted in each sub-image
gal = np.array([2., 3., 6., 5., 4., 9., 10., \
2., 3., 7., 1., 3., 1., 6., \
6., 5., 4., 3., 4., 2., 4., \
4., 6., 3., 3., 4., 3., 2., \
5., 4., 2., 2., 6., 5., 9., \
4., 7., 2., 3., 3., 3., 5., \
6., 3., 4., 7., 4., 6., 7.])
la = np.mean(gal) # Mean value of the data set
# Distribution of the data set. The first row is the number of galaxies, the second is the number of
# times that number of galaxies is repeated
distriGal = np.array([[0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10.],[0., 1., 8., 11., 10., 5., 6., 4., 0., 2., 1.]])
print('Valor promedio del conjunto de datos = %.2f' % la)
plt.figure(figsize=(16,9))
plt.plot(distriGal[0,:],distriGal[1,:]/gal.size,'r*',ms=10,label='Distribución datos con promedio %.1f' % la)
plt.legend()
plt.xlabel('Número de galaxias en el intervalo')
plt.ylabel('Rata de ocurrencia')
plt.grid()
num = 2. # Number of galaxies we expect to find
prob = (la**num*np.exp(-la)/math.factorial(num))*100 # Probability of finding that number of galaxies
x = np.arange(0,20) # data range: number of galaxies
histP = stats.poisson.pmf(x, la) # Poisson probability mass function
ProbP = (np.sum(histP[0:int(num)+1]))*100 # Cumulative probability
print('Promedio de galaxias en el área estudiada = %.2f' % la)
print('La probabilidad de que se observe en la imagen del espacio profundo %d galaxias es = %.1f%%' % (num,prob))
print('Probabilidad de observar hasta %d galaxias = %.1f%%' %(num,ProbP))
plt.figure(figsize=(16,9))
plt.plot(x, histP, 'bo--', ms=8, label='Distribución de Poisson con $\lambda=$ %.1f' % la)
plt.plot(distriGal[0,:],distriGal[1,:]/gal.size,'r*--',ms=10,label='Conjunto de datos con promedio %.1f' % la)
plt.xlabel('Numero de galaxias (sucesos)')
plt.ylabel('Rata de ocurrencia')
plt.legend()
plt.grid()
plt.figure(4)
plt.rcParams['figure.figsize'] = 12, 6 # to adjust the figure size
probP = np.zeros(20)
for la in range(1,10,2):
for num in range(0,20):
probP[num] = la**num*np.exp(-la)/math.factorial(num)
plt.plot(probP,marker='.',ms=15,label='$\lambda = %d$' %la)
    mu = la # arithmetic mean
    sigma = np.sqrt(la) # standard deviation
x = np.arange(0,20,1)
f = (1./np.sqrt(2*np.pi*sigma**2))*np.exp(-(x-mu)**2/(2*sigma**2))
plt.plot(f,marker='*',ms=10,color='black',label='$ \overline{x} = %d , \ \sigma = %.1f$'%(mu,sigma))
plt.xlabel('Evento')
plt.ylabel('Probabilidad')
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The <a href="http
Step2: We will take some time off now to assign each document to a category, to ease our work later on. Since the two last documents refer to classification we will label them as "Relevant" and the first document as "Not relevant". Since we only have two categories, this makes it a binary problem and we will represent "Relevant" as 1 and "Not relevant" as -1.
Step3: We now create our document collection
Step4: Now we will create the object responsible for the hashed BoW representation. We are going to specify that we want a hash size of 8 bits, which will be translated to a dimension of size 2^8 = 256 (powers of 2 are considered to speed up computatins) and a tokenizer that creates 5-grams. We will also specify that we want to
Step5: And that was it!<br>
Step6: We have now created our svm. The parameter C specifies the regularization constant. The best choice for this parameter will usually be selected after a model selection process.<br>
Step7: When the execution finishes, we will have learned our desired linear model! Mind that for large collections the above call can take hours.<br>
Step8: We can see that it misclassified the first document. This has to do with the nature of our overly-simplified toy dataset which doesn't provide enough information. However, another option of the HashedDocDotFeatures class will allow us to extract some more information from the same dataset!<br>
Step9: If we do not specify these numbers, as we did not before, then (you may have guessed it!) they are set by default to the following values: n=1, k=0!
Step10: Better!<br>
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from shogun import StringCharFeatures, RAWBYTE, HashedDocDotFeatures, NGramTokenizer
doc_1 = "this is the first document"
doc_2 = "document classification introduction"
doc_3 = "a third document about classification"
document_collection = [doc_1, doc_2, doc_3]
from shogun import BinaryLabels
from numpy import array
labels = BinaryLabels(array([-1, 1, 1]))
string_features = StringCharFeatures(document_collection, RAWBYTE)
hash_size = 8
tokenizer = NGramTokenizer(5)
normalize = True
hashed_feats = HashedDocDotFeatures(hash_size, string_features, tokenizer, normalize)
from shogun import SVMOcas
C = 0.1
epsilon = 0.01
svm = SVMOcas(C, hashed_feats, labels)
svm.set_epsilon(epsilon)
_=svm.train()
predicted_labels = svm.apply()
print (predicted_labels.get_labels())
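# A quick sanity check of the predictions against the true labels (a sketch
# using plain numpy rather than Shogun's evaluation classes):
import numpy as np
print("Accuracy: %.2f" % np.mean(predicted_labels.get_labels() == labels.get_labels()))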
k = 3 # number of tokens, up to which we allow it to skip
n = 3 # number of tokens, up to which we allow it to combine
hashed_feats_quad = HashedDocDotFeatures(hash_size, string_features, tokenizer, normalize, n, k)
svm.set_features(hashed_feats_quad)
svm.train()
predicted_labels = svm.apply()
print(predicted_labels.get_labels())
from pylab import *
# HashedDocDotFeatures results
hashed_training_examples = [5000, 10000, 15000, 20000, 25000, 30000, 50000, 100000]
# For C=1
hashed_C_1_sec = [2682.750000,5202.690000,8120.460000,10846.410000,13944.200000,17016.840000,30496.720000,66302.950000]
hashed_C_1_roc = [0.980730,0.986382,0.988894,0.990666,0.991602,0.991957,0.993680,0.995184]
# For C=0.1
hashed_C_01_sec = [1074.130000,2142.390000,3434.710000,4641.380000,5984.530000,7206.040000,12864.270000,28393.540000]
hashed_C_01_roc = [0.976560,0.982660,0.985251,0.987380,0.988368,0.989022,0.990950,0.993197]
# Spectrum kernel results
kernel_training_examples = [5000, 10000, 15000, 20000, 25000]
# For C=1
kernel_C_1_sec = [2912.410000,6543.220000,10840.550000,16108.360000,19899.610000]
kernel_C_1_roc = [0.971284,0.976628,0.979715,0.982084,0.984355]
# For C=0.1
kernel_C_01_sec = [1441.380000,3261.870000,5071.040000,7568.130000,10436.430000]
kernel_C_01_roc = [0.946308,0.955245,0.961576,0.965204,0.968264]
figure(figsize=(12,6))
subplot(1,2,1)
plot(hashed_training_examples, hashed_C_1_sec, 'b')
plot(kernel_training_examples, kernel_C_1_sec, 'r')
title("Time comparison for C=1")
xlabel("Number of examples")
ylabel("Time in seconds")
legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=2)
subplot(1,2,2)
plot(hashed_training_examples, hashed_C_1_roc, 'b')
plot(kernel_training_examples, kernel_C_1_roc, 'r')
title("Area under ROC comparison for C=1")
xlabel("Number of examples")
ylabel("auROC")
_=legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=4)
clf
figure(figsize=(12,6))
subplot(1,2,1)
plot(hashed_training_examples, hashed_C_01_sec, 'b')
plot(kernel_training_examples, kernel_C_01_sec, 'r')
title("Time comparison for C=0.1")
xlabel("Number of examples")
ylabel("Time in seconds")
ylim((0,70000))
legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=2)
subplot(1,2,2)
plot(hashed_training_examples, hashed_C_01_roc, 'b')
plot(kernel_training_examples, kernel_C_01_roc, 'r')
title("Area under ROC comparison for C=0.1")
xlabel("Number of examples")
ylabel("auROC")
_=legend(["HashedDocDotFeatures", "Spectrum Kernel"], loc=4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df=pd.DataFrame(data=[[1,1,2,5],[1,3,4,1],[4,1,2,5],[5,1,4,9],[1,1,2,5]],columns=['val', 'col1','col2','3col'])
def g(df):
cols = list(df.filter(like='col'))
df['index_original'] = df.groupby(cols)[cols[0]].transform('idxmax')
for i in range(len(df)):
i = len(df) - 1 - i
origin = df.loc[i, 'index_original']
if i <= origin:
continue
if origin == df.loc[origin, 'index_original']:
df.loc[origin, 'index_original'] = i
df.loc[i, 'index_original'] = df.loc[origin, 'index_original']
return df[df.duplicated(subset=cols, keep='last')]
result = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the RegOnline output into a pandas DataFrame
Step2: Extract the Sunday Sessions
Step3: Let's create two new columns in our DataFrame
Step4: Now loop through the five unique sessions, updating the PrimerID column for each participant
Step5: Create an abbreviated code for each session. This will be added to the nametag to spark conversation among participants.
Step6: A quick preview of the first few rows to see the result
Step7: Extract the Monday Sessions
Step8: Extract Tuesday Sessions
Step9: Combine the DataFrames
Step10: Now create a new DataFrame that is a subset of the newdf with only the columns of interest. Also, make sure the DataFrame is sorted by lastname, the index is reset, and it's a copy of newdf instead of a pointer to newdf.
Step11: Now replace all empty cells for "Company" to a very general location
Step12: Replace NaNs for PrimerID with the "Not Attending" ID
Step13: Check for NaNs in the Monday ID
Step14: Replace NaNs for the MonID with the "Not Attending" ID
Step15: Replace NaNs for the TueID with the "Not Attending" ID
Step16: Test out the wrap-around text for institute for participants that have long institution names. This regular expression will look for institutions (or Companies, as RegOnline refers to them), and find items that have a '/', and if no '/', either a '-', ',', or 'at' in the text. If so, add a newline character to make the text wrap around to the next line.
Step17: And test a cell that is long and contains 'at', but where 'at' is part of a longer word
Step18: And a quick test on a few more institutions
Step19: Now update the full Company column of the DataFrame
Step20: Plot Labels
|
<ASSISTANT_TASK:>
Python Code:
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib
#%matplotlib inline
df = pd.read_excel('/Users/matt/projects/EPRV/data/missings2.xls', encoding='utf-8')
df.columns
df.loc[16:17]
#df['AgendaItem'].str.contains('Doppler Primer:')
sundf = df[df['AgendaItem'].str.contains('Doppler Primer:')].copy()
len(sundf)
sundf['PrimerID'] = 0
sundf['Primer'] = [re.search(r'(.*):\s(.*)$', item).group(2) for item in sundf['AgendaItem']]
sundf[['AgendaItem', 'Primer']].head(3)
sundf['Primer'].unique()
dopID = 0
for agItem in sundf['Primer'].unique():
sundf.loc[sundf['Primer'] == agItem, 'PrimerID'] = dopID
dopID += 1
sun_ses = ['NA', 'SA', 'IC', 'DC', 'SM']
sundf[['AgendaItem', 'Primer', 'PrimerID']].head(4)
mondf = df[df['AgendaItem'].str.contains('Monday Break-out:')].copy()
len(mondf)
mondf['MonID'] = 0
mondf['Monday'] = [re.search(r'(.*):\s(.*)$', item).group(2) for item in mondf['AgendaItem']]
mondf['Monday'].unique()
monID = 0
for agItem in mondf['Monday'].unique():
mondf.loc[mondf['Monday'] == agItem, 'MonID'] = monID
monID += 1
mondf['Monday'].unique()
mon_ses = ['NA', 'FS', 'TC', 'BC', 'FC']
mondf[['AgendaItem', 'Monday', 'MonID']].head(4)
tuedf = df[df['AgendaItem'].str.contains('Tuesday Break-out:')].copy()
len(tuedf)
tuedf['TueID'] = 0
tuedf['Tuesday'] = [re.search(r'(.*):\s(.*)$', item).group(2) for item in tuedf['AgendaItem']]
tuedf['Tuesday'].unique()
tuesID = 0
for agItem in tuedf['Tuesday'].unique():
tuedf.loc[tuedf['Tuesday'] == agItem, 'TueID'] = tuesID
tuesID += 1
tuedf['Tuesday'].unique()
tue_ses = ['NA', 'ST', 'DC', 'LB', 'PS']
tuedf[['AgendaItem', 'Tuesday', 'TueID']].head(4)
fulldf = df[['RegId', 'GroupId', 'FirstName', 'LastName', 'Company']]
print(len(fulldf))
fulldf = fulldf.drop_duplicates()
print(len(fulldf))
print(len(sundf))
print(len(mondf))
print(len(tuedf))
fulldf.columns
sundf.columns
newdf = pd.merge(fulldf, sundf, on=['RegId', 'GroupId', 'FirstName', 'LastName', 'Company'], how='left')
print(len(newdf))
newdf = pd.merge(newdf, mondf, on=['RegId', 'GroupId', 'FirstName', 'LastName', 'Company'], how='left')
print(len(newdf))
newdf = pd.merge(newdf, tuedf, on=['RegId', 'GroupId', 'FirstName', 'LastName', 'Company'], how='left')
print(len(newdf))
newdf.head(5)
newdf.columns
finaldf = newdf[['FirstName', 'LastName', 'Company', 'Primer', 'PrimerID', 'Monday', 'MonID', 'Tuesday', 'TueID']].sort('LastName').reset_index().copy()
finaldf.head(5)
len(finaldf)
finaldf.columns
finaldf.Company = ['Earth' if pd.isnull(company_el) else company_el for company_el in finaldf.Company]
finaldf.PrimerID = [4 if pd.isnull(primerid_el) else primerid_el for primerid_el in finaldf.PrimerID]
len(finaldf[pd.isnull(finaldf['MonID'])])
finaldf.MonID = [4 if pd.isnull(monid_el) else monid_el for monid_el in finaldf.MonID]
len(finaldf[pd.isnull(finaldf['MonID'])])
len(finaldf[pd.isnull(finaldf['TueID'])])
finaldf.TueID = [4 if pd.isnull(tueid_el) else tueid_el for tueid_el in finaldf.TueID]
len(finaldf[pd.isnull(finaldf['TueID'])])
p = re.compile ('(/|^(?!.*/).*-|^(?!.*/).*,|^(?!.*/).*\sat\s)')
p.subn(r'\1\n', finaldf.loc[2].Company)[0]
#p.subn(r'\1\n', finaldf.loc[53].Company)[0]
[p.sub(r'\1\n', company_el) if len(company_el) > 30 else company_el for company_el in finaldf.head(5).Company.values]
finaldf.Company = [p.sub(r'\1\n', company_el) if len(company_el) > 30 else company_el for company_el in finaldf.Company.values]
png = mpimg.imread('/Users/matt/projects/EPRV/images/NameTag2.png')
png.shape
import matplotlib.font_manager as mfm
fontpaths = ['/System/Library/Fonts/',
'/Library/Fonts',
'/Library/Fonts/Microsoft',
'/usr/X11/lib/X11/fonts',
'/opt/X11/share/fonts',
'/Users/matt/Library/Fonts']
blaa = mfm.findSystemFonts(fontpaths=fontpaths)
colors = ['#FFE2A9', '#4BA4D8', '#768085', '#BF5338', '#335B8F']
colors2 = ['#335B8F', '#BF5338', '#768085', '#4BA4D8', '#FFE2A9']
colors3 = ['#4BA4D8', '#FFE2A9', '#BF5338', '#768085', '#335B8F']
circ_ypos = 775
name_dict = {'family': 'YaleNew-Roman',
'color': '#D6E8E1',
'weight': 'bold',
'size': 28
}
company_dict = {'family': 'YaleNew-Roman',
'color': '#D6E8E1',
'weight': 'bold',
'size': 16
}
circle_dict = {'family': 'YaleNew-Roman',
'color': '#1D2523',
'weight': 'normal',
'size': 20
}
def change_name_size(name, name_dict):
if len(name) < 16:
name_dict['size'] = 28
elif ((len(name) >= 16) and (len(name) < 19)):
name_dict['size'] = 24
elif ((len(name) >= 19) and (len(name) < 24)):
name_dict['size'] = 20
elif ((len(name) >= 24) and (len(name) < 30)):
name_dict['size'] = 17
else:
name_dict['size'] = 16
return name_dict
def change_company_size(company, company_dict):
    newlines = len(re.findall(r'\n', company))  # count the line breaks in this company name
if newlines == 0:
if len(company) < 15:
company_dict['size'] = 18
elif ((len(company) >= 15) and (len(company) < 30)):
company_dict['size'] = 14
elif ((len(company) >= 30) and (len(company) < 40)):
company_dict['size'] = 12
elif ((len(company) >= 40) and (len(company) < 50)):
company_dict['size'] = 10
else:
company_dict['size'] = 8
else:
if len(company) < 15:
company_dict['size'] = 18
elif ((len(company) >= 15) and (len(company) < 40)):
company_dict['size'] = 14
elif ((len(company) >= 40) and (len(company) < 50)):
company_dict['size'] = 12
else:
company_dict['size'] = 10
return company_dict
# The HP Color LaserJet CP4020 offsets things by 1/16th of an inch left-to-right.
# This fudge factor should fix that:
hrz_fdg = 1. / 16./ 8.5
leftarr = np.array([0.0294, 0.5, 0.0294, 0.5, 0.0294, 0.5]) + hrz_fdg
bottomarr = [0.091, 0.091, 0.364, 0.364, 0.637, 0.637]
width = 0.4706
height = 0.273
# loop through the total number of pages:
for page in range(int(np.ceil((len(finaldf))/6.))):
print('Now on page: {}'.format(page))
fig = plt.figure(figsize=(8.5, 11))
for indx in range(6):
# add an if statement to handle the last page if there are less than
# six participants remaining:
if ((page*6 + indx) < len(finaldf)):
rect = [leftarr[indx], bottomarr[indx], width, height]
ax = fig.add_axes(rect)
ax.imshow(png)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
print(u'Now making name tag for: {} {}'.format(finaldf.loc[page*6 + indx].FirstName, finaldf.loc[page*6 + indx].LastName))
#add name text:
name = finaldf.loc[page*6 + indx].FirstName + ' ' + finaldf.loc[page*6 + indx].LastName
this_name_dict = change_name_size(name, name_dict)
ax.text(600, 500, name, fontdict=this_name_dict, horizontalalignment='center')
#add company text:
company = finaldf.loc[page*6 + indx].Company
this_co_dict = change_company_size(company, company_dict)
ax.text(600, 625, company, fontdict=this_co_dict, horizontalalignment='center')
#add circles for sessions:
circ1 = plt.Circle((750, circ_ypos), 70, color=colors[int(finaldf.loc[page*6 + indx].PrimerID)])
fig.gca().add_artist(circ1)
ax.text(750, circ_ypos + 27.5, sun_ses[int(finaldf.loc[page*6 + indx].PrimerID)], fontdict=circle_dict, horizontalalignment='center')
circ2 = plt.Circle((925, circ_ypos), 70, color=colors2[int(finaldf.loc[page*6 + indx].MonID)])
fig.gca().add_artist(circ2)
ax.text(925, circ_ypos + 27.5, mon_ses[int(finaldf.loc[page*6 + indx].MonID)], fontdict=circle_dict, horizontalalignment='center')
circ3 = plt.Circle((1100, circ_ypos), 70, color=colors3[int(finaldf.loc[page*6 + indx].TueID)])
fig.gca().add_artist(circ3)
ax.text(1100, circ_ypos + 27.5, tue_ses[int(finaldf.loc[page*6 + indx].TueID)], fontdict=circle_dict, horizontalalignment='center')
plt.savefig('../nametags/more_missing_nameTags_bold_p'+str(page)+'.png', dpi=300)
finaldf.columns
finaldf.FirstName.values
finaldf.LastName.values
hrz_fdg = 1. / 16./ 8.5
leftarr = np.array([0.0294, 0.5, 0.0294, 0.5, 0.0294, 0.5])
leftarr + hrz_fdg
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test the visualisation with the first n vectors
Step2: We can see how the image can be reconstructed without using all the information from the original matrix,
|
<ASSISTANT_TASK:>
Python Code:
#######################################################################
### Part 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
# Import the image
imagen = Image.open('C:/Users/Data Mining/Documents/ITAM/Propedeutico/Alumnos/PropedeuticoDataScience2017/Alumnos/Leonardo_Marin/black_and_white.jpg')
imagen_gris = imagen.convert('LA') ## Convert to grayscale
## Convert the image to a matrix
imagen_mat = np.array(list(imagen_gris.getdata(band=0)), float)
imagen_mat.shape = (imagen_gris.size[1], imagen_gris.size[0])
imagen_mat = np.matrix(imagen_mat)
plt.imshow(imagen_mat, cmap='gray')
imagen_gris
## Singular value decomposition
U, sigma, V = np.linalg.svd(imagen_mat)
# n= 1
j = 1
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.imshow(matriz_equivalente, cmap='gray')
# n = 5
j = 5
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.imshow(matriz_equivalente, cmap='gray')
# n = 25
j = 25
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.imshow(matriz_equivalente, cmap='gray')
# n = 50
j = 50
matriz_equivalente = np.matrix(U[:, :j]) * np.diag(sigma[:j]) * np.matrix(V[:j, :])
plt.imshow(matriz_equivalente, cmap='gray')
A = np.array([[1,0],[1,2]])
A
def pseudoinversa(A):
    U, s, Vh = np.linalg.svd(A)  # SVD returns U, the singular values s, and V^T (Vh)
    pinv_A = Vh.T @ np.diag(1.0 / s) @ U.T  # Moore-Penrose pseudoinverse: V diag(1/s) U^T
    return pinv_A
B = pseudoinversa(A)
B
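# Sanity check (sketch): for an invertible matrix such as A, the pseudoinverse
# should coincide with the ordinary inverse.
np.allclose(pseudoinversa(A), np.linalg.inv(A))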
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we read in the data
Step2: Combining and preparing the data
Step3: For the exhibitions we also need to restrict by theme, and throw out the permanent exhibitions. Moreover, exhibitions run over a certain period, so we also need to check for exhibitions that start before 1 January 2014 but are still running during 2014. The same goes for the end of the year.
Step4: We will now convert the organiser into a typology. We first have to build that mapping from organiser to typology based on the data that Simon was able to supply.
Step5: Now we merge everything together, select only the relevant columns, and drop duplicates based on date, municipality and text. We also no longer need the column with performance dates (speelmomenten).
Step6: We can briefly inspect what these data look like.
Step7: We see that each row contains an event description, the place where the event takes place, and the date. Note that events taking place on multiple days get a separate row. We will take this into account when interpreting the results.
Step8: And how many of them have no description?
Step9: We can now write this dataset out to an Excel file, so that Simon can make some final corrections and additions.
Step10: Country mentions
Step11: For completeness, we can make a quick overview of (organisers, discipline).
Step12: For our analysis we also need a list of country names, coordinates for the place names, and a manually created mapping to normalise the place names in the UiTdatabank data.
Step13: Let us look at these tables in a bit more detail. The countries
Step14: For each country we know which (political) continent it lies in, and the 'Mention' column contains the different ways that country can appear in the text.
Step15: The column Gemeente Origineel is the name of the municipality in the UiTdatabank data, and we can use the names in the columns Fusiegemeente and Province (English) to get a normalised view.
Step16: With this we can look up the latitude and longitude for every Fusiegemeente (see previous table).
|
<ASSISTANT_TASK:>
Python Code:
from pandas import read_excel, read_csv, DataFrame, Series, concat
from datetime import datetime
from codecs import open
from re import compile
from json import dumps
from datetime import datetime
from random import sample
from collections import Counter
from itertools import combinations
df_podium = read_excel("ruwe data/podium.xlsx", sheetname='theaterdans1014')
df_bk = read_excel("ruwe data/beeldendekunsten.xlsx", sheetname='UitRapport')
df_muziek = read_excel("ruwe data/muziek.xlsx", sheetname='Int nat reg')
df_podium["Organisator"] = df_podium["Typologie organisator"]
df_podium = df_podium[df_podium["Datum"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))]
datumregex = compile(r"\d\d/\d\d/\d\d")
df_muziek_expanded = df_muziek.copy()
for row in df_muziek_expanded.iterrows():
speelmomenten = row[1]["Speelmomenten"]
if str(speelmomenten) != "nan":
for speelmoment in datumregex.findall(speelmomenten):
speelmoment_dt = datetime(int("20" + speelmoment.split("/")[2]),
int(speelmoment.split("/")[1]),
int(speelmoment.split("/")[0]))
if speelmoment_dt != row[1]["Datum"]:
df_muziek_expanded = df_muziek_expanded.append(
Series(
{"Discipline": row[1]["Discipline"],
"Subdiscipline": row[1]["Subdiscipline"],
"Tekst": row[1]["Tekst"],
"Datum": speelmoment_dt,
"Gemeente": row[1]["Gemeente"]
}, name=speelmoment_dt.isoformat() + " " + str(row[0])
)
)
df_muziek_expanded = df_muziek_expanded[df_muziek_expanded["Datum"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))]
subdisciplines = ["Beeldhouwkunst",
"Fotografie",
"Grafiek",
"Installatiekunst",
"Kunst en kunsteducatie",
"Meerdere kunstvormen",
"Schilderkunst"]
df_bk_filtered = df_bk[df_bk["Datum tot"] != datetime(1900, 1, 1)]
df_bk_filtered = df_bk_filtered[df_bk_filtered["Subdiscipline"].isin(subdisciplines)]
df_bk_filtered = df_bk_filtered[
(df_bk_filtered["Datum van"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))) |
(df_bk_filtered["Datum tot"].between(datetime(2014, 1, 1), datetime(2014, 12, 31)))
]
df_bk_filtered["Datum"] = df_bk_filtered["Datum van"]
df_bk_filtered = df_bk_filtered.drop(["Datum van", "Datum tot"], axis=1)
typering = read_excel("extra gegevens/typologie-organisatoren-plat.xlsx")
def simplify_key(k):
return str(str(k).lower().strip().replace(" ", "").encode("ascii", "replace")).replace("?", "").replace("_", "").replace('"', '').lstrip("b'").rstrip("'").replace('"', '').replace("'", "").replace(".", "").replace(",", "")
def map_organisator_naar_typologie_plat(item, mapping):
key = simplify_key(item)
try:
return mapping[mapping["key"] == key]["value"].values[0]
except IndexError:
onmapbaar.add(key)
onmapbaar = set()
df_bk_filtered["typering"] = df_bk_filtered["Organisator"].apply(map_organisator_naar_typologie_plat, args=(typering,))
df_muziek_expanded["typering"] = df_muziek_expanded["Organisator"].apply(map_organisator_naar_typologie_plat, args=(typering,))
df_podium["typering"] = df_podium["Organisator"].apply(map_organisator_naar_typologie_plat, args=(typering,))
df_muziek_expanded_clean = df_muziek_expanded.drop("Speelmomenten", axis=1)
df_podium_clean = df_podium.drop("Typologie organisator", axis=1)
df = concat([df_podium_clean, df_bk_filtered, df_muziek_expanded_clean])
df.drop_duplicates(subset=["Datum", "Titel", "Discipline", "Subdiscipline", "Gemeente", "Tekst"], inplace=True)
df.drop(["Titel", "Organisator"], axis=1, inplace=True)
df.head()
df["Discipline"].value_counts()
print("podium", len(df[(df["Tekst"].str.strip() == "") & (df["Discipline"] == "podium")]["Tekst"]),
"concert", len(df[(df["Tekst"].str.strip() == "") & (df["Discipline"] == "Concert")]["Tekst"]),
"beeldend", len(df[(df["Tekst"].str.strip() == "") & (df["Discipline"] == "Tentoonstelling")]["Tekst"]))
def encode(item):
return item.replace("\x08", "")
df["Tekst"] = df["Tekst"].apply(encode)
df.to_excel("samengevoegde data/df.xlsx")
df = read_excel("samengevoegde data/df.xlsx")
df.groupby(["typering", "Discipline"]).size()
typering = read_csv("extra gegevens/mapping_udb-gemeente_fusie-gemeente.csv", delimiter=';')
coord = read_csv("extra gegevens/coordinaten.csv", delimiter=';')
landen = read_excel("extra gegevens/landen.xlsx", sheetname="uitgebreide lijst 2014")
landen.head()
typering.tail()
coord.head()
niet_vlaams = ["Jodoigne", "Tournai", "Escanaffles", "Houffalize", "Haulchin",
"Braine l'Alleud", "Tourinnes-la-Grosse", "Liège", "Marchienne-au-Pont",
"Eupen", "Lessines", "Charleroi"]
count = 1
kwic = []
aantal_treffers = 0
for row in df.iterrows():
if count % 5000 == 0:
print(count, "of", len(df.index))
count += 1
tekst = row[1]["Tekst"]
gemeente = row[1]["Gemeente"]
organisatie = row[1]["typering"]
if str(gemeente) != "nan" and str(gemeente) not in niet_vlaams:
for land in set(landen["Land"].values):
regex = compile(r"\b(" + r"|".join(landen[landen["Land"] == land]["Mention"]) + r")\b")
matches = regex.finditer(str(tekst))
for match in matches:
aantal_treffers += 1
typeringlijn = typering[typering["Gemeente Origineel"] == gemeente]
fusiegemeente = typeringlijn["Fusiegemeente"].values[0]
provincie = typeringlijn["Province (English)"].values[0]
continent = landen[landen["Land"] == land]["Continent (staatkundig)"].values[0]
discipline = row[1]["Discipline"]
subdiscipline = row[1]["Subdiscipline"]
uid = count
kwic_lijn = [uid,
tekst[:match.start()],
tekst[match.start():match.end()],
tekst[match.end():],
gemeente,
land,
discipline,
subdiscipline]
kwic.append(kwic_lijn)
DataFrame(kwic, columns=["uid", "left context", "keyword", "right context", "gemeente", "land", "discipline", "subdiscipline"]).to_excel("kwic.xlsx")
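# Quick tally (sketch, not in the original analysis): number of mentions found
# per country, using the kwic rows built above (index 5 holds the country) and
# the Counter import from the top of this notebook.
land_counts = Counter(lijn[5] for lijn in kwic)
print(land_counts.most_common(10))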
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and check data
Step2: ## Analysis
Step3: Does improved weight pruning outperform regular SET?
Step4: No significant difference between the two approaches
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from nupic.research.frameworks.dynamic_sparse.common.browser import *
exps = ['improved_magpruning_eval1', ]
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
df = load_many(paths)
df.head(5)
# replace NaNs in the prune-percentage columns with 0.0
df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True)
df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True)
df.columns
df.shape
df.iloc[1]
df.groupby('model')['model'].count()
# Did any trials fail?
df[df["epochs"]<30]["epochs"].count()
# Removing failed or incomplete trials
df_origin = df.copy()
df = df_origin[df_origin["epochs"]>=30]
df.shape
# which ones failed?
# failed, or still ongoing?
df_origin['failed'] = df_origin["epochs"]<30
df_origin[df_origin['failed']]['epochs']
# helper functions
def mean_and_std(s):
return "{:.3f} ± {:.3f}".format(s.mean(), s.std())
def round_mean(s):
return "{:.0f}".format(round(s.mean()))
stats = ['min', 'max', 'mean', 'std']
def agg(columns, filter=None, round=3):
if filter is None:
return (df.groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'model': ['count']})).round(round)
else:
return (df[filter].groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'model': ['count']})).round(round)
agg(['model'])
agg(['on_perc', 'model'])
agg(['weight_prune_perc', 'model'])
agg(['on_perc', 'pruning_early_stop', 'model'])
agg(['on_perc', 'pruning_early_stop', 'model'])
agg(['pruning_early_stop'])
agg(['model', 'pruning_early_stop'])
agg(['on_perc', 'pruning_early_stop'])
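# Extra view (not in the original notebook): compare the two pruning approaches
# side by side by grouping on both model and weight pruning level, reusing the
# agg() helper defined above.
agg(['model', 'weight_prune_perc'])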
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: classes buyers and sellers
Step2: Construct the market
Step3: Observer
Step4: Example Market
Step5: run the model
Step6: Operations Research Formulation
Step7: Time of last run
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import random as rnd
import pandas as pd
import numpy as np
import time
import datetime
import calendar
# fix what is missing with the datetime/time/calendar package
def add_months(sourcedate,months):
month = sourcedate.month - 1 + months
year = int(sourcedate.year + month / 12 )
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year, month)[1])
return datetime.date(year,month,day)
# measure how long it takes to run the script
startit = time.time()
dtstartit = datetime.datetime.now()
class Seller():
def __init__(self, name):
self.name = name
self.wta = []
self.step = 0
self.prod = 2000
self.lb_price = 10
self.ub_price = 20
self.reserve = 500000
#multiple market idea, also ga away from market
self.subscr_market = {}
# the supplier has n quantities that they can sell
# they may be willing to sell this quantity anywhere from a lower price of l
# to a higher price of u
def set_quantity(self):
n = self.prod
l = self.lb_price
u = self.ub_price
wta = []
for i in range(n):
p = rnd.uniform(l, u)
wta.append(p)
self.wta = wta
def get_name(self):
return self.name
def get_asks(self):
return self.wta
def clear_wta(self):
self.wta = []
def extract(self, cur_extraction):
if self.reserve > 0:
self.reserve = self.reserve - cur_extraction
else:
self.prod = 0
class Buyer():
def __init__(self, name):
self.name = name
self.wtp = []
self.step = 0
self.base_demand = 0
self.max_demand = 0
self.lb_price = 10
self.ub_price = 20
# the supplier has n quantities that they can buy
# they may be willing to sell this quantity anywhere from a lower price of l
# to a higher price of u
def set_quantity(self):
n = int(self.consumption(self.step))
l = self.lb_price
u = self.ub_price
wtp = []
for i in range(n):
p = rnd.uniform(l, u)
wtp.append(p)
self.wtp = wtp
# gets a little to obvious
def get_name(self):
return self.name
# return list of willingness to pay
def get_bids(self):
return self.wtp
# is this neccesary?
def clear_wtp(self):
self.wtp = []
def consumption(self, x):
# make it initialise to seller
b = self.base_demand
m = self.max_demand
y = b + m * (.5 * (1 + np.cos((x/6)*np.pi)))
return(y)
# the book is an object of the market used for the clearing procedure
class Book():
def __init__(self):
self.ledger = pd.DataFrame(columns = ("role","name","price","cleared"))
def set_asks(self,seller_list):
# ask each seller their name
# ask each seller their willingness
# for each willingness append the data frame
for seller in seller_list:
seller_name = seller.get_name()
seller_price = seller.get_asks()
for price in seller_price:
self.ledger=self.ledger.append({"role":"seller","name":seller_name,"price":price,"cleared":"in process"},
ignore_index=True)
def set_bids(self,buyer_list):
# ask each seller their name
# ask each seller their willingness
# for each willingness append the data frame
for buyer in buyer_list:
buyer_name = buyer.get_name()
buyer_price = buyer.get_bids()
for price in buyer_price:
self.ledger=self.ledger.append({"role":"buyer","name":buyer_name,"price":price,"cleared":"in process"},
ignore_index=True)
def update_ledger(self,ledger):
self.ledger = ledger
def get_ledger(self):
return self.ledger
def clean_ledger(self):
self.ledger = pd.DataFrame(columns = ("role","name","price","cleared"))
class Market():
def __init__(self):
self.count = 0
self.last_price = ''
self.book = Book()
self.b = []
self.s = []
self.buyer_list = []
self.seller_list = []
self.buyer_dict = {}
self.seller_dict = {}
self.ledger = ''
def update_seller(self):
for i in self.seller_dict:
self.seller_dict[i].step += 1
self.seller_dict[i].set_quantity()
def update_buyer(self):
for i in self.buyer_dict:
self.buyer_dict[i].step += 1
self.buyer_dict[i].set_quantity()
def add_buyer(self,buyer):
self.b.append(buyer)
self.buyer_list.append(buyer)
def add_seller(self,seller):
self.s.append(seller)
self.seller_list.append(seller)
def set_book(self):
self.book.set_bids(self.buyer_list)
self.book.set_asks(self.seller_list)
def get_ledger(self):
self.ledger = self.book.get_ledger()
return self.ledger
def get_bids(self):
# this is a data frame
ledger = self.book.get_ledger()
rows= ledger.loc[ledger['role'] == 'buyer']
# this is a series
prices=rows['price']
# this is a list
bids = prices.tolist()
return bids
def get_asks(self):
# this is a data frame
ledger = self.book.get_ledger()
rows = ledger.loc[ledger['role'] == 'seller']
# this is a series
prices=rows['price']
# this is a list
asks = prices.tolist()
return asks
# return the price at which the market clears
# this fails because there are more buyers then sellers
def get_clearing_price(self):
# buyer makes a bid starting with the buyer which wants it most
b = self.get_bids()
s = self.get_asks()
# highest to lowest
self.b=sorted(b, reverse=True)
# lowest to highest
self.s=sorted(s, reverse=False)
# find out whether there are more buyers or sellers
# then drop the excess buyers or sellers; they won't compete
n = len(b)
m = len(s)
# there are more sellers than buyers
# drop off the highest priced sellers
if (m > n):
s = s[0:n]
matcher = n
# There are more buyers than sellers
# drop off the lowest bidding buyers
else:
b = b[0:m]
matcher = m
# It's possible that not all items sold actually clear the market here
count = 0
for i in range(matcher):
if (self.b[i] > self.s[i]):
count +=1
self.last_price = self.b[i]
# copy count to market
self.count = count
return self.last_price
# TODO: Annotate the ledger
def annotate_ledger(self,clearing_price):
ledger = self.book.get_ledger()
for index, row in ledger.iterrows():
if (row['role'] == 'seller'):
if (row['price'] < clearing_price):
ledger.loc[index,'cleared'] = 'True'
else:
ledger.loc[index,'cleared'] = 'False'
else:
if (row['price'] > clearing_price):
ledger.loc[index,'cleared'] = 'True'
else:
ledger.loc[index,'cleared'] = 'False'
self.book.update_ledger(ledger)
def get_units_cleared(self):
return self.count
def clean_ledger(self):
self.ledger = ''
self.book.clean_ledger()
def run_it(self):
self.pre_clearing_operation()
self.clearing_operation()
self.after_clearing_operation()
#pre clearing empty out the last run and start
# clean ledger is kind of sloppy, rewrite functions to overide the ledger
def pre_clearing_operation(self):
self.clean_ledger()
self.update_buyer()
self.update_seller()
def clearing_operation(self):
self.set_book()
clearing_price = self.get_clearing_price()
self.annotate_ledger(clearing_price)
def after_clearing_operation(self):
for i in self.seller_dict:
name = self.seller_dict[i].name
cur_extract = len(self.book.ledger[(self.book.ledger.cleared == 'True') &
(self.book.ledger.name == name)])
self.seller_dict[i].extract(cur_extract)
class Observer():
def __init__(self, x, y, z):
self.init_buyer = x
self.init_seller = y
self.maxrun = z
self.hist_book = []
self.buyer_dict = {}
self.seller_dict = {}
self.timetick = 0
self.gas_market = ''
self.reserve = []
def set_buyer(self, buyer_info):
for name in buyer_info:
self.buyer_dict[name] = Buyer('%s' % name)
self.buyer_dict[name].base_demand = buyer_info[name]['b']
self.buyer_dict[name].max_demand = buyer_info[name]['m']
def set_seller(self, seller_info):
for name in seller_info:
self.seller_dict[name] = Seller('%s' % name)
self.seller_dict[name].prod = seller_info[name][0]
def get_reserve(self):
reserve = []
for name in self.seller_dict:
reserve.append(self.seller_dict[name].reserve)
return reserve
def set_market(self):
self.gas_market = Market()
#add suplliers and buyers to this market
for supplier in self.seller_dict.values():
self.gas_market.add_seller(supplier)
for buyer in self.buyer_dict.values():
self.gas_market.add_buyer(buyer)
self.gas_market.seller_dict = self.seller_dict
self.gas_market.buyer_dict = self.buyer_dict
def run_it(self):
# Timing
# time initialising
startit_init = time.time()
#initialise, setting up all the agents
first_run = True
if first_run:
self.set_buyer(self.init_buyer)
self.set_seller(self.init_seller)
self.set_market()
first_run=False
# time init stop
stopit_init = time.time() - startit_init
print('%s : init' % stopit_init)
for period in range(self.maxrun):
# time the period
startit_period = time.time()
self.timetick += 1
print('#######################################')
period_now = add_months(period_null, self.timetick-1)
print(period_now.strftime('%Y-%b'))
# real action on the market
self.gas_market.run_it()
# data collection
p_clearing = self.gas_market.last_price
q_sold = self.gas_market.count
self.reserve.append([period_now.strftime('%Y-%b'),*self.get_reserve()])
# recording the step_info
# since this operation can take quite a while, print after every operation
period_time = time.time() - startit_period
print('%s : period time' % period_time)
self.hist_book.append([period_now.strftime('%Y-%b'), p_clearing, q_sold])
# Show some real consumption data, for more data see folder data analytics
#read montly consumption data of 2010 into a dataframe
df = pd.read_csv('2010cbstestrun.csv', header=0, index_col=0)
df = df.transpose()
#plot the 2010 monthly consumption data
df.plot();
df
# make initialization dictionary
init_buyer = {'elec':{'b':400, 'm' : 673}, 'indu':{'b':400, 'm':1171}, 'home':{'b': 603, 'm': 3615}}
init_seller = {'netherlands' : (2000, 0, 10), 'Russia' : (2000, 0, 10)}
# make a history book to record every timestep
hist_book = []
# set the starting time
period_null= datetime.date(2010,1,1)
# create observer and run the model
# first data about buyers then sellers and then model ticks
years = 1
timestep = 12
obser1 = Observer(init_buyer, init_seller, years*timestep)
obser1.run_it()
#get the info from the observer
hist_book = obser1.hist_book
# recording the total run
def write_to_csv(hist_book):
f = open('hist_book.csv', 'a')
for item in hist_book:
f.write('%s,%s\n' % (item[0], item[1]))
f.close()
#write_to_csv(hist_book)
# make a dataframe of clearing prices
df_hb = pd.DataFrame(hist_book)
df_hb = df_hb.set_index(0)
df_hb.index.name = 'month'
df_hb.rename(columns={1: 'price', 2: 'quantity'}, inplace=True)
# timeit
stopit = time.time()
dtstopit = datetime.datetime.now()
print('it took us %s seconds to get to this conclusion' % (stopit-startit))
print('in another notation (h:m:s) %s'% (dtstopit - dtstartit))
# print the run results
price = df_hb['price']
fig = price.plot()
plt.ylabel('€ / unit')
plt.show()
quantity = df_hb['quantity']
fig = quantity.plot()
plt.ylabel('quantity')
plt.show()
# print the time of last run
print('last run of this notebook:')
time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime())
#df_hb
df_res = pd.DataFrame(obser1.reserve, columns=['time', *[i for i in init_seller]])
df_res = df_res.set_index('time')
df_res.plot();
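# Illustrative sketch (not part of the original model): the clearing rule used
# in Market.get_clearing_price() on toy numbers -- bids sorted high-to-low,
# asks sorted low-to-high, and a unit clears as long as the bid exceeds the ask.
toy_bids = sorted([18, 15, 12, 11], reverse=True)
toy_asks = sorted([10, 13, 14, 16])
toy_cleared = sum(1 for bid, ask in zip(toy_bids, toy_asks) if bid > ask)
print('toy market clears %s units' % toy_cleared)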
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from keras.models import Sequential
from keras.layers import Dense
from sklearn.datasets import make_blobs
from sklearn.preprocessing import MinMaxScaler
from numpy import array
X, y = make_blobs(n_samples=100, centers=2, n_features=2, random_state=1)
scalar = MinMaxScaler()
scalar.fit(X)
X = scalar.transform(X)
model = Sequential()
model.add(Dense(4, input_shape=(2,), activation='relu'))
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')
model.fit(X, y, epochs=500, verbose=0)
Xnew = array([[0.89337759, 0.65864154]])
ynew = model.predict_classes(Xnew)
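# Note: `predict_classes` only exists in older Keras releases; with newer
# versions (TensorFlow >= 2.6) an equivalent for this sigmoid output would be:
# ynew = (model.predict(Xnew) > 0.5).astype("int32")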
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def getSum(n, d):
    # sum of all i in [0, n] whose last digit equals d
    total = 0
    for i in range(n + 1):
        if i % 10 == d:
            total += i
    return total

if __name__ == "__main__":
    n, d = 30, 3
    print(getSum(n, d))
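# Sketch (not in the original): the same sum computed in O(1), assuming
# 0 <= d <= 9; the qualifying terms are d, d + 10, d + 20, ... up to n.
def getSumClosedForm(n, d):
    if n < d:
        return 0
    k = (n - d) // 10 + 1
    return k * d + 10 * k * (k - 1) // 2

assert getSumClosedForm(30, 3) == getSum(30, 3)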
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test N2 case
Step2: Reference results from GetCro
Step3: Test LF calculations (CG version)
Step4: With sym summation
|
<ASSISTANT_TASK:>
Python Code:
# Imports
import numpy as np
import pandas as pd
import xarray as xr
# Special functions
# from scipy.special import sph_harm
import spherical_functions as sf
import quaternion
# Performance & benchmarking libraries
# from joblib import Memory
# import xyzpy as xyz
import numba as nb
# Timings with ttictoc or time
# https://github.com/hector-sab/ttictoc
# from ttictoc import TicToc
import time
# Package fns.
# For module testing, include path to module here
import sys
import os
if sys.platform == "win32":
modPath = r'D:\code\github\ePSproc' # Win test machine
else:
modPath = r'/home/femtolab/github/ePSproc/' # Linux test machine
sys.path.append(modPath)
import epsproc as ep
# TODO: tidy this up!
from epsproc.util import matEleSelector
from epsproc.geomFunc import geomCalc, geomUtils
from epsproc.geomFunc.lfblmGeom import lfblmXprod
# Load data from modPath\data
dataPath = os.path.join(modPath, 'data', 'photoionization')
dataFile = os.path.join(dataPath, 'n2_3sg_0.1-50.1eV_A2.inp.out') # Set for sample N2 data for testing
# Scan data file
dataSet = ep.readMatEle(fileIn = dataFile)
dataXS = ep.readMatEle(fileIn = dataFile, recordType = 'CrossSection') # XS info currently not set in NO2 sample file.
# Plot cross sections using Xarray functionality
dataXS[0].sel({'Type':'L', 'XC':'SIGMA'}).plot.line(x='Eke');
# Plot B2
dataXS[0].sel({'Type':'L', 'XC':'BETA'}).plot.line(x='Eke');
# Set parameters
SFflag = False # Multiply matrix elements by SF?
symSum = False # Sum over symmetries?
phaseConvention = 'S'
thres = 1e-2
selDims = {'it':1, 'Type':'L'}
thresDims = 'Eke'
# Set terms for testing - NOTE ORDERING HERE may affect CG term!!!
dlistMatE = ['lp', 'l', 'L', 'mp', 'm', 'M'] # Match published terms
dlistP = ['p1', 'p2', 'L', 'mup', 'mu', 'M']
# dlistMatE = ['l', 'lp', 'L', 'm', 'mp', 'M'] # Standard terms
# dlistP = ['p1', 'p2', 'L', 'mu', 'mup', 'M']
# Set matrix elements
matE = dataSet[0].copy()
# Calculate betas
BetaNormXS, BetaNorm, BetaRaw, XSmatE = lfblmXprod(matE, symSum = symSum, SFflag = SFflag,
thres = thres, thresDims = thresDims, selDims = selDims,
phaseConvention = phaseConvention,
dlistMatE = dlistMatE, dlistP = dlistP)
# Here BetaNormXS includes the correct normalisation term as per the original formalism, and XSmatE is the sum of the squared matrix elements, as used for the normalisation (== cross section without correct scaling).
plotThres = None
ep.util.matEleSelector(XSmatE, thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke', col='Sym');
ep.util.matEleSelector(BetaNormXS, thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke', col='Sym');
# Summing over M gives the final LF terms, as defined above.
# The B0 term (==cross section) is not correctly scaled here.
# The B2 term matches the GetCro reference results.
ep.util.matEleSelector(BetaNormXS.unstack('LM').sum('M'), thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke', col='Sym');
# Set parameters
SFflag = False # Multiply matrix elements by SF?
symSum = True # Sum over symmetries?
phaseConvention = 'S'
thres = 1e-2
selDims = {'it':1, 'Type':'L'}
thresDims = 'Eke'
# Set terms for testing - NOTE ORDERING HERE may affect CG term!!!
dlistMatE = ['lp', 'l', 'L', 'mp', 'm', 'M'] # Match published terms
dlistP = ['p1', 'p2', 'L', 'mup', 'mu', 'M']
# dlistMatE = ['l', 'lp', 'L', 'm', 'mp', 'M'] # Standard terms
# dlistP = ['p1', 'p2', 'L', 'mu', 'mup', 'M']
# Set matrix elements
matE = dataSet[0].copy()
# Calculate betas
BetaNormXS, BetaNorm, BetaRaw, XSmatE = lfblmXprod(matE, symSum = symSum, SFflag = SFflag,
thres = thres, thresDims = thresDims, selDims = selDims,
phaseConvention = phaseConvention,
dlistMatE = dlistMatE, dlistP = dlistP)
# Here BetaNormXS includes the correct normalisation term as per the original formalism, and XSmatE is the sum of the squared matrix elements, as used for the normalisation (== cross section without correct scaling).
plotThres = None
ep.util.matEleSelector(XSmatE, thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke');
ep.util.matEleSelector(BetaNormXS, thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke');
# Summing over M gives the final LF terms, as defined above.
# The B0 term (==cross section) is not correctly scaled here.
# The B2 term matches the GetCro reference results.
ep.util.matEleSelector(BetaNormXS.unstack('LM').sum('M'), thres = plotThres, dims='Eke', sq=True, drop=True).real.plot.line(x='Eke');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Setup KFP Host
Step3: Setup Google Cloud Project
Step4: Setup Google Cloud Credentials
Step5: Upload your service account file (colab specific code block)
Step6: Setup your local runtime environment
Step7: Enable the below Google Cloud Services for the solution
Step9: Adjust other variables
Step12: Reset the local context for local development if needed
Step13: Instantiate the KFP Client
Step15: Create Google Cloud Storage bucket and folder - function
Step16: Test locally create_gcs_bucket_folder
Step18: Create BigQuery dataset - function
Step19: Test locally create_bq_ds
Step22: Load the data to BigQuery - function
Step23: Test locally load_bq_ds
Step27: Create the BigQuery ML model - function
Step28: Test locally create_bq_ml
Step33: Evaluate the BigQuery ML model - function
Step34: Test locally evaluate_ml_model
Step37: Prepare dataset for batch prediction with BigQuery ML - function
Step38: Test locally create batch prediction dataset
Step41: Make batch prediction - function
Step42: Test locally batch prediction
Step44: Determine the new revision of the model - function
Step45: Test locally get_bqml_model_version
Step49: Export the BigQuery ML model to the Google Cloud Storage bucket - function
Step50: Test locally export_bqml_model_to_gcs
Step53: Deploy the ML model - function
Step54: Test locally deploy_ml_model_online_pred
Step58: Make online prediction - function
Step59: Test locally predict_online_ml_model
Step62: Define the KubeFlow Pipeline (KFP)
Step64: Compile, watch out for errors in the pipeline composition
Step66: Create an experiment and run the pipeline immediately
Step67: If there is an error in the pipeline, you will see it in the KubeFlow Pipelines UI in the Experiments section. If you encounter any errors, identify the issue, fix it in the Python function, unit test the function, update the pipeline definition, compile, create an experiment, and run the experiment. Iterate through the process until you successfully run a pipeline.
Step69: The utility methods in the section below provide a convenient way to delete the Google Cloud resources. You can use these methods while developing your pipeline components.
Step71: Delete Google Cloud Storage folder
Step73: Delete Google Cloud Storage bucket
Step75: Delete the table in BigQuery
Step77: Delete the dataset in BigQuery
Step79: Delete the Google Cloud Storage folders which contain exported model artifacts
Step81: Delete the Cloud AI Platform Prediction models
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install google-cloud-storage #for Storage Account
!pip install google-cloud #for cloud sdk
!pip install google-cloud-bigquery #for BigQuery
!pip install google-cloud-bigquery-storage #for BigQuery Storage client
!pip install kfp # Install the KFP AI SDK
RPM_GCP_KFP_HOST = "<Your KFP pipeline host>"
# set the Google Cloud project id
RPM_GCP_PROJECT = "<Your Google Cloud project>" #for local !bash
# download the ServiceAccount key and provide the path to the file below
RPM_GCP_APPLICATION_CREDENTIALS = "<Full path with the file name to the above downloaded json file>"
# uncomment the below code in codelab environment
# from google.colab import files
# # Upload service account key
# keyfile_upload = files.upload()
# RPM_GCP_APPLICATION_CREDENTIALS = list(keyfile_upload.keys())[0]
!export RPM_GCP_PROJECT
!echo $RPM_GCP_PROJECT
# set the desired Google Cloud project
!gcloud config set project $RPM_GCP_PROJECT
# validate that the Google Cloud project has been set properly.
!gcloud info --format='value(config.project)'
import os
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = RPM_GCP_APPLICATION_CREDENTIALS
# set the account
!gcloud auth activate-service-account --key-file=$RPM_GCP_APPLICATION_CREDENTIALS
# enable the required Google Cloud services for the solution
!gcloud services enable \
storage-component.googleapis.com \
bigquery.googleapis.com \
ml.googleapis.com \
notebooks.googleapis.com
# validate that all required services have been enabled properly
!gcloud services list | grep 'storage-component.googleapis.com\|bigquery.googleapis.com\|ml.googleapis.com\|notebooks.googleapis.com'
# load_params
import json
def load_params():
The variables are used in the pipeline.
Provide appropriate variables for your environments
Set appropriate variables with the pattern RPM_*
(these are IMMUTABLE variables that act as defaults)
You could print all the variables used in your environment
(e.g. local environment) which starts with RPM_* or rpm_*
(comes handy while troubleshooting)
Returns:
dict: all python variables used in the pipeline
return {
'RPM_GCP_PROJECT': RPM_GCP_PROJECT,
'RPM_LOCATION': 'us-central1-b', # KFP/K8s cluster zone
'RPM_PVC_DATASET_FOLDER_NAME': 'rpm_ds',
'RPM_PVC_NAME': 'rpm-vol',
# create the bucket if it don't exists
'RPM_GCP_STORAGE_BUCKET': '',
# create the folder if it don't exists
'RPM_GCP_STORAGE_BUCKET_FOLDER': '',
'RPM_DEFAULT_BUCKET_EXT': '_retail_propensity_model_assets',
'RPM_DEFAULT_BUCKET_FOLDER_NAME': 'rpm_data_set',
'RPM_BQ_DATASET_NAME': '', # create the dataset if it don't exists
'RPM_BQ_TABLE_NAME': '',
'RPM_DEFAULT_BQ_TABLE_NAME_EXT': '_tbl',
'RPM_DEFAULT_DATA_SET_NAME_EXT': '_rpm_data_set',
'RPM_MODEL_NAME': '',
'RPM_DEFAULT_MODEL_NAME': 'rpm_bqml_model',
'RPM_MODEL_EXPORT_PATH': '',
'RPM_DEFAULT_MODEL_EXPORT_PATH': 'bqml/model/export/',
'RPM_MODEL_VER_PREFIX': 'V_',
'RPM_RUNTIME_VERSION': '1.15', # do not change
'RPM_PYTHON_VERSION': '3.7', # do not change
'RPM_CLUSTER_NAME': 'cluster-1', # KFP/K8s cluster name
# variables created by the program
# from user supplied set or from the program defaults
'rpm_bq_ds_name': '',
'rpm_gcs_rpm_ds_url': '',
'rpm_file_name': '',
'rpm_table_id': '',
'rpm_bqml_model': '',
'rpm_bqml_model_export_path': '',
'rpm_model_version': '',
'rpm_model_uri': '',
'rpm_pred_table_id': '',
}
all_vars = load_params()
RPM_DS_DOWNLOAD_EXPERIMENT_NAME = 'GoogleStore Retail Pipeline'
# reset_local_context
def reset_local_context():
Resets all the variables used in the local environment.
Comes in handy while developing and testing the code locally.
try:
del globals()['local_context']
except KeyError as e:
print('local_context not found!!!')
print(e)
globals().get('local_context') # validate that the local variable is removed
# reset_local_context() # use before testing a component locally if needed
# get_local_context
def get_local_context(init_var=None):
Define local rpm_context object
The function sets the appropriate variables to
execute the code in a local environment.
local_context contains all the variables used in the local environment.
You could check the variable before and after the call to find out the
desired result (comes handy while developing
and testing the code locally)
Args:
init_var (:obj:`dict`, optional): The dict object overrides the existing
local context (use sparingly only, when needed)
Returns:
dict: all python variables used in the pipeline
global local_context
local_context = globals().get('local_context')
if not local_context:
local_context = load_params()
if init_var:
local_context = init_var
local_context["RPM_PVC_NAME"] = os.environ["HOME"]
if not local_context.get("rpm_bq_ds_name"):
local_context["rpm_bq_ds_name"] = f"{all_vars['RPM_GCP_PROJECT'].replace('-','_')}_rpm_data_set"
if not local_context.get("rpm_gcs_rpm_ds_url"):
local_context["rpm_gcs_rpm_ds_url"] = f"gs://{all_vars['RPM_GCP_PROJECT']}_retail_propensity_model_assets/rpm_data_set/"
if not local_context.get("rpm_table_id"):
local_context["rpm_table_id"] = f"{all_vars['RPM_GCP_PROJECT']}.{all_vars['RPM_GCP_PROJECT'].replace('-','_')}_rpm_data_set.{all_vars['RPM_GCP_PROJECT'].replace('-','_')}_rpm_data_set_tbl"
if not local_context.get("RPM_MODEL_NAME"):
local_context["rpm_bqml_model"] = "rpm_bqml_model"
if not local_context.get("rpm_bqml_model_export_path"):
local_context["rpm_bqml_model_export_path"] = "bqml/model/export/V_1/"
if not local_context.get("rpm_model_version"):
local_context["rpm_model_version"] = "V_1"
if not local_context["rpm_model_uri"]:
local_context["rpm_model_uri"] = f"gs://{all_vars['RPM_GCP_PROJECT']}_retail_propensity_model_assets/rpm_data_set/bqml/model/export/V_1/"
if not local_context["rpm_pred_table_id"]:
local_context["rpm_pred_table_id"] = f"{all_vars['RPM_GCP_PROJECT']}.{all_vars['RPM_GCP_PROJECT'].replace('-','_')}_rpm_data_set.{all_vars['RPM_GCP_PROJECT'].replace('-','_')}_rpm_data_set_pred_tbl"
print (local_context)
return local_context
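# Usage sketch (assumption, not in the original notebook): override a single
# variable for a local test run before exercising the components.
# ctx = load_params()
# ctx['RPM_GCP_STORAGE_BUCKET'] = 'my-test-bucket'   # hypothetical bucket name
# local_context = get_local_context(init_var=ctx)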
def test_comp_local(func):
local_context = get_local_context()
import json
new_local_context_str = func(json.dumps(local_context))
print(f'type: {type(new_local_context_str)}; new_local_context_str:{new_local_context_str}')
local_context = json.loads(new_local_context_str)
def update_local_context(output):
print(f'type: {type(output)}; new_local_context_str:{output}')
local_context = json.loads(output[0])
# Create a KFP Client and Validate that you are able to access the KFP Pipelines
# You will be using the KFP HOST to deploy the KFP pipeline (experiment) and launch the experiment
import kfp
kfp_client = kfp.Client(host=RPM_GCP_KFP_HOST)
kfp_client.LOCAL_KFP_CONTEXT
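# Optional sanity check (not in the original notebook): confirm the client can
# reach the KFP host by listing existing experiments.
# print(kfp_client.list_experiments())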
# create_gcs_bucket_folder
from typing import NamedTuple
def create_gcs_bucket_folder(ctx: str,
RPM_GCP_STORAGE_BUCKET: str,
RPM_GCP_PROJECT: str,
RPM_DEFAULT_BUCKET_EXT: str,
RPM_GCP_STORAGE_BUCKET_FOLDER: str,
RPM_DEFAULT_BUCKET_FOLDER_NAME: str
) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_gcs_rpm_ds_url', str),
]):
The function (also used as a base for a KFP Component) creates a
Google Cloud Storage bucket and a folder if they don't exist.
The idea is to create the bucket and folder only on the first
run of the pipeline.
The pipeline uses the same storage account and the folder
for repeated runs.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_STORAGE_BUCKET(:obj:`str`): User supplied Storage Bucket name
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_DEFAULT_BUCKET_EXT:(:obj:`str`): Name of the bucket,
when user hasn't supplied a bucket name
RPM_GCP_STORAGE_BUCKET_FOLDER:(:obj:`str`): User supplied folder name
RPM_DEFAULT_BUCKET_FOLDER_NAME:(:obj:`str`): Name for creating a
bucket, when User hasn't supplied a folder name
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_gcs_rpm_ds_url(:obj:`str`): Full Google Cloud Storage path with
bucket name and folder name
# loading rpm_context string
import json
rpm_context = json.loads(ctx)
print(rpm_context)
RPM_GCP_STORAGE_BUCKET = rpm_context['RPM_GCP_STORAGE_BUCKET']
RPM_GCP_PROJECT = rpm_context['RPM_GCP_PROJECT']
RPM_DEFAULT_BUCKET_EXT = rpm_context['RPM_DEFAULT_BUCKET_EXT']
RPM_GCP_STORAGE_BUCKET_FOLDER = rpm_context['RPM_GCP_STORAGE_BUCKET_FOLDER']
RPM_DEFAULT_BUCKET_FOLDER_NAME = rpm_context['RPM_DEFAULT_BUCKET_FOLDER_NAME']
if RPM_GCP_STORAGE_BUCKET:
gcs_storage_bucket_name = RPM_GCP_STORAGE_BUCKET
else:
gcs_storage_bucket_name = RPM_GCP_PROJECT + RPM_DEFAULT_BUCKET_EXT
if RPM_GCP_STORAGE_BUCKET_FOLDER:
gcs_folder_name = RPM_GCP_STORAGE_BUCKET_FOLDER + '/'
else:
gcs_folder_name = RPM_DEFAULT_BUCKET_FOLDER_NAME + '/'
print(f"{gcs_storage_bucket_name} bucket and {gcs_folder_name} will be used in the project.")
rpm_gcs_rpm_ds_url = f"gs://{gcs_storage_bucket_name}/{gcs_folder_name}"
print(rpm_gcs_rpm_ds_url)
rpm_context['rpm_gcs_rpm_ds_url'] = rpm_gcs_rpm_ds_url
# defining the install function
import subprocess
def install(name):
subprocess.call(['pip', 'install', name])
pacakages_to_install = ['google-cloud', 'google-cloud-storage']
for each_package in pacakages_to_install:
install(each_package)
print(f"'{each_package}' package installed :)")
cmd = f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}"
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
from google.cloud import storage
from google.cloud.exceptions import NotFound
from google.cloud.exceptions import Forbidden
import traceback
def check_storage_bucket_and_folder(bucket_name, folder_name):
if not bucket_name or not folder_name:
return(False, False)
client = storage.Client()
try: # check if the bucket exists and that we have the proper permission
bucket = client.get_bucket(bucket_name)
print(f"Bucket: {bucket_name} exists.")
bucket_exists = True
try:
blob = bucket.get_blob(folder_name)
if blob is None:
print(f"Folder name {folder_name} does not exist!")
folder_exists = False
else:
print(f"Folder name {folder_name} exist.")
folder_exists = True
except:
print(f"Folder name {folder_name} doest not exist!")
folder_exists = False
except Forbidden as e:
print(f"Sorry, you don't have access to the bucket: {bucket_name}!")
print(e)
error = traceback.format_exc()
print(error)
bucket_exists = False
folder_exists = False
except NotFound as e:
print(f"Sorry, the bucket: {bucket_name} does not exist!")
print(e)
error = traceback.format_exc()
print(error)
bucket_exists = False
folder_exists = False
return(bucket_exists, folder_exists)
# Create a bucket if it doesn't exists
def create_storage_bucket(bucket_name):
if bucket_name:
client = storage.Client()
try:
bucket = client.create_bucket(bucket_name)
print(f"Bucket {bucket.name} created")
return True
except Exception as e:
print(f"Bucket {bucket_name} couldn't be created")
print(e)
error = traceback.format_exc()
print(error)
return False
else:
print(f"Bucket {bucket_name} couldn't be created. Name is empty.")
return False
# Create the folder in the bucket
def create_storage_folder(bucket_name, folder_name):
if len(bucket_name) == 0 or len(folder_name) == 0:
print(f"Folder {folder_name} couldn't be created. Name is empty.")
return False
else:
client = storage.Client()
try:
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(folder_name)
blob.upload_from_string('')
print(f"Folder {blob.name} created")
return True
except Exception as e:
print(f"Folder {folder_name} couldn't be created")
print(e)
error = traceback.format_exc()
print(error)
return False
result = check_storage_bucket_and_folder(gcs_storage_bucket_name,
gcs_folder_name)
if result[0] == False:
create_storage_bucket(gcs_storage_bucket_name)
if result[1] == False:
create_storage_folder(gcs_storage_bucket_name, gcs_folder_name)
return (
json.dumps(rpm_context),
rpm_context['rpm_gcs_rpm_ds_url']
)
# test locally create_gcs_bucket_folder
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(create_gcs_bucket_folder(
json.dumps(local_context),
local_context['RPM_GCP_STORAGE_BUCKET'],
local_context['RPM_GCP_PROJECT'],
local_context['RPM_DEFAULT_BUCKET_EXT'],
local_context['RPM_GCP_STORAGE_BUCKET_FOLDER'],
local_context['RPM_DEFAULT_BUCKET_FOLDER_NAME']
))
# create_bq_ds
from typing import NamedTuple
def create_bq_ds(ctx: str,
RPM_GCP_PROJECT: str,
RPM_BQ_DATASET_NAME: str,
RPM_LOCATION: str
) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_bq_ds_name', str),
]):
The function (also used as a base for a KFP Component) creates a
BigQuery dataset if it doesn't exist.
The idea is to create DataSet only on the first run of the pipeline.
The pipeline uses the same DataSet for repeated runs.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_BQ_DATASET_NAME:(:obj:`str`): Name of the dataset.
RPM_LOCATION:(:obj:`str`): Location of the Google Cloud region
of the BigQuery dataset
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_bq_ds_name(:obj:`str`): The dataset name
used in the rest of the pipeline
import json
rpm_context = json.loads(ctx)
print(rpm_context)
rpm_bq_ds_name = rpm_context['RPM_BQ_DATASET_NAME']
if not rpm_bq_ds_name:
rpm_bq_ds_name = \
f"{rpm_context['RPM_GCP_PROJECT']}{rpm_context['RPM_DEFAULT_DATA_SET_NAME_EXT']}"
rpm_bq_ds_name = rpm_bq_ds_name.replace('-', '_')
rpm_context['rpm_bq_ds_name'] = rpm_bq_ds_name
import subprocess
import traceback
def exec_cmd(cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
def install(name):
subprocess.call(['pip', 'install', name])
pacakages_to_install = ['google-cloud', 'google-cloud-bigquery']
for each_package in pacakages_to_install:
install(each_package)
print(f"'{each_package}' package installed :)")
from google.cloud import bigquery
from google.cloud.exceptions import NotFound
client = bigquery.Client()
dataset_id = f"{rpm_context['RPM_GCP_PROJECT']}.{rpm_bq_ds_name}"
ds_found = True
try:
client.get_dataset(dataset_id) # Make an API request.
print('Dataset {} already exists'.format(dataset_id))
except NotFound:
print('Dataset {} is not found'.format(dataset_id))
ds_found = False
if ds_found is False:
try:
# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset(dataset_id)
dataset.location = rpm_context['RPM_LOCATION'].split('-')[0].upper()
dataset = client.create_dataset(dataset) # Make an API request.
print('Created dataset {}.{} in location: {}.'.\
format(client.project, dataset.dataset_id, dataset.location))
except Exception as e:
error = traceback.format_exc()
print(error)
print(e)
raise RuntimeError(f"Can't create the BigQuery DS {dataset_id}")
return (
json.dumps(rpm_context),
rpm_context['rpm_bq_ds_name']
)
# test locally create_bq_ds
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(create_bq_ds(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_BQ_DATASET_NAME'],
local_context['RPM_LOCATION']))
from typing import NamedTuple
def load_bq_ds(ctx: str,
RPM_GCP_PROJECT: str,
RPM_BQ_TABLE_NAME: str,
RPM_DEFAULT_BQ_TABLE_NAME_EXT: str,
rpm_bq_ds_name: str, ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_table_id', str),
]):
The function(also used as a base for a KFP Component)
loads the data to a BigQuery table.
You need to replace the component with your data source
e.g. you might download the data from a different source,
in which case you need to code those steps.
Decide on your load strategy here, such as add or append.
Furthermore you could cache this KFP component
if the load is just a onetime thing.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_BQ_TABLE_NAME:(:obj:`str`): Name of the table.
RPM_DEFAULT_BQ_TABLE_NAME_EXT:(:obj:`str`): Default table name
if the user didn't provide one(RPM_BQ_TABLE_NAME)
rpm_bq_ds_name(:obj:`str`): The dataset name
used in the rest of the pipeline
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_table_id(:obj:`str`): The table name
used in the rest of the pipeline
import json
rpm_context = json.loads(ctx)
print(rpm_context)
rpm_bq_ds_name = rpm_context['rpm_bq_ds_name']
dataset_id = f"{rpm_context['RPM_GCP_PROJECT']}.{rpm_bq_ds_name}"
if not rpm_context['RPM_BQ_TABLE_NAME']:
rpm_table_id = f"{dataset_id}.{rpm_bq_ds_name}{rpm_context['RPM_DEFAULT_BQ_TABLE_NAME_EXT']}"
else:
rpm_table_id = f"{dataset_id}.{rpm_context['RPM_BQ_TABLE_NAME']}"
rpm_context['rpm_table_id'] = rpm_table_id
import subprocess
def install(name):
subprocess.call(['pip', 'install', name])
pacakages_to_install = ['google-cloud', 'google-cloud-bigquery']
for each_package in pacakages_to_install:
install(each_package)
print(f"'{each_package}' package installed :)")
query = f
# select initial features and label to feed into our model
CREATE OR REPLACE TABLE {rpm_table_id}
OPTIONS(
description="Google Store curated Data"
) AS
SELECT
fullVisitorId,
bounces,
time_on_site,
will_buy_on_return_visit # <--- our label
FROM
# features
(SELECT
fullVisitorId,
IFNULL(totals.bounces, 0) AS bounces,
IFNULL(totals.timeOnSite, 0) AS time_on_site
FROM
`bigquery-public-data.google_analytics_sample.*`
WHERE
totals.newVisits = 1
AND date BETWEEN '20160801' AND '20170430') # train on first 9 months
JOIN
(SELECT
fullvisitorid,
IF(COUNTIF(totals.transactions > 0 AND totals.newVisits IS NULL) > 0, 1, 0) AS will_buy_on_return_visit
FROM
`bigquery-public-data.google_analytics_sample.*`
GROUP BY fullvisitorid)
USING (fullVisitorId)
ORDER BY time_on_site DESC # order by most time spent first
print(query)
import traceback
from google.cloud import bigquery
try:
client = bigquery.Client()
print(query)
query_job = client.query(query) # Make an API request.
print(f"Table {rpm_table_id} created.")
except Exception as e:
error = traceback.format_exc()
print(error)
print(e)
raise RuntimeError(f"Can't create the table {rpm_table_id}")
destination_table = rpm_table_id
print(f"{destination_table}")
return (
json.dumps(rpm_context),
rpm_context['rpm_table_id']
)
# test locally load_bq_ds
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(load_bq_ds(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_BQ_TABLE_NAME'],
local_context['RPM_DEFAULT_BQ_TABLE_NAME_EXT'],
local_context['rpm_bq_ds_name'],
))
# create_bq_ml
from typing import NamedTuple
def create_bq_ml(ctx: str,
RPM_GCP_PROJECT: str,
RPM_MODEL_NAME: str,
RPM_DEFAULT_MODEL_NAME: str,
rpm_bq_ds_name: str,
rpm_table_id: str ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_bqml_model', str),
]):
The function(also used as a base for a KFP Component) creates a model
from the data that you have already loaded previously.
You need to adjust the model type, model hyperparameters, features,
and label depending on your need.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_MODEL_NAME:(:obj:`str`): Name of the model.
RPM_DEFAULT_MODEL_NAME:(:obj:`str`): Default model name
if the user didn't provide one(RPM_MODEL_NAME)
rpm_bq_ds_name(:obj:`str`): The dataset name
used in the rest of the pipeline
rpm_table_id(:obj:`str`): The table name
used in the rest of the pipeline
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_bqml_model(:obj:`str`): The model name
used in the rest of the pipeline
import json
rpm_context = json.loads(ctx)
print(rpm_context)
rpm_bqml_model = rpm_context['RPM_MODEL_NAME']
if not rpm_bqml_model:
rpm_bqml_model = rpm_context['RPM_DEFAULT_MODEL_NAME']
rpm_bqml_model = rpm_bqml_model.replace('-', '_')
rpm_context['rpm_bqml_model'] = rpm_bqml_model
import subprocess
import traceback
def exec_cmd(cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
return process.stdout
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
return e.output
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
bqml_create_sql = f
CREATE OR REPLACE MODEL
{rpm_context['rpm_bq_ds_name']}.{rpm_bqml_model}
OPTIONS
( model_type='LOGISTIC_REG',
auto_class_weights=TRUE,
input_label_cols=['will_buy_on_return_visit']) AS
SELECT
* EXCEPT(fullVisitorId)
FROM
{rpm_context['rpm_table_id'].replace(RPM_GCP_PROJECT+'.', '')}
# you can uncomment the below query to try out the XGBoost model
# bqml_create_sql = f
# CREATE OR REPLACE MODEL
# \`{rpm_context['rpm_bq_ds_name']}.{rpm_bqml_model}\`
# OPTIONS(MODEL_TYPE='BOOSTED_TREE_CLASSIFIER',
# BOOSTER_TYPE = 'GBTREE',
# NUM_PARALLEL_TREE = 1,
# MAX_ITERATIONS = 50,
# TREE_METHOD = 'HIST',
# EARLY_STOP = FALSE,
# SUBSAMPLE = 0.85,
# INPUT_LABEL_COLS = ['will_buy_on_return_visit'])
# AS
# SELECT
# * EXCEPT(fullVisitorId)
# FROM
# \`{rpm_context['rpm_table_id']}\`
#
exec_cmd(f'bq query --use_legacy_sql=false "{bqml_create_sql}"')
bq_model_created = exec_cmd(f"bq ls -m --format=pretty {rpm_context['rpm_bq_ds_name']} | grep {rpm_bqml_model}")
if not bq_model_created:
raise RuntimeError(f"Please check if the model {rpm_context['rpm_bq_ds_name']} created.")
return (
json.dumps(rpm_context),
rpm_context['rpm_bqml_model']
)
# test locally create_bq_ml
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(create_bq_ml(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_MODEL_NAME'],
local_context['RPM_DEFAULT_MODEL_NAME'],
local_context['rpm_bq_ds_name'],
local_context['rpm_table_id']))
# evaluate_ml_model
from typing import NamedTuple
def evaluate_ml_model(ctx: str,
RPM_GCP_PROJECT: str,
rpm_bq_ds_name: str,
rpm_bqml_model: str, ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_eval_query', str),
('rpm_eval_result', str),
]):
The function(also used as a base for a KFP Component) evaluates
the model you created.
Update your selection criteria and stop the pipeline
if the model didn't meet the criteria
You can raise an exception to stop the pipeline
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
rpm_bq_ds_name(:obj:`str`): The dataset name
used in the rest of the pipeline
rpm_bqml_model(:obj:`str`): The model name
used in the rest of the pipeline
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_eval_query(:obj:`str`): The evaluate sql query,
which is saved for auditing purpose in the pipeline artifacts
rpm_eval_result(:obj:`str`): The result of the evaluated query,
which is saved for auditing purpose in the pipeline artifacts
import json
rpm_context = json.loads(ctx)
print(rpm_context)
import subprocess
import traceback
def exec_cmd(cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
return(process.stdout, 0)
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
return(e.output, 1)
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
bqml_eval_query = f
SELECT
roc_auc, CASE WHEN roc_auc > .9 THEN 'good'
WHEN roc_auc > .8 THEN 'fair' WHEN roc_auc > .7 THEN 'decent'
WHEN roc_auc > .6 THEN 'not great' ELSE 'poor' END AS modelquality
FROM
ML.EVALUATE(MODEL {rpm_bq_ds_name}.{rpm_bqml_model})
rpm_eval_result = exec_cmd(f'bq query --use_legacy_sql=false --format=json "{bqml_eval_query}"')
print(rpm_eval_result)
rpm_context['bqml_eval_query'] = bqml_eval_query
rpm_context['rpm_eval_result'] = rpm_eval_result
return (
json.dumps(rpm_context),
f"{bqml_eval_query}",
f"{rpm_eval_result}",
)
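# Sketch (assumption, not in the original): gate the pipeline on model quality
# by parsing the JSON emitted by `bq query --format=json` above and raising an
# exception to stop the KFP run when roc_auc falls below a threshold.
# rows = json.loads(rpm_eval_result[0])
# if float(rows[0]['roc_auc']) < 0.7:
#     raise RuntimeError('Model quality below threshold; stopping the pipeline.')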
# test locally evaluate_ml_model
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(evaluate_ml_model(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['rpm_bq_ds_name'],
local_context['rpm_bqml_model'],
))
# create_batch_prediction_dataset
from typing import NamedTuple
def create_batch_prediction_dataset(ctx: str,
RPM_GCP_PROJECT: str,
rpm_bq_ds_name: str,
rpm_table_id: str ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_pred_table_id', str),
]):
The function(also used as a base for a KFP Component)
creates a BigQuery table which contains the input data for
which we want predictions.
You might not need this component if
your input table already exists.
You might need some transformation or filtering on your input data,
in which case you need to make appropriate code changes.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
rpm_bq_ds_name(:obj:`str`): The dataset name used in the rest of the pipeline
rpm_table_id(:obj:`str`): The table name used in the rest of the pipeline
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_pred_table_id(:obj:`str`): The table that contains the input data,
which we want batch predict later
import json
rpm_context = json.loads(ctx)
print(rpm_context)
import subprocess
def install(name):
subprocess.call(['pip', 'install', name])
pacakages_to_install = ['google-cloud', 'google-cloud-bigquery']
for each_package in pacakages_to_install:
install(each_package)
print(f"'{each_package}' package installed :)")
rpm_pred_table_id = rpm_table_id.replace('_tbl', '_pred_tbl')
query = f
# create the input table to conduct batch predict
CREATE OR REPLACE TABLE {rpm_pred_table_id}
OPTIONS(
description="Input data for prediction"
) AS
SELECT *
FROM {rpm_context['rpm_table_id']}
LIMIT 10
print(query)
import traceback
from google.cloud import bigquery
try:
client = bigquery.Client()
query_job = client.query(query) # Make an API request.
print(f"Table {rpm_pred_table_id} created.")
except Exception as e:
error = traceback.format_exc()
print(error)
print(e)
raise RuntimeError(f"Can't create the table {rpm_pred_table_id}")
rpm_context['rpm_pred_table_id'] = rpm_pred_table_id
return (json.dumps(rpm_context),
rpm_context['rpm_pred_table_id'],
)
#test locally create batch prediction dataset
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(create_batch_prediction_dataset(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['rpm_bq_ds_name'],
local_context['rpm_table_id'],
))
# predict_batch_ml_model
from typing import NamedTuple
def predict_batch_ml_model(ctx: str,
RPM_GCP_PROJECT: str,
rpm_bq_ds_name: str,
rpm_bqml_model: str,
rpm_pred_table_id: str, ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_predict_batch_output', str),
]):
"""The function (also used as a base for a KFP component) uses the model
to predict the data in bulk.
You might also need to save the predicted values
in an appropriate repository of your choice.
Currently the predicted value is printed on the console
and returned as an output from the function.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
rpm_bq_ds_name(:obj:`str`): The dataset name
used in the rest of the pipeline
rpm_bqml_model(:obj:`str`): The model name
used in the rest of the pipeline
rpm_pred_table_id(:obj:`str`): The table that contains the input data
which we want batch predict later
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_predict_batch_output(:obj:`str`): The output from the batch prediction"""
import json
rpm_context = json.loads(ctx)
print(rpm_context)
import subprocess
def install(name):
subprocess.call(['pip', 'install', name])
pacakages_to_install = ["google-cloud", "google-cloud-bigquery"]
for each_package in pacakages_to_install:
install(each_package)
print(f"'{each_package}' package installed :)")
query = f"""
# predict the inputs (rows) from the input table
SELECT
fullVisitorId, predicted_will_buy_on_return_visit
FROM
ML.PREDICT(MODEL {rpm_bq_ds_name}.{rpm_bqml_model},
(
SELECT
fullVisitorId,
bounces,
time_on_site
from {rpm_pred_table_id}
))"""
print(query)
import traceback
from google.cloud import bigquery
output = ""
try:
client = bigquery.Client()
query_job = client.query(query) # Make an API request.
print("The query data:")
for row in query_job:
# Row values can be accessed by field name or index.
print(f"row data: {row}")
output += str(row)
except Exception as e:
error = traceback.format_exc()
print(error)
print(e)
raise RuntimeError(f"Can't batch predict")
rpm_context['rpm_predict_output'] = output
return (
json.dumps(rpm_context),
rpm_context['rpm_predict_output'],
)
#test locally batch prediction
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(predict_batch_ml_model(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['rpm_bq_ds_name'],
local_context['rpm_bqml_model'],
local_context['rpm_pred_table_id'],
))
# get_bqml_model_version
from typing import NamedTuple
def get_bqml_model_version(ctx: str,
RPM_GCP_PROJECT: str,
RPM_MODEL_EXPORT_PATH: str,
RPM_DEFAULT_MODEL_EXPORT_PATH: str,
RPM_MODEL_VER_PREFIX: str,
rpm_gcs_rpm_ds_url: str ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_bqml_model_export_path', str),
('rpm_model_version', str),
]):
"""The function (also used as a base for a KFP component) determines
the revision of the models.
It checks the current version and increments it by 1.
It prepares the folder for BigQuery ML to export the model.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_MODEL_EXPORT_PATH(:obj:`str`): User supplied model export path
RPM_DEFAULT_MODEL_EXPORT_PATH(:obj:`str`): Uses the default path,
if the user didn't provide a path
RPM_MODEL_VER_PREFIX(:obj:`str`): The folder with prefix
rpm_pred_table_id(:obj:`str`): The table that contains the input data
which we want batch predict later
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_bqml_model_export_path(:obj:`str`): the path
to which we can export the model
rpm_model_version(:obj:`str`): the version which we will use when
we deploy the model to Cloud AI Platform Prediction"""
import json
rpm_context = json.loads(ctx)
print(rpm_context)
# defining the install function
import subprocess
import os
def install(name):
subprocess.call(['pip', 'install', name])
pacakages_to_install = ['google-cloud', 'google-cloud-storage']
for each_package in pacakages_to_install:
install(each_package)
print(f"'{each_package}' package installed :)")
cmd = f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}"
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
from google.cloud import storage
from google.cloud.exceptions import NotFound
from google.cloud.exceptions import Forbidden
import traceback
client = storage.Client()
try:
bucket_name= rpm_context['rpm_gcs_rpm_ds_url'].split('/')[2]
rpm_bq_ds_name = rpm_context['rpm_gcs_rpm_ds_url'].split('/')[3]
rpm_bqml_model_export_path = rpm_context['RPM_MODEL_EXPORT_PATH']
if not rpm_bqml_model_export_path:
rpm_bqml_model_export_path = rpm_context['RPM_DEFAULT_MODEL_EXPORT_PATH']
bucket = client.get_bucket(bucket_name)
print(f'Details: {bucket}')
folder_name = os.path.join(rpm_bq_ds_name, rpm_bqml_model_export_path)
blob = bucket.get_blob(folder_name)
model_version = 0
if blob is None:
print(f"Folder name {folder_name} does not exist!")
print(f"{bucket_name}, {folder_name}")
blob = bucket.blob(folder_name)
blob.upload_from_string('')
print(f"Folder name {folder_name} created.")
else:
print(f"Folder name {folder_name} exist.")
client = storage.Client()
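# Scan the existing '<RPM_MODEL_VER_PREFIX><n>/' folders under the export path and keep the highest n found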
blobs = client.list_blobs(bucket_name, prefix=folder_name)
print('Blobs:')
for blob in blobs:
print(f"blob name: {blob.name}")
curr_ver = blob.name.replace(folder_name, '')
print(f"folder_name: {folder_name}")
print(f"after folder_name replace: {curr_ver}")
if rpm_context['RPM_MODEL_VER_PREFIX'] in curr_ver \
and len(curr_ver.split('/')) == 2 and \
len(curr_ver.split('/')[1]) == 0:
curr_ver = curr_ver.replace(rpm_context['RPM_MODEL_VER_PREFIX'], '').replace('/','').split('/')[0]
model_version = max(model_version, int(curr_ver))
# increment the model version
model_version += 1
model_version_full_name = f"{rpm_context['RPM_MODEL_VER_PREFIX']}{model_version}/"
folder_name = os.path.join(folder_name, model_version_full_name)
print(f"Going to create folder {folder_name} created.")
blob = bucket.get_blob(folder_name)
blob = bucket.blob(folder_name)
blob.upload_from_string('')
print(f"Folder name {folder_name} created.")
rpm_context['rpm_bqml_model_export_path'] = os.path.join(rpm_bqml_model_export_path, model_version_full_name)
rpm_context['rpm_model_version'] = model_version_full_name.rstrip('/')
except Forbidden as e:
print(f"Sorry, you don't have access to the bucket: {bucket_name}!")
print(e)
error = traceback.format_exc()
print(error)
except NotFound as e:
print(f"Sorry, the bucket: {bucket_name} does not exist!")
print(e)
error = traceback.format_exc()
print(error)
return (
json.dumps(rpm_context),
rpm_context['rpm_bqml_model_export_path'],
rpm_context['rpm_model_version']
)
# test locally get_bqml_model_version
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(get_bqml_model_version(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_MODEL_EXPORT_PATH'],
local_context['RPM_DEFAULT_MODEL_EXPORT_PATH'],
local_context['RPM_MODEL_VER_PREFIX'],
local_context['rpm_gcs_rpm_ds_url']))
# export_bqml_model_to_gcs
from typing import NamedTuple
def export_bqml_model_to_gcs(ctx: str,
RPM_GCP_PROJECT: str,
RPM_PVC_NAME: str,
RPM_PVC_DATASET_FOLDER_NAME: str,
rpm_bqml_model_export_path: str,
rpm_gcs_rpm_ds_url: str,
rpm_bq_ds_name: str,
rpm_bqml_model: str, ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_model_uri', str),
]):
"""The function (also used as a base for a KFP component) exports
the BigQuery ML model.
It also saves the details used in building the model,
e.g. losses, learning-rate adjustments, number of iterations.
It also saves the evaluation details, e.g. ROC, accuracy, etc.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_PVC_NAME:(:obj:`str`): The persistent volume name
RPM_MODEL_EXPORT_PATH(:obj:`str`): The path to store the temporary files
before we upload to Google Cloud Storage
RPM_PVC_DATASET_FOLDER_NAME(:obj:`str`): The folder name to store
the temporary files before we upload to Google Cloud Storage
rpm_bqml_model_export_path(:obj:`str`): The path
to which we can export the model
rpm_gcs_rpm_ds_url(:obj:`str`): Full Google Cloud Storage path
with bucket name and folder name
rpm_bq_ds_name(:obj:`str`): The dataset name
used in the rest of the pipeline
rpm_bqml_model(:obj:`str`): The model name
used in the rest of the pipeline
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_model_uri(:obj:`str`): The path to where we export the model"""
import json
rpm_context = json.loads(ctx)
print(rpm_context)
import subprocess
import traceback
def exec_cmd(cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
return process.stdout
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
return e.output
import os
if not rpm_context['rpm_bqml_model_export_path']:
raise RuntimeError("Can't export the BigQuery model: export destination is empty!")
import os
path_to_ds = os.path.join(rpm_context['RPM_PVC_NAME'],
rpm_context['RPM_PVC_DATASET_FOLDER_NAME'])
# check if that the dataset directory already exists
exec_cmd(f"test -d {path_to_ds} && echo 'Exists' || echo 'Does not exist'")
# create the datset directory
exec_cmd(f"mkdir -p {path_to_ds}")
# validate that the dataset directory has been created
exec_cmd(f"test -d {path_to_ds} && echo 'Exists' || echo 'Does not exist'")
rpm_bqml_model_export_path = os.path.join(rpm_context['rpm_gcs_rpm_ds_url'],
rpm_context['rpm_bqml_model_export_path'])
rpm_bqml_model_export_path=rpm_bqml_model_export_path.rstrip('/')
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
cmd = f"bq extract -m {rpm_context['rpm_bq_ds_name']}.{rpm_context['rpm_bqml_model']} {rpm_bqml_model_export_path}"
print(cmd)
exec_cmd(cmd)
rpm_context['rpm_model_uri'] = \
os.path.join(rpm_context['rpm_gcs_rpm_ds_url'],
rpm_context['rpm_bqml_model_export_path'])
bqml_eval_query = f"""
SELECT
*
FROM
ML.EVALUATE(MODEL `{rpm_bq_ds_name}.{rpm_bqml_model}`)"""
rpm_eval_output = exec_cmd(f"bq query --use_legacy_sql=false --format=json '{bqml_eval_query}'")
print(rpm_eval_output)
bqml_train_detail_query = f"""
SELECT
*
FROM
ML.TRAINING_INFO(MODEL `{rpm_bq_ds_name}.{rpm_bqml_model}`)"""
bqml_train_detail_query_output = exec_cmd(f"bq query --use_legacy_sql=false --format=json '{bqml_train_detail_query}'")
print(bqml_train_detail_query_output)
path_to_ds = f"/{rpm_context['RPM_PVC_NAME']}/{rpm_context['RPM_PVC_DATASET_FOLDER_NAME']}/"
import os
# export the eval model output in a file called evalu_details.txt
rpm_eval_output_filename = os.path.join(path_to_ds, 'eval_detail.txt')
with open(rpm_eval_output_filename, 'w') as outfile:
outfile.write(rpm_eval_output)
exec_cmd(f"cat {rpm_eval_output_filename}")
exec_cmd(f"gsutil -m cp {rpm_eval_output_filename} {rpm_context['rpm_model_uri']}")
# export the training details in a file called train_details.txt
bqml_train_detail_query_filename = os.path.join(path_to_ds, 'train_detail.txt')
with open(bqml_train_detail_query_filename, 'w') as outfile:
outfile.write(bqml_train_detail_query_output)
exec_cmd(f"cat {bqml_train_detail_query_filename}")
exec_cmd(f"gsutil -m cp {bqml_train_detail_query_filename} {rpm_context['rpm_model_uri']}")
return (json.dumps(rpm_context),
rpm_context['rpm_model_uri'],
)
# test locally export_bqml_model_to_gcs
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(export_bqml_model_to_gcs(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_PVC_NAME'],
local_context['RPM_PVC_DATASET_FOLDER_NAME'],
local_context['rpm_bqml_model_export_path'],
local_context['rpm_gcs_rpm_ds_url'],
local_context['rpm_bq_ds_name'],
local_context['rpm_bqml_model'],
))
from typing import NamedTuple
def deploy_ml_model_online_pred(ctx: str,
RPM_GCP_PROJECT: str,
RPM_LOCATION: str,
RPM_RUNTIME_VERSION: str,
RPM_PYTHON_VERSION: str,
rpm_model_uri: str,
rpm_bqml_model: str,
rpm_model_version: str,
rpm_gcs_rpm_ds_url: str, ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_url_to_monitor', str),
('rpm_model_region', str),
]):
"""The function (also used as a base for a KFP component) deploys
the model that was exported above to Cloud AI Platform Prediction.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_LOCATION:(:obj:`str`): Google Cloud region
where we are going to deploy the model
RPM_RUNTIME_VERSION(:obj:`str`): The runtime version
for the Cloud AI Platform Prediction service
RPM_PYTHON_VERSION(:obj:`str`): The Python version
for the Cloud AI Platform Prediction service
rpm_model_uri(:obj:`str`): The path to where we export the model
rpm_bqml_model(:obj:`str`): The model name
used in the rest of the pipeline
rpm_model_version(:obj:`str`): The version which we will use when
we deploy the model to caip prediction
rpm_gcs_rpm_ds_url(:obj:`str`): Full Google Cloud Storage path
with bucket name and folder name
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_url_to_monitor(:obj:`str`): The url in the Google Cloud Console
which you can use to monitor
rpm_model_region(:obj:`str`): The Google Cloud region
where you are going to deploy the model"""
import json
rpm_context = json.loads(ctx)
print(rpm_context)
rpm_context['rpm_url_to_monitor'] = f"https://console.cloud.google.com/ai-platform/models/{rpm_bqml_model}/versions"
model_region = RPM_LOCATION[:-2]
rpm_context['rpm_model_region'] = model_region
import subprocess
import traceback
def exec_cmd(cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
return(process.stdout, 0)
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
return(e.output, 1)
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
(output, returncode) = \
exec_cmd(f"gcloud ai-platform models list --format='value(name)' | grep {rpm_bqml_model}")
print(f'output:{output}, returncode:{returncode}')
if returncode == 0: #grep returns 1 if nothing is found
print(f"{rpm_bqml_model} already exists")
else:
print(f"{rpm_bqml_model} doesn't exists. Creating...")
(output_create, returncode_create) = \
exec_cmd(f'gcloud ai-platform models create --regions={model_region} {rpm_bqml_model}')
print(f"output:{output_create}, returncode:{returncode_create}")
if returncode_create != 0:
raise RuntimeError(f"Can't create the ML Model {rpm_bqml_model}")
print(f"{rpm_bqml_model} created.")
(output, returncode) = \
exec_cmd(f"gcloud ai-platform versions list --model {rpm_bqml_model} --format='value(name)' | grep {rpm_model_version}")
if returncode == 0: #grep returns 1 if nothing is found
print(f"{rpm_bqml_model} with version {rpm_model_version} already exists")
else:
print(f"{rpm_bqml_model} with version {rpm_model_version} doesn't exists. Creating...")
cmd = f"""
gcloud ai-platform versions create --model={rpm_bqml_model} \
{rpm_model_version} \
--framework=tensorflow --python-version={RPM_PYTHON_VERSION} \
--runtime-version={RPM_RUNTIME_VERSION} \
--origin={rpm_model_uri} \
--staging-bucket=gs://{rpm_gcs_rpm_ds_url.split('/')[2]}"""
(output_create, returncode_create) = exec_cmd(cmd)
print(f"output:{output_create}, returncode:{returncode_create}")
if returncode_create != 0:
raise RuntimeError(f"Can't create the ML Model {rpm_bqml_model} with version {rpm_model_version}!!!")
print(f"{rpm_bqml_model} with version {rpm_model_version} created.")
print(f"Monitor models at {rpm_context['rpm_url_to_monitor']}")
return (
json.dumps(rpm_context),
rpm_context['rpm_url_to_monitor'],
rpm_context['rpm_model_region']
)
# test locally deploy_ml_model_online_pred
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(deploy_ml_model_online_pred(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_LOCATION'],
local_context['RPM_RUNTIME_VERSION'],
local_context['RPM_PYTHON_VERSION'],
local_context['rpm_model_uri'],
local_context['rpm_bqml_model'],
local_context['rpm_model_version'],
local_context['rpm_gcs_rpm_ds_url'],
))
# predict_online_ml_model
from typing import NamedTuple
def predict_online_ml_model(ctx: str,
RPM_GCP_PROJECT: str,
RPM_PVC_NAME: str,
RPM_PVC_DATASET_FOLDER_NAME: str,
rpm_bqml_model: str,
rpm_model_version: str, ) -> NamedTuple('Outputs', [
('rpm_context', str),
('rpm_predict_online_output', str),
]):
"""The function (also used as a base for a KFP component)
does the online prediction.
This is to confirm that the endpoint is available and ready to serve.
Args:
ctx(:obj:`str`): The dict object with all the variables
used in the pipeline
RPM_GCP_PROJECT:(:obj:`str`): Google Cloud project for deployment
RPM_PVC_NAME:(:obj:`str`): The persistent volume name
RPM_PVC_DATASET_FOLDER_NAME(:obj:`str`): The folder name to store
the temporary files before we upload to Google Cloud Storage
rpm_bqml_model(:obj:`str`): The model name
used in the rest of the pipeline
rpm_model_version(:obj:`str`): The version which we will use
when we deploy the model to Cloud AI Platform Prediction
Returns:
Outputs(:obj: `tuple`): Returns the below outputs:
rpm_context(:obj:`str`): All variables used in the pipeline
rpm_predict_online_output(:obj:`str`): The output
from the online prediction"""
import json
rpm_context = json.loads(ctx)
print(rpm_context)
import subprocess
import traceback
def exec_cmd(cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
return(process.stdout, 0)
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
return(e.output, 1)
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
import os
path_to_ds = os.path.join(rpm_context['RPM_PVC_NAME'],
rpm_context['RPM_PVC_DATASET_FOLDER_NAME'])
# check if that the dataset directory already exists
exec_cmd(f"test -d {path_to_ds} && echo 'Exists' || echo 'Does not exist'")
# create the datset directory
exec_cmd(f"mkdir -p {path_to_ds}")
# validate that the dataset directory has been created
exec_cmd(f"test -d {path_to_ds} && echo 'Exists' || echo 'Does not exist'")
input_data = {"bounces": 0, "time_on_site": 7363}
filename = os.path.join(path_to_ds, 'input.json')
with open(filename, 'w') as outfile:
json.dump(input_data, outfile)
cmd = f"""
gcloud ai-platform predict --model {rpm_bqml_model} \
--version {rpm_model_version} --json-instances {filename}"""
(output, returncode) = exec_cmd(cmd)
print(f"Predicted results for {input_data} is {output} ")
rpm_context['rpm_predict_online_output'] = output
return (
json.dumps(rpm_context),
rpm_context['rpm_predict_online_output'],
)
# test locally predict_online_ml_model
# You could unit test the above code in your local environment.
# You don't need to execute the code, when simply building or running the KFP pipeline (experiment)
local_context = get_local_context()
import json
update_local_context(predict_online_ml_model(
json.dumps(local_context),
local_context['RPM_GCP_PROJECT'],
local_context['RPM_PVC_NAME'],
local_context['RPM_PVC_DATASET_FOLDER_NAME'],
local_context['rpm_bqml_model'],
local_context['rpm_model_version'],
))
# define the pipeline
import kfp.components as comp
def create_kfp_comp(rpm_comp):
"""Converts a Python function to a KFP component
and returns a task (ContainerOp) factory.
Returns:
Outputs (:obj: `ContainerOp`): returns the operation factory"""
return comp.func_to_container_op(
func=rpm_comp,
base_image="google/cloud-sdk:latest"
)
# reload the properties; undo any properties set to test component locally
all_vars = load_params()
from kfp.dsl import pipeline, VolumeOp
import kfp.dsl as dsl
import json
# define the pipeline metadata
@pipeline(
name='Propensity to purchase using BigQuery ML',
description='Propensity model if a customer is likely to purchase'
)
# define the pipeline
def bq_googlestr_dataset_to_bq_to_caip_pipeline(
data_path = all_vars['RPM_PVC_NAME'] #you can pass input variables
):
"""The function defines the pipeline.
Args:
data_path (:obj:`str`): the volume used to store the temporary files"""
rpm_context = json.dumps(all_vars)
gcs_bucket_folder_op = create_kfp_comp(create_gcs_bucket_folder)(
rpm_context,
all_vars['RPM_GCP_STORAGE_BUCKET'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_DEFAULT_BUCKET_EXT'],
all_vars['RPM_GCP_STORAGE_BUCKET_FOLDER'],
all_vars['RPM_DEFAULT_BUCKET_FOLDER_NAME']
)
create_bq_ds_op = create_kfp_comp(create_bq_ds)(
gcs_bucket_folder_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_BQ_DATASET_NAME'],
all_vars['RPM_LOCATION']
)
load_bq_ds_op = create_kfp_comp(load_bq_ds)(
create_bq_ds_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_BQ_TABLE_NAME'],
all_vars['RPM_DEFAULT_BQ_TABLE_NAME_EXT'],
create_bq_ds_op.outputs['rpm_bq_ds_name'],
)
create_bq_ml_op = create_kfp_comp(create_bq_ml)(
load_bq_ds_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_MODEL_NAME'],
all_vars['RPM_DEFAULT_MODEL_NAME'],
create_bq_ds_op.outputs['rpm_bq_ds_name'],
load_bq_ds_op.outputs['rpm_table_id']
)
evaluate_ml_model_op = create_kfp_comp(evaluate_ml_model)(
create_bq_ml_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
create_bq_ds_op.outputs['rpm_bq_ds_name'],
create_bq_ml_op.outputs['rpm_bqml_model'],
)
create_batch_prediction_dataset_op = create_kfp_comp(create_batch_prediction_dataset)(
evaluate_ml_model_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
create_bq_ds_op.outputs['rpm_bq_ds_name'],
load_bq_ds_op.outputs['rpm_table_id'],
)
predict_batch_ml_model_op = create_kfp_comp(predict_batch_ml_model)(
evaluate_ml_model_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
create_bq_ds_op.outputs['rpm_bq_ds_name'],
create_bq_ml_op.outputs['rpm_bqml_model'],
create_batch_prediction_dataset_op.outputs['rpm_pred_table_id'],
)
get_versioned_bqml_model_export_path_op = create_kfp_comp(get_bqml_model_version)(
create_bq_ml_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_MODEL_EXPORT_PATH'],
all_vars['RPM_DEFAULT_MODEL_EXPORT_PATH'],
all_vars['RPM_MODEL_VER_PREFIX'],
gcs_bucket_folder_op.outputs['rpm_gcs_rpm_ds_url']
)
# create a volume where the dataset will be temporarily stored.
pvc_op = VolumeOp(
name=all_vars['RPM_PVC_NAME'],
resource_name=all_vars['RPM_PVC_NAME'],
size="20Gi",
modes=dsl.VOLUME_MODE_RWO
)
export_bqml_model_to_gcs_op = create_kfp_comp(export_bqml_model_to_gcs)(get_versioned_bqml_model_export_path_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_PVC_NAME'],
all_vars['RPM_PVC_DATASET_FOLDER_NAME'],
get_versioned_bqml_model_export_path_op.outputs['rpm_bqml_model_export_path'],
gcs_bucket_folder_op.outputs['rpm_gcs_rpm_ds_url'],
create_bq_ds_op.outputs['rpm_bq_ds_name'],
create_bq_ml_op.outputs['rpm_bqml_model'],
)
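# Mount the shared PVC so the export step can stage the eval/training detail files locally before copying them to Cloud Storage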
export_bqml_model_to_gcs_op.add_pvolumes({data_path: pvc_op.volume})
model_deploy_op = create_kfp_comp(deploy_ml_model_online_pred)(
export_bqml_model_to_gcs_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_LOCATION'],
all_vars['RPM_RUNTIME_VERSION'],
all_vars['RPM_PYTHON_VERSION'],
export_bqml_model_to_gcs_op.outputs['rpm_model_uri'],
create_bq_ml_op.outputs['rpm_bqml_model'],
get_versioned_bqml_model_export_path_op.outputs['rpm_model_version'],
gcs_bucket_folder_op.outputs['rpm_gcs_rpm_ds_url'],
)
predict_online_ml_model_op = create_kfp_comp(predict_online_ml_model)(
model_deploy_op.outputs['rpm_context'],
all_vars['RPM_GCP_PROJECT'],
all_vars['RPM_PVC_NAME'],
all_vars['RPM_PVC_DATASET_FOLDER_NAME'],
create_bq_ml_op.outputs['rpm_bqml_model'],
get_versioned_bqml_model_export_path_op.outputs['rpm_model_version'],
)
predict_online_ml_model_op.add_pvolumes({data_path: pvc_op.volume})
# don't cache the following comps
# the below is for model versioning only
get_versioned_bqml_model_export_path_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
export_bqml_model_to_gcs_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
model_deploy_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
predict_online_ml_model_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
# don't cache any comps
# you don't want to cache any comps when you are repetatively integration testing
gcs_bucket_folder_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
create_bq_ds_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
load_bq_ds_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
create_bq_ml_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
evaluate_ml_model_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
create_batch_prediction_dataset_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
predict_batch_ml_model_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
get_versioned_bqml_model_export_path_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
export_bqml_model_to_gcs_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
model_deploy_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
predict_online_ml_model_op.execution_options.caching_strategy.max_cache_staleness = "P0D"
#compile the pipeline
def complie_pipeline(pipeline_func):
"""Compile the pipeline, watch out for errors in the pipeline composition.
Args:
pipeline_func (:obj:`bq_googlestr_dataset_to_bq_to_caip_pipeline`):
The pipeline definition
Returns:
pipeline_filename (:obj:`str`): the compiled, compressed pipeline file
to upload to the Kubeflow Pipelines host
pipeline_func (:obj:`str`): bq_googlestr_dataset_to_bq_to_caip_pipeline,
name of the pipeline
arguments (:obj:`str`): the arguments to pass to the pipeline
when you launch it"""
pipeline_func = pipeline_func
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
arguments = {}
return (pipeline_filename, pipeline_func, arguments)
pipeline_filename, pipeline_func, arguments = complie_pipeline(bq_googlestr_dataset_to_bq_to_caip_pipeline)
# create and run an experiment
def create_experiment_and_run():
"""Create an experiment and run the pipeline immediately.
Please use the links in the output to go directly to the experiment/run launched in the browser."""
client = kfp.Client(RPM_GCP_KFP_HOST)
experiment = client.create_experiment(RPM_DS_DOWNLOAD_EXPERIMENT_NAME)
#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(
experiment_id=experiment.id,
job_name=run_name,
pipeline_package_path=pipeline_filename,
params=arguments)
create_experiment_and_run()
# Data Exploration !!! BE CAREFUL !!! adjust the query to sample the data.
# 1. Get pandas df from BigQuery
# 2. plot histogram using matplotlib
#########################
from google.cloud import bigquery as bq
import pandas as pd
rpm_context = get_local_context()
client = bq.Client(project=rpm_context["RPM_GCP_PROJECT"])
# adjust the below query to grab only a sample dataset e.g. use a where clause.
df = client.query('''
SELECT *
FROM `%s.%s`
LIMIT 10
''' % (rpm_context["rpm_bq_ds_name"], rpm_context["rpm_table_id"].split('.')[2])).to_dataframe()
df.head()
df.tail()
df.info()
df.shape
import matplotlib.pyplot as plt
%matplotlib inline
plt.close('all')
df.hist(bins=50, figsize=(20,15))
plt.show()
# takes a bit of time...BE CAREFUL!!!
# works on local Jupyter instance.
import pandas_profiling as pp
pp.ProfileReport(df)
# delete_pod_pvc
def delete_pod_pvc(ctx: str) -> str:
"""Removes the Pods and Persistent Volume Claims (PVCs) created in the pipeline.
This is not recommended for use in a production environment.
Comes in handy in the iterative development and testing phases of the SDLC.
!!! BE CAREFUL !!!
Args:
ctx(:obj:`str`): The dict object
with all the variables in the local context"""
# loading rpm_context string
import json
rpm_context = json.loads(ctx)
print(rpm_context)
import subprocess
def exec_cmd (cmd):
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e)
exec_cmd(f"gcloud config set project {rpm_context['RPM_GCP_PROJECT']}")
exec_cmd(f"gcloud container clusters get-credentials {rpm_context['RPM_CLUSTER_NAME']} --zone {rpm_context['RPM_LOCATION']} --project {rpm_context['RPM_GCP_PROJECT']}")
exec_cmd(''' for pod in `kubectl get pod | grep 'bq-public-google-ds-to-bq-' | awk -F ' ' '{print $1}'`; do echo kubectl delete pod $pod; kubectl delete pod $pod; done ''')
exec_cmd(''' for pvc in `kubectl get pvc | grep 'bq-public-google-ds-to-bq-' | awk -F ' ' '{print $1}'`; do echo kubectl delete pvc $pvc; kubectl delete pvc $pvc; done ''')
exec_cmd(''' for pod in `kubectl get pod | grep 'bq-public-google-ds-to-bq-' | awk -F ' ' '{print $1}'`; do echo kubectl patch pod $pod -p '{"metadata":{"finalizers":null}}'; kubectl patch pod $pod -p '{"metadata":{"finalizers":null}}'; done ''')
exec_cmd(''' for pvc in `kubectl get pvc | grep 'bq-public-google-ds-to-bq-' | awk -F ' ' '{print $1}'`; do echo kubectl patch pvc $pvc -p '{"metadata":{"finalizers":null}}'; kubectl patch pvc $pvc -p '{"metadata":{"finalizers":null}}'; done ''')
test_comp_local(delete_pod_pvc)
# delete the storage folder
from google.cloud import storage
from google.cloud.exceptions import NotFound
from google.cloud.exceptions import Forbidden
import traceback
def delete_storage_folder(bucket_name, folder_name):
"""Deletes a folder in Google Cloud Storage.
This is not recommended for use in a production environment.
Comes handy in the iterative development and testing phases of the SDLC.
!!! BE CAREFUL !!!!
Args:
bucket_name(:obj:`str`): The Cloud Storage bucket name,
where the folder exists
folder_name(:obj:`str`): The folder that we want to delete
Returns:
(:obj:`boolean`): True if we are able to successfully delete the folder"""
if len(bucket_name) == 0 or len(folder_name) == 0:
print(f"Folder {folder_name} couldn't be deleted. Name is empty.")
return False
else:
client = storage.Client()
try:
bucket = client.get_bucket(bucket_name)
blob = bucket.get_blob(folder_name)
if blob is None:
print(f"Folder name {folder_name} does not exist!")
return False
else:
bucket.delete_blobs(blobs=bucket.list_blobs(prefix=folder_name))
print(f"Folder {folder_name} deleted")
return True
except Exception as e:
print(f"Folder {folder_name} couldn't be deleted")
print(e)
error = traceback.format_exc()
print(error)
return False
# delete storage folder if desired...!!!BE CAREFUL!!!!
local_context = get_local_context()
delete_storage_folder(local_context['rpm_gcs_rpm_ds_url'].split('/')[2],
local_context['rpm_gcs_rpm_ds_url'].split('/')[3]+'/')
#delete the bucket
from google.cloud import storage
from google.cloud.exceptions import NotFound
from google.cloud.exceptions import Forbidden
import traceback
def delete_storage_bucket (bucket_name):
"""Deletes a bucket in Google Cloud Storage.
This is not recommended for use in a production environment.
Comes handy in the iterative development and testing phases of the SDLC.
!!! BE CAREFUL !!!!
Args:
bucket_name(:obj:`str`): The Cloud Storage bucket name,
that we want to delete
Returns:
(:obj:`boolean`): True if we are able to successfully delete the bucket"""
if bucket_name:
client = storage.Client()
try:
bucket = client.get_bucket(bucket_name)
bucket.delete()
print(f"Bucket {bucket.name} deleted")
return True
except Exception as e:
print(f"Bucket {bucket_name} couldn't be deleted")
print(e)
error = traceback.format_exc()
print(error)
return False
else:
print(f"Bucket {bucket_name} couldn't be deleted. Name is empty.")
return False
# delete storage bucket if desired...!!! BE CAREFUL !!!!
delete_storage_bucket(get_local_context()['rpm_gcs_rpm_ds_url'].split('/')[2])
#delete BigQuery table if not needed...!!! BE CAREFUL !!!
def delete_table(table_id):
"""Deletes a BigQuery table.
This is not recommended for use in a production environment.
Comes in handy in the iterative development and testing phases of the SDLC.
!!! BE CAREFUL !!!
Args:
table_id(:obj:`str`): The BigQuery table name that we want to delete"""
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
# client.delete_table(table_id, not_found_ok=True) # Make an API request.
client.delete_table(table_id) # Make an API request.
print("Deleted table '{}'.".format(table_id))
#delete the table in the BigQuery
delete_table(get_local_context()['rpm_table_id'])
def delete_dataset(dataset_id):
"""Deletes a BigQuery dataset.
This is not recommended for use in a production environment.
Comes in handy in the iterative development and testing phases of the SDLC.
!!! BE CAREFUL !!!
Args:
dataset_id(:obj:`str`): The BigQuery dataset name that we want to delete"""
# [START bigquery_delete_dataset]
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
# dataset_id = 'your-project.your_dataset'
# Use the delete_contents parameter to delete a dataset and its contents.
# Use the not_found_ok parameter to not receive an error if the
# dataset has already been deleted.
client.delete_dataset(
dataset_id, delete_contents=True, not_found_ok=True
) # Make an API request.
print("Deleted dataset '{}'.".format(dataset_id))
#delete the BigQuery dataset
rpm_context = get_local_context()
delete_dataset(f"{rpm_context['RPM_GCP_PROJECT']}.{rpm_context['rpm_bq_ds_name']}")
# delete the Cloud Storage folders where the models are saved
local_context = get_local_context()
bucket_name= local_context['rpm_gcs_rpm_ds_url'].split('/')[2]
rpm_bq_ds_name = local_context['rpm_gcs_rpm_ds_url'].split('/')[3]
rpm_bqml_model_export_path = local_context['RPM_MODEL_EXPORT_PATH']
if not rpm_bqml_model_export_path:
rpm_bqml_model_export_path = local_context["RPM_DEFAULT_MODEL_EXPORT_PATH"]
folder_name = os.path.join(rpm_bq_ds_name, rpm_bqml_model_export_path)
assert delete_storage_folder(bucket_name, folder_name) == True
#exec a cmd in python; an utility func
import subprocess
import traceback
def exec_cmd(cmd):
"""Executes an OS command.
Args:
cmd(:obj:`str`): The OS command
Returns:
(:obj:`str`): The output of the execution of the OS command
(:obj:`int`): The return code of the execution of the OS command"""
try:
print(cmd)
process = subprocess.run(cmd, shell=True, check=True,
stdout=subprocess.PIPE, universal_newlines=True)
print(f"'output from {cmd}': {process.stdout}")
return (process.stdout, 0)
except subprocess.CalledProcessError as e:
error = traceback.format_exc()
print(error)
print(e.output)
return (e.output, 1)
#delete the model
def delete_caip_model():
"""Deletes the models from Cloud AI Platform Prediction."""
local_context = get_local_context()
(output, returncode) = exec_cmd(f"gcloud ai-platform versions list --model {local_context['rpm_bqml_model']} --format='value(name)'")
for each_ver in output.split('\n'):
print(each_ver)
cmd = f"gcloud ai-platform versions delete {each_ver} --model={local_context['rpm_bqml_model']}"
exec_cmd(cmd)
cmd = f'gcloud ai-platform models delete {local_context["rpm_bqml_model"]}'
exec_cmd(cmd)
delete_caip_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 6.1.3 Drawbacks of SGD
Step2: 6.1.4 Momentum
Step3: 6.1.5 AdaGrad
Step4: 6.1.6 Adam
Step5: 6.1.7 Which update method should we use? (the update rules are sketched after this list)
Step6: In the figure above AdaGrad looks best, but the result changes depending on the problem being handled and on hyperparameter settings such as the learning rate.
Step7: The methods other than SGD learn faster, and AdaGrad appears to learn the fastest. In general the other three methods learn faster than SGD and sometimes reach a higher final recognition accuracy as well.
Step8: Looking at the upper row of histograms above, with initial weights drawn from a Gaussian with standard deviation 1, the activations of each layer are biased toward 0 and 1.
Step9: 6.2.3 Weight initialization for ReLU
Step10: With "std=0.01" the activations are small in every layer, so the gradients in backpropagation also become small. With the "Xavier initialization" the skew grows slightly as the layers get deeper, so making the network deeper increases the skew of the activations and a vanishing-gradient problem appears during training. With the "He initialization" the spread stays uniform across layers, so backpropagation behaves well too.
Step11: With "std=0.01" learning barely progresses, presumably because the activations are biased toward 0, the gradients are almost zero, and the weights are hardly updated.
Step12: The plots above show the learning curves when the standard deviation of the initial weights is varied. Solid lines are with Batch Normalization, dashed lines are without. In most cases learning proceeds faster with Batch Norm.
Step13: The plots above show the recognition accuracy on the training and test data, checked once per epoch (one full pass over the training data).
Step14: Adding weight decay narrows the gap between the training and test accuracy. Note, however, that the recognition accuracy on the training data also drops.
Step15: With Dropout=0.2 we get the result above. The gap between the training and test sets shrinks, so training is going well. In this way Dropout suppresses overfitting while preserving the expressive power of the network.
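For reference, the update rules implemented by the optimizer classes below can be sketched as follows (notation introduced here: $W$ weights, $\partial L/\partial W$ the gradient, $\eta$ the learning rate, $\alpha$ the momentum coefficient, $\varepsilon$ a small constant):
$$\mathrm{SGD}:\; W \leftarrow W - \eta\,\frac{\partial L}{\partial W}$$
$$\mathrm{Momentum}:\; v \leftarrow \alpha v - \eta\,\frac{\partial L}{\partial W},\qquad W \leftarrow W + v$$
$$\mathrm{AdaGrad}:\; h \leftarrow h + \frac{\partial L}{\partial W}\odot\frac{\partial L}{\partial W},\qquad W \leftarrow W - \eta\,\frac{1}{\sqrt{h}+\varepsilon}\,\frac{\partial L}{\partial W}$$
Adam combines a Momentum-style first-moment estimate with an AdaGrad/RMSProp-style second-moment estimate plus bias correction. For Steps 8-10, the compared initial-weight standard deviations are 1, 0.01, Xavier $\sqrt{1/n}$ and He $\sqrt{2/n}$, where $n$ is the number of input nodes of the previous layer; the weight decay of Step 14 adds $\tfrac{1}{2}\lambda W^2$ to the loss.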
|
<ASSISTANT_TASK:>
Python Code:
class SGD:
def __init__(self, lr=0.01):
self.lr = lr
def update(self, params, grads):
for key in params.keys():
params[key] -= self.lr * grads[key]
# cf.http://d.hatena.ne.jp/white_wheels/20100327/p3
import numpy as np
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
def _numerical_gradient_no_batch(f, x, axis='x'):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = float(tmp_val) + h
fxh1 = f(x, axis) # f(x+h)
x[idx] = tmp_val - h
fxh2 = f(x, axis) # f(x-h)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val # 値を元に戻す
return grad
def numerical_gradient(f, X):
if X.ndim == 1:
return _numerical_gradient_no_batch(f, X)
else:
grad = np.zeros_like(X)
for idx, x in enumerate(X):
if(idx == 0):
grad[idx] = _numerical_gradient_no_batch(f, x, axis='x')
elif(idx == 1):
grad[idx] = _numerical_gradient_no_batch(f, x, axis='y')
return grad
def function_2(x, axis='x'):
if(axis == 'x'):
return np.sum(x**2 / 20)
elif(axis == 'y'):
return np.sum(x**2)
if __name__ == '__main__':
x = np.arange(-10, 10, 1)
y = np.arange(-10, 10, 1)
X, Y = np.meshgrid(x, y)
X = X.flatten()
Y = Y.flatten()
grad = numerical_gradient(function_2, np.array([X, Y]) )
plt.figure()
plt.quiver(X, Y, -grad[0], -grad[1], angles="xy",color="#666666")#,headwidth=10,scale=40,color="#444444")
plt.xlim([-10, 10])
plt.ylim([-10, 10])
plt.xlabel('x')
plt.ylabel('y')
plt.grid()
plt.legend()
plt.draw()
plt.show()
class Momentum:
def __init__(self, lr=0.01, momentum=0.9):
self.lr = lr
self.momentum = momentum
self.v = None
def update(self, params, grads):
if self.v is None:
self.v = {}
for key, val in params.items():
self.v[key] = np.zeros_like(val)
for key in params.keys():
self.v[key] = self.momentum*self.v[key] - self.lr*grads[key]
params[key] += self.v[key]
class AdaGrad:
def __init__(self, lr=0.01):
self.lr = lr
self.h = None
def update(self, params, grads):
if self.h is None:
self.h = {}
for key, val in params.items():
self.h[key] = np.zeros_like(val)
for key in params.keys():
self.h[key] += grads[key] * grads[key]
params[key] -= self.lr * grads[key] / (np.sqrt(self.h[key]) + 1e-7)
class Adam:
def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.iter = 0
self.m = None
self.v = None
def update(self, params, grads):
if self.m is None:
self.m, self.v = {}, {}
for key, val in params.items():
self.m[key] = np.zeros_like(val)
self.v[key] = np.zeros_like(val)
self.iter += 1
lr_t = self.lr * np.sqrt(1.0 - self.beta2**self.iter) / (1.0 - self.beta1**self.iter)
for key in params.keys():
#self.m[key] = self.beta1*self.m[key] + (1-self.beta1)*grads[key]
#self.v[key] = self.beta2*self.v[key] + (1-self.beta2)*(grads[key]**2)
self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
self.v[key] += (1 - self.beta2) * (grads[key]**2 - self.v[key])
params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)
#unbias_m += (1 - self.beta1) * (grads[key] - self.m[key]) # correct bias
#unbisa_b += (1 - self.beta2) * (grads[key]*grads[key] - self.v[key]) # correct bias
#params[key] += self.lr * unbias_m / (np.sqrt(unbisa_b) + 1e-7)
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
from src.optimizer import *
def f(x, y):
return x**2 / 20.0 + y**2
def df(x, y):
return x / 10.0, 2.0*y
init_pos = (-7.0, 2.0)
params = {}
params['x'], params['y'] = init_pos[0], init_pos[1]
grads = {}
grads['x'], grads['y'] = 0, 0
optimizers = OrderedDict()
optimizers["SGD"] = SGD(lr=0.95)
optimizers["Momentum"] = Momentum(lr=0.1)
optimizers["AdaGrad"] = AdaGrad(lr=1.5)
optimizers["Adam"] = Adam(lr=0.3)
idx = 1
for key in optimizers:
optimizer = optimizers[key]
x_history = []
y_history = []
params['x'], params['y'] = init_pos[0], init_pos[1]
for i in range(30):
x_history.append(params['x'])
y_history.append(params['y'])
grads['x'], grads['y'] = df(params['x'], params['y'])
optimizer.update(params, grads)
x = np.arange(-10, 10, 0.01)
y = np.arange(-5, 5, 0.01)
X, Y = np.meshgrid(x, y)
Z = f(X, Y)
# for simple contour line
mask = Z > 7
Z[mask] = 0
# plot
plt.subplot(2, 2, idx)
idx += 1
plt.plot(x_history, y_history, 'o-', color="red")
plt.contour(X, Y, Z)
plt.ylim(-10, 10)
plt.xlim(-10, 10)
plt.plot(0, 0, '+')
#colorbar()
#spring()
plt.title(key)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
import os
import sys
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.util import smooth_curve
from src.multi_layer_net import MultiLayerNet
from src.optimizer import *
# 0:MNISTデータの読み込み==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000
# 1:実験の設定==========
optimizers = {}
optimizers['SGD'] = SGD()
optimizers['Momentum'] = Momentum()
optimizers['AdaGrad'] = AdaGrad()
optimizers['Adam'] = Adam()
#optimizers['RMSprop'] = RMSprop()
networks = {}
train_loss = {}
for key in optimizers.keys():
networks[key] = MultiLayerNet(
input_size=784, hidden_size_list=[100, 100, 100, 100],
output_size=10)
train_loss[key] = []
# 2:訓練の開始==========
for i in range(max_iterations):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for key in optimizers.keys():
grads = networks[key].gradient(x_batch, t_batch)
optimizers[key].update(networks[key].params, grads)
loss = networks[key].loss(x_batch, t_batch)
train_loss[key].append(loss)
if i % 100 == 0:
print( "===========" + "iteration:" + str(i) + "===========")
for key in optimizers.keys():
loss = networks[key].loss(x_batch, t_batch)
print(key + ":" + str(loss))
# 3.グラフの描画==========
markers = {"SGD": "o", "Momentum": "x", "AdaGrad": "s", "Adam": "D"}
x = np.arange(max_iterations)
for key in optimizers.keys():
plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 1)
plt.legend()
plt.show()
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def ReLU(x):
return np.maximum(0, x)
def tanh(x):
return np.tanh(x)
input_data = np.random.randn(1000, 100) # 1000個のデータ
node_num = 100 # 各隠れ層のノード(ニューロン)の数
hidden_layer_size = 5 # 隠れ層が5層
activations = {} # ここにアクティベーションの結果を格納する
activations01 = {} # ここにアクティベーションの結果を格納する
activationsxa = {} # ここにアクティベーションの結果を格納する
x = input_data
x01 = input_data
xxa = input_data
for i in range(hidden_layer_size):
if i != 0:
x = activations[i-1]
x01 = activations01[i-1]
xxa = activationsxa[i-1]
# 初期値の値をいろいろ変えて実験しよう!
w = np.random.randn(node_num, node_num) * 1
w01 = np.random.randn(node_num, node_num) * 0.01
wxa = np.random.randn(node_num, node_num) * np.sqrt(1.0 / node_num)
# w = np.random.randn(node_num, node_num) * np.sqrt(2.0 / node_num)
a = np.dot(x, w)
a01 = np.dot(x01, w01)
axa = np.dot(xxa, wxa)
# 活性化関数の種類も変えて実験しよう!
z = sigmoid(a)
z01 = sigmoid(a01)
zxa = sigmoid(axa)
# z = ReLU(a)
# z = tanh(a)
activations[i] = z
activations01[i] = z01
activationsxa[i] = zxa
# ヒストグラムを描画(標準偏差1)
for i, a in activations.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
# plt.xlim(0.1, 1)
# plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
# ヒストグラムを描画(標準偏差0.1)
for i, a in activations01.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
# plt.xlim(0.1, 1)
# plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
# ヒストグラムを描画(標準偏差 Xavier)
for i, a in activationsxa.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
# plt.xlim(0.1, 1)
# plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def ReLU(x):
return np.maximum(0, x)
def tanh(x):
return np.tanh(x)
input_data = np.random.randn(1000, 100) # 1000個のデータ
node_num = 100 # 各隠れ層のノード(ニューロン)の数
hidden_layer_size = 5 # 隠れ層が5層
activations001 = {} # ここにアクティベーションの結果を格納する
activationsxa = {} # ここにアクティベーションの結果を格納する
activationshe = {} # ここにアクティベーションの結果を格納する
x001 = input_data
xxa = input_data
xhe = input_data
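# Push the same input through the 5-layer ReLU net with three weight scales: std=0.01, Xavier sqrt(1/n), and He sqrt(2/n)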
for i in range(hidden_layer_size):
if i != 0:
x001 = activations001[i-1]
xxa = activationsxa[i-1]
xhe = activationshe[i-1]
# 初期値の値をいろいろ変えて実験しよう!
w001 = np.random.randn(node_num, node_num) * 0.01
wxa = np.random.randn(node_num, node_num) * np.sqrt(1.0 / node_num)
whe = np.random.randn(node_num, node_num) * np.sqrt(2.0 / node_num)
a001 = np.dot(x001, w001)
axa = np.dot(xxa, wxa)
ahe = np.dot(xhe, whe)
z001 = ReLU(a001)
zxa = ReLU(axa)
zhe = ReLU(ahe)
activations001[i] = z001
activationsxa[i] = zxa
activationshe[i] = zhe
# ヒストグラムを描画(標準偏差0.01)
for i, a in activations001.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
plt.xlim(0.1, 1)
plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
# ヒストグラムを描画(Xavier)
for i, a in activationsxa.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
plt.xlim(0.1, 1)
plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
# ヒストグラムを描画(He)
for i, a in activationshe.items():
plt.subplot(1, len(activations), i+1)
plt.title(str(i+1) + "-layer")
if i != 0: plt.yticks([], [])
plt.xlim(0.1, 1)
plt.ylim(0, 7000)
plt.hist(a.flatten(), 30, range=(0,1))
plt.show()
import os
import sys
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.util import smooth_curve
from src.multi_layer_net import MultiLayerNet
from src.optimizer import SGD
# 0:MNISTデータの読み込み==========
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
train_size = x_train.shape[0]
batch_size = 128
max_iterations = 2000
# 1:実験の設定==========
weight_init_types = {'std=0.01': 0.01, 'Xavier': 'sigmoid', 'He': 'relu'}
optimizer = SGD(lr=0.01)
networks = {}
train_loss = {}
for key, weight_type in weight_init_types.items():
networks[key] = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100],
output_size=10, weight_init_std=weight_type)
train_loss[key] = []
# 2:訓練の開始==========
for i in range(max_iterations):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for key in weight_init_types.keys():
grads = networks[key].gradient(x_batch, t_batch)
optimizer.update(networks[key].params, grads)
loss = networks[key].loss(x_batch, t_batch)
train_loss[key].append(loss)
if i % 100 == 0:
print("===========" + "iteration:" + str(i) + "===========")
for key in weight_init_types.keys():
loss = networks[key].loss(x_batch, t_batch)
print(key + ":" + str(loss))
# 3.グラフの描画==========
markers = {'std=0.01': 'o', 'Xavier': 's', 'He': 'D'}
x = np.arange(max_iterations)
for key in weight_init_types.keys():
plt.plot(x, smooth_curve(train_loss[key]), marker=markers[key], markevery=100, label=key)
plt.xlabel("iterations")
plt.ylabel("loss")
plt.ylim(0, 2.5)
plt.legend()
plt.show()
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.multi_layer_net_extend import MultiLayerNetExtend
from src.optimizer import SGD, Adam
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# 学習データを削減
x_train = x_train[:1000]
t_train = t_train[:1000]
max_epochs = 20
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.01
def __train(weight_init_std):
bn_network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100], output_size=10,
weight_init_std=weight_init_std, use_batchnorm=True)
network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100], output_size=10,
weight_init_std=weight_init_std)
optimizer = SGD(lr=learning_rate)
train_acc_list = []
bn_train_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
epoch_cnt = 0
for i in range(1000000000):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
for _network in (bn_network, network):
grads = _network.gradient(x_batch, t_batch)
optimizer.update(_network.params, grads)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
bn_train_acc = bn_network.accuracy(x_train, t_train)
train_acc_list.append(train_acc)
bn_train_acc_list.append(bn_train_acc)
# print("epoch:" + str(epoch_cnt) + " | " + str(train_acc) + " - " + str(bn_train_acc))
epoch_cnt += 1
if epoch_cnt >= max_epochs:
break
return train_acc_list, bn_train_acc_list
# 3.グラフの描画==========
plt.figure(figsize=(10,9))
weight_scale_list = np.logspace(0, -4, num=16)
x = np.arange(max_epochs)
for i, w in enumerate(weight_scale_list):
# print( "============== " + str(i+1) + "/16" + " ==============")
train_acc_list, bn_train_acc_list = __train(w)
plt.subplot(4,4,i+1)
plt.title("W:" + str(w))
if i == 15:
plt.plot(x, bn_train_acc_list, label='Batch Normalization', markevery=2)
plt.plot(x, train_acc_list, linestyle = "--", label='Normal(without BatchNorm)', markevery=2)
else:
plt.plot(x, bn_train_acc_list, markevery=2)
plt.plot(x, train_acc_list, linestyle="--", markevery=2)
plt.ylim(0, 1.0)
if i % 4:
plt.yticks([])
else:
plt.ylabel("accuracy")
if i < 12:
plt.xticks([])
else:
plt.xlabel("epochs")
plt.legend(loc='lower right')
plt.show()
import os
import sys
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.multi_layer_net import MultiLayerNet
from src.optimizer import SGD
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# 過学習を再現するために、学習データを削減
x_train = x_train[:300]
t_train = t_train[:300]
# weight decay(荷重減衰)の設定 =======================
weight_decay_lambda = 0 # weight decayを使用しない場合
# weight_decay_lambda = 0.1
# ====================================================
network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100], output_size=10,
weight_decay_lambda=weight_decay_lambda)
optimizer = SGD(lr=0.01)
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
epoch_cnt = 0
for i in range(1000000000):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
grads = network.gradient(x_batch, t_batch)
optimizer.update(network.params, grads)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
# print("epoch:" + str(epoch_cnt) + ", train acc:" + str(train_acc) + ", test acc:" + str(test_acc))
epoch_cnt += 1
if epoch_cnt >= max_epochs:
break
# 3.グラフの描画==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
import os
import sys
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.multi_layer_net import MultiLayerNet
from src.optimizer import SGD
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# 過学習を再現するために、学習データを削減
x_train = x_train[:300]
t_train = t_train[:300]
# weight decay(荷重減衰)の設定 =======================
# weight_decay_lambda = 0 # weight decayを使用しない場合
weight_decay_lambda = 0.1
# ====================================================
network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100], output_size=10,
weight_decay_lambda=weight_decay_lambda)
optimizer = SGD(lr=0.01)
max_epochs = 201
train_size = x_train.shape[0]
batch_size = 100
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
epoch_cnt = 0
for i in range(1000000000):
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
grads = network.gradient(x_batch, t_batch)
optimizer.update(network.params, grads)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
# print("epoch:" + str(epoch_cnt) + ", train acc:" + str(train_acc) + ", test acc:" + str(test_acc))
epoch_cnt += 1
if epoch_cnt >= max_epochs:
break
# 3.グラフの描画==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(max_epochs)
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
import os
import sys
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.multi_layer_net_extend import MultiLayerNetExtend
from src.trainer import Trainer
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# 過学習を再現するために、学習データを削減
x_train = x_train[:300]
t_train = t_train[:300]
# Dropuoutの有無、割り合いの設定 ========================
use_dropout = True # Dropoutなしのときの場合はFalseに
dropout_ratio = 0.2
# ====================================================
network = MultiLayerNetExtend(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100],
output_size=10, use_dropout=use_dropout, dropout_ration=dropout_ratio)
trainer = Trainer(network, x_train, t_train, x_test, t_test,
epochs=301, mini_batch_size=100,
optimizer='sgd', optimizer_param={'lr': 0.01}, verbose=False)
trainer.train()
train_acc_list, test_acc_list = trainer.train_acc_list, trainer.test_acc_list
# グラフの描画==========
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, marker='o', label='train', markevery=10)
plt.plot(x, test_acc_list, marker='s', label='test', markevery=10)
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import matplotlib.pyplot as plt
from src.mnist import load_mnist
from src.multi_layer_net import MultiLayerNet
from src.util import shuffle_dataset
from src.trainer import Trainer
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True)
# 高速化のため訓練データの削減
x_train = x_train[:500]
t_train = t_train[:500]
# 検証データの分離
validation_rate = 0.20
validation_num = x_train.shape[0] * validation_rate
x_train, t_train = shuffle_dataset(x_train, t_train)
x_val = x_train[:int(validation_num)]
t_val = t_train[:int(validation_num)]
x_train = x_train[int(validation_num):]
t_train = t_train[int(validation_num):]
def __train(lr, weight_decay, epocs=50):
network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100, 100, 100, 100],
output_size=10, weight_decay_lambda=weight_decay)
trainer = Trainer(network, x_train, t_train, x_val, t_val,
epochs=epocs, mini_batch_size=100,
optimizer='sgd', optimizer_param={'lr': lr}, verbose=False)
trainer.train()
return trainer.test_acc_list, trainer.train_acc_list
# Random search over the hyperparameters ======================================
optimization_trial = 100
results_val = {}
results_train = {}
for _ in range(optimization_trial):
# Specify the search range of the hyperparameters ===============
weight_decay = 10 ** np.random.uniform(-8, -4)
lr = 10 ** np.random.uniform(-6, -2)
# ================================================
val_acc_list, train_acc_list = __train(lr, weight_decay)
print("val acc:" + str(val_acc_list[-1]) + " | lr:" + str(lr) + ", weight decay:" + str(weight_decay))
key = "lr:" + str(lr) + ", weight decay:" + str(weight_decay)
results_val[key] = val_acc_list
results_train[key] = train_acc_list
# Draw the graphs ========================================================
print("=========== Hyper-Parameter Optimization Result ===========")
graph_draw_num = 20
col_num = 5
row_num = int(np.ceil(graph_draw_num / col_num))
i = 0
for key, val_acc_list in sorted(results_val.items(), key=lambda x:x[1][-1], reverse=True):
print("Best-" + str(i+1) + "(val acc:" + str(val_acc_list[-1]) + ") | " + key)
plt.subplot(row_num, col_num, i+1)
plt.title("Best-" + str(i+1))
plt.ylim(0.0, 1.0)
if i % 5: plt.yticks([])
plt.xticks([])
x = np.arange(len(val_acc_list))
plt.plot(x, val_acc_list)
plt.plot(x, results_train[key], "--")
i += 1
if i >= graph_draw_num:
break
plt.show()
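# Illustrative follow-up (an assumption about the usual workflow, not part of
# the original script): keep the best few settings so that a second, finer
# random search can be run over a narrower lr / weight-decay range around them.
best_keys = [k for k, v in sorted(results_val.items(), key=lambda x: x[1][-1], reverse=True)[:5]]
print("Top-5 hyperparameter settings: " + str(best_keys))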
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As specified before, addtwo is the nodename, Node is the Nodetype, Function(...) is the interface_function and add_node is the labelname of this node. In this particular case, we created an artificial input field called x_input, an artificial output field called val_output, and specified that this node should run the function add_two().
Step2: After all input fields are specified, we can run the node with run()
Step3: And what is the output of this node?
Step4: Example of a neuroimaging node
Step5: In the Interface tutorial, we were able to specify the input file with the in_file parameter. This works exactly the same way here, where the interface is wrapped in a node. The only thing that we have to be careful about when we use a node is to specify where this node should be executed. This is only relevant when we execute a node by itself, but not when we use it in a Workflow.
Step6: As we know from the Interface tutorial, the skull stripped output is stored under res.outputs.out_file. So let's take a look at the before and the after
Step7: Exercise 1
Step8: Exercise 2
|
<ASSISTANT_TASK:>
Python Code:
# Import Node and Function module
from nipype import Node, Function
# Create a small example function
def add_two(x_input):
return x_input + 2
# Create Node
addtwo = Node(Function(input_names=["x_input"],
output_names=["val_output"],
function=add_two),
name='add_node')
addtwo.inputs.x_input = 4
addtwo.run()
temp_res = addtwo.run()
temp_res.outputs
addtwo.result.outputs
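# Illustrative extra step (not in the original tutorial): the same node can be
# re-run after changing its input; add_two(10) should give 12.
addtwo.inputs.x_input = 10
print(addtwo.run().outputs.val_output)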
# Import BET from the FSL interface
from nipype.interfaces.fsl import BET
# Import the Node module
from nipype import Node
# Create Node
bet = Node(BET(frac=0.3), name='bet_node')
in_file = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'
# Specify node inputs
bet.inputs.in_file = in_file
bet.inputs.out_file = '/output/node_T1w_bet.nii.gz'
res = bet.run()
from nilearn.plotting import plot_anat
%matplotlib inline
import matplotlib.pyplot as plt
plot_anat(in_file, title='BET input', cut_coords=(10,10,10),
display_mode='ortho', dim=-1, draw_cross=False, annotate=False);
plot_anat(res.outputs.out_file, title='BET output', cut_coords=(10,10,10),
display_mode='ortho', dim=-1, draw_cross=False, annotate=False);
# write your solution here
# Import the Node module
from nipype import Node
# Import IsotropicSmooth from the FSL interface
from nipype.interfaces.fsl import IsotropicSmooth
# Define a node
smooth_node = Node(IsotropicSmooth(), name="smoothing")
smooth_node.inputs.in_file = '/data/ds000114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz'
smooth_node.inputs.fwhm = 4
smooth_node.inputs.out_file = '/output/node_T1w_smooth.nii.gz'
smooth_res = smooth_node.run()
# write your solution here
from nilearn.plotting import plot_anat
%pylab inline
plot_anat(smooth_node.inputs.in_file, title='smooth input', cut_coords=(10,10,10),
display_mode='ortho', dim=-1, draw_cross=False, annotate=False);
plot_anat(smooth_res.outputs.out_file, title='smooth output', cut_coords=(10,10,10),
display_mode='ortho', dim=-1, draw_cross=False, annotate=False);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-3', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
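# Illustrative example only (hypothetical placeholder values, following the
# "Set as follows" instructions above):
# DOC.set_author("Jane Doe", "jane.doe@example.org")
# DOC.set_contributor("John Smith", "john.smith@example.org")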
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data delivered by Geoff Reeves 9/6/2016
Step2: Observed Data
Step3: Make diagnostic plots of the posterior distributions created using MCMC.
Step4: Predictions based on this model
Step5: Plot many lines to get a feel for the uncertainty
Step6: Same as the previous figure, with the red lines overlaid as 100 joint draws from the model posterior in order to show the spread.
|
<ASSISTANT_TASK:>
Python Code:
# http://onlinelibrary.wiley.com/doi/10.1002/2016JA022652/epdf
import pymc3
import tqdm
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
import spacepy.plot as spp
import seaborn as sns
sns.set(font_scale=1.5)
# min_Dst, min_L
data = np.asarray([
65.000, 3.8000,
50.000, 3.7000,
67.000, 3.5000,
61.000, 3.4000,
77.000, 3.2000,
99.000, 2.8900,
87.000, 2.8000,
98.000, 2.8000,
96.000, 2.8000,
93.000, 2.3000,
92.000, 2.3000,
225.00, 2.3000,
206.00, 2.3000,
125.00, 2.3000]).reshape((-1,2))
dst = data[:,0]
minL = data[:,1]
print(dst, minL, data.dtype)
# make bins in Dst
dst_bins = np.arange(25, 300, 10)
print(dst_bins)
dst_bins_centers = np.asarray([dst_bins[:-1] + np.diff(dst_bins)/2]).T[:,0]
print(dst_bins_centers, dst_bins_centers.shape)
n_events_dig = np.digitize(dst, dst_bins)
print(n_events_dig)
n_events = np.zeros(len(dst_bins)-1)
success = np.zeros_like(n_events)
for i, v in enumerate(np.unique(n_events_dig)):
n_events[v-1] = np.sum(n_events_dig==v)
success[v-1] = np.sum(minL[n_events_dig==v] <= 2.4)
print(n_events)
print(success)
plt.plot(dst, minL, 'o')
plt.xlim((25, 250))
plt.ylim((2.2, 4.0))
plt.xlabel('Min Dst')
plt.ylabel('Min L Shell')
plt.plot(dst, minL, 'o')
plt.xlim((25, 250))
plt.ylim((2.2, 4.0))
plt.xlabel('Min Dst')
plt.ylabel('Min L Shell')
for v in dst_bins:
plt.axvline(v, lw=0.25)
plt.axhline(2.4, c='r', lw=1)
ind = n_events > 0
for i, j in zip(success[ind], n_events[ind]):
print(i,j)
# define priors
# these are wide uninformative priors
# alpha = pymc.Normal('alpha', mu=0, tau=1.0/5**2)
# beta = pymc.Normal('beta', mu=0, tau=1.0/10**2)
with pymc3.Model() as model:
alpha = pymc3.Uniform('alpha', -100, 100)
beta = pymc3.Uniform('beta', -100, 100)
# cannot feed in zero events
ind = n_events > 0
# define likelihood
p = pymc3.math.invlogit(alpha + beta*dst_bins_centers[ind])
y = pymc3.Binomial('y_obs', n=n_events[ind], p=p, observed=success[ind])
step = pymc3.Metropolis([alpha, beta])
trace = pymc3.sample(10000, njobs=5, tune=5000, step=step)
pymc3.traceplot(trace, combined=True)
pymc3.summary(trace)
xp = np.linspace(dst_bins_centers[ind].min(), dst_bins_centers[ind].max(), 100)
a = trace['alpha'].mean()
b = trace['beta'].mean()
y_val = pymc3.math.invlogit(a + b*xp).eval()
plt.plot(xp, y_val)
plt.scatter(dst_bins_centers[ind], success[ind]/n_events[ind], s=50);
plt.xlabel('Minimum Dst')
plt.ylabel('Probability slot is filled')
plt.gca().ticklabel_format(useOffset=False)
# get the minimum Dst where 99% should be successes
for percentage in [50,75,90,95,99]:
ind99 = y_val >= percentage/100
minDst99 = xp[ind99][0]
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a {1}% percent of a slot filling at this energy'.format(minDst99, percentage))
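# Illustrative addition: evaluate the fitted relation at a single Dst value
# (150 nT here is an arbitrary choice) using the posterior-mean coefficients
# computed above.
p_at_150 = pymc3.math.invlogit(a + b * 150.0).eval()
print('Posterior-mean probability of slot filling at Dst = 150 nT: ' + str(p_at_150))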
pymc3.math.invlogit(trace['alpha'][4] + trace['beta'][4]*xp).eval()
# one should be able to get estimates of the line uncertainty
ilu = np.empty((1000, len(xp)), dtype=float)
ilu = []
for ii, v in tqdm.tqdm(enumerate(np.random.randint(0, len(trace['alpha']), 100)), total=100):
if trace['alpha'][v] == 0:
continue
if trace['beta'][v] == 0:
continue
ilu.append(pymc3.math.invlogit(trace['alpha'][v] + trace['beta'][v]*xp).eval())
ilu=np.asarray(ilu)
ilu.shape
plt.figure(figsize=(8,5))
xp = np.linspace(dst_bins_centers[ind].min(), dst_bins_centers[ind].max(), 100)
for v in ilu:
plt.plot(xp, v, alpha=.3, c='r')
plt.plot(xp, np.percentile(ilu, [25, 50, 75],axis=0).T, c='k', lw=3)
# a = trace['alpha'].mean()
# b = trace['beta'].mean()
# plt.plot(xp, invlogit(a + b*xp).value)
# a = trace['alpha'].stats()['quantiles'][50]
# b = trace['beta'].stats()['quantiles'][50]
# plt.plot(xp, invlogit(a + b*xp).value, c='y')
plt.scatter(dst_bins_centers[ind], success[ind]/n_events[ind], s=50);
plt.xlabel('Minimum Dst')
plt.ylabel('Probability slot is filled')
xp.shape, np.percentile(ilu, [25, 50, 75],axis=0).T.shape
ii50 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.5)[0]
ii75 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.75)[0]
ii90 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.90)[0]
ii95 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.95)[0]
ii99 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.99)[0]
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 50% percent of a slot filling at this energy'.format(xp[ii50[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 75% percent of a slot filling at this energy'.format(xp[ii75[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 90% percent of a slot filling at this energy'.format(xp[ii90[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 95% percent of a slot filling at this energy'.format(xp[ii95[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 99% percent of a slot filling at this energy'.format(xp[ii99[0]]))
ii50 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.5)[0]
ii75 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.75)[0]
ii90 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.90)[0]
ii95 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.95)[0]
ii99 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.99)[0]
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 50% percent of a slot filling at this energy'.format(xp[ii50[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 75% percent of a slot filling at this energy'.format(xp[ii75[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 90% percent of a slot filling at this energy'.format(xp[ii90[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 95% percent of a slot filling at this energy'.format(xp[ii95[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 99% percent of a slot filling at this energy'.format(xp[ii99[0]]))
ii50 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.5)[0]
ii75 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.75)[0]
ii90 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.90)[0]
ii95 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.95)[0]
ii99 = np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.99)[0]
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 50% percent of a slot filling at this energy'.format(xp[ii50[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 75% percent of a slot filling at this energy'.format(xp[ii75[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 90% percent of a slot filling at this energy'.format(xp[ii90[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 95% percent of a slot filling at this energy'.format(xp[ii95[0]]))
print('At a minimum Dst of {0:0.0f}nT it is predicted to have a 99% percent of a slot filling at this energy'.format(xp[ii99[0]]))
v50 = [xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.5)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.5)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.5)[0][0]]]
v75 = [xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.75)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.75)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.75)[0][0]]]
v90 = [xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.90)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.90)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.90)[0][0]]]
v95 = [xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.95)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.95)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.95)[0][0]]]
v99 = [xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,2] > 0.99)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,1] > 0.99)[0][0]],
xp[np.where(np.percentile(ilu, [25, 50, 75],axis=0).T[:,0] > 0.99)[0][0]]]
v50, v75, v90, v95, v99
print("a 50% probability of slot filling for {0:.0f}<dst<{1:.0f}".format(v50[0], v50[2]))
print("a 75% probability of slot filling for {0:.0f}<dst<{1:.0f}".format(v75[0], v75[2]))
print("a 90% probability of slot filling for {0:.0f}<dst<{1:.0f}".format(v90[0], v90[2]))
print("a 95% probability of slot filling for {0:.0f}<dst<{1:.0f}".format(v95[0], v95[2]))
print("a 99% probability of slot filling for {0:.0f}<dst<{1:.0f}".format(v99[0], v99[2]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
from keras.models import Sequential
from keras.layers import Dense
from sklearn.datasets import make_regression
from sklearn.preprocessing import MinMaxScaler
from numpy import array
X, y = make_regression(n_samples=100, n_features=2, noise=0.1, random_state=1)
scalarX, scalarY = MinMaxScaler(), MinMaxScaler()
scalarX.fit(X)
scalarY.fit(y.reshape(100,1))
X = scalarX.transform(X)
y = scalarY.transform(y.reshape(100,1))
model = Sequential()
model.add(Dense(4, input_shape=(2,), activation='relu'))
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mse', optimizer='adam')
model.fit(X, y, epochs=1000, verbose=0)
Xnew = array([[0.29466096, 0.30317302]])
ynew = model.predict(Xnew)
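# Illustrative follow-up (not in the original snippet): map the scaled
# prediction back to the original target units with the fitted y-scaler.
print(scalarY.inverse_transform(ynew))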
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Start the main dataset with USA companies from companies.csv
Step2: Extract company category features
Step3: Since there are too many categories, we'll select only the top 50 most frequent ones.
Step4: So now we have added 50 more categories to our dataset.
Step5: Analyzing date variables
Step6: Change dataset index
Step7: Extract acquisitions features
Step8: Extract investments feature
Step9: Extract average number of investors and amount invested per round
Step10: Drop useless features
Step11: Normalize numeric variables
Step12: Normalize date variables
Step13: Extract state_code features
Step14: As we did for the categories variable, in order to reduce the number of features in our dataset, let's select only the top 15 most frequent states (which already cover 82% of our companies)
Step15: Move status to the end of dataframe and save to file
|
<ASSISTANT_TASK:>
Python Code:
#All imports here
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing
from datetime import datetime
from dateutil import relativedelta
%matplotlib inline
#Let's start by importing our csv files into dataframes
df_companies = pd.read_csv('data/companies.csv')
df_acquisitions = pd.read_csv('data/acquisitions.csv')
df_investments = pd.read_csv('data/investments.csv')
df_rounds = pd.read_csv('data/rounds.csv')
#Our final database will be stored in 'startups_USA'
startups_USA = df_companies[df_companies['country_code'] == 'USA']
startups_USA.head()
from operator import methodcaller
def split_categories(categories):
#get a unique list of the categories
splitted_categories = list(categories.astype('str').unique())
#split each category by |
splitted_categories = map(methodcaller("split", "|"), splitted_categories)
#flatten the list of sub categories
splitted_categories = [item for sublist in splitted_categories for item in sublist]
return splitted_categories
def explore_categories(categories, top_n_categories):
cat = split_categories(categories)
print 'There are in total {} different categories'.format(len(cat))
prob = pd.Series(cat).value_counts()
print prob.head()
#select first <top_n_categories>
mask = prob > prob[top_n_categories]
head_prob = prob.loc[mask].sum()
tail_prob = prob.loc[~mask].sum()
total_sum = prob.sum()
prob = prob.loc[mask]
prob2 = pd.DataFrame({'top '+str(top_n_categories)+' categories': head_prob, 'others': tail_prob},index=[0])
fig, axs = plt.subplots(2,1, figsize=(15,6))
prob.plot(kind='bar', ax=axs[0])
prob2.plot(kind='bar', ax=axs[1])
for bar in axs[1].patches:
height = bar.get_height()
axs[1].text(bar.get_x() + bar.get_width()/2., 0.50*height, '%.2f' % (float(height)/float(total_sum)*100) + "%", ha='center', va='top')
fig.tight_layout()
plt.xticks(rotation=90)
plt.show()
explore_categories(startups_USA['category_list'], top_n_categories=50)
def expand_top_categories_into_dummy_variables(df):
cat = df['category_list'].astype('str')
cat_count = cat.str.split('|').apply(lambda x: pd.Series(x).value_counts()).sum()
#Get a dummy dataset for categories
dummies = cat.str.get_dummies(sep='|')
#Count of categories splitted first 50)
top50categories = list(cat_count.sort_values(ascending=False).index[:50])
#Create a dataframe with the 50 top categories to be concatenated later to the complete dataframe
categories_df = dummies[top50categories]
categories_df = categories_df.add_prefix('Category_')
return pd.concat([df, categories_df], axis=1, ignore_index=False)
startups_USA = expand_top_categories_into_dummy_variables(startups_USA)
startups_USA.head()
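# Quick check (illustrative addition): confirm that exactly 50 'Category_'
# dummy columns were appended by the step above.
print(startups_USA.filter(regex='^Category_').shape[1])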
startups_USA['funding_rounds'].hist(bins=range(1,10))
plt.title("Histogram of the number of funding rounds")
plt.ylabel('Number of companies')
plt.xlabel('Number of funding rounds')
#funding_total_usd
#funding_rounds
plt.subplot()
startups_USA[startups_USA['funding_total_usd'] != '-']. \
set_index('name')['funding_total_usd'] \
.astype(float) \
.sort_values(ascending=False)\
[:30].plot(kind='barh', figsize=(5,7))
plt.gca().invert_yaxis()
plt.title('Companies with highest total funding')
plt.ylabel('Companies')
plt.xlabel('Total amount of funding (USD)')
# Investment types
df_rounds['funding_round_type'].value_counts()
import warnings
warnings.filterwarnings('ignore')
#Iterate over each kind of funding type, and add two new features for each into the dataframe
def add_dummy_for_funding_type(df, aggr_rounds, funding_type):
funding_df = aggr_rounds.iloc[aggr_rounds.index.get_level_values('funding_round_type') == funding_type].reset_index()
funding_df.columns = funding_df.columns.droplevel()
funding_df.columns = ['company_permalink', funding_type, funding_type+'_funding_total_usd', funding_type+'_funding_rounds']
funding_df = funding_df.drop(funding_type,1)
new_df = pd.merge(df, funding_df, on='company_permalink', how='left')
new_df = new_df.fillna(0)
return new_df
def expand_investment_rounds(df, df_rounds):
#Prepare an aggregated rounds dataframe grouped by company and funding type
rounds_agg = df_rounds.groupby(['company_permalink', 'funding_round_type'])['raised_amount_usd'].agg({'amount': [ pd.Series.sum, pd.Series.count]})
#Get available unique funding types
funding_types = list(rounds_agg.index.levels[1])
#Prepare the dataframe where all the dummy features for each funding type will be added (number of rounds and total sum for each type)
rounds_df = df[['permalink']]
rounds_df = rounds_df.rename(columns = {'permalink':'company_permalink'})
#For each funding type, add two more columns to rounds_df
for funding_type in funding_types:
rounds_df = add_dummy_for_funding_type(rounds_df, rounds_agg, funding_type)
#remove the company_permalink variable, since it's already available in the companies dataframe
rounds_df = rounds_df.drop('company_permalink', 1)
#set rounds_df to have the same index of the other dataframes
rounds_df.index = df.index
return pd.concat([df, rounds_df], axis=1, ignore_index=False)
startups_USA = expand_investment_rounds(startups_USA, df_rounds)
startups_USA.head()
startups_USA = startups_USA.set_index('permalink')
import warnings
warnings.filterwarnings('ignore')
def extract_feature_number_of_acquisitions(df, df_acquisitions):
number_of_acquisitions = df_acquisitions.groupby(['acquirer_permalink'])['acquirer_permalink'].agg({'amount': [ pd.Series.count]}).reset_index()
number_of_acquisitions.columns = number_of_acquisitions.columns.droplevel()
number_of_acquisitions.columns = ['permalink', 'number_of_acquisitions']
number_of_acquisitions = number_of_acquisitions.set_index('permalink')
number_of_acquisitions = number_of_acquisitions.fillna(0)
new_df = df.join(number_of_acquisitions)
new_df['number_of_acquisitions'] = new_df['number_of_acquisitions'].fillna(0)
return new_df
startups_USA = extract_feature_number_of_acquisitions(startups_USA, df_acquisitions)
import warnings
warnings.filterwarnings('ignore')
def extract_feature_number_of_investments(df, df_investments):
number_of_investments = df_investments.groupby(['investor_permalink'])['investor_permalink'].agg({'amount': [ pd.Series.count]}).reset_index()
number_of_investments.columns = number_of_investments.columns.droplevel()
number_of_investments.columns = ['permalink', 'number_of_investments']
number_of_investments = number_of_investments.set_index('permalink')
number_of_unique_investments = df_investments.groupby(['investor_permalink'])['company_permalink'].agg({'amount': [ pd.Series.nunique]}).reset_index()
number_of_unique_investments.columns = number_of_unique_investments.columns.droplevel()
number_of_unique_investments.columns = ['permalink', 'number_of_unique_investments']
number_of_unique_investments = number_of_unique_investments.set_index('permalink')
new_df = df.join(number_of_investments)
new_df['number_of_investments'] = new_df['number_of_investments'].fillna(0)
new_df = new_df.join(number_of_unique_investments)
new_df['number_of_unique_investments'] = new_df['number_of_unique_investments'].fillna(0)
return new_df
startups_USA = extract_feature_number_of_investments(startups_USA, df_investments)
import warnings
warnings.filterwarnings('ignore')
def extract_feature_avg_investors_per_round(df, investments):
number_of_investors_per_round = investments.groupby(['company_permalink', 'funding_round_permalink'])['investor_permalink'].agg({'investor_permalink': [ pd.Series.count]}).reset_index()
number_of_investors_per_round.columns = number_of_investors_per_round.columns.droplevel(0)
number_of_investors_per_round.columns = ['company_permalink', 'funding_round_permalink', 'count']
number_of_investors_per_round = number_of_investors_per_round.groupby(['company_permalink']).agg({'count': [ pd.Series.mean]}).reset_index()
number_of_investors_per_round.columns = number_of_investors_per_round.columns.droplevel(0)
number_of_investors_per_round.columns = ['company_permalink', 'number_of_investors_per_round']
number_of_investors_per_round = number_of_investors_per_round.set_index('company_permalink')
new_df = df.join(number_of_investors_per_round)
new_df['number_of_investors_per_round'] = new_df['number_of_investors_per_round'].fillna(-1)
return new_df
def extract_feature_avg_amount_invested_per_round(df, investments):
investmentsdf = investments.copy()
investmentsdf['raised_amount_usd'] = investmentsdf['raised_amount_usd'].astype(float)
avg_amount_invested_per_round = investmentsdf.groupby(['company_permalink', 'funding_round_permalink'])['raised_amount_usd'].agg({'raised_amount_usd': [ pd.Series.mean]}).reset_index()
avg_amount_invested_per_round.columns = avg_amount_invested_per_round.columns.droplevel(0)
avg_amount_invested_per_round.columns = ['company_permalink', 'funding_round_permalink', 'mean']
avg_amount_invested_per_round = avg_amount_invested_per_round.groupby(['company_permalink']).agg({'mean': [ pd.Series.mean]}).reset_index()
avg_amount_invested_per_round.columns = avg_amount_invested_per_round.columns.droplevel(0)
avg_amount_invested_per_round.columns = ['company_permalink', 'avg_amount_invested_per_round']
avg_amount_invested_per_round = avg_amount_invested_per_round.set_index('company_permalink')
new_df = df.join(avg_amount_invested_per_round)
new_df['avg_amount_invested_per_round'] = new_df['avg_amount_invested_per_round'].fillna(-1)
return new_df
startups_USA = extract_feature_avg_investors_per_round(startups_USA, df_investments)
startups_USA = extract_feature_avg_amount_invested_per_round(startups_USA, df_investments)
startups_USA.head()
#drop features
startups_USA = startups_USA.drop(['name','homepage_url', 'category_list', 'region', 'city', 'country_code'], 1)
#move status to the end of the dataframe
cols = list(startups_USA)
cols.append(cols.pop(cols.index('status')))
startups_USA = startups_USA.ix[:, cols]
def normalize_numeric_features(df, columns_to_scale = None):
min_max_scaler = preprocessing.MinMaxScaler()
startups_normalized = df.copy()
#Convert '-' to zeros in funding_total_usd
startups_normalized['funding_total_usd'] = startups_normalized['funding_total_usd'].replace('-', 0)
#scale numeric features
startups_normalized[columns_to_scale] = min_max_scaler.fit_transform(startups_normalized[columns_to_scale])
return startups_normalized
columns_to_scale = list(startups_USA.filter(regex=(".*(funding_rounds|funding_total_usd)|(number_of|avg_).*")).columns)
startups_USA = normalize_numeric_features(startups_USA, columns_to_scale)
def date_to_age_in_months(date):
if date != date or date == 0: #is NaN
return 0
date1 = datetime.strptime(date, '%Y-%m-%d')
date2 = datetime.strptime('2017-01-01', '%Y-%m-%d') #get age until 01/01/2017
delta = relativedelta.relativedelta(date2, date1)
return delta.years * 12 + delta.months
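# Quick sanity check (illustrative addition): from 2015-01-01 to the fixed
# 2017-01-01 reference date is 24 months.
assert date_to_age_in_months('2015-01-01') == 24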
def normalize_date_variables(df):
date_vars = ['founded_at', 'first_funding_at', 'last_funding_at']
for var in date_vars:
df[var] = df[var].map(date_to_age_in_months)
df = normalize_numeric_features(df, date_vars)
return df
startups_USA = normalize_date_variables(startups_USA)
def explore_states(states, top_n_states):
print 'There are in total {} different states'.format(len(states.unique()))
prob = pd.Series(states).value_counts()
print prob.head()
#select first <top_n_categories>
mask = prob > prob[top_n_states]
head_prob = prob.loc[mask].sum()
tail_prob = prob.loc[~mask].sum()
total_sum = prob.sum()
prob = prob.loc[mask]
prob2 = pd.DataFrame({'top '+str(top_n_states)+' states': head_prob, 'others': tail_prob},index=[0])
fig, axs = plt.subplots(2,1, figsize=(15,6))
prob.plot(kind='bar', ax=axs[0])
prob2.plot(kind='bar', ax=axs[1])
for bar in axs[1].patches:
height = bar.get_height()
axs[1].text(bar.get_x() + bar.get_width()/2., 0.50*height, '%.2f' % (float(height)/float(total_sum)*100) + "%", ha='center', va='top')
fig.tight_layout()
plt.xticks(rotation=90)
plt.show()
explore_states(startups_USA['state_code'], top_n_states=15)
def expand_top_states_into_dummy_variables(df):
states = df['state_code'].astype('str')
#Get a dummy dataset for categories
dummies = pd.get_dummies(states)
#select top most frequent states
top15states = list(states.value_counts().sort_values(ascending=False).index[:15])
#Create a dataframe with the 15 top states to be concatenated later to the complete dataframe
states_df = dummies[top15states]
states_df = states_df.add_prefix('State_')
new_df = pd.concat([df, states_df], axis=1, ignore_index=False)
new_df = new_df.drop(['state_code'], axis=1)
return new_df
startups_USA = expand_top_states_into_dummy_variables(startups_USA)
cols = list(startups_USA)
cols.append(cols.pop(cols.index('status')))
startups_USA = startups_USA.ix[:, cols]
startups_USA.to_csv('data/startups_pre_processed.csv')
startups_USA.head()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Primal
Step2: Dual
|
<ASSISTANT_TASK:>
Python Code:
# Imports
import numpy as np
import gurobipy as gbp
import datetime as dt
# Constants
Aij = np.random.randint(5, 50, 250000)
Aij = Aij.reshape(500,500)
AijSum = np.sum(Aij)
Cj = np.random.randint(10, 20, 500)
CjSum = np.sum(Cj)
Bi = np.random.randint(10, 20, 500)
BiSum = np.sum(Bi)
# Matrix Shape
rows = range(len(Aij))
cols = range(len(Aij[0]))
# Instantiate Model
mPrimal_Standard_GUROBI = gbp.Model(' -- Standard Primal Linear Programming Problem -- ')
# Set Focus to Optimality
gbp.setParam('MIPFocus', 2)
# Decision Variables
desc_var = []
for dest in cols:
desc_var.append([])
desc_var[dest].append(mPrimal_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='y'+str(dest+1)))
# Surplus Variables
surp_var = []
for orig in rows:
surp_var.append([])
surp_var[orig].append(mPrimal_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='s'+str(orig+1)))
# Update Model
mPrimal_Standard_GUROBI.update()
#Objective Function
mPrimal_Standard_GUROBI.setObjective(gbp.quicksum(Cj[dest]*desc_var[dest][0]
for dest in cols),
gbp.GRB.MINIMIZE)
# Constraints
for orig in rows:
mPrimal_Standard_GUROBI.addConstr(gbp.quicksum(Aij[orig][dest]*desc_var[dest][0]
for dest in cols)
- surp_var[orig][0]
- Bi[orig] == 0)
# Optimize
mPrimal_Standard_GUROBI.optimize()
# Write LP file
mPrimal_Standard_GUROBI.write('LP.lp')
print '\n*************************************************************************'
print ' | Decision Variables'
for v in mPrimal_Standard_GUROBI.getVars():
print ' | ', v.VarName, '=', v.x
print '*************************************************************************'
val = mPrimal_Standard_GUROBI.objVal
print ' | Objective Value ------------------ ', val
print ' | Aij Sum -------------------------- ', AijSum
print ' | Cj Sum --------------------------- ', CjSum
print ' | Bi Sum --------------------------- ', BiSum
print ' | Matrix Dimensions ---------------- ', Aij.shape
print ' | Date/Time ------------------------ ', dt.datetime.now()
print '*************************************************************************'
print '-- Gurobi Standard Primal Linear Programming Problem --'
print '\nJames Gaboardi, 2015'
# Instantiate Model
mDual_Standard_GUROBI = gbp.Model(' -- Standard Dual Linear Programming Problem -- ')
# Set Focus to Optimality
gbp.setParam('MIPFocus', 2)
# Decision Variables
desc_var = []
for orig in rows:
desc_var.append([])
desc_var[orig].append(mDual_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='u'+str(orig+1)))
# Slack Variables
slack_var = []
for dest in cols:
slack_var.append([])
slack_var[dest].append(mDual_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='t'+str(dest+1)))
# Update Model
mDual_Standard_GUROBI.update()
#Objective Function
mDual_Standard_GUROBI.setObjective(gbp.quicksum(Bi[orig]*desc_var[orig][0]
for orig in rows),
gbp.GRB.MAXIMIZE)
# Constraints
for dest in cols:
mDual_Standard_GUROBI.addConstr(gbp.quicksum(Aij[orig][dest]*desc_var[orig][0]
for orig in rows) +
slack_var[dest][0] -
Cj[dest] == 0)
# Optimize
mDual_Standard_GUROBI.optimize()
# Write LP file
mDual_Standard_GUROBI.write('LP.lp')
print '\n*************************************************************************'
print ' | Decision Variables'
for v in mDual_Standard_GUROBI.getVars():
print ' | ', v.VarName, '=', v.x
print '*************************************************************************'
val = mDual_Standard_GUROBI.objVal
print ' | Objective Value ------------------ ', val
print ' | Aij Sum -------------------------- ', AijSum
print ' | Cj Sum --------------------------- ', CjSum
print ' | Bi Sum --------------------------- ', BiSum
print ' | Matrix Dimensions ---------------- ', Aij.shape
print ' | Date/Time ------------------------ ', dt.datetime.now()
print '*************************************************************************'
print '-- Gurobi Standard Dual Linear Programming Problem --'
print '\nJames Gaboardi, 2015'
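# Illustrative check (not in the original script): by LP strong duality, the
# optimal primal (min) and dual (max) objective values should agree to within
# solver tolerance, since both models were built from the same Aij, Bi and Cj.
print(' | Primal-Dual objective gap -------- ' + str(abs(mPrimal_Standard_GUROBI.objVal - mDual_Standard_GUROBI.objVal)))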
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting Data
Step2: Thermodynamic Calculations
Step3: Basic Skew-T Plotting
Step4: Advanced Skew-T Plotting
Step5: Adding a Hodograph
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.get_wind_components(wind_speed, wind_dir)
# Calculate the LCL
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
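# Illustrative extra (assuming metpy.calc.cape_cin is available in this MetPy
# version): compute the CAPE and CIN values that the shaded regions below represent.
cape, cin = mpcalc.cape_cin(p, T, Td, parcel_prof)
print(cape, cin)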
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner.
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed) # Plot a line colored by wind speed
# Show the plot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2D random walk
Step2: asymmetric random walk
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by this notebook excerpt (the original import cell was not included).
import math
import random

import numpy as np
import matplotlib.pyplot as plt

n_trials = 1000 # number of trials (i.e., number of walks)
n_steps = 1000 # number of steps taken during each trial
distances = [] # use this to keep track of distance for each trial
this_trial = 0
# loop over trials
while this_trial < n_trials:
# reset step and distance to zero
this_distance = 0
step=0
while(step < n_steps):
# random number is either 0 or 1 (heads/tails)
if random.randint(0,1) > 0:
this_distance += 1
else:
this_distance -= 1
step+=1
# keep track of distances
distances.append(abs(this_distance))
this_trial += 1
plt.hist(distances)
distances = np.array(distances) # convert to numpy array to make life easier
# in 1D, asymptotic solution for mean walk length is (n_steps*2/pi)**(1/2)
print(math.sqrt(n_steps*2.0/math.pi),distances.mean(),distances.min(),distances.max())
n_trials_2D = 100 # number of trials (i.e., number of walks)
n_steps_2D = 100 # number of steps per trial
distances_2D = [] # to keep track of distances of each walk
this_trial = 0
# lists for x, y position of *each step* for the last trial.
# (This is just for visualization purposes - we want to make a plot.)
xpos=[]
ypos=[]
while this_trial < n_trials_2D:
x,y = 0.0,0.0
# keep track of walker position for last trial
if this_trial == n_trials_2D - 1:
xpos.append(x)
ypos.append(y)
step=0
# go until we reach the number of steps we want to take
while(step < n_steps_2D):
# pick a random angle between zero and 2*pi
theta = random.random()*2.0*math.pi
# step x and y in that direction
x += math.cos(theta)
y += math.sin(theta)
# if last trial, keep track of walker positions
if this_trial == n_trials_2D - 1:
xpos.append(x)
ypos.append(y)
step+=1
this_distance = (x**2 + y**2)**0.5
#print(this_trial, abs(this_distance))
distances_2D.append(abs(this_distance))
this_trial += 1
plt.hist(distances_2D)
distances_2D = np.array(distances_2D)
# expect roughly sqrt(n_steps_2D) to be the right answer!
print(math.sqrt(n_steps_2D),distances_2D.mean(),distances_2D.min(),distances_2D.max())
plt.plot(0.0,0.0,'ro',xpos,ypos)
#plt.xlim(-16,16)
#plt.ylim(-16,16)
n_trials = 100000
n_steps = 100
prob_right = 0.6
distances = []
this_trial = 0
while this_trial < n_trials:
this_distance = 0
step=0
while(step < n_steps):
if random.random() < prob_right:
this_distance += 1
else:
this_distance -= 1
step+=1
#print(this_trial, abs(this_distance))
distances.append(this_distance)
this_trial += 1
plt.hist(distances)
distances = np.array(distances)
#print(math.sqrt(n_steps*2.0/math.pi),distances.mean(),distances.min(),distances.max())
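# Illustrative check: for a biased walk the expected net displacement is
# n_steps*(2*prob_right - 1), i.e. 100 * 0.2 = 20 steps to the right here.
print(n_steps*(2.0*prob_right - 1.0), distances.mean())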
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up Network and Geometry
Step2: Before we move on to setting up our fluid and physics objects, we must constrict throats in the z and y direction by a factor (Gostick et al included this tightening of throats in only these two directions to create realistic anisotropy in the model). For his SGL simulation, Gostick uses a constriction factor of .95. Finally, because we have changed values for pore and throat diameters (first by accounting for pores and throats that are too big, and the finally constricting throats in the y and z directions), we must recalculate all pore and throat values relying on these diameters.
Step3: OpenPNM makes it very easy to visualize the network we have generated through the "Visualization" methods. We can create vtk files to be viewed using ParaView (downloadable at http
Step4: An example is seen here
Step5: We are now ready to establish physical properties for our fluid objects. To do this, we will
Step6: Running Ordinary Percolation, Fickian Diffusion, and Stokes Flow
Step7: This algorithm performed a start to finish simulation, which fully flooded the network. The 'results()' command can be used to update the phase occupancy values throughout the network. To save some computation, we will filter the invasion points so that relative transport properties can be calculated approximately every 5% increment in saturation. The OrdinaryPercolation object has a method to return the intrusion data as a named tuple of Capillary Pressure (Pcap) and Saturation of the non-wetting phase (Snwp).
Step8: We now define a helper function to update the phases and properties with the results of the OP algorithm. The multiphase conduit conductance model looks at the phase occupancy in the conduits made by the 1/2 pore - throat - 1/2 pore neighbor elements. When the mode is 'strict' the phase must occupy all three elements for the conduit to be considered open to flow for that phase. If the phase is not present in at least one of the elements in the conduit then the throat conductance is divided by 6 orders of magnitude. In this way the conductivity is severely reduced by the presence of the other phase and flow must go around, thus decreasing the permeability/diffusivity of the network.
Step9: The following call will get the pore and throat phase occupancy (an array of 1s and 0s indicating whether the phase occupies a particular pore or throat), update the phase objects, and add the multiphase conduit conductance models to the physics objects.
Step10: The next step will be to calculate effective diffusivity and permeability at different saturations. Note that we want to run Fickian diffusion and Stokes flow algorithms at different points within our ordinary percolation process.
Step11: Now we can repeat the algorithms at each filtered pressure. This process takes about 1 minute.
Step12: Now we can plot the results including those from the paper
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import openpnm as op
import matplotlib.pyplot as plt
import openpnm.models as mods
%matplotlib inline
np.random.seed(10)
Lc = 40.5e-6
# 1. Set up network
sgl = op.network.Cubic(shape=[26, 26, 10], spacing=Lc, name='SGL10BA')
sgl.add_boundary_pores()
proj = sgl.project
wrk = op.Workspace()
wrk.settings['loglevel'] = 50
# 2. Set up geometries
Ps = sgl.pores('*boundary', mode='not')
Ts = sgl.find_neighbor_throats(pores=Ps, mode='xnor', flatten=True)
geo = op.geometry.GenericGeometry(network=sgl,pores=Ps,throats=Ts,name='geo')
geo.add_model(propname='pore.seed',
model=mods.misc.random,
element='pore',
num_range=[0, 0.8834],
seed=None)
geo.add_model(propname='throat.seed',
model=mods.misc.from_neighbor_pores,
pore_prop='pore.seed',
mode='min')
geo.add_model(propname='pore.diameter',
model=mods.geometry.pore_size.weibull,
shape=3.07,
loc=19.97e-6,
scale=1.6e-5)
geo.add_model(propname='throat.diameter',
model=mods.geometry.throat_size.weibull,
shape=3.07,
loc=19.97e-6,
scale=1.6e-5)
geo.add_model(propname='pore.area',
model=mods.geometry.pore_area.sphere)
geo.add_model(propname='pore.volume',
model=mods.geometry.pore_volume.sphere)
geo.add_model(propname='throat.length',
model=mods.geometry.throat_length.ctc)
geo.add_model(propname='throat.volume',
model=mods.geometry.throat_volume.cylinder)
geo.add_model(propname='throat.area',
model=mods.geometry.throat_area.cylinder)
geo.add_model(propname='throat.surface_area',
model=mods.geometry.throat_surface_area.cylinder)
geo.add_model(propname='throat.endpoints',
model=mods.geometry.throat_endpoints.spherical_pores)
geo.add_model(propname='throat.conduit_lengths',
model=mods.geometry.throat_length.conduit_lengths)
Ps = sgl.pores('*boundary')
Ts = sgl.find_neighbor_throats(pores=Ps, mode='or')
boun = op.geometry.Boundary(network=sgl, pores=Ps, throats=Ts, name='boun')
throats = geo.throats()
connected_pores = sgl.find_connected_pores(throats)
x1 = [sgl['pore.coords'][pair[0]][0] for pair in connected_pores]
x2 = [sgl['pore.coords'][pair[1]][0] for pair in connected_pores]
same_x = [x - y == 0 for x, y in zip(x1,x2)]
factor = [s*.95 + (not s)*1 for s in same_x]
throat_diameters = sgl['throat.diameter'][throats]*factor
geo['throat.diameter'] = throat_diameters
geo.regenerate_models(exclude=['throat.diameter'])
import openpnm.io.VTK as iovtk
iovtk.save(network=sgl, filename='network_SGL')
air = op.phases.Air(network = sgl, name = 'air')
water = op.phases.Water(network = sgl, name = 'water')
# Reset pore contact angle
water['pore.contact_angle'] = 100.0
phys_water = op.physics.Standard(network=sgl, phase=water, geometry=geo)
phys_air = op.physics.Standard(network=sgl, phase=air, geometry=geo)
phys_water_b = op.physics.Standard(network=sgl, phase=water, geometry=boun)
phys_air_b = op.physics.Standard(network=sgl, phase=air, geometry=boun)
inlets = sgl.pores('bottom_boundary')
used_inlets = [inlets[x] for x in range(0, len(inlets), 2)]
OP_1 = op.algorithms.OrdinaryPercolation(project=proj)
OP_1.set_inlets(pores=used_inlets)
OP_1.setup(phase=water, pore_volume='pore.volume', throat_volume='throat.volume')
OP_1.run(points=100)
data = OP_1.get_intrusion_data()
# Filter for evenly spaced sat inc. first and last
filter_pc = [data.Pcap[0]]
sat = [data.Snwp[0]]
for i, pc in enumerate(data.Pcap):
if data.Snwp[i] - sat[-1] > 0.05:
filter_pc.append(pc)
sat.append(data.Snwp[i])
filter_pc.append(data.Pcap[-1])
sat.append(data.Snwp[-1])
def update_phase_and_phys(results):
water['pore.occupancy'] = results['pore.occupancy']
air['pore.occupancy'] = 1-results['pore.occupancy']
water['throat.occupancy'] = results['throat.occupancy']
air['throat.occupancy'] = 1-results['throat.occupancy']
# Add multiphase conductances
mode='strict'
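    # In 'strict' mode a conduit (1/2 pore - throat - 1/2 pore) only conducts a phase
    # when that phase occupies all three elements; otherwise the conductance is cut
    # by several orders of magnitude so flow must go around the other phase.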
phys_air.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_diffusive_conductance',
throat_conductance='throat.diffusive_conductance',
mode=mode)
phys_water.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_diffusive_conductance',
throat_conductance='throat.diffusive_conductance',
mode=mode)
phys_air.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_hydraulic_conductance',
throat_conductance='throat.hydraulic_conductance',
mode=mode)
phys_water.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_hydraulic_conductance',
throat_conductance='throat.hydraulic_conductance',
mode=mode)
phys_air_b.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_diffusive_conductance',
throat_conductance='throat.diffusive_conductance',
mode=mode)
phys_water_b.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_diffusive_conductance',
throat_conductance='throat.diffusive_conductance',
mode=mode)
phys_air_b.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_hydraulic_conductance',
throat_conductance='throat.hydraulic_conductance',
mode=mode)
phys_water_b.add_model(model=mods.physics.multiphase.conduit_conductance,
propname='throat.conduit_hydraulic_conductance',
throat_conductance='throat.hydraulic_conductance',
mode=mode)
update_phase_and_phys(OP_1.results(Pc=1e3))
perm_air = {'0': [], '1': [], '2': []}
diff_air = {'0': [], '1': [], '2': []}
perm_water = {'0': [], '1': [], '2': []}
diff_water = {'0': [], '1': [], '2': []}
max_Pc = max(OP_1['throat.invasion_pressure'])
num_seq = 20
pore_volumes = sgl['pore.volume']
throat_volumes = sgl['throat.volume']
totV = np.sum(pore_volumes) + np.sum(throat_volumes)
K_air_single_phase = [None, None, None]
D_air_single_phase = [None, None, None]
K_water_single_phase = [None, None, None]
D_water_single_phase = [None, None, None]
bounds = [['front', 'back'], ['left', 'right'], ['top', 'bottom']]
for bound_increment in range(len(bounds)):
# Run Single phase algs effective properties
BC1_pores = sgl.pores(labels=bounds[bound_increment][0]+'_boundary')
BC2_pores = sgl.pores(labels=bounds[bound_increment][1]+'_boundary')
# Effective permeability : air
sf_air = op.algorithms.StokesFlow(network=sgl, phase=air)
sf_air.setup(conductance='throat.hydraulic_conductance')
sf_air.set_value_BC(values=0.6, pores=BC1_pores)
sf_air.set_value_BC(values=0.2, pores=BC2_pores)
sf_air.run()
K_air_single_phase[bound_increment] = sf_air.calc_effective_permeability()
proj.purge_object(obj=sf_air)
# Effective diffusivity : air
fd_air = op.algorithms.FickianDiffusion(network=sgl,phase=air)
fd_air.setup(conductance='throat.diffusive_conductance')
fd_air.set_value_BC(values=0.6, pores=BC1_pores)
fd_air.set_value_BC(values=0.2, pores=BC2_pores)
fd_air.run()
D_air_single_phase[bound_increment] = fd_air.calc_effective_diffusivity()
proj.purge_object(obj=fd_air)
# Effective permeability : water
sf_water = op.algorithms.StokesFlow(network=sgl, phase=water)
sf_water.setup(conductance='throat.hydraulic_conductance')
sf_water.set_value_BC(values=0.6, pores=BC1_pores)
sf_water.set_value_BC(values=0.2, pores=BC2_pores)
sf_water.run()
K_water_single_phase[bound_increment] = sf_water.calc_effective_permeability()
proj.purge_object(obj=sf_water)
# Effective diffusivity : water
fd_water = op.algorithms.FickianDiffusion(network=sgl,phase=water)
fd_water.setup(conductance='throat.diffusive_conductance')
fd_water.set_value_BC(values=0.6, pores=BC1_pores)
fd_water.set_value_BC(values=0.2, pores=BC2_pores)
fd_water.run()
D_water_single_phase[bound_increment] = fd_water.calc_effective_diffusivity()
proj.purge_object(obj=fd_water)
for Pc in filter_pc:
update_phase_and_phys(OP_1.results(Pc=Pc))
print('-' * 80)
print('Pc', Pc)
for bound_increment in range(len(bounds)):
BC1_pores = sgl.pores(labels=bounds[bound_increment][0]+'_boundary')
BC2_pores = sgl.pores(labels=bounds[bound_increment][1]+'_boundary')
# Multiphase
sf_air = op.algorithms.StokesFlow(network=sgl,phase=air)
sf_air.setup(conductance='throat.conduit_hydraulic_conductance')
sf_water = op.algorithms.StokesFlow(network=sgl,phase=water)
sf_water.setup(conductance='throat.conduit_hydraulic_conductance')
fd_air = op.algorithms.FickianDiffusion(network=sgl,phase=air)
fd_air.setup(conductance='throat.conduit_diffusive_conductance')
fd_water = op.algorithms.FickianDiffusion(network=sgl,phase=water)
fd_water.setup(conductance='throat.conduit_diffusive_conductance')
#BC1
sf_air.set_value_BC(values=0.6, pores=BC1_pores)
sf_water.set_value_BC(values=0.6, pores=BC1_pores)
fd_air.set_value_BC(values=0.6, pores=BC1_pores)
fd_water.set_value_BC(values=0.6, pores=BC1_pores)
#BC2
sf_air.set_value_BC(values=0.2, pores=BC2_pores)
sf_water.set_value_BC(values=0.2, pores=BC2_pores)
fd_air.set_value_BC(values=0.2, pores=BC2_pores)
fd_water.set_value_BC(values=0.2, pores=BC2_pores)
# Run Multiphase algs
sf_air.run()
sf_water.run()
fd_air.run()
fd_water.run()
Keff_air_mphase = sf_air.calc_effective_permeability()
Deff_air_mphase = fd_air.calc_effective_diffusivity()
        Keff_water_mphase = sf_water.calc_effective_permeability()
Deff_water_mphase = fd_water.calc_effective_diffusivity()
Kr_eff_air = Keff_air_mphase / K_air_single_phase[bound_increment]
Kr_eff_water = Keff_water_mphase / K_water_single_phase[bound_increment]
Dr_eff_air = Deff_air_mphase / D_air_single_phase[bound_increment]
Dr_eff_water = Deff_water_mphase / D_water_single_phase[bound_increment]
perm_air[str(bound_increment)].append(Kr_eff_air)
diff_air[str(bound_increment)].append(Dr_eff_air)
perm_water[str(bound_increment)].append(Kr_eff_water)
diff_water[str(bound_increment)].append(Dr_eff_water)
proj.purge_object(obj=sf_air)
proj.purge_object(obj=sf_water)
proj.purge_object(obj=fd_air)
proj.purge_object(obj=fd_water)
#NBVAL_IGNORE_OUTPUT
from matplotlib.font_manager import FontProperties
%matplotlib inline
# Data points taken directly from Gostick's graphs using GraphClick
gostick_saturation_1 = [0.008, 0.04, 0.093, 0.14, 0.193, 0.246, 0.293, 0.337, 0.395, 0.442, 0.496,
0.542, 0.59, 0.641, 0.687, 0.748, 0.793, 0.838, 0.894, 0.945, 0.986]
gostick_perm_air_case1 = [0.917, 0.821, 0.68, 0.568, 0.466, 0.366, 0.286, 0.204, 0.144, 0.096, 0.051, 0.024,
0.003, -1.08E-04, -1.96E-04, -3.12E-04, -3.97E-04, -4.84E-04, -5.90E-04, 0.002, 0.002]
gostick_saturation_2 = [0.99, 0.899, 0.847, 0.802, 0.75, 0.701, 0.645, 0.594, 0.546, 0.497, 0.449,
0.398, 0.348, 0.298, 0.245, 0.196, 0.147, 0.094, 0.044, 0.003]
gostick_perm_water = [0.935, 0.774, 0.709, 0.664, 0.618, 0.572, 0.514, 0.461, 0.401, 0.347,
0.284, 0.211, 0.145, 0.084, 0.044, 0.024, 0.012, 0.001, 0.001, 0.001]
gostick_saturation_3 =[0.006, 0.05, 0.102, 0.151, 0.199, 0.247, 0.297, 0.348, 0.399, 0.447, 0.496,
0.546, 0.597, 0.645, 0.699, 0.75, 0.798, 0.846, 0.899, 0.949, 0.983]
gostick_diff_air_case1 = [0.939, 0.836, 0.725, 0.626, 0.531, 0.442, 0.353, 0.27, 0.203, 0.14, 0.085, 0.048,
0.008, 5.49E-04, 4.48E-04, 3.50E-04, 2.59E-04, 1.67E-04, 0.003, 0.003, 0.003]
gostick_saturation_4 = [0.985, 0.946, 0.898, 0.846, 0.795, 0.749, 0.695, 0.643, 0.596, 0.545, 0.496, 0.448,
0.396, 0.346, 0.298, 0.251, 0.196, 0.146, 0.094]
gostick_diff_water = [0.941, 0.901, 0.853, 0.809, 0.756, 0.7, 0.638, 0.569, 0.503, 0.428, 0.36, 0.291, 0.214, 1.48E-01,
8.00E-02, 4.50E-02, 2.30E-02, 1.60E-02, 0.005]
fontP = FontProperties()
fontP.set_size('small')
# Setting up subplots
fig = plt.figure(figsize=(6, 10), dpi=80, facecolor='w', edgecolor='k')
ax1 = fig.add_subplot(211) #top
ax2 = fig.add_subplot(212) #bottom
x_values1 = [x/20 for x in range(21)]
z = '.75'
# Plots for subplot1 - strict permeability
p1, = ax1.plot(sat, perm_water['0'], color = 'k', linestyle = '-', marker = 'o')
p2, = ax1.plot(sat, perm_water['1'], color = z, linestyle = '-', marker = 'o')
p3, = ax1.plot(sat, perm_water['2'], color = 'b', linestyle = '-', marker = 'o')
p4, = ax1.plot(sat, perm_air['0'], color = 'k', linestyle = '-', marker = '^')
p5, = ax1.plot(sat, perm_air['1'], color = z, linestyle = '-', marker = '^')
p6, = ax1.plot(sat, perm_air['2'], color = 'b', linestyle = '-', marker = '^')
p10, = ax1.plot(x_values1, [x**(3) for x in x_values1], 'k--')
ax1.plot(x_values1, [(1-x)**(3) for x in x_values1], 'k--')
gs1, = ax1.plot(gostick_saturation_1, gostick_perm_air_case1, color = 'r', linestyle = '-', marker = 'D')
gs2, = ax1.plot(gostick_saturation_2, gostick_perm_water, color = 'r', linestyle = '-', marker = 'o')
ax1.set_ylabel('permeability')
ax1.set_xlabel("saturation")
ax1.set_ylim([0,1])
ax1.set_xlim([0,1])
# Need to work on legend to match up with the right things
lgd1 = ax1.legend([p1, p2, p3, p4, p5, p6, p10, gs1, gs2],
["KrWater,x", "KrWater,y", "KrWater,z",
"KrAir,x","KrAir,y","KrAir,z", "a = 3",
"Gostick et al \n KrAir,x (case 1)",
"Gostick et al \n KrWater,x"],
loc='center left', bbox_to_anchor=(1, 0.5), prop=fontP)
# Plots for subplot4 - diffusivity
p11, = ax2.plot(sat, diff_water['0'], color = 'k', linestyle = '-', marker = 'o')
p12, = ax2.plot(sat, diff_water['1'], color = z, linestyle = '-', marker = 'o')
p13, = ax2.plot(sat, diff_water['2'], color = 'b', linestyle = '-', marker = 'o')
p14, = ax2.plot(sat, diff_air['0'], color = 'k', linestyle = '-', marker = '^')
p15, = ax2.plot(sat, diff_air['1'], color = z, linestyle = '-', marker = '^')
p16, = ax2.plot(sat, diff_air['2'], color = 'b', linestyle = '-', marker = '^')
p20, = ax2.plot(x_values1, [x**(2) for x in x_values1], 'k--')
ax2.plot(x_values1, [(1-x)**(2) for x in x_values1], 'k--')
gs3, = ax2.plot(gostick_saturation_3, gostick_diff_air_case1, color = 'r', linestyle = '-', marker = 'D')
gs4, = ax2.plot(gostick_saturation_4, gostick_diff_water, color = 'r', linestyle = '-', marker = 'o')
ax2.set_ylabel('diffusivity')
ax2.set_xlabel("saturation")
ax2.set_ylim([0,1])
ax2.set_xlim([0,1])
lgd2 = ax2.legend([p11, p12, p13, p14, p15, p16, p20, gs3, gs4],
["DrWater,x", "DrWater,y", "DrWater,z",
"DrAir,x","DrAir,y","DrAir,z", "a = 2",
"Gostick et al \n DrAir,x (case 1)",
"Gostick et al \n DrWater,x"],
loc='center left', bbox_to_anchor=(1, 0.5), prop=fontP)
fig.subplots_adjust(left=0.13, right=.7, top=0.95, bottom=0.05)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Training data was collected in the Self-Driving Car simulator on Mac OS using a Playstation 3 console controller.
Step7: Image Preprocessor
Step11: Track 1 Training Dataset class
Step12: Instantiates the Track 1 training dataset, prints details about the object then prints the first 5 elements of the dataframe.
Step13: Feature Plotting
Step14: Steering Histogram Plot
Step15: Steering Line Plot
Step16: Explore the features
Step17: Here I randomize the training data and inject the first 10 measurements into the batch generator. Note each item in X_train is an instance of the RecordingMeasurement class.
Step18: Visualize batch features
Step19: Network Architecture
Step21: Track1 extends BaseNetwork.
Step22: Instantiate the classifier
Step23: Train the network
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by the code below (the original notebook's import cell was not
# included in this excerpt): standard library, scientific stack and Keras 1.x layers.
import os
import json
import math
import random
import uuid

import cv2
import numpy as np
import pandas as pd
from scipy import misc
from sklearn.model_selection import train_test_split

from keras.models import Sequential
from keras.layers import Convolution2D, Dense, Dropout, Flatten, Lambda, MaxPooling2D
from keras.optimizers import Adam


class RecordingMeasurement:
    """
    A representation of a vehicle's state at a point in time while driving
    around a track during recording.

    Features available are:

      left_camera_view   - An image taken by the LEFT camera.
      center_camera_view - An image taken by the CENTER camera.
      right_camera_view  - An image taken by the RIGHT camera.
      steering_angle     - A normalized steering angle in the range -1 to 1.
      speed              - The speed at which the vehicle was traveling at measurement time.

    This class serves the following purposes:

      1. Provides convenience getter methods for the left, center and right camera images.
         In an effort to reduce memory footprint, they're essentially designed
         to lazily instantiate (once) the actual image array at the time the
         method is invoked.
      2. Strips whitespace off the left, center, and right camera image paths.
      3. Casts the original absolute path of each camera image to a relative path.
         This adds reassurance the image will load on any computer.
      4. Provides a convenient is_valid_measurement method which encapsulates
         pertinent logic to ensure data quality is satisfactory.
    """
def __init__(self, measurement_data):
self.measurement_data = measurement_data
self.steering_angle = round(float(measurement_data['steering']), 4)
self.speed = round(float(measurement_data['speed']), 4)
l = measurement_data['left'].strip()
c = measurement_data['center'].strip()
r = measurement_data['right'].strip()
# cast absolute path to relative path to be environment agnostic
l, c, r = [('./IMG/' + os.path.split(file_path)[1]) for file_path in (l, c, r)]
self.left_camera_view_path = l
self.center_camera_view_path = c
self.right_camera_view_path = r
def is_valid_measurement(self):
        """Return true if the original center image is available to load."""
return os.path.isfile(self.center_camera_view_path)
def left_camera_view(self):
        """Lazily instantiates the left camera view image at first call."""
if not hasattr(self, '__left_camera_view'):
self.__left_camera_view = self.__load_image(self.left_camera_view_path)
return self.__left_camera_view
def center_camera_view(self):
        """Lazily instantiates the center camera view image at first call."""
if not hasattr(self, '__center_camera_view'):
self.__center_camera_view = self.__load_image(self.center_camera_view_path)
return self.__center_camera_view
def right_camera_view(self):
        """Lazily instantiates the right camera view image at first call."""
if not hasattr(self, '__right_camera_view'):
self.__right_camera_view = self.__load_image(self.right_camera_view_path)
return self.__right_camera_view
def __load_image(self, imagepath):
image_array = None
if os.path.isfile(imagepath):
image_array = misc.imread(imagepath)
else:
print('File Not Found: {}'.format(imagepath))
return image_array
def __str__(self):
results = []
results.append('Image paths:')
results.append('')
results.append(' Left camera path: {}'.format(self.left_camera_view_path))
results.append(' Center camera path: {}'.format(self.center_camera_view_path))
results.append(' Right camera path: {}'.format(self.right_camera_view_path))
results.append('')
results.append('Additional features:')
results.append('')
results.append(' Steering angle: {}'.format(self.steering_angle))
results.append(' Speed: {}'.format(self.speed))
return '\n'.join(results)
def preprocess_image(image_array, output_shape=(40, 80), colorspace='yuv'):
    """
    Reminder: the source image shape is (160, 320, 3).

    Our preprocessing algorithm consists of the following steps:

      1. Converts BGR to YUV colorspace.
         This allows us to leverage luminance (Y channel - brightness - black and white representation)
         and chrominance (U and V - blue–luminance and red–luminance differences respectively).
      2. Crops the top 31.25% portion and the bottom 12.5% portion.
         The entire width of the image is preserved.
         This allows the model to generalize better to unseen roadways since we crop out
         artifacts such as trees, buildings, etc. above the horizon. We also clip the
         hood from the image.
      3. Finally, I allow users of this algorithm the ability to specify the shape of the final image via
         the output_shape argument.
         Once I've cropped the image, I resize it to the specified shape using the INTER_AREA
         interpolation algorithm as it is the best choice to preserve original image features.
         See the `Scaling` section in the OpenCV documentation:
         http://docs.opencv.org/trunk/da/d6e/tutorial_py_geometric_transformations.html
    """
# convert image to another colorspace
if colorspace == 'yuv':
image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2YUV)
elif colorspace == 'hsv':
image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2HSV)
elif colorspace == 'rgb':
image_array = cv2.cvtColor(image_array, cv2.COLOR_BGR2RGB)
# [y1:y2, x1:x2]
#
# crops top 31.25% portion and bottom 12.5% portion
#
# The entire width of the image is preserved
image_array = image_array[50:140, 0:320]
# Let's blur the image to smooth out some of the artifacts
kernel_size = 5 # Must be an odd number (3, 5, 7...)
image_array = cv2.GaussianBlur(image_array, (kernel_size, kernel_size), 0)
# resize image to output_shape
image_array = cv2.resize(image_array, (output_shape[1], output_shape[0]), interpolation=cv2.INTER_AREA)
return image_array
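# Quick illustrative check of the preprocessing step (a sketch on a synthetic frame,
# since the recorded ./IMG files may not be present): a raw 160x320x3 frame should
# come back cropped, blurred, converted to YUV and resized to (40, 80, 3).
_dummy_frame = np.zeros((160, 320, 3), dtype=np.uint8)
print(preprocess_image(_dummy_frame, output_shape=(40, 80)).shape)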
class Track1Dataset:
    """
    Parses driving_log.csv and constructs training, validation and test datasets corresponding to
    measurements taken at various points in time while recording on track 1.

      * X_train - A set of examples used for learning, that is to fit the parameters [i.e., weights] of the
        classifier.
      * X_val - A set of examples used to tune the hyperparameters [i.e., architecture, not weights] of a
        classifier, for example to choose the number of hidden units in a neural network.
      * X_test - A set of examples used only to assess the performance [generalization] of a fully-specified
        classifier.
      * y_train, y_val, y_test - The steering angle corresponding to their respective X features.
    """
DRIVING_LOG_PATH = './driving_log.csv'
def __init__(self, validation_split_percentage=0.2, test_split_percentage=0.05):
self.X_train = []
self.X_val = []
self.X_test = []
self.y_train = []
self.y_val = []
self.y_test = []
self.dataframe = None
self.headers = []
self.__loaded = False
self.__load(validation_split_percentage=validation_split_percentage,
test_split_percentage=test_split_percentage)
assert self.__loaded == True, 'The dataset was not loaded. Perhaps driving_log.csv is missing.'
def __load(self, validation_split_percentage, test_split_percentage):
        """
        Splits the training data into a validation and test dataset.

          * X_train - A set of examples used for learning, that is to fit the parameters [i.e., weights] of the classifier.
          * X_val - A set of examples used to tune the hyperparameters [i.e., architecture, not weights] of a
            classifier, for example to choose the number of hidden units in a neural network.
          * X_test - A set of examples used only to assess the performance [generalization] of a fully-specified
            classifier.
          * y_train, y_val, y_test - The steering angle corresponding to their respective X features.
        """
if not self.__loaded:
X_train, y_train, headers, df = [], [], [], None
# read in driving_log.csv and construct the
# initial X_train and y_train before splitting
# it into validation and testing sets.
if os.path.isfile(self.DRIVING_LOG_PATH):
df = pd.read_csv(self.DRIVING_LOG_PATH)
headers = list(df.columns.values)
for index, measurement_data in df.iterrows():
measurement = RecordingMeasurement(measurement_data=measurement_data)
X_train.append(measurement)
y_train.append(measurement.steering_angle)
self.__loaded = True
# generate the validation set
X_train, X_val, y_train, y_val = train_test_split(
X_train,
y_train,
test_size=validation_split_percentage,
random_state=0)
X_train, y_train, X_val, y_val = np.array(X_train), np.array(y_train, dtype=np.float32), \
np.array(X_val), np.array(y_val, dtype=np.float32)
# generate the test set
X_train, X_test, y_train, y_test = train_test_split(
X_train,
y_train,
test_size=test_split_percentage,
random_state=0)
X_train, y_train, X_test, y_test = np.array(X_train), np.array(y_train, dtype=np.float32), \
np.array(X_test), np.array(y_test, dtype=np.float32)
self.X_train = X_train
self.X_val = X_val
self.X_test = X_test
self.y_train = y_train
self.y_val = y_val
self.y_test = y_test
self.dataframe = df
self.headers = headers
def batch_generator(self, X, Y, label, num_epochs, batch_size=32, output_shape=(160, 320), flip_images=True,
classifier=None, colorspace='yuv'):
        """
        A custom batch generator with the main goal of reducing memory footprint
        on computers and GPUs with limited memory space.

        Infinitely yields `batch_size` elements from the X and Y datasets.

        During batch iteration, this algorithm randomly flips the image
        and steering angle to reduce bias towards a specific steering angle/direction.
        """
population = len(X)
counter = 0
_index_in_epoch = 0
_tot_epochs = 0
batch_size = min(batch_size, population)
batch_count = int(math.ceil(population / batch_size))
assert X.shape[0] == Y.shape[0], 'X and Y size must be identical.'
print('Batch generating against the {} dataset with population {} and shape {}'.format(label, population,
X.shape))
while True:
counter += 1
print('batch gen iter {}'.format(counter))
for i in range(batch_count):
start_i = _index_in_epoch
_index_in_epoch += batch_size
if _index_in_epoch >= population:
# Save the classifier to support manual early stoppage
if classifier is not None:
classifier.save()
print(' sampled entire population. reshuffling deck and resetting all counters.')
perm = np.arange(population)
np.random.shuffle(perm)
X = X[perm]
Y = Y[perm]
start_i = 0
_index_in_epoch = batch_size
_tot_epochs += 1
end_i = _index_in_epoch
X_batch = []
y_batch = []
for j in range(start_i, end_i):
steering_angle = Y[j]
measurement = X[j]
center_image = measurement.center_camera_view()
if center_image is not None:
image = preprocess_image(center_image, output_shape=output_shape, colorspace=colorspace)
# Here I throw in a random image flip to reduce bias towards
# a specific direction/steering angle.
if flip_images and random.random() > 0.5:
X_batch.append(np.fliplr(image))
y_batch.append(-steering_angle)
else:
X_batch.append(image)
y_batch.append(steering_angle)
yield np.array(X_batch), np.array(y_batch)
def __str__(self):
results = []
results.append('{} Stats:'.format(self.__class__.__name__))
results.append('')
results.append(' [Headers]')
results.append('')
results.append(' {}'.format(self.headers))
results.append('')
results.append('')
results.append(' [Shapes]')
results.append('')
results.append(' training features: {}'.format(self.X_train.shape))
results.append(' training labels: {}'.format(self.y_train.shape))
results.append('')
results.append(' validation features: {}'.format(self.X_val.shape))
results.append(' validation labels: {}'.format(self.y_val.shape))
results.append('')
results.append(' test features: {}'.format(self.X_test.shape))
results.append(' test labels: {}'.format(self.y_test.shape))
results.append('')
results.append(' [Dataframe sample]')
results.append('')
results.append(str(self.dataframe.head(n=5)))
return '\n'.join(results)
dataset = Track1Dataset(validation_split_percentage=0.2, test_split_percentage=0.05)
print(dataset)
%matplotlib inline
import matplotlib.pyplot as plt
dataset.dataframe.plot.hist(alpha=0.5)
dataset.dataframe['steering'].plot.hist(alpha=0.5)
dataset.dataframe['steering'].plot(alpha=0.5)
print('Center camera view shape:\n\n{}\n'.format(dataset.X_train[0].center_camera_view().shape))
print(dataset.X_train[0])
perm = np.arange(len(dataset.X_train))
np.random.shuffle(perm)
output_shape = (40, 80, 3)
generator = dataset.batch_generator(
colorspace='yuv',
X=dataset.X_train[0:10],
Y=dataset.y_train[0:10],
output_shape=output_shape,
label='batch feature exploration',
num_epochs=1,
batch_size=10
)
from zimpy.plot.image_plotter import ImagePlotter
# Grab the first 10 items from the training set and
X_batch, y_batch = next(generator)
# print(X_batch.shape)
# print(y_batch.shape)
# Cast to string so they render nicely in graph
y_batch = [str(x) for x in y_batch]
ImagePlotter.plot_images(X_batch, y_batch, rows=2, columns=5)
ImagePlotter.plot_images(X_batch[:,:,:,0], y_batch, rows=2, columns=5)
ImagePlotter.plot_images(X_batch[:,:,:,1], y_batch, rows=2, columns=5)
ImagePlotter.plot_images(X_batch[:,:,:,2], y_batch, rows=2, columns=5)
class BaseNetwork:
WEIGHTS_FILE_NAME = 'model_final.h5'
MODEL_FILE_NAME = 'model_final.json'
def __init__(self):
self.uuid = uuid.uuid4()
self.model = None
self.weights = None
def fit(self, X_train, y_train, X_val, y_val, nb_epoch=2, batch_size=32, samples_per_epoch=None, output_shape=(40, 80, 3)):
raise NotImplementedError
def build_model(self, input_shape, output_shape, learning_rate=0.001, dropout_prob=0.1, activation='relu'):
raise NotImplementedError
def save(self):
print('Saved {} model.'.format(self.__class__.__name__))
self.__persist()
def __persist(self):
save_dir = os.path.join(os.path.dirname(__file__))
weights_save_path = os.path.join(save_dir, self.WEIGHTS_FILE_NAME)
model_save_path = os.path.join(save_dir, self.MODEL_FILE_NAME)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
self.model.save_weights(weights_save_path)
with open(model_save_path, 'w') as outfile:
json.dump(self.model.to_json(), outfile)
def __str__(self):
results = []
if self.model is not None:
results.append(self.model.summary())
return '\n'.join(results)
class Track1(BaseNetwork):
def fit(self, model, dataset, nb_epoch=2, batch_size=32, samples_per_epoch=None, output_shape=(40, 80, 3)):
# Fit the model leveraging the custom
# batch generator baked into the
# dataset itself.
history = model.fit_generator(
dataset.batch_generator(
X=dataset.X_train,
Y=dataset.y_train,
label='train set',
num_epochs=nb_epoch,
batch_size=batch_size,
output_shape=output_shape,
classifier=self
),
nb_epoch=nb_epoch,
            samples_per_epoch=len(dataset.X_train),
verbose=2,
validation_data=dataset.batch_generator(
X=dataset.X_val,
Y=dataset.y_val,
label='validation set',
num_epochs=nb_epoch,
batch_size=batch_size,
output_shape=output_shape
)
)
print(history.history)
self.save()
def build_model(self, input_shape, output_shape, learning_rate=0.001, dropout_prob=0.1, activation='relu'):
        """
        Initial zero-mean normalization input layer.
        A 4-layer deep neural network with 4 fully connected layers at the top.

        ReLU activation used on each convolution layer.
        Dropout of 10% (default) used after initially flattening after the convolution layers.
        Dropout of 10% (default) used after the first fully connected layer.

        Adam optimizer with 0.001 learning rate (default) used in this network.
        Mean squared error loss function was used since this is a regression problem and MSE is
        quite common and robust for regression analysis.
        """
model = Sequential()
model.add(Lambda(lambda x: x / 255 - 0.5,
input_shape=input_shape,
output_shape=output_shape))
model.add(Convolution2D(24, 5, 5, border_mode='valid', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(36, 5, 5, border_mode='valid', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(48, 5, 5, border_mode='same', activation=activation))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3, border_mode='same', activation=activation))
model.add(Flatten())
model.add(Dropout(dropout_prob))
model.add(Dense(1024, activation=activation))
model.add(Dropout(dropout_prob))
model.add(Dense(100, activation=activation))
model.add(Dense(50, activation=activation))
model.add(Dense(10, activation=activation))
model.add(Dense(1, init='normal'))
optimizer = Adam(lr=learning_rate)
model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
self.model = model
model.summary()
return model
output_shape=(40, 80, 3)
clf = Track1()
model = clf.build_model(
input_shape=output_shape,
output_shape=output_shape,
learning_rate=0.001,
dropout_prob=0.1,
activation='relu'
)
if False:
clf.fit(
model,
dataset,
nb_epoch=2,
batch_size=32
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a ClusterEnsemble object by passing in a numpy array (or list) of redshifts
Step2: Display what we have so far
Step3: Add richness values to the dataframe
Step4: Access any column of the dataframe as an array
Step5: If you don't want units, you can get just the values
Step6: Or access the Pandas DataFrame directly
Step7: Change the redshifts
Step8: Change the mass or richness values
Step9: Change the parameters in the mass-richness relation
Step10: Calculate $\Sigma(r)$ and $\Delta\Sigma(r)$ for NFW model
Step11: Calculate Miscentered NFW Profiles
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
from astropy import units
from matplotlib import pyplot as plt
%matplotlib inline
from clusterlensing import ClusterEnsemble
z = [0.1,0.2,0.3]
c = ClusterEnsemble(z)
c.describe
c.show()
n200 = np.ones(3)*20.
c.n200 = n200
c.show()
print('z: \t', c.z)
print('n200: \t', c.n200)
print('r200: \t', c.r200)
print('m200: \t', c.m200)
print('c200: \t', c.c200)
print('rs: \t', c.rs)
c.r200.value
c.dataframe
c.z = np.array([0.4,0.5,0.6])
c.dataframe
c.m200 = [3e13,2e14,1e15]
c.show()
c.n200 = [20,30,40]
c.show()
c.massrich_slope = 1.5
c.show()
# Show basic table without Pandas formatting
c.show(notebook = False)
rmin, rmax = 0.1, 5. # Mpc
nbins = 20
rbins = np.logspace(np.log10(rmin), np.log10(rmax), num = nbins)
print('rbins range from', rbins.min(), 'to', rbins.max(), 'Mpc')
c.calc_nfw(rbins) # calculate the profiles
sigma = c.sigma_nfw # access the profiles
deltasigma = c.deltasigma_nfw
sigma[0]
fig = plt.figure(figsize=(12,5))
fig.suptitle('Centered NFW Cluster Profiles', size=30)
first = fig.add_subplot(1,2,1)
second = fig.add_subplot(1,2,2)
for rich, profile in zip(c.n200,deltasigma):
first.plot(rbins, profile, label='$N_{200}=$ '+str(rich))
first.set_xscale('log')
first.set_xlabel('$r\ [\mathrm{Mpc}]$', fontsize=20)
first.set_ylabel('$\Delta\Sigma(r)\ [\mathrm{M}_\mathrm{sun}/\mathrm{pc}^2]$',
fontsize=20)
first.set_xlim(rbins.min(), rbins.max())
first.legend(fontsize=20)
for rich, profile in zip(c.n200,sigma):
second.plot(rbins, profile, label='$N_{200}=$ '+str(rich))
second.set_xscale('log')
second.set_xlabel('$r\ [\mathrm{Mpc}]$', fontsize=20)
second.set_ylabel('$\Sigma(r)\ [\mathrm{M}_\mathrm{sun}/\mathrm{pc}^2]$',
fontsize=20)
second.set_xlim(0.05, 1.)
second.set_xlim(rbins.min(), rbins.max())
second.legend(fontsize=20)
fig.tight_layout()
plt.subplots_adjust(top=0.88)
offsets = np.array([0.1,0.1,0.1]) #same length as number of clusters
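# Assumption in this sketch: each offset is interpreted as the miscentering distance
# (in Mpc) between the assumed and true halo centers; calc_nfw with offsets returns
# the smoothed ("miscentered") profiles plotted below.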
c.calc_nfw(rbins, offsets=offsets)
deltasigma_off = c.deltasigma_nfw
sigma_off = c.sigma_nfw
fig = plt.figure(figsize=(12,5))
fig.suptitle('Miscentered NFW Cluster Profiles', size=30)
first = fig.add_subplot(1,2,1)
second = fig.add_subplot(1,2,2)
for rich, profile in zip(c.n200,deltasigma_off):
first.plot(rbins, profile, label='$N_{200}=$ '+str(rich))
first.set_xscale('log')
first.set_xlabel('$r\ [\mathrm{Mpc}]$', fontsize=20)
ytitle = '$\Delta\Sigma^\mathrm{sm}(r)\ [\mathrm{M}_\mathrm{sun}/\mathrm{pc}^2]$'
first.set_ylabel(ytitle, fontsize=20)
first.set_xlim(rbins.min(), rbins.max())
first.legend(fontsize=20)
for rich, profile in zip(c.n200,sigma_off):
second.plot(rbins, profile, label='$N_{200}=$ '+str(rich))
second.set_xscale('log')
second.set_xlabel('$r\ [\mathrm{Mpc}]$', fontsize=20)
ytitle = '$\Sigma^\mathrm{sm}(r)\ [\mathrm{M}_\mathrm{sun}/\mathrm{pc}^2]$'
second.set_ylabel(ytitle, fontsize=20)
second.set_xlim(rbins.min(), rbins.max())
second.legend(fontsize=20)
fig.tight_layout()
plt.subplots_adjust(top=0.88)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in an Image
Step9: Ideas for Lane Detection Pipeline
Step10: Test Images
Step11: Build a Lane Finding Pipeline
Step12: Test on Videos
Step13: Let's try the one with the solid white lane on the right first ...
Step15: Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
Step17: Improve the draw_lines() function
Step19: Writeup and Submission
|
<ASSISTANT_TASK:>
Python Code:
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
import math
def grayscale(img):
    """
    Applies the Grayscale transform.

    This will return an image with only one color channel
    but NOTE: to see the returned image as grayscale
    (assuming your grayscaled image is called 'gray')
    you should call plt.imshow(gray, cmap='gray')
    """
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    """
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).

    Think about things like separating line segments by their
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line. Then, you can average the position of each of
    the lines and extrapolate to the top and bottom of the lane.

    This function draws `lines` with `color` and `thickness`.
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below.
    """
# Right/Left Slope
lslope = []
rslope = []
# Right/Left Centers
lcenter = []
rcenter = []
for line in lines:
for x1, y1, x2, y2 in line:
slope = (y2 - y1) / (x2 - x1)
center = [(x1 + x2) / 2, (y1 + y2) / 2]
if slope > 0.5 and slope < 1.0: # Right Lane
rslope.append(slope)
rcenter.append(center)
if slope < -0.5 and slope > -1.0: # Left Lane
lslope.append(slope)
lcenter.append(center)
lslope_avg = np.sum(lslope) / len(lslope)
rslope_avg = np.sum(rslope) / len(rslope)
lcenter_avg = np.divide(np.sum(lcenter, axis=0), len(lcenter))
rcenter_avg = np.divide(np.sum(rcenter, axis=0), len(rcenter))
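    # Extrapolate each averaged lane line from the bottom of the image (y = height)
    # up to ~60% of the image height using the point-slope form:
    #   x = (y - y_center) / slope + x_center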
ly1 = int(img.shape[0])
lx1 = int((ly1 - lcenter_avg[1]) / lslope_avg + lcenter_avg[0])
ly2 = int(img.shape[0] * 0.6)
lx2 = int((ly2 - lcenter_avg[1]) / lslope_avg + lcenter_avg[0])
ry1 = int(img.shape[0])
rx1 = int((ry1 - rcenter_avg[1]) / rslope_avg + rcenter_avg[0])
ry2 = int(img.shape[0] * 0.6)
rx2 = int((ry2 - rcenter_avg[1]) / rslope_avg + rcenter_avg[0])
cv2.line(img, (lx1, ly1), (lx2, ly2), color, thickness)
cv2.line(img, (rx1, ry1), (rx2, ry2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len,
maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines, thickness=10)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    `img` is the output of hough_lines(), an image with lines drawn on it.
    Should be a blank image (all black) with lines drawn on it.

    `initial_img` should be the image before any processing.

    The result image is computed as follows:

        initial_img * α + img * β + γ

    NOTE: initial_img and img must be the same shape!
    """
return cv2.addWeighted(initial_img, α, img, β, γ)
import os
os.listdir("test_images/")
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
low_threshold = 50 # Canny edge detection
high_threshold = 150 # Canny edge detection
kernel_size = 5 # Gaussian blurring
rho = 2 # Hough Tranform, distance resolution in pixels of the Hough grid
theta = np.pi / 180 # Hough Tranform, angular resolution in radians of Hough grid
threshold = 15 # Hough Tranform, minimum number of votes (intersections in Hough grid cell)
min_line_len = 40 # Hough Tranform, minimum number of pixels making up a line
max_line_gap = 20 # Hough Tranform, maximum gap in pixels between connectable line segments
original_images = os.listdir('test_images/')
for image in original_images:
img = mpimg.imread('test_images/' + image)
vertices = np.array([[(0, img.shape[0]), (450, 320),
(510, 320), (img.shape[1], img.shape[0])]],
dtype=np.int32) # Image mask polygon
gray_img = grayscale(img) # Greyed out image
edge_img = canny(gray_img, low_threshold, high_threshold) # Canny edges
mask_img = region_of_interest(edge_img, vertices) # Region of interest
line_img = hough_lines(mask_img, rho, theta, threshold,
min_line_len, max_line_gap)
lane_line_img = weighted_img(img, line_img)
#cv2.imwrite('test_images_output/' + image, lane_line_img)
mpimg.imsave('test_images_output/' + image, lane_line_img)
#reading in an image
image = mpimg.imread('test_images_output/whiteCarLaneSwitch.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
vertices = np.array([[(0, image.shape[0]), (450, 320),
(510, 320), (image.shape[1], image.shape[0])]],
dtype=np.int32) # Image mask polygon
gray_img = grayscale(image) # Greyed out image
edge_img = canny(gray_img, low_threshold, high_threshold) # Canny edges
mask_img = region_of_interest(edge_img, vertices) # Region of interest
line_img = hough_lines(mask_img, rho, theta, threshold,
min_line_len, max_line_gap)
lane_line_img = weighted_img(image, line_img)
return lane_line_img
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get familiar with the following basic tensorflow methods
Step2: Linear Regression example
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
# Define C=B*A in a symbolic way
A = tf.Variable(tf.ones([10]))
B = tf.constant(np.ones(10)*2, tf.float32)
C = tf.multiply(A, B)
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
# initialize variables
sess.run(init)
# run the graph and evaluate C
c = sess.run([C])
print('c:', c)
# Generate ground truth 100 x, y data points in NumPy, y = 3.0 * x + 1.0
# Regress for W and b that compute y_data = W * x_data + b
x_data = np.random.rand(100).astype(np.float32)
y_data = 3.0 * x_data + 1.0
plt.plot(x_data, y_data)
# define trainable variables
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.zeros([1]))
# define graph operations
y = tf.multiply(W, x_data) + b
# define loss, L2
loss = tf.reduce_mean(tf.square(y - y_data))
# define optimizer for training
train_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss)
# define the operation that initializes variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
# initialization
sess.run(init)
# starting training
training_iters = 100
for step in range(training_iters):
if step % 20 == 0 or (step+1)==training_iters:
print(step, sess.run(W), sess.run(b))
# run optimizer during training
_ = sess.run([train_optimizer])
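    # After training, W and b should be close to the ground truth (3.0, 1.0);
    # printing the final loss is a simple illustrative sanity check.
    print('final W, b, loss:', sess.run(W), sess.run(b), sess.run(loss))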
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Element, Specie and Composition objects
Step2: You can see that units are printed for atomic masses and ionic radii. Pymatgen comes with a complete system of managing units in pymatgen.core.unit. A Unit is a subclass of float that attaches units and handles conversions. For example,
Step3: Please refer to the Units example for more information on units. Species are like Elements, except they have an explicit oxidation state. They can be used wherever Element is used for the most part.
Step4: A Composition is essentially an immutable mapping of Elements/Species with amounts, and useful properties like molecular weight, get_atomic_fraction, etc. Note that you can conveniently either use an Element/Specie object or a string as keys (this is a feature).
Step5: Lattice & Structure objects
Step6: A Structure object represents a crystal structure (lattice + basis). A Structure is essentially a list of PeriodicSites with the same Lattice. Let us now create a CsCl structure.
Step7: The Structure object contains many useful manipulation functions. Since Structure is essentially a list, it contains a simple pythonic API for manipulating its sites. Some examples are given below. Please note that there is an immutable version of Structure known as IStructure, for the use case where you really need to enforce that the structure does not change. Conversion between these forms of Structure can be performed using from_sites().
Step8: Basic analyses
Step9: We also have an extremely powerful structure matching tool.
Step10: Input/output
Step11: The vaspio_set module provides a means to obtain a complete set of VASP input files for performing calculations. Several useful presets based on the parameters used in the Materials Project are provided.
|
<ASSISTANT_TASK:>
Python Code:
import pymatgen as mg
si = mg.Element("Si")
print("Atomic mass of Si is {}".format(si.atomic_mass))
print("Si has a melting point of {}".format(si.melting_point))
print("Ionic radii for Si: {}".format(si.ionic_radii))
print("Atomic mass of Si in kg: {}".format(si.atomic_mass.to("kg")))
fe2 = mg.Specie("Fe", 2)
print(fe2.atomic_mass)
print(fe2.ionic_radius)
comp = mg.Composition("Fe2O3")
print("Weight of Fe2O3 is {}".format(comp.weight))
print("Amount of Fe in Fe2O3 is {}".format(comp["Fe"]))
print("Atomic fraction of Fe is {}".format(comp.get_atomic_fraction("Fe")))
print("Weight fraction of Fe is {}".format(comp.get_wt_fraction("Fe")))
# Creates cubic Lattice with lattice parameter 4.2
lattice = mg.Lattice.cubic(4.2)
print(lattice.lengths_and_angles)
structure = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
print("Unit cell vol = {}".format(structure.volume))
print("First site of the structure is {}".format(structure[0]))
structure.make_supercell([2, 2, 1]) #Make a 3 x 2 x 1 supercell of the structure
del structure[0] #Remove the first site
structure.append("Na", [0,0,0]) #Append a Na atom.
structure[-1] = "Li" #Change the last added atom to Li.
structure[0] = "Cs", [0.01, 0.5, 0] #Shift the first atom by 0.01 in fractional coordinates in the x-direction.
immutable_structure = mg.IStructure.from_sites(structure) #Create an immutable structure (cannot be modified).
print(immutable_structure)
#Determining the symmetry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
finder = SpacegroupAnalyzer(structure)
print("The spacegroup is {}".format(finder.get_space_group_symbol()))
from pymatgen.analysis.structure_matcher import StructureMatcher
#Let's create two structures which are the same topologically, but with different elements, and one lattice is larger.
s1 = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s2 = mg.Structure(mg.Lattice.cubic(5), ["Rb", "F"], [[0, 0, 0], [0.5, 0.5, 0.5]])
m = StructureMatcher()
print(m.fit_anonymous(s1, s2)) #Returns a mapping which maps s1 and s2 onto each other. Strict element fitting is also available.
#Convenient IO to various formats. Format is intelligently determined from file name and extension.
structure.to(filename="POSCAR")
structure.to(filename="CsCl.cif")
#Or if you just supply fmt, you simply get a string.
print(structure.to(fmt="poscar"))
print(structure.to(fmt="cif"))
#Reading a structure from a file.
structure = mg.Structure.from_file("POSCAR")
from pymatgen.io.vasp.sets import MPRelaxSet
v = MPRelaxSet(structure)
v.write_input("MyInputFiles") #Writes a complete set of input files for structure to the directory MyInputFiles
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = lambda x: 1/(1+np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs)
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Backpropagated error
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer
hidden_grad = hidden_outputs*(1-hidden_outputs)
# TODO: Update the weights
self.weights_hidden_to_output += self.lr * np.dot(output_errors, hidden_outputs.T)# update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * np.dot((hidden_grad*hidden_errors), inputs.T) # update input-to-hidden weights with gradient descent step
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)# signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import sys
### Set the hyperparameters here ###
epochs = 3000
learning_rate = 0.01
hidden_nodes = 15
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Environment Preparation
Step2: Install Analytics Zoo
Step3: Distributed TensorFlow (v1.15) using Orca APIs
Step4: Step 1
Step5: This is the only place where you need to specify local or distributed mode. View Orca Context for more details.
Step6: Step 3
Step7: Step 4
Step8: Next, fit the Estimator.
Step9: Finally, evaluate using the Estimator.
Step10: Now, the accuracy of this model has reached 98%.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install jdk8
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
import os
# Set environment variable JAVA_HOME.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
!java -version
# Install latest pre-release version of Analytics Zoo
# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo
# Install required dependencies
!pip install tensorflow==1.15.0 tensorflow-datasets==2.1.0
# import necesary libraries and modules
import argparse
from zoo.orca.learn.tf.estimator import Estimator
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
OrcaContext.log_output = True # recommended to set it to True when running Analytics Zoo in Jupyter notebook (this will display terminal's stdout and stderr in the Jupyter notebook).
cluster_mode = "local"
if cluster_mode == "local":
init_orca_context(cluster_mode="local", cores=4) # run in local mode
elif cluster_mode == "k8s":
init_orca_context(cluster_mode="k8s", num_nodes=2, cores=2) # run on K8s cluster
elif cluster_mode == "yarn":
init_orca_context(cluster_mode="yarn-client", num_nodes=2, cores=2) # run on Hadoop YARN cluster
import tensorflow as tf
def accuracy(logits, labels):
predictions = tf.argmax(logits, axis=1, output_type=labels.dtype)
is_correct = tf.cast(tf.equal(predictions, labels), dtype=tf.float32)
return tf.reduce_mean(is_correct)
def lenet(images):
with tf.variable_scope('LeNet', [images]):
net = tf.layers.conv2d(images, 32, (5, 5), activation=tf.nn.relu, name='conv1')
net = tf.layers.max_pooling2d(net, (2, 2), 2, name='pool1')
net = tf.layers.conv2d(net, 64, (5, 5), activation=tf.nn.relu, name='conv2')
net = tf.layers.max_pooling2d(net, (2, 2), 2, name='pool2')
net = tf.layers.flatten(net)
net = tf.layers.dense(net, 1024, activation=tf.nn.relu, name='fc3')
logits = tf.layers.dense(net, 10)
return logits
# tensorflow inputs
images = tf.placeholder(dtype=tf.float32, shape=(None, 28, 28, 1))
# tensorflow labels
labels = tf.placeholder(dtype=tf.int32, shape=(None,))
logits = lenet(images)
loss = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels))
acc = accuracy(logits, labels)
import tensorflow_datasets as tfds
def preprocess(data):
data['image'] = tf.cast(data["image"], tf.float32) / 255.
return data['image'], data['label']
# get DataSet
dataset_dir = "~/tensorflow_datasets"
mnist_train = tfds.load(name="mnist", split="train", data_dir=dataset_dir)
mnist_test = tfds.load(name="mnist", split="test", data_dir=dataset_dir)
mnist_train = mnist_train.map(preprocess)
mnist_test = mnist_test.map(preprocess)
est = Estimator.from_graph(inputs=images,
outputs=logits,
labels=labels,
loss=loss,
optimizer=tf.train.AdamOptimizer(),
metrics={"acc": acc})
max_epoch = 1
est.fit(data=mnist_train,
batch_size=320,
epochs=max_epoch,
validation_data=mnist_test)
result = est.evaluate(mnist_test)
print(result)
est.save_tf_checkpoint("/tmp/lenet/model")
# Stop orca context when your program finishes
stop_orca_context()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Read the data
Step3: GTZAN Melspectrogram Generator
Step4: Custom CNN (Melspectrogram version)
Step6: Loss function
Step7: Majority Vote
Step8: Compared to the classical approach, we are winning now!
|
<ASSISTANT_TASK:>
Python Code:
# Method to split a song into multiple songs using overlapping windows
def splitsongs(X, y, window = 0.05, overlap = 0.5):
# Empty lists to hold our results
temp_X = []
temp_y = []
# Get the input song array size
xshape = X.shape[0]
chunk = int(xshape*window)
offset = int(chunk*(1.-overlap))
# Split the song and create new ones on windows
spsong = [X[i:i+chunk] for i in range(0, xshape - chunk + offset, offset)]
for s in spsong:
if s.shape[0] != chunk:
continue
temp_X.append(s)
temp_y.append(y)
return np.array(temp_X), np.array(temp_y)
# Method to convert a list of songs to a np array of melspectrograms
def to_melspectrogram(songs, n_fft=1024, hop_length=256):
# Transformation function
melspec = lambda x: librosa.feature.melspectrogram(x, n_fft=n_fft,
hop_length=hop_length, n_mels=128)[:,:,np.newaxis]
# map transformation of input songs to melspectrogram using log-scale
tsongs = map(melspec, songs)
# np.array([librosa.power_to_db(s, ref=np.max) for s in list(tsongs)])
return np.array(list(tsongs))
def split_convert(X, y):
arr_specs, arr_genres = [], []
# Convert to spectrograms and split into small windows
for fn, genre in zip(X, y):
signal, sr = librosa.load(fn)
signal = signal[:song_samples]
# Convert to dataset of spectograms/melspectograms
signals, y = splitsongs(signal, genre)
# Convert to "spec" representation
specs = to_melspectrogram(signals)
# Save files
arr_genres.extend(y)
arr_specs.extend(specs)
return np.array(arr_specs), to_categorical(arr_genres)
def read_data(src_dir, genres, song_samples):
# Empty array of dicts with the processed features from all files
arr_fn = []
arr_genres = []
# Get file list from the folders
for x,_ in genres.items():
folder = src_dir + x
for root, subdirs, files in os.walk(folder):
for file in files:
file_name = folder + "/" + file
# Save the file name and the genre
arr_fn.append(file_name)
arr_genres.append(genres[x])
# Split into train and test
X_train, X_test, y_train, y_test = train_test_split(
arr_fn, arr_genres, test_size=0.3, random_state=42, stratify=arr_genres
)
# Split into small segments and convert to spectrogram
X_train, y_train = split_convert(X_train, y_train)
X_test, y_test = split_convert(X_test, y_test)
return X_train, X_test, y_train, y_test
# Parameters
gtzan_dir = '../data/genres/'
song_samples = 660000
genres = {'metal': 0, 'disco': 1, 'classical': 2, 'hiphop': 3, 'jazz': 4,
'country': 5, 'pop': 6, 'blues': 7, 'reggae': 8, 'rock': 9}
# Read the data
X_train, X_test, y_train, y_test = read_data(gtzan_dir, genres, song_samples)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# Histogram for train and test
values, count = np.unique(np.argmax(y_train, axis=1), return_counts=True)
plt.bar(values, count)
values, count = np.unique(np.argmax(y_test, axis=1), return_counts=True)
plt.bar(values, count)
plt.show()
from tensorflow.keras.utils import Sequence
class GTZANGenerator(Sequence):
def __init__(self, X, y, batch_size=64, is_test = False):
self.X = X
self.y = y
self.batch_size = batch_size
self.is_test = is_test
def __len__(self):
return int(np.ceil(len(self.X)/self.batch_size))
def __getitem__(self, index):
# Get batch indexes
signals = self.X[index*self.batch_size:(index+1)*self.batch_size]
# Apply data augmentation
if not self.is_test:
signals = self.__augment(signals)
return signals, self.y[index*self.batch_size:(index+1)*self.batch_size]
def __augment(self, signals, hor_flip = 0.5, random_cutout = 0.5):
spectrograms = []
for s in signals:
signal = copy(s)
# Perform horizontal flip
if np.random.rand() < hor_flip:
signal = np.flip(signal, 1)
            # Perform random cutout of some frequency/time
if np.random.rand() < random_cutout:
lines = np.random.randint(signal.shape[0], size=3)
cols = np.random.randint(signal.shape[0], size=4)
signal[lines, :, :] = -80 # dB
signal[:, cols, :] = -80 # dB
spectrograms.append(signal)
return np.array(spectrograms)
def on_epoch_end(self):
self.indexes = np.arange(len(self.X))
np.random.shuffle(self.indexes)
return None
def conv_block(x, n_filters, pool_size=(2, 2)):
x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same')(x)
x = Activation('relu')(x)
x = MaxPooling2D(pool_size=pool_size, strides=pool_size)(x)
x = Dropout(0.25)(x)
return x
# Model Definition
def create_model(input_shape, num_genres):
inpt = Input(shape=input_shape)
x = conv_block(inpt, 16)
x = conv_block(x, 32)
x = conv_block(x, 64)
x = conv_block(x, 128)
x = conv_block(x, 256)
# Global Pooling and MLP
x = Flatten()(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu',
kernel_regularizer=tf.keras.regularizers.l2(0.02))(x)
x = Dropout(0.25)(x)
predictions = Dense(num_genres,
activation='softmax',
kernel_regularizer=tf.keras.regularizers.l2(0.02))(x)
model = Model(inputs=inpt, outputs=predictions)
return model
model = create_model(X_train[0].shape, 10)
model.summary()
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
reduceLROnPlat = ReduceLROnPlateau(
monitor='val_loss',
factor=0.95,
patience=3,
verbose=1,
mode='min',
min_delta=0.0001,
cooldown=2,
min_lr=1e-5
)
# Generators
batch_size = 128
train_generator = GTZANGenerator(X_train, y_train)
steps_per_epoch = np.ceil(len(X_train)/batch_size)
validation_generator = GTZANGenerator(X_test, y_test)
val_steps = np.ceil(len(X_test)/batch_size)
hist = model.fit_generator(
train_generator,
steps_per_epoch=steps_per_epoch,
validation_data=validation_generator,
validation_steps=val_steps,
epochs=150,
verbose=1,
callbacks=[reduceLROnPlat])
score = model.evaluate(X_test, y_test, verbose=0)
print("val_loss = {:.3f} and val_acc = {:.3f}".format(score[0], score[1]))
plt.figure(figsize=(15,7))
plt.subplot(1,2,1)
plt.plot(hist.history['accuracy'], label='train')
plt.plot(hist.history['val_accuracy'], label='validation')
plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.subplot(1,2,2)
plt.plot(hist.history['loss'], label='train')
plt.plot(hist.history['val_loss'], label='validation')
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.tight_layout()
plt.show()
#http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
    # This function prints and plots the confusion matrix.
    # Normalization can be applied by setting `normalize=True`.
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
preds = np.argmax(model.predict(X_test), axis = 1)
y_orig = np.argmax(y_test, axis = 1)
cm = confusion_matrix(preds, y_orig)
keys = OrderedDict(sorted(genres.items(), key=lambda t: t[1])).keys()
plt.figure(figsize=(10,10))
plot_confusion_matrix(cm, keys, normalize=True)
def majority_vote(scores):
values, counts = np.unique(scores,return_counts=True)
ind = np.argmax(counts)
return values[ind]
preds = model.predict(X_test, batch_size=128, verbose=0)
# Each sound was divided into 39 segments in our custom function
scores_songs = np.split(np.argmax(preds, axis=1), 300)
scores_songs = [majority_vote(scores) for scores in scores_songs]
# Same analysis for split
label = np.split(np.argmax(y_test, axis=1), 300)
label = [majority_vote(l) for l in label]
from sklearn.metrics import accuracy_score
print("majority voting system (acc) = {:.3f}".format(accuracy_score(label, scores_songs)))
# Save the model
model.save('../models/custom_cnn_2d.h5')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We define the model, adapted from the Keras CIFAR-10 example
Step2: We train the model using the
Step3: Now let's train the model again, using the XLA compiler.
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
# Check that GPU is available: cf. https://colab.research.google.com/notebooks/gpu.ipynb
assert(tf.test.is_gpu_available())
tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(False) # Start with XLA disabled.
def load_data():
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = x_train.astype('float32') / 256
x_test = x_test.astype('float32') / 256
# Convert class vectors to binary class matrices.
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)
return ((x_train, y_train), (x_test, y_test))
(x_train, y_train), (x_test, y_test) = load_data()
def generate_model():
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Conv2D(32, (3, 3)),
tf.keras.layers.Activation('relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Conv2D(64, (3, 3), padding='same'),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Conv2D(64, (3, 3)),
tf.keras.layers.Activation('relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Dropout(0.25),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10),
tf.keras.layers.Activation('softmax')
])
model = generate_model()
def compile_model(model):
opt = tf.keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
return model
model = compile_model(model)
def train_model(model, x_train, y_train, x_test, y_test, epochs=25):
model.fit(x_train, y_train, batch_size=256, epochs=epochs, validation_data=(x_test, y_test), shuffle=True)
def warmup(model, x_train, y_train, x_test, y_test):
# Warm up the JIT, we do not wish to measure the compilation time.
initial_weights = model.get_weights()
train_model(model, x_train, y_train, x_test, y_test, epochs=1)
model.set_weights(initial_weights)
warmup(model, x_train, y_train, x_test, y_test)
%time train_model(model, x_train, y_train, x_test, y_test)
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# We need to clear the session to enable JIT in the middle of the program.
tf.keras.backend.clear_session()
tf.config.optimizer.set_jit(True) # Enable XLA.
model = compile_model(generate_model())
(x_train, y_train), (x_test, y_test) = load_data()
warmup(model, x_train, y_train, x_test, y_test)
%time train_model(model, x_train, y_train, x_test, y_test)
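# (Added sketch) mirror the earlier evaluation so the XLA run can also be compared on accuracy.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss (XLA):', scores[0])
print('Test accuracy (XLA):', scores[1])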
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1
Step2: Solution 1
Step3: Supplement
Step4: Decorator with argument
Step5: Decorator assignment
Step6: Decorator assignment (2)
Step8: Decorator scenario
Step9: Case 2
Step10: Case 3
Step11: Case 4
Step12: Decorator trick
Step13: Flask
Step14: Context manager
Step15: The with statement used above is equivalent to the code listed below
Step16: Context manager decorator
Step17: At this point you may wonder how these two are the same, and what happened to the value that gets yielded?
Step18: Contextmanager limitation
Step19: with statement
Step20: with + @contextmanager
Step21: Case 1
Step22: Case 2
Step23: Study 1
|
<ASSISTANT_TASK:>
Python Code:
def deco(func):
def wrapped():
print('Before func')
func()
print('After func')
return wrapped
@deco
def f1():
print('This is f1 function')
f1()
def deco(func):
def wrapped():
print('Before func')
func()
print('After func')
return wrapped
@deco
def f1():
print('This is f1 function')
print(f1.__name__)
from functools import wraps
def deco(func):
@wraps(func)
def wrapped():
print('Before func')
func()
print('After func')
return wrapped
@deco
def f1():
print('This is f1 function')
print(f1.__name__)
import time
from functools import wraps
def deco(func):
@wraps(func)
def wrapped(*args, **kwargs):
print('Before func')
print('---')
print('{} argument:\nargs = {}\nkwargs = {}\n'.format(
func.__name__, args, kwargs
))
func(*args, **kwargs)
print('---')
print('After func')
return wrapped
@deco
def f1(*args, **kwargs):
print('This is f1 function')
f1(1, '2', [3], {'4': 4}, time=time.ctime())
from functools import wraps
def tag(name):
def deco(func):
@wraps(func)
def wrapped(*args, **kwargs):
print('<{}>'.format(name))
func(*args, **kwargs)
print('</{}>'.format(name))
return wrapped
return deco
@tag('p')
def content(*args, **kwargs):
for i in args:
print(i)
content('Hello World.', 'This is second argument.')
from functools import wraps
def tag(name):
def deco(func):
@wraps(func)
def wrapped(*args, **kwargs):
print('<{}>'.format(name))
func(*args, **kwargs)
print('</{}>'.format(name))
return wrapped
return deco
tag_p = tag('p')
@tag_p
def content(*args, **kwargs):
for i in args:
print(i)
content('Hello World.', 'This is second argument.')
from functools import wraps
def tag(name):
def deco(func):
@wraps(func)
def wrapped(*args, **kwargs):
print('<{}>'.format(name))
func(*args, **kwargs)
print('</{}>'.format(name))
return wrapped
return deco
tag_div = tag('div')
tag_p = tag('p')
@tag_div
@tag_p
def content(*args, **kwargs):
for i in args:
print(i)
content('Hello World.', 'This is second argument.')
import time
import logging
from functools import wraps
LOGGER = logging.getLogger(__name__)
def func_profiling(func):
@wraps(func)
def wrapped(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
time_spent = time.time() - start_time
fullname = '{}.{}'.format(func.__module__, func.__name__)
LOGGER.debug('{}[args={}, kwargs={}] completed in {}'.format(
fullname, args, kwargs, time_spent
))
return result
return wrapped
@func_profiling
def test_func_profiling(msg=None):
import random
sleep_sec = random.randrange(1,3)
LOGGER.debug('random sleep in {} sec'.format(sleep_sec))
time.sleep(sleep_sec)
LOGGER.info(msg)
LOGGER.debug('Wake up')
if __name__ == '__main__':
    # testing
import sys
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)12s:L%(lineno)3s [%(levelname)8s] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout
)
test_func_profiling('Hello World')
from functools import wraps
def author(email):
def sub_command(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
func(*args, **kwargs)
except ValueError:
'''you can use your own defined exception'''
print('some useful message to debug')
except Exception:
                print('Unexpected exception, please contact author: {}'.format(email))
return wrapped
return sub_command
@author('afun@example.com')
def divide(a, b):
print('{}/{} = {}'.format(a, b, a/b))
return a/b
divide(6, 2)
divide(6, 0)
from functools import lru_cache
@lru_cache()
def heavy_jobs(x):
print('do some heavy jobs with input {}'.format(x))
return x+1000
print(heavy_jobs(1))
print(heavy_jobs(1))
print(heavy_jobs(2))
print(heavy_jobs(1))
print(heavy_jobs(2))
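# (Added sketch) functools exposes cache statistics, handy to confirm which of the
# calls above were served from the cache (expected here: misses=2, hits=3).
print(heavy_jobs.cache_info())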
import json
from functools import wraps
def cache_json(filename):
def deco(func):
@wraps(func)
def wrapped(*args, **kwargs):
try:
print('Before try wrapped')
return json.load(open(filename))
except FileNotFoundError:
print('Before except wrapped')
data = func(*args, **kwargs)
json.dump(data, open(filename, 'w'))
return data
return wrapped
return deco
@cache_json('heavy.json')
def heavy_jobs(*args, **kwargs):
print('do heavy jobs')
if 'count' in kwargs:
return kwargs['count']
return
print(heavy_jobs(user='afun', count=5))
from functools import wraps
function_map = {}
def deco(func):
global function_map
function_map[func.__name__] = func
@wraps(func)
def wrapped(*args, **kwargs):
func(*args, **kwargs)
return wrapped
@deco
def f1():
print('This is f1')
@deco
def f2():
print('This is f2')
print(function_map)
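# (Added sketch) the registry built by deco can double as a tiny dispatcher.
name = 'f2'
if name in function_map:
    function_map[name]()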
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
return 'Index page'
@app.route('/hello')
def hello():
return 'Hello, world!'
@app.route('/post/<int:post_id>')
def show_post(post_id):
return 'Post {}'.format(post_id)
class my_context(object):
def __enter__(self):
print('in enter')
return 'enter'
def __exit__(self, *excinfo):
print('in exit')
return 'exit'
with my_context() as f:
print('Hello')
print('World')
context = my_context()
obj = context.__enter__()
try:
print('Hello')
print('World')
except Exception as e:
    if not context.__exit__(*sys.exc_info()):
raise e
else:
context.__exit__()
from contextlib import contextmanager
@contextmanager
def my_context():
print('do things in enter')
yield 'It is a feature, not a bug!!!'
print('do things in exit')
with my_context() as obj:
print('Hello')
print('World')
@contextmanager
def my_context():
print('do things in enter')
yield 'It is a feature, not a bug!!!'
print('do things in exit')
# After the contextmanager transformation, this is equivalent to the code below
class my_context(object):
def __enter__(self):
print('do things in enter')
return 'It is a feature, not a bug!!!'
def __exit__(self, *excinfo):
print('do things in exit')
return 'exit'
from contextlib import contextmanager
@contextmanager
def my_context():
yield 'Hello'
yield 'World'
with my_context():
print('line 1')
print('line 2')
from contextlib import contextmanager
@contextmanager
def context_loop():
for i in range(100):
yield i
with context_loop():
print('line 1')
print('line 2')
from contextlib import contextmanager
@contextmanager
def context_condition(cond):
if cond:
yield 'in condition'
else:
yield 'else case'
with context_condition(True) as f1:
print(f1)
print('line 1')
print('line 2')
with context_condition(False) as f2:
print(f2)
print('line 3')
print('line 4')
from contextlib import contextmanager
@contextmanager
def context_try(cond):
try:
yield 'normal'
except Exception:
print('exception')
finally:
print('finally')
with context_try(True):
print('line 1')
print('line 2')
fp = open('test.txt', 'w+')
try:
fp.write('Hello world')
finally:
fp.close()
# Rewritten with the with statement, the code above becomes the following
with open('test.txt', 'w+') as fp:
fp.write('Hello world')
from contextlib import contextmanager
@contextmanager
def tag(name):
print('<{}>'.format(name))
yield
print('</{}>'.format(name))
with tag('div'):
print('content 1')
from contextlib import contextmanager
@contextmanager
def tag(name):
print('<{}>'.format(name))
yield
print('</{}>'.format(name))
# nested
with tag('div'):
with tag('p'):
print('content 1')
print()
# multiple
with tag('div'), tag('h1'):
print('content 2')
from contextlib import redirect_stdout
with open('test_redirect.txt', 'w+') as f:
with redirect_stdout(f):
help(redirect_stdout)
print('afun defined message')
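# (Added sketch) contextlib also ships other ready-made context managers,
# e.g. suppress() to silently ignore a specific exception type.
import os
from contextlib import suppress
with suppress(FileNotFoundError):
    os.remove('file_that_does_not_exist.txt')
print('still running, the FileNotFoundError was suppressed')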
from contextlib import contextmanager
import time
@contextmanager
def timeit(name=''):
start_time = time.time()
yield
elapse_time = time.time() - start_time
print('{} - completed in {:.6f}'.format(name, elapse_time))
# note: comprehension speed seems slower when range(10)
test = [i for i in range(100000)]
with timeit('afun 1'):
a = [i for i in test if i % 2 == 0]
with timeit('afun 2'):
b = []
for i in test:
if i % 2 == 0:
b.append(i)
@timeit('afun 3')
def func(test_list):
result = []
for i in test_list:
if i % 2 == 0:
result.append(i)
return result
c = func(test)
from functools import wraps
# contextmanager source code
# This is the decorator pattern we mentioned above
# Although it returns the _GeneratorContextManager helper class, the implementation mainly relies on ContextDecorator
def contextmanager(func):
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, args, kwds)
return helper
# ContextDecorator source code
# _recreate_cm returns the instance itself
class ContextDecorator(object):
def _recreate_cm(self):
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication and Authorisation
Step3: After this step, we will have full access to the Twitter APIs
Step9: Streaming with tweepy
Step12: Drawbacks of twitter streaming API
|
<ASSISTANT_TASK:>
Python Code:
import logging # python logging module
# basic format for logging
logFormat = "%(asctime)s - [%(levelname)s] (%(funcName)s:%(lineno)d) %(message)s"
# logs will be stored in tweepy.log
logging.basicConfig(filename='tweepy.log', level=logging.INFO,
format=logFormat, datefmt="%Y-%m-%d %H:%M:%S")
import tweepy # importing all the modules required
import socket # will be used to create sockets
import json # manipulate json
from httplib import IncompleteRead
# Keep these tokens secret, as anyone can have full access to your
# twitter account, using these tokens
consumerKey = "#"
consumerSecret = "#"
accessToken = "#"
accessTokenSecret = "#"
# Performing the authentication and authorization, post this step
# we will have full access to twitter api's
def connectToTwitter():
    # Connect to Twitter.
try:
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
logging.info("Successfully logged in to twitter.")
return api, auth
except Exception as e:
logging.info("Something went wrong in oauth, please check your tokens.")
logging.error(e)
# Tweet listner class which subclasses from tweepy.StreamListener
class TweetListner(tweepy.StreamListener):
    # Twitter stream listener
def __init__(self, csocket):
self.clientSocket = csocket
def dataProcessing(self, data):
        # Process the data before sending it to Spark streaming
sendData = {} # data that is sent to spark streamer
user = data.get("user", {})
name = user.get("name", "undefined").encode('utf-8')
followersCount = user.get("followers_count", 0)
sendData["name"] = name
sendData["followersCount"] = followersCount
#data_string = "{}:{}".format(name, followersCount)
self.clientSocket.send(json.dumps(sendData) + u"\n") # append new line character, so that spark recognizes it
logging.debug(json.dumps(sendData))
def on_data(self, raw_data):
        # Called when raw data is received from the connection.
        # Return False to stop the stream and close the connection.
try:
data = json.loads(raw_data)
self.dataProcessing(data)
#self.clientSocket.send(json.dumps(sendData) + u"\n") # Because the connection was breaking
return True
except Exception as e:
logging.error("An unhandled exception has occured, check your data processing")
logging.error(e)
raise e
def on_error(self, status_code):
        # Called when a non-200 status code is returned
logging.error("A non-200 status code is returned")
return True
# Creating a proxy socket
def createProxySocket(host, port):
    # Returns a socket which can be used to connect to Spark.
try:
s = socket.socket() # initialize socket instance
s.bind((host, port)) # bind to the given host and port
s.listen(5) # Enable a server to accept connections.
logging.info("Listening on the port {}".format(port))
cSocket, address = s.accept() # waiting for a connection
logging.info("Received Request from: {}".format(address))
return cSocket
except socket.error as e:
if e.errno == socket.errno.EADDRINUSE: # Address in use
logging.error("The given host:port {}:{} is already in use"\
.format(host, port))
logging.info("Trying on port: {}".format(port + 1))
return createProxySocket(host, port + 1)
def getWOEIDForTrendsAvailable(api, place):
    # Returns the WOEID of the country if the trend is available there.
# Iterate through trends
data = api.trends_available()
for item in data:
if item["name"] == place: # Use place = "Worldwide" to get woeid of world
woeid = item["woeid"]
break
return woeid #name = India, woeid
# Get the list of trending topics from twitter
def getTrendingTopics(api, woeid):
    # Get the top trending topics from Twitter
data = api.trends_place(woeid)
listOfTrendingTopic = [trend["name"] for trend in data[0]["trends"]]
return listOfTrendingTopic
if __name__ == "__main__":
try:
api, auth = connectToTwitter() # connecting to twitter
# Global information is available by using 1 as the WOEID
# woeid = getWOEIDForTrendsAvailable(api, "Worldwide") # get the woeid of the worldwide
woeid = 1
trendingTopics = getTrendingTopics(api, woeid)[:10] # Pick only top 10 trending topics
host = "localhost"
port = 8888
cSocket = createProxySocket(host, port) # Creating a socket
while True:
try:
# Connect/reconnect the stream
tweetStream = tweepy.Stream(auth, TweetListner(cSocket)) # Stream the twitter data
# DON'T run this approach async or you'll just create a ton of streams!
tweetStream.filter(track=trendingTopics) # Filter on trending topics
except IncompleteRead:
# Oh well, reconnect and keep trucking
continue
except KeyboardInterrupt:
# Or however you want to exit this loop
tweetStream.disconnect()
break
except Exception as e:
logging.error("Unhandled exception has occured")
logging.error(e)
continue
except KeyboardInterrupt: # Keyboard interrupt called
logging.error("KeyboardInterrupt was hit")
except Exception as e:
logging.error("Unhandled exception has occured")
logging.error(e)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Simple graph example
Step3: Below we define the Graph, Node, and Edge classes.
Step4: Then we define and register reduce/factory for dumping/loading.
Step5: Now we can load AXON message with graph definition into Graph object and dump it.
Step6: Numpy arrays
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, unicode_literals
from axon.api import loads, dumps
from IPython.display import HTML, display
text = """
graph {
nodes: [
&1 node {x:1 y:1}
&2 node {x:1 y:2}
&3 node {x:2 y:2}
]
edges: [
edge {*1 *2}
edge {*1 *3}
edge {*2 *3}
]
}
"""
class Base(object):
#
def __str__(self):
return '%s: %r' % (self.__class__.__name__, self.__dict__)
#
__repr__ = __str__
class Graph(Base):
def __init__(self, nodes=None, edges=None):
self.nodes = list(nodes) if nodes else []
self.edges = list(edges) if edges else []
class Node(Base):
def __init__(self, x, y):
self.x = x
self.y = y
class Edge(Base):
def __init__(self, p1, p2):
self.left = p1
self.right = p2
from axon.utils import factory, reduce
import axon
@factory('graph')
def create_graph(attrs, args):
return Graph(**attrs)
@factory('node')
def create_node(attrs, args):
return Node(**attrs)
@factory('edge')
def create_edge(attrs, args):
return Edge(*args)
@reduce(Graph)
def reduce_graph(graph):
return axon.node('graph', {'nodes': graph.nodes, 'edges': graph.edges})
@reduce(Node)
def reduce_node(node):
return axon.node('node', {'x': node.x, 'y': node.y})
@reduce(Edge)
def reduce_edge(edge):
return axon.node('edge', None, [edge.left, edge.right])
g = loads(text, mode='strict')
display(HTML(u'<b>Graph object:</b>'))
print(g[0])
display(HTML(u'<b>Compact dump:</b>'))
print(dumps(g, crossref=1))
display(HTML(u'<b>Formatted dump without braces:</b>'))
print(dumps(g, pretty=1, crossref=1, hsize=4))
display(HTML(u'<b>Formatted dump with braces:</b>'))
print(dumps(g, pretty=1, braces=1, crossref=1, hsize=4))
from axon import dump_as_str, as_unicode, factory, reduce
import numpy as np
@factory('ndarray')
def create_array(mapping, sequence):
shape = mapping.get('shape', None)
dtype = mapping['dtype']
if type(dtype) is list:
dtype = [(str(n), str(t)) for n, t in dtype]
a = np.array(sequence, dtype=dtype)
if shape is not None:
a.shape = shape
return a
@reduce(np.ndarray)
def reduce_array(a):
signes = {'<', '=', '>', '!'}
if len(a.dtype.descr) > 1:
dtype = [
(as_unicode(n), (as_unicode(t[1:]) \
if t[0] in signes \
else as_unicode(t)))
for n, t in a.dtype.descr]
return axon.node('ndarray', {'dtype':dtype}, a.tolist())
else:
dtype_str = a.dtype.str
dtype_str = as_unicode(dtype_str[1:]) \
if dtype_str[0] in signes \
else as_unicode(dtype_str)
return axon.node('ndarray', {'shape': a.shape, 'dtype':as_unicode(dtype_str)}, a.tolist())
dump_as_str(np.int8)
dump_as_str(np.int16)
dump_as_str(np.int32)
dump_as_str(np.int64)
dump_as_str(np.float16)
dump_as_str(np.float32)
dump_as_str(np.float64)
dump_as_str(np.float128)
dump_as_str(np.int_)
dump_as_str(np.float_)
dump_as_str(np.double)
a = np.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
display(HTML('<b>Compact form:</b>'))
text = dumps([a])
print(text)
b = loads(text, mode="strict")[0]
display(HTML('<b>Formatted form with braces:</b>'))
text = dumps([a], pretty=1, braces=1, hsize=4)
print(text)
display(HTML('<b>Formatted form with braces:</b>'))
text = dumps([a], pretty=1, hsize=4)
print(text)
b = loads(text, mode="strict")[0]
a = np.array(
[(1, 2, 3.0), (3, 4, 5.0), (4, 5, 6.0)],
dtype=[('x', int), ('y', int), ('z', float)])
text = dumps([a])
print('val=', text)
b = loads(text, mode="strict")[0]
print('val=', repr(b))
display(HTML('<b>Formatted form</b>:'))
text = dumps([a])
print('val=', text)
display(HTML('<b>Formatted form</b>:'))
text = dumps([a], pretty=1, braces=1, hsize=3)
print('val=', text)
display(HTML('<b>Indented form:</b>'))
text = dumps([a], pretty=1, hsize=3)
print('val=', text)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The top and bottom "3" digits
Step2: UMAP
|
<ASSISTANT_TASK:>
Python Code:
%%capture
!curl -L https://github.com/tensorflow/privacy/releases/download/0.2.3/order.tgz -o order.tgz
!tar zxvf order.tgz
mnist_priv_train = np.load('data/order_mnist_priv_train.npy')
mnist_priv_test = np.load('data/order_mnist_priv_test.npy')
mnist_priv_train.shape
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train_orig = x_train
x_train.shape
trainList = []
for i, d in enumerate(np.argsort(mnist_priv_train)):
trainList.append({
'priv_order': d,
'y': y_train[i],
'i': i
})
df = pd.DataFrame(trainList)
top3df = df[df['y'] == 3].sort_values(['priv_order'], ascending=True).head(10)
f, axarr = plt.subplots(1, 10)
for i, d in enumerate(top3df['i'].to_list()):
axarr[i].imshow(x_train[d])
bot3df = df[df['y'] == 3].sort_values(['priv_order'], ascending=False).head(10)
f, axarr = plt.subplots(1, 10)
for i, d in enumerate(bot3df['i'].to_list()):
axarr[i].imshow(x_train[d])
train, test = tf.keras.datasets.mnist.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.array(train_data, dtype=np.float32) / 255
test_data = np.array(test_data, dtype=np.float32) / 255
train_data = train_data.reshape(train_data.shape[0], 28, 28, 1)
test_data = test_data.reshape(test_data.shape[0], 28, 28, 1)
train_labels = np.array(train_labels, dtype=np.int32)
test_labels = np.array(test_labels, dtype=np.int32)
train_labels = tf.keras.utils.to_categorical(train_labels, num_classes=10)
test_labels = tf.keras.utils.to_categorical(test_labels, num_classes=10)
model_784 = tf.keras.Sequential([
tf.keras.layers.Conv2D(16, 8,
strides=2,
padding='same',
activation='relu',
input_shape=(28, 28, 1)),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Conv2D(32, 4,
strides=2,
padding='valid',
activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(28*28, activation='relu', name='embedding_784'),
tf.keras.layers.Dense(32, activation='relu', name='embedding_32'),
tf.keras.layers.Dense(10, name='logit')
])
model_784.summary()
loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.losses.Reduction.NONE)
model_784.compile(loss=loss, optimizer="adam", metrics=["accuracy"])
model_784.fit(
train_data,
train_labels,
validation_data=(test_data, test_labels),
epochs=10,
batch_size=250,
verbose=2,
)
embedding_layer_model_784 = tf.keras.Model(
inputs=model_784.input,
outputs=model_784.get_layer('embedding_784').output)
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
train_embeddings_784 = sess.run(embedding_layer_model_784(train_data))
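# (Added sketch) the same pattern extracts the lower-dimensional 'embedding_32' layer
# that the model defines above, if a 32-d embedding is preferred for UMAP.
embedding_layer_model_32 = tf.keras.Model(
    inputs=model_784.input,
    outputs=model_784.get_layer('embedding_32').output)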
def umapDigit(digit=0, embeddings=train_embeddings_784, digit_type='', slug='784_'):
dfN = df[df['y'] == digit]
embeddingsN = embeddings.take(dfN['i'].to_list(), axis=0)
reducer = umap.UMAP(random_state=42, min_dist=.05, n_neighbors=8)
umap_xy = reducer.fit_transform(embeddingsN)
fig, ax = plt.subplots(figsize=(6, 6))
color = dfN['priv_order']
plt.scatter(umap_xy[:, 0], umap_xy[:, 1], c=color, cmap="Spectral", s=3)
plt.setp(ax, xticks=[], yticks=[])
plt.title("MNIST " + str(digit) + " - UMAP", fontsize=18)
plt.show()
rootdir = 'umap-digits/'
outpath = rootdir + 'umap_train_' + slug + digit_type + str(digit) + '.npy'
    with open(outpath, 'wb') as outfile:
np.save(outfile, umap_xy)
for i in range(0, 10):
umapDigit(i)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's have a regression example
Step2: Creating the model
Step3: Maximum-A-Posteriori
Step4: Hamiltonian Monte Carlo
Step5: Plotting simulated functions
Step6: Sparse Regression
|
<ASSISTANT_TASK:>
Python Code:
from matplotlib import pyplot
%matplotlib inline
import IPython
import torch
import numpy
import sys, os
sys.path.append(os.path.join(os.getcwd(),'..'))
pyplot.style.use('ggplot')
import candlegp
import candlegp.training.hmc
N = 12
X = torch.rand(N,1).double()
Y = (torch.sin(12*X) + 0.6*torch.cos(25*X) + torch.randn(N,1).double()*0.1+3.0).squeeze(1)
pyplot.figure()
pyplot.plot(X.numpy(), Y.numpy(), 'kx', mew=2)
k = candlegp.kernels.Matern52(1, lengthscales=torch.tensor([0.3], dtype=torch.double),
variance=torch.tensor([1.0], dtype=torch.double))
mean = candlegp.mean_functions.Linear(torch.tensor([1], dtype=torch.double), torch.tensor([0], dtype=torch.double))
m = candlegp.models.GPR(X, Y.unsqueeze(1), kern=k, mean_function=mean)
m.likelihood.variance.set(torch.tensor([0.01], dtype=torch.double))
m
xstar = torch.linspace(0,1,100).double()
mu, var = m.predict_y(xstar.unsqueeze(1))
cred_size = (var**0.5*2).squeeze(1)
mu = mu.squeeze(1)
pyplot.plot(xstar.numpy(),mu.data.numpy(),'b')
pyplot.fill_between(xstar.numpy(),mu.data.numpy()+cred_size.data.numpy(), mu.data.numpy()-cred_size.data.numpy(),facecolor='0.75')
pyplot.plot(X.numpy(), Y.numpy(), 'kx', mew=2)
opt = torch.optim.LBFGS(m.parameters(), lr=1e-2, max_iter=40)
def eval_model():
obj = m()
opt.zero_grad()
obj.backward()
return obj
for i in range(50):
obj = m()
opt.zero_grad()
obj.backward()
opt.step(eval_model)
if i%5==0:
print(i,':',obj.item())
m
xstar = torch.linspace(0,1,100).double()
mu, var = m.predict_y(xstar.unsqueeze(1))
cred_size = (var**0.5*2).squeeze(1)
mu = mu.squeeze(1)
pyplot.plot(xstar.numpy(),mu.data.numpy(),'b')
pyplot.fill_between(xstar.numpy(),mu.data.numpy()+cred_size.data.numpy(), mu.data.numpy()-cred_size.data.numpy(),facecolor='0.75')
pyplot.plot(X.numpy(), Y.numpy(), 'kx', mew=2)
k2 = candlegp.kernels.RBF(1, lengthscales=torch.tensor([0.3], dtype=torch.double),
variance=torch.tensor([1.0], dtype=torch.double))
mean2 = candlegp.mean_functions.Linear(torch.tensor([1], dtype=torch.double), torch.tensor([0], dtype=torch.double))
m2 = candlegp.models.GPR(X, Y.unsqueeze(1), kern=k2, mean_function=mean2)
m2.load_state_dict(m.state_dict())
dt = torch.double
m2.likelihood.variance.prior = candlegp.priors.Gamma(1.0,1.0, dtype=dt)
m2.kern.variance.prior = candlegp.priors.Gamma(1.0,1.0, dtype=dt)
m2.kern.lengthscales.prior = candlegp.priors.Gamma(1.0,1.0,dtype=dt)
m2.mean_function.A.prior = candlegp.priors.Gaussian(0.0,10.0, dtype=dt)
m2.mean_function.b.prior = candlegp.priors.Gaussian(0.0,10.0, dtype=dt)
print("likelihood with priors",m2().item())
m2
# res = candlegp.training.hmc.hmc_sample(m2,500,0.2,burn=50, thin=10)
res = candlegp.training.hmc.hmc_sample(m2,50,0.2,burn=50, thin=10)
pyplot.plot(res[0]); pyplot.title("likelihood");
for (n,p0),p,c in zip(m.named_parameters(),res[1:],['r','g','b','y','b']):
pyplot.plot(torch.stack(p).squeeze().numpy(), c=c, label=n)
pyplot.plot((0,len(p)),(p0.data.view(-1)[0],p0.data.view(-1)[0]), c=c)
pyplot.legend();
xstar = torch.linspace(0,1,100).double()
mc_params = torch.stack([torch.cat(p, dim=0).view(-1) for p in res[1:]], dim=1)
allsims = []
for ps in mc_params[:50]:
for mp, p in zip(m2.parameters(), ps):
with torch.no_grad():
mp.set(p)
allsims.append(m2.predict_f_samples(xstar.unsqueeze(1), 1).squeeze(0).t())
allsims = torch.cat(allsims, dim=0)
pyplot.plot(xstar.numpy(),allsims.data.numpy().T, 'b', lw=2, alpha=0.1)
mu, var = m.predict_y(xstar.unsqueeze(1))
cred_size = (var**0.5*2).squeeze(1)
mu = mu.squeeze(1)
pyplot.plot(xstar.numpy(),mu.data.numpy(),'b')
pyplot.fill_between(xstar.numpy(),mu.data.numpy()+cred_size.data.numpy(), mu.data.numpy()-cred_size.data.numpy(),facecolor='0.75')
pyplot.plot(X.numpy(), Y.numpy(), 'kx', mew=2)
k3 = candlegp.kernels.RBF(1, lengthscales=torch.tensor([0.3], dtype=torch.double),
variance=torch.tensor([1.0], dtype=torch.double))
mean3 = candlegp.mean_functions.Linear(torch.tensor([1], dtype=torch.double), torch.tensor([0], dtype=torch.double))
m3 = candlegp.models.SGPR(X, Y.unsqueeze(1), k3, X[:7].clone(), mean_function=mean3)
m3.likelihood.variance.set(torch.tensor([0.01], dtype=torch.double))
m3
opt = torch.optim.LBFGS(m3.parameters(), lr=1e-2, max_iter=40)
def eval_model():
obj = m3()
opt.zero_grad()
obj.backward()
return obj
for i in range(50):
obj = m3()
opt.zero_grad()
obj.backward()
opt.step(eval_model)
if i%5==0:
print(i,':',obj.item())
m3
xstar = torch.linspace(0,1,100).double()
mu, var = m3.predict_y(xstar.unsqueeze(1))
cred_size = (var**0.5*2).squeeze(1)
mu = mu.squeeze(1)
pyplot.plot(xstar.numpy(),mu.data.numpy(),'b')
pyplot.fill_between(xstar.numpy(),mu.data.numpy()+cred_size.data.numpy(), mu.data.numpy()-cred_size.data.numpy(),facecolor='0.75')
pyplot.plot(X.numpy(), Y.numpy(), 'kx', mew=2)
pyplot.plot(m3.Z.data.numpy(), torch.zeros(m3.Z.size(0)).numpy(),'o')
<END_TASK>
|