markdown (stringlengths 0 to 1.02M) | code (stringlengths 0 to 832k) | output (stringlengths 0 to 1.02M) | license (stringlengths 3 to 36) | path (stringlengths 6 to 265) | repo_name (stringlengths 6 to 127) |
---|---|---|---|---|---|
5. Run WEAP. **Please wait, it will take ~1-3 minutes** to finish calculating the two WEAP Areas with their many scenarios. | # Run WEAP
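# Note (added): this cell assumes `WEAP` is a COM handle to a running WEAP instance,
# created in an earlier cell of this notebook. A minimal connection sketch
# (Windows, Python 2 with pywin32; 'WEAP.WEAPApplication' is assumed to be the WEAP COM identifier):
# import win32com.client
# WEAP = win32com.client.Dispatch('WEAP.WEAPApplication')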
WEAP.Areas("Bear_River_WEAP_Model_2017_Conservation").Open
print WEAP.ActiveArea.Name
WEAP.ActiveArea = "Bear_River_WEAP_Model_2017_Conservation"
print WEAP.ActiveArea.Name
print 'Please wait 1-3 min for the calculation to finish'
WEAP.Calculate(2006,10,True)
WEAP.SaveArea
print '\n \n The calculation has been done and saved'
print WEAP.CalculationTime
print '\n \n Done'
| _____no_output_____ | BSD-3-Clause | 3_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb | WamdamProject/WaMDaM_JupyterNotebooks |
5.1 Get the unmet demand for the Cache County sites in the reference, conservation, and increased-use scenarios | Scenarios=['Reference','Cons25PercCacheUrbWaterUse','Incr25PercCacheUrbWaterUse']
DemandSites=['Logan Potable','North Cache Potable','South Cache Potable']
UnmetDemandEstimate_Ref = pd.DataFrame(columns = DemandSites)
UnmetDemandEstimate_Cons25 = pd.DataFrame(columns = DemandSites)
UnmetDemandEstimate_Incr25 = pd.DataFrame(columns = DemandSites)
UnmetDemandEstimate= pd.DataFrame(columns = Scenarios)
# Map each scenario name to the DataFrame that collects its results
scenario_frames = {'Reference': UnmetDemandEstimate_Ref,
                   'Cons25PercCacheUrbWaterUse': UnmetDemandEstimate_Cons25,
                   'Incr25PercCacheUrbWaterUse': UnmetDemandEstimate_Incr25}
for scen in Scenarios:
    frame = scenario_frames[scen]
    for site in DemandSites:
        param = "\Demand Sites\%s: Unmet Demand[Acre-Foot]" % (site)
        # print param
        for year in range(1966, 2006):
            value = WEAP.ResultValue(param, year, 1, scen, year, WEAP.NumTimeSteps)
            frame.loc[year, [site]] = value
UnmetDemandEstimate_Ref['Cache Total']=UnmetDemandEstimate_Ref[DemandSites].sum(axis=1)
UnmetDemandEstimate_Cons25['Cache Total']=UnmetDemandEstimate_Cons25[DemandSites].sum(axis=1)
UnmetDemandEstimate_Incr25['Cache Total']=UnmetDemandEstimate_Incr25[DemandSites].sum(axis=1)
UnmetDemandEstimate['Reference']=UnmetDemandEstimate_Ref['Cache Total']
UnmetDemandEstimate['Cons25PercCacheUrbWaterUse']=UnmetDemandEstimate_Cons25['Cache Total']
UnmetDemandEstimate['Incr25PercCacheUrbWaterUse']=UnmetDemandEstimate_Incr25['Cache Total']
UnmetDemandEstimate = UnmetDemandEstimate.rename_axis('Year')  # the index holds the years
print 'Done estimating the unmet demand for each scenario'
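# (added note) UnmetDemandEstimate now holds one column per scenario with the annual
# Cache County unmet demand (Acre-Foot), indexed by the years 1966-2005.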
# display(UnmetDemandEstimate) | _____no_output_____ | BSD-3-Clause | 3_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb | WamdamProject/WaMDaM_JupyterNotebooks |
5.2 Get the unmet demand as a percentage for the scenarios |
########################################################################
# estimate the total reference demand for Cache County to calculate the percentage
result_df_UseCase= pd.read_sql_query(Query_UseCase_text, conn)
subsets = result_df_UseCase.groupby(['ScenarioName'])
for subset in subsets.groups.keys():
if subset=='Bear River WEAP Model 2017': # reference
df_Seasonal = subsets.get_group(name=subset)
df_Seasonal=df_Seasonal.reset_index()
# display (df_Seasonal)
Tot=df_Seasonal["SeasonNumericValue"].tolist()
float_lst = [float(x) for x in Tot]
Annual_Demand=sum(float_lst)
print Annual_Demand
########################################################################
years =UnmetDemandEstimate.index.values
Reference_vals =UnmetDemandEstimate['Reference'].tolist()
Reference_vals_perc =((numpy.array([Reference_vals]))/Annual_Demand)*100
Cons25PercCacheUrbWaterUse_vals =UnmetDemandEstimate['Cons25PercCacheUrbWaterUse'].tolist()
Cons25PercCacheUrbWaterUse_vals_perc =((numpy.array([Cons25PercCacheUrbWaterUse_vals]))/Annual_Demand)*100
Incr25PercCacheUrbWaterUse_vals =UnmetDemandEstimate['Incr25PercCacheUrbWaterUse'].tolist()
Incr25PercCacheUrbWaterUse_vals_perc =((numpy.array([Incr25PercCacheUrbWaterUse_vals]))/Annual_Demand)*100
print 'Done estimating the unmet demand percentages' | _____no_output_____ | BSD-3-Clause | 3_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb | WamdamProject/WaMDaM_JupyterNotebooks |
5.3 Export the unmet demand percentages into Excel to load them into WaMDaM | # display(UnmetDemandEstimate)
import xlsxwriter
from collections import OrderedDict
UnmetDemandEstimate.to_csv('UnmetDemandEstimate.csv')
ExcelFileName='Test.xlsx'
years =UnmetDemandEstimate.index.values
#print years
Columns=['ObjectType','InstanceName','ScenarioName','AttributeName','DateTimeStamp','Value']
# these three columns have fixed values for all the rows
ObjectType='Demand Site'
InstanceName='Cache County Urban'
AttributeName='UnmetDemand'
# this dict maps keys (scenario names) to their yearly value arrays
# years exist in UnmetDemandEstimate. We then need to add day and month to the year date
# like this format: # DateTimeStamp= 1/1/1993
Scenarios = OrderedDict()
Scenarios['Bear River WEAP Model 2017_result'] = Reference_vals_perc
Scenarios['Incr25PercCacheUrbWaterUse_result'] = Incr25PercCacheUrbWaterUse_vals_perc
Scenarios['Cons25PercCacheUrbWaterUse_result'] = Cons25PercCacheUrbWaterUse_vals_perc
#print Incr25PercCacheUrbWaterUse_vals_perc
workbook = xlsxwriter.Workbook(ExcelFileName)
sheet = workbook.add_worksheet('sheet')
# write headers
for i, header_name in enumerate(Columns):
sheet.write(0, i, header_name)
row = 1
col = 0
for scenario_name in Scenarios.keys():
for val_list in Scenarios[scenario_name]:
# print val_list
for i, val in enumerate(val_list):
# print years[i]
date_timestamp = '1/1/{}'.format(years[i])
sheet.write(row, 0, ObjectType)
sheet.write(row, 1, InstanceName)
sheet.write(row, 2, scenario_name)
sheet.write(row, 3, AttributeName)
sheet.write(row, 4, date_timestamp)
sheet.write(row, 5, val)
row += 1
workbook.close()
print 'done writing to Excel'
print 'Next, copy the exported data into a WaMDaM workbook template for the WEAP model'
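# (added) Hedged alternative sketch, left commented out: the same long-format rows could be
# assembled with pandas and written in one call, using the `Columns` defined above.
# rows = []
# for scenario_name, vals in Scenarios.items():
#     for val_list in vals:
#         for i, val in enumerate(val_list):
#             rows.append([ObjectType, InstanceName, scenario_name, AttributeName,
#                          '1/1/{}'.format(years[i]), val])
# pd.DataFrame(rows, columns=Columns).to_excel('Test_pandas.xlsx', index=False)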
| _____no_output_____ | BSD-3-Clause | 3_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb | WamdamProject/WaMDaM_JupyterNotebooks |
6. Plot the unmet demand for all the scenarios and years |
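# Note (added): this cell assumes the Plotly objects were imported in an earlier notebook cell,
# e.g. (sketch):
# import plotly.graph_objs as go
# from plotly import offline
# offline.init_notebook_mode(connected=True)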
trace2 = go.Scatter(
x=years,
y=Reference_vals_perc[0],
name = 'Reference demand',
mode = 'lines+markers',
marker = dict(
color = '#264DFF',
))
trace3 = go.Scatter(
x=years,
y=Cons25PercCacheUrbWaterUse_vals_perc[0],
name = 'Conserve demand by 25%',
mode = 'lines+markers',
marker = dict(
color = '#3FA0FF'
))
trace1 = go.Scatter(
x=years,
y=Incr25PercCacheUrbWaterUse_vals_perc[0],
name = 'Increase demand by 25%',
mode = 'lines+markers',
marker = dict(
color = '#290AD8'
))
layout = dict(
#title = "Use Case 3.3",
yaxis = dict(
title = "Annual unmet demand (%)",
tickformat= ',',
showline=True,
dtick='5',
ticks='outside',
ticklen=10,
tickcolor='#000',
gridwidth=1,
showgrid=True,
),
xaxis = dict(
# title = "Updated input parameters in the <br>Bear_River_WEAP_Model_2017",
# showline=True,
ticks='inside',
tickfont=dict(size=22),
tickcolor='#000',
gridwidth=1,
showgrid=True,
ticklen=25
),
legend=dict(
x=0.05,y=1.1,
bordercolor='#00000f',
borderwidth=2
),
width=1100,
height=700,
#paper_bgcolor='rgb(233,233,233)',
#plot_bgcolor='rgb(233,233,233)',
margin=go.Margin(l=130,b=200),
font=dict(size=25,family='arial',color='#00000f'),
showlegend=True
)
data = [trace1, trace2,trace3]
# create a figure object
fig = dict(data=data, layout=layout)
#py.iplot(fig, filename = "2.3Identify_SeasonalValues")
## it can also be run from a local machine in PyCharm as below
## It would also work here offline, but in a separate window
offline.iplot(fig,filename = 'jupyter/UnmentDemand@BirdRefuge' )
print "Figure x is replicated!!" | _____no_output_____ | BSD-3-Clause | 3_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb | WamdamProject/WaMDaM_JupyterNotebooks |
7. Upload the new result scenarios to OpenAgua to visualize them there. You already uploaded the results from the WaMDaM SQLite database earlier, at the beginning of these Jupyter Notebooks, so all you need to do is select to display the results in OpenAgua. Finally, click Load Data. It should replicate the same figure above and Figure 6 in the paper. 8. Close the SQLite and WEAP API connections | # 8. Close the SQLite and WEAP API connections
conn.close()
print 'connection disconnected'
# Uncomment
WEAP.SaveArea
# this command will close WEAP
WEAP.Quit
print 'Connection with WEAP API is disconnected' | _____no_output_____ | BSD-3-Clause | 3_VisualizePublish/07_Step7_Serve_NewScenarios_WEAP.ipynb | WamdamProject/WaMDaM_JupyterNotebooks |
[](https://lab.mlpack.org/v2/gh/mlpack/examples/master?urlpath=lab%2Ftree%2Fforest_covertype_prediction_with_random_forests%2Fcovertype-rf-py.ipynb) | # @file covertype-rf-py.ipynb
#
# Classification using Random Forest on the Covertype dataset.
import mlpack
import pandas as pd
import numpy as np
# Load the dataset from an online URL.
df = pd.read_csv('https://lab.mlpack.org/data/covertype-small.csv.gz')
# Split the labels.
labels = df['label']
dataset = df.drop('label', axis=1)
# Split the dataset using mlpack. The output comes back as a dictionary, which
# we'll unpack for clarity of code.
output = mlpack.preprocess_split(input=dataset, input_labels=labels, test_ratio=0.3)
training_set = output['training']
training_labels = output['training_labels']
test_set = output['test']
test_labels = output['test_labels']
# Train a random forest.
output = mlpack.random_forest(training=training_set, labels=training_labels,
print_training_accuracy=True, num_trees=10, minimum_leaf_size=3)
random_forest = output['output_model']
# Predict the labels of the test points.
output = mlpack.random_forest(input_model=random_forest, test=test_set)
# Now print the accuracy. The 'probabilities' output could also be used to
# generate an ROC curve.
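# (added) Hedged sketch: one way to turn the 'probabilities' output into a one-vs-rest
# ROC-AUC with scikit-learn. Left commented out because it assumes the probability columns
# are ordered consistently with the sorted class labels, which should be checked against
# mlpack's label mapping before relying on the number.
# from sklearn.metrics import roc_auc_score
# probs = output['probabilities']
# print('One-vs-rest ROC-AUC: ' + str(roc_auc_score(test_labels.flatten(), probs, multi_class='ovr')))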
correct = np.sum(output['predictions'] == test_labels.flatten())
print(str(correct) + ' correct out of ' + str(len(test_labels)) +
' (' + str(100 * float(correct) / float(len(test_labels))) + '%).') | 24513 correct out of 30000 (81.71%).
| BSD-3-Clause | forest_covertype_prediction_with_random_forests/covertype-rf-py.ipynb | Davidportlouis/examples |
Optimizing building HVAC with Amazon SageMaker RL | import sagemaker
import boto3
from sagemaker.rl import RLEstimator
from source.common.docker_utils import build_and_push_docker_image | _____no_output_____ | Apache-2.0 | reinforcement_learning/rl_hvac_ray_energyplus/train-hvac.ipynb | P15241328/amazon-sagemaker-examples |
Initialize Amazon SageMaker | role = sagemaker.get_execution_role()
sm_session = sagemaker.session.Session()
# SageMaker SDK creates a default bucket. Change this bucket to your own bucket, if needed.
s3_bucket = sm_session.default_bucket()
s3_output_path = f's3://{s3_bucket}'
print(f'S3 bucket path: {s3_output_path}')
print(f'Role: {role}') | _____no_output_____ | Apache-2.0 | reinforcement_learning/rl_hvac_ray_energyplus/train-hvac.ipynb | P15241328/amazon-sagemaker-examples |
Set additional training parameters. Set instance type: set `cpu_or_gpu` to either `'cpu'` or `'gpu'` to use CPU or GPU instances. Configure the framework you want to use: set `framework` to `'tf'` or `'torch'` for TensorFlow or PyTorch, respectively. You will also have to edit your entry point, i.e., `train-sagemaker-distributed.py`, with the configuration parameter `"use_pytorch"` to match the framework that you have selected. | job_name_prefix = 'energyplus-hvac-ray'
cpu_or_gpu = 'gpu' # has to be either cpu or gpu
if cpu_or_gpu != 'cpu' and cpu_or_gpu != 'gpu':
raise ValueError('cpu_or_gpu has to be either cpu or gpu')
framework = 'tf'
instance_type = 'ml.g4dn.16xlarge' # g4dn.16x large has 1 GPU and 64 cores | _____no_output_____ | Apache-2.0 | reinforcement_learning/rl_hvac_ray_energyplus/train-hvac.ipynb | P15241328/amazon-sagemaker-examples |
Train your homogeneous scaling job here. Edit the training code: the training code is written in the file `train-sagemaker-distributed.py`, which is uploaded in the /source directory. *Note that ray will automatically set `"ray_num_cpus"` and `"ray_num_gpus"` in `_get_ray_config`.* | !pygmentize source/train-sagemaker-distributed.py
Train the RL model using the Python SDK (script mode)

When using SageMaker for distributed training, you can select a GPU or CPU instance. The RLEstimator is used for training RL jobs.
1. Specify the source directory where the environment, presets and training code are uploaded.
2. Specify the entry point as the training code.
3. Specify the image (CPU or GPU) to be used for the training environment.
4. Define the training parameters such as the instance count, job name, and S3 path for output.
5. Define the metric definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.

GPU docker image

| # Build image
repository_short_name = f'sagemaker-hvac-ray-{cpu_or_gpu}'
docker_build_args = {
'CPU_OR_GPU': cpu_or_gpu,
'AWS_REGION': boto3.Session().region_name,
'FRAMEWORK': framework
}
image_name = build_and_push_docker_image(repository_short_name, build_args=docker_build_args)
print("Using ECR image %s" % image_name)
metric_definitions = [
{'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'episodes_total', 'Regex': 'episodes_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'num_steps_trained', 'Regex': 'num_steps_trained: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'timesteps_total', 'Regex': 'timesteps_total: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'training_iteration', 'Regex': 'training_iteration: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
{'Name': 'episode_reward_min', 'Regex': 'episode_reward_min: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
] | _____no_output_____ | Apache-2.0 | reinforcement_learning/rl_hvac_ray_energyplus/train-hvac.ipynb | P15241328/amazon-sagemaker-examples |
Ray homogeneous scaling - Specify `train_instance_count` > 1. Homogeneous scaling allows us to use multiple instances of the same type. Spot instances are unused EC2 instances that can be used at up to a 90% discount compared to On-Demand prices (more information about spot instances can be found [here](https://aws.amazon.com/ec2/spot/?cards.sort-by=item.additionalFields.startDateTime&cards.sort-order=asc) and [here](https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html)). To use spot instances, set `train_use_spot_instances = True`. To use On-Demand instances, set `train_use_spot_instances = False`. | hyperparameters = {
# no. of days to simulate. Remember to adjust the dates in RunPeriod of
# 'source/eplus/envs/buildings/MediumOffice/RefBldgMediumOfficeNew2004_Chicago.idf' to match simulation days.
'n_days': 365,
'n_iter': 50, # no. of training iterations
'algorithm': 'APEX_DDPG', # only APEX_DDPG and PPO are tested
'multi_zone_control': True, # if each zone temperature set point has to be independently controlled
'energy_temp_penalty_ratio': 10
}
# Set additional training parameters
training_params = {
'base_job_name': job_name_prefix,
'train_instance_count': 1,
'tags': [{'Key': k, 'Value': str(v)} for k,v in hyperparameters.items()]
}
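# (added) Optional: to use managed spot training instead of On-Demand capacity (as described
# in the markdown above), spot-related arguments can be added here and passed through to the
# estimator. A hedged sketch; the exact keyword names depend on the SageMaker Python SDK version:
# training_params['train_use_spot_instances'] = True
# training_params['train_max_wait'] = 7200   # seconds to wait for spot capacity
# training_params['train_max_run'] = 7200    # maximum training runtime in seconds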
# Defining the RLEstimator
estimator = RLEstimator(entry_point=f'train-sagemaker-hvac.py',
source_dir='source',
dependencies=["source/common/"],
image_uri=image_name,
role=role,
train_instance_type=instance_type,
# train_instance_type='local',
output_path=s3_output_path,
metric_definitions=metric_definitions,
hyperparameters=hyperparameters,
**training_params
)
estimator.fit(wait=False)
print(' ')
print(estimator.latest_training_job.job_name)
print('type=', instance_type, 'count=', training_params['train_instance_count'])
print(' ') | _____no_output_____ | Apache-2.0 | reinforcement_learning/rl_hvac_ray_energyplus/train-hvac.ipynb | P15241328/amazon-sagemaker-examples |
Spleen 3D segmentation with MONAI

This tutorial demonstrates how MONAI can be used in conjunction with the [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) framework. We demonstrate use of the following MONAI features:
1. Transforms for dictionary-format data.
2. Loading Nifti images with metadata.
3. Adding a channel dim to the data if there is no channel dimension.
4. Scaling medical image intensity to an expected range.
5. Cropping out a batch of balanced images based on the positive / negative label ratio.
6. Cache IO and transforms to accelerate training and validation.
7. Use of a 3D UNet model, Dice loss function, and mean Dice metric for a 3D segmentation task.
8. The sliding window inference method.
9. Deterministic training for reproducibility.

The training Spleen dataset used in this example can be downloaded from http://medicaldecathlon.com/. Target: Spleen; Modality: CT; Size: 61 3D volumes (41 Training + 20 Testing); Source: Memorial Sloan Kettering Cancer Center; Challenge: Large-ranging foreground size.

In addition to the usual MONAI requirements you will need Lightning installed.

| ! pip install pytorch-lightning
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
import numpy as np
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import monai
from monai.transforms import \
Compose, LoadNiftid, AddChanneld, ScaleIntensityRanged, RandCropByPosNegLabeld, \
RandAffined, Spacingd, Orientationd, ToTensord
from monai.data import list_data_collate, sliding_window_inference
from monai.networks.layers import Norm
from monai.metrics import compute_meandice
from pytorch_lightning import LightningModule, Trainer, loggers
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
monai.config.print_config() | MONAI version: 0.1.0rc2+11.gdb4531b.dirty
Python version: 3.6.8 (default, Oct 7 2019, 12:59:55) [GCC 8.3.0]
Numpy version: 1.18.2
Pytorch version: 1.4.0
Ignite version: 0.3.0
| Apache-2.0 | examples/notebooks/spleen_segmentation_3d_lightning.ipynb | loftwah/MONAI |
Define the LightningModule. The LightningModule contains a refactoring of your training code. The following module is a refactoring of the code in `spleen_segmentation_3d.ipynb`: | class Net(LightningModule):
def __init__(self):
super().__init__()
self._model = monai.networks.nets.UNet(dimensions=3, in_channels=1, out_channels=2, channels=(16, 32, 64, 128, 256),
strides=(2, 2, 2, 2), num_res_units=2, norm=Norm.BATCH)
self.loss_function = monai.losses.DiceLoss(to_onehot_y=True, do_softmax=True)
self.best_val_dice = 0
self.best_val_epoch = 0
def forward(self, x):
return self._model(x)
def prepare_data(self):
# set up the correct data path
data_root = '/workspace/data/medical/Task09_Spleen'
train_images = glob.glob(os.path.join(data_root, 'imagesTr', '*.nii.gz'))
train_labels = glob.glob(os.path.join(data_root, 'labelsTr', '*.nii.gz'))
data_dicts = [{'image': image_name, 'label': label_name}
for image_name, label_name in zip(train_images, train_labels)]
train_files, val_files = data_dicts[:-9], data_dicts[-9:]
# define the data transforms
train_transforms = Compose([
LoadNiftid(keys=['image', 'label']),
AddChanneld(keys=['image', 'label']),
Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
# randomly crop out patch samples from big image based on pos / neg ratio
# the image centers of negative samples must be in valid image area
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', size=(96, 96, 96), pos=1,
neg=1, num_samples=4, image_key='image', image_threshold=0),
# user can also add other random transforms
# RandAffined(keys=['image', 'label'], mode=('bilinear', 'nearest'), prob=1.0, spatial_size=(96, 96, 96),
# rotate_range=(0, 0, np.pi/15), scale_range=(0.1, 0.1, 0.1)),
ToTensord(keys=['image', 'label'])
])
val_transforms = Compose([
LoadNiftid(keys=['image', 'label']),
AddChanneld(keys=['image', 'label']),
Spacingd(keys=['image', 'label'], pixdim=(1.5, 1.5, 2.), interp_order=(3, 0)),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
ToTensord(keys=['image', 'label'])
])
# set deterministic training for reproducibility
train_transforms.set_random_state(seed=0)
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# we use cached datasets - these are 10x faster than regular datasets
self.train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=1.0)
self.val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
#self.train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
#self.val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
def train_dataloader(self):
train_loader = DataLoader(self.train_ds, batch_size=2, shuffle=True, num_workers=4, collate_fn=list_data_collate)
return train_loader
def val_dataloader(self):
val_loader = DataLoader(self.val_ds, batch_size=1, num_workers=4)
return val_loader
def configure_optimizers(self):
optimizer = torch.optim.Adam(self._model.parameters(), 1e-4)
return optimizer
def training_step(self, batch, batch_idx):
images, labels = batch['image'], batch['label']
output = self.forward(images)
loss = self.loss_function(output, labels)
tensorboard_logs = {'train_loss': loss.item()}
return {'loss': loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
images, labels = batch['image'], batch['label']
roi_size = (160, 160, 160)
sw_batch_size = 4
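        # (added) sliding_window_inference tiles the full validation volume into roi_size patches
        # (sw_batch_size patches per forward pass) and stitches the patch predictions back into a
        # full-size output, so whole CT volumes can be validated despite GPU memory limits.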
outputs = sliding_window_inference(images, roi_size, sw_batch_size, self.forward)
loss = self.loss_function(outputs, labels)
value = compute_meandice(y_pred=outputs, y=labels, include_background=False,
to_onehot_y=True, mutually_exclusive=True)
return {'val_loss': loss, 'val_dice': value}
def validation_epoch_end(self, outputs):
val_dice = 0
num_items = 0
for output in outputs:
val_dice += output['val_dice'].sum().item()
num_items += len(output['val_dice'])
mean_val_dice = val_dice / num_items
tensorboard_logs = {'val_dice': mean_val_dice}
if mean_val_dice > self.best_val_dice:
self.best_val_dice = mean_val_dice
self.best_val_epoch = self.current_epoch
print('current epoch %d current mean dice: %0.4f best mean dice: %0.4f at epoch %d'
% (self.current_epoch, mean_val_dice, self.best_val_dice, self.best_val_epoch))
return {'log': tensorboard_logs}
| _____no_output_____ | Apache-2.0 | examples/notebooks/spleen_segmentation_3d_lightning.ipynb | loftwah/MONAI |
Run the training | # initialise the LightningModule
net = Net()
# set up loggers and checkpoints
tb_logger = loggers.TensorBoardLogger(save_dir='logs')
checkpoint_callback = ModelCheckpoint(filepath='logs/{epoch}-{val_loss:.2f}-{val_dice:.2f}')
# initialise Lightning's trainer.
trainer = Trainer(gpus=[0],
max_epochs=250,
logger=tb_logger,
checkpoint_callback=checkpoint_callback,
show_progress_bar=False,
num_sanity_val_steps=1
)
# train
trainer.fit(net)
print('train completed, best_metric: %0.4f at epoch %d' % (net.best_val_dice, net.best_val_epoch)) | train completed, best_metric: 0.9435 at epoch 186
| Apache-2.0 | examples/notebooks/spleen_segmentation_3d_lightning.ipynb | loftwah/MONAI |
View training in tensorboard | %load_ext tensorboard
%tensorboard --logdir='logs' | _____no_output_____ | Apache-2.0 | examples/notebooks/spleen_segmentation_3d_lightning.ipynb | loftwah/MONAI |
Check best model output with the input image and label | net.eval()
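# (added note) This cell evaluates the in-memory `net` from the end of training. To evaluate the
# best checkpoint saved by ModelCheckpoint instead, one could load it first (hedged sketch; the
# checkpoint filename below is hypothetical and depends on the logged metrics):
# net = Net.load_from_checkpoint('logs/epoch=186-val_loss=0.10-val_dice=0.94.ckpt')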
device = torch.device("cuda:0")
with torch.no_grad():
for i, val_data in enumerate(net.val_dataloader()):
roi_size = (160, 160, 160)
sw_batch_size = 4
val_outputs = sliding_window_inference(val_data['image'].to(device), roi_size, sw_batch_size, net)
# plot the slice [:, :, 50]
plt.figure('check', (18, 6))
plt.subplot(1, 3, 1)
plt.title('image ' + str(i))
plt.imshow(val_data['image'][0, 0, :, :, 50], cmap='gray')
plt.subplot(1, 3, 2)
plt.title('label ' + str(i))
plt.imshow(val_data['label'][0, 0, :, :, 50])
plt.subplot(1, 3, 3)
plt.title('output ' + str(i))
plt.imshow(torch.argmax(val_outputs, dim=1).detach().cpu()[0, :, :, 50])
plt.show() | _____no_output_____ | Apache-2.0 | examples/notebooks/spleen_segmentation_3d_lightning.ipynb | loftwah/MONAI |
Westeros Tutorial Part 1 - Welcome to the MESSAGEix framework & Creating a baseline scenario

*Integrated Assessment Modeling for the 21st Century*

For information on how to install *MESSAGEix*, please refer to the [Installation page](https://message.iiasa.ac.at/en/stable/getting_started.html), and to obtain the *MESSAGEix* tutorials, please follow the steps mentioned in [Tutorials](https://message.iiasa.ac.at/en/stable/tutorials.html). Please refer to the [user guidelines](https://github.com/iiasa/message_ix/blob/master/NOTICE.rst) for additional information on using *MESSAGEix*, including the recommended citation and how to name new models.

**Structure of these tutorials.** After having run this baseline tutorial, you are able to start with any of the other tutorials, but we recommend following the order below to go through the information step-wise:

1. Baseline tutorial (``westeros_baseline.ipynb``)
2. Add extra detail and constraints to the model
   1. Emissions
      1. Introducing emissions (`westeros_emissions_bounds.ipynb`)
      2. Introducing taxes on emissions (`westeros_emissions_taxes.ipynb`)
   2. Add firm capacity (``westeros_firm_capacity.ipynb``)
   3. Add flexible energy generation (``westeros_flexible_generation.ipynb``)
   4. Add seasonality as an example of temporal variability (``westeros_seasonality.ipynb``)
3. Post-processing: learn how to report calculations _after_ the MESSAGE model has run (``westeros_report.ipynb``)

**Pre-requisites**
- Have successfully installed *MESSAGEix*.

_This tutorial is based on a presentation by Matthew Gidden ([@gidden](https://github.com/gidden)) for a summer school at the **Centre National de la Recherche Scientifique (CNRS)** on *Integrated Assessment Modeling* in June 2018._

Scope of this tutorial: Building a Simple Energy Model

The goal of this tutorial is to build a simple energy model using *MESSAGEix* with minimal features that can be expanded in future tutorials. We will build the model component by component, focusing on both the **how** (code implementation) and **why** (mathematical formulation).

Online documentation

The full framework documentation is available at [https://message.iiasa.ac.at](https://message.iiasa.ac.at).

A stylized reference energy system model for Westeros

This tutorial is based on the country of Westeros from the TV show "Game of Thrones".

MESSAGEix: the mathematical paradigm

At its core, *MESSAGEix* is an optimization problem:

> $\min \quad ~c^T \cdot x$
> $~s.t. \quad A \cdot x \leq b$

More explicitly, the model...
- optimizes an **objective function**, nominally minimizing total **system costs**
- under a system of **constraints** (inequalities or equality conditions)

The mathematical implementation includes a number of features that make it particularly geared towards the modelling of *energy-water-land systems* in the context of *climate change mitigation and sustainable development*.

Throughout this document, the mathematical formulation follows the convention that
- decision **VARIABLES** ($x$) are capitalized
- input **parameters** ($A$, $b$) are lower case

MESSAGEix: connected to the *ix modeling platform (ixmp)*

The *modeling platform for integrated and cross-cutting analysis* (ixmp) provides a powerful framework for working with scenarios, including a database infrastructure for data version control and interfaces to scientific programming languages.

Ready, steady, go!

First, we import all the packages we need.
We import a utility function called *make_df*, which can be used to wrap the input data into dataframes that can be saved in model parameters. | import pandas as pd
import ixmp
import message_ix
from message_ix.utils import make_df
%matplotlib inline | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
The *MESSAGEix* model is built using the *ixmp* `Platform`. The `Platform` is your connection to a database for storing model input data and scenario results. | mp = ixmp.Platform() | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Once connected, we create a new `Scenario` to build our model. A `Scenario` instance will contain all the model input data and results. | scenario = message_ix.Scenario(mp, model='Westeros Electrified',
scenario='baseline', version='new') | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Model Structure

We start by defining basic characteristics of the model, including time, space, and the energy system structure. The model horizon will span 3 decades (690-720). Let's assume that we're far in the future after the events of A Song of Ice and Fire (which occur ~300 years after Aegon the Conqueror).

| Math Notation | Model Meaning                 |
|---------------|-------------------------------|
| $y \in Y^H$   | time periods in history       |
| $y \in Y^M$   | time periods in model horizon |

| history = [690]
model_horizon = [700, 710, 720]
scenario.add_horizon({'year': history + model_horizon,
'firstmodelyear': model_horizon[0]}) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Our model will have a single `node`, i.e., its spatial dimension.| Math Notation | Model Meaning||---------------|--------------|| $n \in N$ | node | | country = 'Westeros'
scenario.add_spatial_sets({'country': country}) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
And we fill in the energy system's `commodities`, `levels`, `technologies`, and `modes` (i.e., modes of operation of technologies). This information defines how certain technologies operate.

| Math Notation | Model Meaning |
|---------------|---------------|
| $c \in C$     | commodity     |
| $l \in L$     | level         |
| $t \in T$     | technology    |
| $m \in M$     | mode          |

| scenario.add_set("commodity", ["electricity", "light"])
scenario.add_set("level", ["secondary", "final", "useful"])
scenario.add_set("technology", ['coal_ppl', 'wind_ppl', 'grid', 'bulb'])
scenario.add_set("mode", "standard") | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Supply and Demand (or Balancing Commodities) The fundamental premise of the model is to satisfy demand for energy (services). To first order, demand for services like electricity tracks with economic productivity (GDP). We define a GDP profile similar to first-world GDP growth from [1900-1930](https://en.wikipedia.org/wiki/List_of_regions_by_past_GDP): | gdp_profile = pd.Series([1., 1.5, 1.9],
index=pd.Index(model_horizon, name='Time'))
gdp_profile.plot(title='Demand') | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
The `COMMODITY_BALANCE_GT` and `COMMODITY_BALANCE_LT` equations ensure that `demand` for each `commodity` is met at each `level` in the energy system. The equation is copied below in this tutorial notebook, but every model equation is available for reference in the [Mathematical formulation](https://message.iiasa.ac.at/en/stable/model/MESSAGE/model_core.html) section of the *MESSAGEix* documentation.

$\sum_{\substack{n^L,t,m \\ y^V \leq y}} \text{output}_{n^L,t,y^V,y,m,n,c,l} \cdot \text{ACT}_{n^L,t,y^V,y,m} - \sum_{\substack{n^L,t,m \\ y^V \leq y}} \text{input}_{n^L,t,y^V,y,m,n,c,l} \cdot \text{ACT}_{n^L,t,y^V,y,m} \geq \text{demand}_{n,c,l,y} \quad \forall \ l \in L$

While `demand` must be met, supply can *exceed* demand, allowing the model to plan for meeting demand in future periods by storing storable commodities.

First we establish demand. Let's assume
- 40 million people in [300 AC](https://atlasoficeandfireblog.wordpress.com/2016/03/06/the-population-of-the-seven-kingdoms/)
- similar population growth to Earth in the same time frame [(~factor of 12)](https://en.wikipedia.org/wiki/World_population_estimates)
- a per capita demand for electricity of 1000 kWh
- and 8760 hours in a year (of course!)

Then we can add the demand parameter. Note present day: [~72000 GWh in Austria](https://www.iea.org/statistics/?country=AUSTRIA&year=2016&category=Energy%20consumption&indicator=undefined&mode=chart&dataTable=INDICATORS) with population [~8.7M](http://www.austria.org/population/), which is ~8300 kWh per capita.

| demand_per_year = 40 * 12 * 1000 / 8760
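# (added) Unit check for demand_per_year: 40 million people * growth factor 12 * 1000 kWh/person/year
# = 4.8e11 kWh/year; dividing by the 8760 hours in a year gives ~5.48e7 kW of average load,
# i.e. roughly 55 GW, expressed in the model's GWa units (the expression evaluates to ~54.79).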
light_demand = pd.DataFrame({
'node': country,
'commodity': 'light',
'level': 'useful',
'year': model_horizon,
'time': 'year',
'value': (100 * gdp_profile).round(),
'unit': 'GWa',
}) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
`light_demand` illustrates the data format for *MESSAGEix* parameters. It is a `pandas.DataFrame` containing three types of information in a specific format:
- A "value" column containing the numerical values for this parameter.
- A "unit" column.
- Other columns ("node", "commodity", "level", "time") that indicate the key to which each value applies.

| light_demand
# We use add_par for adding data to a MESSAGEix parameter
scenario.add_par("demand", light_demand) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
In order to define the input and output commodities of each technology, we define some common keys.
- **Input** quantities require `_origin` keys that specify where the inputs are *received from*.
- **Output** quantities require `_dest` keys that specify where the outputs are *transferred to*.

| year_df = scenario.vintage_and_active_years()
vintage_years, act_years = year_df['year_vtg'], year_df['year_act']
base = {
'node_loc': country,
'year_vtg': vintage_years,
'year_act': act_years,
'mode': 'standard',
'time': 'year',
'unit': '-',
}
base_input = make_df(base, node_origin=country, time_origin='year')
base_output = make_df(base, node_dest=country, time_dest='year') | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Working backwards along the Reference Energy System, we can add connections for the `bulb`. A light bulb…
- receives *input* in the form of the "electricity" *commodity* at the "final [energy]" *level*, and
- *outputs* the commodity "light" at the "useful [energy]" level.

The `value` in the input and output parameters is used to represent the efficiency of a technology (efficiency = output/input). For example, an input of 1.0 and an output of 1.0 for a technology shows that the efficiency of that technology is 100% in converting the input commodity to the output commodity.

| bulb_out = make_df(base_output, technology='bulb', commodity='light',
level='useful', value=1.0)
scenario.add_par('output', bulb_out)
bulb_in = make_df(base_input, technology='bulb', commodity='electricity',
level='final', value=1.0)
scenario.add_par('input', bulb_in) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Next, we parameterize the electrical `grid`, which…
- receives electricity at the "secondary" energy level.
- also outputs electricity, but at the "final" energy level (to be used by the light bulb).

Because the grid has transmission losses, only 90% of the input electricity is available as output.

| grid_efficiency = 0.9
grid_out = make_df(base_output, technology='grid', commodity='electricity',
level='final', value=grid_efficiency)
scenario.add_par('output', grid_out)
grid_in = make_df(base_input, technology='grid', commodity='electricity',
level='secondary', value=1.0)
scenario.add_par('input', grid_in) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
And finally, our power plants. The model does not include the fossil resources used as `input` for coal plants; however, costs of coal extraction are included in the parameter $variable\_cost$. | coal_out = make_df(base_output, technology='coal_ppl', commodity='electricity',
level='secondary', value=1.)
scenario.add_par('output', coal_out)
wind_out = make_df(base_output, technology='wind_ppl', commodity='electricity',
level='secondary', value=1.)
scenario.add_par('output', wind_out) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Operational Constraints and Parameters The model has a number of "reality" constraints, which relate built *capacity* (`CAP`) to available power, or the *activity* (`ACT`) of that technology. The **capacity constraint** limits the activity of a technology to the installed capacity multiplied by a capacity factor. The capacity factor is the fraction of installed capacity that can be active in a certain period (here the sub-annual time step *h*). $$\sum_{m} \text{ACT}_{n,t,y^V,y,m,h} \leq \text{duration_time}_{h} \cdot \text{capacity_factor}_{n,t,y^V,y,h} \cdot \text{CAP}_{n,t,y^V,y} \quad t \ \in \ T^{INV}$$ This requires us to provide the `capacity_factor` for each technology. Here, we call `make_df()` and `add_par()` in a loop to execute similar code for three technologies: | base_capacity_factor = {
'node_loc': country,
'year_vtg': vintage_years,
'year_act': act_years,
'time': 'year',
'unit': '-',
}
capacity_factor = {
'coal_ppl': 1,
'wind_ppl': 0.36,
'bulb': 1,
}
for tec, val in capacity_factor.items():
df = make_df(base_capacity_factor, technology=tec, value=val)
scenario.add_par('capacity_factor', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
The model can further be provided `technical_lifetime`s in order to properly manage deployed capacity and related costs via the **capacity maintenance** constraint:

$\text{CAP}_{n,t,y^V,y} \leq \text{remaining_capacity}_{n,t,y^V,y} \cdot \text{value} \quad \forall \quad t \in T^{INV}$

where `value` can take different forms depending on what time period is considered:

| Value | Condition |
|-------------------------------------|-----------------------------------------------------|
| $\Delta_y \text{historical_new_capacity}_{n,t,y^V}$ | $y$ is first model period |
| $\Delta_y \text{CAP_NEW}_{n,t,y^V}$ | $y = y^V$ |
| $\text{CAP}_{n,t,y^V,y-1}$ | $0 < y - y^V < \text{technical_lifetime}_{n,t,y^V}$ |

| base_technical_lifetime = {
'node_loc': country,
'year_vtg': model_horizon,
'unit': 'y',
}
lifetime = {
'coal_ppl': 20,
'wind_ppl': 20,
'bulb': 1,
}
for tec, val in lifetime.items():
df = make_df(base_technical_lifetime, technology=tec, value=val)
scenario.add_par('technical_lifetime', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Technological Diffusion and Contraction

We know from historical precedent that energy systems cannot be transformed instantaneously. Therefore, we use a family of dynamic constraints on activity and capacity. These constraints define the upper and lower limit of the domain of activity and capacity over time based on their value in the previous time step, an initial value, and growth/decline rates.

$\sum_{y^V \leq y,m} \text{ACT}_{n,t,y^V,y,m,h} \leq \text{initial_activity_up}_{n,t,y,h} \cdot \frac{ \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} - 1 } { growth\_activity\_up_{n,t,y,h} } + \Big( 1 + growth\_activity\_up_{n,t,y,h} \Big)^{|y|} \cdot \Big( \sum_{y^V \leq y-1,m} ACT_{n,t,y^V,y-1,m,h} + \sum_{m} historical\_activity_{n,t,y-1,m,h}\Big)$

This example limits the ability for technologies to **grow**. To do so, we need to provide `growth_activity_up` values for each technology that we want to model as being diffusion constrained. Here, we set this constraint at 10% per year.

| base_growth = {
'node_loc': country,
'year_act': model_horizon,
'time': 'year',
'unit': '-',
}
growth_technologies = [
"coal_ppl",
"wind_ppl",
]
for tec in growth_technologies:
df = make_df(base_growth, technology=tec, value=0.1)
scenario.add_par('growth_activity_up', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Defining an Energy Mix (Model Calibration)

To model the transition of an energy system, one must start with the existing system, which is defined by the parameters `historical_activity` and `historical_new_capacity`. These parameters define the energy mix before the model horizon. We begin by defining a few key values:
- how much useful energy was needed
- how much final energy was generated
- and the mix for different technologies

| historic_demand = 0.85 * demand_per_year
historic_generation = historic_demand / grid_efficiency
coal_fraction = 0.6
base_capacity = {
'node_loc': country,
'year_vtg': history,
'unit': 'GWa',
}
base_activity = {
'node_loc': country,
'year_act': history,
'mode': 'standard',
'time': 'year',
'unit': 'GWa',
} | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Then, we can define the **activity** and **capacity** in the historic period | old_activity = {
'coal_ppl': coal_fraction * historic_generation,
'wind_ppl': (1 - coal_fraction) * historic_generation,
}
for tec, val in old_activity.items():
df = make_df(base_activity, technology=tec, value=val)
scenario.add_par('historical_activity', df)
act_to_cap = {
'coal_ppl': 1 / 10 / capacity_factor['coal_ppl'] / 2, # 20 year lifetime
'wind_ppl': 1 / 10 / capacity_factor['wind_ppl'] / 2,
}
for tec in act_to_cap:
value = old_activity[tec] * act_to_cap[tec]
df = make_df(base_capacity, technology=tec, value=value)
scenario.add_par('historical_new_capacity', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Objective Function. The objective function drives the purpose of the optimization. Do we wish to seek maximum utility of the social planner, minimize carbon emissions, or something else? Classical IAMs seek to minimize total discounted system cost over space and time. $$\min \sum_{n,y \in Y^{M}} \text{interestrate}_{y} \cdot \text{COST_NODAL}_{n,y}$$ First, let's add the interest rate parameter. | scenario.add_par("interestrate", model_horizon, value=0.05, unit='-')
`COST_NODAL` is comprised of a variety of costs related to the use of different technologies. Investment Costs: Capital, or investment, costs are invoked whenever a new plant or unit is built. $$\text{inv_cost}_{n,t,y} \cdot \text{construction_time_factor}_{n,t,y} \cdot \text{CAP_NEW}_{n,t,y}$$ | base_inv_cost = {
'node_loc': country,
'year_vtg': model_horizon,
'unit': 'USD/kW',
}
# Adding a new unit to the library
mp.add_unit('USD/kW')
# in $ / kW (specific investment cost)
costs = {
'coal_ppl': 500,
'wind_ppl': 1500,
'bulb': 5,
}
for tec, val in costs.items():
df = make_df(base_inv_cost, technology=tec, value=val)
scenario.add_par('inv_cost', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Fixed O&M Costs: Fixed costs are only relevant as long as the capacity is active. This formulation allows including the potential cost savings from early retirement of installed capacity. $$\sum_{y^V \leq y} \text{fix_cost}_{n,t,y^V,y} \cdot \text{CAP}_{n,t,y^V,y}$$ | base_fix_cost = {
'node_loc': country,
'year_vtg': vintage_years,
'year_act': act_years,
'unit': 'USD/kWa',
}
# in $ / kW / year (every year a fixed amount is spent to cover part of the O&M costs
# based on the size of the plant, e.g. lighting, labor, scheduled maintenance, etc.)
costs = {
'coal_ppl': 30,
'wind_ppl': 10,
}
for tec, val in costs.items():
df = make_df(base_fix_cost, technology=tec, value=val)
scenario.add_par('fix_cost', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Variable O&M Costs: Variable Operation and Maintenance costs are associated with the costs of actively running the plant. Thus, they are not applied if a plant is on standby (i.e., constructed, but not currently in use). $$\sum_{\substack{y^V \leq y \\ m,h}} \text{var_cost}_{n,t,y^V,y,m,h} \cdot \text{ACT}_{n,t,y^V,y,m,h} $$ | base_var_cost = {
'node_loc': country,
'year_vtg': vintage_years,
'year_act': act_years,
'mode': 'standard',
'time': 'year',
'unit': 'USD/kWa',
}
# in $ / kWa (costs associated with the degradation of equipment when the plant is running,
# per unit of energy produced; 1 kW·year = 8760 kWh.
# Therefore this cost represents USD per 8760 kWh of energy). Do not confuse with the fixed O&M units.
costs = {
'coal_ppl': 30,
'grid': 50,
}
for tec, val in costs.items():
df = make_df(base_var_cost, technology=tec, value=val)
scenario.add_par('var_cost', df) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
A full model will also have costs associated with
- technologies (investment, fixed, variable costs)
- resource extraction: $\sum_{c,g} \ resource\_cost_{n,c,g,y} \cdot EXT_{n,c,g,y} $
- emissions
- land use (emulator): $\sum_{s} land\_cost_{n,s,y} \cdot LAND_{n,s,y}$

Time to Solve the Model

First, we *commit* the model structure and input data (sets and parameters). In the `ixmp` backend, this creates a new model version in the database, which is assigned a version number automatically:

| from message_ix import log
log.info('version number prior to commit: {}'.format(scenario.version))
scenario.commit(comment='basic model of Westeros electrification')
log.info('version number prior committing to the database: {}'.format(scenario.version)) | INFO:message_ix:version number prior to commit: 0
INFO:message_ix:version number prior committing to the database: 45
| Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
An `ixmp` database can contain many scenarios, and possibly multiple versions of the same model and scenario name. These are distinguished by unique version numbers. To make it easier to retrieve the "correct" version (e.g., the latest one), you can set a specific scenario as the default version to use if the "Westeros Electrified" model is loaded from the `ixmp` database. | scenario.set_as_default()
scenario.solve()
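# (added note) Besides the scalar objective retrieved below, multidimensional solution variables
# can be inspected after solving, e.g. scenario.var('ACT') returns a DataFrame with the index
# sets plus 'lvl' and 'mrg' columns (based on the ixmp API; shown here only as a pointer).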
scenario.var('OBJ')['lvl'] | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Plotting Results. We make use of some custom code for plotting the results; see `tools.py` in the tutorial directory. | from tools import Plots
p = Plots(scenario, country, firstyear=model_horizon[0]) | _____no_output_____ | Apache-2.0 | westeros/westeros_baseline.ipynb | fschoeni/homework_no3 |
Activity. How much energy is generated in each time period from the different potential sources? | p.plot_activity(baseyear=True, subset=['coal_ppl', 'wind_ppl'])
Capacity. How much capacity of each plant is installed in each period? | p.plot_capacity(baseyear=True, subset=['coal_ppl', 'wind_ppl'])
Electricity Price. And how much does the electricity cost? These prices are in fact **shadow prices** taken from the **dual variables** of the model solution. They reflect the marginal cost of electricity generation (i.e., the additional cost to the system of supplying one more unit of electricity), which is in fact the marginal cost of the most expensive operating generator. Note the price drop when the most expensive technology is no longer in the system. | p.plot_prices(subset=['light'], baseyear=True)
Close the connection to the database. When working with local HSQLDB database instances, you cannot connect to one database from multiple Jupyter notebooks (or processes) at the same time. If you want to easily switch between notebooks with connections to the same `ixmp` database, you need to close the connection in one notebook before initializing the platform using `ixmp.Platform()` in another notebook. After having closed the database connection, you can reopen it using ```mp.open_db()``` | mp.close_db()
Set Up. Today you will create partial dependence plots and practice building insights with data from the [Taxi Fare Prediction](https://www.kaggle.com/c/new-york-city-taxi-fare-prediction) competition. We have again provided code to do the basic loading, review and model-building. Run the cell below to set everything up: | import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Environment Set-Up for feedback system.
from learntools.core import binder
binder.bind(globals())
from learntools.ml_explainability.ex3 import *
print("Setup Complete")
# Data manipulation code below here
data = pd.read_csv('../input/new-york-city-taxi-fare-prediction/train.csv', nrows=50000)
# Remove data with extreme outlier coordinates or negative fares
data = data.query('pickup_latitude > 40.7 and pickup_latitude < 40.8 and ' +
'dropoff_latitude > 40.7 and dropoff_latitude < 40.8 and ' +
'pickup_longitude > -74 and pickup_longitude < -73.9 and ' +
'dropoff_longitude > -74 and dropoff_longitude < -73.9 and ' +
'fare_amount > 0'
)
y = data.fare_amount
base_features = ['pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude']
X = data[base_features]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
first_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(train_X, train_y)
print("Data sample:")
data.head()
data.describe() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Question 1. Here is the code to plot the partial dependence plot for pickup_longitude. Run the following cell. | from matplotlib import pyplot as plt
from pdpbox import pdp, get_dataset, info_plots
feat_name = 'pickup_longitude'
pdp_dist = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name)
pdp.pdp_plot(pdp_dist, feat_name)
plt.show() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Why does the partial dependence plot have this U-shape? Does your explanation suggest what shape to expect in the partial dependence plots for the other features? Create all other partial plots in a for-loop below (copying the appropriate lines from the code above). | for feat_name in base_features:
pdp_dist = _
_
plt.show() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Do the shapes match your expectations for what shapes they would have? Can you explain the shape now that you've seen them? Uncomment the following line to check your intuition. | # q_1.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Q2. Now you will run a 2D partial dependence plot. As a reminder, here is the code from the tutorial. ```inter1 = pdp.pdp_interact(model=my_model, dataset=val_X, model_features=feature_names, features=['Goal Scored', 'Distance Covered (Kms)'])pdp.pdp_interact_plot(pdp_interact_out=inter1, feature_names=['Goal Scored', 'Distance Covered (Kms)'], plot_type='contour')plt.show()``` Create a 2D plot for the features `pickup_longitude` and `dropoff_longitude`, and plot it appropriately. What do you expect it to look like? | # Add your code here
| _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Uncomment the line below to see the solution and explanation for how one might reason about the plot shape. | # q_2.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Question 3. Consider a ride starting at longitude -73.92 and ending at longitude -74. Using the graph from the last question, estimate how much money the rider would have saved if they'd started the ride at longitude -73.98 instead. | savings_from_shorter_trip = _
q_3.check() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
For a solution or hint, uncomment the appropriate line below. | # q_3.hint()
# q_3.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Question 4. In the PDPs you've seen so far, location features have primarily served as a proxy to capture distance traveled. In the permutation importance lessons, you added the features `abs_lon_change` and `abs_lat_change` as a more direct measure of distance. Create these features again here. You only need to fill in the top two lines. Then run the following cell. **After you run it, identify the most important difference between this partial dependence plot and the one you got without absolute value features. The code to generate the PDP without absolute value features is at the top of this code cell.** --- | # This is the PDP for pickup_longitude without the absolute difference features. Included here to help compare it to the new PDP you create
feat_name = 'pickup_longitude'
pdp_dist_original = pdp.pdp_isolate(model=first_model, dataset=val_X, model_features=base_features, feature=feat_name)
pdp.pdp_plot(pdp_dist_original, feat_name)
plt.show()
# create new features
data['abs_lon_change'] = _
data['abs_lat_change'] = _
features_2 = ['pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'abs_lat_change',
'abs_lon_change']
X = data[features_2]
new_train_X, new_val_X, new_train_y, new_val_y = train_test_split(X, y, random_state=1)
second_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(new_train_X, new_train_y)
feat_name = 'pickup_longitude'
pdp_dist = pdp.pdp_isolate(model=second_model, dataset=new_val_X, model_features=features_2, feature=feat_name)
pdp.pdp_plot(pdp_dist, feat_name)
plt.show()
q_4.check() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Uncomment the lines below to see a hint or the solution (including an explanation of the important differences between the plots). | # q_4.hint()
# q_4.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Question 5Consider a scenario where you have only 2 predictive features, which we will call `feat_A` and `feat_B`. Both features have minimum values of -1 and maximum values of 1. The partial dependence plot for `feat_A` increases steeply over its whole range, whereas the partial dependence plot for feature B increases at a slower rate (less steeply) over its whole range.Does this guarantee that `feat_A` will have a higher permutation importance than `feat_B`. Why or why not?After you've thought about it, uncomment the line below for the solution. | # q_5.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Q6

The code cell below does the following:
1. Creates two features, `X1` and `X2`, having random values in the range [-2, 2].
2. Creates a target variable `y`, which is always 1.
3. Trains a `RandomForestRegressor` model to predict `y` given `X1` and `X2`.
4. Creates a PDP plot for `X1` and a scatter plot of `X1` vs. `y`.

Do you have a prediction about what the PDP plot will look like? Run the cell to find out. Modify the initialization of `y` so that our PDP plot has a positive slope in the range [-1,1], and a negative slope everywhere else. (Note: *you should only modify the creation of `y`, leaving `X1`, `X2`, and `my_model` unchanged.*)

| import numpy as np
from numpy.random import rand
n_samples = 20000
# Create array holding predictive feature
X1 = 4 * rand(n_samples) - 2
X2 = 4 * rand(n_samples) - 2
# Create y. you should have X1 and X2 in the expression for y
y = np.ones(n_samples)
# create dataframe because pdp_isolate expects a dataFrame as an argument
my_df = pd.DataFrame({'X1': X1, 'X2': X2, 'y': y})
predictors_df = my_df.drop(['y'], axis=1)
my_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(predictors_df, my_df.y)
pdp_dist = pdp.pdp_isolate(model=my_model, dataset=my_df, model_features=['X1', 'X2'], feature='X1')
# visualize your results
pdp.pdp_plot(pdp_dist, 'X1')
plt.show()
q_6.check() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
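For reference, here is one construction of `y` that would give the requested shape (a sketch under the stated ranges, not necessarily the official solution): inside [-1, 1] the target follows `X1` (slope +1), and outside that interval it slopes downward while staying continuous at the endpoints.

```python
# Hypothetical sketch: piecewise target with positive slope on [-1, 1] and negative slope elsewhere.
y = np.where(np.abs(X1) <= 1, X1, np.sign(X1) * 2 - X1)
```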
Uncomment the lines below for a hint or solution | # q_6.hint()
# q_6.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
Question 7: Create a dataset with 2 features and a target, such that the PDP of the first feature is flat, but its permutation importance is high. We will use a RandomForest for the model. *Note: You only need to supply the lines that create the variables X1, X2 and y. The code to build the model and calculate insights is provided.* | import eli5
from eli5.sklearn import PermutationImportance
n_samples = 20000
# Create array holding predictive feature
X1 = _
X2 = _
# Create y. you should have X1 and X2 in the expression for y
y = _
# create dataframe because pdp_isolate expects a dataFrame as an argument
my_df = pd.DataFrame({'X1': X1, 'X2': X2, 'y': y})
predictors_df = my_df.drop(['y'], axis=1)
my_model = RandomForestRegressor(n_estimators=30, random_state=1).fit(predictors_df, my_df.y)
pdp_dist = pdp.pdp_isolate(model=my_model, dataset=my_df, model_features=['X1', 'X2'], feature='X1')
pdp.pdp_plot(pdp_dist, 'X1')
plt.show()
perm = PermutationImportance(my_model).fit(predictors_df, my_df.y)
q_7.check()
# show the weights for the permutation importance you just calculated
eli5.show_weights(perm, feature_names = ['X1', 'X2'])
# Uncomment the following lines for the hint or solution
# q_7.hint()
# q_7.solution() | _____no_output_____ | Apache-2.0 | notebooks/ml_explainability/raw/ex3_partial_plots.ipynb | Mattjez914/Blackjack_Microchallenge |
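One dataset that exhibits this behaviour (a sketch, not necessarily the official solution) uses an interaction term: because `X2` is symmetric around zero, averaging over `X2` makes the partial dependence of `X1` flat, yet shuffling `X1` destroys the `X1 * X2` signal, so its permutation importance stays high. It reuses `n_samples` and `rand` from the earlier cells.

```python
# Hypothetical sketch: flat PDP for X1 but high permutation importance via an interaction.
X1 = 4 * rand(n_samples) - 2
X2 = 4 * rand(n_samples) - 2
y = X1 * X2
```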
Simple Model: 3 Inception Modules

| true training data | Corr Training Data | Test Accuracy | Test Accuracy 0-1 |
| ------------------ | ------------------ | ------------- | ----------------- |
| 100 | 47335 | 15 | 75 |
| 500 | 47335 | 16 | 80 |
| 1000 | 47335 | 17 | 83 |
| 2000 | 47335 | 19 | 92 |
| 4000 | 47335 | 20 | 95 |
| 6000 | 47335 | 20 | 96 |
| 8000 | 47335 | 20 | 96 |
| 12665 | 47335 | 20 | 98 |

| Total Training Data | Training Accuracy |
| ------------------- | ----------------- |
| 47435 | 100 |
| 47835 | 100 |
| 48335 | 100 |
| 49335 | 100 |
| 51335 | 100 |
| 53335 | 100 |
| 55335 | 100 |
| 60000 | 100 |

Mini-Inception Network: 8 Inception Modules

| true training data | Corr Training Data | Test Accuracy | Test Accuracy 0-1 |
| ------------------ | ------------------ | ------------- | ----------------- |
| 100 | 47335 | 14 | 69 |
| 500 | 47335 | 19 | 90 |
| 1000 | 47335 | 19 | 92 |
| 2000 | 47335 | 20 | 95 |
| 4000 | 47335 | 20 | 97 |
| 6000 | 47335 | 20 | 97 |
| 8000 | 47335 | 20 | 98 |
| 12665 | 47335 | 20 | 99 |

| Total Training Data | Training Accuracy |
| ------------------- | ----------------- |
| 47435 | 100 |
| 47835 | 100 |
| 48335 | 100 |
| 49335 | 100 |
| 51335 | 100 |
| 53335 | 100 |
| 55335 | 100 |
| 60000 | 100 |
 | _____no_output_____ | MIT | CODS_COMAD/SIN/MNIST/cnn_2layers_1000.ipynb | lnpandey/DL_explore_synth_data |
|
Statistical Analysis on NEMSIS
BMI 6106 - Final Project
Project by: Anwar Alsanea, Luz Gabriela Iorg, Jorge Rojas

Abstract

The National Emergency Medical Services Information System (NEMSIS) is a national database that contains Emergency Medical Services (EMS) data collected for the United States. In this project, we address multiple questions to determine trends and major factors in EMS patient care. Objectives included predicting gender and age based on other factors such as incident location and type of injury. To approach our objectives, statistical analyses were applied using the program R, including linear regressions and principal component analysis. Our results show no significance when it comes to predicting gender from these factors. As for age, some factors were significant in predicting the patient's age. Component analysis shows low variance across all factors included in this data. We conclude that more data points, with more numerical variables, should be included and analyzed to provide better EMS patient care. Further analysis is needed to conclude the best approach to better determine EMS patient care with more data.

Introduction

The National Emergency Medical Services Information System (NEMSIS) is a national database that contains Emergency Medical Services (EMS) data collected for the United States. The data holds information on patient care from 9-1-1 calls. The goal of NEMSIS in collecting this database across the states is to evaluate and analyze EMS needs and improve the performance of patient care (NEMSIS, 2017). The data is collected for all states; however, it does not specify in which state each event was collected, so we are unable to compare between states. In this project, we address multiple questions to determine trends and major factors in EMS patient care. Our first objective was to examine how the parameters related to gender: which of the parameters or factors had the highest effect on gender, and did gender play an important role in determining better EMS patient care? Our second objective was to examine the patient's age against the factors: can we use age to assist in determining the best approach to EMS patient care? Finally, we analyzed the data as a whole and determined whether any factors or parameters should be highly considered when developing new EMS patient care procedures.

Methods

To approach each of our objectives, statistical analyses were applied for each objective.

> Data: Data was obtained from the US Department of Transportation National Highway Traffic Safety Administration (NHTSA) (NHTSA, 2006). The data was first imported to Microsoft SQL Server to clean up the delimiters. Other changes included renaming all columns so they could be used easily in R. The original dataset contains 29 million events, which were narrowed down to 10,000 events for simplification.
>The data was saved as a csv file that is included with this report.
>The original data set contains 44 National elements (i.e., parameters, factors).
However in this analysis we were interested in analysing and examining 8 of the elements that included:>- Age>- Gender>- Primary method of payment>- Incident location type>- Primary symptom>- Cause of injury>- Incident patient disposition>- Complaint reported by dispatch>Each parameter is explained below:> *Age* (age.in.years) (numeric) This column was calculated from Date of Birth provided in the original dataset, this conversion was performed in SQL.>*Gender* (categorical)The original data provided gender in terms values that represented each gender (655 for female and 650 for male). These values were taken in R and converted to strings of "male and "female">*primary.method.of.payment* (categorical)values and their representations are as follows:>- 720 Insurance>- 725 Medicaid>- 730 Medicare>- 735 Not billed>- 740 Other government>- 745 Self pay>- 750 Workes compensaiont>*incident.location.type* (categorical)values and their representations are as follows:>- 1135 Home or residence>- 1140 Farm>- 1145 Mine or quarry>- 1150 Industrial place>- 1155 Recreation or sport place>- 1160 street or highway>- 1165 public building>- 1170 business/resturaunts>- 1175 Health care facility (hospital, clinic, nursing homes)>- 1180 Nursing homes or jail>- 1185 Lake, river, ocean>- 1190 all other locations>*primary.symptom* (categorical)values and their representations are as follows:>- 1405 Bleeding>- 1410 Breathing Problem>- 1415 Change in responsiveness >- 1425 Death>- 1420 Choking>- 1430 Device/Equipment Problem >- 1435 Diarrhea>- 1440 Drainage/Discharge>- 1445 Fever>- 1450 Malaise>- 1455 Mass/Lesion>- 1460 Mental/Psych>- 1465 Nausea/Vomiting>- 1470 None>- 1475 Pain>- 1480 Palpitations>- 1485 Rash/Itching>- 1490 Swelling>- 1495 Transport Only>- 1505 Wound>- 1500 Weakness>*cause.of.injury* (categorical)values and their representations are as follows:>- 1885 Bites (E906.0)>- 9505 Bicycle Accident (E826.0)>- 9520 Child battering (E967.0)>- 9530 Drug poisoning (E85X.0)>- 9540 Excessive Cold (E901.0)>- 9550 Falls (E88X.0)>- 9560 Firearm assault (E965.0)>- 9570 Firearm self inflicted (E955.0)>- 9580 Machinery accidents (E919.0)>- 9590 Motor Vehicle non-traffic accident (E82X.0) >- 9600 Motorcycle Accident (E81X.1)>- 9610 Pedestrian traffic accident (E814.0)>- 9620 Rape (E960.1)>- 9630 Stabbing/Cutting Accidental (E986.0)>- 9640 Struck by Blunt/Thrown Object (E968.2) >- 9650 Water Transport accident (E83X.0)>- 9500 Aircraft related accident (E84X.0) >- 9515 Chemical poisoning (E86X.0)>- 9525 Drowning (E910.0)>- 9535 Electrocution (non-lightning) (E925.0) >- 9545 Excessive Heat (E900.0)>- 9555 Fire and Flames (E89X.0)>- 9565 Firearm injury (accidental) (E985.0)>- 9575 Lightning (E907.0)>- 9585 Mechanical Suffocation (E913.0)>- 9595 Motor Vehicle traffic accident (E81X.0) >- 9605 Non-Motorized Vehicle Accident (E848.0) >- 9615 Radiation exposure (E926.0)>- 9625 Smoke Inhalation (E89X.2)>- 9635 Stabbing/Cutting Assault >- 9645 Venomous stings (plants, animals) (E905.0)>*incident.patient.disposition* (categorical)values and their representations are as follows:>- 4815 Cancelled>- 4825 No Patient Found>- 4835 Patient Refused Care>- 4845 Treated, Transferred Care>- 4855 Treated, Transported by Law Enforcement>- 4820 Dead at Scene>- 4830 No Treatment Required>- 4840 Treated and Released>- 4850 Treated, Transported by EMS>- 4860 Treated, Transported by Private Vehicle>*complaint.reported.by.dispatch* (categorical)values and their representations are as follows:>- 400 Abdominal Pain>- 410 Animal Bite>- 420 Back Pain>- 
430 Burns>- 440 Cardiac Arrest>- 450 Choking>- 460 Diabetic Problem>- 470 Electrocution>- 480 Fall Victim>- 490 Heart Problems>- 500 Hemorrhage/Laceration 510 Ingestion/Poisoning>- 520 Psychiatric Problem>- 530 Stab/Gunshot Wound>- 540 Traffic Accident>- 550 Unconscious/Fainting>- 560 Transfer/Interfacility/Palliative Care>- 405 Allergies>- 415 Assault>- 425 Breathing Problem>- 435 CO Poisoning/Hazmat>- 445 Chest Pain>- 455 Convulsions/Seizure>- 465 Drowning>- 475 Eye Problem>- 485 Headache>- 495 Heat/Cold Exposure>- 505 Industrial Accident/Inaccessible Incident/Other Entrapments (non-vehicle) >- 515 Pregnancy/Childbirth>- 525 Sick Person>- 535 Stroke/CVA>- 545 Traumatic Injury>- 555 Unknown Problem Man Down 565 MCI (Mass Casualty Incident) > Statistical Analysis:Statistical analysis and visual representations were produced using the program R (R Development Core Team, 2008). Packages used in this report were:- FactoMineR- factoextra- corrplot- dplyr- ggplot2- modelr- PCAmixdata>A code is included in this report to install those packages if needed.> tests:Linear models and regression were used to approach the first two objectives. Principle component analysis was used to analyze the data as a whole. Results and Discussion The goal of this study was to analyze a subset of characteristics or factors from the NEMSIS 911 call events. Code Outline:- Input and output data- Create vectors, handle variables, and perform other basic functions (remove NAs)- Tackle data structures such as matrices, lists, factors, and data frames- Build statistical models with linear regressions and analysis of variance- Create a variety of graphic displays- Finding clusters in data Packages and libraries installation: | install.packages(c("FactoMineR", "factoextra"))
install.packages("corrplot")
install.packages("PCAmixdata")
library(dplyr)
library(ggplot2)
library(gridExtra)
library("FactoMineR")
library("corrplot")
library("factoextra")
library(modelr)
library(broom)
library("PCAmixdata")
require(stats)
#require(pls) | _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
Data: The file to import is saved under the name events_cleaned_v3.txt. The code below imports the data into the notebook. | events = read.table(file = "events_cleaned_v3.txt", sep="|", header = TRUE, stringsAsFactors = F)
head(events, n=7)
#dim(events) | _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
Data cleaning:- Create vectors, handle variables, and perform other basic functions (remove NAs) | event1 = select(events, age.in.years, gender, primary.method.of.payment,
incident.location.type, primary.symptom,
cause.of.injury, incident.patient.disposition, complaint.reported.by.dispatch
)
event1[event1 < 0] <- NA
#head(event1, n=50)
event2 = na.exclude(event1)
dim(event2)
head(event2)
| _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
- Tackle data structures manipulation such as matrices, lists, factors, and data frames. | str(event2)
#Converting gender as factor:
event2$gender <-as.factor(event2$gender)
levels(event2$gender) <- c("male", "female")
#Converting dataframe as factor:
event2 <- data.frame(lapply(event2, as.factor))
#Converting age.in.years as numeric:
event2$age.in.years <-as.numeric(event2$age.in.years)
#Checking summaries
summary(event2)
contrasts(event2$gender)
head(event2)
str(event2) | 'data.frame': 292 obs. of 8 variables:
$ age.in.years : num 12 65 69 45 36 41 21 53 15 73 ...
$ gender : Factor w/ 2 levels "male","female": 1 1 2 2 2 1 2 2 2 2 ...
$ primary.method.of.payment : Factor w/ 6 levels "720","725","730",..: 1 1 3 5 4 4 2 2 2 3 ...
$ incident.location.type : Factor w/ 8 levels "1135","1150",..: 6 6 1 1 1 1 3 1 1 7 ...
$ primary.symptom : Factor w/ 13 levels "1405","1410",..: 3 11 1 1 9 9 1 9 9 12 ...
$ cause.of.injury : Factor w/ 20 levels "1885","9500",..: 12 6 6 19 6 6 11 6 6 6 ...
$ incident.patient.disposition : Factor w/ 4 levels "4835","4840",..: 4 4 4 2 4 4 4 4 4 4 ...
$ complaint.reported.by.dispatch: Factor w/ 19 levels "400","410","415",..: 19 19 16 3 13 13 15 8 8 8 ...
| MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
Data Analysis: Build statistical models with linear regressions and analysis of variance Regressions: For our analysis, we are using the standard cut off at alpha = 0.05 Linear regression to predict gender :The first test is a generalized linear model to predict gender based on the remanining factors.The Null hypothesis is that the factors have no effect on gender. The alternative hypothesis is that there is an effect. | model = glm(gender ~. -gender, data= event2, family= binomial)
summary(model)
#Gender (outcome variable, Y) and the rest of the variables (predictors, X)
#Null hypothesis (H0): the coefficients are equal to zero (i.e., no relationship between x and y)
#Alternative Hypothesis (Ha): the coefficients are not equal to zero (i.e., there is some relationship between x and y)
#There is not enough evidence to say that there is a relationship between gender and the predictors.
#the p-values for the intercept and the predictor variable are not significant, We can NOT reject the null hypothesis.
##further Interpretation:
#From the P value numbers we can say that only primary.method.of.payment745 (Self Pay) and
#primary.symptom(1500 and 1505) are significantly associated with the caller’s gender.
#All of the other variables do not seem to show any relationship to the caller’s gender.
#The coefficient estimate of the variable primary.method.of.payment745 is b = -1.779e+00, which is negative.
#This means that a if the caller (or patient) is Self Pay, then
#it is associated with a decreased probability of being a female.
#primary.symptom1500 (Weakness) b = 1.411e+00 which is positive.
#primary.symptom1505 (Wound) b = 1.543e+00 which is positive.
#This means that symptoms of weakness and wounds are
#associated with a increased probability of being a female.
#BUT IT IS NOT TRUE BECAUSE THERE IS NOT A SIGNIFICANT ASSOCIATION AMONG VARIABLES!
#*authors notes and future research needed to prove such claims. | Warning message:
“glm.fit: fitted probabilities numerically 0 or 1 occurred” | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
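For reference, the model fit above is a binomial GLM (logistic regression); with `female` as the modeled level (the second factor level), it estimates the standard formulation

$$\log\frac{P(\text{gender}=\text{female})}{1-P(\text{gender}=\text{female})} = \beta_0 + \sum_{j}\beta_j x_j, \qquad H_0:\ \beta_j = 0 \text{ for each predictor } x_j.$$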
Linear regression results show that most factors had a P > 0.05, in which we have to accept the null hypothesis that the factors do not have an effect on gender and cannot predict gender. Except for primary.method.of.payment745 (which is Self Pay) and primary.symptom(1500 and 1505) are significantly associated with the caller’s gender (ie P < 0.05).All of the other variables do not show any effect on the caller’s gender ( P > 0.05).The coefficient estimate of the variable primary.method.of.payment745 is b = -1.779e+00, which is negative. This means that a if the caller (or patient) is Self Paid, then it is associated with a decreased probability of being a female. However this coeffiecient is still too low to have significance.primary.symptom1500 (Weakness) b = 1.411e+00 which is positive.primary.symptom1505 (Wound) b = 1.543e+00 which is positive.This means that symptoms of weakness and wounds are associated with a increased probability of being a female. Again, the values are too low to be significant. We however conclude that this information is not sufficient to assist EMS patient care procedures, further data needs to be collected in order to determine if EMS patient care can be improved by gender as a dependent variable. Linear regression to predict age :Our second test is to predict using age as the independent variable. The null hypothesis is that age cannot be predicted by the other variable. The alternative hypothesis is that other variables can act as independent variable that can predict age. | model2 = lm(age.in.years ~. -age.in.years, data= event2)
summary(model2)
#age.in.years (outcome variable, Y) and the rest of the variables (predictors, X)
#Null hypothesis (H0): the coefficients are equal to zero (i.e., no relationship between x and y)
#Alternative Hypothesis (Ha): the coefficients are not equal to zero (i.e., there is some relationship between x and y)
#There is enough evidence to say that there is a weak association between age.in.years and the predictors.
#the p-values for the intercept and the predictor variable are slightly significant, We can reject the null hypothesis.
| _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
Our results show more factors having an effect on age than on gender. Primary methods of payment 725 and 730 (Medicaid and Medicare) had high significance at P < 0.05, which is expected given that Medicare largely covers patients over 65. Primary symptom 1500 (weakness) was significant for age at P < 0.05. Weakness is a symptom that can describe more than one condition; it is, however, mostly used to describe symptoms experienced at older ages. We suggest that further information be included in the primary symptom factor to be able to accurately examine and develop enhanced EMS patient care services. Causes of injury 9565 and 9605 (firearm injury and non-motorized vehicle accident, respectively) showed high significance with respect to age at P < 0.05, so we reject the null hypothesis for them. Other factors had a P value > 0.05, for which we accept the null hypothesis that they have no effect on age. Regression assumptions: | par(mfrow = c(2, 2))
plot(model2)
#### Linearity of the data (Residuals vs Fitted).
#There is no pattern in the residual plot. This suggests that we can assume linear relationship
#between the predictors and the outcome variables.
#### Normality of residuals (Normal Q-Q plot).
#All the points fall approximately along the reference line, so we can assume normality.
#### Homogeneity of residuals variance (Scale-Location).
#It can be seen that the variability (variances) of the residual points does not quite follows a horizontal
#line with equally spread points, suggesting non-constant variances in the residuals errors
#(or the presence of some heteroscedasticity).
#To reduce the heteroscedasticity problem we used the log transformation of the outcome variable (age.in.years, (y)).
#model3 = lm(log(age.in.years) ~. -age.in.years, data= event2)
#### Independence of residuals error terms (Residuals vs Leverage).
#There are not drastic outliers in our data. | Warning message:
“not plotting observations with leverage one:
13, 39, 65, 67, 82, 84, 105, 153, 239, 243, 258, 290, 291”Warning message:
“not plotting observations with leverage one:
13, 39, 65, 67, 82, 84, 105, 153, 239, 243, 258, 290, 291” | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
Linearity of the data (Residuals vs Fitted plot): there is no pattern in the residual plot, which suggests that we can assume a linear relationship between the predictors and the outcome variable.
Normality of residuals (Normal Q-Q plot): all the points fall approximately along the reference line, so we can assume normality.
Homogeneity of residual variance (Scale-Location plot): the variability (variance) of the residual points does not quite follow a horizontal line with equally spread points, suggesting non-constant variance in the residual errors (i.e., the presence of some heteroscedasticity). To reduce the heteroscedasticity problem we used a log transformation of the outcome variable (age.in.years), shown next.
Independence of residual error terms (Residuals vs Leverage plot): there are no drastic outliers in our data.
Reducing heteroscedasticity: | #Transformed Regression and new plot:
model3 = lm(log(age.in.years) ~. -age.in.years, data= event2)
plot(model3, 3)
#heteroscedasticity has been improved.
| Warning message:
“not plotting observations with leverage one:
13, 39, 65, 67, 82, 84, 105, 153, 239, 243, 258, 290, 291” | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
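Background for reading the coefficients of the log-transformed model summarized below (this is the standard interpretation of a log response, not something specific to this dataset): for

$$\log(\text{age}) = \beta_0 + \sum_j \beta_j x_j,$$

a one-unit increase in a predictor (or switching a dummy level on) multiplies the model's predicted age by a factor of $e^{b}$, which is approximately $1 + b$ for small coefficients $b$.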
Linear regression for age after log transformation:After the noticeable reduced heteroscedasticity in the data after using the log transformation, we examine the linear model again: | summary(model3)
#After the log transformation of age, the p-values for the intercept and the predictor variables has
#become more significant, hence indicating a stronger association between age.in.years and the predictors.
###Interpretation:
#From the P value numbers we can say that primary.method.of.payment(725 and 730),
#incident.location.type1170, primary.symptom (1410 and 1500), cause.of.injury(9565, 9600, and 9605),
#and complaint.reported.by.dispatch(485 and 520), are significantly associated with the caller’s Age.
#The coefficient estimate of the variables are:
#primary.method.of.payment725 (Medicaid) b = -0.183656, which is negative.
#primary.method.of.payment730(Medicare) b = 0.476600 which is positive.
#This means that as age increases the probability of being on Medicaid decreases;
#but the probability of being on Medicare increases as age increases.
#incident.location.type1170(Trade or service (business, bars, restaurants, etc)) b = 0.396803, which is positive.
#This means that as age increases the probability of the incident happening at a business, bars,
#restaurants, etc., increases.
#primary.symptom1410 (Breathing Problem) b = -0.854654, which is negative.
#This means that Breathing Problem are more prevalent among younger people (perhaps among babies).
#primary.symptom1500 (Weakness) b = 0.370141 which is positive.
#This means that as age increases the probability of the primary symptom being "Weakness" increases.
#complaint.reported.by.dispatch485(Headache) b = -2.192445, which is negative.
#complaint.reported.by.dispatch520(Psychiatric Problem) b = -1.606781, which is negative.
#This means that if the complaint.reported.by.dispatch is a "Headache" or a "Psychiatric Problem",
#is associated with an increased probability of being a younger person.
#cause.of.injury9565 (Firearm injury) b = -2.458792, which is negative.
#cause.of.injury9505 (Bicycle Accident) b = -2.166411, which is negative.
##cause.of.injury9600 (Motorcycle Accident) b = -1.344680, which is negative.
#This means that accidents involving Firearms, Motorcycle, and Bicycles are more prevalent among younger people.
| _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
After the log transformation of age, the p-values for the intercept and the predictor variables has become more significant, hence indicating a stronger association between age.in.years and the predictors. *Interpretation:*From the P value numbers we can say that primary method of payment(725 and 730), incident location type 1170, primary symptom (1410 and 1500), cause of injury(9565, 9600, and 9605), and complaint reported by dispatch(485 and 520), are significantly associated with the caller’s Age. ( ie P << 0.05, in which we reject the null hypothesis).*The coefficient estimate of the variables are: *primary.method.of.payment725 (Medicaid) b = -0.183656, which is negative.primary.method.of.payment730(Medicare) b = 0.476600 which is positive.This means that as age increases the probability of being on Medicaid decreases; but the probability of being on Medicare increases as age increases. Which has been shown to be true in the first model as well.incident.location.type1170(Trade or service (business, bars, restaurants, etc)) b = 0.396803, which is positive.This means that as age increases the probability of the incident happening at a business, bars, restaurants, etc., increases. primary.symptom1410 (Breathing Problem) b = -0.854654, which is negative.This means that Breathing Problem are more prevalent among younger people (perhaps among babies). primary.symptom1500 (Weakness) b = 0.370141 which is positive. This means that as age increases the probability of the primary symptom being "Weakness" increases. complaint.reported.by.dispatch485(Headache) b = -2.192445, which is negative. complaint.reported.by.dispatch520(Psychiatric Problem) b = -1.606781, which is negative.This means that if the complaint.reported.by.dispatch is a "Headache" or a "Psychiatric Problem", is associated with an increased probability of being a younger person. cause.of.injury9565 (Firearm injury) b = -2.458792, which is negative.cause.of.injury9505 (Bicycle Accident) b = -2.166411, which is negative.cause.of.injury9600 (Motorcycle Accident) b = -1.344680, which is negative.This means that accidents involving Firearms, Motorcycle, and Bicycles are more prevalent among younger people. Data visulization:In order to examine those trends, the log transfomation of age was plotted against cause of injury and incident location as shown below: Component Analysis: FAMD Our data contains both quantitative (numeric) and qualitative (categorical) variables, the best tool to analyze similarity between individuals and the association between all variables is the "Factor analysis of mixed data" (FAMD), from the FactoMineR package. Quantitative and qualitative variables are normalized during the analysis in order to balance the influence of each set of variables, (FAMD does it internally). | res.famd <- FAMD(event2, ncp=5, graph = FALSE)
summary(res.famd)
#About 5% of the variation is explained by this first eigenvalue, which is the first dimension.
#Based on the contribution plots, the variables plots, and the significant categories,
#we selected the next varibles for our simpler model:
relevant = select(event2, age.in.years, cause.of.injury, complaint.reported.by.dispatch,
primary.method.of.payment, incident.location.type,
)
improv_model = lm(log(age.in.years) ~. -age.in.years, data= relevant)
#summary(improv_model)
#Analysis and Comparison of models:
AIC(model3, improv_model)
glance(model3) %>%
dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
glance(improv_model) %>%
dplyr::select(adj.r.squared, sigma, AIC, BIC, p.value)
#Looking at the models' summaries, we can see that model3 and improv_model have a similar adjusted R2,
#but model3's is slightly higher. This means that improv_model is a little bit better at exaining the outcome (age.in.years).
#The two models have exactly the same (rounded) amount of residual standard error (RSE or sigma = 0.54).
#However, improv_model is more simple than model3 because it incorporates less variables.
#All things equal, the simple model is always better.
#The AIC and the BIC of the improv_model are lower than those of the model3 (AIC= 527.9 vs 521.8).
#In model comparison strategies, the model with the lowest AIC and BIC scores is preferred.
#Finally, the F-statistic P-value of improv_model is lower than the one of the model3.
#This means that the improv_model is statistically more significant compared to model3.
#In this way, we can conclude that improv_model is the best model and should be used for further analyses.
| _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
Visual representations for variables: | #Plots for the frequency of the variables' categories
for (i in 2:8) {
plot(event2[,i], main=colnames(event2)[i],
ylab = "Count", xlab = "Categories", col="#00AFBB", las = 2)
}
#Some of the variable categories have a very low frequency. These variables could distort the analysis.
#scree plot
a = fviz_screeplot(res.famd, addlabels = TRUE, ylim = c(0, 5))
#19% of the information (variances) contained in the data are retained by the first five principal components.
#The percentage value of our variables explains less than desired of the variance;
#The low frequency variables could be distorting the analysis.
# Plot of variables
b = fviz_famd_var(res.famd, repel = TRUE)
##It can be seen that, the variables gender, age.in.years, and incident.patient.disposition are the most correlated with dimension 1.
#None of the variables are strongly correlated solely to dimension 2.
# Contribution to the first dimension
c = fviz_contrib(res.famd, "var", axes = 1)
# Contribution to the second dimension
d = fviz_contrib(res.famd, "var", axes = 2)
#From the plots, it can be seen that:
#variables that contribute the most to the first dimension are: cause.of.injury and complaint.reported.by.dispatch.
#variables that contribute the most to the second dimension are: cause.of.injury and complaint.reported.by.dispatch.
grid.arrange(a, b, c, d, ncol = 2) | _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
From the plots we can see that 19% of the variances contained in the data were retained by the first five principal components. The percentage value of our variables explains low variance among the factors. Variables gender, age, and incident patient disposition are strongly correlated with dimension 1. Hierarchical K-means clustering: | df= select(relevant, age.in.years)
#head(df)
#Hierarchical K-means clustering
hk3 <-hkmeans(df, 3)
hk3$centers
relevant2 = relevant
relevant2$k3cluster = hk3$cluster
relevant2$k3cluster <-as.factor(relevant2$k3cluster)
#levels(relevant2$k3cluster)
levels(relevant2$k3cluster) <- c("Child", "Young-Adult", "Adult" )
#levels(relevant2$k3cluster)
#head(relevant2)
res.famd2 <- FAMD(relevant2, ncp = 5, graph = FALSE)
fviz_pca_ind(res.famd2,
geom.ind = "point", # show points only
col.ind = relevant2$k3cluster, # color by groups
palette = c("#00AFBB", "#E7B800", "#FC4E07"),
addEllipses = TRUE, ellipse.type = "convex",
#addEllipses = TRUE, # Concentration ellipses
legend.title = "Age category"
) | _____no_output_____ | MIT | Final_Proj/.ipynb_checkpoints/Stats Final Project -checkpoint.ipynb | alsaneaan/BMI_stats_final |
An introduction to NuSA. **NuSA** is a Python library for solving two-dimensional structural analysis problems. The idea is to have a code base written using object-oriented programming, so that it is possible to create instances of a finite element model and operate on it through methods. Why NuSA? The structure of NuSA: the structure of **NuSA** is based on three fundamental classes that make up the *core*: `Model`, `Element`, `Node`. | _____no_output_____ | MIT | docs/nusa-info/es/intro-nusa.ipynb | Bartman00/nusa |
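To make the Model / Element / Node idea concrete, here is a minimal conceptual sketch of that object-oriented structure. The class and method names below are illustrative assumptions for this note, not NuSA's actual API; see the NuSA documentation for the real classes and signatures.

```python
# Hypothetical sketch only: illustrates a Model/Element/Node layout, NOT the real NuSA classes.
class Node:
    def __init__(self, x, y):
        self.x, self.y = x, y            # nodal coordinates

class Element:
    def __init__(self, nodes, stiffness):
        self.nodes = nodes               # the Node instances this element connects
        self.stiffness = stiffness       # element property used to assemble the global matrix

class Model:
    def __init__(self):
        self.nodes, self.elements = [], []
    def add_node(self, node):
        self.nodes.append(node)
    def add_element(self, element):
        self.elements.append(element)
    def solve(self):
        # assemble the global stiffness matrix and solve K u = f (omitted in this sketch)
        pass
```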
|
Training Arguments | datadir = '../../data'
data_name = 'cifar10'
fraction = float(0.1)
num_epochs = int(300)
select_every = int(20)
feature = 'dss'# 70
warm_method = 0 # whether to use warmstart-onestep (1) or online (0)
num_runs = 1 # number of random runs
learning_rate = 0.05
| _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Results Folder | all_logs_dir = './results/' + data_name +'/' + feature +'/' + str(fraction) + '/' + str(select_every)
print(all_logs_dir)
subprocess.run(["mkdir", "-p", all_logs_dir])
path_logfile = os.path.join(all_logs_dir, data_name + '.txt')
logfile = open(path_logfile, 'w')
exp_name = data_name + '_fraction:' + str(fraction) + '_epochs:' + str(num_epochs) + \
'_selEvery:' + str(select_every) + '_variant' + str(warm_method) + '_runs' + str(num_runs)
print(exp_name)
| ./results/cifar10/dss/0.1/20
cifar10_fraction:0.1_epochs:300_selEvery:20_variant0_runs1
| MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Loading CIFAR10 Dataset | print("=======================================", file=logfile)
fullset, valset, testset, num_cls = load_mnist_cifar(datadir, data_name, feature)
| Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to ../data/cifar-10-python.tar.gz
| MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Splitting Training dataset to train and validation sets | validation_set_fraction = 0.1
num_fulltrn = len(fullset)
num_val = int(num_fulltrn * validation_set_fraction)
num_trn = num_fulltrn - num_val
trainset, validset = random_split(fullset, [num_trn, num_val])
N = len(trainset)
trn_batch_size = 20
| _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Creating DataLoaders | trn_batch_size = 20
val_batch_size = 1000
tst_batch_size = 1000
trainloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size,
shuffle=False, pin_memory=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=val_batch_size, shuffle=False,
sampler=SubsetRandomSampler(validset.indices),
pin_memory=True)
testloader = torch.utils.data.DataLoader(testset, batch_size=tst_batch_size,
shuffle=False, pin_memory=True)
| _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Budget for Data Subset Selection | bud = int(fraction * N)
print("Budget, fraction and N:", bud, fraction, N)
# Transfer all the data to GPU
print_every = 3 | Budget, fraction and N: 4500 0.1 45000
| MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Loading ResNet Model | model = ResNet18(num_cls)
model = model.to(device)
print(model) | ResNet(
(conv1): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(shortcut): Sequential()
)
)
(linear): Linear(in_features=512, out_features=10, bias=True)
)
| MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Initial Random Subset for Training | start_idxs = np.random.choice(N, size=bud, replace=False) | _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Loss Type, Optimizer and Learning Rate Scheduler | criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
| _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Last Layer GLISTER Strategy with Stochastic Selection | setf_model = Strategy(trainloader, valloader, model, criterion,
learning_rate, device, num_cls, False, 'Stochastic')
idxs = start_idxs
print("Starting Greedy Selection Strategy!")
substrn_losses = np.zeros(num_epochs)
fulltrn_losses = np.zeros(num_epochs)
val_losses = np.zeros(num_epochs)
timing = np.zeros(num_epochs)
val_acc = np.zeros(num_epochs)
tst_acc = np.zeros(num_epochs)
full_trn_acc = np.zeros(num_epochs)
subtrn_acc = np.zeros(num_epochs)
subset_trnloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size,
shuffle=False, sampler=SubsetRandomSampler(idxs), pin_memory=True) | Starting Greedy Selection Strategy!
| MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Training Loop | for i in tqdm.trange(num_epochs):
subtrn_loss = 0
subtrn_correct = 0
subtrn_total = 0
start_time = time.time()
if (((i+1) % select_every) == 0):
cached_state_dict = copy.deepcopy(model.state_dict())
clone_dict = copy.deepcopy(model.state_dict())
print("selEpoch: %d, Starting Selection:" % i, str(datetime.datetime.now()))
subset_start_time = time.time()
subset_idxs, grads_idxs = setf_model.select(int(bud), clone_dict)
subset_end_time = time.time() - subset_start_time
print("Subset Selection Time is:" + str(subset_end_time))
idxs = subset_idxs
print("selEpoch: %d, Selection Ended at:" % (i), str(datetime.datetime.now()))
model.load_state_dict(cached_state_dict)
subset_trnloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size,
shuffle=False, sampler=SubsetRandomSampler(idxs), pin_memory=True)
model.train()
for batch_idx, (inputs, targets) in enumerate(subset_trnloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True) # targets can have non_blocking=True.
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
subtrn_loss += loss.item()
loss.backward()
optimizer.step()
_, predicted = outputs.max(1)
subtrn_total += targets.size(0)
subtrn_correct += predicted.eq(targets).sum().item()
scheduler.step()
timing[i] = time.time() - start_time
#print("Epoch timing is: " + str(timing[i]))
val_loss = 0
val_correct = 0
val_total = 0
tst_correct = 0
tst_total = 0
tst_loss = 0
full_trn_loss = 0
#subtrn_loss = 0
full_trn_correct = 0
full_trn_total = 0
model.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valloader):
#print(batch_idx)
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
val_loss += loss.item()
_, predicted = outputs.max(1)
val_total += targets.size(0)
val_correct += predicted.eq(targets).sum().item()
for batch_idx, (inputs, targets) in enumerate(testloader):
#print(batch_idx)
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
tst_loss += loss.item()
_, predicted = outputs.max(1)
tst_total += targets.size(0)
tst_correct += predicted.eq(targets).sum().item()
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
full_trn_loss += loss.item()
_, predicted = outputs.max(1)
full_trn_total += targets.size(0)
full_trn_correct += predicted.eq(targets).sum().item()
val_acc[i] = val_correct/val_total
tst_acc[i] = tst_correct/tst_total
subtrn_acc[i] = subtrn_correct/subtrn_total
full_trn_acc[i] = full_trn_correct/full_trn_total
substrn_losses[i] = subtrn_loss
fulltrn_losses[i] = full_trn_loss
val_losses[i] = val_loss
print('Epoch:', i + 1, 'SubsetTrn,FullTrn,ValLoss,Time:', subtrn_loss, full_trn_loss, val_loss, timing[i])
| 0%| | 1/300 [00:35<2:58:55, 35.91s/it] | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Results Logging | print("SelectionRun---------------------------------")
print("Final SubsetTrn and FullTrn Loss:", subtrn_loss, full_trn_loss)
print("Validation Loss and Accuracy:", val_loss, val_acc[-1])
print("Test Data Loss and Accuracy:", tst_loss, tst_acc[-1])
print('-----------------------------------')
print("GLISTER", file=logfile)
print('---------------------------------------------------------------------', file=logfile)
val = "Validation Accuracy,"
tst = "Test Accuracy,"
time_str = "Time,"
for i in range(num_epochs):
time_str = time_str + "," + str(timing[i])
val = val + "," + str(val_acc[i])
tst = tst + "," + str(tst_acc[i])
print(timing, file=logfile)
print(val, file=logfile)
print(tst, file=logfile) | _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Full Data Training | torch.manual_seed(42)
np.random.seed(42)
model = ResNet18(num_cls)
model = model.to(device)
idxs = start_idxs
criterion = nn.CrossEntropyLoss()
#optimizer = optim.SGD(model.parameters(), lr=learning_rate)
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
momentum=0.9, weight_decay=5e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_epochs)
print("Starting Full Training Run!")
substrn_losses = np.zeros(num_epochs)
fulltrn_losses = np.zeros(num_epochs)
val_losses = np.zeros(num_epochs)
subset_trnloader = torch.utils.data.DataLoader(trainset, batch_size=trn_batch_size, shuffle=False,
sampler=SubsetRandomSampler(idxs),
pin_memory=True)
timing = np.zeros(num_epochs)
val_acc = np.zeros(num_epochs)
tst_acc = np.zeros(num_epochs)
full_trn_acc = np.zeros(num_epochs)
subtrn_acc = np.zeros(num_epochs)
| _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Full Training Loop | for i in tqdm.trange(num_epochs):
start_time = time.time()
model.train()
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
# Variables in Pytorch are differentiable.
inputs, target = Variable(inputs), Variable(inputs)
# This will zero out the gradients for this batch.
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
scheduler.step()
timing[i] = time.time() - start_time
val_loss = 0
val_correct = 0
val_total = 0
tst_correct = 0
tst_total = 0
tst_loss = 0
full_trn_loss = 0
subtrn_loss = 0
full_trn_correct = 0
full_trn_total = 0
subtrn_correct = 0
subtrn_total = 0
model.eval()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(valloader):
# print(batch_idx)
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
val_loss += loss.item()
_, predicted = outputs.max(1)
val_total += targets.size(0)
val_correct += predicted.eq(targets).sum().item()
for batch_idx, (inputs, targets) in enumerate(testloader):
# print(batch_idx)
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
tst_loss += loss.item()
_, predicted = outputs.max(1)
tst_total += targets.size(0)
tst_correct += predicted.eq(targets).sum().item()
for batch_idx, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
full_trn_loss += loss.item()
_, predicted = outputs.max(1)
full_trn_total += targets.size(0)
full_trn_correct += predicted.eq(targets).sum().item()
for batch_idx, (inputs, targets) in enumerate(subset_trnloader):
inputs, targets = inputs.to(device), targets.to(device, non_blocking=True)
outputs = model(inputs)
loss = criterion(outputs, targets)
subtrn_loss += loss.item()
_, predicted = outputs.max(1)
subtrn_total += targets.size(0)
subtrn_correct += predicted.eq(targets).sum().item()
val_acc[i] = val_correct / val_total
tst_acc[i] = tst_correct / tst_total
subtrn_acc[i] = subtrn_correct / subtrn_total
full_trn_acc[i] = full_trn_correct / full_trn_total
substrn_losses[i] = subtrn_loss
fulltrn_losses[i] = full_trn_loss
val_losses[i] = val_loss
print('Epoch:', i + 1, 'SubsetTrn,FullTrn,ValLoss,Time:', subtrn_loss, full_trn_loss, val_loss, timing[i]) |
0%| | 0/300 [00:00<?, ?it/s][A | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
Results and Timing Logging | print("SelectionRun---------------------------------")
print("Final SubsetTrn and FullTrn Loss:", subtrn_loss, full_trn_loss)
print("Validation Loss and Accuracy:", val_loss, val_acc[-1])
print("Test Data Loss and Accuracy:", tst_loss, tst_acc[-1])
print('-----------------------------------')
print("Full Training", file=logfile)
print('---------------------------------------------------------------------', file=logfile)
val = "Validation Accuracy,"
tst = "Test Accuracy,"
time_str = "Time,"
for i in range(num_epochs):
time_str = time_str + "," + str(timing[i])
val = val + "," + str(val_acc[i])
tst = tst + "," + str(tst_acc[i])
print(timing, file=logfile)
print(val, file=logfile)
print(tst, file=logfile)
logfile.close() | _____no_output_____ | MIT | examples/trials/cifar10_grad_match/notebooks/cifar10_example.ipynb | savan77/nni |
"Visualizing Earnings Based On College Majors"> "Awesome project using numpy, pandas & matplotlib"- toc: true- comments: true- image: images/cosmos.jpg- categories: [project]- tags: [Numpy, Pandas]- badges: true- twitter_large_image: true- featured: true | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
recent_grads = pd.read_csv('recent-grads.csv')
recent_grads.iloc[0]
recent_grads.head()
recent_grads.tail()
recent_grads.describe()
recent_grads.info()
print('Number of Rows Before :', len(recent_grads))
recent_grads = recent_grads.dropna()
print('Number of Rows After :', len(recent_grads))
p1 = recent_grads.plot(x = 'Sample_size', y = 'Median', kind = 'scatter')
p2 = recent_grads.plot(x = 'Sample_size', y = 'Unemployment_rate', kind = 'scatter')
p3 = recent_grads.plot(x = 'Full_time', y = 'Median', kind = 'scatter')
p4 = recent_grads.plot(x = 'ShareWomen', y = 'Unemployment_rate', kind = 'scatter')
p5 = recent_grads.plot(x = 'Men', y = 'Median', kind = 'scatter')
p6 = recent_grads.plot(x = 'Women', y = 'Median', kind = 'scatter')
h1 = recent_grads['Sample_size'].hist(bins = 10, range = (0,4500))
h1.set_title('Sample_size')
h2 = recent_grads['Median'].hist(bins = 20, range = (22000,110000))
h2.set_title('Median')
h3 = recent_grads['Employed'].hist(bins = 10, range = (0,300000))
h3.set_title('Employed')
h4 = recent_grads['Full_time'].hist(bins = 10, range = (0,250000))
h4.set_title('Full_time')
h5 = recent_grads['ShareWomen'].hist(bins = 20, range = (0,1))
h5.set_title('Share Women')
h6 = recent_grads['Men'].hist(bins = 10, range = (110,175000))
h6.set_title('Men')
h7 = recent_grads['Women'].hist(bins = 10, range = (0,300000))
h7.set_title('Women')
from pandas.plotting import scatter_matrix
matrix1 = scatter_matrix(recent_grads[['Sample_size', 'Median']])
matrix2 = scatter_matrix(recent_grads[['Sample_size', 'Median', 'Unemployment_rate']])
recent_grads['ShareWomen'][:10].plot(kind = 'bar')
recent_grads['ShareWomen'][-10:-1].plot(kind = 'bar')
recent_grads[:10].plot.bar(x='Major_category', y='Unemployment_rate')
# OR
# recent_grads['Unemployment_rate'][:10].plot(kind = 'bar')
recent_grads[-10:-1].plot.bar(x='Major_category', y='Unemployment_rate') | _____no_output_____ | Apache-2.0 | _notebooks/2021-05-12-visualizing-earnings.ipynb | MoRaouf/MoSpace |
New Model | important_values = values\
.merge(labels, on="building_id")
important_values.drop(columns=["building_id"], inplace = True)
important_values["geo_level_1_id"] = important_values["geo_level_1_id"].astype("category")
important_values
important_values.shape
X_train, X_test, y_train, y_test = train_test_split(important_values.drop(columns = 'damage_grade'),
important_values['damage_grade'], test_size = 0.2, random_state = 123)
#OneHotEncoding
def encode_and_bind(original_dataframe, feature_to_encode):
dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
res = pd.concat([original_dataframe, dummies], axis=1)
res = res.drop([feature_to_encode], axis=1)
return(res)
features_to_encode = ["geo_level_1_id", "land_surface_condition", "foundation_type", "roof_type",\
"position", "ground_floor_type", "other_floor_type",\
"plan_configuration", "legal_ownership_status", "age_range_superstructure"]
for feature in features_to_encode:
X_train = encode_and_bind(X_train, feature)
X_test = encode_and_bind(X_test, feature)
X_train
X_train.shape
# # Search for the best values of the three hyperparameters indicated below.
# n_estimators = [65, 100, 135]
# max_features = [0.2, 0.5, 0.8]
# max_depth = [None, 2, 5]
# min_samples_split = [5, 15, 25]
# # min_impurity_decrease = [0.0, 0.01, 0.025, 0.05, 0.1]
# # min_samples_leaf
# hyperF = {'n_estimators': n_estimators,
# 'max_features': max_features,
# 'max_depth': max_depth,
# 'min_samples_split': min_samples_split
# }
# gridF = GridSearchCV(estimator = RandomForestClassifier(random_state = 123),
# scoring = 'f1_micro',
# param_grid = hyperF,
# cv = 3,
# verbose = 1,
# n_jobs = -1)
# bestF = gridF.fit(X_train, y_train)
# res = pd.DataFrame(bestF.cv_results_)
# res.loc[res['rank_test_score'] <= 10]
# Use the best parameters according to the GridSearch
rf_model = RandomForestClassifier(n_estimators = 150,
max_depth = None,
max_features = 50,
min_samples_split = 15,
min_samples_leaf = 1,
criterion = "gini",
verbose=True)
rf_model.fit(X_train, y_train)
rf_model.score(X_train, y_train)
# Compute the F1 score on my held-out test split.
y_preds = rf_model.predict(X_test)
f1_score(y_test, y_preds, average='micro')
test_values = pd.read_csv('../../csv/test_values.csv', index_col = "building_id")
test_values
test_values_subset = test_values
test_values_subset["geo_level_1_id"] = test_values_subset["geo_level_1_id"].astype("category")
test_values_subset
# Average height per floor
test_values_subset['height_percentage_per_floor_pre_eq'] = test_values_subset['height_percentage']/test_values_subset['count_floors_pre_eq']
test_values_subset['volume_percentage'] = test_values_subset['area_percentage'] * test_values_subset['height_percentage']
# Some averages by location
test_values_subset['avg_age_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['age'].transform('mean')
test_values_subset['avg_area_percentage_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['area_percentage'].transform('mean')
test_values_subset['avg_height_percentage_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['height_percentage'].transform('mean')
test_values_subset['avg_count_floors_for_geo_level_2_id'] = test_values_subset.groupby('geo_level_2_id')['count_floors_pre_eq'].transform('mean')
test_values_subset['avg_age_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['age'].transform('mean')
test_values_subset['avg_area_percentage_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['area_percentage'].transform('mean')
test_values_subset['avg_height_percentage_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['height_percentage'].transform('mean')
test_values_subset['avg_count_floors_for_geo_level_3_id'] = test_values_subset.groupby('geo_level_3_id')['count_floors_pre_eq'].transform('mean')
# Superstructures
superstructure_cols = [i for i in test_values_subset.filter(regex='^has_superstructure*').columns]
test_values_subset["num_superstructures"] = test_values_subset[superstructure_cols[0]]
for c in superstructure_cols[1:]:
test_values_subset["num_superstructures"] += test_values_subset[c]
test_values_subset['has_superstructure'] = test_values_subset['num_superstructures'] != 0
# Families per unit area, per unit volume, and per floor
test_values_subset['family_area_relation'] = test_values_subset['count_families'] / test_values_subset['area_percentage']
test_values_subset['family_volume_relation'] = test_values_subset['count_families'] / test_values_subset['volume_percentage']
test_values_subset['family_floors_relation'] = test_values_subset['count_families'] / test_values_subset['count_floors_pre_eq']
# Relationship between materials (the most important ones according to model 5) and age
test_values_subset['20_yr_age_range'] = test_values_subset['age'] // 20 * 20
test_values_subset['20_yr_age_range'] = test_values_subset['20_yr_age_range'].astype('str')
test_values_subset['superstructure'] = ''
test_values_subset['superstructure'] = np.where(test_values_subset['has_superstructure_mud_mortar_stone'], test_values_subset['superstructure'] + 'b', test_values_subset['superstructure'])
test_values_subset['superstructure'] = np.where(test_values_subset['has_superstructure_cement_mortar_brick'], test_values_subset['superstructure'] + 'e', test_values_subset['superstructure'])
test_values_subset['superstructure'] = np.where(test_values_subset['has_superstructure_timber'], test_values_subset['superstructure'] + 'f', test_values_subset['superstructure'])
test_values_subset['age_range_superstructure'] = test_values_subset['20_yr_age_range'] + test_values_subset['superstructure']
del test_values_subset['20_yr_age_range']
del test_values_subset['superstructure']
test_values_subset
def encode_and_bind(original_dataframe, feature_to_encode):
dummies = pd.get_dummies(original_dataframe[[feature_to_encode]])
res = pd.concat([original_dataframe, dummies], axis=1)
res = res.drop([feature_to_encode], axis=1)
return(res)
features_to_encode = ["geo_level_1_id", "land_surface_condition", "foundation_type", "roof_type",\
"position", "ground_floor_type", "other_floor_type",\
"plan_configuration", "legal_ownership_status", "age_range_superstructure"]
for feature in features_to_encode:
test_values_subset = encode_and_bind(test_values_subset, feature)
test_values_subset
features_in_model_not_in_tests =\
list(filter(lambda col: col not in test_values_subset.columns.to_list(), X_train.columns.to_list()))
for f in features_in_model_not_in_tests:
test_values_subset[f] = 0
test_values_subset.drop(columns = list(filter(lambda col: col not in X_train.columns.to_list() , test_values_subset.columns.to_list())), inplace = True)
test_values_subset.shape
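# The columns now match X_train as a set, but not necessarily in the same order.
# A sketch of one way to align them explicitly (reindex also fills any missing
# training columns with 0, so it subsumes the loop above):
test_values_subset = test_values_subset.reindex(columns=X_train.columns, fill_value=0)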
# Generate the predictions for the test set.
preds = rf_model.predict(test_values_subset)
submission_format = pd.read_csv('../../csv/submission_format.csv', index_col = "building_id")
my_submission = pd.DataFrame(data=preds,
columns=submission_format.columns,
index=submission_format.index)
my_submission.head()
my_submission.to_csv('../../csv/predictions/jf/8/jf-model-8-submission.csv')
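# Optional sanity check before submitting (sketch): the predicted damage grades
# should only take the values 1, 2 and 3.
# my_submission['damage_grade'].value_counts()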
!head ../../csv/predictions/jf/8/jf-model-8-submission.csv | building_id,damage_grade
300051,3
99355,2
890251,2
745817,1
421793,3
871976,2
691228,1
896100,3
343471,2
| MIT | src/RandomForest/jf-model-8.ipynb | joaquinfontela/Machine-Learning |
Equilibrium constants. Calculating equilibrium constants from energy values is easy. It's known that the stability constant of $\require{mhchem}\ce{Cd(MeNH2)4^{2+}}$ is around $10^{6.55}$: | from overreact import core, _thermo, simulate
import numpy as np
from scipy import constants
K = _thermo.equilibrium_constant(-37.4 * constants.kilo)
np.log10(K) | _____no_output_____ | MIT | notebooks/tutorials/2 equilibrium constants.ipynb | geem-lab/overreact-guide |
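For reference, the value above follows from the standard relation $\Delta G^\circ = -RT \ln K$. A minimal sketch of the same computation done by hand with the numpy and scipy.constants imports above, assuming $T = 298.15\,\mathrm{K}$ (overreact's own defaults may differ):
dG = -37.4 * constants.kilo                 # J/mol, the free energy used above
T = 298.15                                  # K, assumed standard temperature
np.log10(np.exp(-dG / (constants.R * T)))   # ~ 6.55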
So let's check it: | scheme = core.parse_reactions("""
Cd2p + 4 MeNH2 <=> [Cd(MeNH2)4]2p
""")
scheme.compounds, scheme.reactions
dydt = simulate.get_dydt(scheme, np.array([K[0], 1.]))
y, r = simulate.get_y(dydt, y0=[0., 0., 1.])
y(y.t_max)
Kobs = y(y.t_max)[2] / (y(y.t_max)[0] * y(y.t_max)[1]**4)
np.log10(Kobs) | _____no_output_____ | MIT | notebooks/tutorials/2 equilibrium constants.ipynb | geem-lab/overreact-guide |
Binary classification from 2 features using K Nearest Neighbors (KNN). Classification using "raw" Python or libraries. The binary classification is on a single boundary defined by a continuous function, with added white noise. | import numpy as np
from numpy import random
import matplotlib.pyplot as plt
import matplotlib.colors as pltcolors
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier as SkKNeighborsClassifier
import pandas as pd
import seaborn as sns | _____no_output_____ | MIT | classification/ClassificationContinuous2Features-KNN.ipynb | tonio73/data-science |
Model. Quartic (4th degree) function as the boundary between positive and negative labels; unknown effects are added as Gaussian noise. The values of X are uniformly distributed and independent. | # Two features, Gaussian noise
def generateBatch(N):
#
xMin = 0
xMax = 1
b = 0.1
std = 0.1
#
x = random.uniform(xMin, xMax, (N, 2))
# 4th degree relation to shape the boundary
boundary = 2*(x[:,0]**4 + (x[:,0]-0.3)**3 + b)
# Adding some gaussian noise
labels = boundary + random.normal(0, std, N) > x[:,1]
return (x, labels) | _____no_output_____ | MIT | classification/ClassificationContinuous2Features-KNN.ipynb | tonio73/data-science |
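Written out, the generator above labels a sample positive when $2\,(x_0^4 + (x_0 - 0.3)^3 + b) + \varepsilon > x_1$, with $\varepsilon \sim \mathcal{N}(0, \sigma^2)$, $b = 0.1$, $\sigma = 0.1$, and $x_0, x_1$ drawn independently from $\mathcal{U}(0, 1)$.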
Training data | N = 2000
# x has 2 dims in R^2 (two features), label has 1 dim in B
xTrain, labelTrain = generateBatch(N)
colors = ['blue','red']
fig = plt.figure(figsize=(15,4))
plt.subplot(1,3,1)
plt.scatter(xTrain[:,0], xTrain[:,1], c=labelTrain, cmap=pltcolors.ListedColormap(colors), marker=',', alpha=0.1)
plt.xlabel('x0')
plt.ylabel('x1')
plt.title('Generated train data')
plt.grid()
cb = plt.colorbar()
loc = np.arange(0,1,1/float(len(colors)))
cb.set_ticks(loc)
cb.set_ticklabels([0,1])
plt.subplot(1,3,2)
plt.scatter(xTrain[:,0], labelTrain, marker=',', alpha=0.01)
plt.xlabel('x0')
plt.ylabel('label')
plt.grid()
plt.subplot(1,3,3)
plt.scatter(xTrain[:,1], labelTrain, marker=',', alpha=0.01)
plt.xlabel('x1')
plt.ylabel('label')
plt.grid()
count, bins, ignored = plt.hist(labelTrain*1.0, 10, density=True, alpha=0.5)
p = np.mean(labelTrain)
print('Bernoulli parameter of the distribution:', p) | Bernoulli parameter of the distribution: 0.506
| MIT | classification/ClassificationContinuous2Features-KNN.ipynb | tonio73/data-science |
Test data for verification of the model | xTest, labelTest = generateBatch(N)
testColors = ['navy', 'orangered'] | _____no_output_____ | MIT | classification/ClassificationContinuous2Features-KNN.ipynb | tonio73/data-science |
Helpers | def plotHeatMap(X, classes, title=None, fmt='.2g', ax=None, xlabel=None, ylabel=None):
""" Fix heatmap plot from Seaborn with pyplot 3.1.0, 3.1.1
https://stackoverflow.com/questions/56942670/matplotlib-seaborn-first-and-last-row-cut-in-half-of-heatmap-plot
"""
ax = sns.heatmap(X, xticklabels=classes, yticklabels=classes, annot=True, fmt=fmt, cmap=plt.cm.Blues, ax=ax) #notation: "annot" not "annote"
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
if title:
ax.set_title(title)
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
def plotConfusionMatrix(yTrue, yEst, classes, title=None, fmt='.2g', ax=None):
plotHeatMap(metrics.confusion_matrix(yTrue, yEst), classes, title, fmt, ax, \
xlabel='Estimations', ylabel='True values'); | _____no_output_____ | MIT | classification/ClassificationContinuous2Features-KNN.ipynb | tonio73/data-science |
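A usage sketch for the confusion-matrix helper above, assuming a vector of estimated labels labelEst is available (as produced by the classifiers below):
plotConfusionMatrix(labelTest, labelEst, [False, True], title='KNN (homemade)')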
K Nearest Neighbors (KNN). References: - https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm - https://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/ Homemade: a simple, unweighted algorithm (each of the K neighbors has the same weight). | # Select a K
k = 10
# Create a Panda dataframe in order to link x and y
df = pd.DataFrame(np.concatenate((xTrain, labelTrain.reshape(-1,1)), axis=1), columns = ('x0', 'x1', 'label'))
# Insert columns to compute the difference of current test to the train and the L2
df.insert(df.shape[1], 'diff0', 0)
df.insert(df.shape[1], 'diff1', 0)
df.insert(df.shape[1], 'L2', 0)
#
threshold = k / 2
labelEst0 = np.zeros(xTest.shape[0])
for i, x in enumerate(xTest):
# Compute distance and norm to each training sample
df['diff0'] = df['x0'] - x[0]
df['diff1'] = df['x1'] - x[1]
df['L2'] = df['diff0']**2 + df['diff1']**2
# Get the K lowest
kSmallest = df.nsmallest(k, 'L2')
# Finalize prediction based on the mean
labelEst0[i] = np.sum(kSmallest['label']) > threshold | _____no_output_____ | MIT | classification/ClassificationContinuous2Features-KNN.ipynb | tonio73/data-science |
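For comparison, a minimal sketch of the same unweighted prediction using the scikit-learn estimator imported above (results should match up to tie-breaking when exactly k/2 of the neighbors are positive):
skKnn = SkKNeighborsClassifier(n_neighbors=k)
skKnn.fit(xTrain, labelTrain)
labelEstSk = skKnn.predict(xTest)
np.mean(labelEstSk == labelEst0)  # fraction of test points where the two estimators agree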