Imports

import os
import re
import cv2
import time
import tensorflow
import collections
import numpy as np
import pandas as pd
from tqdm import tqdm
from glob import glob
from PIL import Image
import requests, threading
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import OneCycleLR
import torch_xla
import torch_xla.utils.utils as xu
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.data_parallel as dp
import torch_xla.distributed.parallel_loader as pl
import torch_xla.distributed.xla_multiprocessing as xmp
import warnings
warnings.filterwarnings("ignore")
torch.manual_seed(42)
torch.set_default_tensor_type('torch.FloatTensor')
# do not uncomment see https://github.com/pytorch/xla/issues/1587
# xm.get_xla_supported_devices()
# xm.xrt_world_size() # 1
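
Since the device queries above must stay commented out in the main process (see the linked XLA issue), here is a hedged sketch, not part of the original notebook, of how the same information could be read safely from inside a spawned worker; it reuses only calls that already appear later in this notebook.

def _probe_fn(rank, flags):
    # safe here: the query runs inside the spawned worker, not the main process
    device = xm.xla_device()
    print(f"[{rank}] device={device} world_size={xm.xrt_world_size()}", flush=True)

# xmp.spawn(_probe_fn, args=({},), nprocs=8, start_method='fork')  # kept commented, like the checks above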
Dataset

DATASET_DIR = '/kaggle/input/104-flowers-garden-of-eden/jpeg-512x512'
TRAIN_DIR = DATASET_DIR + '/train'
VAL_DIR = DATASET_DIR + '/val'
TEST_DIR = DATASET_DIR + '/test'
BATCH_SIZE = 16 # per core
NUM_EPOCH = 25
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
train_transform = transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(0.5),
transforms.ToTensor(),
normalize])
valid_transform = transforms.Compose([transforms.Resize((224,224)),
transforms.ToTensor(),
normalize])
train = datasets.ImageFolder(TRAIN_DIR, transform=train_transform)
valid = datasets.ImageFolder(VAL_DIR, transform=train_transform)
train = torch.utils.data.ConcatDataset([train, valid])
# print out some data stats
print('Num training images: ', len(train))
print('Num test images: ', len(valid))

Num training images: 16465
Num test images: 3712
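A hedged sanity check, not part of the original notebook: listing the ImageFolder classes and inspecting one augmented sample should confirm the 104 flower classes and the 224x224 crops assumed by the model below.

base = datasets.ImageFolder(TRAIN_DIR, transform=train_transform)
print(len(base.classes))    # expected: 104 flower classes
sample, label = base[0]
print(sample.shape, label)  # expected: torch.Size([3, 224, 224]) and the first class index, 0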
Model

class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.base_model = torchvision.models.densenet201(pretrained=True)
self.base_model.classifier = nn.Identity()
self.fc = torch.nn.Sequential(
torch.nn.Linear(1920, 1024, bias = True),
torch.nn.BatchNorm1d(1024),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(0.3),
torch.nn.Linear(1024, 512, bias = True),
torch.nn.BatchNorm1d(512),
torch.nn.ReLU(inplace=True),
torch.nn.Dropout(0.3),
torch.nn.Linear(512, 104))
def forward(self, inputs):
x = self.base_model(inputs)
return self.fc(x)
model = MyModel()
print(model)
del model

Downloading: "https://download.pytorch.org/models/densenet201-c1103571.pth" to /root/.cache/torch/checkpoints/densenet201-c1103571.pth
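A hedged sanity check, not in the original notebook: a dummy forward pass (it downloads the DenseNet-201 weights again, as in the output above) confirms the custom head maps a 224x224 RGB batch to 104 class logits.

model = MyModel()
model.eval()  # eval mode so the BatchNorm1d layers accept a tiny batch
with torch.no_grad():
    logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # expected: torch.Size([2, 104])
del model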
Training

def train_model():
train = datasets.ImageFolder(TRAIN_DIR, transform=train_transform)
valid = datasets.ImageFolder(VAL_DIR, transform=train_transform)
train = torch.utils.data.ConcatDataset([train, valid])
torch.manual_seed(42)
train_sampler = torch.utils.data.distributed.DistributedSampler(
train,
num_replicas=xm.xrt_world_size(),
rank=xm.get_ordinal(),
shuffle=True)
train_loader = torch.utils.data.DataLoader(
train,
batch_size=BATCH_SIZE,
sampler=train_sampler,
num_workers=0,
drop_last=True) # print(len(train_loader))
xm.master_print(f"Train for {len(train_loader)} steps per epoch")
# Scale learning rate to num cores
learning_rate = 0.0001 * xm.xrt_world_size()
# Get loss function, optimizer, and model
device = xm.xla_device()
model = MyModel()
for param in model.base_model.parameters(): # freeze some layers
param.requires_grad = False
model = model.to(device)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=5e-4)
scheduler = OneCycleLR(optimizer,
learning_rate,
div_factor=10.0,
final_div_factor=50.0,
epochs=NUM_EPOCH,
steps_per_epoch=len(train_loader))
def train_loop_fn(loader):
tracker = xm.RateTracker()
model.train()
total_samples, correct = 0, 0
for x, (data, target) in enumerate(loader):
optimizer.zero_grad()
output = model(data)
loss = loss_fn(output, target)
loss.backward()
xm.optimizer_step(optimizer)
tracker.add(data.shape[0])
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
total_samples += data.size()[0]
scheduler.step()
if x % 40 == 0:
print('[xla:{}]({})\tLoss={:.3f}\tRate={:.2f}\tGlobalRate={:.2f}'.format(
xm.get_ordinal(), x, loss.item(), tracker.rate(),
tracker.global_rate()), flush=True)
accuracy = 100.0 * correct / total_samples
print('[xla:{}] Accuracy={:.2f}%'.format(xm.get_ordinal(), accuracy), flush=True)
return accuracy
# Train loops
accuracy = []
for epoch in range(1, NUM_EPOCH + 1):
start = time.time()
para_loader = pl.ParallelLoader(train_loader, [device])
accuracy.append(train_loop_fn(para_loader.per_device_loader(device)))
xm.master_print("Finished training epoch {} train-acc {:.2f} in {:.2f} sec"\
.format(epoch, accuracy[-1], time.time() - start))
xm.save(model.state_dict(), "./model.pt")
# if epoch == 15: #unfreeze
# for param in model.base_model.parameters():
# param.requires_grad = True
return accuracy
# Start training processes
def _mp_fn(rank, flags):
global acc_list
torch.set_default_tensor_type('torch.FloatTensor')
a = train_model()
FLAGS={}
xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=8, start_method='fork')

Train for 128 steps per epoch
[xla:1](0) Loss=4.989 Rate=0.89 GlobalRate=0.89
[xla:3](0) Loss=4.780 Rate=0.81 GlobalRate=0.81
[xla:4](0) Loss=4.716 Rate=0.81 GlobalRate=0.81
[xla:2](0) Loss=4.723 Rate=0.69 GlobalRate=0.69
[xla:7](0) Loss=4.702 Rate=0.83 GlobalRate=0.83
[xla:0](0) Loss=4.785 Rate=0.57 GlobalRate=0.57
[xla:5](0) Loss=4.939 Rate=0.70 GlobalRate=0.70
[xla:6](0) Loss=4.991 Rate=0.73 GlobalRate=0.73
[xla:6](40) Loss=4.108 Rate=8.98 GlobalRate=9.93
[xla:2](40) Loss=3.599 Rate=8.96 GlobalRate=9.73
[xla:1](40) Loss=3.885 Rate=9.04 GlobalRate=10.55
[xla:0](40) Loss=3.802 Rate=8.91 GlobalRate=9.08
[xla:5](40) Loss=4.445 Rate=8.97 GlobalRate=9.77
[xla:3](40) Loss=4.046 Rate=9.01 GlobalRate=10.27
[xla:7](40) Loss=4.111 Rate=9.02 GlobalRate=10.33
[xla:4](40) Loss=4.295 Rate=9.01 GlobalRate=10.28
[xla:2](80) Loss=3.008 Rate=17.59 GlobalRate=13.66
[xla:7](80) Loss=3.415 Rate=17.61 GlobalRate=14.25
[xla:6](80) Loss=3.197 Rate=17.60 GlobalRate=13.86
[xla:3](80) Loss=3.940 Rate=17.61 GlobalRate=14.20
[xla:1](80) Loss=3.327 Rate=17.62 GlobalRate=14.47
[xla:5](80) Loss=2.703 Rate=17.59 GlobalRate=13.70
[xla:0](80) Loss=3.016 Rate=17.57 GlobalRate=13.00
[xla:4](80) Loss=3.871 Rate=17.61 GlobalRate=14.20
[xla:7](120) Loss=3.458 Rate=21.58 GlobalRate=16.50
[xla:5](120) Loss=3.633 Rate=21.57 GlobalRate=16.00
[xla:3](120) Loss=2.451 Rate=21.57 GlobalRate=16.45
[xla:4](120) Loss=2.573 Rate=21.58 GlobalRate=16.45
[xla:1](120) Loss=2.963 Rate=21.58 GlobalRate=16.69
[xla:2](120) Loss=2.925 Rate=21.57 GlobalRate=15.96
[xla:6](120) Loss=2.663 Rate=21.57 GlobalRate=16.14
[xla:0](120) Loss=2.882 Rate=21.56 GlobalRate=15.35
[xla:2] Accuracy=23.83%
[xla:3] Accuracy=24.27%
[xla:0] Accuracy=24.17%
[xla:1] Accuracy=24.12%
[xla:4] Accuracy=23.34%
[xla:5] Accuracy=22.66%
Finished training epoch 1 train-acc 24.17 in 128.63 sec
[xla:7] Accuracy=22.95%
[xla:6] Accuracy=23.14%
[xla:7](0) Loss=2.414 Rate=5.76 GlobalRate=5.76
[xla:0](0) Loss=2.922 Rate=5.77 GlobalRate=5.77
[xla:6](0) Loss=2.563 Rate=5.79 GlobalRate=5.79
[xla:1](0) Loss=2.955 Rate=5.79 GlobalRate=5.79
[xla:4](0) Loss=2.739 Rate=5.77 GlobalRate=5.77
[xla:3](0) Loss=2.165 Rate=5.83 GlobalRate=5.83
[xla:5](0) Loss=2.761 Rate=5.75 GlobalRate=5.75
[xla:2](0) Loss=2.325 Rate=5.76 GlobalRate=5.76
[xla:3](40) Loss=3.028 Rate=16.53 GlobalRate=22.02
[xla:0](40) Loss=2.728 Rate=16.51 GlobalRate=22.00
[xla:6](40) Loss=2.971 Rate=16.51 GlobalRate=22.00
[xla:4](40) Loss=3.325 Rate=16.51 GlobalRate=22.00
[xla:7](40) Loss=3.123 Rate=16.50 GlobalRate=21.99
[xla:2](40) Loss=2.441 Rate=16.50 GlobalRate=21.99
[xla:5](40) Loss=3.490 Rate=16.49 GlobalRate=21.99
[xla:1](40) Loss=2.592 Rate=16.51 GlobalRate=22.00
[xla:6](80) Loss=2.163 Rate=17.94 GlobalRate=20.35
[xla:1](80) Loss=2.347 Rate=17.94 GlobalRate=20.35
[xla:7](80) Loss=2.023 Rate=17.94 GlobalRate=20.35
[xla:4](80) Loss=3.082 Rate=17.94 GlobalRate=20.35
[xla:2](80) Loss=1.798 Rate=17.94 GlobalRate=20.35
[xla:3](80) Loss=2.853 Rate=17.95 GlobalRate=20.36
[xla:5](80) Loss=2.171 Rate=17.94 GlobalRate=20.35
[xla:0](80) Loss=2.048 Rate=17.94 GlobalRate=20.35
[xla:5](120) Loss=2.252 Rate=21.94 GlobalRate=21.58
[xla:1](120) Loss=2.023 Rate=21.94 GlobalRate=21.59
[xla:4](120) Loss=1.703 Rate=21.94 GlobalRate=21.58
[xla:2](120) Loss=2.222 Rate=21.94 GlobalRate=21.58
[xla:3](120) Loss=1.383 Rate=21.94 GlobalRate=21.59
[xla:6](120) Loss=1.686 Rate=21.94 GlobalRate=21.59
[xla:0](120) Loss=2.221 Rate=21.94 GlobalRate=21.58
[xla:7](120) Loss=2.730 Rate=21.94 GlobalRate=21.58
[xla:5] Accuracy=44.58%
[xla:1] Accuracy=46.00%
[xla:7] Accuracy=44.78%
[xla:6] Accuracy=45.51%
[xla:3] Accuracy=45.31%
[xla:0] Accuracy=48.19%
[xla:2] Accuracy=45.90%
Finished training epoch 2 train-acc 48.19 in 92.46 sec
[xla:4] Accuracy=46.68%
[xla:2](0) Loss=1.348 Rate=6.10 GlobalRate=6.10
[xla:5](0) Loss=1.965 Rate=6.05 GlobalRate=6.05
[xla:1](0) Loss=2.241 Rate=6.02 GlobalRate=6.02
[xla:7](0) Loss=1.871 Rate=6.05 GlobalRate=6.05
[xla:6](0) Loss=1.966 Rate=6.06 GlobalRate=6.06
[xla:0](0) Loss=2.161 Rate=6.07 GlobalRate=6.07
[xla:3](0) Loss=1.798 Rate=6.09 GlobalRate=6.09
[xla:4](0) Loss=2.093 Rate=6.01 GlobalRate=6.01
[xla:2](40) Loss=1.825 Rate=17.35 GlobalRate=23.12
[xla:0](40) Loss=1.859 Rate=17.34 GlobalRate=23.11
[xla:3](40) Loss=2.373 Rate=17.35 GlobalRate=23.12
[xla:1](40) Loss=2.128 Rate=17.32 GlobalRate=23.09
[xla:6](40) Loss=2.580 Rate=17.34 GlobalRate=23.10
[xla:7](40) Loss=2.253 Rate=17.33 GlobalRate=23.10
[xla:5](40) Loss=2.722 Rate=17.33 GlobalRate=23.10
[xla:4](40) Loss=2.187 Rate=17.32 GlobalRate=23.09
[xla:4](80) Loss=2.246 Rate=18.03 GlobalRate=20.57
[xla:7](80) Loss=1.302 Rate=18.04 GlobalRate=20.58
[xla:0](80) Loss=1.371 Rate=18.04 GlobalRate=20.58
[xla:3](80) Loss=2.093 Rate=18.04 GlobalRate=20.58
[xla:5](80) Loss=1.693 Rate=18.04 GlobalRate=20.58
[xla:1](80) Loss=1.771 Rate=18.03 GlobalRate=20.57
[xla:2](80) Loss=1.094 Rate=18.04 GlobalRate=20.58
[xla:6](80) Loss=1.144 Rate=18.04 GlobalRate=20.58
[xla:3](120) Loss=0.705 Rate=19.41 GlobalRate=20.50
[xla:0](120) Loss=1.308 Rate=19.41 GlobalRate=20.50
[xla:4](120) Loss=0.973 Rate=19.41 GlobalRate=20.49
[xla:2](120) Loss=1.732 Rate=19.42 GlobalRate=20.50
[xla:1](120) Loss=1.382 Rate=19.41 GlobalRate=20.49
[xla:5](120) Loss=1.643 Rate=19.41 GlobalRate=20.50
[xla:6](120) Loss=0.947 Rate=19.41 GlobalRate=20.50
[xla:7](120) Loss=1.808 Rate=19.41 GlobalRate=20.50
[xla:7] Accuracy=62.21%
[xla:4] Accuracy=64.16%
[xla:5] Accuracy=62.11%
[xla:2] Accuracy=63.92%
[xla:6] Accuracy=62.94%
[xla:0] Accuracy=65.14%
[xla:1] Accuracy=63.18%
[xla:3] Accuracy=62.94%
Finished training epoch 3 train-acc 65.14 in 97.13 sec
[xla:7](0) Loss=1.025 Rate=5.97 GlobalRate=5.97
[xla:4](0) Loss=1.531 Rate=5.98 GlobalRate=5.98
[xla:3](0) Loss=0.876 Rate=5.92 GlobalRate=5.92
[xla:5](0) Loss=1.339 Rate=5.86 GlobalRate=5.86
[xla:0](0) Loss=1.646 Rate=5.88 GlobalRate=5.88
[xla:6](0) Loss=1.246 Rate=5.90 GlobalRate=5.90
[xla:1](0) Loss=1.717 Rate=5.85 GlobalRate=5.85
[xla:2](0) Loss=0.940 Rate=5.79 GlobalRate=5.79
[xla:1](40) Loss=1.652 Rate=17.00 GlobalRate=22.68
[xla:3](40) Loss=1.223 Rate=17.01 GlobalRate=22.68
[xla:0](40) Loss=1.591 Rate=17.01 GlobalRate=22.69
[xla:4](40) Loss=1.548 Rate=17.04 GlobalRate=22.70
[xla:5](40) Loss=1.994 Rate=17.00 GlobalRate=22.68
[xla:6](40) Loss=1.340 Rate=17.02 GlobalRate=22.69
[xla:2](40) Loss=1.142 Rate=16.99 GlobalRate=22.67
[xla:7](40) Loss=1.014 Rate=17.03 GlobalRate=22.70
[xla:4](80) Loss=1.145 Rate=21.72 GlobalRate=23.71
[xla:7](80) Loss=0.659 Rate=21.71 GlobalRate=23.71
[xla:6](80) Loss=0.885 Rate=21.71 GlobalRate=23.70
[xla:0](80) Loss=1.229 Rate=21.71 GlobalRate=23.70
[xla:2](80) Loss=0.829 Rate=21.70 GlobalRate=23.69
[xla:1](80) Loss=1.204 Rate=21.70 GlobalRate=23.69
[xla:3](80) Loss=1.697 Rate=21.70 GlobalRate=23.69
[xla:5](80) Loss=1.018 Rate=21.70 GlobalRate=23.69
[xla:4](120) Loss=0.758 Rate=23.00 GlobalRate=23.76
[xla:7](120) Loss=1.177 Rate=23.00 GlobalRate=23.76
[xla:5](120) Loss=1.129 Rate=22.99 GlobalRate=23.75
[xla:2](120) Loss=0.745 Rate=22.99 GlobalRate=23.75
[xla:3](120) Loss=0.391 Rate=23.00 GlobalRate=23.75
[xla:1](120) Loss=1.079 Rate=22.99 GlobalRate=23.75
[xla:0](120) Loss=1.019 Rate=23.00 GlobalRate=23.75
[xla:6](120) Loss=0.516 Rate=23.00 GlobalRate=23.75
[xla:0] Accuracy=74.37%
[xla:2] Accuracy=74.51%
[xla:5] Accuracy=74.17%
[xla:1] Accuracy=74.61%
Finished training epoch 4 train-acc 74.37 in 83.59 sec
[xla:7] Accuracy=72.02%
[xla:6] Accuracy=74.80%
[xla:4] Accuracy=74.27%
[xla:3] Accuracy=73.63%
[xla:3](0) Loss=0.506 Rate=5.79 GlobalRate=5.79
[xla:0](0) Loss=0.983 Rate=5.81 GlobalRate=5.81
[xla:7](0) Loss=0.791 Rate=5.75 GlobalRate=5.75
[xla:5](0) Loss=1.441 Rate=5.75 GlobalRate=5.75
[xla:4](0) Loss=1.180 Rate=5.75 GlobalRate=5.75
[xla:6](0) Loss=1.197 Rate=5.77 GlobalRate=5.77
[xla:1](0) Loss=1.477 Rate=5.79 GlobalRate=5.79
[xla:2](0) Loss=0.864 Rate=5.77 GlobalRate=5.77
[xla:0](40) Loss=1.177 Rate=17.10 GlobalRate=22.82
[xla:7](40) Loss=0.832 Rate=17.07 GlobalRate=22.80
[xla:5](40) Loss=1.177 Rate=17.07 GlobalRate=22.80
[xla:1](40) Loss=1.128 Rate=17.09 GlobalRate=22.81
[xla:4](40) Loss=0.984 Rate=17.07 GlobalRate=22.80
[xla:6](40) Loss=0.951 Rate=17.08 GlobalRate=22.80
[xla:2](40) Loss=0.871 Rate=17.08 GlobalRate=22.80
[xla:3](40) Loss=0.729 Rate=17.09 GlobalRate=22.81
[xla:1](80) Loss=0.666 Rate=21.01 GlobalRate=23.20
[xla:6](80) Loss=0.421 Rate=21.00 GlobalRate=23.20
[xla:2](80) Loss=0.524 Rate=21.00 GlobalRate=23.20
[xla:4](80) Loss=0.667 Rate=21.00 GlobalRate=23.19
[xla:0](80) Loss=0.512 Rate=21.01 GlobalRate=23.21
[xla:7](80) Loss=0.560 Rate=21.00 GlobalRate=23.19
[xla:3](80) Loss=1.063 Rate=21.01 GlobalRate=23.20
[xla:5](80) Loss=0.707 Rate=21.00 GlobalRate=23.19
[xla:6](120) Loss=0.372 Rate=23.76 GlobalRate=23.94
[xla:2](120) Loss=0.595 Rate=23.76 GlobalRate=23.94
[xla:4](120) Loss=0.741 Rate=23.75 GlobalRate=23.93
[xla:1](120) Loss=0.763 Rate=23.76 GlobalRate=23.94
[xla:0](120) Loss=0.669 Rate=23.75 GlobalRate=23.94
[xla:7](120) Loss=0.929 Rate=23.75 GlobalRate=23.93
[xla:5](120) Loss=0.684 Rate=23.75 GlobalRate=23.93
[xla:3](120) Loss=0.457 Rate=23.75 GlobalRate=23.94
[xla:0] Accuracy=80.27%
[xla:6] Accuracy=82.57%
[xla:3] Accuracy=79.88%
[xla:1] Accuracy=80.81%
Finished training epoch 5 train-acc 80.27 in 83.07 sec
[xla:5] Accuracy=79.83%
[xla:2] Accuracy=82.52%
[xla:7] Accuracy=80.13%
[xla:4] Accuracy=80.66%
[xla:5](0) Loss=0.858 Rate=5.81 GlobalRate=5.81
[xla:4](0) Loss=0.991 Rate=5.77 GlobalRate=5.77
[xla:2](0) Loss=0.447 Rate=5.83 GlobalRate=5.83
[xla:1](0) Loss=0.617 Rate=5.78 GlobalRate=5.78
[xla:7](0) Loss=0.436 Rate=5.84 GlobalRate=5.84
[xla:6](0) Loss=0.554 Rate=5.79 GlobalRate=5.79
[xla:3](0) Loss=0.664 Rate=5.78 GlobalRate=5.78
[xla:0](0) Loss=1.198 Rate=5.83 GlobalRate=5.83
[xla:5](40) Loss=1.213 Rate=16.77 GlobalRate=22.36
[xla:7](40) Loss=0.537 Rate=16.78 GlobalRate=22.37
[xla:0](40) Loss=0.681 Rate=16.78 GlobalRate=22.37
[xla:1](40) Loss=0.809 Rate=16.76 GlobalRate=22.35
[xla:3](40) Loss=0.404 Rate=16.76 GlobalRate=22.35
[xla:4](40) Loss=0.570 Rate=16.75 GlobalRate=22.35
[xla:6](40) Loss=0.691 Rate=16.76 GlobalRate=22.35
[xla:2](40) Loss=0.829 Rate=16.78 GlobalRate=22.37
[xla:2](80) Loss=0.537 Rate=21.62 GlobalRate=23.53
[xla:7](80) Loss=0.295 Rate=21.62 GlobalRate=23.53
[xla:0](80) Loss=0.694 Rate=21.62 GlobalRate=23.53
[xla:5](80) Loss=0.900 Rate=21.62 GlobalRate=23.52
[xla:1](80) Loss=0.749 Rate=21.61 GlobalRate=23.52
[xla:6](80) Loss=0.401 Rate=21.62 GlobalRate=23.52
[xla:4](80) Loss=0.697 Rate=21.61 GlobalRate=23.52
[xla:3](80) Loss=1.074 Rate=21.61 GlobalRate=23.52
[xla:0](120) Loss=1.010 Rate=23.28 GlobalRate=23.80
[xla:2](120) Loss=0.535 Rate=23.28 GlobalRate=23.80
[xla:5](120) Loss=0.798 Rate=23.27 GlobalRate=23.80
[xla:3](120) Loss=0.266 Rate=23.27 GlobalRate=23.80
[xla:6](120) Loss=0.806 Rate=23.27 GlobalRate=23.80
[xla:1](120) Loss=0.530 Rate=23.27 GlobalRate=23.80
[xla:4](120) Loss=0.620 Rate=23.27 GlobalRate=23.80
[xla:7](120) Loss=1.000 Rate=23.28 GlobalRate=23.81
[xla:3] Accuracy=84.67%
[xla:5] Accuracy=84.23%
[xla:4] Accuracy=84.57%
[xla:2] Accuracy=85.74%
[xla:6] Accuracy=85.25%
[xla:7] Accuracy=83.84%
[xla:0] Accuracy=84.96%
[xla:1] Accuracy=85.40%
Finished training epoch 6 train-acc 84.96 in 83.84 sec
[xla:3](0) Loss=0.341 Rate=5.55 GlobalRate=5.55
[xla:6](0) Loss=0.545 Rate=5.53 GlobalRate=5.53
[xla:5](0) Loss=0.869 Rate=5.57 GlobalRate=5.57
[xla:1](0) Loss=1.031 Rate=5.58 GlobalRate=5.58
[xla:7](0) Loss=0.610 Rate=5.57 GlobalRate=5.57
[xla:0](0) Loss=0.322 Rate=5.56 GlobalRate=5.56
[xla:4](0) Loss=0.806 Rate=5.58 GlobalRate=5.58
[xla:2](0) Loss=0.249 Rate=5.55 GlobalRate=5.55
[xla:0](40) Loss=0.440 Rate=17.17 GlobalRate=22.97
[xla:2](40) Loss=0.619 Rate=17.17 GlobalRate=22.96
[xla:6](40) Loss=0.728 Rate=17.16 GlobalRate=22.95
[xla:7](40) Loss=0.419 Rate=17.18 GlobalRate=22.97
[xla:1](40) Loss=0.726 Rate=17.18 GlobalRate=22.98
[xla:3](40) Loss=0.267 Rate=17.17 GlobalRate=22.96
[xla:5](40) Loss=0.845 Rate=17.18 GlobalRate=22.97
[xla:4](40) Loss=1.157 Rate=17.18 GlobalRate=22.97
[xla:2](80) Loss=0.830 Rate=21.63 GlobalRate=23.75
[xla:5](80) Loss=0.778 Rate=21.64 GlobalRate=23.75
[xla:0](80) Loss=0.318 Rate=21.64 GlobalRate=23.75
[xla:1](80) Loss=0.354 Rate=21.64 GlobalRate=23.75
[xla:6](80) Loss=0.075 Rate=21.63 GlobalRate=23.74
[xla:4](80) Loss=0.335 Rate=21.64 GlobalRate=23.75
[xla:3](80) Loss=1.132 Rate=21.63 GlobalRate=23.75
[xla:7](80) Loss=0.797 Rate=21.64 GlobalRate=23.75
[xla:2](120) Loss=0.302 Rate=23.44 GlobalRate=24.04
[xla:7](120) Loss=1.040 Rate=23.45 GlobalRate=24.04
[xla:3](120) Loss=0.175 Rate=23.44 GlobalRate=24.04
[xla:4](120) Loss=0.538 Rate=23.44 GlobalRate=24.04
[xla:0](120) Loss=0.725 Rate=23.44 GlobalRate=24.04
[xla:6](120) Loss=0.337 Rate=23.44 GlobalRate=24.04
[xla:5](120) Loss=0.414 Rate=23.45 GlobalRate=24.04
[xla:1](120) Loss=0.748 Rate=23.45 GlobalRate=24.04
[xla:6] Accuracy=85.89%
[xla:5] Accuracy=86.23%
[xla:4] Accuracy=86.62%
[xla:2] Accuracy=88.28%
[xla:0] Accuracy=87.45%
Finished training epoch 7 train-acc 87.45 in 83.10 sec
[xla:1] Accuracy=87.40%
[xla:7] Accuracy=87.30%
[xla:3] Accuracy=86.23%
[xla:1](0) Loss=0.519 Rate=5.73 GlobalRate=5.73
[xla:3](0) Loss=0.238 Rate=5.72 GlobalRate=5.72
[xla:7](0) Loss=0.480 Rate=5.75 GlobalRate=5.75
[xla:0](0) Loss=0.296 Rate=5.80 GlobalRate=5.80
[xla:5](0) Loss=0.807 Rate=5.73 GlobalRate=5.73
[xla:4](0) Loss=0.178 Rate=5.76 GlobalRate=5.76
[xla:2](0) Loss=0.618 Rate=5.79 GlobalRate=5.79
[xla:6](0) Loss=0.217 Rate=5.76 GlobalRate=5.76
[xla:5](40) Loss=1.007 Rate=16.91 GlobalRate=22.57
[xla:2](40) Loss=0.463 Rate=16.93 GlobalRate=22.60
[xla:6](40) Loss=0.672 Rate=16.92 GlobalRate=22.58
[xla:1](40) Loss=0.281 Rate=16.91 GlobalRate=22.57
[xla:7](40) Loss=0.522 Rate=16.92 GlobalRate=22.58
[xla:0](40) Loss=0.279 Rate=16.94 GlobalRate=22.60
[xla:3](40) Loss=0.162 Rate=16.91 GlobalRate=22.57
[xla:4](40) Loss=0.784 Rate=16.92 GlobalRate=22.58
[xla:7](80) Loss=0.546 Rate=21.57 GlobalRate=23.57
[xla:4](80) Loss=0.423 Rate=21.57 GlobalRate=23.57
[xla:1](80) Loss=0.242 Rate=21.56 GlobalRate=23.56
[xla:2](80) Loss=0.233 Rate=21.58 GlobalRate=23.58
[xla:3](80) Loss=0.664 Rate=21.56 GlobalRate=23.56
[xla:6](80) Loss=0.424 Rate=21.57 GlobalRate=23.57
[xla:5](80) Loss=0.149 Rate=21.56 GlobalRate=23.56
[xla:0](80) Loss=0.400 Rate=21.57 GlobalRate=23.57
[xla:5](120) Loss=0.466 Rate=23.62 GlobalRate=24.01
[xla:4](120) Loss=0.328 Rate=23.62 GlobalRate=24.02
[xla:2](120) Loss=0.298 Rate=23.62 GlobalRate=24.02
[xla:7](120) Loss=0.312 Rate=23.62 GlobalRate=24.02
[xla:0](120) Loss=0.304 Rate=23.63 GlobalRate=24.03
[xla:1](120) Loss=0.494 Rate=23.62 GlobalRate=24.02
[xla:6](120) Loss=0.201 Rate=23.62 GlobalRate=24.02
[xla:3](120) Loss=0.078 Rate=23.62 GlobalRate=24.01
[xla:3] Accuracy=89.40%
[xla:1] Accuracy=89.79%
[xla:6] Accuracy=88.87%
[xla:2] Accuracy=90.28%
[xla:0] Accuracy=90.82%
[xla:7] Accuracy=89.16%
[xla:4] Accuracy=89.60%
[xla:5] Accuracy=88.23%
Finished training epoch 8 train-acc 90.82 in 82.89 sec
[xla:3](0) Loss=0.159 Rate=5.55 GlobalRate=5.55
[xla:7](0) Loss=0.407 Rate=5.50 GlobalRate=5.50
[xla:1](0) Loss=0.461 Rate=5.55 GlobalRate=5.55
[xla:2](0) Loss=0.457 Rate=5.50 GlobalRate=5.50
[xla:4](0) Loss=0.412 Rate=5.47 GlobalRate=5.47
[xla:0](0) Loss=0.488 Rate=5.51 GlobalRate=5.51
[xla:5](0) Loss=0.510 Rate=5.49 GlobalRate=5.49
[xla:6](0) Loss=0.302 Rate=5.50 GlobalRate=5.50
[xla:7](40) Loss=0.120 Rate=16.97 GlobalRate=22.69
[xla:3](40) Loss=0.310 Rate=16.99 GlobalRate=22.71
[xla:0](40) Loss=0.647 Rate=16.98 GlobalRate=22.70
[xla:4](40) Loss=0.127 Rate=16.96 GlobalRate=22.68
[xla:6](40) Loss=0.535 Rate=16.96 GlobalRate=22.68
[xla:1](40) Loss=0.606 Rate=16.99 GlobalRate=22.71
[xla:2](40) Loss=0.281 Rate=16.97 GlobalRate=22.69
[xla:5](40) Loss=0.367 Rate=16.96 GlobalRate=22.69
[xla:1](80) Loss=0.680 Rate=21.49 GlobalRate=23.56
[xla:6](80) Loss=0.238 Rate=21.48 GlobalRate=23.54
[xla:7](80) Loss=0.148 Rate=21.48 GlobalRate=23.55
[xla:3](80) Loss=0.212 Rate=21.49 GlobalRate=23.56
[xla:2](80) Loss=0.351 Rate=21.48 GlobalRate=23.55
[xla:0](80) Loss=0.211 Rate=21.48 GlobalRate=23.55
[xla:4](80) Loss=0.465 Rate=21.48 GlobalRate=23.54
[xla:5](80) Loss=0.449 Rate=21.48 GlobalRate=23.54
[xla:0](120) Loss=0.383 Rate=23.29 GlobalRate=23.85
[xla:3](120) Loss=0.211 Rate=23.29 GlobalRate=23.86
[xla:6](120) Loss=0.647 Rate=23.28 GlobalRate=23.85
[xla:1](120) Loss=0.446 Rate=23.28 GlobalRate=23.86
[xla:5](120) Loss=0.253 Rate=23.28 GlobalRate=23.84
[xla:4](120) Loss=0.215 Rate=23.28 GlobalRate=23.84
[xla:2](120) Loss=0.328 Rate=23.28 GlobalRate=23.85
[xla:7](120) Loss=0.733 Rate=23.28 GlobalRate=23.84
[xla:1] Accuracy=91.02%
[xla:0] Accuracy=91.55%
[xla:6] Accuracy=90.62%
[xla:3] Accuracy=92.19%
[xla:7] Accuracy=91.41%
[xla:4] Accuracy=91.50%
Finished training epoch 9 train-acc 91.55 in 83.32 sec
[xla:2] Accuracy=91.36%
[xla:5] Accuracy=91.70%
[xla:0](0) Loss=0.272 Rate=5.82 GlobalRate=5.82
[xla:2](0) Loss=0.269 Rate=5.78 GlobalRate=5.78
[xla:7](0) Loss=0.213 Rate=5.77 GlobalRate=5.77
[xla:6](0) Loss=0.270 Rate=5.76 GlobalRate=5.76
[xla:3](0) Loss=0.460 Rate=5.73 GlobalRate=5.73
[xla:1](0) Loss=0.413 Rate=5.73 GlobalRate=5.73
[xla:4](0) Loss=0.232 Rate=5.78 GlobalRate=5.78
[xla:5](0) Loss=0.237 Rate=5.73 GlobalRate=5.73
[xla:5](40) Loss=0.376 Rate=17.18 GlobalRate=22.95
[xla:3](40) Loss=0.231 Rate=17.18 GlobalRate=22.95
[xla:1](40) Loss=0.265 Rate=17.18 GlobalRate=22.95
[xla:7](40) Loss=0.261 Rate=17.20 GlobalRate=22.96
[xla:6](40) Loss=0.575 Rate=17.19 GlobalRate=22.96
[xla:4](40) Loss=0.415 Rate=17.20 GlobalRate=22.97
[xla:0](40) Loss=0.520 Rate=17.22 GlobalRate=22.99
[xla:2](40) Loss=0.312 Rate=17.20 GlobalRate=22.97
[xla:4](80) Loss=0.384 Rate=21.32 GlobalRate=23.50
[xla:5](80) Loss=0.445 Rate=21.32 GlobalRate=23.49
[xla:1](80) Loss=0.346 Rate=21.31 GlobalRate=23.49
[xla:3](80) Loss=0.276 Rate=21.32 GlobalRate=23.49
[xla:2](80) Loss=0.479 Rate=21.32 GlobalRate=23.50
[xla:0](80) Loss=0.229 Rate=21.33 GlobalRate=23.51
[xla:6](80) Loss=0.373 Rate=21.32 GlobalRate=23.50
[xla:7](80) Loss=0.154 Rate=21.32 GlobalRate=23.50
[xla:1](120) Loss=0.319 Rate=23.64 GlobalRate=24.02
[xla:3](120) Loss=0.182 Rate=23.64 GlobalRate=24.02
[xla:2](120) Loss=0.190 Rate=23.64 GlobalRate=24.03
[xla:7](120) Loss=0.476 Rate=23.64 GlobalRate=24.03
[xla:4](120) Loss=0.161 Rate=23.64 GlobalRate=24.03
[xla:0](120) Loss=0.392 Rate=23.64 GlobalRate=24.04
[xla:6](120) Loss=0.040 Rate=23.63 GlobalRate=24.03
[xla:5](120) Loss=0.532 Rate=23.63 GlobalRate=24.02
[xla:0] Accuracy=93.46%
Finished training epoch 10 train-acc 93.46 in 83.27 sec
[xla:5] Accuracy=91.55%
[xla:1] Accuracy=93.21%
[xla:3] Accuracy=92.43%
[xla:7] Accuracy=92.72%
[xla:4] Accuracy=91.94%
[xla:6] Accuracy=92.29%
[xla:2] Accuracy=93.41%
[xla:5](0) Loss=0.165 Rate=5.42 GlobalRate=5.42
[xla:1](0) Loss=0.285 Rate=5.39 GlobalRate=5.39
[xla:7](0) Loss=0.352 Rate=5.42 GlobalRate=5.42
[xla:3](0) Loss=0.137 Rate=5.41 GlobalRate=5.41
[xla:6](0) Loss=0.152 Rate=5.39 GlobalRate=5.39
[xla:2](0) Loss=0.672 Rate=5.38 GlobalRate=5.38
[xla:0](0) Loss=0.170 Rate=5.44 GlobalRate=5.44
[xla:4](0) Loss=0.203 Rate=5.38 GlobalRate=5.38
[xla:4](40) Loss=0.222 Rate=16.66 GlobalRate=22.29
[xla:3](40) Loss=0.124 Rate=16.68 GlobalRate=22.30
[xla:6](40) Loss=0.514 Rate=16.67 GlobalRate=22.29
[xla:5](40) Loss=0.522 Rate=16.68 GlobalRate=22.30
[xla:2](40) Loss=0.198 Rate=16.66 GlobalRate=22.29
[xla:7](40) Loss=0.150 Rate=16.68 GlobalRate=22.30
[xla:0](40) Loss=0.242 Rate=16.69 GlobalRate=22.31
[xla:1](40) Loss=0.296 Rate=16.66 GlobalRate=22.29
[xla:5](80) Loss=0.168 Rate=21.45 GlobalRate=23.39
[xla:4](80) Loss=0.244 Rate=21.44 GlobalRate=23.38
[xla:6](80) Loss=0.265 Rate=21.44 GlobalRate=23.39
[xla:1](80) Loss=0.130 Rate=21.44 GlobalRate=23.38
[xla:0](80) Loss=0.122 Rate=21.45 GlobalRate=23.40
[xla:2](80) Loss=0.102 Rate=21.44 GlobalRate=23.38
[xla:7](80) Loss=0.088 Rate=21.45 GlobalRate=23.39
[xla:3](80) Loss=0.719 Rate=21.44 GlobalRate=23.39
[xla:5](120) Loss=0.888 Rate=23.52 GlobalRate=23.87
[xla:1](120) Loss=0.283 Rate=23.52 GlobalRate=23.87
[xla:7](120) Loss=0.422 Rate=23.52 GlobalRate=23.87
[xla:4](120) Loss=0.406 Rate=23.52 GlobalRate=23.87
[xla:2](120) Loss=0.657 Rate=23.52 GlobalRate=23.87
[xla:6](120) Loss=0.396 Rate=23.52 GlobalRate=23.87
[xla:0](120) Loss=0.082 Rate=23.52 GlobalRate=23.87
[xla:3](120) Loss=0.254 Rate=23.52 GlobalRate=23.87
[xla:6] Accuracy=93.75%
[xla:2] Accuracy=93.41%
[xla:0] Accuracy=93.70%
[xla:1] Accuracy=92.92%
[xla:7] Accuracy=92.63%
[xla:3] Accuracy=92.77%
[xla:5] Accuracy=93.41%
[xla:4] Accuracy=93.60%
Finished training epoch 11 train-acc 93.70 in 83.24 sec
[xla:2](0) Loss=0.042 Rate=5.97 GlobalRate=5.97
[xla:6](0) Loss=0.213 Rate=5.96 GlobalRate=5.96
[xla:0](0) Loss=0.338 Rate=5.91 GlobalRate=5.91
[xla:7](0) Loss=0.230 Rate=5.95 GlobalRate=5.95
[xla:4](0) Loss=0.089 Rate=5.91 GlobalRate=5.91
[xla:5](0) Loss=0.827 Rate=5.89 GlobalRate=5.89
[xla:3](0) Loss=0.143 Rate=5.90 GlobalRate=5.90
[xla:1](0) Loss=0.449 Rate=5.88 GlobalRate=5.88
[xla:0](40) Loss=0.683 Rate=17.12 GlobalRate=22.84
[xla:4](40) Loss=0.071 Rate=17.13 GlobalRate=22.84
[xla:3](40) Loss=0.630 Rate=17.12 GlobalRate=22.83
[xla:1](40) Loss=0.419 Rate=17.11 GlobalRate=22.83
[xla:6](40) Loss=0.439 Rate=17.14 GlobalRate=22.85
[xla:5](40) Loss=0.272 Rate=17.12 GlobalRate=22.83
[xla:2](40) Loss=0.203 Rate=17.15 GlobalRate=22.86
[xla:7](40) Loss=0.171 Rate=17.14 GlobalRate=22.85
[xla:4](80) Loss=0.119 Rate=21.45 GlobalRate=23.55
[xla:2](80) Loss=0.497 Rate=21.46 GlobalRate=23.56
[xla:0](80) Loss=0.122 Rate=21.45 GlobalRate=23.55
[xla:5](80) Loss=0.287 Rate=21.44 GlobalRate=23.55
[xla:1](80) Loss=0.269 Rate=21.44 GlobalRate=23.54
[xla:7](80) Loss=0.197 Rate=21.45 GlobalRate=23.56
[xla:6](80) Loss=0.076 Rate=21.45 GlobalRate=23.56
[xla:3](80) Loss=0.349 Rate=21.44 GlobalRate=23.54
[xla:1](120) Loss=0.359 Rate=23.48 GlobalRate=23.96
[xla:5](120) Loss=0.278 Rate=23.48 GlobalRate=23.96
[xla:6](120) Loss=0.156 Rate=23.49 GlobalRate=23.97
[xla:7](120) Loss=0.143 Rate=23.49 GlobalRate=23.97
[xla:4](120) Loss=0.218 Rate=23.49 GlobalRate=23.96
[xla:0](120) Loss=0.071 Rate=23.48 GlobalRate=23.96
[xla:3](120) Loss=0.410 Rate=23.49 GlobalRate=23.96
[xla:2](120) Loss=0.018 Rate=23.49 GlobalRate=23.97
[xla:7] Accuracy=93.95%
[xla:3] Accuracy=94.19%
[xla:4] Accuracy=94.29%
[xla:0] Accuracy=93.85%
[xla:2] Accuracy=94.14%
[xla:1] Accuracy=93.31%
Finished training epoch 12 train-acc 93.85 in 83.11 sec
[xla:5] Accuracy=93.31%
[xla:6] Accuracy=92.92%
[xla:5](0) Loss=0.250 Rate=5.57 GlobalRate=5.57
[xla:4](0) Loss=0.235 Rate=5.53 GlobalRate=5.53
[xla:2](0) Loss=0.120 Rate=5.55 GlobalRate=5.55
[xla:7](0) Loss=0.183 Rate=5.53 GlobalRate=5.53
[xla:6](0) Loss=0.200 Rate=5.57 GlobalRate=5.57
[xla:0](0) Loss=0.160 Rate=5.61 GlobalRate=5.61
[xla:3](0) Loss=0.348 Rate=5.55 GlobalRate=5.55
[xla:1](0) Loss=0.483 Rate=5.52 GlobalRate=5.52
[xla:2](40) Loss=0.207 Rate=16.49 GlobalRate=22.01
[xla:6](40) Loss=0.342 Rate=16.49 GlobalRate=22.02
[xla:3](40) Loss=0.255 Rate=16.49 GlobalRate=22.02
[xla:1](40) Loss=0.226 Rate=16.48 GlobalRate=22.01
[xla:5](40) Loss=0.309 Rate=16.49 GlobalRate=22.02
[xla:4](40) Loss=0.083 Rate=16.48 GlobalRate=22.01
[xla:7](40) Loss=0.148 Rate=16.48 GlobalRate=22.01
[xla:0](40) Loss=0.223 Rate=16.51 GlobalRate=22.03
[xla:4](80) Loss=0.249 Rate=21.22 GlobalRate=23.12
[xla:7](80) Loss=0.050 Rate=21.22 GlobalRate=23.12
[xla:5](80) Loss=0.087 Rate=21.23 GlobalRate=23.13
[xla:6](80) Loss=0.070 Rate=21.23 GlobalRate=23.13
[xla:2](80) Loss=0.272 Rate=21.22 GlobalRate=23.12
[xla:0](80) Loss=0.145 Rate=21.23 GlobalRate=23.13
[xla:1](80) Loss=0.093 Rate=21.22 GlobalRate=23.12
[xla:3](80) Loss=0.148 Rate=21.22 GlobalRate=23.12
[xla:2](120) Loss=0.137 Rate=23.55 GlobalRate=23.74
[xla:0](120) Loss=0.253 Rate=23.55 GlobalRate=23.75
[xla:1](120) Loss=0.188 Rate=23.55 GlobalRate=23.74
[xla:6](120) Loss=0.065 Rate=23.55 GlobalRate=23.74
[xla:3](120) Loss=0.023 Rate=23.55 GlobalRate=23.74
[xla:4](120) Loss=0.632 Rate=23.54 GlobalRate=23.74
[xla:7](120) Loss=0.314 Rate=23.54 GlobalRate=23.74
[xla:5](120) Loss=0.125 Rate=23.55 GlobalRate=23.74
[xla:5] Accuracy=95.21%
[xla:2] Accuracy=93.60%
[xla:3] Accuracy=93.75%
[xla:6] Accuracy=94.24%
[xla:4] Accuracy=95.17%
[xla:0] Accuracy=93.26%
[xla:7] Accuracy=95.17%
Finished training epoch 13 train-acc 93.26 in 83.70 sec
[xla:1] Accuracy=94.53%
[xla:1](0) Loss=0.371 Rate=5.52 GlobalRate=5.52
[xla:5](0) Loss=0.302 Rate=5.47 GlobalRate=5.47
[xla:3](0) Loss=0.199 Rate=5.48 GlobalRate=5.48
[xla:6](0) Loss=0.165 Rate=5.50 GlobalRate=5.50
[xla:4](0) Loss=0.242 Rate=5.50 GlobalRate=5.50
[xla:0](0) Loss=0.098 Rate=5.52 GlobalRate=5.52
[xla:2](0) Loss=0.199 Rate=5.55 GlobalRate=5.55
[xla:7](0) Loss=0.056 Rate=5.49 GlobalRate=5.49
[xla:3](40) Loss=0.180 Rate=16.87 GlobalRate=22.56
[xla:4](40) Loss=0.161 Rate=16.89 GlobalRate=22.58
[xla:7](40) Loss=0.448 Rate=16.88 GlobalRate=22.57
[xla:2](40) Loss=0.047 Rate=16.90 GlobalRate=22.59
[xla:5](40) Loss=0.587 Rate=16.87 GlobalRate=22.56
[xla:6](40) Loss=0.223 Rate=16.88 GlobalRate=22.57
[xla:0](40) Loss=0.186 Rate=16.89 GlobalRate=22.58
[xla:1](40) Loss=0.402 Rate=16.89 GlobalRate=22.58
[xla:7](80) Loss=0.055 Rate=21.67 GlobalRate=23.64
[xla:3](80) Loss=0.702 Rate=21.67 GlobalRate=23.64
[xla:6](80) Loss=0.105 Rate=21.67 GlobalRate=23.65
[xla:0](80) Loss=0.182 Rate=21.67 GlobalRate=23.65
[xla:2](80) Loss=0.502 Rate=21.68 GlobalRate=23.66
[xla:1](80) Loss=0.177 Rate=21.67 GlobalRate=23.65
[xla:4](80) Loss=0.034 Rate=21.67 GlobalRate=23.65
[xla:5](80) Loss=0.341 Rate=21.66 GlobalRate=23.64
[xla:4](120) Loss=0.298 Rate=23.22 GlobalRate=23.84
[xla:0](120) Loss=0.206 Rate=23.22 GlobalRate=23.85
[xla:5](120) Loss=0.231 Rate=23.22 GlobalRate=23.84
[xla:1](120) Loss=0.089 Rate=23.22 GlobalRate=23.85
[xla:3](120) Loss=0.509 Rate=23.22 GlobalRate=23.84
[xla:6](120) Loss=0.256 Rate=23.22 GlobalRate=23.84
[xla:2](120) Loss=0.299 Rate=23.22 GlobalRate=23.85
[xla:7](120) Loss=0.371 Rate=23.22 GlobalRate=23.84
[xla:2] Accuracy=95.75%
[xla:5] Accuracy=94.38%
[xla:7] Accuracy=95.65%
[xla:1] Accuracy=94.97%
[xla:0] Accuracy=95.17%
[xla:3] Accuracy=94.53%
[xla:4] Accuracy=95.17%
Finished training epoch 14 train-acc 95.17 in 83.63 sec
[xla:6] Accuracy=95.26%
[xla:4](0) Loss=0.085 Rate=5.51 GlobalRate=5.51
[xla:1](0) Loss=0.050 Rate=5.48 GlobalRate=5.48
[xla:0](0) Loss=0.109 Rate=5.55 GlobalRate=5.55
[xla:2](0) Loss=0.422 Rate=5.50 GlobalRate=5.50
[xla:5](0) Loss=0.232 Rate=5.48 GlobalRate=5.48
[xla:6](0) Loss=0.117 Rate=5.47 GlobalRate=5.47
[xla:7](0) Loss=0.047 Rate=5.49 GlobalRate=5.49
[xla:3](0) Loss=0.021 Rate=5.50 GlobalRate=5.50
[xla:2](40) Loss=0.157 Rate=16.96 GlobalRate=22.68
[xla:1](40) Loss=0.784 Rate=16.95 GlobalRate=22.67
[xla:6](40) Loss=0.197 Rate=16.95 GlobalRate=22.66
[xla:3](40) Loss=0.172 Rate=16.96 GlobalRate=22.68
[xla:4](40) Loss=0.180 Rate=16.96 GlobalRate=22.68
[xla:0](40) Loss=0.257 Rate=16.98 GlobalRate=22.70
[xla:7](40) Loss=0.029 Rate=16.95 GlobalRate=22.67
[xla:5](40) Loss=0.133 Rate=16.94 GlobalRate=22.66
[xla:6](80) Loss=0.169 Rate=21.12 GlobalRate=23.26
[xla:3](80) Loss=0.149 Rate=21.12 GlobalRate=23.26
[xla:5](80) Loss=0.164 Rate=21.12 GlobalRate=23.26
[xla:0](80) Loss=0.106 Rate=21.13 GlobalRate=23.28
[xla:2](80) Loss=0.060 Rate=21.12 GlobalRate=23.26
[xla:7](80) Loss=0.103 Rate=21.12 GlobalRate=23.26
[xla:4](80) Loss=0.025 Rate=21.12 GlobalRate=23.27
[xla:1](80) Loss=0.269 Rate=21.12 GlobalRate=23.26
[xla:7](120) Loss=0.256 Rate=23.52 GlobalRate=23.85
[xla:4](120) Loss=0.021 Rate=23.52 GlobalRate=23.85
[xla:0](120) Loss=0.266 Rate=23.52 GlobalRate=23.85
[xla:5](120) Loss=0.136 Rate=23.52 GlobalRate=23.84
[xla:1](120) Loss=0.372 Rate=23.52 GlobalRate=23.84
[xla:2](120) Loss=0.401 Rate=23.52 GlobalRate=23.85
[xla:3](120) Loss=0.020 Rate=23.52 GlobalRate=23.85
[xla:6](120) Loss=0.237 Rate=23.52 GlobalRate=23.84
[xla:3] Accuracy=95.56%
[xla:6] Accuracy=95.31%
[xla:7] Accuracy=94.97%
[xla:4] Accuracy=96.04%
[xla:5] Accuracy=96.00%
[xla:0] Accuracy=96.14%
[xla:2] Accuracy=95.26%
[xla:1] Accuracy=94.87%
Finished training epoch 15 train-acc 96.14 in 83.26 sec
[xla:7](0) Loss=0.048 Rate=5.68 GlobalRate=5.68
[xla:0](0) Loss=0.341 Rate=5.74 GlobalRate=5.74
[xla:5](0) Loss=0.374 Rate=5.72 GlobalRate=5.72
[xla:1](0) Loss=0.201 Rate=5.66 GlobalRate=5.66
[xla:3](0) Loss=0.069 Rate=5.67 GlobalRate=5.67
[xla:6](0) Loss=0.181 Rate=5.66 GlobalRate=5.66
[xla:4](0) Loss=0.106 Rate=5.65 GlobalRate=5.65
[xla:2](0) Loss=0.047 Rate=5.68 GlobalRate=5.68
[xla:0](40) Loss=0.045 Rate=16.68 GlobalRate=22.24
[xla:7](40) Loss=0.033 Rate=16.65 GlobalRate=22.22
[xla:1](40) Loss=0.084 Rate=16.64 GlobalRate=22.21
[xla:5](40) Loss=0.116 Rate=16.67 GlobalRate=22.24
[xla:2](40) Loss=0.107 Rate=16.65 GlobalRate=22.23
[xla:3](40) Loss=0.181 Rate=16.65 GlobalRate=22.22
[xla:4](40) Loss=0.109 Rate=16.64 GlobalRate=22.21
[xla:6](40) Loss=0.036 Rate=16.64 GlobalRate=22.21
[xla:7](80) Loss=0.086 Rate=21.61 GlobalRate=23.47
[xla:5](80) Loss=0.081 Rate=21.61 GlobalRate=23.48
[xla:0](80) Loss=0.087 Rate=21.61 GlobalRate=23.48
[xla:3](80) Loss=0.203 Rate=21.61 GlobalRate=23.47
[xla:2](80) Loss=0.114 Rate=21.61 GlobalRate=23.48
[xla:4](80) Loss=0.097 Rate=21.60 GlobalRate=23.47
[xla:6](80) Loss=0.378 Rate=21.60 GlobalRate=23.47
[xla:1](80) Loss=0.128 Rate=21.60 GlobalRate=23.47
[xla:0](120) Loss=0.036 Rate=23.31 GlobalRate=23.79
[xla:6](120) Loss=0.152 Rate=23.30 GlobalRate=23.78
[xla:4](120) Loss=0.024 Rate=23.30 GlobalRate=23.78
[xla:5](120) Loss=0.149 Rate=23.31 GlobalRate=23.79
[xla:3](120) Loss=0.224 Rate=23.30 GlobalRate=23.78
[xla:1](120) Loss=0.048 Rate=23.30 GlobalRate=23.78
[xla:2](120) Loss=0.096 Rate=23.30 GlobalRate=23.78
[xla:7](120) Loss=0.676 Rate=23.30 GlobalRate=23.78
[xla:4] Accuracy=96.48%
[xla:3] Accuracy=95.61%
[xla:1] Accuracy=96.58%
[xla:0] Accuracy=95.95%
[xla:5] Accuracy=95.70%
[xla:7] Accuracy=94.97%
[xla:6] Accuracy=95.65%
Finished training epoch 16 train-acc 95.95 in 83.74 sec
[xla:2] Accuracy=95.90%
[xla:3](0) Loss=0.049 Rate=5.91 GlobalRate=5.91
[xla:6](0) Loss=0.132 Rate=5.94 GlobalRate=5.94
[xla:4](0) Loss=0.109 Rate=5.96 GlobalRate=5.96
[xla:7](0) Loss=0.157 Rate=5.89 GlobalRate=5.89
[xla:0](0) Loss=0.081 Rate=5.96 GlobalRate=5.96
[xla:1](0) Loss=0.281 Rate=5.90 GlobalRate=5.90
[xla:5](0) Loss=0.037 Rate=5.91 GlobalRate=5.91
[xla:2](0) Loss=0.023 Rate=5.95 GlobalRate=5.95
[xla:1](40) Loss=0.159 Rate=17.39 GlobalRate=23.21
[xla:2](40) Loss=0.115 Rate=17.41 GlobalRate=23.23
[xla:6](40) Loss=0.113 Rate=17.40 GlobalRate=23.22
[xla:3](40) Loss=0.017 Rate=17.39 GlobalRate=23.21
[xla:5](40) Loss=0.186 Rate=17.39 GlobalRate=23.21
[xla:4](40) Loss=0.030 Rate=17.41 GlobalRate=23.23
[xla:0](40) Loss=0.231 Rate=17.41 GlobalRate=23.23
[xla:7](40) Loss=0.139 Rate=17.38 GlobalRate=23.20
[xla:3](80) Loss=0.304 Rate=21.25 GlobalRate=23.51
[xla:1](80) Loss=0.130 Rate=21.25 GlobalRate=23.51
[xla:4](80) Loss=0.141 Rate=21.26 GlobalRate=23.52
[xla:0](80) Loss=0.078 Rate=21.26 GlobalRate=23.52
[xla:7](80) Loss=0.013 Rate=21.25 GlobalRate=23.51
[xla:6](80) Loss=0.143 Rate=21.25 GlobalRate=23.52
[xla:2](80) Loss=0.284 Rate=21.25 GlobalRate=23.52
[xla:5](80) Loss=0.097 Rate=21.25 GlobalRate=23.51
[xla:1](120) Loss=0.044 Rate=23.54 GlobalRate=24.00
[xla:6](120) Loss=0.008 Rate=23.55 GlobalRate=24.01
[xla:7](120) Loss=0.174 Rate=23.54 GlobalRate=24.00
[xla:4](120) Loss=0.157 Rate=23.55 GlobalRate=24.01
[xla:5](120) Loss=0.188 Rate=23.55 GlobalRate=24.00
[xla:3](120) Loss=0.019 Rate=23.55 GlobalRate=24.00
[xla:2](120) Loss=0.528 Rate=23.55 GlobalRate=24.01
[xla:0](120) Loss=0.285 Rate=23.55 GlobalRate=24.01
[xla:6] Accuracy=96.39%
[xla:7] Accuracy=96.63%
[xla:0] Accuracy=96.19%
[xla:1] Accuracy=96.04%
[xla:5] Accuracy=96.48%
[xla:2] Accuracy=96.58%
[xla:4] Accuracy=96.39%
Finished training epoch 17 train-acc 96.19 in 82.70 sec
[xla:3] Accuracy=96.09%
[xla:5](0) Loss=0.139 Rate=5.33 GlobalRate=5.33
[xla:6](0) Loss=0.052 Rate=5.35 GlobalRate=5.35
[xla:3](0) Loss=0.185 Rate=5.32 GlobalRate=5.32
[xla:0](0) Loss=0.068 Rate=5.35 GlobalRate=5.35
[xla:1](0) Loss=0.219 Rate=5.30 GlobalRate=5.30
[xla:7](0) Loss=0.462 Rate=5.30 GlobalRate=5.30
[xla:4](0) Loss=0.285 Rate=5.30 GlobalRate=5.30
[xla:2](0) Loss=0.198 Rate=5.36 GlobalRate=5.36
[xla:3](40) Loss=0.106 Rate=16.73 GlobalRate=22.39
[xla:2](40) Loss=0.106 Rate=16.75 GlobalRate=22.41
[xla:6](40) Loss=0.106 Rate=16.75 GlobalRate=22.40
[xla:4](40) Loss=0.362 Rate=16.72 GlobalRate=22.38
[xla:7](40) Loss=0.036 Rate=16.73 GlobalRate=22.38
[xla:1](40) Loss=0.084 Rate=16.73 GlobalRate=22.38
[xla:0](40) Loss=0.156 Rate=16.75 GlobalRate=22.40
[xla:5](40) Loss=0.065 Rate=16.74 GlobalRate=22.39
[xla:3](80) Loss=0.097 Rate=21.76 GlobalRate=23.65
[xla:0](80) Loss=0.095 Rate=21.76 GlobalRate=23.66
[xla:2](80) Loss=0.124 Rate=21.76 GlobalRate=23.66
[xla:1](80) Loss=0.034 Rate=21.75 GlobalRate=23.65
[xla:4](80) Loss=0.077 Rate=21.75 GlobalRate=23.65
[xla:6](80) Loss=0.046 Rate=21.76 GlobalRate=23.66
[xla:7](80) Loss=0.189 Rate=21.75 GlobalRate=23.65
[xla:5](80) Loss=0.341 Rate=21.76 GlobalRate=23.66
[xla:1](120) Loss=0.067 Rate=23.67 GlobalRate=24.06
[xla:4](120) Loss=0.032 Rate=23.67 GlobalRate=24.06
[xla:2](120) Loss=0.057 Rate=23.67 GlobalRate=24.07
[xla:3](120) Loss=0.121 Rate=23.67 GlobalRate=24.07
[xla:5](120) Loss=0.383 Rate=23.67 GlobalRate=24.07
[xla:6](120) Loss=0.159 Rate=23.68 GlobalRate=24.07
[xla:0](120) Loss=0.208 Rate=23.67 GlobalRate=24.07
[xla:7](120) Loss=0.102 Rate=23.67 GlobalRate=24.06
[xla:2] Accuracy=96.73%
[xla:4] Accuracy=96.24%
[xla:0] Accuracy=96.19%
[xla:5] Accuracy=96.29%
[xla:1] Accuracy=96.19%
[xla:7] Accuracy=96.19%
[xla:6] Accuracy=96.88%
[xla:3] Accuracy=96.29%
Finished training epoch 18 train-acc 96.19 in 83.06 sec
[xla:1](0) Loss=0.059 Rate=5.36 GlobalRate=5.36
[xla:4](0) Loss=0.026 Rate=5.37 GlobalRate=5.37
[xla:0](0) Loss=0.224 Rate=5.37 GlobalRate=5.37
[xla:6](0) Loss=0.082 Rate=5.34 GlobalRate=5.34
[xla:3](0) Loss=0.027 Rate=5.34 GlobalRate=5.34
[xla:7](0) Loss=0.212 Rate=5.36 GlobalRate=5.36
[xla:2](0) Loss=0.235 Rate=5.32 GlobalRate=5.32
[xla:5](0) Loss=0.114 Rate=5.31 GlobalRate=5.31
[xla:1](40) Loss=0.072 Rate=16.99 GlobalRate=22.73
[xla:4](40) Loss=0.097 Rate=16.99 GlobalRate=22.74
[xla:3](40) Loss=0.051 Rate=16.98 GlobalRate=22.72
[xla:7](40) Loss=0.090 Rate=16.99 GlobalRate=22.73
[xla:0](40) Loss=0.061 Rate=16.99 GlobalRate=22.74
[xla:2](40) Loss=0.078 Rate=16.97 GlobalRate=22.72
[xla:6](40) Loss=0.678 Rate=16.98 GlobalRate=22.72
[xla:5](40) Loss=0.096 Rate=16.97 GlobalRate=22.71
[xla:0](80) Loss=0.163 Rate=21.33 GlobalRate=23.45
[xla:3](80) Loss=0.163 Rate=21.33 GlobalRate=23.44
[xla:6](80) Loss=0.111 Rate=21.33 GlobalRate=23.44
[xla:5](80) Loss=0.354 Rate=21.32 GlobalRate=23.44
[xla:1](80) Loss=0.081 Rate=21.33 GlobalRate=23.45
[xla:4](80) Loss=0.031 Rate=21.33 GlobalRate=23.45
[xla:7](80) Loss=0.041 Rate=21.33 GlobalRate=23.45
[xla:2](80) Loss=0.139 Rate=21.33 GlobalRate=23.44
[xla:6](120) Loss=0.024 Rate=23.20 GlobalRate=23.77
[xla:2](120) Loss=0.103 Rate=23.20 GlobalRate=23.76
[xla:5](120) Loss=0.228 Rate=23.19 GlobalRate=23.76
[xla:4](120) Loss=0.017 Rate=23.20 GlobalRate=23.77
[xla:7](120) Loss=0.190 Rate=23.20 GlobalRate=23.77
[xla:3](120) Loss=0.012 Rate=23.19 GlobalRate=23.76
[xla:0](120) Loss=0.032 Rate=23.20 GlobalRate=23.77
[xla:1](120) Loss=0.435 Rate=23.19 GlobalRate=23.77
[xla:3] Accuracy=96.58%
[xla:6] Accuracy=96.24%
[xla:0] Accuracy=96.39%
[xla:5] Accuracy=96.48%
[xla:2] Accuracy=96.73%
Finished training epoch 19 train-acc 96.39 in 83.66 sec
[xla:1] Accuracy=96.34%
[xla:4] Accuracy=96.53%
[xla:7] Accuracy=97.02%
[xla:5](0) Loss=0.082 Rate=5.90 GlobalRate=5.90
[xla:1](0) Loss=0.106 Rate=5.85 GlobalRate=5.85
[xla:4](0) Loss=0.043 Rate=5.86 GlobalRate=5.86
[xla:0](0) Loss=0.045 Rate=5.93 GlobalRate=5.93
[xla:3](0) Loss=0.012 Rate=5.90 GlobalRate=5.90
[xla:6](0) Loss=0.041 Rate=5.86 GlobalRate=5.86
[xla:7](0) Loss=0.042 Rate=5.89 GlobalRate=5.89
[xla:2](0) Loss=0.054 Rate=5.87 GlobalRate=5.87
[xla:4](40) Loss=0.146 Rate=17.25 GlobalRate=23.03
[xla:7](40) Loss=0.020 Rate=17.26 GlobalRate=23.04
[xla:2](40) Loss=0.016 Rate=17.26 GlobalRate=23.04
[xla:5](40) Loss=0.104 Rate=17.27 GlobalRate=23.05
[xla:3](40) Loss=0.024 Rate=17.27 GlobalRate=23.04
[xla:6](40) Loss=0.535 Rate=17.25 GlobalRate=23.03
[xla:1](40) Loss=0.158 Rate=17.25 GlobalRate=23.03
[xla:0](40) Loss=0.245 Rate=17.28 GlobalRate=23.06
[xla:5](80) Loss=0.032 Rate=21.46 GlobalRate=23.63
[xla:4](80) Loss=0.023 Rate=21.45 GlobalRate=23.62
[xla:7](80) Loss=0.071 Rate=21.46 GlobalRate=23.62
[xla:6](80) Loss=0.029 Rate=21.45 GlobalRate=23.62
[xla:1](80) Loss=0.090 Rate=21.45 GlobalRate=23.62
[xla:3](80) Loss=0.235 Rate=21.46 GlobalRate=23.63
[xla:0](80) Loss=0.182 Rate=21.46 GlobalRate=23.63
[xla:2](80) Loss=0.045 Rate=21.45 GlobalRate=23.62
[xla:5](120) Loss=0.046 Rate=23.80 GlobalRate=24.17
[xla:2](120) Loss=0.047 Rate=23.80 GlobalRate=24.17
[xla:6](120) Loss=0.031 Rate=23.80 GlobalRate=24.17
[xla:1](120) Loss=0.066 Rate=23.80 GlobalRate=24.17
[xla:4](120) Loss=0.091 Rate=23.80 GlobalRate=24.17
[xla:7](120) Loss=0.095 Rate=23.80 GlobalRate=24.17
[xla:3](120) Loss=0.011 Rate=23.80 GlobalRate=24.17
[xla:0](120) Loss=0.107 Rate=23.81 GlobalRate=24.18
[xla:0] Accuracy=97.41%
[xla:1] Accuracy=96.83%
Finished training epoch 20 train-acc 97.41 in 82.89 sec
[xla:2] Accuracy=97.46%
[xla:4] Accuracy=97.22%
[xla:3] Accuracy=96.78%
[xla:5] Accuracy=96.63%
[xla:7] Accuracy=97.27%
[xla:6] Accuracy=97.36%
[xla:6](0) Loss=0.099 Rate=5.50 GlobalRate=5.50
[xla:4](0) Loss=0.035 Rate=5.53 GlobalRate=5.53
[xla:2](0) Loss=0.024 Rate=5.52 GlobalRate=5.52
[xla:5](0) Loss=0.010 Rate=5.47 GlobalRate=5.47
[xla:1](0) Loss=0.031 Rate=5.47 GlobalRate=5.47
[xla:3](0) Loss=0.105 Rate=5.46 GlobalRate=5.46
[xla:0](0) Loss=0.078 Rate=5.52 GlobalRate=5.52
[xla:7](0) Loss=0.159 Rate=5.47 GlobalRate=5.47
[xla:6](40) Loss=0.366 Rate=16.70 GlobalRate=22.32
[xla:4](40) Loss=0.024 Rate=16.71 GlobalRate=22.33
[xla:2](40) Loss=0.189 Rate=16.71 GlobalRate=22.33
[xla:7](40) Loss=0.030 Rate=16.69 GlobalRate=22.31
[xla:5](40) Loss=0.015 Rate=16.69 GlobalRate=22.31
[xla:0](40) Loss=0.075 Rate=16.71 GlobalRate=22.33
[xla:1](40) Loss=0.019 Rate=16.69 GlobalRate=22.31
[xla:3](40) Loss=0.013 Rate=16.69 GlobalRate=22.31
[xla:4](80) Loss=0.122 Rate=21.55 GlobalRate=23.48
[xla:0](80) Loss=0.195 Rate=21.55 GlobalRate=23.48
[xla:6](80) Loss=0.088 Rate=21.55 GlobalRate=23.47
[xla:3](80) Loss=0.815 Rate=21.54 GlobalRate=23.47
[xla:1](80) Loss=0.033 Rate=21.54 GlobalRate=23.47
[xla:2](80) Loss=0.062 Rate=21.55 GlobalRate=23.48
[xla:5](80) Loss=0.083 Rate=21.54 GlobalRate=23.46
[xla:7](80) Loss=0.019 Rate=21.54 GlobalRate=23.46
[xla:6](120) Loss=0.080 Rate=22.19 GlobalRate=23.18
[xla:4](120) Loss=0.025 Rate=22.19 GlobalRate=23.19
[xla:2](120) Loss=0.027 Rate=22.19 GlobalRate=23.19
[xla:3](120) Loss=0.028 Rate=22.19 GlobalRate=23.18
[xla:7](120) Loss=0.086 Rate=22.19 GlobalRate=23.18
[xla:5](120) Loss=0.067 Rate=22.19 GlobalRate=23.18
[xla:0](120) Loss=0.057 Rate=22.19 GlobalRate=23.19
[xla:1](120) Loss=0.100 Rate=22.19 GlobalRate=23.18
[xla:3] Accuracy=97.41%
[xla:7] Accuracy=96.83%
[xla:5] Accuracy=97.36%
[xla:0] Accuracy=97.51%
[xla:2] Accuracy=97.51%
[xla:6] Accuracy=97.22%
[xla:4] Accuracy=96.92%
[xla:1] Accuracy=97.46%
Finished training epoch 21 train-acc 97.51 in 85.79 sec
[xla:2](0) Loss=0.028 Rate=5.99 GlobalRate=5.99
[xla:6](0) Loss=0.184 Rate=6.03 GlobalRate=6.03
[xla:1](0) Loss=0.008 Rate=6.00 GlobalRate=6.00
[xla:0](0) Loss=0.213 Rate=6.01 GlobalRate=6.01
[xla:7](0) Loss=0.071 Rate=5.97 GlobalRate=5.97
[xla:5](0) Loss=0.084 Rate=5.97 GlobalRate=5.97
[xla:3](0) Loss=0.056 Rate=6.01 GlobalRate=6.01
[xla:4](0) Loss=0.050 Rate=6.05 GlobalRate=6.05
[xla:6](40) Loss=0.033 Rate=17.20 GlobalRate=22.92
[xla:0](40) Loss=0.169 Rate=17.19 GlobalRate=22.91
[xla:7](40) Loss=0.019 Rate=17.17 GlobalRate=22.90
[xla:4](40) Loss=0.227 Rate=17.21 GlobalRate=22.93
[xla:1](40) Loss=0.034 Rate=17.19 GlobalRate=22.91
[xla:3](40) Loss=0.093 Rate=17.19 GlobalRate=22.91
[xla:2](40) Loss=0.012 Rate=17.18 GlobalRate=22.90
[xla:5](40) Loss=0.025 Rate=17.17 GlobalRate=22.89
[xla:6](80) Loss=0.013 Rate=21.28 GlobalRate=23.44
[xla:7](80) Loss=0.126 Rate=21.27 GlobalRate=23.43
[xla:2](80) Loss=0.328 Rate=21.27 GlobalRate=23.43
[xla:5](80) Loss=0.225 Rate=21.27 GlobalRate=23.43
[xla:0](80) Loss=0.245 Rate=21.28 GlobalRate=23.44
[xla:3](80) Loss=0.159 Rate=21.28 GlobalRate=23.44
[xla:1](80) Loss=0.027 Rate=21.28 GlobalRate=23.44
[xla:4](80) Loss=0.021 Rate=21.28 GlobalRate=23.44
[xla:3](120) Loss=0.071 Rate=24.02 GlobalRate=24.19
[xla:7](120) Loss=0.055 Rate=24.02 GlobalRate=24.18
[xla:0](120) Loss=0.015 Rate=24.03 GlobalRate=24.19
[xla:5](120) Loss=0.036 Rate=24.02 GlobalRate=24.18
[xla:6](120) Loss=0.009 Rate=24.03 GlobalRate=24.19
[xla:4](120) Loss=0.026 Rate=24.03 GlobalRate=24.19
[xla:2](120) Loss=0.030 Rate=24.02 GlobalRate=24.18
[xla:1](120) Loss=0.033 Rate=24.02 GlobalRate=24.18
[xla:4] Accuracy=97.31%
[xla:6] Accuracy=96.92%
[xla:2] Accuracy=97.90%
[xla:1] Accuracy=97.41%
[xla:7] Accuracy=97.41%
[xla:3] Accuracy=97.61%
[xla:0] Accuracy=97.46%
[xla:5] Accuracy=97.46%
Finished training epoch 22 train-acc 97.46 in 82.53 sec
[xla:7](0) Loss=0.110 Rate=5.50 GlobalRate=5.50
[xla:1](0) Loss=0.074 Rate=5.50 GlobalRate=5.50
[xla:0](0) Loss=0.103 Rate=5.52 GlobalRate=5.52
[xla:2](0) Loss=0.008 Rate=5.50 GlobalRate=5.50
[xla:3](0) Loss=0.037 Rate=5.52 GlobalRate=5.52
[xla:5](0) Loss=0.063 Rate=5.55 GlobalRate=5.55
[xla:4](0) Loss=0.043 Rate=5.49 GlobalRate=5.49
[xla:6](0) Loss=0.089 Rate=5.52 GlobalRate=5.52
[xla:4](40) Loss=0.108 Rate=16.69 GlobalRate=22.31
[xla:0](40) Loss=0.139 Rate=16.71 GlobalRate=22.33
[xla:2](40) Loss=0.156 Rate=16.70 GlobalRate=22.31
[xla:5](40) Loss=0.220 Rate=16.72 GlobalRate=22.34
[xla:1](40) Loss=0.124 Rate=16.70 GlobalRate=22.32
[xla:6](40) Loss=0.051 Rate=16.71 GlobalRate=22.33
[xla:7](40) Loss=0.207 Rate=16.70 GlobalRate=22.32
[xla:3](40) Loss=0.452 Rate=16.71 GlobalRate=22.33
[xla:3](80) Loss=0.329 Rate=21.33 GlobalRate=23.31
[xla:1](80) Loss=0.107 Rate=21.32 GlobalRate=23.30
[xla:6](80) Loss=0.023 Rate=21.33 GlobalRate=23.31
[xla:5](80) Loss=0.123 Rate=21.33 GlobalRate=23.31
[xla:0](80) Loss=0.016 Rate=21.33 GlobalRate=23.31
[xla:7](80) Loss=0.025 Rate=21.33 GlobalRate=23.30
[xla:4](80) Loss=0.125 Rate=21.32 GlobalRate=23.30
[xla:2](80) Loss=0.016 Rate=21.33 GlobalRate=23.30
[xla:5](120) Loss=0.023 Rate=23.46 GlobalRate=23.81
[xla:6](120) Loss=0.105 Rate=23.46 GlobalRate=23.80
[xla:1](120) Loss=0.014 Rate=23.46 GlobalRate=23.80
[xla:7](120) Loss=0.171 Rate=23.45 GlobalRate=23.80
[xla:2](120) Loss=0.070 Rate=23.45 GlobalRate=23.80
[xla:3](120) Loss=0.047 Rate=23.44 GlobalRate=23.80
[xla:4](120) Loss=0.045 Rate=23.44 GlobalRate=23.79
[xla:0](120) Loss=0.105 Rate=23.44 GlobalRate=23.80
[xla:1] Accuracy=97.56%
[xla:3] Accuracy=96.68%
[xla:7] Accuracy=97.75%
[xla:4] Accuracy=97.31%
[xla:0] Accuracy=97.36%
[xla:6] Accuracy=97.31%
[xla:5] Accuracy=97.61%
[xla:2] Accuracy=97.46%
Finished training epoch 23 train-acc 97.36 in 83.69 sec
[xla:5](0) Loss=0.206 Rate=5.78 GlobalRate=5.78
[xla:6](0) Loss=0.050 Rate=5.79 GlobalRate=5.79
[xla:1](0) Loss=0.370 Rate=5.75 GlobalRate=5.75
[xla:0](0) Loss=0.335 Rate=5.76 GlobalRate=5.76
[xla:3](0) Loss=0.237 Rate=5.79 GlobalRate=5.79
[xla:2](0) Loss=0.091 Rate=5.75 GlobalRate=5.75
[xla:7](0) Loss=0.260 Rate=5.74 GlobalRate=5.74
[xla:4](0) Loss=0.057 Rate=5.85 GlobalRate=5.85
[xla:6](40) Loss=0.016 Rate=17.21 GlobalRate=22.98
[xla:2](40) Loss=0.020 Rate=17.19 GlobalRate=22.96
[xla:4](40) Loss=0.119 Rate=17.23 GlobalRate=23.00
[xla:0](40) Loss=0.115 Rate=17.19 GlobalRate=22.96
[xla:1](40) Loss=0.055 Rate=17.19 GlobalRate=22.96
[xla:5](40) Loss=0.057 Rate=17.20 GlobalRate=22.97
[xla:3](40) Loss=0.107 Rate=17.20 GlobalRate=22.97
[xla:7](40) Loss=0.298 Rate=17.18 GlobalRate=22.95
[xla:5](80) Loss=0.537 Rate=21.93 GlobalRate=23.97
[xla:6](80) Loss=0.079 Rate=21.93 GlobalRate=23.97
[xla:4](80) Loss=0.034 Rate=21.94 GlobalRate=23.98
[xla:7](80) Loss=0.040 Rate=21.92 GlobalRate=23.96
[xla:1](80) Loss=0.123 Rate=21.92 GlobalRate=23.96
[xla:0](80) Loss=0.023 Rate=21.93 GlobalRate=23.96
[xla:3](80) Loss=0.062 Rate=21.93 GlobalRate=23.97
[xla:2](80) Loss=0.067 Rate=21.92 GlobalRate=23.96
[xla:4](120) Loss=0.248 Rate=23.46 GlobalRate=24.14
[xla:3](120) Loss=0.025 Rate=23.45 GlobalRate=24.13
[xla:1](120) Loss=0.028 Rate=23.45 GlobalRate=24.12
[xla:0](120) Loss=0.078 Rate=23.45 GlobalRate=24.13
[xla:5](120) Loss=0.165 Rate=23.45 GlobalRate=24.13
[xla:6](120) Loss=0.051 Rate=23.45 GlobalRate=24.13
[xla:7](120) Loss=0.043 Rate=23.45 GlobalRate=24.12
[xla:2](120) Loss=0.215 Rate=23.45 GlobalRate=24.13
[xla:5] Accuracy=97.02%
[xla:0] Accuracy=97.75%
[xla:2] Accuracy=97.31%
[xla:7] Accuracy=97.66%
Finished training epoch 24 train-acc 97.75 in 82.72 sec
[xla:6] Accuracy=97.46%
[xla:3] Accuracy=97.22%
[xla:4] Accuracy=97.75%
[xla:1] Accuracy=97.41%
[xla:0](0) Loss=0.061 Rate=5.94 GlobalRate=5.94
[xla:7](0) Loss=0.073 Rate=5.89 GlobalRate=5.89
[xla:6](0) Loss=0.092 Rate=5.95 GlobalRate=5.95
[xla:3](0) Loss=0.233 Rate=5.92 GlobalRate=5.92
[xla:1](0) Loss=0.142 Rate=5.89 GlobalRate=5.89
[xla:5](0) Loss=0.107 Rate=5.88 GlobalRate=5.88
[xla:4](0) Loss=0.171 Rate=5.89 GlobalRate=5.89
[xla:2](0) Loss=0.100 Rate=5.90 GlobalRate=5.90
[xla:0](40) Loss=0.061 Rate=17.26 GlobalRate=23.03
[xla:6](40) Loss=0.039 Rate=17.26 GlobalRate=23.02
[xla:2](40) Loss=0.020 Rate=17.25 GlobalRate=23.01
[xla:1](40) Loss=0.142 Rate=17.24 GlobalRate=23.00
[xla:4](40) Loss=0.077 Rate=17.24 GlobalRate=23.01
[xla:3](40) Loss=0.052 Rate=17.25 GlobalRate=23.02
[xla:7](40) Loss=0.155 Rate=17.24 GlobalRate=23.00
[xla:5](40) Loss=0.043 Rate=17.24 GlobalRate=23.00
[xla:1](80) Loss=0.040 Rate=21.51 GlobalRate=23.65
[xla:7](80) Loss=0.045 Rate=21.51 GlobalRate=23.65
[xla:5](80) Loss=0.013 Rate=21.51 GlobalRate=23.65
[xla:6](80) Loss=0.007 Rate=21.52 GlobalRate=23.66
[xla:0](80) Loss=0.087 Rate=21.52 GlobalRate=23.66
[xla:2](80) Loss=0.076 Rate=21.51 GlobalRate=23.66
[xla:4](80) Loss=0.028 Rate=21.51 GlobalRate=23.65
[xla:3](80) Loss=0.066 Rate=21.51 GlobalRate=23.66
[xla:4](120) Loss=0.108 Rate=23.49 GlobalRate=24.02
[xla:0](120) Loss=0.030 Rate=23.49 GlobalRate=24.03
[xla:3](120) Loss=0.048 Rate=23.49 GlobalRate=24.02
[xla:7](120) Loss=0.045 Rate=23.49 GlobalRate=24.02
[xla:2](120) Loss=0.045 Rate=23.48 GlobalRate=24.02
[xla:6](120) Loss=0.106 Rate=23.49 GlobalRate=24.03
[xla:1](120) Loss=0.230 Rate=23.49 GlobalRate=24.02
[xla:5](120) Loss=0.015 Rate=23.49 GlobalRate=24.02
[xla:6] Accuracy=97.41%
[xla:0] Accuracy=97.27%
Finished training epoch 25 train-acc 97.27 in 82.68 sec
[xla:2] Accuracy=98.00%
[xla:4] Accuracy=97.46%
[xla:1] Accuracy=97.17%
[xla:3] Accuracy=98.14%
[xla:7] Accuracy=97.41%
[xla:5] Accuracy=97.56%
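A hedged evaluation sketch, not part of the original notebook: it reloads the checkpoint written by xm.save during training and scores it on VAL_DIR with the non-augmenting valid_transform, on CPU for simplicity; MyModel, valid_transform, VAL_DIR and BATCH_SIZE are reused from the cells above.

model = MyModel()
model.load_state_dict(torch.load("./model.pt", map_location="cpu"))
model.eval()
val_eval = datasets.ImageFolder(VAL_DIR, transform=valid_transform)
val_loader = torch.utils.data.DataLoader(val_eval, batch_size=BATCH_SIZE, shuffle=False)
correct, total = 0, 0
with torch.no_grad():
    for data, target in val_loader:
        pred = model(data).argmax(dim=1)          # predicted class index per image
        correct += (pred == target).sum().item()
        total += target.size(0)
print('Validation accuracy: {:.2f}%'.format(100.0 * correct / total))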
R06725035 陳廷易

* Tokenization.
* Lowercasing everything.
* Stemming using Porter’s algorithm.
* Stopword removal.
* Save the result as a txt file (a minimal sketch of these steps appears after the setup cell below).

# import keras
# from keras.preprocessing.text import Tokenizer
# import gensim
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
from nltk import word_tokenize
from nltk.stem.porter import *
from nltk.tokenize import RegexpTokenizer
# nltk.download('all')

[nltk_data] Downloading collection 'all'
[nltk_data]    | Downloading the NLTK data packages (abc, alpino, brown, cmudict,
[nltk_data]    |     gutenberg, stopwords, treebank, universal_treebanks_v20, ...)
[nltk_data]    |     to /home/leoqaz12/nltk_data; all packages are already up-to-date!
read data | file = open('data/28.txt','r')
texts = file.read()
file.close()
texts | _____no_output_____ | MIT | preprocessing/ExtractTerms.ipynb | tychen5/IR_TextMining |
main preprocessing | with open('data/stop_words.txt') as f:
stop_words_list = f.read().splitlines()
ps = PorterStemmer() # Stemming
stop_words = set(stopwords.words('english')) #Stopword
short = ['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}',
'\'s','\'m','\'re','\'ll','\'d','n\'t','shan\'t']
stop_words_list.extend(short)
stop_words.update(stop_words_list) # remove it if you need punctuation
tokens = [i for i in word_tokenize(texts.lower()) if i not in stop_words] # lowercase, tokenize, and drop stopwords/punctuation
token_result = ''
for i,token in enumerate(tokens):
    if i != len(tokens)-1: # no trailing space after the last token
token_result += ps.stem(token) + ' '
else:
token_result += ps.stem(token)
# tokens = nltk.word_tokenize(texts.lower())
# ps.stem(token_result)
token_result
"plan" in token_result | _____no_output_____ | MIT | preprocessing/ExtractTerms.ipynb | tychen5/IR_TextMining |
Output | # output=""
# for token in tokens:
# output+=token+' '
# print(output)
file = open('result/output.txt','w')
file.write(token_result) #Save the result
file.close()
print(token_result)
# tokenizer = Tokenizer()
# tokenizer.fit_on_texts(texts)
# print(tokenizer.sequences_to_texts()) | _____no_output_____ | MIT | preprocessing/ExtractTerms.ipynb | tychen5/IR_TextMining |
Functions Think of mathematical functions: 8 -------> 16, 10 ------> 20, 0.5 ------> 1. Thus, the function of course is `f(x) = 2x`, which is a function f that doubles whatever input. What if we could write a doubling function too? | def f(x):
return 2*x
print(f(8))
print(f(10))
print(f(0.5)) | _____no_output_____ | MIT | 19T2/2_review/functions.ipynb | photomz/learn-python3 |
Programmatic functions can do a lot more. For example: | def is_even(x):
return not x % 2
print(is_even(100))
print(is_even(7))
def calculate_interest(principal=100, rate=0.05, year=5):
    # principal = initial amount, rate = interest rate, year = number of years
    compounded = principal
    for i in range(year):
        compounded = compounded * (1 + rate)
    return compounded
print(calculate_interest())
print(calculate_interest(1000,0.01,100)) | _____no_output_____ | MIT | 19T2/2_review/functions.ipynb | photomz/learn-python3 |
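The loop above simply applies compound interest, so the result can be checked against the closed-form formula principal * (1 + rate) ** year; a quick sanity check with the default values:

print(100 * (1 + 0.05) ** 5)   # 127.62815625, the same value calculate_interest() returns with its defaults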
Prepare dataset for hiveplot This notebook currently just exports a subset of the nodes to a DOT file for import into [`jhive`](https://www.bcgsc.ca/wiki/display/jhive/Documentation). | import random
import pandas
import networkx
from networkx.drawing.nx_pydot import write_dot
node_df = pandas.read_table('../../data/nodes.tsv')
edge_df = pandas.read_table('../../data/edges.sif.gz')
node_df.head(2)
edge_df.head(2)
graph = networkx.MultiGraph()
# No colons allowed. See https://github.com/carlos-jenkins/pydotplus/issues/3
make_dot_safe = lambda x: x.replace(':', '_')
for row in node_df.itertuples():
node_id = make_dot_safe(row.id)
graph.add_node(node_id, node_name=row.name, kind=row.kind)
for row in edge_df.itertuples():
source = make_dot_safe(row.source)
target = make_dot_safe(row.target)
graph.add_edge(source, target, key=row.metaedge)
len(graph)
random.seed(0)
node_subset = random.sample(graph.nodes(), 1000)
graph_subset = graph.subgraph(node_subset)
len(graph_subset)
write_dot(graph_subset, 'data/hetionet-v1.0-simple.dot') | _____no_output_____ | CC0-1.0 | viz/auto/3-hiveplot.ipynb | dhimmel/integrate |
Normalizing the data |
colsList = ["actual_area",
"poolcnt",
"latitude",
"longitude",
"unitcnt",
"lotsizesquarefeet",
"bedroomcnt",
"calculatedbathnbr",
"hashottuborspa",
"fireplacecnt",
"taxvaluedollarcnt",
"buildingqualitytypeid",
"garagecarcnt",
"age",
"taxamount"]
prop_data_ahp = prop_data[colsList]
# prop_data_ahp
for col in prop_data_ahp.columns:
prop_data_ahp[col] = (prop_data_ahp[col] - prop_data_ahp[col].mean())/prop_data_ahp[col].std(ddof=0)
# prop_data_ahp.isnull().sum()
for cols in prop_data_ahp.columns.values:
print prop_data_ahp[cols].value_counts(dropna=False)
| -0.130731 47739
-0.199816 8968
-0.249310 7211
-0.323551 7111
-0.447286 6024
-0.364796 5956
-0.385418 5837
-0.406041 5436
-0.298804 5319
-0.220439 4743
-0.348298 4685
-0.292617 4658
-0.261684 4585
-0.302929 4520
-0.271995 4478
-0.274057 4281
-0.335925 4211
-0.459659 4166
-0.132793 4162
-0.397792 4148
-0.422539 4147
-0.150322 4029
-0.117326 3987
-0.496780 3926
-0.137949 3919
-0.352422 3888
-0.224563 3831
-0.076082 3799
-0.307053 3784
-0.168883 3690
...
6.255530 1
6.173555 1
5.930210 1
37.453189 1
10.936313 1
3.555018 1
4.396415 1
-0.859219 1
5.301225 1
6.710255 1
5.229562 1
46.473969 1
10.368164 1
7.095379 1
3.632353 1
5.497654 1
7.580007 1
5.365670 1
3.451906 1
5.129543 1
4.411882 1
8.115676 1
39.548946 1
4.417553 1
6.807180 1
11.143053 1
7.860988 1
9.108647 1
2.499664 1
3.159067 1
Name: actual_area, Length: 12630, dtype: int64
-0.469740 2445585
2.128838 539632
Name: poolcnt, dtype: int64
0.027779 2937
-0.068664 859
-1.203029 756
0.227560 740
0.700452 611
-0.965556 568
-0.887905 559
0.191816 555
0.692235 553
0.232902 541
-1.379075 524
-0.593808 513
0.226328 510
-0.943781 504
0.613762 497
0.554599 484
-1.238773 463
1.681980 455
-0.948300 435
-0.540323 429
-0.177130 428
-0.548273 424
-0.723153 424
-1.553897 421
-0.724386 419
0.255088 416
0.165111 402
-1.248634 401
0.611297 396
-0.945014 395
...
-0.792460 1
-1.781087 1
-0.116981 1
2.731273 1
0.364896 1
1.654765 1
2.764363 1
2.652504 1
1.658614 1
1.529635 1
-2.704733 1
-1.113808 1
-1.198251 1
1.673393 1
2.347754 1
-1.942281 1
-1.952564 1
2.788509 1
-1.251078 1
1.764154 1
-1.181940 1
-2.302825 1
1.751019 1
-1.721390 1
0.405341 1
-1.375559 1
-1.770553 1
2.412657 1
1.520966 1
2.756364 1
Name: latitude, Length: 852718, dtype: int64
-0.085527 2935
0.545319 1718
0.684279 1585
0.542424 1569
0.669804 1545
0.145809 1402
0.666909 1371
0.704544 1319
0.675594 1290
0.768234 1266
0.756654 1255
0.678489 1193
0.707439 1192
0.510579 1187
0.177654 1169
0.507684 1138
0.719019 1112
-0.039471 1107
0.765339 1091
0.681384 1078
-0.045261 1063
0.687174 1048
0.759549 1048
0.498999 1031
0.525054 1008
0.698754 1007
-0.207380 1003
0.652434 988
0.307929 966
0.721914 965
...
-1.663629 1
0.539561 1
1.032258 1
1.167006 1
1.813341 1
-1.423735 1
-0.706643 1
2.881900 1
-1.377186 1
0.577607 1
-1.512571 1
-1.822741 1
1.876284 1
-1.169508 1
0.722803 1
-1.309269 1
-1.700601 1
2.848760 1
1.539375 1
-0.590024 1
2.234980 1
0.123243 1
1.000793 1
-1.007466 1
-1.300958 1
-0.283968 1
3.112350 1
0.457743 1
2.815870 1
-0.883366 1
Name: longitude, Length: 1043238, dtype: int64
-0.059008 2788048
0.434155 115778
1.420482 39926
0.927319 39893
1.913645 325
2.406809 248
3.393135 136
2.899972 134
4.379462 87
3.886299 81
5.365789 45
4.872625 42
6.352115 33
5.858952 32
7.338442 30
11.283749 26
9.311096 17
6.845279 16
8.324769 15
7.831606 12
10.297422 11
15.229056 10
12.763239 9
8.817932 9
14.242729 8
12.270076 8
9.804259 7
10.790586 7
13.256402 7
24.105996 6
...
197.206333 1
56.654777 1
101.039479 1
125.697647 1
69.970188 1
47.777837 1
39.887223 1
91.176212 1
31.996610 1
37.421407 1
35.941917 1
49.257327 1
41.366713 1
148.876324 1
189.808882 1
337.264724 1
54.682124 1
37.914570 1
42.846203 1
34.955590 1
468.446176 1
437.376885 1
35.448753 1
55.668451 1
146.903671 1
67.504371 1
434.911068 1
123.724993 1
61.093247 1
31.503446 1
Name: unitcnt, Length: 154, dtype: int64
-0.059489 284570
-0.063685 44585
-0.067881 17517
-0.058650 15326
-0.061587 11565
-0.057391 10179
-0.061168 6999
-0.062426 6702
-0.065783 6162
-0.066202 5955
-0.060538 5685
-0.067042 5504
-0.072077 5097
-0.057396 4827
-0.055293 4673
-0.069979 4661
-0.063265 4493
-0.062846 4204
-0.056133 4017
-0.068720 3890
-0.056552 3885
-0.067461 3779
-0.063689 3762
-0.062636 3750
-0.053615 3357
-0.067877 3319
-0.067885 3299
-0.063181 3244
-0.063681 3220
-0.065363 3167
...
-0.087131 1
0.051309 1
0.363920 1
0.122583 1
1.733924 1
0.084263 1
0.103895 1
0.017306 1
0.072720 1
0.749622 1
1.773566 1
0.826942 1
0.357622 1
0.173255 1
0.167759 1
0.541943 1
0.232546 1
0.345081 1
0.219338 1
0.387811 1
0.163475 1
0.091853 1
0.381819 1
0.363278 1
0.219606 1
0.562091 1
0.463503 1
0.039737 1
0.488330 1
0.212784 1
Name: lotsizesquarefeet, Length: 70214, dtype: int64
-0.073386 1175702
0.713430 731475
-0.860202 606782
1.500246 182765
-2.433835 118705
-1.647019 86941
2.287062 48915
3.860695 13542
3.073878 12763
4.647511 4279
5.434327 1702
7.007959 959
6.221143 425
7.794775 86
8.581592 69
10.155224 50
9.368408 24
10.942040 11
11.728856 9
13.302489 8
15.662937 1
16.449753 1
12.515673 1
17.236570 1
14.089305 1
Name: bedroomcnt, dtype: int64
-0.296309 1336955
0.718788 633088
-1.311405 499324
0.211239 208578
1.733884 133922
-0.803857 45427
2.748981 38514
1.226336 31773
2.241432 19811
3.764077 16416
3.256529 6259
4.779174 6221
5.794270 4548
6.809367 1341
4.271625 1340
7.824463 496
5.286722 382
9.854656 269
8.839560 200
6.301818 110
10.869753 53
7.316915 50
11.884849 39
13.915042 25
12.899945 21
8.332011 14
15.945235 12
14.930138 8
17.975428 8
10.362204 3
9.347108 3
16.960331 3
29.141489 1
30.156586 1
17.467880 1
12.392397 1
Name: calculatedbathnbr, dtype: int64
-0.130599 2935155
7.657049 50062
Name: hashottuborspa, dtype: int64
-0.315882 2672093
2.260353 270019
4.836589 34487
7.412825 7716
9.989060 716
12.565296 129
15.141532 34
17.717767 15
22.870238 6
20.294003 2
Name: fireplacecnt, dtype: int64
1.434326e-16 34266
-5.464493e-01 1902
7.974085e-03 1452
-5.362975e-02 1371
6.957792e-02 1285
4.493639e-02 1131
-4.346682e-03 1120
-2.898822e-02 1117
-1.666745e-02 1097
-1.152336e-01 1095
2.029485e-02 1037
1.311818e-01 1036
-2.282783e-02 1029
3.877600e-02 1027
-6.595052e-02 1019
-4.130898e-02 1010
1.927856e-01 975
-7.827128e-02 964
2.645524e-02 938
-1.050707e-02 937
3.261562e-02 934
-8.443167e-02 927
-3.514860e-02 913
1.003798e-01 897
5.725715e-02 890
9.421945e-02 885
8.189869e-02 874
-1.521959e-01 873
-1.768374e-01 871
-1.398751e-01 864
...
-1.864107e-01 1
6.722295e-03 1
2.270490e+00 1
9.539540e-01 1
6.918518e-01 1
-1.599333e-01 1
-2.733893e-03 1
-4.364130e-02 1
2.842020e-01 1
-2.808037e-01 1
1.950557e+00 1
1.808081e-02 1
1.051473e+00 1
7.214709e-01 1
2.987257e-01 1
4.987780e-01 1
-2.101491e-01 1
8.328753e-01 1
2.167736e+01 1
2.837067e-01 1
1.694402e-01 1
-3.841491e-01 1
-7.575169e-02 1
-5.391283e-01 1
4.750852e-01 1
2.738716e+00 1
7.904019e-01 1
-5.163078e-01 1
7.483078e+00 1
2.164724e+00 1
Name: taxvaluedollarcnt, Length: 661521, dtype: int64
0.325565 1234066
-0.370521 561502
1.021651 501925
-1.762694 448049
-1.066608 107053
1.717737 69428
3.109909 28488
-2.458780 17858
2.413823 10629
3.805995 4123
-3.850952 1776
-3.154866 320
Name: buildingqualitytypeid, dtype: int64
-0.353068 1632131
1.105037 980997
-1.811172 340395
2.563141 19884
4.021245 8655
5.479350 1739
6.937454 587
8.395558 272
9.853663 181
11.311767 130
12.769871 83
14.227975 64
15.686080 41
17.144184 16
18.602288 14
20.060393 10
22.976601 4
25.892810 3
21.518497 3
27.350914 2
28.809018 2
33.183331 2
34.641436 1
24.434706 1
Name: garagecarcnt, dtype: int64
4.023912e-01 88555
6.155737e-01 76100
4.450277e-01 73284
4.876642e-01 67062
3.597547e-01 65723
1.866257e-02 64907
5.729372e-01 56643
6.129908e-02 54754
5.303007e-01 54378
7.434833e-01 52935
7.008467e-01 52625
-1.047250e+00 52527
-3.650660e-01 51429
-2.397394e-02 49462
6.582102e-01 48257
1.039356e-01 48223
3.029506e-15 47833
-6.208851e-01 47501
2.318451e-01 47199
-5.356121e-01 46666
3.171181e-01 46509
-9.619772e-01 44844
-3.224295e-01 44400
-2.797930e-01 42995
-5.782486e-01 42674
1.892086e-01 41916
-4.929756e-01 41759
-6.635216e-01 41395
-1.089887e+00 41213
1.465721e-01 40188
...
6.371503e+00 2
4.239677e+00 2
6.755231e+00 2
6.115684e+00 2
5.305590e+00 2
6.925777e+00 1
4.069131e+00 1
6.712595e+00 1
5.902501e+00 1
4.793952e+00 1
5.817228e+00 1
4.708679e+00 1
5.220317e+00 1
5.774592e+00 1
5.561409e+00 1
5.945138e+00 1
4.410223e+00 1
5.859865e+00 1
5.007134e+00 1
6.158320e+00 1
4.452860e+00 1
4.324950e+00 1
4.197041e+00 1
6.584685e+00 1
3.728039e+00 1
6.456776e+00 1
3.941222e+00 1
3.642766e+00 1
6.797868e+00 1
5.604045e+00 1
Name: age, Length: 184, dtype: int64
| Apache-2.0 | HW3/HW3.ipynb | jay-z007/Data-Science-Fundamentals |
Analytical Hierarchical Processing | rel_imp_matrix = pd.read_csv("rel_imp_matrix.csv", index_col=0)
# rel_imp_matrix
import fractions
for col in rel_imp_matrix.columns.values:
temp_list = rel_imp_matrix[col].tolist()
rel_imp_matrix[col] = [float(fractions.Fraction(x)) for x in temp_list]
# data = [float(fractions.Fraction(x)) for x in data]
# rel_imp_matrix
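# AHP priority weights: normalize each column of the pairwise comparison matrix so it sums to 1,
# then average each row to obtain the relative importance (score) of the corresponding criterion.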
for col in rel_imp_matrix.columns.values:
rel_imp_matrix[col] /= rel_imp_matrix[col].sum()
# rel_imp_matrix
rel_imp_matrix["row_sum"] = rel_imp_matrix.sum(axis=1)
rel_imp_matrix["score"] = rel_imp_matrix["row_sum"]/rel_imp_matrix.shape[0]
rel_imp_matrix.to_csv("final_score_matrix.csv", index=False)
# rel_imp_matrix
ahp_column_score = rel_imp_matrix["score"]
ahp_column_score
prop_data_ahp.info()
prop_data_ahp.drop('sum', axis=1,inplace=True)
prop_data_ahp.keys() | _____no_output_____ | Apache-2.0 | HW3/HW3.ipynb | jay-z007/Data-Science-Fundamentals |
SAW | sum_series = pd.Series(0, index=prop_data_ahp.index,dtype='float32')
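# Simple Additive Weighting (SAW): score each property as the weighted sum of its
# normalized attribute values, using the AHP-derived weights in ahp_column_score.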
for col in prop_data_ahp.columns:
sum_series = sum_series+ prop_data_ahp[col] * ahp_column_score[col]
prop_data_ahp["sum"] = sum_series.astype('float32')
prop_data_ahp["sum"]
# prop_data_ahp["sum"] = prop_data_ahp.sum(axis=1)
prop_data_ahp["sum"].describe()
prop_data_ahp.sort_values(by='sum', inplace=True)
prop_data_ahp.head(n=10)
prop_data_ahp.tail(n=10)
print prop_data[colsList].iloc[1252741],"\n\n"
print prop_data[colsList].iloc[342941]
# #imputing airconditioningtypeid, making some NaN to 1.0 where heatingorsystemtypeid == 2
# prop_data.loc[(prop_data["heatingorsystemtypeid"]==2.0) & (pd.isnull(prop_data["airconditioningtypeid"])), "airconditioningtypeid"] = 1.0
# prop_data["airconditioningtypeid"].fillna(-1, inplace=True)
# print prop_data["airconditioningtypeid"].value_counts()
# prop_data[["airconditioningtypeid", "heatingorsystemtypeid"]].head()
# duplicate_or_not_useful_cols = pd.Series(['calculatedbathnbr', 'assessmentyear', 'fullbathcnt',
# 'regionidneighborhood', 'propertyzoningdesc', 'censustractandblock'])#,'finishedsquarefeet12'])
# prop_data.drop(duplicate_or_not_useful_cols, axis=1, inplace=True)
# prop_data["buildingqualitytypeid"].fillna(prop_data["buildingqualitytypeid"].mean(), inplace=True)
# prop_data["calculatedfinishedsquarefeet"].interpolate(inplace=True)
# prop_data["heatingorsystemtypeid"].fillna(-1, inplace=True)
# prop_data["lotsizesquarefeet"].fillna(prop_data["lotsizesquarefeet"].median(), inplace=True)
# prop_data.drop(["numberofstories"], axis=1, inplace=True)
# #removing propertycountylandusecode because it is not in interpretable format
# prop_data.drop(["propertycountylandusecode"], axis=1, inplace=True)
# prop_data["regionidcity"].interpolate(inplace=True)
# prop_data["regionidzip"].interpolate(inplace=True)
# prop_data["yearbuilt"].fillna(prop_data["yearbuilt"].mean(), inplace=True)
# #impute structuretaxvaluedollarcnt, taxvaluedollarcnt, landtaxvaluedollarcnt, taxamount by interpolation
# cols_to_interpolate = ["structuretaxvaluedollarcnt", "taxvaluedollarcnt", "landtaxvaluedollarcnt", "taxamount"]
# for c in cols_to_interpolate:
# prop_data[c].interpolate(inplace=True)
# #imputing garagecarcnt on basis of propertylandusetypeid
# #All the residential places have 1 or 2 garagecarcnt, hence using random filling for those values.
# prop_data.loc[(prop_data["propertylandusetypeid"]==261) & (pd.isnull(prop_data["garagecarcnt"])), "garagecarcnt"] = np.random.randint(1,3)
# prop_data.loc[(prop_data["propertylandusetypeid"]==266) & (pd.isnull(prop_data["garagecarcnt"])), "garagecarcnt"] = np.random.randint(1,3)
# prop_data["garagecarcnt"].fillna(-1, inplace=True)
# prop_data["garagecarcnt"].value_counts(dropna=False)
# #imputing garagetotalsqft using the garagecarcnt
# prop_data.loc[(prop_data["garagecarcnt"]==-1) & (pd.isnull(prop_data["garagetotalsqft"]) | (prop_data["garagetotalsqft"] == 0)), "garagetotalsqft"] = -1
# prop_data.loc[(prop_data["garagecarcnt"]==1) & (pd.isnull(prop_data["garagetotalsqft"]) | (prop_data["garagetotalsqft"] == 0)), "garagetotalsqft"] = np.random.randint(180, 400)
# prop_data.loc[(prop_data["garagecarcnt"]==2) & (pd.isnull(prop_data["garagetotalsqft"]) | (prop_data["garagetotalsqft"] == 0)), "garagetotalsqft"] = np.random.randint(400, 720)
# prop_data.loc[(prop_data["garagecarcnt"]==3) & (pd.isnull(prop_data["garagetotalsqft"]) | (prop_data["garagetotalsqft"] == 0)), "garagetotalsqft"] = np.random.randint(720, 880)
# prop_data.loc[(prop_data["garagecarcnt"]==4) & (pd.isnull(prop_data["garagetotalsqft"]) | (prop_data["garagetotalsqft"] == 0)), "garagetotalsqft"] = np.random.randint(880, 1200)
# #interpolate the remaining missing values
# prop_data["garagetotalsqft"].interpolate(inplace=True)
# prop_data["garagetotalsqft"].value_counts(dropna=False)
# #imputing unitcnt using propertylandusetypeid
# prop_data.loc[(prop_data["propertylandusetypeid"]==261) & pd.isnull(prop_data["unitcnt"]), "unitcnt"] = 1
# prop_data.loc[(prop_data["propertylandusetypeid"]==266) & pd.isnull(prop_data["unitcnt"]), "unitcnt"] = 1
# prop_data.loc[(prop_data["propertylandusetypeid"]==269) & pd.isnull(prop_data["unitcnt"]), "unitcnt"] = 1
# prop_data.loc[(prop_data["propertylandusetypeid"]==246) & pd.isnull(prop_data["unitcnt"]), "unitcnt"] = 2
# prop_data.loc[(prop_data["propertylandusetypeid"]==247) & pd.isnull(prop_data["unitcnt"]), "unitcnt"] = 3
# prop_data.loc[(prop_data["propertylandusetypeid"]==248) & pd.isnull(prop_data["unitcnt"]), "unitcnt"] = 4
# prop_data["unitcnt"].value_counts(dropna=False) | _____no_output_____ | Apache-2.0 | HW3/HW3.ipynb | jay-z007/Data-Science-Fundamentals |
Distance MetricWe will be using weighted Manhattan distance as a distance metric | dist_imp_matrix = pd.read_csv("./dist_metric.csv", index_col=0)
dist_imp_matrix
import fractions
for col in dist_imp_matrix.columns.values:
temp_list = dist_imp_matrix[col].tolist()
dist_imp_matrix[col] = [float(fractions.Fraction(x)) for x in temp_list]
# dist_imp_matrix
for col in dist_imp_matrix.columns.values:
dist_imp_matrix[col] /= dist_imp_matrix[col].sum()
dist_imp_matrix["row_sum"] = dist_imp_matrix.sum(axis=1)
dist_imp_matrix["score"] = dist_imp_matrix["row_sum"]/dist_imp_matrix.shape[0]
dist_imp_matrix.to_csv("final_score_matrix_Q2.csv") | _____no_output_____ | Apache-2.0 | HW3/HW3.ipynb | jay-z007/Data-Science-Fundamentals |
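The cell above only derives the per-attribute weights; the distance itself is not computed here. A minimal sketch of a weighted Manhattan distance built on those weights (the function name and the use of the `score` column as the weight vector are assumptions for illustration, not part of the original notebook):

import numpy as np

def weighted_manhattan(u, v, weights):
    # weighted Manhattan (L1) distance: sum over attributes of weight_i * |u_i - v_i|
    return float(np.sum(weights * np.abs(u - v)))

# e.g., distance between two normalized property rows (hypothetical usage):
# w = dist_imp_matrix["score"].values
# d = weighted_manhattan(prop_data_ahp.iloc[0][colsList].values,
#                        prop_data_ahp.iloc[1][colsList].values, w)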
A Crypto-Arithmetic Puzzle In this exercise we will solve the crypto-arithmetic puzzle shown in the picture below: The idea is that the letters "$\texttt{S}$", "$\texttt{E}$", "$\texttt{N}$", "$\texttt{D}$", "$\texttt{M}$", "$\texttt{O}$", "$\texttt{R}$", "$\texttt{Y}$" occurring in this puzzleare interpreted as variables ranging over the set of decimal digits, i.e. these variables can take values inthe set $\{0,1,2,3,4,5,6,7,8,9\}$. Then, the string "$\texttt{SEND}$" is interpreted as a decimal number,i.e. it is interpreted as the number$$\texttt{S} \cdot 10^3 + \texttt{E} \cdot 10^2 + \texttt{N} \cdot 10^1 + \texttt{D} \cdot 10^0.$$The strings "$\texttt{MORE}$ and "$\texttt{MONEY}$" are interpreted similarly. To make the probleminteresting, the assumption is that different variables have different values. Furthermore, thedigits at the beginning of a number should be different from $0$. Then, we have to find values for the variables"$\texttt{S}$", "$\texttt{E}$", "$\texttt{N}$", "$\texttt{D}$", "$\texttt{M}$", "$\texttt{O}$", "$\texttt{R}$", "$\texttt{Y}$" such that the formula$$ (\texttt{S} \cdot 10^3 + \texttt{E} \cdot 10^2 + \texttt{N} \cdot 10 + \texttt{D}) + (\texttt{M} \cdot 10^3 + \texttt{O} \cdot 10^2 + \texttt{R} \cdot 10 + \texttt{E}) = \texttt{M} \cdot 10^4 + \texttt{O} \cdot 10^3 + \texttt{N} \cdot 10^2 + \texttt{E} \cdot 10 + \texttt{Y}$$is true. The problem with this constraint is that it involves far too many variables. As this constraint can only bechecked when all the variables have values assigned to them, the backtracking search would essentiallyboil down to a mere brute force search. We would have 8 variables and hence we would have to test $8^{10}$possible assignments. In order to do better, we have to perform the addition in the figure shown abovecolumn by column, just as it is taught in elementary school. To be able to do this, we have to introduce carry digits "$\texttt{C1}$", "$\texttt{C2}$", "$\texttt{C3}$" where $\texttt{C1}$ is the carry produced by adding $\texttt{D}$ and $\texttt{E}$, $\texttt{C2}$ is the carry produced by adding $\texttt{N}$, $\texttt{R}$ and $\texttt{C1}$, and $\texttt{C3}$ is the carry produced by adding $\texttt{E}$, $\texttt{O}$ and $\texttt{C2}$. | import cspSolver | _____no_output_____ | MIT | Python/Exercises/Blatt-13.ipynb | BuserLukas/Logic |
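Before filling in the solver code, it helps to see how the column-wise additions with the carries translate into constraints. The sketch below is only an illustration; the exact constraint syntax accepted by `cspSolver.solve` is an assumption here, and the actual construction is done in `createCSP` further down:

# one equation per column, least significant digits first (C1, C2, C3 are the carries)
ColumnConstraints = {
    'D + E == Y + 10 * C1',
    'N + R + C1 == E + 10 * C2',
    'E + O + C2 == N + 10 * C3',
    'S + M + C3 == O + 10 * M',   # M is also the leading digit of MONEY
    'S != 0', 'M != 0'            # numbers must not start with the digit 0
}
# In addition, the carries C1, C2, C3 range over {0, 1} and the letters
# S, E, N, D, M, O, R, Y have to be pairwise distinct (see allDifferent below).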
For a set $V$ of variables, the function $\texttt{allDifferent}(V)$ generates a set of formulas that express that all the variables of $V$ are different. | def allDifferent(Variables):
return { f'{x} != {y}' for x in Variables
for y in Variables
if x < y
}
allDifferent({ 'a', 'b', 'c' }) | _____no_output_____ | MIT | Python/Exercises/Blatt-13.ipynb | BuserLukas/Logic |
Break until 14:23 | def createCSP():
Variables = "your code here"
Values = "your code here"
Constraints = "much more code here"
return [Variables, Values, Constraints];
puzzle = createCSP()
puzzle
import time
start = time.time()
solution = cspSolver.solve(puzzle)
stop = time.time()
print(f'Time needed: {round((stop-start) * 1000)} milliseconds.')
solution
def printSolution(A):
if A == None:
print("no solution found")
return
for v in { "S", "E", "N", "D", "M", "O", "R", "Y" }:
print(f"{v} = {A[v]}")
print("\nThe solution of\n")
print(" S E N D")
print(" + M O R E")
print(" ---------")
print(" M O N E Y")
print("\nis as follows\n")
print(f" {A['S']} {A['E']} {A['N']} {A['D']}")
print(f" + {A['M']} {A['O']} {A['R']} {A['E']}")
print(f" ==========")
print(f" {A['M']} {A['O']} {A['N']} {A['E']} {A['Y']}")
printSolution(solution) | _____no_output_____ | MIT | Python/Exercises/Blatt-13.ipynb | BuserLukas/Logic |
Solution based on Multiple Models | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all" | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
Tokenize and Numerize - Make it ready | # Tokenizer and pad_sequences are used below but were not imported elsewhere in this template
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
training_size = 20000
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
vocab_size = 1000
max_length = 120
embedding_dim = 16
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences,
maxlen=max_length,
padding=padding_type,
truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences,
maxlen=max_length,
padding=padding_type,
truncating=trunc_type) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
Plot | def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss") | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
Function to train and show | def fit_model_and_show_results (model, reviews):
model.summary()
history = model.fit(training_padded,
training_labels_final,
epochs=num_epochs,
validation_data=(validation_padded, validation_labels_final))
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
predict_review(model, reviews) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
ANN Embedding | model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 20
history = model.fit(training_padded, training_labels_final, epochs=num_epochs,
validation_data=(validation_padded, validation_labels_final))
plot_graphs(history, "accuracy")
plot_graphs(history, "loss") | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
CNN | num_epochs = 30
model_cnn = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Conv1D(16, 5, activation='relu'),
tf.keras.layers.GlobalMaxPooling1D(),
tf.keras.layers.Dense(1, activation='sigmoid')
])
# Default learning rate for the Adam optimizer is 0.001
# Let's slow down the learning rate by 10.
learning_rate = 0.0001
model_cnn.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate),
metrics=['accuracy'])
fit_model_and_show_results(model_cnn, new_reviews) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
GRU | num_epochs = 30
model_gru = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.GRU(32)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
learning_rate = 0.00003 # slower than the default learning rate
model_gru.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate),
metrics=['accuracy'])
fit_model_and_show_results(model_gru, new_reviews) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
Bidirectional LSTM | num_epochs = 30
model_bidi_lstm = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
learning_rate = 0.00003
model_bidi_lstm.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate),
metrics=['accuracy'])
fit_model_and_show_results(model_bidi_lstm, new_reviews) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
Multiple bidirectional LSTMs | num_epochs = 30
model_multiple_bidi_lstm = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim,
return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(embedding_dim)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
learning_rate = 0.0003
model_multiple_bidi_lstm.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(learning_rate),
metrics=['accuracy'])
fit_model_and_show_results(model_multiple_bidi_lstm, new_reviews) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
Prediction Define a function to prepare the new reviews for use with a model and then use the model to predict the sentiment of the new reviews | def predict_review(model, reviews):
# Create the sequences
padding_type='post'
sample_sequences = tokenizer.texts_to_sequences(reviews)
reviews_padded = pad_sequences(sample_sequences,
padding=padding_type,
maxlen=max_length)
classes = model.predict(reviews_padded)
for x in range(len(reviews_padded)):
print(reviews[x])
print(classes[x])
print('\n') | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
How to use examples more_reviews = [review1, review2, review3, review4, review5, review6, review7, review8, review9, review10] predict_review(model, more_reviews) | print("============================\n","Embeddings only:\n", "============================")
predict_review(model, more_reviews)
print("============================\n","With CNN\n", "============================")
predict_review(model_cnn, more_reviews)
print("===========================\n","With bidirectional GRU\n", "============================")
predict_review(model_gru, more_reviews)
print("===========================\n", "With a single bidirectional LSTM:\n", "===========================")
predict_review(model_bidi_lstm, more_reviews)
print("===========================\n", "With multiple bidirectional LSTM:\n", "==========================")
predict_review(model_multiple_bidi_lstm, more_reviews) | _____no_output_____ | MIT | 3. NLP/AZ/Text Classification/Models_Template.ipynb | AmirRazaMBA/TensorFlow-Certification |
1. Install Dependencies First install the libraries needed to execute recipes; this only needs to be done once, then click play. | !pip install git+https://github.com/google/starthinker
| _____no_output_____ | Apache-2.0 | colabs/dv360_data_warehouse.ipynb | arbrown/starthinker |
2. Get Cloud Project ID Running this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md); this only needs to be done once, then click play. | CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
| _____no_output_____ | Apache-2.0 | colabs/dv360_data_warehouse.ipynb | arbrown/starthinker |
3. Get Client Credentials To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md); this only needs to be done once, then click play. | CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
| _____no_output_____ | Apache-2.0 | colabs/dv360_data_warehouse.ipynb | arbrown/starthinker |
4. Enter DV360 Data Warehouse Parameters Deploy a BigQuery dataset mirroring the DV360 account structure, a foundation for solutions built on top of it. 1. Wait for BigQuery->->->* to be created. 1. Every table mimics the DV360 API endpoints. Modify the values below for your use case; this can be done multiple times, then click play. | FIELDS = {
'auth_bigquery': 'service', # Credentials used for writing data.
'auth_dv': 'service', # Credentials used for reading data.
'auth_cm': 'service', # Credentials used for reading data.
'recipe_slug': '', # Name of Google BigQuery dataset to create.
'partners': [], # List of account ids to pull.
}
print("Parameters Set To: %s" % FIELDS)
| _____no_output_____ | Apache-2.0 | colabs/dv360_data_warehouse.ipynb | arbrown/starthinker |
5. Execute DV360 Data Warehouse This does NOT need to be modified unless you are changing the recipe; just click play. | from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'dataset': {
'description': 'Create a dataset for bigquery tables.',
'hour': [
4
],
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','description': 'Place where tables will be created in BigQuery.'}}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'partners.get',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','description': 'Place where tables will be created in BigQuery.'}},
'legacy': False,
'query': 'SELECT CAST(partnerId AS STRING) partnerId FROM (SELECT DISTINCT * FROM UNNEST({partners}) AS partnerId)',
'parameters': {
'partners': {'field': {'name': 'partners','kind': 'integer_list','order': 4,'default': [],'description': 'List of account ids to pull.'}}
}
}
},
'iterate': False,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Partners'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'advertisers.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(partnerId AS STRING) partnerId FROM `DV360_Partners`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Advertisers'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'advertisers.insertionOrders.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_InsertionOrders'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'advertisers.lineItems.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_LineItems'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'advertisers.campaigns.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Campaigns'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'advertisers.channels.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Channels'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'advertisers.creatives.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Creatives'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'inventorySources.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Inventory_Sources'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'googleAudiences.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Google_Audiences'
}
}
}
},
{
'google_api': {
'auth': 'user',
'api': 'displayvideo',
'version': 'v1',
'function': 'combinedAudiences.list',
'kwargs_remote': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 0,'default': '','description': 'Google BigQuery dataset to create tables in.'}},
'query': 'SELECT DISTINCT CAST(advertiserId AS STRING) AS advertiserId FROM `DV360_Advertisers`',
'legacy': False
}
},
'iterate': True,
'results': {
'bigquery': {
'auth': 'user',
'dataset': {'field': {'name': 'recipe_slug','kind': 'string','order': 4,'default': '','description': 'Name of Google BigQuery dataset to create.'}},
'table': 'DV360_Combined_Audiences'
}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
| _____no_output_____ | Apache-2.0 | colabs/dv360_data_warehouse.ipynb | arbrown/starthinker |
Tutorial on Python for scientific computing Marcos Duarte This tutorial is a short introduction to programming and a demonstration of the basic features of Python for scientific computing. To use Python for scientific computing we need the Python program itself with its main modules and specific packages for scientific computing. [See this notebook on how to install Python for scientific computing](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/PythonInstallation.ipynb). Once you get Python and the necessary packages for scientific computing ready to work, there are different ways to run Python, the main ones being: open a terminal window in your computer and type `python` or `ipython` so that the Python interpreter starts; run the IPython notebook and start working with Python in a browser; run Spyder, an interactive development environment (IDE); run the IPython qtconsole, a more featured terminal; run IPython completely in the cloud with, for example, [https://cloud.sagemath.com](https://cloud.sagemath.com) or [https://www.wakari.io](https://www.wakari.io); run Python online in a website such as [https://www.pythonanywhere.com/](https://www.pythonanywhere.com/); or run Python using any other Python editor or IDE. We will use the IPython Notebook for this tutorial but you can run almost all the things we will see here using the other forms listed above. Python as a calculator Once in the IPython notebook, if you type a simple mathematical expression and press `Shift+Enter` it will give the result of the expression: | 1 + 2 - 30
4/5 | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
If you are using Python version 2.x instead of Python 3.x, you should have got 0 as the result of 4 divided by 5, which is wrong! The problem is that for Python versions up to 2.x, the operator '/' performs division with integers and the result will also be an integer (this behavior was changed in version 3.x). If you want the normal behavior for division, in Python 2.x you have two options: tell Python that at least one of the numbers is not an integer or import the new division operator (which is inoffensive if you are already using Python 3), let's see these two options: | 4/5.
from __future__ import division
4/5 | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
I prefer to use the import division option (from future!); if we put this statement in the beginning of a file or IPython notebook, it will work for all subsequent commands. Another command that changed its behavior from Python 2.x to 3.x is the `print` command. In Python 2.x, the print command could be used as a statement: | print 4/5
With Python 3.x, the print command behaves as a true function and has to be called with parentheses. Let's also import this future command to Python 2.x and use it from now on: | from __future__ import print_function
print(4/5) | 0.8
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
With the `print` function, let's explore the mathematical operations available in Python: | print('1+2 = ', 1+2, '\n', '4*5 = ', 4*5, '\n', '6/7 = ', 6/7, '\n', '8**2 = ', 8**2, sep='') | 1+2 = 3
4*5 = 20
6/7 = 0.8571428571428571
8**2 = 64
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
And if we want the square-root of a number: | sqrt(9) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
We get an error message saying that the `sqrt` function is not defined. This is because `sqrt` and other mathematical functions are available in the `math` module: | import math
math.sqrt(9)
from math import sqrt
sqrt(9) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The import function We used the command '`import`' to be able to call certain functions. In Python, functions are organized in modules and packages and they have to be imported in order to be used. A module is a file containing Python definitions (e.g., functions) and statements. Packages are a way of structuring Python's module namespace by using "dotted module names". For example, the module name A.B designates a submodule named B in a package named A. To be used, modules and packages have to be imported in Python with the import function. A namespace is a container for a set of identifiers (names), and allows the disambiguation of homonym identifiers residing in different namespaces. For example, with the command import math, we will have all the functions and statements defined in this module in the namespace '`math.`'; for example, '`math.pi`' is the π constant and '`math.cos()`', the cosine function. By the way, to know which Python version you are running, we can use one of the following modules: | import sys
sys.version | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
And if you are in an IPython session: | from IPython import sys_info
print(sys_info()) | {'commit_hash': '681fd77',
'commit_source': 'installation',
'default_encoding': 'cp1252',
'ipython_path': 'C:\\Anaconda3\\lib\\site-packages\\IPython',
'ipython_version': '2.1.0',
'os_name': 'nt',
'platform': 'Windows-7-6.1.7601-SP1',
'sys_executable': 'C:\\Anaconda3\\python.exe',
'sys_platform': 'win32',
'sys_version': '3.4.1 |Anaconda 2.0.1 (64-bit)| (default, May 19 2014, '
'13:02:30) [MSC v.1600 64 bit (AMD64)]'}
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The first option gives information about the Python version; the latter also includes the IPython version, operating system, etc. Object-oriented programming Python is designed as an object-oriented programming (OOP) language. OOP is a paradigm that represents concepts as "objects" that have data fields (attributes that describe the object) and associated procedures known as methods. This means that all elements in Python are objects and they have attributes which can be accessed with the dot (.) operator after the name of the object. We already experimented with that when we imported the module `sys`: it became an object, and we accessed one of its attributes: `sys.version`. OOP as a paradigm is much more than defining objects, attributes, and methods, but for now this is enough to get going with Python. Python and IPython help To get help about any Python command, use `help()`: | help(math.degrees) | Help on built-in function degrees in module math:
degrees(...)
degrees(x)
Convert angle x from radians to degrees.
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Or if you are in the IPython environment, simply add '?' to the function and a window will open at the bottom of your browser with the same help content: | math.degrees? | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC
And if you add a second '?' to the statement you get access to the original script file of the function (an advantage of an open source language), unless that function is a built-in function that does not have a script file, which is the case of the standard modules in Python (but you can access the Python source code if you want; it just does not come with the standard program for installation). So, let's see this feature with another function: | import scipy.fftpack
scipy.fftpack.fft?? | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
To know all the attributes of an object, for example all the functions available in `math`, we can use the function `dir`: | print(dir(math)) | ['__doc__', '__loader__', '__name__', '__package__', '__spec__', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp', 'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum', 'gamma', 'hypot', 'isfinite', 'isinf', 'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2', 'modf', 'pi', 'pow', 'radians', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc']
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Tab completion in IPython IPython has tab completion: start typing the name of the command (object) and press `tab` to see the names of objects available with these initial letters. When the name of the object is typed followed by a dot (`math.`), pressing `tab` will show all available attributes; scroll down to the desired attribute and press `Enter` to select it. The four most helpful commands in IPython These are the most helpful commands in IPython (from the [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html)): - `?` : Introduction and overview of IPython's features. - `%quickref` : Quick reference. - `help` : Python's own help system. - `object?` : Details about 'object', use 'object??' for extra details. [See these IPython Notebooks for more on IPython and the Notebook capabilities](http://nbviewer.ipython.org/github/ipython/ipython/tree/master/examples/Notebook/). Comments Comments in Python start with the hash character, #, and extend to the end of the physical line: | # Import the math library to access more math stuff
import math
math.pi # this is the pi constant; a useless comment since this is obvious | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
To insert comments spanning more than one line, use a multi-line string with a pair of matching triple-quotes: `"""` or `'''` (we will see the string data type later). A typical use of a multi-line comment is as documentation strings and are meant for anyone reading the code: | """Documentation strings are typically written like that.
A docstring is a string literal that occurs as the first statement
in a module, function, class, or method definition.
""" | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
A docstring like above is useless and its output as a standalone statement looks ugly in the IPython Notebook, but you will see its real importance when reading and writing code. Commenting a programming code is an important step to make the code more readable, something Python cares a lot about. There is a style guide for writing Python code ([PEP 8](http://www.python.org/dev/peps/pep-0008/)) with a section about [how to write comments](http://www.python.org/dev/peps/pep-0008/comments). Magic functions IPython has a set of predefined 'magic functions' that you can call with a command line style syntax. There are two kinds of magics, line-oriented and cell-oriented. Line magics are prefixed with the % character and work much like OS command-line calls: they get as an argument the rest of the line, where arguments are passed without parentheses or quotes. Cell magics are prefixed with a double %%, and they are functions that get as an argument not only the rest of the line, but also the lines below it in a separate argument. Assignment and expressions The equal sign ('=') is used to assign a value to a variable. Afterwards, no result is displayed before the next interactive prompt: | x = 1 | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC
Spaces between the statements are optional but it helps for readability.To see the value of the variable, call it again or use the print function: | x
print(x) | 1
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Of course, only the last assignment holds: | x = 2
x = 3
x | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
In mathematics, '=' is the symbol for identity, but in computer programming '=' is used for assignment: it means that the right part of the expression is assigned to its left part. For example, 'x=x+1' does not make sense in mathematics but it does in computer programming: | x = 1
print(x)
x = x + 1
print(x) | 1
2
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
A value can be assigned to several variables simultaneously: | x = y = 4
print(x)
print(y) | 4
4
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Several values can be assigned to several variables at once: | x, y = 5, 6
print(x)
print(y) | 5
6
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
And with that, you can do (!): | x, y = y, x
print(x)
print(y) | 6
5
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Variables must be “defined” (assigned a value) before they can be used, or an error will occur: | x = z | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Variables and typesThere are different types of built-in objects in Python (and remember that everything in Python is an object): | import types
print(dir(types)) | ['BuiltinFunctionType', 'BuiltinMethodType', 'CodeType', 'DynamicClassAttribute', 'FrameType', 'FunctionType', 'GeneratorType', 'GetSetDescriptorType', 'LambdaType', 'MappingProxyType', 'MemberDescriptorType', 'MethodType', 'ModuleType', 'SimpleNamespace', 'TracebackType', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', '_calculate_meta', 'new_class', 'prepare_class']
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Let's see some of them now. Numbers: int, float, complex: A number can be an integer (int), a float, or complex (with an imaginary part). Let's use the function `type` to show the type of a number (and later of any other object): | type(6) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
A float is a non-integer number: | math.pi
type(math.pi) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Python (IPython) is showing `math.pi` with only 15 decimal places, but internally a float is represented with higher precision. Floating-point numbers in Python are implemented using a double (eight bytes) word; the precision and internal representation of floating-point numbers are machine specific and are available in: | sys.float_info | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Be aware that floating-point numbers can be tricky in computers: | 0.1 + 0.2
0.1 + 0.2 - 0.3 | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
These results are not correct (and the problem is not due to Python). The error arises from the fact that floating-point numbers are represented in computer hardware as base-2 (binary) fractions and most decimal fractions cannot be represented exactly as binary fractions. As a consequence, decimal floating-point numbers are only approximated by the binary floating-point numbers actually stored in the machine. [See here for more on this issue](http://docs.python.org/2/tutorial/floatingpoint.html). A complex number has real and imaginary parts: | 1+2j
print(type(1+2j)) | <class 'complex'>
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Each part of a complex number is represented as a floating-point number. We can see them using the attributes `.real` and `.imag`: | print((1+2j).real)
print((1+2j).imag) | 1.0
2.0
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
StringsStrings can be enclosed in single quotes or double quotes: | s = 'string (str) is a built-in type in Python'
s
type(s) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Strings enclosed in single and double quotes are equal, but it may be easier to use one instead of the other: | 'string (str) is a Python's built-in type'
"string (str) is a Python's built-in type" | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
But you could have done that using the Python escape character '\': | 'string (str) is a Python\'s built-in type' | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Strings can be concatenated (glued together) with the + operator, and repeated with *: | s = 'P' + 'y' + 't' + 'h' + 'o' + 'n'
print(s)
print(s*5) | Python
PythonPythonPythonPythonPython
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Strings can be subscripted (indexed); like in C, the first character of a string has subscript (index) 0: | print('s[0] = ', s[0], ' (s[index], start at 0)')
print('s[5] = ', s[5])
print('s[-1] = ', s[-1], ' (last element)')
print('s[:] = ', s[:], ' (all elements)')
print('s[1:] = ', s[1:], ' (from this index (inclusive) till the last (inclusive))')
print('s[2:4] = ', s[2:4], ' (from first index (inclusive) till second index (exclusive))')
print('s[:2] = ', s[:2], ' (till this index, exclusive)')
print('s[:10] = ', s[:10], ' (Python handles the index if it is larger than the string length)')
print('s[-10:] = ', s[-10:])
print('s[0:5:2] = ', s[0:5:2], ' (s[ini:end:step])')
print('s[::2] = ', s[::2], ' (s[::step], initial and final indexes can be omitted)')
print('s[0:5:-1] = ', s[::-1], ' (s[::-step] reverses the string)')
print('s[:2] + s[2:] = ', s[:2] + s[2:], ' (because of Python indexing, this sounds natural)') | s[0] = P (s[index], start at 0)
s[5] = n
s[-1] = n (last element)
s[:] = Python (all elements)
s[1:] = ython (from this index (inclusive) till the last (inclusive))
s[2:4] = th (from first index (inclusive) till second index (exclusive))
s[:2] = Py (till this index, exclusive)
s[:10] = Python (Python handles the index if it is larger than the string length)
s[-10:] = Python
s[0:5:2] = Pto (s[ini:end:step])
s[::2] = Pto (s[::step], initial and final indexes can be omitted)
s[0:5:-1] = nohtyP (s[::-step] reverses the string)
s[:2] + s[2:] = Python (because of Python indexing, this sounds natural)
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
len(): Python has a built-in function to get the number of items of a sequence: | help(len)
s = 'Python'
len(s) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The function len() helps to understand how the backward indexing works in Python. The index s[-i] should be understood as s[len(s) - i] rather than accessing directly the i-th element from back to front. This is why the last element of a string is s[-1]: | print('s = ', s)
print('len(s) = ', len(s))
print('len(s)-1 = ',len(s) - 1)
print('s[-1] = ', s[-1])
print('s[len(s) - 1] = ', s[len(s) - 1]) | s = Python
len(s) = 6
len(s)-1 = 5
s[-1] = n
s[len(s) - 1] = n
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Or, strings can be surrounded in a pair of matching triple-quotes: """ or '''. End of lines do not need to be escaped when using triple-quotes, but they will be included in the string. This is how we created a multi-line comment earlier: | """Strings can be surrounded in a pair of matching triple-quotes: \""" or '''.
End of lines do not need to be escaped when using triple-quotes,
but they will be included in the string.
""" | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Lists: Values can be grouped together using different types; one of them is the list, which can be written as a list of comma-separated values between square brackets. List items need not all have the same type: | x = ['spam', 'eggs', 100, 1234]
x | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Lists can be indexed and the same indexing rules we saw for strings are applied: | x[0] | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The function len() works for lists: | len(x) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
TuplesA tuple consists of a number of values separated by commas, for instance: | t = ('spam', 'eggs', 100, 1234)
t | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The type tuple is why multiple assignments in a single line work; elements separated by commas (with or without surrounding parentheses) form a tuple, and in an expression with an '=', the right-side tuple is assigned to the left-side tuple: | a, b = 1, 2
print('a = ', a, '\nb = ', b) | a = 1
b = 2
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Is the same as: | (a, b) = (1, 2)
print('a = ', a, '\nb = ', b) | a = 1
b = 2
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
SetsPython also includes a data type for sets. A set is an unordered collection with no duplicate elements. | basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
fruit = set(basket) # create a set without duplicates
fruit | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
As a set is an unordered collection, it cannot be indexed like lists and tuples. | set(['orange', 'pear', 'apple', 'banana'])
'orange' in fruit # fast membership testing | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Dictionaries: A dictionary is a collection of elements organized as keys and values. Unlike lists and tuples, which are indexed by a range of numbers, dictionaries are indexed by their keys: | tel = {'jack': 4098, 'sape': 4139}
tel
tel['guido'] = 4127
tel
tel['jack']
del tel['sape']
tel['irv'] = 4127
tel
tel.keys()
'guido' in tel | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The dict() constructor builds dictionaries directly from sequences of key-value pairs: | tel = dict([('sape', 4139), ('guido', 4127), ('jack', 4098)])
tel | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
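As a small illustrative extension (not in the original notebook), a dictionary can also be traversed by its keys, values, or (key, value) pairs:

```python
tel = {'sape': 4139, 'guido': 4127, 'jack': 4098}

# items() yields (key, value) pairs, one per entry
for name, number in tel.items():
    print(name, number)

print(list(tel.keys()))    # just the keys
print(list(tel.values()))  # just the values
```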
Built-in Constants: - **False** : false value of the bool type - **True** : true value of the bool type - **None** : sole value of types.NoneType. None is frequently used to represent the absence of a value. In computer science, the Boolean or logical data type is composed of two values, true and false, intended to represent the values of logic and Boolean algebra. In Python, 1 and 0 can also be used in most situations as equivalent to the Boolean values. Logical (Boolean) operators and, or, not: - **and** : logical AND operator; if both operands are true, the condition is true ((a and b) is true). - **or** : logical OR operator; if either of the two operands is true (nonzero), the condition is true ((a or b) is true). - **not** : logical NOT operator; reverses the logical state of its operand, so if a condition is true, not makes it false. Comparisons: The following comparison operations are supported by objects in Python: - **==** : equal - **!=** : not equal - **<** : strictly less than - **<=** : less than or equal - **\>** : strictly greater than - **\>=** : greater than or equal - **is** : object identity - **is not** : negated object identity. An illustrative example of `is`/`is not` follows the next code cell. | True == False
not True == False
1 < 2 > 1
True != (False or True)
True != False or True | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
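The `is` and `is not` operators listed above test object identity rather than value equality; a minimal illustrative example (not from the original notebook):

```python
a = [1, 2, 3]
b = [1, 2, 3]

print(a == b)      # True: the two lists have the same value
print(a is b)      # False: they are two distinct objects in memory
print(a is not b)  # True

# `is` is the usual way to test for None:
c = None
print(c is None)   # True
```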
Indentation and whitespace: In Python, statement grouping is done by indentation (this is mandatory), which is done by inserting spaces, not tabs. Indentation is also recommended for aligning function calls that span more than one line, for better clarity. We will see examples of indentation in the next section. Control of flow: `if`...`elif`...`else`: Conditional statements (to perform something if another thing is True or False) can be implemented using the `if` statement: ```if expression: statement elif expression: statement else: statement``` `elif` (one or more) and `else` are optional. The indentation is obligatory. For example: | if True:
pass | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Which does nothing useful. Let's use the `if`...`elif`...`else` statements to categorize the [body mass index](http://en.wikipedia.org/wiki/Body_mass_index) of a person: | # body mass index
weight = 100 # kg
height = 1.70 # m
bmi = weight / height**2
if bmi < 15:
c = 'very severely underweight'
elif 15 <= bmi < 16:
c = 'severely underweight'
elif 16 <= bmi < 18.5:
c = 'underweight'
elif 18.5 <= bmi < 25:
c = 'normal'
elif 25 <= bmi < 30:
c = 'overweight'
elif 30 <= bmi < 35:
c = 'moderately obese'
elif 35 <= bmi < 40:
c = 'severely obese'
else:
c = 'very severely obese'
print('For a weight of {0:.1f} kg and a height of {1:.2f} m,\n\
the body mass index (bmi) is {2:.1f} kg/m2,\nwhich is considered {3:s}.'\
.format(weight, height, bmi, c)) | For a weight of 100.0 kg and a height of 1.70 m,
the body mass index (bmi) is 34.6 kg/m2,
which is considered moderately obese.
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
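The `print` call above uses the string method `format` with format specifications such as `{0:.1f}` (argument 0 as a float with 1 decimal place) and `{3:s}` (argument 3 as a string). A minimal illustrative example (not part of the original notebook):

```python
weight, height, bmi, c = 73.0, 1.70, 25.3, 'overweight'
print('weight = {0:.1f} kg, height = {1:.2f} m, bmi = {2:.1f} kg/m2 ({3:s})'
      .format(weight, height, bmi, c))
```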
forThe `for` statement iterates over a sequence to perform operations (a loop event).```for iterating_var in sequence: statements``` | for i in [3, 2, 1, 'go!']:
print(i),
for letter in 'Python':
print(letter), | P
y
t
h
o
n
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The `range()` function: The built-in function range() is useful if we need to create a sequence of numbers, for example, to iterate over it. It generates arithmetic progressions (in Python 3, a range object rather than a list): | help(range)
range(10)
range(1, 10, 2)
for i in range(10):
n2 = i**2
print(n2), | 0
1
4
9
16
25
36
49
64
81
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
while: The `while` statement is used for repeating sections of code in a loop until a condition is met (this is different from the `for` statement, which executes a fixed number of times): ```while expression: statement``` Let's generate the Fibonacci series using a `while` loop: | # Fibonacci series: the sum of two elements defines the next
a, b = 0, 1
while b < 1000:
print(b, end=' ')
a, b = b, a+b | 1 1 2 3 5 8 13 21 34 55 89 144 233 377 610 987 | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Function definition: A function in a programming language is a piece of code that performs a specific task. Functions are used to reduce duplication of code, making it easier to reuse, and to decompose complex problems into simpler parts. The use of functions contributes to the clarity of the code. A function is created with the `def` keyword, and the statements in the block of the function must be indented: | def function():
pass | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
By construction, this function does nothing when called: | function() | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The general syntax of a function definition is:```def function_name( parameters ): """Function docstring. The help for the function """ function body return variables```A more useful function: | def fibo(N):
"""Fibonacci series: the sum of two elements defines the next.
The series is calculated till the input parameter N and
returned as an output variable.
"""
a, b, c = 0, 1, []
while b < N:
c.append(b)
a, b = b, a + b
return c
fibo(100)
if 3 > 2:
print('teste') | teste
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
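The docstring written at the top of `fibo` is exactly what Python's help system displays; a small illustrative check (not part of the original notebook), assuming the `fibo` function defined above is available:

```python
help(fibo)   # prints the docstring of fibo
# In IPython, fibo? shows the same information.
```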
Let's implement the body mass index calculation and categorization as a function: | def bmi(weight, height):
"""Body mass index calculus and categorization.
Enter the weight in kg and the height in m.
See http://en.wikipedia.org/wiki/Body_mass_index
"""
bmi = weight / height**2
if bmi < 15:
c = 'very severely underweight'
elif 15 <= bmi < 16:
c = 'severely underweight'
elif 16 <= bmi < 18.5:
c = 'underweight'
elif 18.5 <= bmi < 25:
c = 'normal'
elif 25 <= bmi < 30:
c = 'overweight'
elif 30 <= bmi < 35:
c = 'moderately obese'
elif 35 <= bmi < 40:
c = 'severely obese'
else:
c = 'very severely obese'
s = 'For a weight of {0:.1f} kg and a height of {1:.2f} m,\
the body mass index (bmi) is {2:.1f} kg/m2,\
which is considered {3:s}.'\
.format(weight, height, bmi, c)
print(s)
bmi(73, 1.70) | For a weight of 73.0 kg and a height of 1.70 m, the body mass index (bmi) is 25.3 kg/m2, which is considered overweight.
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Numeric data manipulation with Numpy: Numpy is the fundamental package for scientific computing in Python and has an N-dimensional array package convenient for working with numerical data. With Numpy it's much easier and faster to work with numbers grouped as 1-D arrays (a vector), 2-D arrays (like a table or matrix), or higher dimensions. Let's create 1-D and 2-D arrays in Numpy: | import numpy as np
x1d = np.array([1, 2, 3, 4, 5, 6])
print(type(x1d))
x1d
x2d = np.array([[1, 2, 3], [4, 5, 6]])
x2d | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
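One reason Numpy is "easier and faster" for numerical data, as claimed above, is that arithmetic on arrays is element-wise (vectorized), with no explicit loop. A minimal illustrative example (not part of the original notebook):

```python
import numpy as np

x = np.array([1, 2, 3, 4, 5, 6])
print(x * 2)             # [ 2  4  6  8 10 12]
print(x + x)             # [ 2  4  6  8 10 12]
print(x ** 2)            # [ 1  4  9 16 25 36]
print(x.sum(), x.mean()) # 21 3.5
```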
len() and the Numpy functions size() and shape() give information about the number of elements and the structure of the Numpy array: | print('1-d array:')
print(x1d)
print('len(x1d) = ', len(x1d))
print('np.size(x1d) = ', np.size(x1d))
print('np.shape(x1d) = ', np.shape(x1d))
print('np.ndim(x1d) = ', np.ndim(x1d))
print('\n2-d array:')
print(x2d)
print('len(x2d) = ', len(x2d))
print('np.size(x2d) = ', np.size(x2d))
print('np.shape(x2d) = ', np.shape(x2d))
print('np.ndim(x2d) = ', np.ndim(x2d)) | 1-d array:
[1 2 3 4 5 6]
len(x1d) = 6
np.size(x1d) = 6
np.shape(x1d) = (6,)
np.ndim(x1d) = 1
2-d array:
[[1 2 3]
[4 5 6]]
len(x2d) = 2
np.size(x2d) = 6
np.shape(x2d) = (2, 3)
np.ndim(x2d) = 2
| MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Create random data | x = np.random.randn(4,3)
x | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Joining (stacking together) arrays | x = np.random.randint(0, 5, size=(2, 3))
print(x)
y = np.random.randint(5, 10, size=(2, 3))
print(y)
np.vstack((x,y))
np.hstack((x,y)) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Create equally spaced data | np.arange(start = 1, stop = 10, step = 2)
np.linspace(start = 0, stop = 1, num = 11) | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
InterpolationConsider the following data: | y = [5, 4, 10, 8, 1, 10, 2, 7, 1, 3] | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
Suppose we want to create data in between the given data points (interpolation); for instance, let's try to double the resolution of the data by generating twice as many data points: | t = np.linspace(0, len(y), len(y)) # time vector for the original data
tn = np.linspace(0, len(y), 2 * len(y)) # new time vector for the new time-normalized data
yn = np.interp(tn, t, y) # new time-normalized data
yn | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |
The key is the Numpy `interp` function; from its help: interp(x, xp, fp, left=None, right=None): One-dimensional linear interpolation. Returns the one-dimensional piecewise linear interpolant to a function with given values at discrete data points. A plot of the data will show what we have done: | %matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(10,5))
plt.plot(t, y, 'bo-', lw=2, label='original data')
plt.plot(tn, yn, '.-', color=[1, 0, 0, .5], lw=2, label='interpolated')
plt.legend(loc='best', framealpha=.5)
plt.show() | _____no_output_____ | MIT | notebooks/PythonTutorial.ipynb | jagar2/BMC |