TiKick | TiKick-main/setup.py |
import os
from setuptools import setup, find_packages
import setuptools
def get_version() -> str:
# https://packaging.python.org/guides/single-sourcing-package-version/
init = open(os.path.join("tmarl", "__init__.py"), "r").read().split()
return init[init.index("__version__") + 2][1:-1]
setup(
name="tmarl", # Replace with your own username
version=get_version(),
description="marl algorithms",
long_description=open("README.md", encoding="utf8").read(),
long_description_content_type="text/markdown",
author="tmarl",
author_email="tmarl_contact@tartrl.cn",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
keywords="multi-agent reinforcement learning algorithms pytorch",
python_requires='>=3.6',
)
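# --- Usage sketch (not part of the repo): how get_version() extracts the
# version. Splitting '__version__ = "x.y.z"' on whitespace puts the quoted
# value two tokens after "__version__", and [1:-1] strips the quotes. The
# string below is a hypothetical stand-in for tmarl/__init__.py's content.
tokens = '__version__ = "0.1.0"'.split()
version = tokens[tokens.index("__version__") + 2][1:-1]
assert version == "0.1.0"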
TiKick | TiKick-main/tmarl/networks/policy_network.py |
import torch
import torch.nn as nn
from tmarl.networks.utils.util import init, check
from tmarl.networks.utils.mlp import MLPBase, MLPLayer
from tmarl.networks.utils.rnn import RNNLayer
from tmarl.networks.utils.act import ACTLayer
from tmarl.networks.utils.popart import PopArt
from tmarl.utils.util import get_shape_from_obs_space
# networks are defined here
class PolicyNetwork(nn.Module):
def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
super(PolicyNetwork, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_policy_active_masks = args.use_policy_active_masks
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._use_influence_policy = args.use_influence_policy
self._influence_layer_N = args.influence_layer_N
        self._use_policy_vhead = args.use_policy_vhead
        self._use_popart = args.use_popart
        self._recurrent_N = args.recurrent_N
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
self._mixed_obs = False
self.base = MLPBase(args, obs_shape, use_attn_internal=False, use_cat_self=True)
input_size = self.base.output_size
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(input_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
input_size = self.hidden_size
if self._use_influence_policy:
self.mlp = MLPLayer(obs_shape[0], self.hidden_size,
self._influence_layer_N, self._use_orthogonal, self._activation_id)
input_size += self.hidden_size
self.act = ACTLayer(action_space, input_size, self._use_orthogonal, self._gain)
if self._use_policy_vhead:
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
if self._use_popart:
self.v_out = init_(PopArt(input_size, 1, device=device))
else:
self.v_out = init_(nn.Linear(input_size, 1))
self.to(device)
def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
return actions, action_log_probs, rnn_states
def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
action = check(action).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
if active_masks is not None:
active_masks = check(active_masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
        action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks=active_masks if self._use_policy_active_masks else None)
values = self.v_out(actor_features) if self._use_policy_vhead else None
return action_log_probs, dist_entropy, values
def get_policy_values(self, obs, rnn_states, masks):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
values = self.v_out(actor_features)
        return values
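# --- Usage sketch (not part of the repo). All hyperparameter values below are
# assumptions for illustration; PolicyNetwork only requires that the attributes
# read in __init__ exist on `args`.
from types import SimpleNamespace
import numpy as np
from gym.spaces import Box, Discrete

demo_args = SimpleNamespace(
    hidden_size=64, gain=0.01, use_orthogonal=True, activation_id=1,
    use_policy_active_masks=False, use_naive_recurrent_policy=False,
    use_recurrent_policy=True, use_influence_policy=False, influence_layer_N=1,
    use_policy_vhead=False, use_popart=False, recurrent_N=1,
    use_feature_normalization=True, use_conv1d=False, stacked_frames=1, layer_N=1)
net = PolicyNetwork(demo_args, Box(low=-1.0, high=1.0, shape=(10,)), Discrete(5))
obs = np.zeros((4, 10), dtype=np.float32)             # 4 parallel envs
rnn_states = np.zeros((4, 1, 64), dtype=np.float32)   # (batch, recurrent_N, hidden)
masks = np.ones((4, 1), dtype=np.float32)
actions, log_probs, rnn_states = net(obs, rnn_states, masks, deterministic=True)
# actions and log_probs have shape (4, 1); rnn_states is the updated (4, 1, 64).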
TiKick | TiKick-main/tmarl/networks/utils/distributions.py |
import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
# Standardize distribution interfaces
# Categorical
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
def log_probs(self, actions):
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Categorical, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x, available_actions=None):
x = self.linear(x)
if available_actions is not None:
x[available_actions == 0] = -1e10
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(DiagGaussian, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Bernoulli, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedBernoulli(logits=x)
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
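# --- Usage sketch (not part of the repo): the Categorical head turns features
# into logits and masks unavailable actions before building the distribution.
head = Categorical(num_inputs=8, num_outputs=4)
feats = torch.randn(2, 8)
avail = torch.tensor([[1, 1, 0, 1], [0, 1, 1, 1]])
dist = head(feats, available_actions=avail)
a = dist.sample()         # shape (2, 1); masked actions get ~zero probability
logp = dist.log_probs(a)  # shape (2, 1)
greedy = dist.mode()      # argmax over the masked logits, shape (2, 1)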
TiKick | TiKick-main/tmarl/networks/utils/mlp.py |
import torch.nn as nn
from .util import init, get_clones
class MLPLayer(nn.Module):
def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, activation_id):
super(MLPLayer, self).__init__()
self._layer_N = layer_N
active_func = [nn.Tanh(), nn.ReLU(), nn.LeakyReLU(), nn.ELU()][activation_id]
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
gain = nn.init.calculate_gain(['tanh', 'relu', 'leaky_relu', 'leaky_relu'][activation_id])
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)
self.fc1 = nn.Sequential(
init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc_h = nn.Sequential(init_(
nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc2 = get_clones(self.fc_h, self._layer_N)
def forward(self, x):
x = self.fc1(x)
for i in range(self._layer_N):
x = self.fc2[i](x)
return x
class MLPBase(nn.Module):
def __init__(self, args, obs_shape, use_attn_internal=False, use_cat_self=True):
super(MLPBase, self).__init__()
self._use_feature_normalization = args.use_feature_normalization
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_conv1d = args.use_conv1d
self._stacked_frames = args.stacked_frames
self._layer_N = args.layer_N
self.hidden_size = args.hidden_size
obs_dim = obs_shape[0]
inputs_dim = obs_dim
if self._use_feature_normalization:
self.feature_norm = nn.LayerNorm(obs_dim)
self.mlp = MLPLayer(inputs_dim, self.hidden_size,
self._layer_N, self._use_orthogonal, self._activation_id)
def forward(self, x):
if self._use_feature_normalization:
x = self.feature_norm(x)
x = self.mlp(x)
return x
@property
def output_size(self):
        return self.hidden_size
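# --- Usage sketch (not part of the repo); the `args` values are assumptions.
from types import SimpleNamespace
import torch
demo_args = SimpleNamespace(use_feature_normalization=True, use_orthogonal=True,
                            activation_id=1, use_conv1d=False, stacked_frames=1,
                            layer_N=2, hidden_size=64)
base = MLPBase(demo_args, obs_shape=(17,))
out = base(torch.randn(32, 17))   # -> (32, 64), matching base.output_size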
TiKick | TiKick-main/tmarl/networks/utils/popart.py |
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
super(PopArt, self).__init__()
self.beta = beta
self.epsilon = epsilon
self.norm_axes = norm_axes
self.tpdv = dict(dtype=torch.float32, device=device)
self.input_shape = input_shape
self.output_shape = output_shape
self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape)).to(**self.tpdv)
self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)
self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False).to(**self.tpdv)
self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
self.mean.zero_()
self.mean_sq.zero_()
self.debiasing_term.zero_()
def forward(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
return F.linear(input_vector, self.weight, self.bias)
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
old_mean, old_stddev = self.mean, self.stddev
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
self.stddev = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)
self.weight = self.weight * old_stddev / self.stddev
self.bias = (old_stddev * self.bias + old_mean - self.mean) / self.stddev
def debiased_mean_var(self):
debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
def normalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
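# --- Usage sketch (not part of the repo): PopArt is a linear value head whose
# running statistics map value predictions between raw and normalized scales
# (here the statistics are still at their initial values).
head = PopArt(input_shape=64, output_shape=1)
values = head(torch.randn(4, 64))        # plain linear projection, shape (4, 1)
norm = head.normalize(values.detach())   # uses the debiased running mean/var
raw = head.denormalize(norm)             # inverse transform; returns a numpy array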
TiKick | TiKick-main/tmarl/networks/utils/util.py |
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def check(input):
output = torch.from_numpy(input) if type(input) == np.ndarray else input
return output
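# --- Usage sketch (not part of the repo): init() applies a weight and a bias
# initializer to a layer in one call; get_clones() deep-copies it N times.
layer = init(nn.Linear(4, 2), nn.init.orthogonal_,
             lambda x: nn.init.constant_(x, 0), gain=0.01)
stack = get_clones(layer, 3)   # nn.ModuleList of 3 independent copies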
TiKick | TiKick-main/tmarl/networks/utils/act.py |
from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
def __init__(self, action_space, inputs_dim, use_orthogonal, gain):
super(ACTLayer, self).__init__()
self.multidiscrete_action = False
self.continuous_action = False
self.mixed_action = False
if action_space.__class__.__name__ == "Discrete":
action_dim = action_space.n
self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "Box":
self.continuous_action = True
action_dim = action_space.shape[0]
self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiBinary":
action_dim = action_space.shape[0]
self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiDiscrete":
self.multidiscrete_action = True
action_dims = action_space.high - action_space.low + 1
self.action_outs = []
for action_dim in action_dims:
self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))
self.action_outs = nn.ModuleList(self.action_outs)
        else:  # discrete + continuous
            self.mixed_action = True
            continuous_dim = action_space[0].shape[0]
            discrete_dim = action_space[1].n
            self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continuous_dim, use_orthogonal, gain), Categorical(
                inputs_dim, discrete_dim, use_orthogonal, gain)])
def forward(self, x, available_actions=None, deterministic=False):
if self.mixed_action :
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action.float())
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
elif self.multidiscrete_action:
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action)
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.cat(action_log_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
else:
action_logits = self.action_out(x, available_actions)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
return actions, action_log_probs
def get_probs(self, x, available_actions=None):
if self.mixed_action or self.multidiscrete_action:
action_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action_prob = action_logit.probs
action_probs.append(action_prob)
action_probs = torch.cat(action_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
action_probs = action_logits.probs
else:
action_logits = self.action_out(x, available_actions)
action_probs = action_logits.probs
return action_probs
def get_log_1mp(self, x, action, available_actions=None, active_masks=None):
action_logits = self.action_out(x, available_actions)
action_prob = torch.gather(action_logits.probs, 1, action.long())
action_prob = torch.clamp(action_prob, 0, 1-1e-6)
action_log_1mp = torch.log(1 - action_prob)
return action_log_1mp
def evaluate_actions(self, x, action, available_actions=None, active_masks=None):
if self.mixed_action:
a, b = action.split((2, 1), -1)
b = b.long()
action = [a, b]
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
if len(action_logit.entropy().shape) == len(active_masks.shape):
dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum())
else:
dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
dist_entropy = dist_entropy[0] * 0.0025 + dist_entropy[1] * 0.01
elif self.multidiscrete_action:
action = torch.transpose(action, 0, 1)
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong
dist_entropy = torch.tensor(dist_entropy).mean()
elif self.continuous_action:
action_logits = self.action_out(x)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
else:
action_logits = self.action_out(x, available_actions)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
        return action_log_probs, dist_entropy
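# --- Usage sketch (not part of the repo): with a Discrete space ACTLayer
# reduces to a single Categorical head and honors the availability mask.
from gym.spaces import Discrete
act_layer = ACTLayer(Discrete(5), inputs_dim=64, use_orthogonal=True, gain=0.01)
feats = torch.randn(8, 64)
avail = torch.ones(8, 5)
actions, log_probs = act_layer(feats, available_actions=avail)
log_probs2, entropy = act_layer.evaluate_actions(feats, actions, available_actions=avail)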
TiKick | TiKick-main/tmarl/networks/utils/rnn.py |
import torch
import torch.nn as nn
class RNNLayer(nn.Module):
def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):
super(RNNLayer, self).__init__()
self._recurrent_N = recurrent_N
self._use_orthogonal = use_orthogonal
self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
if self._use_orthogonal:
nn.init.orthogonal_(param)
else:
nn.init.xavier_uniform_(param)
self.norm = nn.LayerNorm(outputs_dim)
def forward(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())
x = x.squeeze(0)
hxs = hxs.transpose(0, 1)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0)
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.transpose(0, 1)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()
rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.reshape(T * N, -1)
hxs = hxs.transpose(0, 1)
x = self.norm(x)
return x, hxs
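# --- Usage sketch (not part of the repo) of the two forward paths: a single
# rollout step (batch matches the hidden-state batch) and a flattened chunk.
layer = RNNLayer(inputs_dim=32, outputs_dim=32, recurrent_N=1, use_orthogonal=True)
x_step = torch.randn(4, 32)                    # one step for 4 envs
h = torch.zeros(4, 1, 32)                      # (N, recurrent_N, hidden)
out, h = layer(x_step, h, torch.ones(4, 1))    # out: (4, 32)

x_chunk = torch.randn(5 * 4, 32)               # T=5 steps flattened over N=4 envs
out_chunk, h = layer(x_chunk, torch.zeros(4, 1, 32), torch.ones(5 * 4, 1))
# out_chunk: (20, 32); zeros in the masks would reset the hidden state mid-chunk.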
TiKick | TiKick-main/tmarl/drivers/shared_distributed/base_driver.py |
import numpy as np
import torch
def _t2n(x):
return x.detach().cpu().numpy()
class Driver(object):
def __init__(self, config, client=None):
self.all_args = config['all_args']
self.envs = config['envs']
self.eval_envs = config['eval_envs']
self.device = config['device']
self.num_agents = config['num_agents']
if 'signal' in config:
self.actor_id = config['signal'].actor_id
self.weight_ids = config['signal'].weight_ids
else:
self.actor_id = 0
self.weight_ids = [0]
# parameters
self.env_name = self.all_args.env_name
self.algorithm_name = self.all_args.algorithm_name
self.experiment_name = self.all_args.experiment_name
self.use_centralized_V = self.all_args.use_centralized_V
self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
self.num_env_steps = self.all_args.num_env_steps if hasattr(self.all_args,'num_env_steps') else self.all_args.eval_num
self.episode_length = self.all_args.episode_length
self.n_rollout_threads = self.all_args.n_rollout_threads
self.learner_n_rollout_threads = self.all_args.n_rollout_threads
self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
self.hidden_size = self.all_args.hidden_size
self.recurrent_N = self.all_args.recurrent_N
# interval
self.save_interval = self.all_args.save_interval
self.use_eval = self.all_args.use_eval
self.eval_interval = self.all_args.eval_interval
self.log_interval = self.all_args.log_interval
# dir
self.model_dir = self.all_args.model_dir
if self.algorithm_name == "rmappo":
from tmarl.algorithms.r_mappo_distributed.mappo_algorithm import MAPPOAlgorithm as TrainAlgo
from tmarl.algorithms.r_mappo_distributed.mappo_module import MAPPOModule as AlgoModule
else:
raise NotImplementedError
if self.envs:
share_observation_space = self.envs.share_observation_space[0] \
if self.use_centralized_V else self.envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.envs.observation_space[0],
share_observation_space,
self.envs.action_space[0],
device=self.device)
else:
share_observation_space = self.eval_envs.share_observation_space[0] \
if self.use_centralized_V else self.eval_envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.eval_envs.observation_space[0],
share_observation_space,
self.eval_envs.action_space[0],
device=self.device)
if self.model_dir is not None:
self.restore()
# algorithm
self.trainer = TrainAlgo(self.all_args, self.algo_module, device=self.device)
# buffer
from tmarl.replay_buffers.normal.shared_buffer import SharedReplayBuffer
self.buffer = SharedReplayBuffer(self.all_args,
self.num_agents,
self.envs.observation_space[0] if self.envs else self.eval_envs.observation_space[0],
share_observation_space,
self.envs.action_space[0] if self.envs else self.eval_envs.action_space[0])
def run(self):
raise NotImplementedError
def warmup(self):
raise NotImplementedError
def collect(self, step):
raise NotImplementedError
def insert(self, data):
raise NotImplementedError
def restore(self):
policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt', map_location=self.device)
self.algo_module.actor.load_state_dict(policy_actor_state_dict)
TiKick | TiKick-main/tmarl/drivers/shared_distributed/football_driver.py |
from tqdm import tqdm
import numpy as np
from tmarl.drivers.shared_distributed.base_driver import Driver
def _t2n(x):
return x.detach().cpu().numpy()
class FootballDriver(Driver):
def __init__(self, config):
super(FootballDriver, self).__init__(config)
def run(self):
self.trainer.prep_rollout()
episodes = int(self.num_env_steps)
total_num_steps = 0
for episode in range(episodes):
print('Episode {}:'.format(episode))
self.eval(total_num_steps)
def eval(self, total_num_steps):
eval_episode_rewards = []
eval_obs, eval_share_obs, eval_available_actions = self.eval_envs.reset()
agent_num = eval_obs.shape[1]
used_buffer = self.buffer
rnn_shape = [self.n_eval_rollout_threads, agent_num, *used_buffer.rnn_states_critic.shape[3:]]
eval_rnn_states = np.zeros(rnn_shape, dtype=np.float32)
eval_rnn_states_critic = np.zeros(rnn_shape, dtype=np.float32)
eval_masks = np.ones((self.n_eval_rollout_threads, agent_num, 1), dtype=np.float32)
finished = None
for eval_step in tqdm(range(3001)):
self.trainer.prep_rollout()
_, eval_action, eval_action_log_prob, eval_rnn_states, _ = \
self.trainer.algo_module.get_actions(np.concatenate(eval_share_obs),
np.concatenate(eval_obs),
np.concatenate(eval_rnn_states),
None,
np.concatenate(eval_masks),
np.concatenate(eval_available_actions),
deterministic=True)
eval_actions = np.array(
np.split(_t2n(eval_action), self.n_eval_rollout_threads))
eval_rnn_states = np.array(
np.split(_t2n(eval_rnn_states), self.n_eval_rollout_threads))
if self.eval_envs.action_space[0].__class__.__name__ == 'Discrete':
eval_actions_env = np.squeeze(
np.eye(self.eval_envs.action_space[0].n)[eval_actions], 2)
else:
raise NotImplementedError
            # Observe the reward and next obs
eval_obs, eval_share_obs, eval_rewards, eval_dones, eval_infos, eval_available_actions = \
self.eval_envs.step(eval_actions_env)
eval_rewards = eval_rewards.reshape([-1, agent_num]) # [roll_out, num_agents]
if finished is None:
eval_r = eval_rewards[:,:self.num_agents]
eval_episode_rewards.append(eval_r)
finished = eval_dones.copy()
else:
eval_r = (eval_rewards * ~finished)[:,:self.num_agents]
eval_episode_rewards.append(eval_r)
finished = eval_dones.copy() | finished
eval_masks = np.ones(
(self.n_eval_rollout_threads, agent_num, 1), dtype=np.float32)
eval_masks[eval_dones == True] = np.zeros(
((eval_dones == True).sum(), 1), dtype=np.float32)
eval_rnn_states[eval_dones == True] = np.zeros(
((eval_dones == True).sum(), self.recurrent_N, self.hidden_size), dtype=np.float32)
if finished.all() == True:
break
eval_episode_rewards = np.array(eval_episode_rewards) # [step,rollout,num_agents]
ally_goal = np.sum((eval_episode_rewards == 1), axis=0)
enemy_goal = np.sum((eval_episode_rewards == -1), axis=0)
net_goal = np.sum(eval_episode_rewards, axis=0)
winning_rate = np.mean(net_goal, axis=-1)
eval_env_infos = {}
eval_env_infos['eval_average_winning_rate'] = winning_rate>0
eval_env_infos['eval_average_losing_rate'] = winning_rate<0
eval_env_infos['eval_average_draw_rate'] = winning_rate==0
eval_env_infos['eval_average_ally_score'] = ally_goal
eval_env_infos['eval_average_enemy_score'] = enemy_goal
eval_env_infos['eval_average_net_score'] = net_goal
print("\tSuccess Rate: " + str(np.mean(winning_rate>0)) )
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_algorithm.py |
import torch
from tmarl.utils.valuenorm import ValueNorm
# implement the loss of the MAPPO here
class MAPPOAlgorithm():
def __init__(self,
args,
init_module,
device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.algo_module = init_module
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.policy_value_loss_coef = args.policy_value_loss_coef
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
self._use_policy_vhead = args.use_policy_vhead
assert (self._use_popart and self._use_valuenorm) == False, ("self._use_popart and self._use_valuenorm can not be set True simultaneously")
if self._use_popart:
self.value_normalizer = self.algo_module.critic.v_out
if self._use_policy_vhead:
self.policy_value_normalizer = self.algo_module.actor.v_out
elif self._use_valuenorm:
self.value_normalizer = ValueNorm(1, device = self.device)
if self._use_policy_vhead:
self.policy_value_normalizer = ValueNorm(1, device = self.device)
else:
self.value_normalizer = None
if self._use_policy_vhead:
self.policy_value_normalizer = None
def prep_rollout(self):
self.algo_module.actor.eval()
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_module.py |
import torch
from tmarl.networks.policy_network import PolicyNetwork
class MAPPOModule:
def __init__(self, args, obs_space, share_obs_space, act_space, device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = share_obs_space
self.act_space = act_space
self.actor = PolicyNetwork(args, self.obs_space, self.act_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay)
def get_actions(self, share_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False):
actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
        return None, actions, action_log_probs, rnn_states_actor, None
TiKick | TiKick-main/tmarl/loggers/utils.py |
import time
def timer(function):
"""
    Decorator that reports a function's wall-clock running time.
    :param function: the function to time
    :return: the wrapped function
"""
def wrapper(*args, **kwargs):
time_start = time.time()
res = function(*args, **kwargs)
cost_time = time.time() - time_start
print("{} running time: {}s".format(function.__name__, cost_time))
return res
    return wrapper
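# --- Usage sketch (not part of the repo):
import time

@timer
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)   # prints something like "slow_add running time: 0.1003...s"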
TiKick | TiKick-main/tmarl/runners/base_evaluator.py |
import random
import numpy as np
import torch
from tmarl.configs.config import get_config
from tmarl.runners.base_runner import Runner
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class Evaluator(Runner):
    def __init__(self, argv, program_type=None, client=None):
super().__init__(argv)
parser = get_config()
all_args = self.extra_args_func(argv, parser)
all_args.cuda = not all_args.disable_cuda
self.algorithm_name = all_args.algorithm_name
# cuda
if not all_args.disable_cuda and torch.cuda.is_available():
device = torch.device("cuda:0")
if all_args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
else:
print("choose to use cpu...")
device = torch.device("cpu")
# run dir
run_dir = self.setup_run_dir(all_args)
# env init
Env_Class, SubprocVecEnv, DummyVecEnv = self.get_env()
eval_envs = self.env_init(
all_args, Env_Class, SubprocVecEnv, DummyVecEnv)
num_agents = all_args.num_agents
config = {
"all_args": all_args,
"envs": None,
"eval_envs": eval_envs,
"num_agents": num_agents,
"device": device,
"run_dir": run_dir,
}
self.all_args, self.envs, self.eval_envs, self.config \
= all_args, None, eval_envs, config
self.driver = self.init_driver()
def run(self):
# run experiments
self.driver.run()
self.stop()
def stop(self):
pass
def extra_args_func(self, argv, parser):
raise NotImplementedError
def get_env(self):
raise NotImplementedError
def init_driver(self):
raise NotImplementedError
def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
def get_env_fn(rank):
def init_env():
env = Env_Class(all_args)
env.seed(all_args.seed * 50000 + rank * 10000)
return env
return init_env
if all_args.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])
def env_init(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
eval_envs = self.make_eval_env(
all_args, Env_Class, SubprocVecEnv, DummyVecEnv) if all_args.use_eval else None
return eval_envs
def setup_run_dir(self, all_args):
return None
TiKick | TiKick-main/tmarl/runners/base_runner.py |
import os
import random
import socket
import setproctitle
import numpy as np
from pathlib import Path
import torch
from tmarl.configs.config import get_config
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class Runner:
def __init__(self, argv):
self.argv = argv
def run(self):
# main run
        raise NotImplementedError
TiKick | TiKick-main/tmarl/runners/football/football_evaluator.py |
import sys
import os
from pathlib import Path
from tmarl.runners.base_evaluator import Evaluator
from tmarl.envs.football.football import RllibGFootball
from tmarl.envs.env_wrappers import ShareSubprocVecEnv, ShareDummyVecEnv
class FootballEvaluator(Evaluator):
def __init__(self, argv):
super(FootballEvaluator, self).__init__(argv)
def setup_run_dir(self, all_args):
dump_dir = Path(all_args.replay_save_dir)
if not dump_dir.exists():
os.makedirs(str(dump_dir))
self.dump_dir = dump_dir
return super(FootballEvaluator, self).setup_run_dir(all_args)
def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
def get_env_fn(rank):
def init_env():
env = Env_Class(all_args, rank, log_dir=str(self.dump_dir), isEval=True)
env.seed(all_args.seed * 50000 + rank * 10000)
return env
return init_env
if all_args.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])
def extra_args_func(self, args, parser):
parser.add_argument('--scenario_name', type=str,
default='simple_spread', help="Which scenario to run on")
parser.add_argument('--num_agents', type=int,
default=0, help="number of players")
# football config
parser.add_argument('--representation', type=str,
default='raw', help="format of the observation in gfootball env")
parser.add_argument('--rewards', type=str,
default='scoring', help="format of the reward in gfootball env")
parser.add_argument("--render_only", action='store_true', default=False,
help="if ture, render without training")
all_args = parser.parse_known_args(args)[0]
return all_args
def get_env(self):
return RllibGFootball, ShareSubprocVecEnv, ShareDummyVecEnv
def init_driver(self):
if not self.all_args.separate_policy:
from tmarl.drivers.shared_distributed.football_driver import FootballDriver as Driver
else:
raise NotImplementedError
driver = Driver(self.config)
return driver
def main(argv):
evaluator = FootballEvaluator(argv)
evaluator.run()
if __name__ == "__main__":
main(sys.argv[1:])
TiKick | TiKick-main/tmarl/utils/multi_discrete.py |
import gym
import numpy as np
# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always needs to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
self.n = np.sum(self.high) + 2
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
random_array = np.random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
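# --- Usage sketch (not part of the repo), mirroring the docstring example:
space = MultiDiscrete([[0, 4], [0, 1], [0, 1]])
a = space.sample()          # e.g. [3, 0, 1]
assert space.contains(a)
assert space.shape == 3     # three component discrete spaces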
TiKick | TiKick-main/tmarl/utils/valuenorm.py |
import numpy as np
import torch
import torch.nn as nn
class ValueNorm(nn.Module):
""" Normalize a vector of observations - across the first norm_axes dimensions"""
def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")):
super(ValueNorm, self).__init__()
self.input_shape = input_shape
self.norm_axes = norm_axes
self.epsilon = epsilon
self.beta = beta
self.per_element_update = per_element_update
self.tpdv = dict(dtype=torch.float32, device=device)
self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
self.running_mean.zero_()
self.running_mean_sq.zero_()
self.debiasing_term.zero_()
def running_mean_var(self):
debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
if self.per_element_update:
batch_size = np.prod(input_vector.size()[:self.norm_axes])
weight = self.beta ** batch_size
else:
weight = self.beta
self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))
def normalize(self, input_vector):
# Make sure input is float32
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
""" Transform normalized data back into original distribution """
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
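# --- Usage sketch (not part of the repo): ValueNorm keeps debiased running
# statistics of value targets, like PopArt but without the linear head.
vn = ValueNorm(input_shape=1)
targets = torch.randn(128, 1) * 3 + 7
vn.update(targets)
norm = vn.normalize(targets)    # approximately standardized
raw = vn.denormalize(norm)      # numpy array back on the original scale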
TiKick | TiKick-main/tmarl/utils/util.py |
import copy
import numpy as np
import math
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
from gym.spaces import Box, Discrete, Tuple
def check(input):
    output = torch.from_numpy(input) if type(input) == np.ndarray else input
    return output
def get_gard_norm(it):
sum_grad = 0
for x in it:
if x.grad is None:
continue
sum_grad += x.grad.norm() ** 2
return math.sqrt(sum_grad)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
"""Decreases the learning rate linearly"""
lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def huber_loss(e, d):
a = (abs(e) <= d).float()
b = (e > d).float()
return a*e**2/2 + b*d*(abs(e)-d/2)
def mse_loss(e):
return e**2/2
def get_shape_from_obs_space(obs_space):
if obs_space.__class__.__name__ == 'Box':
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == 'list':
obs_shape = obs_space
elif obs_space.__class__.__name__ == 'Dict':
obs_shape = obs_space.spaces
else:
raise NotImplementedError
return obs_shape
def get_shape_from_act_space(act_space):
if act_space.__class__.__name__ == 'Discrete':
act_shape = 1
elif act_space.__class__.__name__ == "MultiDiscrete":
act_shape = act_space.shape
elif act_space.__class__.__name__ == "Box":
act_shape = act_space.shape[0]
elif act_space.__class__.__name__ == "MultiBinary":
act_shape = act_space.shape[0]
else: # agar
act_shape = act_space[0].shape[0] + 1
return act_shape
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(
list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
def to_torch(input):
return torch.from_numpy(input) if type(input) == np.ndarray else input
def to_numpy(x):
return x.detach().cpu().numpy()
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample()
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always needs to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
self.n = np.sum(self.high) + 2
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
random_array = np.random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
class DecayThenFlatSchedule():
def __init__(self,
start,
finish,
time_length,
decay="exp"):
self.start = start
self.finish = finish
self.time_length = time_length
self.delta = (self.start - self.finish) / self.time_length
self.decay = decay
if self.decay in ["exp"]:
self.exp_scaling = (-1) * self.time_length / \
np.log(self.finish) if self.finish > 0 else 1
def eval(self, T):
if self.decay in ["linear"]:
return max(self.finish, self.start - self.delta * T)
elif self.decay in ["exp"]:
return min(self.start, max(self.finish, np.exp(- T / self.exp_scaling)))
pass
def huber_loss(e, d):
a = (abs(e) <= d).float()
b = (e > d).float()
return a*e**2/2 + b*d*(abs(e)-d/2)
def mse_loss(e):
return e**2
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def soft_update(target, source, tau):
"""
Perform DDPG soft update (move target params toward source based on weight
factor tau)
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
tau (float, 0 < x < 1): Weight factor for update
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target, source):
"""
Copy network parameters from source to target
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
def onehot_from_logits(logits, avail_logits=None, eps=0.0):
"""
Given batch of logits, return one-hot sample using epsilon greedy strategy
(based on given epsilon)
"""
# get best (according to current policy) actions in one-hot form
logits = to_torch(logits)
dim = len(logits.shape) - 1
if avail_logits is not None:
avail_logits = to_torch(avail_logits)
logits[avail_logits == 0] = -1e10
argmax_acs = (logits == logits.max(dim, keepdim=True)[0]).float()
if eps == 0.0:
return argmax_acs
# get random actions in one-hot form
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
# chooses between best and random actions using epsilon greedy
return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in
enumerate(torch.rand(logits.shape[0]))])
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
"""Sample from Gumbel(0, 1)"""
U = Variable(tens_type(*shape).uniform_(), requires_grad=False)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, avail_logits, temperature, device=torch.device('cpu')):
""" Draw a sample from the Gumbel-Softmax distribution"""
if str(device) == 'cpu':
y = logits + sample_gumbel(logits.shape, tens_type=type(logits.data))
else:
y = (logits.cpu() + sample_gumbel(logits.shape,
tens_type=type(logits.data))).cuda()
dim = len(logits.shape) - 1
if avail_logits is not None:
avail_logits = to_torch(avail_logits).to(device)
y[avail_logits == 0] = -1e10
return F.softmax(y / temperature, dim=dim)
def gumbel_softmax(logits, avail_logits=None, temperature=1.0, hard=False, device=torch.device('cpu')):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
    be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, avail_logits, temperature, device)
if hard:
y_hard = onehot_from_logits(y)
y = (y_hard - y).detach() + y
return y
def gaussian_noise(shape, std):
return torch.empty(shape).normal_(mean=0, std=std)
def get_obs_shape(obs_space):
if obs_space.__class__.__name__ == "Box":
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == "list":
obs_shape = obs_space
else:
raise NotImplementedError
return obs_shape
def get_dim_from_space(space):
if isinstance(space, Box):
dim = space.shape[0]
elif isinstance(space, Discrete):
dim = space.n
elif isinstance(space, Tuple):
dim = sum([get_dim_from_space(sp) for sp in space])
elif "MultiDiscrete" in space.__class__.__name__:
return (space.high - space.low) + 1
elif isinstance(space, list):
dim = space[0]
else:
raise Exception("Unrecognized space: ", type(space))
return dim
def get_state_dim(observation_dict, action_dict):
combined_obs_dim = sum([get_dim_from_space(space)
for space in observation_dict.values()])
combined_act_dim = 0
for space in action_dict.values():
dim = get_dim_from_space(space)
if isinstance(dim, np.ndarray):
combined_act_dim += int(sum(dim))
else:
combined_act_dim += dim
return combined_obs_dim, combined_act_dim, combined_obs_dim+combined_act_dim
def get_cent_act_dim(action_space):
cent_act_dim = 0
for space in action_space:
dim = get_dim_from_space(space)
if isinstance(dim, np.ndarray):
cent_act_dim += int(sum(dim))
else:
cent_act_dim += dim
return cent_act_dim
def is_discrete(space):
if isinstance(space, Discrete) or "MultiDiscrete" in space.__class__.__name__:
return True
else:
return False
def is_multidiscrete(space):
if "MultiDiscrete" in space.__class__.__name__:
return True
else:
return False
def make_onehot(int_action, action_dim, seq_len=None):
if type(int_action) == torch.Tensor:
int_action = int_action.cpu().numpy()
if not seq_len:
return np.eye(action_dim)[int_action]
if seq_len:
onehot_actions = []
for i in range(seq_len):
onehot_action = np.eye(action_dim)[int_action[i]]
onehot_actions.append(onehot_action)
return np.stack(onehot_actions)
def avail_choose(x, avail_x=None):
x = to_torch(x)
if avail_x is not None:
avail_x = to_torch(avail_x)
x[avail_x == 0] = -1e10
return x # FixedCategorical(logits=x)
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(
list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
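# --- Usage sketch (not part of the repo): straight-through Gumbel-Softmax over
# (optionally masked) logits; hard=True returns a one-hot sample that still
# passes gradients to the soft sample.
logits = torch.randn(4, 6, requires_grad=True)
y = gumbel_softmax(logits, avail_logits=torch.ones(4, 6), temperature=1.0, hard=True)
assert torch.allclose(y.sum(-1), torch.ones(4))   # each row is one-hot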
TiKick | TiKick-main/tmarl/utils/segment_tree.py |
import numpy as np
def unique(sorted_array):
"""
More efficient implementation of np.unique for sorted arrays
:param sorted_array: (np.ndarray)
:return:(np.ndarray) sorted_array without duplicate elements
"""
if len(sorted_array) == 1:
return sorted_array
left = sorted_array[:-1]
right = sorted_array[1:]
uniques = np.append(right != left, True)
return sorted_array[uniques]
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""
Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array that supports Index arrays, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
:param capacity: (int) Total size of the array - must be a power of two.
:param operation: (lambda (Any, Any): Any) operation for combining elements (eg. sum, max) must form a
mathematical group together with the set of possible values for array elements (i.e. be associative)
:param neutral_element: (Any) neutral element for the operation above. eg. float('-inf') for max and 0 for sum.
"""
assert capacity > 0 and capacity & (
capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
self.neutral_element = neutral_element
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(
mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""
Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
:param start: (int) beginning of the subsequence
:param end: (int) end of the subsequences
:return: (Any) result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# indexes of the leaf
idxs = idx + self._capacity
self._value[idxs] = val
if isinstance(idxs, int):
idxs = np.array([idxs])
# go up one level in the tree and remove duplicate indexes
idxs = unique(idxs // 2)
while len(idxs) > 1 or idxs[0] > 0:
# as long as there are non-zero indexes, update the corresponding values
self._value[idxs] = self._operation(
self._value[2 * idxs],
self._value[2 * idxs + 1]
)
# go up one level in the tree and remove duplicate indexes
idxs = unique(idxs // 2)
def __getitem__(self, idx):
assert np.max(idx) < self._capacity
assert 0 <= np.min(idx)
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=np.add,
neutral_element=0.0
)
self._value = np.array(self._value)
def sum(self, start=0, end=None):
"""
Returns arr[start] + ... + arr[end]
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1)
:return: (Any) reduction of SumSegmentTree
"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""
        Find the highest index `i` in the array such that
        arr[0] + arr[1] + ... + arr[i - 1] <= prefixsum for each entry in prefixsum
        if array values are probabilities, this function
        allows one to sample indexes according to the discrete
        probability efficiently.
:param prefixsum: (np.ndarray) float upper bounds on the sum of array prefix
:return: (np.ndarray) highest indexes satisfying the prefixsum constraint
"""
if isinstance(prefixsum, float):
prefixsum = np.array([prefixsum])
assert 0 <= np.min(prefixsum)
assert np.max(prefixsum) <= self.sum() + 1e-5
assert isinstance(prefixsum[0], float)
idx = np.ones(len(prefixsum), dtype=int)
cont = np.ones(len(prefixsum), dtype=bool)
        while np.any(cont):  # while not all nodes are leafs
            # descend to the left child
            idx[cont] = 2 * idx[cont]
            # if the left-subtree sum is not enough, subtract it from prefixsum
            # and step to the right child; otherwise keep descending left
            prefixsum_new = np.where(
                self._value[idx] <= prefixsum, prefixsum - self._value[idx], prefixsum)
            idx = np.where(np.logical_or(
                self._value[idx] > prefixsum, np.logical_not(cont)), idx, idx + 1)
            prefixsum = prefixsum_new
            cont = idx < self._capacity  # stop once a leaf is reached
        return idx - self._capacity  # convert tree indexes back to array indexes
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=np.minimum,
neutral_element=float('inf')
)
self._value = np.array(self._value)
def min(self, start=0, end=None):
"""
Returns min(arr[start], ..., arr[end])
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr), can be None for len(arr) - 1)
:return: (Any) reduction of MinSegmentTree
"""
return super(MinSegmentTree, self).reduce(start, end)
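
if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the library): sample an
    # index proportionally to the stored priorities, as done in prioritized
    # replay buffers.
    tree = SumSegmentTree(4)
    for i, p in enumerate([0.1, 0.4, 0.3, 0.2]):
        tree[i] = p
    assert abs(tree.sum() - 1.0) < 1e-9
    assert tree.find_prefixsum_idx(0.45)[0] == 1  # 0.1 <= 0.45 < 0.1 + 0.4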
| 6,859 | 40.325301 | 119 | py |
TiKick | TiKick-main/tmarl/utils/gpu_mem_track.py |
import gc
import datetime
import inspect
import torch
import numpy as np
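# Bytes occupied by a single element of each dtype (bit width / 8).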
dtype_memory_size_dict = {
torch.float64: 64/8,
torch.double: 64/8,
torch.float32: 32/8,
torch.float: 32/8,
torch.float16: 16/8,
torch.half: 16/8,
torch.int64: 64/8,
torch.long: 64/8,
torch.int32: 32/8,
torch.int: 32/8,
torch.int16: 16/8,
    torch.short: 16/8,
torch.uint8: 8/8,
torch.int8: 8/8,
}
# compatibility of torch1.0
if getattr(torch, "bfloat16", None) is not None:
dtype_memory_size_dict[torch.bfloat16] = 16/8
if getattr(torch, "bool", None) is not None:
dtype_memory_size_dict[torch.bool] = 8/8 # pytorch use 1 byte for a bool, see https://github.com/pytorch/pytorch/issues/41571
def get_mem_space(x):
    try:
        ret = dtype_memory_size_dict[x]
    except KeyError:
        print(f"dtype {x} is not supported!")
        ret = 0  # fall back to 0 instead of raising UnboundLocalError below
    return ret
class MemTracker(object):
"""
Class used to track pytorch memory usage
Arguments:
        detail(bool, default True): whether to log per-tensor details of GPU memory usage
        path(str): where to save the log file
        verbose(bool, default False): whether to print trivial exceptions raised while scanning objects
        device(int): GPU number, default is 0
"""
def __init__(self, detail=True, path='', verbose=False, device=0):
self.print_detail = detail
self.last_tensor_sizes = set()
self.gpu_profile_fn = path + f'{datetime.datetime.now():%d-%b-%y-%H:%M:%S}-gpu_mem_track.txt'
self.verbose = verbose
self.begin = True
self.device = device
def get_tensors(self):
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
tensor = obj
else:
continue
if tensor.is_cuda:
yield tensor
            except Exception as e:
                if self.verbose:
                    print('A trivial exception occurred: {}'.format(e))
def get_tensor_usage(self):
sizes = [np.prod(np.array(tensor.size())) * get_mem_space(tensor.dtype) for tensor in self.get_tensors()]
return np.sum(sizes) / 1024**2
    def get_allocate_usage(self):
        return torch.cuda.memory_allocated(self.device) / 1024**2
def clear_cache(self):
gc.collect()
torch.cuda.empty_cache()
def print_all_gpu_tensor(self, file=None):
for x in self.get_tensors():
print(x.size(), x.dtype, np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2, file=file)
def track(self):
"""
Track the GPU memory usage
"""
frameinfo = inspect.stack()[1]
where_str = frameinfo.filename + ' line ' + str(frameinfo.lineno) + ': ' + frameinfo.function
with open(self.gpu_profile_fn, 'a+') as f:
if self.begin:
f.write(f"GPU Memory Track | {datetime.datetime.now():%d-%b-%y-%H:%M:%S} |"
f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
self.begin = False
if self.print_detail is True:
ts_list = [(tensor.size(), tensor.dtype) for tensor in self.get_tensors()]
new_tensor_sizes = {(type(x),
tuple(x.size()),
ts_list.count((x.size(), x.dtype)),
np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2,
x.dtype) for x in self.get_tensors()}
for t, s, n, m, data_type in new_tensor_sizes - self.last_tensor_sizes:
f.write(f'+ | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
for t, s, n, m, data_type in self.last_tensor_sizes - new_tensor_sizes:
f.write(f'- | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
self.last_tensor_sizes = new_tensor_sizes
f.write(f"\nAt {where_str:<50}"
f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
| 4,432 | 36.888889 | 129 | py |
TiKick | TiKick-main/tmarl/utils/modelsize_estimate.py |
import torch.nn as nn
import numpy as np
def modelsize(model, input, type_size=4):
para = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Model {} : Number of params: {}'.format(model._get_name(), para))
    print('Model {} : params: {:4f}MB'.format(model._get_name(), para * type_size / 1000 / 1000))
input_ = input.clone()
input_.requires_grad_(requires_grad=False)
mods = list(model.modules())
out_sizes = []
for i in range(1, len(mods)):
m = mods[i]
if isinstance(m, nn.ReLU):
if m.inplace:
continue
out = m(input_)
out_sizes.append(np.array(out.size()))
input_ = out
total_nums = 0
for i in range(len(out_sizes)):
s = out_sizes[i]
nums = np.prod(np.array(s))
total_nums += nums
    # print('Model {} : Number of intermediate variables without backward: {}'.format(model._get_name(), total_nums))
    # print('Model {} : Number of intermediate variables with backward: {}'.format(model._get_name(), total_nums*2))
    print('Model {} : intermediate variables: {:3f} MB (without backward)'
          .format(model._get_name(), total_nums * type_size / 1000 / 1000))
    print('Model {} : intermediate variables: {:3f} MB (with backward)'
          .format(model._get_name(), total_nums * type_size*2 / 1000 / 1000))
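
if __name__ == '__main__':
    # Minimal sketch: the traversal above applies modules one after another, so
    # this estimate is only meaningful for flat, purely sequential models.
    import torch
    net = nn.Sequential(nn.Linear(128, 256), nn.ReLU(), nn.Linear(256, 10))
    modelsize(net, torch.randn(1, 128))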
| 1,428 | 34.725 | 116 | py |
TiKick | TiKick-main/scripts/football/replay2video.py |
"""Script allowing to replay a given trace file.
Example usage:
python replay.py --trace_file=/tmp/dumps/shutdown_20190521-165136974075.dump
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tmarl.envs.football.env import script_helpers
from absl import app
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('replay_file', None, 'replay file path')
flags.DEFINE_string('video_save_dir', '../../results/videos', 'video save dir')
flags.DEFINE_integer('fps', 10, 'How many frames per second to render')
flags.mark_flag_as_required('replay_file')
def main(_):
    script_helpers.ScriptHelpers().replay(FLAGS.replay_file, FLAGS.fps, directory=FLAGS.video_save_dir)
if __name__ == '__main__':
app.run(main) | 1,389 | 31.325581 | 102 | py |
criterion.rs | criterion.rs-master/benches/benchmarks/external_process.py | import time
import sys
def fibonacci(n):
if n == 0 or n == 1:
return 1
return fibonacci(n - 1) + fibonacci(n - 2)
MILLIS = 1000
MICROS = MILLIS * 1000
NANOS = MICROS * 1000
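
# Protocol: Criterion sends one iteration count per line on stdin; for each
# request we time that many runs of the workload and reply on stdout with the
# elapsed wall-clock time in nanoseconds.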
def benchmark():
depth = int(sys.argv[1])
for line in sys.stdin:
iters = int(line.strip())
# Setup
start = time.perf_counter()
for x in range(iters):
fibonacci(depth)
end = time.perf_counter()
# Teardown
delta = end - start
nanos = int(delta * NANOS)
print("%d" % nanos)
sys.stdout.flush()
benchmark()
| 603 | 15.324324 | 46 | py |
RobDanns | RobDanns-main/deep_learning/yaml_gen.py |
"""Generate yaml files for experiment configurations."""
import yaml
# import math
import os
import re
import argparse
import numpy as np
import shutil
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
dest='task',
        help='Generate configs for the given task: e.g., mlp_cifar10, cnn_cifar10, cnn_cifar100, cnn_imagenet, resnet18_tinyimagenet, resnet18_imagenet',
default='mlp_cifar10',
type=str
)
return parser.parse_args()
def makedirs_rm_exist(dir):
if os.path.isdir(dir):
shutil.rmtree(dir)
os.makedirs(dir, exist_ok=True)
def purge(dir, pattern):
for f in os.listdir(dir):
if re.search(pattern, f):
os.remove(os.path.join(dir, f))
def gen(dir_in, dir_out, fname_base, vars_label, vars_alias, vars_value):
'''Generate yaml files'''
with open(dir_in + fname_base + '.yaml') as f:
data_base = yaml.unsafe_load(f)
for vars in vars_value:
data = data_base.copy()
fname_new = fname_base
for id, var in enumerate(vars):
if vars_label[id][0] in data: # if key1 exist
data[vars_label[id][0]][vars_label[id][1]] = var
else:
data[vars_label[id][0]] = {vars_label[id][1]: var}
if vars_label[id][1] == 'TRANS_FUN':
var = var.split('_')[0]
fname_new += '_{}{}'.format(vars_alias[id], var)
with open(dir_out + fname_new + '.yaml', "w") as f:
yaml.dump(data, f, default_flow_style=False)
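
# e.g. with vars_alias ['trans', 'num'] and values ['talklinear_transform', 64], a base
# file 'mlp_bs128_1gpu_layer3.yaml' yields 'mlp_bs128_1gpu_layer3_transtalklinear_num64.yaml'.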
def gen_single(dir_in, dir_out, fname_base, vars_label, vars_alias, vars, comment='best'):
'''Generate yaml files for a single experiment'''
with open(dir_in + fname_base + '.yaml') as f:
data_base = yaml.unsafe_load(f)
data = data_base.copy()
fname_new = '{}_{}'.format(fname_base, comment)
for id, var in enumerate(vars):
if vars_label[id][0] in data: # if key1 exist
data[vars_label[id][0]][vars_label[id][1]] = var
else:
data[vars_label[id][0]] = {vars_label[id][1]: var}
with open(dir_out + fname_new + '.yaml', "w") as f:
yaml.dump(data, f, default_flow_style=False)
def grid2list(grid):
'''grid search to list'''
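    # e.g. grid2list([[1, 2], ['a', 'b']]) -> [[1, 'a'], [2, 'a'], [1, 'b'], [2, 'b']];
    # note that the input grid is consumed via pop() below.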
list_in = [[i] for i in grid[0]]
grid.pop(0)
for grid_temp in grid:
list_out = []
for val in grid_temp:
for list_temp in list_in:
list_out.append(list_temp + [val])
list_in = list_out
return list_in
args = parse_args()
# Format for all experiments
# Note: many arguments are deprecated, they are kept to be consistent with existing experimental results
vars_value = []
vars_label = [['RESNET', 'TRANS_FUN'], ['RGRAPH', 'TALK_MODE'], ['RGRAPH', 'GROUP_NUM'],
['RGRAPH', 'MESSAGE_TYPE'], ['RGRAPH', 'SPARSITY'], ['RGRAPH', 'P'], ['RGRAPH', 'AGG_FUNC'],
['RGRAPH', 'SEED_GRAPH'], ['RGRAPH', 'SEED_TRAIN_START'], ['RGRAPH', 'SEED_TRAIN_END'],
['RGRAPH', 'KEEP_GRAPH'],
['RGRAPH', 'ADD_1x1'], ['RGRAPH', 'UPPER'], ['TRAIN', 'AUTO_MATCH'], ['OPTIM', 'MAX_EPOCH'], ['TRAIN', 'CHECKPOINT_PERIOD']]
vars_alias = ['trans', 'talkmode', 'num',
'message', 'sparsity', 'p', 'agg',
'graphseed', 'starttrainseed', 'endtrainseed', 'keep',
'add1x1', 'upper', 'match', 'epoch', 'chkpt'
]
## Notes: (1) choose how many relational graphs to run via the graph file: graphs_n64_54, graphs_n64_441, graphs_n64_3854
## (2) "best_id" is to be discovered from experimental results; the given best_id values are for the graph2nn experiments
## (3) each ImageNet experiment ships with 1 seed; change SEED_TRAIN_START and SEED_TRAIN_END
##     to get results for multiple seeds
# usage: python yaml_gen.py --task mlp_cifar10
if args.task == 'mlp_cifar10':
# best_id = 3552 # best_id is for graph2nn experiments.
fname_bases = ['mlp_bs128_1gpu_layer3']
# graphs = np.load('analysis/graphs_n64_53.npy')
# To load the .npy file
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['talklinear_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 6, True,
0, True, True, 200, 200]]
vars_value += [['linear_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 6, True,
0, True, True, 200, 200]]
# usage: python yaml_gen.py --task cnn_cifar10
if args.task == 'cnn_cifar10':
# best_id = 3552 # best_id is for graph2nn experiments.
fname_bases = ['cnn6_bs1024_8gpu_64d']
# graphs = np.load('analysis/graphs_n64_53.npy')
# To load the .npy file
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['convtalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 6, True,
0, True, True, 100, 100]]
vars_value += [['convbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 6, True,
0, True, True, 100, 100]]
# usage: python yaml_gen.py --task cnn_cifar100
elif args.task == 'cnn_cifar100':
# best_id = 3552 # best_id is for graph2nn experiments.
fname_bases = ['cnn6_bs640_1gpu_64d']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['convtalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 6, True,
0, True, True, 100, 100]]
vars_value += [['convbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 6, True,
0, True, True, 100, 100]]
# usage: python yaml_gen.py --task resnet18_tinyimagenet
elif args.task == 'resnet18_tinyimagenet':
fname_bases = ['R-18_tiny_bs256_1gpu']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupbasictalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 75, 25]]
vars_value += [['channelbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 75, 25]]
elif args.task == 'cnn_imagenet':
# best_id = 27 # best_id is for graph2nn experiments.
fname_bases = ['cnn6_bs32_1gpu_64d', 'cnn6_bs256_8gpu_64d']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
# modify the default parameters of np.load
np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
# call load_data with allow_pickle implicitly set to true
graphs = np.load('analysis/graphs_n64_53.npy')
# restore np.load for future normal usage
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['convtalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 100]]
vars_value += [['convbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 100]]
# usage: python yaml_gen.py --task resnet18_imagenet
elif args.task == 'resnet18_imagenet':
# best_id = 37 # best_id is for graph2nn experiments.
fname_bases = ['R-18_bs450_1gpu']
# graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
graphs = np.load('analysis/graphs_n64_53.npy')
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupbasictalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 75, 25]]
vars_value += [['channelbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 75, 25]]
# usage: python yaml_gen.py --task resnet34_imagenet
elif args.task == 'resnet34_imagenet':
# best_id = 37 # best_id is for graph2nn experiments.
fname_bases = ['R-34_bs32_1gpu', 'R-34_bs256_8gpu']
# graphs = np.load('analysis/graphs_n64_52.npy')
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
graphs = np.load('analysis/graphs_n64_53.npy')
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupbasictalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['channelbasic_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
elif args.task == 'resnet34sep_imagenet':
# best_id = 36 # best_id is for graph2nn experiments.
fname_bases = ['R-34_bs32_1gpu', 'R-34_bs256_8gpu']
    np_load_old = np.load
    np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
    graphs = np.load('analysis/graphs_n64_53.npy')
    np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['groupseptalk_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['channelsep_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
elif args.task == 'resnet50_imagenet':
# best_id = 22 # best_id is for graph2nn experiments.
fname_bases = ['R-50_bs32_1gpu', 'R-50_bs256_8gpu']
    np_load_old = np.load
    np.load = lambda *a, **k: np_load_old(*a, allow_pickle=True, **k)
    graphs = np.load('analysis/graphs_n64_53.npy')
    np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['talkbottleneck_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['bottleneck_transform', 'dense', 64,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
# usage: python yaml_gen.py --task efficient_imagenet
elif args.task == 'efficient_imagenet':
# best_id = 42 # best_id is for graph2nn experiments.
fname_bases = ['EN-B0_bs64_1gpu_nms', 'EN-B0_bs512_8gpu_nms']
    # graphs = np.load('analysis/graphs_n64_53.npy')
np_load_old = np.load
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
graphs = np.load('analysis/graphs_n64_53.npy')
np.load = np_load_old
for graph in graphs:
sparsity = float(round(graph[1], 6))
randomness = float(round(graph[2], 6))
graphseed = int(graph[3])
vars_value += [['mbtalkconv_transform', 'dense', int(graph[0]),
'ws', sparsity, randomness, 'sum',
graphseed, 1, 2, True,
0, True, True, 100, 25]]
vars_value += [['mbconv_transform', 'dense', 16,
'ws', 1.0, 0.0, 'sum',
1, 1, 2, True,
0, True, True, 100, 25]]
# elif args.task == 'mlp_cifar10_bio':
# fname_bases = ['mlp_bs128_1gpu_layer3']
# for graph_type in ['mcwholeraw']:
# vars_value += [['talklinear_transform', 'dense', 71,
# graph_type, 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# for graph_type in ['mcvisualraw']:
# vars_value += [['talklinear_transform', 'dense', 30,
# graph_type, 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# for graph_type in ['catraw']:
# vars_value += [['talklinear_transform', 'dense', 52,
# graph_type, 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# vars_value += [['linear_transform', 'dense', 64,
# 'ws', 1.0, 0.0, 'sum',
# 1, 1, 6, True,
# 0, True, True, 200]]
# note: check 'cifar100' before 'cifar10', since the latter is a substring of the former
if 'cifar100' in args.task:
    dir_name = 'cifar100'
elif 'cifar10' in args.task:
    dir_name = 'cifar10'
elif 'tinyimagenet' in args.task:
    dir_name = 'tinyimagenet200'
else:
    dir_name = 'imagenet'
dir_in = 'configs/baselines/{}/'.format(dir_name)
dir_out = 'configs/baselines/{}/{}/'.format(dir_name, args.task)
dir_out_all = 'configs/baselines/{}/{}/all/'.format(dir_name, args.task)
dir_out_best = 'configs/baselines/{}/{}/best/'.format(dir_name, args.task)
# makedirs_rm_exist(dir_out)
# makedirs_rm_exist(dir_out_all)
# makedirs_rm_exist(dir_out_best)
os.makedirs(dir_out_all, exist_ok=True)  # ensure output dirs exist before writing configs
os.makedirs(dir_out_best, exist_ok=True)
# print(vars_value)
for fname_base in fname_bases:
if 'bio' not in args.task:
gen(dir_in, dir_out_all, fname_base, vars_label, vars_alias, vars_value)
# gen_single(dir_in, dir_out_best, fname_base, vars_label, vars_alias, vars_value[best_id], comment='best')
gen_single(dir_in, dir_out_best, fname_base, vars_label, vars_alias, vars_value[-1], comment='baseline')
else:
gen(dir_in, dir_out_best, fname_base, vars_label, vars_alias, vars_value)
| 16,638 | 38.058685 | 138 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference-tinyimagenet.py |
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
    parser = argparse.ArgumentParser(
        description='Run corruption-robustness inference for a classification model'
    )
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST(VAL) DATA_LOADER FOR TINY_IMAGENET200
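# Each line of val_annotations.txt looks like "<filename> <wnid> <bbox coords>";
# parseClasses keeps the image filename (token 0) and its class id (token 1).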
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
    if not os.path.isdir(dir):
        sys.exit("Image directory not found: {}".format(dir))
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
#if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
# return eval_stats
def save_noisy_image(img, name):
    if img.size(2) == 32:
        img = img.view(img.size(0), 3, 32, 32)
    elif img.size(2) == 64:
        img = img.view(img.size(0), 3, 64, 64)
    else:
        img = img.view(img.size(0), 3, 224, 224)
    save_image(img, name)
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
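        # Auto-match: iteratively rescale cfg.RGRAPH.DIM_LIST so that the relational-graph
        # model matches the baseline model's flop (or parameter) budget before training.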
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
    if cu.has_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
data_path = dp.get_data_path(cfg.TRAIN.DATASET) # Retrieve the data path for the dataset
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
        noise_mode = ['gaussian', 'speckle', 's&p']
        noise_std = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]  # change the variance values as desired.
        max_images = 10000  # hypothetical cap on evaluated images (replaces the undefined placeholder 'X'); adjust as desired.
model.eval()
accuracies_gaussian = []
accuracies_saltpepper = []
accuracies_speckle = []
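        # skimage's random_noise returns float64 arrays in [0, 1], hence the
        # .float() cast before the forward pass below.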
for mode in noise_mode:
for level in noise_std:
print("Adding noise={} at level={} to images".format(mode, level))
ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader):
if not 's&p' in mode:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
else:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
outputs = model(noisy_img.float())
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += labels.size(0)
correct += (predicted == labels).sum()
                    if total > max_images:
                        break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
if 'gaussian' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_gaussian.append(round(acc, 2))
print("Guassian Accuracies after append :", accuracies_gaussian)
elif 'speckle' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_speckle.append(round(acc, 2))
print("Speckle Accuracies after append :", accuracies_speckle)
elif 's&p' in mode:
print('Robust Accuracy = {:.3f} for S&P noise'.format(acc))
accuracies_saltpepper.append(round(acc, 2))
print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
break
else:
print("noise mode not supported")
# gaussian_noise(test_loader)
# salt_pepper_noise(test_loader)
# speckle_noise(test_loader)
        # Unpack one accuracy per noise level; adjust the variable counts below if noise_std changes.
gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = (items for items in accuracies_gaussian)
speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = (items for items in accuracies_speckle)
saltpepper = accuracies_saltpepper[0]
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_gaussian = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
result_speck = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
result_sp = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(saltpepper)])
with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
text_file.write(result_gaussian + '\n')
with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
text_file.write(result_sp + '\n')
with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
text_file.write(result_speck + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Launch inference + corruption-robustness evaluation
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
            print('Inference seed {} already exists, skipping'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 25,928 | 41.092532 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_resnet18_on_tinyimagenet200.py |
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
logger = lu.get_logger(__name__)
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
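# Each line of val_annotations.txt looks like "<filename> <wnid> <bbox coords>";
# parseClasses keeps the image filename (token 0) and its class id (token 1).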
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
    if not os.path.isdir(dir):
        sys.exit("Image directory not found: {}".format(dir))
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
#if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
# logger.info('Number of images: {}'.format(len(self.imgs)))
# logger.info('Number of classes: {}'.format(len(self.classnames)))
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
def train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0, is_master=False):
"""Performs one epoch of training."""
# Shuffle the data
loader.shuffle(train_loader, cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cur_epoch)
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(train_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.scaled_all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point)
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
# Update and log stats
train_meter.update_stats(
top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
trg_stats = train_meter.get_epoch_stats(cur_epoch)
train_meter.reset()
return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
return eval_stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
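        # Auto-match: iteratively rescale cfg.RGRAPH.DIM_LIST so that the relational-graph
        # model matches the baseline model's flop (or parameter) budget before training.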
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
last_checkpoint = cu.get_checkpoint_last()
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
# Create data loaders
# Retrieve the data path for the dataset
data_path = dp.get_data_path(cfg.TRAIN.DATASET)
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
train_meter = TrainMeter(len(train_loader))
test_meter = TestMeter(len(test_loader))
    # (fgsm meter removed: no adversarial test loader is constructed in this script)
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
# do eval at initialization
initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
writer_eval, params, flops, is_master=is_master)
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
else:
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
print('Epoch {} Started'.format(cur_epoch))
# Train for one epoch
trg_stats = train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
writer_train, is_master=is_master
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# Save a checkpoint
if cu.is_checkpoint_epoch(cur_epoch):
checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
# Evaluate the model
if is_eval_epoch(cur_epoch):
eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
if cfg.NUM_GPUS > 1:
mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
else:
single_proc_train()
else:
print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 21,617 | 37.741935 | 129 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference-tinyimagenet200.py |
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
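# Each line of val_annotations.txt is expected to look like
# "<filename>\t<wnid>\t<x>\t<y>\t<w>\t<h>"; only the first two tokens
# (filename and class id) are used here.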
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
# if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
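    # When SAVE_GRAPH is enabled, dump the model's graph structure (adjacency matrices
    # extracted by nu.model2adj) together with this epoch's top-1 error for later analysis.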
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
# return eval_stats
class Normalize(torch.nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1,3,1,1)
std = self.std.reshape(1,3,1,1)
norm_img = (input - mean) / std
return norm_img
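# Normalization lives inside the network (it is prepended as the first module in
# train_model) so that the attack libraries can craft perturbations directly in
# raw [0, 1] pixel space.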
# Helper class for printing model layers
class PrintLayer(torch.nn.Module):
def __init__(self):
super(PrintLayer, self).__init__()
def forward(self, x):
# Do your print / debug stuff here
print(x)
return x
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
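        # AUTO_MATCH iteratively rescales the channel widths in cfg.RGRAPH.DIM_LIST until
        # the model's FLOPs (or parameter count) match the hard-coded baseline for the
        # chosen dataset/architecture, keeping different graph settings at a comparable budget.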
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
    if cu.has_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
data_path = dp.get_data_path(cfg.TRAIN.DATASET) # Retrieve the data path for the dataset
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# create adversarial dataset
adv_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()]))
# create adversarial loader
test_loader_adv = torch.utils.data.DataLoader(
adv_dataset,
batch_size=1,
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
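    # The adversarial loader intentionally skips normalization and uses batch_size=1:
    # the attacks below expect single images in the raw [0, 1] pixel range.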
# Create meters
test_meter = TestMeter(len(test_loader))
test_meter_adv = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
    # epsilon=0 --> PGD, epsilon=1 --> CW, any other value --> FGSM with that epsilon.
    # The seven FGSM epsilons below are illustrative placeholders; replace them with the
    # values required for your attack sweep (the unpacking further down expects seven).
    epsilons = [0, 1 / 255, 2 / 255, 3 / 255, 4 / 255, 6 / 255, 8 / 255, 16 / 255, 1]
    # Per-channel mean and SD values in RGB order for the TinyImageNet dataset
tinyimagenet_MEAN = [0.485, 0.456, 0.406]
tinyimagenet_SD = [0.229, 0.224, 0.225]
accuracies = []
# add normalization layer to the model
norm_layer = Normalize(mean=tinyimagenet_MEAN, std=tinyimagenet_SD)
net = torch.nn.Sequential(norm_layer, model).cuda()
net = net.eval()
for epsilon in epsilons:
if epsilon == 0:
print("Running PGD Attack")
            atk = torchattacks.PGD(net, eps=1/510, alpha=2/255, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 1:
print("Running CW Attack")
atk = torchattacks.CW(net, c=0.1, kappa=0, steps=100, lr=0.01) # choose suitable values for c, kappa, steps, and lr.
else:
print("Running FGSM Attacks on epsilon :", epsilon)
atk = torchattacks.FGSM(net, eps=epsilon)
        num_adv_images = 1000  # example placeholder: number of images to attack per epsilon; set as required
        ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
adv_images = atk(inputs, labels)
outputs = net(adv_images)
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += 1
correct += (predicted == labels).sum()
            if ctr > num_adv_images:  # attack only a fixed number of images per epsilon
print(ctr, " images done for epsilon:", epsilon)
break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
accuracies.append(round(acc, 2))
print('Attack Accuracy = {:.3f} with epsilon = {:.4f}'.format(acc, epsilon))
print("accuracies after apend :", accuracies)
# save items inside accuracies list to separate float objects, update the # of variables according to requirement.
accPGD, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5, accFGSM6, accFGSM7, accCW = (items for items in accuracies)
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_info = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(accPGD), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
str(accFGSM6), str(accFGSM7), str(accCW)])
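    # One line is appended to stats.txt per seed: graph settings (group number, P, sparsity),
    # clean top-1/top-5 errors, then the nine attack accuracies in the order of `epsilons`.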
with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies {} ".format(accuracies))
text_file.write(result_info + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
    ## If using tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,184 | 38.768439 | 147 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference.py |
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
import torchvision
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.transforms as transforms
from datetime import datetime
from tensorboardX import SummaryWriter
import foolbox as fb
import art
import art.attacks.evasion as evasion
from art.estimators.classification import PyTorchClassifier
print("Using GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
# val_input_imgs,
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
class Normalize(torch.nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1,3,1,1)
std = self.std.reshape(1,3,1,1)
norm_img = (input - mean) / std
return norm_img
# Helper class for printing model layers
class PrintLayer(torch.nn.Module):
def __init__(self):
super(PrintLayer, self).__init__()
def forward(self, x):
# Do your print / debug stuff here
print(x)
return x
def train_model(writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
if cfg.MODEL.DEPTH == 20:
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.DEPTH == 26:
stats_baseline = 56140000 # ResNet26
elif cfg.MODEL.DEPTH == 34:
stats_baseline = 71480000 # ResNet34
elif cfg.MODEL.DEPTH == 38:
stats_baseline = 86819000 # ResNet38
elif cfg.MODEL.DEPTH == 50:
stats_baseline = 130000000 # ResNet50
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
elif cfg.TRAIN.DATASET == 'imagenet':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
if cfg.IS_INFERENCE and cfg.IS_DDP:
model = torch.nn.parallel.DataParallel(model)
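        # Note: torch.nn.parallel.DataParallel is single-process multi-GPU; despite the
        # flag name IS_DDP, this is not DistributedDataParallel.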
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
    if cu.has_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
test_loader = loader.construct_test_loader()
test_loader_adv = loader.construct_test_loader_adv()
# Create meters
test_meter = TestMeter(len(test_loader))
test_meter_adv = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
    # epsilon=0, 1 --> PGD (eps=6/255, 9/255), epsilon=2, 3 --> CW (c=0.15, 0.25), any other value --> FGSM with that epsilon.
    # The five FGSM epsilons below are illustrative placeholders; replace them with the
    # values required for your attack sweep (the unpacking further down expects five).
    epsilons = [0, 1, 1 / 255, 2 / 255, 4 / 255, 8 / 255, 16 / 255, 2, 3]
    # Per-channel mean and SD values (CIFAR constants in RGB order, ImageNet constants in BGR order)
cifar10_MEAN = [0.491, 0.482, 0.4465]
cifar10_SD = [0.247, 0.243, 0.262]
cifar100_MEAN = [0.507, 0.487, 0.441]
cifar100_SD = [0.267, 0.256, 0.276]
imagenet_MEAN = [0.406, 0.456, 0.485]
imagenet_SD = [0.225, 0.224, 0.229]
accuracies = []
# replace the MEAN and SD variable in the following line for the relevant dataset.
norm_layer = Normalize(mean=cifar10_MEAN, std=cifar10_SD)
net = torch.nn.Sequential(norm_layer, model).cuda()
# net = torch.nn.Sequential(norm_layer, PrintLayer(), model).cuda()
net = net.eval()
print("Adversarial Loader Batch Size =", test_loader_adv.batch_size)
for epsilon in epsilons:
if epsilon == 0:
print("Running PGD Attack")
atk_ta = torchattacks.PGD(net, eps=6/255, alpha=2/255, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 1:
print("Running PGD Attack")
atk_ta = torchattacks.PGD(net, eps=9/255, alpha=2/255, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 2:
print("Running Torchattacks.CW")
atk_ta = torchattacks.CW(net, c=0.15, kappa=0, steps=100, lr=0.01) # replace the values of c and steps according to hyperparameters reported in the paper.
elif epsilon == 3:
print("Running Torchattacks.CW")
atk_ta = torchattacks.CW(net, c=0.25, kappa=0, steps=100, lr=0.01) # replace the values of c and steps according to hyperparameters reported in the paper.
# For Foolbox or ART attacks, uncomment the following lines.
# print("-> FoolBox.CW")
# fmodel = fb.PyTorchModel(net, bounds=(0, 1))
# atk_fb = fb.attacks.L2CarliniWagnerAttack(binary_search_steps=1, initial_const=0.05,
# confidence=0, steps=100, stepsize=0.01)
# print("-> Adversarial Robustness Toolbox.CW")
# classifier = PyTorchClassifier(model=net, clip_values=(0, 1),
# loss=loss_fun,
# optimizer=optimizer,
# input_shape=(3, 32, 32), nb_classes=10)
# atk_art = evasion.CarliniL2Method(batch_size=1, classifier=classifier,
# binary_search_steps=1, initial_const=0.05,
# confidence=0, max_iter=100,
# learning_rate=0.01)
else:
print("Running FGSM Attacks on epsilon :", epsilon)
atk_ta = torchattacks.FGSM(net, eps=epsilon)
        num_adv_images = 1000  # example placeholder: number of images to attack per epsilon; set as required
        ctr = 0
correct_ta = 0
# correct_fb = 0
# correct_art = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
inputs = inputs.float().div(255)
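            # construct_test_loader_adv is assumed to yield uint8 images in [0, 255];
            # dividing by 255 gives the [0, 1] range the attacks expect.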
adv_images_ta = atk_ta(inputs, labels)
# _, adv_images_fb, _ = atk_fb(fmodel, inputs, labels, epsilons=1)
# adv_images_art = torch.tensor(atk_art.generate(inputsnp, labelsnp)).cuda()
adv_inputs_ta = adv_images_ta.float()
# adv_inputs_fb = adv_images_fb.float()
# adv_inputs_art = adv_images_art.float()
outputs_ta = net(adv_inputs_ta)
# outputs_fb = net(adv_inputs_fb)
# outputs_art = net(adv_inputs_art)
_, predicted_ta = torch.max(outputs_ta.data, 1)
# _, predicted_fb = torch.max(outputs_fb.data, 1)
# _, predicted_art = torch.max(outputs_art.data, 1)
ctr += 1
total += 1
correct_ta += (predicted_ta == labels).sum()
# correct_fb += (predicted_fb == labels).sum()
# correct_art += (predicted_art == labels).sum()
            if ctr > num_adv_images:  # attack only a fixed number of images per epsilon
print(ctr, " images done for epsilon:", epsilon)
break
acc_ta = 100 * float(correct_ta) / total
# acc_fb = 100 * float(correct_fb) / total
# acc_art = 100 * float(correct_art) / total
print("ta acc =", round(acc_ta, 2), ", ta correct =", float(correct_ta), ", total =", total)
# print("fb acc =", round(acc_fb, 2), ", fb correct =", float(correct_fb), ", total =", total)
# print("art acc =", round(acc_art, 2), ", art correct =", float(correct_art), ", total =", total)
accuracies.append(round(acc_ta, 2))
print('Attack Accuracy = {:.3f} with epsilon = {:.2f}'.format(acc_ta, epsilon))
print("accuracies after apend :", accuracies)
# save items inside accuracies list to separate float objects, update the # of variables according to requirement.
accPGD_6by255, accPGD_9by255, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5, accCW_15, accCW_25 = (items for items in accuracies)
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_info = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(accPGD_6by255), str(accPGD_9by255), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
str(accCW_15), str(accCW_25)])
#
with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies {} ".format(accuracies))
text_file.write(result_info + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
    # If using tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_eval, is_master=du.is_master_proc())
if writer_eval is not None:
# writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
print('Trained seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,798 | 41.72711 | 166 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference.py |
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
import torchvision
import torchattacks
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.transforms as transforms
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Using GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
logger.info('Number of node: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
# val_input_imgs,
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
def save_noisy_image(img, name):
if img.size(2) == 32:
img = img.view(img.size(0), 3, 32, 32)
save_image(img, name)
else:
img = img.view(img.size(0), 3, 224, 224)
save_image(img, name)
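# skimage's random_noise returns float64 arrays in [0, 1], hence the torch.tensor(...)
# wrapping and the .float() cast before images are fed to the model below.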
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# save_noisy_image(s_vs_p_6, r"noisy-images/s&p_6.png")
# save_noisy_image(s_vs_p_7, r"noisy-images/s&p_7.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
if cfg.MODEL.DEPTH == 20:
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.DEPTH == 26:
stats_baseline = 56140000 # ResNet26
elif cfg.MODEL.DEPTH == 34:
stats_baseline = 71480000 # ResNet34
elif cfg.MODEL.DEPTH == 38:
stats_baseline = 86819000 # ResNet38
elif cfg.MODEL.DEPTH == 50:
stats_baseline = 130000000 # ResNet50
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
elif cfg.TRAIN.DATASET == 'imagenet':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
if cfg.IS_INFERENCE and cfg.IS_DDP:
model = torch.nn.parallel.DataParallel(model)
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
    if cu.has_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
test_loader = loader.construct_test_loader()
# Create meters
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
noise_mode = ['gaussian', 'speckle', 's&p']
noise_var = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6] # change the variance values as desired.
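    # Gaussian and speckle noise sweep all variances above; salt & pepper ignores the
    # variance (fixed salt_vs_pepper=0.5), so its inner loop breaks after one iteration.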
model.eval()
accuracies_gaussian = []
accuracies_saltpepper = []
accuracies_speckle = []
for mode in noise_mode:
for level in noise_var:
print("Adding noise={} at level={} to images".format(mode, level))
            num_noisy_images = 1000  # example placeholder: number of images to evaluate per noise level; set as required
            ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader):
if not 's&p' in mode:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
else:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
outputs = model(noisy_img.float())
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += labels.size(0)
correct += (predicted == labels).sum()
                if total > num_noisy_images:  # evaluate only a fixed number of noisy images per level
break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
if 'gaussian' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_gaussian.append(round(acc, 2))
print("Guassian Accuracies after append :", accuracies_gaussian)
elif 'speckle' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_speckle.append(round(acc, 2))
print("Speckle Accuracies after append :", accuracies_speckle)
elif 's&p' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_saltpepper.append(round(acc, 2))
print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
break
else:
print("noise mode not supported")
# gaussian_noise(test_loader)
# salt_pepper_noise(test_loader)
# speckle_noise(test_loader)
# Change the number of variable as desired number of outputs.
gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = (items for items in accuracies_gaussian)
speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = (items for items in accuracies_speckle)
saltpepper = accuracies_saltpepper[0]
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_gaussian = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
result_speck = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
result_sp = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(saltpepper)])
with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
text_file.write(result_gaussian + '\n')
with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
text_file.write(result_sp + '\n')
with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
text_file.write(result_speck + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
    # If using tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
    # Launch inference + corruption-robustness run
train_model(writer_eval, is_master=du.is_master_proc())
if writer_eval is not None:
# writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,864 | 42.708791 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_net.py |
"""Train a classification model."""
import argparse
import pickle
import numpy as np
import os
import sys
import torch
import math
# import torchvision
# import time
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
from datetime import datetime
from tensorboardX import SummaryWriter
# import wandb
logger = lu.get_logger(__name__)
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
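# Hedged example: with cfg.TRAIN.EVAL_PERIOD = 10 and cfg.OPTIM.MAX_EPOCH = 25,
# the model is evaluated after epochs 9, 19 and 24 (0-indexed cur_epoch),
# i.e. every 10th epoch plus the final one.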
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
def train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0,
is_master=False):
"""Performs one epoch of training."""
# Shuffle the data
loader.shuffle(train_loader, cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cur_epoch)
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(train_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.scaled_all_reduce(
[loss, top1_err, top5_err]
)
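            # Hedged note: scaled_all_reduce is assumed to average each tensor
            # across the cfg.NUM_GPUS processes, so the logged loss/error
            # reflect the global mini-batch rather than this GPU's shard.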
# Copy the stats from GPU to CPU (sync point)
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
# Update and log stats
train_meter.update_stats(
top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
trg_stats = train_meter.get_epoch_stats(cur_epoch)
train_meter.reset()
return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
return stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
if cfg.MODEL.DEPTH == 20:
stats_baseline = 40813184 # ResNet20
elif cfg.MODEL.DEPTH == 26:
stats_baseline = 56140000 # ResNet26
elif cfg.MODEL.DEPTH == 34:
stats_baseline = 71480000 # ResNet34
elif cfg.MODEL.DEPTH == 38:
stats_baseline = 86819000 # ResNet38
elif cfg.MODEL.DEPTH == 50:
stats_baseline = 130000000 # ResNet50
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'imagenet':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
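        # Hedged summary of the matching procedure below: round 1 rescales the
        # first-stage width (keeping the per-stage ratios fixed) until the
        # model's flops/params cross stats_baseline, then fine-tunes by single
        # channels; round 2 nudges the remaining stages' widths individually.
        # cfg.RGRAPH.UPPER picks whether the final stats land just below or
        # just above the baseline.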
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# wandb.watch(model)
# Load a checkpoint if applicable
start_epoch = 0
if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
last_checkpoint = cu.get_checkpoint_last1()
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
        if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
            # Training already reached MAX_EPOCH; nothing left to resume.
            exit()
        else:
            start_epoch = checkpoint_epoch + 1
# Create data loaders
train_loader = loader.construct_train_loader()
test_loader = loader.construct_test_loader()
# Create meters
train_meter = TrainMeter(len(train_loader))
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
# do eval at initialization
initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
writer_eval, params, flops, is_master=is_master)
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
else:
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
print('Epoch {} Started'.format(cur_epoch))
# Train for one epoch
trg_stats = train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
writer_train, is_master=is_master
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# Save a checkpoint
if cu.is_checkpoint_epoch(cur_epoch):
checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
# Evaluate the model
if is_eval_epoch(cur_epoch):
eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
# wandb.log({'Epoch': cur_epoch, 'Train top1_err': trg_stats['top1_err'], 'Test top1_err': eval_stats['top1_err']})
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
    # Set up TensorBoard writers if enabled
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# wandb.init(project = 'Rob_G2NN', entity='rowanai-graph-robustness')
# Parse cmd line args
args = parse_args()
# wandb.config.update(args)
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
if cfg.NUM_GPUS > 1:
mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
else:
single_proc_train()
else:
print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 18,692 | 39.113734 | 127 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/losses.py |
"""Loss functions."""
import torch.nn as nn
from pycls.config import cfg
# Supported losses
_LOSS_FUNS = {
'cross_entropy': nn.CrossEntropyLoss,
}
def get_loss_fun():
"""Retrieves the loss function."""
assert cfg.MODEL.LOSS_FUN in _LOSS_FUNS.keys(), \
        'Loss function \'{}\' not supported'.format(cfg.MODEL.LOSS_FUN)
return _LOSS_FUNS[cfg.MODEL.LOSS_FUN]().cuda()
| 730 | 26.074074 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/efficientnet.py |
"""EfficientNet models."""
import math
import torch
import torch.nn as nn
from pycls.config import cfg
import pycls.utils.net as nu
import pycls.utils.logging as logging
from .relation_graph import *
logger = logging.get_logger(__name__)
def get_conv(name):
"""Retrieves the transformation function by name."""
trans_funs = {
'mbconv_transform': MBConv,
'mbtalkconv_transform': MBTalkConv,
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
def drop_connect_tf(x, drop_ratio):
"""Drop connect (tensorflow port)."""
keep_ratio = 1.0 - drop_ratio
rt = torch.rand([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
rt.add_(keep_ratio)
bt = torch.floor(rt)
x.div_(keep_ratio)
x.mul_(bt)
return x
def drop_connect_pt(x, drop_ratio):
"""Drop connect (pytorch version)."""
keep_ratio = 1.0 - drop_ratio
mask = torch.empty([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device)
mask.bernoulli_(keep_ratio)
x.div_(keep_ratio)
x.mul_(mask)
return x
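# Hedged example for both drop-connect variants: with drop_ratio = 0.2 each
# sample's residual branch survives with probability 0.8, and surviving
# samples are scaled by 1 / 0.8 so the expected activation is unchanged:
#   x = torch.ones(4, 1, 1, 1)
#   y = drop_connect_pt(x.clone(), 0.2)  # entries are either 0.0 or 1.25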
def get_act_fun(act_type):
"""Retrieves the activations function."""
act_funs = {
'swish': Swish,
'relu': nn.ReLU,
}
assert act_type in act_funs.keys(), \
'Activation function \'{}\' not supported'.format(act_type)
return act_funs[act_type]
class SimpleHead(nn.Module):
"""Simple head."""
def __init__(self, dim_in, num_classes):
super(SimpleHead, self).__init__()
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# Dropout
if cfg.EFFICIENT_NET.DROPOUT_RATIO > 0.0:
self.dropout = nn.Dropout(p=cfg.EFFICIENT_NET.DROPOUT_RATIO)
# FC
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x) if hasattr(self, 'dropout') else x
x = self.fc(x)
return x
class ConvHead(nn.Module):
"""EfficientNet conv head."""
def __init__(self, in_w, out_w, num_classes, act_fun):
super(ConvHead, self).__init__()
self._construct_class(in_w, out_w, num_classes, act_fun)
def _construct_class(self, in_w, out_w, num_classes, act_fun):
# 1x1, BN, Swish
self.conv = nn.Conv2d(
in_w, out_w,
kernel_size=1, stride=1, padding=0, bias=False
)
self.conv_bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.conv_swish = act_fun()
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# Dropout
if cfg.EFFICIENT_NET.DROPOUT_RATIO > 0.0:
self.dropout = nn.Dropout(p=cfg.EFFICIENT_NET.DROPOUT_RATIO)
# FC
self.fc = nn.Linear(out_w, num_classes, bias=True)
def forward(self, x):
# 1x1, BN, Swish
x = self.conv_swish(self.conv_bn(self.conv(x)))
# AvgPool
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
# Dropout
x = self.dropout(x) if hasattr(self, 'dropout') else x
# FC
x = self.fc(x)
return x
class LinearHead(nn.Module):
"""EfficientNet linear head."""
def __init__(self, in_w, out_w, num_classes, act_fun):
super(LinearHead, self).__init__()
self._construct_class(in_w, out_w, num_classes, act_fun)
def _construct_class(self, in_w, out_w, num_classes, act_fun):
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# FC0
self.fc0 = nn.Linear(in_w, out_w, bias=False)
self.fc0_bn = nn.BatchNorm1d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.fc0_swish = act_fun()
# FC
self.fc = nn.Linear(out_w, num_classes, bias=True)
def forward(self, x):
# AvgPool
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
# Linear, BN, Swish
x = self.fc0_swish(self.fc0_bn(self.fc0(x)))
# FC
x = self.fc(x)
return x
class MBConv(nn.Module):
"""Mobile inverted bottleneck block with SE (MBConv)."""
def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, seed=None, exp_w=None):
super(MBConv, self).__init__()
self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, act_fun)
def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun):
# Expansion: 1x1, BN, Swish
self.expand = None
exp_w = int(in_w * exp_r)
# Include exp ops only if the exp ratio is different from 1
if exp_w != in_w:
self.expand = nn.Conv2d(
in_w, exp_w,
kernel_size=1, stride=1, padding=0, bias=False
)
self.expand_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.expand_swish = act_fun()
# Depthwise: 3x3 dwise, BN, Swish
self.dwise = nn.Conv2d(
exp_w, exp_w,
kernel_size=kernel, stride=stride, groups=exp_w, bias=False,
# Hacky padding to preserve res (supports only 3x3 and 5x5)
padding=(1 if kernel == 3 else 2)
)
self.dwise_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.dwise_swish = act_fun()
# SE: x * F_ex(x)
if cfg.EFFICIENT_NET.SE_ENABLED:
se_w = int(in_w * se_r)
self.se = SE(exp_w, se_w, act_fun)
# Linear projection: 1x1, BN
self.lin_proj = nn.Conv2d(
exp_w, out_w,
kernel_size=1, stride=1, padding=0, bias=False
)
self.lin_proj_bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
self.lin_proj_swish = act_fun()
# Skip connections on blocks w/ same in and out shapes (MN-V2, Fig. 4)
self.has_skip = (stride == 1) and (in_w == out_w)
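        # Hedged example: a stride-1 block with in_w == out_w == 24 is residual
        # (x + F(x)); any stride-2 or width-changing block reduces to plain F(x).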
def forward(self, x):
f_x = x
# Expansion
if self.expand:
f_x = self.expand_swish(self.expand_bn(self.expand(f_x)))
# Depthwise
f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
# SE
if cfg.EFFICIENT_NET.SE_ENABLED:
f_x = self.se(f_x)
# Linear projection
f_x = self.lin_proj_bn(self.lin_proj(f_x))
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
f_x = self.lin_proj_swish(f_x)
# Skip connection
if self.has_skip:
# Drop connect
if self.training and cfg.EFFICIENT_NET.DC_RATIO > 0.0:
if cfg.EFFICIENT_NET.DC_IMP == 'tf':
f_x = drop_connect_tf(f_x, cfg.EFFICIENT_NET.DC_RATIO)
else:
f_x = drop_connect_pt(f_x, cfg.EFFICIENT_NET.DC_RATIO)
f_x = x + f_x
return f_x
class MBTalkConv(nn.Module):
"""Mobile inverted bottleneck block with SE (MBConv)."""
def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, seed=None, exp_w=None):
super(MBTalkConv, self).__init__()
self.seed=seed
self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, act_fun, exp_w)
def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, act_fun, exp_w):
# Expansion: 1x1, BN, Swish
self.expand = None
if int(exp_r)==1:
exp_w = in_w
else:
self.expand = TalkConv2d(
in_w, exp_w, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.expand_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.expand_swish = act_fun()
# Depthwise: 3x3 dwise, BN, Swish
self.dwise = nn.Conv2d(
exp_w, exp_w,
kernel_size=kernel, stride=stride, groups=exp_w, bias=False,
# Hacky padding to preserve res (supports only 3x3 and 5x5)
padding=(1 if kernel == 3 else 2)
)
self.dwise_bn = nn.BatchNorm2d(
exp_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.dwise_swish = act_fun()
# SE: x * F_ex(x)
if cfg.EFFICIENT_NET.SE_ENABLED:
se_w = int(in_w * se_r)
self.se = SE(exp_w, se_w, act_fun)
# Linear projection: 1x1, BN
self.lin_proj = TalkConv2d(
exp_w, out_w, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.lin_proj_bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
self.lin_proj_swish = act_fun()
# Skip connections on blocks w/ same in and out shapes (MN-V2, Fig. 4)
self.has_skip = (stride == 1) and (in_w == out_w)
def forward(self, x):
f_x = x
# Expansion
if self.expand:
f_x = self.expand_swish(self.expand_bn(self.expand(f_x)))
# Depthwise
f_x = self.dwise_swish(self.dwise_bn(self.dwise(f_x)))
# SE
if cfg.EFFICIENT_NET.SE_ENABLED:
f_x = self.se(f_x)
# Linear projection
f_x = self.lin_proj_bn(self.lin_proj(f_x))
# Nonlinear projection
if not cfg.EFFICIENT_NET.LIN_PROJ:
f_x = self.lin_proj_swish(f_x)
# Skip connection
if self.has_skip:
# Drop connect
if self.training and cfg.EFFICIENT_NET.DC_RATIO > 0.0:
if cfg.EFFICIENT_NET.DC_IMP == 'tf':
f_x = drop_connect_tf(f_x, cfg.EFFICIENT_NET.DC_RATIO)
else:
f_x = drop_connect_pt(f_x, cfg.EFFICIENT_NET.DC_RATIO)
f_x = x + f_x
return f_x
class Stage(nn.Module):
"""EfficientNet stage."""
def __init__(self, in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w=None):
super(Stage, self).__init__()
self._construct_class(in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w)
def _construct_class(self, in_w, exp_r, kernel, stride, se_r, out_w, d, act_fun, exp_w):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(cfg.RGRAPH.SEED_GRAPH*100)
# Construct a sequence of blocks
for i in range(d):
trans_fun = get_conv(cfg.RESNET.TRANS_FUN)
# Stride and input width apply to the first block of the stage
stride_b = stride if i == 0 else 1
in_w_b = in_w if i == 0 else out_w
# Construct the block
self.add_module(
'b{}'.format(i + 1),
trans_fun(in_w_b, exp_r, kernel, stride_b, se_r, out_w, act_fun, seed=seed, exp_w=exp_w)
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
def forward(self, x):
for block in self.children():
x = block(x)
return x
class StemIN(nn.Module):
"""EfficientNet stem for ImageNet."""
def __init__(self, in_w, out_w, act_fun):
super(StemIN, self).__init__()
self._construct_class(in_w, out_w, act_fun)
def _construct_class(self, in_w, out_w, act_fun):
self.conv = nn.Conv2d(
in_w, out_w,
kernel_size=3, stride=2, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(
out_w, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.swish = act_fun()
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class EfficientNet(nn.Module):
"""EfficientNet model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['imagenet'], \
'Training on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['imagenet'], \
'Testing on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
assert cfg.EFFICIENT_NET.HEAD_TYPE in ['conv_head', 'simple_head', 'linear_head'], \
'Unsupported head type: {}'.format(cfg.EFFICIENT_NET.HEAD_TYPE)
super(EfficientNet, self).__init__()
self._construct_class(
stem_w=cfg.EFFICIENT_NET.STEM_W,
ds=cfg.EFFICIENT_NET.DEPTHS,
ws=cfg.EFFICIENT_NET.WIDTHS,
exp_rs=cfg.EFFICIENT_NET.EXP_RATIOS,
se_r=cfg.EFFICIENT_NET.SE_RATIO,
ss=cfg.EFFICIENT_NET.STRIDES,
ks=cfg.EFFICIENT_NET.KERNELS,
head_type=cfg.EFFICIENT_NET.HEAD_TYPE,
head_w=cfg.EFFICIENT_NET.HEAD_W,
act_type=cfg.EFFICIENT_NET.ACT_FUN,
nc=cfg.MODEL.NUM_CLASSES
)
self.apply(nu.init_weights)
def _construct_class(
self, stem_w, ds, ws, exp_rs, se_r, ss, ks,
head_type, head_w, act_type, nc
):
"""Constructs imagenet models."""
# Group params by stage
stage_params = list(zip(ds, ws, exp_rs, ss, ks))
# Activation function
act_fun = get_act_fun(act_type)
# Set dim for each stage
dim_list = cfg.RGRAPH.DIM_LIST
expdim_list = [int(cfg.EFFICIENT_NET.WIDTHS[i]*cfg.EFFICIENT_NET.EXP_RATIOS[i])
for i in range(len(cfg.EFFICIENT_NET.WIDTHS))]
# Construct the stems
self.stem = StemIN(3, stem_w, act_fun)
prev_w = stem_w
# Construct the stages
for i, (d, w, exp_r, stride, kernel) in enumerate(stage_params):
if cfg.RESNET.TRANS_FUN != 'mbconv_transform':
w = dim_list[i]
exp_w = expdim_list[i]
self.add_module(
's{}'.format(i + 1),
Stage(prev_w, exp_r, kernel, stride, se_r, w, d, act_fun, exp_w=exp_w)
)
prev_w = w
# Construct the head
if head_type == 'conv_head':
self.head = ConvHead(prev_w, head_w, nc, act_fun)
elif head_type == 'linear_head':
self.head = LinearHead(prev_w, head_w, nc, act_fun)
else:
self.head = SimpleHead(prev_w, nc)
def forward(self, x):
for module in self.children():
x = module(x)
return x | 15,385 | 33.809955 | 108 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/resnet.py |
"""ResNet or ResNeXt model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
import time
import pdb
logger = lu.get_logger(__name__)
# Stage depths for an ImageNet model {model depth -> (d2, d3, d4, d5)}
_IN_MODEL_STAGE_DS = {
18: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
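# Hedged reading of the table above: e.g. depth 34 -> (3, 4, 6, 3) blocks in
# stages s2-s5; the tuple counts blocks per stage, whether basic (18/34) or
# bottleneck (50/101/152) transforms are configured.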
def get_trans_fun(name):
"""Retrieves the transformation function by name."""
trans_funs = {
'channelbasic_transform': ChannelBasicTransform,
'groupbasictalk_transform': GroupBasicTalkTransform,
'channelsep_transform': ChannelSepTransform,
'groupseptalk_transform': GroupSepTalkTransform,
'bottleneck_transform': BottleneckTransform,
'talkbottleneck_transform': TalkBottleneckTransform,
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
class ChannelBasicTransform(nn.Module):
"""Basic transformation: 3x3, 3x3"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(ChannelBasicTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN
self.b = nn.Conv2d(
dim_out, dim_out, kernel_size=3,
stride=1, padding=1, bias=False
)
self.b_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupBasicTalkTransform(nn.Module):
"""Basic transformation: 3x3, 3x3, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
self.seed = seed
super(GroupBasicTalkTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=stride, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN
self.b = TalkConv2d(
dim_out, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=1, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.b_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ChannelSepTransform(nn.Module):
"""Separable transformation: 3x3, 3x3"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(ChannelSepTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# ReLU, 3x3, BN, 1x1, BN
self.a_3x3 = nn.Conv2d(
dim_in, dim_in, kernel_size=3,
stride=stride, padding=1, bias=False, groups=dim_in
)
self.a_1x1 = nn.Conv2d(
dim_in, dim_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.a_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# ReLU, 3x3, BN, 1x1, BN
self.b_3x3 = nn.Conv2d(
dim_out, dim_out, kernel_size=3,
stride=1, padding=1, bias=False, groups=dim_out
)
self.b_1x1 = nn.Conv2d(
dim_out, dim_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.b_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_1x1_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupSepTalkTransform(nn.Module):
"""Separable transformation: 3x3, 3x3, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
self.seed = seed
super(GroupSepTalkTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# ReLU, 3x3, BN, 1x1, BN
self.a_3x3 = nn.Conv2d(
dim_in, dim_in, kernel_size=3,
stride=stride, padding=1, bias=False, groups=dim_in
)
self.a_1x1 = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# ReLU, 3x3, BN, 1x1, BN
self.b_3x3 = nn.Conv2d(
dim_out, dim_out, kernel_size=3,
stride=1, padding=1, bias=False, groups=dim_out
)
self.b_1x1 = TalkConv2d(
dim_out, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.b_1x1_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.b_1x1_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class BottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3, 1x1"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(BottleneckTransform, self).__init__()
dim_inner = int(round(dim_out / 4))
self._construct_class(dim_in, dim_out, stride, dim_inner, num_gs, seed)
def _construct_class(self, dim_in, dim_out, stride, dim_inner, num_gs, seed):
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
# (str1x1, str3x3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride)
(str1x1, str3x3) = (1, stride)
# 1x1, BN, ReLU
self.a = nn.Conv2d(
dim_in, dim_inner, kernel_size=1,
stride=str1x1, padding=0, bias=False
)
self.a_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN, ReLU
self.b = nn.Conv2d(
dim_inner, dim_inner, kernel_size=3,
stride=str3x3, padding=1, groups=num_gs, bias=False
)
self.b_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.b_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 1x1, BN
self.c = nn.Conv2d(
dim_inner, dim_out, kernel_size=1,
stride=1, padding=0, bias=False
)
self.c_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class TalkBottleneckTransform(nn.Module):
"""Bottleneck transformation: 1x1, 3x3, 1x1, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(TalkBottleneckTransform, self).__init__()
dim_inner = int(round(dim_out / 4))
self.seed = seed
self._construct_class(dim_in, dim_out, stride, dim_inner, num_gs, seed)
def _construct_class(self, dim_in, dim_out, stride, dim_inner, num_gs, seed):
# MSRA -> stride=2 is on 1x1; TH/C2 -> stride=2 is on 3x3
# (str1x1, str3x3) = (stride, 1) if cfg.RESNET.STRIDE_1X1 else (1, stride)
(str1x1, str3x3) = (1, stride)
# 1x1, BN, ReLU
self.a = TalkConv2d(
dim_in, dim_inner, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=str1x1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 3x3, BN, ReLU
self.b = TalkConv2d(
dim_inner, dim_inner, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=str3x3, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.b_bn = nn.BatchNorm2d(
dim_inner, eps=cfg.BN.EPS, momentum=cfg.BN.MOM
)
self.b_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
# 1x1, BN
self.c = TalkConv2d(
dim_inner, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.c_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.c_bn.final_bn = True
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResBlock(nn.Module):
"""Residual block: x + F(x)"""
def __init__(
self, dim_in, dim_out, stride, trans_fun, dim_inner=None, num_gs=1, seed=None):
super(ResBlock, self).__init__()
self.seed = seed
self._construct_class(dim_in, dim_out, stride, trans_fun, dim_inner, num_gs, seed)
def _add_skip_proj(self, dim_in, dim_out, stride):
if 'group' in cfg.RESNET.TRANS_FUN and 'share' not in cfg.RESNET.TRANS_FUN:
self.proj = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=stride, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
else:
self.proj = nn.Conv2d(
dim_in, dim_out, kernel_size=1,
stride=stride, padding=0, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
def _construct_class(self, dim_in, dim_out, stride, trans_fun, dim_inner, num_gs, seed):
# Use skip connection with projection if dim or res change
self.proj_block = (dim_in != dim_out) or (stride != 1)
if self.proj_block:
self._add_skip_proj(dim_in, dim_out, stride)
self.f = trans_fun(dim_in, dim_out, stride, dim_inner, num_gs, seed)
self.act = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
if self.proj_block:
x = self.bn(self.proj(x)) + self.f(x)
else:
x = x + self.f(x)
x = self.act(x)
return x
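# Hedged note on ResBlock: the 1x1 projection shortcut is only built when the
# channel count or spatial resolution changes; otherwise the identity skip is
# used, matching the usual projection-on-dimension-change ResNet convention.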
class ResStage(nn.Module):
"""Stage of ResNet."""
def __init__(
self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
super(ResStage, self).__init__()
self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
for i in range(num_bs):
# Stride and dim_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_dim_in = dim_in if i == 0 else dim_out
# Retrieve the transformation function
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
# Construct the block
res_block = ResBlock(
b_dim_in, dim_out, b_stride, trans_fun, dim_inner, num_gs, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}'.format(i + 1), res_block)
for j in range(cfg.RGRAPH.ADD_1x1):
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN + '1x1')
# Construct the block
res_block = ResBlock(
dim_out, dim_out, 1, trans_fun, dim_inner, num_gs, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}_{}1x1'.format(i + 1, j + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class ResStem(nn.Module):
"""Stem of ResNet."""
def __init__(self, dim_in, dim_out):
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(ResStem, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar(dim_in, dim_out)
else:
self._construct_imagenet(dim_in, dim_out)
def _construct_cifar(self, dim_in, dim_out):
# 3x3, BN, ReLU
# self.conv = nn.Conv2d(
# dim_in, dim_out, kernel_size=3,
# stride=1, padding=1, bias=False
# )
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=7,
stride=1, padding=3, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def _construct_imagenet(self, dim_in, dim_out):
# 7x7, BN, ReLU, pool
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=7,
stride=2, padding=3, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ResHead(nn.Module):
"""ResNet head."""
def __init__(self, dim_in, num_classes):
super(ResHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class ResNet(nn.Module):
"""ResNet model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Training ResNet on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Testing ResNet on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(ResNet, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar()
elif cfg.TRAIN.DATASET == 'cifar100':
self._construct_cifar()
else:
self._construct_imagenet()
self.apply(nu.init_weights)
def _construct_cifar(self):
assert (cfg.MODEL.DEPTH - 2) % 6 == 0, \
'Model depth should be of the format 6n + 2 for cifar'
logger.info('Constructing: ResNet-{}, cifar'.format(cfg.MODEL.DEPTH))
# Each stage has the same number of blocks for cifar
num_blocks = int((cfg.MODEL.DEPTH - 2) / 6)
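        # Hedged example: cfg.MODEL.DEPTH = 20 gives (20 - 2) / 6 = 3 blocks
        # per stage, i.e. the classic ResNet-20 layout.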
# length = num of stages (excluding stem and head)
dim_list = cfg.RGRAPH.DIM_LIST
        # Stage 1: (N, 3, 32, 32) -> (N, 64, 32, 32)
        # self.s1 = ResStem(dim_in=3, dim_out=16)
        self.s1 = ResStem(dim_in=3, dim_out=64)
        # Stage 2: (N, 64, 32, 32) -> (N, dim_list[0], 32, 32)
        # self.s2 = ResStage(dim_in=16, dim_out=dim_list[0], stride=1, num_bs=num_blocks)
        self.s2 = ResStage(dim_in=64, dim_out=dim_list[0], stride=1, num_bs=num_blocks)
        # Stage 3: (N, dim_list[0], 32, 32) -> (N, dim_list[1], 16, 16)
        self.s3 = ResStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_blocks)
        # Stage 4: (N, dim_list[1], 16, 16) -> (N, dim_list[2], 8, 8)
        self.s4 = ResStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_blocks)
        # Head: (N, dim_list[2], 8, 8) -> (N, num_classes)
        self.head = ResHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
    # ImageNet (and smaller ImageNet-style datasets)
def _construct_imagenet(self):
logger.info('Constructing: ResNet-{}, Imagenet'.format(cfg.MODEL.DEPTH))
# Retrieve the number of blocks per stage (excluding base)
(d2, d3, d4, d5) = _IN_MODEL_STAGE_DS[cfg.MODEL.DEPTH]
# Compute the initial inner block dim
dim_list = cfg.RGRAPH.DIM_LIST
print(dim_list)
# Stage 1: (N, 3, 224, 224) -> (N, 64, 56, 56)
self.s1 = ResStem(dim_in=3, dim_out=64)
# Stage 2: (N, 64, 56, 56) -> (N, 256, 56, 56)
self.s2 = ResStage(
dim_in=64, dim_out=dim_list[0], stride=1, num_bs=d2
)
# Stage 3: (N, 256, 56, 56) -> (N, 512, 28, 28)
self.s3 = ResStage(
dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=d3
)
        # Stage 4: (N, 512, 28, 28) -> (N, 1024, 14, 14)
self.s4 = ResStage(
dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=d4
)
# Stage 5: (N, 1024, 14, 14) -> (N, 2048, 7, 7)
self.s5 = ResStage(
dim_in=dim_list[2], dim_out=dim_list[3], stride=2, num_bs=d5
)
# Head: (N, 2048, 7, 7) -> (N, num_classes)
self.head = ResHead(dim_in=dim_list[3], num_classes=cfg.MODEL.NUM_CLASSES)
def forward(self, x):
for module in self.children():
x = module(x)
return x
| 20,015 | 37.198473 | 108 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/cnn.py |
"""CNN model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
logger = lu.get_logger(__name__)
def get_trans_fun(name):
"""Retrieves the transformation function by name."""
trans_funs = {
'convbasic_transform': ConvBasicTransform,
'symconvbasic_transform': SymConvBasicTransform,
'convtalk_transform': ConvTalkTransform, # relational graph
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
class ConvBasicTransform(nn.Module):
"""Basic transformation: 3x3"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(ConvBasicTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.a_bn.final_bn = True
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SymConvBasicTransform(nn.Module):
"""Basic transformation: 3x3 conv, symmetric"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
super(SymConvBasicTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = SymConv2d(
dim_in, dim_out, kernel_size=3,
stride=stride, padding=1, bias=False
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.a_bn.final_bn = True
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class ConvTalkTransform(nn.Module):
"""Basic transformation: 3x3 conv, relational graph"""
def __init__(self, dim_in, dim_out, stride, dim_inner=None, num_gs=1, seed=None):
self.seed = seed
super(ConvTalkTransform, self).__init__()
self._construct_class(dim_in, dim_out, stride)
def _construct_class(self, dim_in, dim_out, stride):
# 3x3, BN, ReLU
self.a = TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=stride, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
self.a_bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
# self.a_bn.final_bn = True
self.a_relu = nn.ReLU(inplace=cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class CNNStage(nn.Module):
"""Stage of CNN."""
def __init__(
self, dim_in, dim_out, stride, num_bs, dim_inner=None, num_gs=1):
super(CNNStage, self).__init__()
self._construct_class(dim_in, dim_out, stride, num_bs, dim_inner, num_gs)
def _construct_class(self, dim_in, dim_out, stride, num_bs, dim_inner, num_gs):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
for i in range(num_bs):
# Stride and dim_in apply to the first block of the stage
b_stride = stride if i == 0 else 1
b_dim_in = dim_in if i == 0 else dim_out
# Retrieve the transformation function
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
# Construct the block
res_block = trans_fun(
b_dim_in, dim_out, b_stride, dim_inner, num_gs, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}'.format(i + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class CNNStem(nn.Module):
"""Stem of CNN."""
def __init__(self, dim_in, dim_out):
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(CNNStem, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar(dim_in, dim_out)
elif cfg.TRAIN.DATASET == 'cifar100':
self._construct_cifar(dim_in, dim_out)
else:
self._construct_imagenet(dim_in, dim_out)
def _construct_cifar(self, dim_in, dim_out):
# 3x3, BN, ReLU
if cfg.RGRAPH.STEM_MODE == 'default':
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=1, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
elif cfg.RGRAPH.STEM_MODE == 'downsample':
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=1, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS,
momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def _construct_imagenet(self, dim_in, dim_out):
# 3x3, BN, ReLU, pool
self.conv = nn.Conv2d(
dim_in, dim_out, kernel_size=3,
stride=2, padding=1, bias=False
)
self.bn = nn.BatchNorm2d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class CNNHead(nn.Module):
"""CNN head."""
def __init__(self, dim_in, num_classes):
super(CNNHead, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.dropout = nn.Dropout(p=0.15)
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.fc(x)
return x
class CNN(nn.Module):
"""CNN model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Training CNN on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Testing CNN on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(CNN, self).__init__()
self._construct()
self.apply(nu.init_weights)
def _construct(self):
# Each stage has the same number of blocks for cifar
dim_list = cfg.RGRAPH.DIM_LIST
num_bs = cfg.MODEL.LAYERS // 3
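        # Hedged example: cfg.MODEL.LAYERS = 6 gives num_bs = 2 conv blocks in
        # each of the three stages s2-s4.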
self.s1 = CNNStem(dim_in=3, dim_out=cfg.RGRAPH.DIM_FIRST)
self.s2 = CNNStage(dim_in=cfg.RGRAPH.DIM_FIRST, dim_out=dim_list[0], stride=2, num_bs=num_bs)
self.s3 = CNNStage(dim_in=dim_list[0], dim_out=dim_list[1], stride=2, num_bs=num_bs)
self.s4 = CNNStage(dim_in=dim_list[1], dim_out=dim_list[2], stride=2, num_bs=num_bs)
# self.s5 = CNNStage(dim_in=dim_list[2], dim_out=dim_list[3], stride=2, num_bs=num_bs)
self.head = CNNHead(dim_in=dim_list[2], num_classes=cfg.MODEL.NUM_CLASSES)
def forward(self, x):
for module in self.children():
x = module(x)
return x
| 17,388 | 34.779835 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/vgg.py |
"""VGG example"""
import torch.nn as nn
import torch.nn.functional as F
from pycls.config import cfg
import pycls.utils.net as nu
from .relation_graph import *
class VGG(nn.Module):
def __init__(self, num_classes=1024):
super(VGG, self).__init__()
self.seed = cfg.RGRAPH.SEED_GRAPH
def conv_bn(dim_in, dim_out, stride, stem=False):
if stem:
conv = get_conv('convbasic_transform', dim_in, dim_out, stride)
else:
conv = get_conv(cfg.RESNET.TRANS_FUN, dim_in, dim_out, stride)
return nn.Sequential(
conv,
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True)
)
def get_conv(name, dim_in, dim_out, stride=1):
if not cfg.RGRAPH.KEEP_GRAPH:
self.seed += 1
if name == 'convbasic_transform':
return nn.Conv2d(dim_in, dim_out,
kernel_size=3, stride=stride,
padding=1, bias=False)
elif name == 'convtalk_transform':
return TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=3,
stride=stride, padding=1, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE,
directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P,
talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
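        # Note: the stem (stem=True above) always uses a dense 3x3 Conv2d;
        # only the later 3x3 convolutions are swapped for the relational-graph
        # TalkConv2d when cfg.RESNET.TRANS_FUN is 'convtalk_transform'.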
self.dim_list = cfg.RGRAPH.DIM_LIST
# print(self.dim_list)
self.model = nn.Sequential(
conv_bn(3, 64, 1, stem=True),
conv_bn(64, self.dim_list[0], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[0], self.dim_list[1], 1),
conv_bn(self.dim_list[1], self.dim_list[1], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[1], self.dim_list[2], 1),
conv_bn(self.dim_list[2], self.dim_list[2], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[2], self.dim_list[3], 1),
conv_bn(self.dim_list[3], self.dim_list[3], 1),
nn.MaxPool2d(kernel_size=2, stride=2),
conv_bn(self.dim_list[3], self.dim_list[3], 1),
conv_bn(self.dim_list[3], self.dim_list[3], 1),
)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(self.dim_list[3], num_classes)
self.apply(nu.init_weights)
def forward(self, x):
x = self.model(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
| 3,097 | 35.880952 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/mlp.py |
"""MLP model."""
import torch.nn as nn
import torch
from pycls.config import cfg
import pycls.utils.logging as lu
import pycls.utils.net as nu
from .relation_graph import *
import time
import pdb
logger = lu.get_logger(__name__)
def get_trans_fun(name):
"""Retrieves the transformation function by name."""
trans_funs = {
'linear_transform': LinearTransform,
'symlinear_transform': SymLinearTransform,
'grouplinear_transform': GroupLinearTransform,
'groupshufflelinear_transform': GroupShuffleLinearTransform,
'talklinear_transform': TalkLinearTransform, # relational graph
}
assert name in trans_funs.keys(), \
'Transformation function \'{}\' not supported'.format(name)
return trans_funs[name]
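# Usage sketch (hypothetical dims; the real name comes from cfg.RESNET.TRANS_FUN,
# and TalkLinear moves its mask to CUDA, so a GPU is assumed):
# trans_fun = get_trans_fun('talklinear_transform')
# block = trans_fun(128, 128, seed=1)   # -> a TalkLinearTransform module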
class LinearTransform(nn.Module):
"""Basic transformation: linear"""
def __init__(self, dim_in, dim_out, seed=None):
super(LinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
        # Linear, BN, ReLU
self.a = nn.Linear(
dim_in, dim_out, bias=False
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class SymLinearTransform(nn.Module):
"""Basic transformation: linear, symmetric"""
def __init__(self, dim_in, dim_out, seed=None):
super(SymLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
        # Linear, BN, ReLU
self.a = SymLinear(
dim_in, dim_out, bias=False
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupLinearTransform(nn.Module):
"""Basic transformation: linear, group"""
def __init__(self, dim_in, dim_out, seed=None):
super(GroupLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
        # Linear, BN, ReLU
self.a = GroupLinear(
dim_in, dim_out, bias=False, group_size=cfg.RGRAPH.GROUP_SIZE
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class GroupShuffleLinearTransform(nn.Module):
"""Basic transformation: linear, shuffle"""
def __init__(self, dim_in, dim_out, seed=None):
super(GroupShuffleLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
        # Linear, BN, ReLU
self.a = GroupLinear(
dim_in, dim_out, bias=False, group_size=cfg.RGRAPH.GROUP_SIZE
)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
self.shuffle_shape = (dim_out // cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.GROUP_NUM)
def forward(self, x):
x = self.a(x)
x = x.view(x.shape[0], self.shuffle_shape[0], self.shuffle_shape[1]).permute(0, 2, 1).contiguous()
x = x.view(x.shape[0], x.shape[1] * x.shape[2])
x = self.a_bn(x)
x = self.relu(x)
return x
class TalkLinearTransform(nn.Module):
"""Basic transformation: linear, relational graph"""
def __init__(self, dim_in, dim_out, seed=None):
self.seed = seed
super(TalkLinearTransform, self).__init__()
self._construct_class(dim_in, dim_out)
def _construct_class(self, dim_in, dim_out):
self.a = TalkLinear(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE, sparsity=cfg.RGRAPH.SPARSITY,
p=cfg.RGRAPH.P, talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed)
self.a_bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.a_bn.final_bn = True
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
for layer in self.children():
x = layer(x)
return x
class MLPStage(nn.Module):
"""Stage of MLPNet."""
def __init__(
self, dim_in, dim_out, num_bs):
super(MLPStage, self).__init__()
self._construct_class(dim_in, dim_out, num_bs)
def _construct_class(self, dim_in, dim_out, num_bs):
if cfg.RGRAPH.KEEP_GRAPH:
seed = cfg.RGRAPH.SEED_GRAPH
else:
seed = int(dim_out * 100 * cfg.RGRAPH.SPARSITY)
for i in range(num_bs):
b_dim_in = dim_in if i == 0 else dim_out
trans_fun = get_trans_fun(cfg.RESNET.TRANS_FUN)
res_block = trans_fun(
b_dim_in, dim_out, seed=seed
)
if not cfg.RGRAPH.KEEP_GRAPH:
seed += 1
self.add_module('b{}'.format(i + 1), res_block)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class MLPStem(nn.Module):
"""Stem of MLPNet."""
def __init__(self, dim_in, dim_out):
super(MLPStem, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar(dim_in, dim_out)
else:
raise NotImplementedError
def _construct_cifar(self, dim_in, dim_out):
self.linear = nn.Linear(
dim_in, dim_out, bias=False
)
self.bn = nn.BatchNorm1d(dim_out, eps=cfg.BN.EPS, momentum=cfg.BN.MOM)
self.relu = nn.ReLU(cfg.MEM.RELU_INPLACE)
def forward(self, x):
x = x.view(x.size(0), -1)
for layer in self.children():
x = layer(x)
return x
class MLPHead(nn.Module):
"""MLPNet head."""
def __init__(self, dim_in, num_classes):
super(MLPHead, self).__init__()
self.fc = nn.Linear(dim_in, num_classes, bias=True)
def forward(self, x):
x = self.fc(x)
return x
class MLPNet(nn.Module):
"""MLPNet model."""
def __init__(self):
assert cfg.TRAIN.DATASET in ['cifar10'], \
'Training MLPNet on {} is not supported'.format(cfg.TRAIN.DATASET)
assert cfg.TEST.DATASET in ['cifar10'], \
'Testing MLPNet on {} is not supported'.format(cfg.TEST.DATASET)
assert cfg.TRAIN.DATASET == cfg.TEST.DATASET, \
'Train and test dataset must be the same for now'
super(MLPNet, self).__init__()
if cfg.TRAIN.DATASET == 'cifar10':
self._construct_cifar()
else:
raise NotImplementedError
self.apply(nu.init_weights)
def _construct_cifar(self):
num_layers = cfg.MODEL.LAYERS
dim_inner = cfg.RGRAPH.DIM_LIST[0]
dim_first = cfg.RGRAPH.DIM_FIRST
self.s1 = MLPStem(dim_in=3072, dim_out=dim_first)
self.s2 = MLPStage(dim_in=dim_first, dim_out=dim_inner, num_bs=num_layers)
self.head = MLPHead(dim_in=dim_inner, num_classes=cfg.MODEL.NUM_CLASSES)
def forward(self, x):
for module in self.children():
x = module(x)
return x
| 8,012 | 30.300781 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/model_builder.py |
"""Model construction functions."""
import torch
from pycls.config import cfg
from pycls.models.resnet import ResNet
from pycls.models.mlp import MLPNet
from pycls.models.cnn import CNN
from pycls.models.mobilenet import MobileNetV1
from pycls.models.efficientnet import EfficientNet
from pycls.models.vgg import VGG
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
logger = lu.get_logger(__name__)
# Supported model types
_MODEL_TYPES = {
'resnet': ResNet,
'mlpnet': MLPNet,
'cnn': CNN,
'mobilenet': MobileNetV1,
'efficientnet': EfficientNet,
'vgg': VGG,
}
def build_model():
"""Builds the model."""
assert cfg.MODEL.TYPE in _MODEL_TYPES.keys(), \
'Model type \'{}\' not supported'.format(cfg.MODEL.TYPE)
assert cfg.NUM_GPUS <= torch.cuda.device_count(), \
'Cannot use more GPU devices than available'
# Construct the model
model = _MODEL_TYPES[cfg.MODEL.TYPE]()
# Determine the GPU used by the current process
cur_device = torch.cuda.current_device()
# Transfer the model to the current GPU device
model = model.cuda(device=cur_device)
# Use multi-process data parallel model in the multi-gpu setting
if cfg.NUM_GPUS > 1:
# Make model replica operate on the current device
model = torch.nn.parallel.DistributedDataParallel(
module=model,
device_ids=[cur_device],
output_device=cur_device
)
return model
## auto match flop
def build_model_stats(mode='flops'):
"""Builds the model."""
assert cfg.MODEL.TYPE in _MODEL_TYPES.keys(), \
'Model type \'{}\' not supported'.format(cfg.MODEL.TYPE)
assert cfg.NUM_GPUS <= torch.cuda.device_count(), \
'Cannot use more GPU devices than available'
# Construct the model
model = _MODEL_TYPES[cfg.MODEL.TYPE]()
if mode == 'flops':
flops = mu.flops_count(model)
return flops
else:
params = mu.params_count(model)
return params
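# Sketch (hypothetical): experiment scripts can use these counters to tune
# cfg.RGRAPH.DIM_LIST until different graph structures match a FLOP budget.
# flops = build_model_stats('flops')
# params = build_model_stats('params')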
| 2,355 | 30 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/mobilenet.py |
"""MobileNet example"""
import torch.nn as nn
import torch.nn.functional as F
from pycls.config import cfg
import pycls.utils.net as nu
from .relation_graph import *
class MobileNetV1(nn.Module):
def __init__(self, num_classes=1024):
super(MobileNetV1, self).__init__()
if cfg.RGRAPH.KEEP_GRAPH:
self.seed = cfg.RGRAPH.SEED_GRAPH
else:
self.seed = int(cfg.RGRAPH.SEED_GRAPH * 100)
def conv_bn(dim_in, dim_out, stride):
return nn.Sequential(
nn.Conv2d(dim_in, dim_out, 3, stride, 1, bias=False),
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True)
)
def get_conv(name, dim_in, dim_out):
if not cfg.RGRAPH.KEEP_GRAPH:
self.seed += 1
if name == 'channelbasic_transform':
return nn.Conv2d(dim_in, dim_out, 1, 1, 0, bias=False)
elif name == 'groupbasictalk_transform':
return TalkConv2d(
dim_in, dim_out, cfg.RGRAPH.GROUP_NUM, kernel_size=1,
stride=1, padding=0, bias=False,
message_type=cfg.RGRAPH.MESSAGE_TYPE,
directed=cfg.RGRAPH.DIRECTED, agg=cfg.RGRAPH.AGG_FUNC,
sparsity=cfg.RGRAPH.SPARSITY, p=cfg.RGRAPH.P,
talk_mode=cfg.RGRAPH.TALK_MODE, seed=self.seed
)
def conv_dw(dim_in, dim_out, stride):
conv1x1 = get_conv(cfg.RESNET.TRANS_FUN, dim_in, dim_out)
return nn.Sequential(
nn.Conv2d(dim_in, dim_in, 3, stride, 1, groups=dim_in,
bias=False),
nn.BatchNorm2d(dim_in),
nn.ReLU(inplace=True),
conv1x1,
nn.BatchNorm2d(dim_out),
nn.ReLU(inplace=True),
)
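        # conv_dw is a depthwise-separable block: a per-channel 3x3 depthwise
        # conv followed by a 1x1 pointwise conv. Only the 1x1 stage, where
        # channels actually mix, is replaced by the relational-graph variant.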
self.dim_list = cfg.RGRAPH.DIM_LIST
# print(self.dim_list)
self.model = nn.Sequential(
conv_bn(3, 32, 2),
conv_dw(32, self.dim_list[1], 1),
conv_dw(self.dim_list[1], self.dim_list[2], 2),
conv_dw(self.dim_list[2], self.dim_list[2], 1),
conv_dw(self.dim_list[2], self.dim_list[3], 2),
conv_dw(self.dim_list[3], self.dim_list[3], 1),
conv_dw(self.dim_list[3], self.dim_list[4], 2),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[4], 1),
conv_dw(self.dim_list[4], self.dim_list[5], 2),
conv_dw(self.dim_list[5], self.dim_list[5], 1),
)
self.fc = nn.Linear(self.dim_list[5], num_classes)
self.apply(nu.init_weights)
def forward(self, x):
x = self.model(x)
x = F.avg_pool2d(x, 7)
x = x.view(-1, self.dim_list[5])
x = self.fc(x)
return x
| 3,404 | 35.223404 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/optimizer.py |
"""Optimizer."""
import torch
from pycls.config import cfg
import pycls.utils.lr_policy as lr_policy
def construct_optimizer(model):
"""Constructs the optimizer.
Note that the momentum update in PyTorch differs from the one in Caffe2.
In particular,
Caffe2:
V := mu * V + lr * g
p := p - V
PyTorch:
V := mu * V + g
p := p - lr * V
where V is the velocity, mu is the momentum factor, lr is the learning rate,
g is the gradient and p are the parameters.
Since V is defined independently of the learning rate in PyTorch,
when the learning rate is changed there is no need to perform the
momentum correction by scaling V (unlike in the Caffe2 case).
"""
return torch.optim.SGD(
model.parameters(),
lr=cfg.OPTIM.BASE_LR,
momentum=cfg.OPTIM.MOMENTUM,
weight_decay=cfg.OPTIM.WEIGHT_DECAY,
dampening=cfg.OPTIM.DAMPENING,
nesterov=cfg.OPTIM.NESTEROV
)
def get_epoch_lr(cur_epoch):
"""Retrieves the lr for the given epoch (as specified by the lr policy)."""
return lr_policy.get_epoch_lr(cur_epoch)
def set_lr(optimizer, new_lr):
"""Sets the optimizer lr to the specified value."""
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
| 1,678 | 27.457627 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/models/relation_graph.py |
"""Relational graph modules"""
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.nn.init as init
import networkx as nx
import numpy as np
from torch.nn.modules.utils import _pair
from torch.nn.modules.conv import _ConvNd
from torch.autograd import Function
from itertools import repeat
from networkx.utils import py_random_state
from pycls.datasets.load_graph import load_graph
import pdb
import time
import random
def compute_count(channel, group):
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
return out
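# e.g. compute_count(10, 4) -> array([3, 3, 2, 2]): the remainder (10 % 4 = 2)
# is spread over the first two groups, so group sizes differ by at most one.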
@py_random_state(3)
def ws_graph(n, k, p, seed=1):
"""Returns a ws-flex graph, k can be real number in [2,n]
"""
assert k >= 2 and k <= n
# compute number of edges:
edge_num = int(round(k * n / 2))
count = compute_count(edge_num, n)
# print(count)
G = nx.Graph()
for i in range(n):
source = [i] * count[i]
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
# print(source, target)
G.add_edges_from(zip(source, target))
# rewire edges from each node
nodes = list(G.nodes())
for i in range(n):
u = i
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
for v in target:
if seed.random() < p:
w = seed.choice(nodes)
# Enforce no self-loops or multiple edges
while w == u or G.has_edge(u, w):
w = seed.choice(nodes)
if G.degree(u) >= n - 1:
break # skip this rewiring
else:
G.remove_edge(u, v)
G.add_edge(u, w)
return G
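def _demo_ws_flex():
    """Minimal sketch (hypothetical parameters, not part of the pipeline):
    the ws-flex edge budget round(k * n / 2) is preserved by rewiring,
    and k may be fractional."""
    g = ws_graph(n=16, k=3.5, p=0.1, seed=1)
    assert g.number_of_edges() == int(round(3.5 * 16 / 2))
    return g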
@py_random_state(4)
def connected_ws_graph(n, k, p, tries=100, seed=1):
"""Returns a connected ws-flex graph.
"""
for i in range(tries):
# seed is an RNG so should change sequence each call
G = ws_graph(n, k, p, seed)
if nx.is_connected(G):
return G
raise nx.NetworkXError('Maximum number of tries exceeded')
def nx_to_edge(graph, directed=False, add_self_loops=True,
shuffle_id=False, seed=1):
'''nx graph to edge index'''
    # list() so edges are not removed while the view is being iterated
    graph.remove_edges_from(list(nx.selfloop_edges(graph)))
# relabel graphs
keys = list(graph.nodes)
vals = list(range(graph.number_of_nodes()))
# shuffle node id assignment
if shuffle_id:
random.seed(seed)
random.shuffle(vals)
mapping = dict(zip(keys, vals))
graph = nx.relabel_nodes(graph, mapping, copy=True)
# get edges
edge_index = np.array(list(graph.edges))
if not directed:
edge_index = np.concatenate((edge_index, edge_index[:, ::-1]), axis=0)
if add_self_loops:
edge_self = np.arange(graph.number_of_nodes())[:, np.newaxis]
edge_self = np.tile(edge_self, (1, 2))
edge_index = np.concatenate((edge_index, edge_self), axis=0)
# sort edges
idx = np.argsort(edge_index[:, 0])
edge_index = edge_index[idx, :]
return edge_index
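# Sketch: for an undirected 3-node path 0-1-2, nx_to_edge returns a
# [num_edges, 2] int array with both directions of each edge plus one
# self-loop per node (7 rows here), sorted by the source column.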
# edge index generator
def generate_index(message_type='ba', n=16, sparsity=0.5, p=0.2,
directed=False, seed=123):
degree = n * sparsity
known_names = ['mcwhole', 'mcwholeraw', 'mcvisual', 'mcvisualraw', 'cat', 'catraw']
if message_type == 'er':
graph = nx.gnm_random_graph(n=n, m=n * degree // 2, seed=seed)
elif message_type == 'random':
edge_num = int(n * n * sparsity)
edge_id = np.random.choice(n * n, edge_num, replace=False)
edge_index = np.zeros((edge_num, 2), dtype=int)
for i in range(edge_num):
edge_index[i, 0] = edge_id[i] // n
edge_index[i, 1] = edge_id[i] % n
elif message_type == 'ws':
graph = connected_ws_graph(n=n, k=degree, p=p, seed=seed)
elif message_type == 'ba':
        graph = nx.barabasi_albert_graph(n=n, m=int(degree // 2), seed=seed)
elif message_type == 'hypercube':
graph = nx.hypercube_graph(n=int(np.log2(n)))
elif message_type == 'grid':
        m = int(degree)
        n = n // m
graph = nx.grid_2d_graph(m=m, n=n)
elif message_type == 'cycle':
graph = nx.cycle_graph(n=n)
elif message_type == 'tree':
graph = nx.random_tree(n=n, seed=seed)
elif message_type == 'regular':
        graph = nx.connected_watts_strogatz_graph(n=n, k=int(degree), p=0, seed=seed)
elif message_type in known_names:
graph = load_graph(message_type)
edge_index = nx_to_edge(graph, directed=True, seed=seed)
else:
raise NotImplementedError
if message_type != 'random' and message_type not in known_names:
edge_index = nx_to_edge(graph, directed=directed, seed=seed)
return edge_index
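def _demo_generate_index():
    """Minimal sketch (hypothetical sizes, not used by the models): build the
    edge index for an 8-node ws-flex graph at sparsity 0.5."""
    edge_index = generate_index(message_type='ws', n=8, sparsity=0.5,
                                p=0.2, directed=False, seed=1)
    assert edge_index.shape[1] == 2  # rows are (source, target) pairs
    return edge_index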
def compute_size(channel, group, seed=1):
np.random.seed(seed)
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
out = np.random.permutation(out)
return out
def compute_densemask(in_channels, out_channels, group_num, edge_index):
repeat_in = compute_size(in_channels, group_num)
repeat_out = compute_size(out_channels, group_num)
mask = np.zeros((group_num, group_num))
mask[edge_index[:, 0], edge_index[:, 1]] = 1
mask = np.repeat(mask, repeat_out, axis=0)
mask = np.repeat(mask, repeat_in, axis=1)
return mask
def get_mask(in_channels, out_channels, group_num,
message_type='ba', directed=False, sparsity=0.5, p=0.2, talk_mode='dense', seed=123):
assert group_num <= in_channels and group_num <= out_channels
# high-level graph edge index
edge_index_high = generate_index(message_type=message_type,
n=group_num, sparsity=sparsity, p=p, directed=directed, seed=seed)
# get in/out size for each high-level node
in_sizes = compute_size(in_channels, group_num)
out_sizes = compute_size(out_channels, group_num)
    # decide low-level node num (computed for reference; not used below)
group_num_low = int(min(np.min(in_sizes), np.min(out_sizes)))
# decide how to fill each node
mask_high = compute_densemask(in_channels, out_channels, group_num, edge_index_high)
return mask_high
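# Sketch: with in_channels=8, out_channels=8, group_num=4, get_mask expands the
# 4x4 group-level adjacency into an (8, 8) 0/1 array, so a weight entry is kept
# only if its input and output channel groups are connected in the graph.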
class TalkLinear(nn.Linear):
'''Relational graph version of Linear. Neurons "talk" according to the graph structure'''
def __init__(self, in_channels, out_channels, group_num, bias=False,
message_type='ba', directed=False,
sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
group_num_max = min(in_channels, out_channels)
if group_num > group_num_max:
group_num = group_num_max
# print(group_num, in_channels, out_channels, kernel_size, stride)
super(TalkLinear, self).__init__(
in_channels, out_channels, bias)
self.mask = get_mask(in_channels, out_channels, group_num,
message_type, directed, sparsity, p, talk_mode, seed)
nonzero = np.sum(self.mask)
self.mask = torch.from_numpy(self.mask).float().cuda()
self.flops_scale = nonzero / (in_channels * out_channels)
self.params_scale = self.flops_scale
self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
def forward(self, x):
weight = self.weight * self.mask
# pdb.set_trace()
return F.linear(x, weight, self.bias)
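# Usage sketch (hypothetical sizes; the mask is created on CUDA in __init__,
# so a GPU is assumed):
# layer = TalkLinear(64, 64, 8, message_type='ws', sparsity=0.5, p=0.2,
#                    seed=1).cuda()
# y = layer(torch.randn(32, 64).cuda())   # -> (32, 64), masked dense matmul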
class SymLinear(nn.Module):
'''Linear with symmetric weight matrices'''
def __init__(self, in_features, out_features, bias=True):
super(SymLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input):
weight = self.weight + self.weight.permute(1, 0)
return F.linear(input, weight, self.bias)
def extra_repr(self):
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features, self.out_features, self.bias is not None
)
class TalkConv2d(_ConvNd):
'''Relational graph version of Conv2d. Neurons "talk" according to the graph structure'''
def __init__(self, in_channels, out_channels, group_num, kernel_size, stride=1,
padding=0, dilation=1, bias=False, message_type='ba', directed=False, agg='sum',
sparsity=0.5, p=0.2, talk_mode='dense', seed=None):
group_num_max = min(in_channels, out_channels)
if group_num > group_num_max:
group_num = group_num_max
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(TalkConv2d, self).__init__(
in_channels, out_channels,
kernel_size, stride, padding, dilation,
False, _pair(0), 1, bias, 'zeros')
self.mask = get_mask(in_channels, out_channels, group_num,
message_type, directed, sparsity, p, talk_mode, seed)
nonzero = np.sum(self.mask)
self.mask = torch.from_numpy(self.mask[:, :, np.newaxis, np.newaxis]).float().cuda()
self.init_scale = torch.sqrt(out_channels / torch.sum(self.mask.cpu(), dim=0, keepdim=True))
self.flops_scale = nonzero / (in_channels * out_channels)
self.params_scale = self.flops_scale
def forward(self, input):
weight = self.weight * self.mask
return F.conv2d(input, weight, self.bias, self.stride, self.padding, self.dilation, 1)
class SymConv2d(_ConvNd):
'''Conv2d with symmetric weight matrices'''
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1,
bias=True, padding_mode='zeros'):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(SymConv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias, padding_mode)
def forward(self, input):
weight = self.weight + self.weight.permute(1, 0, 2, 3)
if self.padding_mode == 'circular':
expanded_padding = ((self.padding[1] + 1) // 2, self.padding[1] // 2,
(self.padding[0] + 1) // 2, self.padding[0] // 2)
return F.conv2d(F.pad(input, expanded_padding, mode='circular'),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
class Swish(nn.Module):
"""Swish activation function: x * sigmoid(x)"""
def __init__(self):
super(Swish, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
class SE(nn.Module):
"""Squeeze-and-Excitation (SE) block w/ Swish activation fun."""
def __init__(self, in_w, se_w, act_fun):
super(SE, self).__init__()
self._construct_class(in_w, se_w, act_fun)
def _construct_class(self, in_w, se_w, act_fun):
# AvgPool
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
# FC, Swish, FC, Sigmoid
self.f_ex = nn.Sequential(
nn.Conv2d(in_w, se_w, kernel_size=1, bias=True),
act_fun(),
nn.Conv2d(se_w, in_w, kernel_size=1, bias=True),
nn.Sigmoid()
)
def forward(self, x):
return x * self.f_ex(self.avg_pool(x))
class SparseLinear(nn.Linear):
'''Sparse Linear layer'''
def __init__(self, group_num, in_scale, out_scale, bias=False,
edge_index=None, flops_scale=0.5, params_scale=0.5):
# mask is used for reset to zero
mask_one = np.ones((out_scale, in_scale), dtype=bool)
mask_zero = np.zeros((out_scale, in_scale), dtype=bool)
mask_list = [[mask_one for i in range(group_num)] for j in range(group_num)]
for i in range(edge_index.shape[0]):
mask_list[edge_index[i, 0]][edge_index[i, 1]] = mask_zero
self.mask = np.block(mask_list)
self.edge_index = edge_index
# todo: update to pytorch 1.2.0, then use bool() dtype
self.mask = torch.from_numpy(self.mask).byte().cuda()
self.flops_scale = flops_scale
self.params_scale = params_scale
super(SparseLinear, self).__init__(
group_num * in_scale, group_num * out_scale, bias)
def forward(self, x):
weight = self.weight.clone().masked_fill_(self.mask, 0)
# pdb.set_trace()
return F.linear(x, weight, self.bias)
class GroupLinear(nn.Module):
'''Group conv style linear layer'''
def __init__(self, in_channels, out_channels, bias=False, group_size=1):
super(GroupLinear, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.group_size = group_size
self.group_num = in_channels // group_size
self.in_scale = in_channels // self.group_num
self.out_scale = out_channels // self.group_num
assert in_channels % self.group_num == 0
assert out_channels % self.group_num == 0
assert in_channels % self.group_size == 0
# Note: agg_fun is always sum
self.edge_index = np.arange(self.group_num)[:, np.newaxis].repeat(2, axis=1)
self.edge_num = self.edge_index.shape[0]
flops_scale = self.edge_num / (self.group_num * self.group_num)
params_scale = self.edge_num / (self.group_num * self.group_num)
self.linear = SparseLinear(self.group_num, self.in_scale, self.out_scale, bias,
edge_index=self.edge_index, flops_scale=flops_scale, params_scale=params_scale)
def forward(self, x):
x = self.linear(x)
return x
| 15,045 | 35.877451 | 114 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/cifar100.py |
"""CIFAR100 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
from torchvision import datasets
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [129.3, 124.1, 112.4]
_SD = [68.2, 65.4, 70.4]
class Cifar100(torch.utils.data.Dataset):
"""CIFAR-100 dataset."""
def __init__(self, data_path, split, batch_size):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'test'], \
'Split \'{}\' not supported for cifar'.format(split)
logger.info('Constructing CIFAR-100 {}...'.format(split))
self._data_path = data_path
self._split = split
self._batch_size = batch_size
# Data format:
# self._inputs - (split_size, 3, 32, 32) ndarray
# self._labels - split_size list
self._inputs, self._labels = self._load_data()
def _load_batch(self, batch_path):
with open(batch_path, 'rb') as f:
d = pickle.load(f, encoding='bytes')
return d[b'data'], d[b'fine_labels']
# return d[b'data'], d[b'labels']
def _load_data(self):
"""Loads data in memory."""
logger.info('{} data path: {}'.format(self._split, self._data_path))
# Compute data batch names
if self._split == 'train':
batch_names = ['train']
# datasets.CIFAR100(self._data_path, train=True)
# batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
else:
batch_names = ['test']
# Load data batches
inputs, labels = [], []
for batch_name in batch_names:
batch_path = os.path.join(self._data_path, batch_name)
inputs_batch, labels_batch = self._load_batch(batch_path)
inputs.append(inputs_batch)
labels += labels_batch
# Combine and reshape the inputs
inputs = np.vstack(inputs).astype(np.float32)
inputs = inputs.reshape((-1, 3, 32, 32))
return inputs, labels
def _transform_image(self, image):
"""Transforms the image for network input."""
if self._batch_size != 1:
image = transforms.color_normalization(image, _MEAN, _SD)
if self._split == 'train':
image = transforms.horizontal_flip(image=image, prob=0.5)
image = transforms.random_crop(image=image, size=32, pad_size=4)
return image
def __getitem__(self, index):
image, label = self._inputs[index, ...], self._labels[index]
image = self._transform_image(image)
return image, label
def __len__(self):
return self._inputs.shape[0]
| 3,163 | 34.155556 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/cifar10.py |
"""CIFAR10 dataset."""
import numpy as np
import os
import pickle
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
import pycls.utils.logging as lu
from pycls.config import cfg
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [125.3, 123.0, 113.9]
_SD = [63.0, 62.1, 66.7]
class Cifar10(torch.utils.data.Dataset):
"""CIFAR-10 dataset."""
def __init__(self, data_path, split, batch_size):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'test'], \
'Split \'{}\' not supported for cifar'.format(split)
logger.info('Constructing CIFAR-10 {}...'.format(split))
self._data_path = data_path
self._split = split
self._batch_size = batch_size
# Data format:
# self._inputs - (split_size, 3, 32, 32) ndarray
# self._labels - split_size list
self._inputs, self._labels = self._load_data()
def _load_batch(self, batch_path):
with open(batch_path, 'rb') as f:
d = pickle.load(f, encoding='bytes')
return d[b'data'], d[b'labels']
def _load_data(self):
"""Loads data in memory."""
logger.info('{} data path: {}'.format(self._split, self._data_path))
# Compute data batch names
if self._split == 'train':
batch_names = ['data_batch_{}'.format(i) for i in range(1, 6)]
else:
batch_names = ['test_batch']
# Load data batches
inputs, labels = [], []
for batch_name in batch_names:
batch_path = os.path.join(self._data_path, batch_name)
inputs_batch, labels_batch = self._load_batch(batch_path)
inputs.append(inputs_batch)
labels += labels_batch
# Combine and reshape the inputs
inputs = np.vstack(inputs).astype(np.float32)
inputs = inputs.reshape((-1, 3, 32, 32))
return inputs, labels
def _transform_image(self, image):
"""Transforms the image for network input."""
if self._batch_size != 1:
# Normalizing input images
image = transforms.color_normalization(image, _MEAN, _SD)
if self._split == 'train':
image = transforms.horizontal_flip(image=image, prob=0.5)
image = transforms.random_crop(image=image, size=32, pad_size=4)
return image
def __getitem__(self, index):
image, label = self._inputs[index, ...], self._labels[index]
image = self._transform_image(image)
return image, label
def __len__(self):
return self._inputs.shape[0]
| 3,048 | 33.647727 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/paths.py |
"""Dataset paths."""
import os
# Default data directory (/path/pycls/pycls/datasets/data)
_DEF_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# Data paths
_paths = {
'cifar10': _DEF_DATA_DIR + '/cifar10',
'cifar100': _DEF_DATA_DIR + '/cifar100',
'tinyimagenet200': _DEF_DATA_DIR + '/tinyimagenet200',
'imagenet': _DEF_DATA_DIR + '/imagenet'
}
def has_data_path(dataset_name):
"""Determines if the dataset has a data path."""
return dataset_name in _paths.keys()
def get_data_path(dataset_name):
"""Retrieves data path for the dataset."""
return _paths[dataset_name]
def set_data_path(dataset_name, data_path):
"""Sets data path for the dataset."""
_paths[dataset_name] = data_path
| 1,084 | 26.820513 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/loader.py |
"""Data loader."""
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler
import torch
from pycls.config import cfg
from pycls.datasets.cifar10 import Cifar10
from pycls.datasets.cifar100 import Cifar100
from pycls.datasets.tinyimagenet200 import TinyImageNet200
from pycls.datasets.imagenet import ImageNet
import pycls.datasets.paths as dp
# Supported datasets
_DATASET_CATALOG = {
'cifar10': Cifar10,
'cifar100': Cifar100,
'tinyimagenet200': TinyImageNet200,
'imagenet': ImageNet
}
def _construct_loader(dataset_name, split, batch_size, shuffle, drop_last):
"""Constructs the data loader for the given dataset."""
assert dataset_name in _DATASET_CATALOG.keys(), \
'Dataset \'{}\' not supported'.format(dataset_name)
assert dp.has_data_path(dataset_name), \
'Dataset \'{}\' has no data path'.format(dataset_name)
# Retrieve the data path for the dataset
data_path = dp.get_data_path(dataset_name)
# Construct the dataset
dataset = _DATASET_CATALOG[dataset_name](data_path, split, batch_size)
# Create a sampler for multi-process training
sampler = DistributedSampler(dataset) if cfg.NUM_GPUS > 1 else None
# Create a loader
loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=(False if sampler else shuffle),
sampler=sampler,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=drop_last
)
return loader
def construct_train_loader():
"""Train loader wrapper."""
return _construct_loader(
dataset_name=cfg.TRAIN.DATASET,
split=cfg.TRAIN.SPLIT,
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
drop_last=True
)
def construct_test_loader():
"""Test loader wrapper."""
return _construct_loader(
dataset_name=cfg.TEST.DATASET,
split=cfg.TEST.SPLIT,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
drop_last=False
)
def construct_test_loader_adv():
"""Test loader wrapper."""
return _construct_loader(
dataset_name=cfg.TEST.DATASET,
split=cfg.TEST.SPLIT,
batch_size=1,
shuffle=False,
drop_last=False
)
def shuffle(loader, cur_epoch):
""""Shuffles the data."""
assert isinstance(loader.sampler, (RandomSampler, DistributedSampler)), \
'Sampler type \'{}\' not supported'.format(type(loader.sampler))
# RandomSampler handles shuffling automatically
if isinstance(loader.sampler, DistributedSampler):
# DistributedSampler shuffles data based on epoch
loader.sampler.set_epoch(cur_epoch)
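# Typical epoch loop sketch (hypothetical; mirrors how a trainer would use
# this module):
# train_loader = construct_train_loader()
# for cur_epoch in range(num_epochs):
#     shuffle(train_loader, cur_epoch)
#     for inputs, labels in train_loader:
#         ...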
| 3,131 | 30.009901 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/imagenet.py |
"""ImageNet dataset."""
import cv2
import numpy as np
import os
import torch
import torch.utils.data
import pycls.datasets.transforms as transforms
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
# Per-channel mean and SD values in BGR order
_MEAN = [0.406, 0.456, 0.485]
_SD = [0.225, 0.224, 0.229]
# Eig vals and vecs of the cov mat
_EIG_VALS = [0.2175, 0.0188, 0.0045]
_EIG_VECS = np.array([
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]
])
class ImageNet(torch.utils.data.Dataset):
"""ImageNet dataset."""
def __init__(self, data_path, split, batch_size):
assert os.path.exists(data_path), \
'Data path \'{}\' not found'.format(data_path)
assert split in ['train', 'val'], \
'Split \'{}\' not supported for ImageNet'.format(split)
logger.info('Constructing ImageNet {}...'.format(split))
self._data_path = data_path
self._split = split
self._batch_size = batch_size
self._construct_imdb()
def _construct_imdb(self):
"""Constructs the imdb."""
# Compile the split data path
split_path = os.path.join(self._data_path, self._split)
logger.info('{} data path: {}'.format(self._split, split_path))
# Map ImageNet class ids to contiguous ids
self._class_ids = os.listdir(split_path)
self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
# Construct the image db
self._imdb = []
for class_id in self._class_ids:
cont_id = self._class_id_cont_id[class_id]
im_dir = os.path.join(split_path, class_id)
for im_name in os.listdir(im_dir):
self._imdb.append({
'im_path': os.path.join(im_dir, im_name),
'class': cont_id,
})
logger.info('Number of images: {}'.format(len(self._imdb)))
logger.info('Number of classes: {}'.format(len(self._class_ids)))
def _prepare_im(self, im):
"""Prepares the image for network input."""
# Train and test setups differ
if self._split == 'train':
# Scale and aspect ratio
im = transforms.random_sized_crop(
image=im, size=224, area_frac=0.08
)
# Horizontal flip
im = transforms.horizontal_flip(image=im, prob=0.5, order='HWC')
else:
# Scale and center crop
im = transforms.scale(256, im)
im = transforms.center_crop(224, im)
# HWC -> CHW
im = transforms.HWC2CHW(im)
# [0, 255] -> [0, 1]
im = im / 255.0
# PCA jitter
if self._split == 'train':
im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)
# Color normalization
if self._batch_size != 1:
im = transforms.color_normalization(im, _MEAN, _SD)
return im
def __getitem__(self, index):
# Load the image
im = cv2.imread(self._imdb[index]['im_path'])
im = im.astype(np.float32, copy=False)
# Prepare the image for training / testing
im = self._prepare_im(im)
# Retrieve the label
label = self._imdb[index]['class']
return im, label
def __len__(self):
return len(self._imdb)
# class ImageNet(torch.utils.data.Dataset):
# """ImageNet dataset."""
# def __init__(self, data_path, split):
# assert os.path.exists(data_path), \
# 'Data path \'{}\' not found'.format(data_path)
# assert split in ['train', 'val'], \
# 'Split \'{}\' not supported for ImageNet'.format(split)
# logger.info('Constructing ImageNet {}...'.format(split))
# self._data_path = data_path
# self._split = split
# self._construct_imdb()
# def _construct_imdb(self):
# """Constructs the imdb."""
# # Compile the split data path
# split_path = os.path.join(self._data_path, self._split)
# logger.info('{} data path: {}'.format(self._split, split_path))
# # Map ImageNet class ids to contiguous ids
# self._class_ids = os.listdir(split_path)
# self._class_id_cont_id = {v: i for i, v in enumerate(self._class_ids)}
# # Construct the image db
# self._imdb = []
# counter = 1
# for class_id in self._class_ids:
# print('progress: {}/{}'.format(counter,len(self._class_ids)))
# counter += 1
# cont_id = self._class_id_cont_id[class_id]
# im_dir = os.path.join(split_path, class_id)
# for im_name in os.listdir(im_dir):
# self._imdb.append({
# 'im_path': os.path.join(im_dir, im_name),
# 'class': cont_id,
# 'img': cv2.imread(os.path.join(im_dir, im_name)).astype(np.float32, copy=False)
# })
# logger.info('Number of images: {}'.format(len(self._imdb)))
# logger.info('Number of classes: {}'.format(len(self._class_ids)))
# def _prepare_im(self, im):
# """Prepares the image for network input."""
# # Train and test setups differ
# if self._split == 'train':
# # Scale and aspect ratio
# im = transforms.random_sized_crop(
# image=im, size=224, area_frac=0.08
# )
# # Horizontal flip
# im = transforms.horizontal_flip(image=im, prob=0.5, order='HWC')
# else:
# # Scale and center crop
# im = transforms.scale(256, im)
# im = transforms.center_crop(224, im)
# # HWC -> CHW
# im = transforms.HWC2CHW(im)
# # [0, 255] -> [0, 1]
# im = im / 255.0
# # PCA jitter
# if self._split == 'train':
# im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)
# # Color normalization
# im = transforms.color_normalization(im, _MEAN, _SD)
# return im
# def __getitem__(self, index):
# # Load the image
# im = self._imdb[index]['img']
# # Prepare the image for training / testing
# im = self._prepare_im(im)
# # Retrieve the label
# label = self._imdb[index]['class']
# return im, label
# def __len__(self):
# return len(self._imdb)
| 6,759 | 35.344086 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/transforms.py |
"""Image transformations."""
import cv2
import math
import numpy as np
def CHW2HWC(image):
return image.transpose([1, 2, 0])
def HWC2CHW(image):
return image.transpose([2, 0, 1])
def color_normalization(image, mean, std):
"""Expects image in CHW format."""
assert len(mean) == image.shape[0]
assert len(std) == image.shape[0]
for i in range(image.shape[0]):
image[i] = image[i] - mean[i]
image[i] = image[i] / std[i]
return image
def zero_pad(image, pad_size, order='CHW'):
assert order in ['CHW', 'HWC']
if order == 'CHW':
pad_width = ((0, 0), (pad_size, pad_size), (pad_size, pad_size))
else:
pad_width = ((pad_size, pad_size), (pad_size, pad_size), (0, 0))
return np.pad(image, pad_width, mode='constant')
def horizontal_flip(image, prob, order='CHW'):
assert order in ['CHW', 'HWC']
if np.random.uniform() < prob:
if order == 'CHW':
image = image[:, :, ::-1]
else:
image = image[:, ::-1, :]
return image
def random_crop(image, size, pad_size=0, order='CHW'):
assert order in ['CHW', 'HWC']
if pad_size > 0:
image = zero_pad(image=image, pad_size=pad_size, order=order)
if order == 'CHW':
if image.shape[1] == size and image.shape[2] == size:
return image
height = image.shape[1]
width = image.shape[2]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = image[:, y_offset:y_offset + size, x_offset:x_offset + size]
assert cropped.shape[1] == size, "Image not cropped properly"
assert cropped.shape[2] == size, "Image not cropped properly"
else:
if image.shape[0] == size and image.shape[1] == size:
return image
height = image.shape[0]
width = image.shape[1]
y_offset = 0
if height > size:
y_offset = int(np.random.randint(0, height - size))
x_offset = 0
if width > size:
x_offset = int(np.random.randint(0, width - size))
cropped = image[y_offset:y_offset + size, x_offset:x_offset + size, :]
assert cropped.shape[0] == size, "Image not cropped properly"
assert cropped.shape[1] == size, "Image not cropped properly"
return cropped
def scale(size, image):
height = image.shape[0]
width = image.shape[1]
if ((width <= height and width == size) or
(height <= width and height == size)):
return image
new_width = size
new_height = size
if width < height:
new_height = int(math.floor((float(height) / width) * size))
else:
new_width = int(math.floor((float(width) / height) * size))
img = cv2.resize(
image,
(new_width, new_height),
interpolation=cv2.INTER_LINEAR
)
return img.astype(np.float32)
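# e.g. a 480x640 (HxW) image with size=256 becomes 256x341: the shorter side
# is matched to `size` while the aspect ratio is preserved.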
def center_crop(size, image):
height = image.shape[0]
width = image.shape[1]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
cropped = image[y_offset:y_offset + size, x_offset:x_offset + size, :]
assert cropped.shape[0] == size, "Image height not cropped properly"
assert cropped.shape[1] == size, "Image width not cropped properly"
return cropped
def random_sized_crop(image, size, area_frac=0.08):
for _ in range(0, 10):
height = image.shape[0]
width = image.shape[1]
area = height * width
target_area = np.random.uniform(area_frac, 1.0) * area
aspect_ratio = np.random.uniform(3.0 / 4.0, 4.0 / 3.0)
w = int(round(math.sqrt(float(target_area) * aspect_ratio)))
h = int(round(math.sqrt(float(target_area) / aspect_ratio)))
if np.random.uniform() < 0.5:
w, h = h, w
if h <= height and w <= width:
if height == h:
y_offset = 0
else:
y_offset = np.random.randint(0, height - h)
if width == w:
x_offset = 0
else:
x_offset = np.random.randint(0, width - w)
y_offset = int(y_offset)
x_offset = int(x_offset)
cropped = image[y_offset:y_offset + h, x_offset:x_offset + w, :]
assert cropped.shape[0] == h and cropped.shape[1] == w, \
"Wrong crop size"
cropped = cv2.resize(
cropped,
(size, size),
interpolation=cv2.INTER_LINEAR
)
return cropped.astype(np.float32)
return center_crop(size, scale(size, image))
def lighting(img, alphastd, eigval, eigvec):
if alphastd == 0:
return img
# generate alpha1, alpha2, alpha3
alpha = np.random.normal(0, alphastd, size=(1, 3))
eig_vec = np.array(eigvec)
eig_val = np.reshape(eigval, (1, 3))
rgb = np.sum(
eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
axis=1
)
for idx in range(img.shape[0]):
img[idx] = img[idx] + rgb[2 - idx]
return img
| 5,563 | 32.119048 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/datasets/load_graph.py |
"""load bio neural networks"""
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from networkx.utils import py_random_state
from matplotlib.colors import ListedColormap
import pdb
def compute_stats(G):
G_cluster = sorted(list(nx.clustering(G).values()))
cluster = sum(G_cluster) / len(G_cluster)
path = nx.average_shortest_path_length(G) # path
return cluster, path
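# Sketch: compute_stats returns the (average clustering, average shortest path)
# pair used to characterize a graph; e.g. for nx.cycle_graph(8) it gives
# clustering 0.0 and average path length 16/7 ~= 2.286.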
def plot_graph(graph, name, dpi=200, width=0.5, layout='spring'):
plt.figure(figsize=(10, 10))
pos = nx.spiral_layout(graph)
if layout == 'spring':
pos = nx.spring_layout(graph)
elif layout == 'circular':
pos = nx.circular_layout(graph)
nx.draw(graph, pos=pos, node_size=100, width=width)
plt.savefig('figs/graph_view_{}.png'.format(name), dpi=dpi, transparent=True)
def load_graph(name, verbose=False, seed=1):
if 'raw' in name:
name = name[:-3]
directed = True
else:
directed = False
filename = '{}.txt'.format(name)
# filename = 'pycls/datasets/{}.txt'.format(name)
with open(filename) as f:
content = f.readlines()
content = [list(x.strip()) for x in content]
adj = np.array(content).astype(int)
if not directed:
adj = np.logical_or(adj.transpose(), adj).astype(int)
graph = nx.from_numpy_array(adj, create_using=nx.DiGraph)
if verbose:
print(type(graph))
print(graph.number_of_nodes(), graph.number_of_edges())
print(compute_stats(graph))
print(len(graph.edges))
# plot_graph(graph, 'mc_whole', dpi=60, width=1, layout='circular')
cmap = ListedColormap(['w', 'k'])
        plt.matshow(nx.to_numpy_array(graph), cmap=cmap)
plt.show()
return graph
def compute_count(channel, group):
divide = channel // group
remain = channel % group
out = np.zeros(group, dtype=int)
out[:remain] = divide + 1
out[remain:] = divide
return out
@py_random_state(3)
def ws_graph(n, k, p, seed=1):
"""Returns a ws-flex graph, k can be real number in [2,n]
"""
assert k >= 2 and k <= n
# compute number of edges:
edge_num = int(round(k * n / 2))
count = compute_count(edge_num, n)
# print(count)
G = nx.Graph()
for i in range(n):
source = [i] * count[i]
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
# print(source, target)
G.add_edges_from(zip(source, target))
# rewire edges from each node
nodes = list(G.nodes())
for i in range(n):
u = i
target = range(i + 1, i + count[i] + 1)
target = [node % n for node in target]
for v in target:
if seed.random() < p:
w = seed.choice(nodes)
# Enforce no self-loops or multiple edges
while w == u or G.has_edge(u, w):
w = seed.choice(nodes)
if G.degree(u) >= n - 1:
break # skip this rewiring
else:
G.remove_edge(u, v)
G.add_edge(u, w)
return G
@py_random_state(4)
def connected_ws_graph(n, k, p, tries=100, seed=1):
"""Returns a connected ws-flex graph.
"""
for i in range(tries):
# seed is an RNG so should change sequence each call
G = ws_graph(n, k, p, seed)
if nx.is_connected(G):
return G
raise nx.NetworkXError('Maximum number of tries exceeded')
def generate_graph(message_type='ws', n=16, sparsity=0.5, p=0.2,
directed=False, seed=123):
degree = n * sparsity
if message_type == 'ws':
graph = connected_ws_graph(n=n, k=degree, p=p, seed=seed)
return graph
# graph = load_graph('mcwhole', True)
# graph = load_graph('mcwholeraw', True)
# graph = load_graph('mcvisual', True)
# graph = load_graph('mcvisualraw', True)
# graph = load_graph('cat', True)
# graph = load_graph('catraw', True)
| 4,341 | 30.014286 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/checkpoint.py |
"""Functions that handle saving and loading of checkpoints."""
import os
import torch
from collections import OrderedDict
from pycls.config import cfg
import pycls.utils.distributed as du
# Common prefix for checkpoint file names
_NAME_PREFIX = 'model_epoch_'
# Checkpoints directory name
_DIR_NAME = 'checkpoints'
def get_checkpoint_dir():
"""Get location for storing checkpoints."""
return os.path.join(cfg.OUT_DIR, _DIR_NAME)
def got_checkpoint_dir():
"""Get location for storing checkpoints for inference time."""
return os.path.join(cfg.CHECKPT_DIR, _DIR_NAME)
def get_checkpoint(epoch):
"""Get the full path to a checkpoint file."""
name = '{}{:04d}.pyth'.format(_NAME_PREFIX, epoch)
return os.path.join(get_checkpoint_dir(), name)
def got_checkpoint(epoch):
"""Get the full path to a checkpoint file for inference time."""
name = '{}{:04d}.pyth'.format(_NAME_PREFIX, epoch)
return os.path.join(got_checkpoint_dir(), name)
def get_checkpoint_last():
d = get_checkpoint_dir()
names = os.listdir(d) if os.path.exists(d) else []
names = [f for f in names if _NAME_PREFIX in f]
assert len(names), 'No checkpoints found in \'{}\'.'.format(d)
name = sorted(names)[-1]
return os.path.join(d, name)
def got_checkpoint_last():
d = got_checkpoint_dir()
names = os.listdir(d) if os.path.exists(d) else []
names = [f for f in names if _NAME_PREFIX in f]
assert len(names), 'No checkpoints found in \'{}\'.'.format(d)
name = sorted(names)[-1]
return os.path.join(d, name)
def has_checkpoint():
"""Determines if the given directory contains a checkpoint."""
d = get_checkpoint_dir()
print("checkpoint directory =", d)
files = os.listdir(d) if os.path.exists(d) else []
return any(_NAME_PREFIX in f for f in files)
def had_checkpoint():
"""Determines if the given directory contains a checkpoint for inference time."""
d = got_checkpoint_dir()
print("checkpoint directory =", d)
files = os.listdir(d) if os.path.exists(d) else []
return any(_NAME_PREFIX in f for f in files)
def is_checkpoint_epoch(cur_epoch):
"""Determines if a checkpoint should be saved on current epoch."""
return (cur_epoch + 1) % cfg.TRAIN.CHECKPOINT_PERIOD == 0
def save_checkpoint(model, optimizer, epoch):
"""Saves a checkpoint."""
# Save checkpoints only from the master process
if not du.is_master_proc():
return
os.makedirs(get_checkpoint_dir(), exist_ok=True)
checkpoint = {
'epoch': epoch,
'model_state': model.state_dict(),
'optimizer_state': optimizer.state_dict(),
'cfg': cfg.dump()
}
checkpoint_file = get_checkpoint(epoch + 1)
torch.save(checkpoint, checkpoint_file)
return checkpoint_file
def load_checkpoint(checkpoint_file, model, optimizer=None):
"""Loads the checkpoint from the given file."""
assert os.path.exists(checkpoint_file), \
'Checkpoint \'{}\' not found'.format(checkpoint_file)
# if cfg.IS_INFERENCE and cfg.IS_DDP:
# state_dict = torch.load(checkpoint_file, map_location='cpu')
# new_state_dict = OrderedDict()
# print("state_dict.items() :", state_dict)
# for k, v in state_dict.items():
# name = k[7:] # remove `module.`
# new_state_dict[name] = v
# # load params
# epoch = state_dict['epoch']
# model.load_state_dict(new_state_dict['model_state'])
# if optimizer:
# optimizer.load_state_dict(new_state_dict['optimizer_state'])
if cfg.IS_INFERENCE:
print("Mapping model to CPU")
checkpoint = torch.load(checkpoint_file, map_location='cpu')
# print(checkpoint)
else:
checkpoint = torch.load(checkpoint_file)
epoch = checkpoint['epoch']
print("Epochs from checkpoint = ", epoch)
model.load_state_dict(checkpoint['model_state'], strict=False)
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state'])
return epoch
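# Resume sketch (hypothetical): pick up training from the newest checkpoint.
# if has_checkpoint():
#     start_epoch = load_checkpoint(get_checkpoint_last(), model, optimizer)
# else:
#     start_epoch = 0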
| 4,392 | 31.540741 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/timer.py |
"""Timer."""
import time
class Timer(object):
"""A simple timer (adapted from Detectron)."""
def __init__(self):
self.reset()
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
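# Usage sketch (run_step is a hypothetical workload):
# timer = Timer()
# for _ in range(10):
#     timer.tic()
#     run_step()
#     timer.toc()
# print(timer.average_time)   # mean seconds per call so far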
| 1,013 | 25 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/error_handler.py |
"""Multiprocessing error handler."""
import os
import signal
import threading
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and
propagates the tracebacks to the parent process.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, _sig_num, _stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
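# Usage sketch (hypothetical; the queue type is an assumption) for the parent
# process that spawns workers:
# import multiprocessing as mp
# error_queue = mp.SimpleQueue()
# handler = ErrorHandler(error_queue)
# handler.add_child(child_process.pid)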
| 2,012 | 31.467742 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/plotting.py |
"""Plotting functions."""
import colorlover as cl
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly.offline as offline
import pycls.utils.logging as lu
def get_plot_colors(max_colors, color_format='pyplot'):
"""Generate colors for plotting."""
colors = cl.scales['11']['qual']['Paired']
if max_colors > len(colors):
colors = cl.to_rgb(cl.interp(colors, max_colors))
if color_format == 'pyplot':
return [[j / 255.0 for j in c] for c in cl.to_numeric(colors)]
return colors
def prepare_plot_data(log_files, names, key='top1_err'):
"""Load logs and extract data for plotting error curves."""
plot_data = []
for file, name in zip(log_files, names):
d, log = {}, lu.load_json_stats(file)
for phase in ['train', 'test']:
x = lu.parse_json_stats(log, phase + '_epoch', 'epoch')
y = lu.parse_json_stats(log, phase + '_epoch', key)
d['x_' + phase], d['y_' + phase] = x, y
d[phase + '_label'] = '[{:5.2f}] '.format(min(y) if y else 0) + name
plot_data.append(d)
assert len(plot_data) > 0, 'No data to plot'
return plot_data
def plot_error_curves_plotly(log_files, names, filename, key='top1_err'):
"""Plot error curves using plotly and save to file."""
plot_data = prepare_plot_data(log_files, names, key)
colors = get_plot_colors(len(plot_data), 'plotly')
# Prepare data for plots (3 sets, train duplicated w and w/o legend)
data = []
for i, d in enumerate(plot_data):
s = str(i)
line_train = {'color': colors[i], 'dash': 'dashdot', 'width': 1.5}
line_test = {'color': colors[i], 'dash': 'solid', 'width': 1.5}
data.append(go.Scatter(
x=d['x_train'], y=d['y_train'], mode='lines', name=d['train_label'],
line=line_train, legendgroup=s, visible=True, showlegend=False
))
data.append(go.Scatter(
x=d['x_test'], y=d['y_test'], mode='lines', name=d['test_label'],
line=line_test, legendgroup=s, visible=True, showlegend=True
))
data.append(go.Scatter(
x=d['x_train'], y=d['y_train'], mode='lines', name=d['train_label'],
line=line_train, legendgroup=s, visible=False, showlegend=True
))
# Prepare layout w ability to toggle 'all', 'train', 'test'
titlefont = {'size': 18, 'color': '#7f7f7f'}
vis = [[True, True, False], [False, False, True], [False, True, False]]
buttons = zip(['all', 'train', 'test'], [[{'visible': v}] for v in vis])
buttons = [{'label': l, 'args': v, 'method': 'update'} for l, v in buttons]
layout = go.Layout(
title=key + ' vs. epoch<br>[dash=train, solid=test]',
xaxis={'title': 'epoch', 'titlefont': titlefont},
yaxis={'title': key, 'titlefont': titlefont},
showlegend=True,
hoverlabel={'namelength': -1},
updatemenus=[{
'buttons': buttons, 'direction': 'down', 'showactive': True,
'x': 1.02, 'xanchor': 'left', 'y': 1.08, 'yanchor': 'top'
}]
)
# Create plotly plot
offline.plot({'data': data, 'layout': layout}, filename=filename)
def plot_error_curves_pyplot(log_files, names, filename=None, key='top1_err'):
"""Plot error curves using matplotlib.pyplot and save to file."""
plot_data = prepare_plot_data(log_files, names, key)
colors = get_plot_colors(len(names))
for ind, d in enumerate(plot_data):
c, lbl = colors[ind], d['test_label']
plt.plot(d['x_train'], d['y_train'], '--', c=c, alpha=0.8)
plt.plot(d['x_test'], d['y_test'], '-', c=c, alpha=0.8, label=lbl)
plt.title(key + ' vs. epoch\n[dash=train, solid=test]', fontsize=14)
plt.xlabel('epoch', fontsize=14)
plt.ylabel(key, fontsize=14)
plt.grid(alpha=0.4)
plt.legend()
if filename:
plt.savefig(filename)
plt.clf()
else:
plt.show()
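# Sketch (hypothetical paths): plot curves for every run under a log directory.
# files, names = lu.get_log_files('/tmp/runs')
# plot_error_curves_pyplot(files, names, filename='curves.png')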
| 4,288 | 39.847619 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/logging.py |
"""Logging."""
import builtins
import decimal
import logging
import os
import simplejson
import sys
from pycls.config import cfg
import pycls.utils.distributed as du
import pycls.utils.metrics as mu
import pdb
# Show filename and line number in logs
_FORMAT = '[%(filename)s: %(lineno)3d]: %(message)s'
# Log file name (for cfg.LOG_DEST = 'file')
_LOG_FILE = 'stdout.log'
# Printed json stats lines will be tagged w/ this
_TAG = 'json_stats: '
def _suppress_print():
"""Suppresses printing from the current process."""
    def ignore(*_objects, sep=' ', end='\n', file=sys.stdout, flush=False):
        # Mirror print()'s keyword arguments so suppressed calls don't raise.
        pass
builtins.print = ignore
def setup_logging():
"""Sets up the logging."""
# Enable logging only for the master process
if du.is_master_proc():
# Clear the root logger to prevent any existing logging config
# (e.g. set by another module) from messing with our setup
logging.root.handlers = []
# Construct logging configuration
logging_config = {
'level': logging.INFO,
'format': _FORMAT
}
# Log either to stdout or to a file
if cfg.LOG_DEST == 'stdout':
logging_config['stream'] = sys.stdout
else:
logging_config['filename'] = os.path.join(cfg.OUT_DIR, _LOG_FILE)
# Configure logging
logging.basicConfig(**logging_config)
else:
pass
# _suppress_print()
def get_logger(name):
"""Retrieves the logger."""
return logging.getLogger(name)
def log_json_stats(stats, cur_epoch=None, writer=None, is_epoch=False, params=0, flops=0, model=None, is_master=False):
"""Logs json stats."""
if writer is not None:
for k, v in stats.items():
if isinstance(v, float) or isinstance(v, int):
writer.add_scalar(k, v, cur_epoch + 1)
# if model is not None:
# for name, param in model.named_parameters():
# writer.add_histogram(name, param.clone().cpu().data.numpy(), cur_epoch)
# Decimal + string workaround for having fixed len float vals in logs
stats = {
k: decimal.Decimal('{:.6f}'.format(v)) if isinstance(v, float) else v
for k, v in stats.items()
}
json_stats = simplejson.dumps(stats, sort_keys=True, use_decimal=True)
logger = get_logger(__name__)
logger.info('{:s}{:s}'.format(_TAG, json_stats))
if is_epoch and cur_epoch is not None and is_master:
epoch_id = cur_epoch + 1
result_info = ', '.join(
[str(round(params / 1000000, 3)), str(round(flops / 1000000000, 3)), '{:.3f}'.format(stats['time_avg']),
'{:.3f}'.format(stats['top1_err']), '{:.3f}'.format(stats['top5_err']),
str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.DIM_LIST[0]), str(cfg.RGRAPH.SEED_TRAIN)])
with open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, epoch_id), "a") as text_file:
text_file.write(result_info + '\n')
def load_json_stats(log_file):
"""Loads json_stats from a single log file."""
with open(log_file, 'r') as f:
lines = f.readlines()
json_lines = [l[l.find(_TAG) + len(_TAG):] for l in lines if _TAG in l]
json_stats = [simplejson.loads(l) for l in json_lines]
return json_stats
def parse_json_stats(log, row_type, key):
"""Extract values corresponding to row_type/key out of log."""
vals = [row[key] for row in log if row['_type'] == row_type and key in row]
if key == 'iter' or key == 'epoch':
vals = [int(val.split('/')[0]) for val in vals]
return vals
def get_log_files(log_dir, name_filter=''):
"""Get all log files in directory containing subdirs of trained models."""
names = [n for n in sorted(os.listdir(log_dir)) if name_filter in n]
files = [os.path.join(log_dir, n, _LOG_FILE) for n in names]
f_n_ps = [(f, n) for (f, n) in zip(files, names) if os.path.exists(f)]
files, names = zip(*f_n_ps)
return files, names
| 4,325 | 33.608 | 119 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/net.py |
"""Functions for manipulating networks."""
import itertools
import math
import torch
import torch.nn as nn
from pycls.config import cfg
from ..models.relation_graph import *
def init_weights(m):
"""Performs ResNet style weight initialization."""
if isinstance(m, nn.Conv2d) or isinstance(m, SymConv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
elif isinstance(m, TalkConv2d):
# Note that there is no bias due to BN
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels * m.params_scale
# fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(mean=0.0, std=math.sqrt(2.0 / fan_out))
# m.weight.data = m.weight.data*m.init_scale
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
zero_init_gamma = (
hasattr(m, 'final_bn') and m.final_bn and
cfg.BN.ZERO_INIT_FINAL_GAMMA
)
m.weight.data.fill_(0.0 if zero_init_gamma else 1.0)
m.bias.data.zero_()
elif isinstance(m, nn.Linear) or isinstance(m, TalkLinear) or isinstance(m, SymLinear):
m.weight.data.normal_(mean=0.0, std=0.01)
if m.bias is not None:
m.bias.data.zero_()
@torch.no_grad()
def compute_precise_bn_stats(model, loader):
"""Computes precise BN stats on training data."""
# Compute the number of minibatches to use
num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))
# Retrieve the BN layers
bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]
# Initialize stats storage
mus = [torch.zeros_like(bn.running_mean) for bn in bns]
sqs = [torch.zeros_like(bn.running_var) for bn in bns]
# Remember momentum values
moms = [bn.momentum for bn in bns]
# Disable momentum
for bn in bns:
bn.momentum = 1.0
# Accumulate the stats across the data samples
for inputs, _labels in itertools.islice(loader, num_iter):
model(inputs.cuda())
# Accumulate the stats for each BN layer
for i, bn in enumerate(bns):
m, v = bn.running_mean, bn.running_var
sqs[i] += (v + m * m) / num_iter
mus[i] += m / num_iter
# Set the stats and restore momentum values
for i, bn in enumerate(bns):
bn.running_var = sqs[i] - mus[i] * mus[i]
bn.running_mean = mus[i]
bn.momentum = moms[i]
def get_flat_weights(model):
"""Gets all model weights as a single flat vector."""
return torch.cat([p.data.view(-1, 1) for p in model.parameters()], 0)
def set_flat_weights(model, flat_weights):
"""Sets all model weights from a single flat vector."""
k = 0
for p in model.parameters():
n = p.data.numel()
p.data.copy_(flat_weights[k:(k + n)].view_as(p.data))
k += n
assert k == flat_weights.numel()
def model2adj(model):
adj_dict = {}
i = 0
for n, m in model.named_modules():
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
adj_dict['weight_{}'.format(i)] = m.weight.data.squeeze().cpu().numpy()
i += 1
elif isinstance(m, SymLinear):
weight = m.weight.data + m.weight.data.permute(1, 0)
adj_dict['weight_{}'.format(i)] = weight.squeeze().cpu().numpy()
i += 1
elif isinstance(m, SymConv2d):
weight = m.weight.data + m.weight.data.permute(1, 0, 2, 3)
adj_dict['weight_{}'.format(i)] = weight.squeeze().cpu().numpy()
i += 1
elif isinstance(m, TalkLinear) or isinstance(m, TalkConv2d):
adj_dict['weight_{}'.format(i)] = m.weight.data.squeeze().cpu().numpy()
adj_dict['mask_{}'.format(i)] = m.mask.data.squeeze().cpu().numpy()
i += 1
return adj_dict
| 4,360 | 37.59292 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/distributed.py |
"""Distributed helpers."""
import torch
from pycls.config import cfg
def is_master_proc():
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints.
In the multi GPU setting, we assign the master role to the rank 0 process.
When training using a single GPU, there is only one training processes
which is considered the master processes.
"""
return cfg.NUM_GPUS == 1 or torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
# print('--rank{},world{}--'.format(proc_rank, world_size))
# torch.distributed.init_process_group(
# backend=cfg.DIST_BACKEND,
# init_method="tcp://{}:{}".format(cfg.HOST, cfg.PORT),
# world_size=world_size,
# rank=proc_rank
# )
torch.distributed.init_process_group(
backend=cfg.DIST_BACKEND,
init_method='env://',
world_size=world_size,
rank=proc_rank
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
def scaled_all_reduce(tensors):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of
the process group (equivalent to cfg.NUM_GPUS).
"""
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = torch.distributed.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
for tensor in tensors:
tensor.mul_(1.0 / cfg.NUM_GPUS)
return tensors
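def scaled_all_reduce_reference(tensors_per_process):
    """Single-process reference semantics for scaled_all_reduce (illustration
    only): with NUM_GPUS processes each holding one entry, all_reduce with SUM
    followed by the 1/NUM_GPUS scaling leaves every process with the mean."""
    n = len(tensors_per_process)          # plays the role of cfg.NUM_GPUS
    total = sum(tensors_per_process)      # what all_reduce with SUM produces
    return [total / n for _ in range(n)]  # every process ends up with the mean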
| 2,323 | 31.277778 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/metrics.py |
"""Functions for computing metrics."""
import numpy as np
import torch
import torch.nn as nn
import pdb
from pycls.config import cfg
from functools import reduce
import operator
from ..models.relation_graph import *
# Number of bytes in a megabyte
_B_IN_MB = 1024 * 1024
def topks_correct(preds, labels, ks):
"""Computes the number of top-k correct predictions for each k."""
assert preds.size(0) == labels.size(0), \
'Batch dim of predictions and labels must match'
# Find the top max_k predictions for each sample
_top_max_k_vals, top_max_k_inds = torch.topk(
preds, max(ks), dim=1, largest=True, sorted=True
)
# (batch_size, max_k) -> (max_k, batch_size)
top_max_k_inds = top_max_k_inds.t()
# (batch_size, ) -> (max_k, batch_size)
rep_max_k_labels = labels.view(1, -1).expand_as(top_max_k_inds)
# (i, j) = 1 if top i-th prediction for the j-th sample is correct
top_max_k_correct = top_max_k_inds.eq(rep_max_k_labels)
# Compute the number of topk correct predictions for each k
topks_correct = [
top_max_k_correct[:k, :].view(-1).float().sum() for k in ks
]
return topks_correct
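def topks_correct_example():
    """Tiny sanity example for topks_correct (illustrative only)."""
    preds = torch.tensor([[0.1, 0.7, 0.2], [0.6, 0.3, 0.1]])
    labels = torch.tensor([1, 2])  # sample 0 is right at top-1; sample 1 only at top-3
    return topks_correct(preds, labels, ks=[1, 2])  # -> [1.0 correct, 1.0 correct]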
def topk_errors(preds, labels, ks):
"""Computes the top-k error for each k."""
num_topks_correct = topks_correct(preds, labels, ks)
return [(1.0 - x / preds.size(0)) * 100.0 for x in num_topks_correct]
def topk_accuracies(preds, labels, ks):
"""Computes the top-k accuracy for each k."""
num_topks_correct = topks_correct(preds, labels, ks)
return [(x / preds.size(0)) * 100.0 for x in num_topks_correct]
def params_count(model):
"""Computes the number of parameters."""
count = 0
for n,m in model.named_modules():
if isinstance(m, TalkConv2d) or isinstance(m, TalkLinear):
count += np.sum([p.numel()*m.params_scale for p in m.parameters(recurse=False)]).item()
else:
count += np.sum([p.numel() for p in m.parameters(recurse=False)]).item()
return int(count)
def flops_count(model):
"""Computes the number of flops."""
assert cfg.TRAIN.DATASET in ['cifar10', 'cifar100', 'tinyimagenet200', 'imagenet'], \
'Computing flops for {} is not supported'.format(cfg.TRAIN.DATASET)
# im_size = 32 if cfg.TRAIN.DATASET == 'cifar10' else 224
if cfg.TRAIN.DATASET == 'cifar10':
im_size = 32
elif cfg.TRAIN.DATASET == 'cifar100':
im_size = 32
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
im_size = 64
else:
im_size = 224
h, w = im_size, im_size
count = 0
for n, m in model.named_modules():
if isinstance(m, nn.Conv2d):
if '.se' in n:
count += m.in_channels * m.out_channels + m.bias.numel()
continue
h_out = (h + 2 * m.padding[0] - m.kernel_size[0]) // m.stride[0] + 1
w_out = (w + 2 * m.padding[1] - m.kernel_size[1]) // m.stride[1] + 1
count += np.prod([
m.weight.numel(),
h_out, w_out
])
if 'proj' not in n:
h, w = h_out, w_out
elif isinstance(m, TalkConv2d):
h_out = (h + 2 * m.padding[0] - m.kernel_size[0]) // m.stride[0] + 1
w_out = (w + 2 * m.padding[1] - m.kernel_size[1]) // m.stride[1] + 1
count += int(np.prod([
m.weight.numel()*m.flops_scale,
h_out, w_out
]))
if 'proj' not in n and 'pool' not in n:
h, w = h_out, w_out
elif isinstance(m, nn.MaxPool2d):
h = (h + 2 * m.padding - m.kernel_size) // m.stride + 1
w = (w + 2 * m.padding - m.kernel_size) // m.stride + 1
elif isinstance(m, TalkLinear):
count += int(m.in_features * m.out_features * m.flops_scale)
elif isinstance(m, nn.Linear):
count += m.in_features * m.out_features
return count
def gpu_mem_usage():
"""Computes the GPU memory usage for the current device (MB)."""
mem_usage_bytes = torch.cuda.max_memory_allocated()
return mem_usage_bytes / _B_IN_MB
# Online FLOPs/Params calculation from CondenseNet codebase
count_ops = 0
count_params = 0
def get_num_gen(gen):
return sum(1 for x in gen)
def is_pruned(layer):
try:
layer.mask
return True
except AttributeError:
return False
def is_leaf(model):
return get_num_gen(model.children()) == 0
def get_layer_info(layer):
layer_str = str(layer)
type_name = layer_str[:layer_str.find('(')].strip()
return type_name
def get_layer_param(model):
return sum([reduce(operator.mul, i.size(), 1) for i in model.parameters()])
def measure_layer(layer, x):
global count_ops, count_params
delta_ops = 0
delta_params = 0
multi_add = 1
type_name = get_layer_info(layer)
if type_name in ['Conv2d']:
out_h = int((x.size()[2] + 2 * layer.padding[0] - layer.kernel_size[0]) /
layer.stride[0] + 1)
out_w = int((x.size()[3] + 2 * layer.padding[1] - layer.kernel_size[1]) /
layer.stride[1] + 1)
delta_ops = layer.in_channels * layer.out_channels * layer.kernel_size[0] * \
layer.kernel_size[1] * out_h * out_w / layer.groups * multi_add
print(layer)
print('out_h: ', out_h, 'out_w:', out_w)
delta_params = get_layer_param(layer)
elif type_name in ['ReLU']:
delta_ops = x.numel()
delta_params = get_layer_param(layer)
elif type_name in ['AvgPool2d', 'MaxPool2d']:
in_w = x.size()[2]
kernel_ops = layer.kernel_size * layer.kernel_size
out_w = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1)
out_h = int((in_w + 2 * layer.padding - layer.kernel_size) / layer.stride + 1)
delta_ops = x.size()[0] * x.size()[1] * out_w * out_h * kernel_ops
delta_params = get_layer_param(layer)
elif type_name in ['AdaptiveAvgPool2d']:
delta_ops = x.size()[0] * x.size()[1] * x.size()[2] * x.size()[3]
delta_params = get_layer_param(layer)
elif type_name in ['Linear']:
weight_ops = layer.weight.numel() * multi_add
bias_ops = layer.bias.numel()
delta_ops = x.size()[0] * (weight_ops + bias_ops)
delta_params = get_layer_param(layer)
elif type_name in ['WeightedSumTransform']:
weight_ops = layer.weight.numel() * multi_add
delta_ops = x.size()[0] * (weight_ops)
delta_params = get_layer_param(layer)
elif type_name in ['BatchNorm2d', 'Dropout2d', 'DropChannel', 'Dropout', 'Sigmoid', 'DirichletWeightedSumTransform', 'Softmax', 'Identity', 'Sequential']:
delta_params = get_layer_param(layer)
else:
raise TypeError('unknown layer type: %s' % type_name)
count_ops += delta_ops
count_params += delta_params
return
def measure_model(model, H, W):
global count_ops, count_params
count_ops = 0
count_params = 0
data = torch.zeros(1, 3, H, W).cuda()
def should_measure(x):
return is_leaf(x) or is_pruned(x)
def modify_forward(model):
for child in model.children():
if should_measure(child):
def new_forward(m):
def lambda_forward(x):
measure_layer(m, x)
return m.old_forward(x)
return lambda_forward
child.old_forward = child.forward
child.forward = new_forward(child)
else:
modify_forward(child)
def restore_forward(model):
for child in model.children():
# leaf node
if is_leaf(child) and hasattr(child, 'old_forward'):
child.forward = child.old_forward
child.old_forward = None
else:
restore_forward(child)
modify_forward(model)
model.forward(data)
restore_forward(model)
return count_ops, count_params
| 8,557 | 33.095618 | 158 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/multiprocessing.py |
"""Multiprocessing helpers."""
import multiprocessing as mp
import traceback
import subprocess
import numpy as np
import os
from pycls.utils.error_handler import ErrorHandler
import pycls.utils.distributed as du
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
    """Runs a function from a child process."""
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12112'
    # print("--proc_rank{}, world_size{}--".format(proc_rank, world_size))
try:
# Initialize the process group
du.init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except KeyboardInterrupt:
# Killed by the parent process
pass
except Exception:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
# Destroy the process group
du.destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs={}):
"""Runs a function in a multi-proc setting."""
# Handle errors from training subprocesses
error_queue = mp.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = mp.Process(
target=run,
args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
# get gpu usage
def get_gpu_memory_map():
"""Get the current gpu usage.
Returns
-------
usage: dict
Keys are device ids as integers.
Values are memory usage as integers in MB.
"""
result = subprocess.check_output(
[
'nvidia-smi', '--query-gpu=memory.used',
'--format=csv,nounits,noheader'
], encoding='utf-8')
# Convert lines into a dictionary
gpu_memory = np.array([int(x) for x in result.strip().split('\n')])
return gpu_memory
def auto_select_gpu(memory_threshold=7000, smooth_ratio=200):
    # Favor less-used GPUs: turn smoothed memory usage into inverse weights,
    # zero out GPUs above memory_threshold, and sample one GPU proportionally.
    gpu_memory_raw = get_gpu_memory_map() + 10
gpu_memory = gpu_memory_raw / smooth_ratio
gpu_memory = gpu_memory.sum() / (gpu_memory + 10)
gpu_memory[gpu_memory_raw > memory_threshold] = 0
gpu_prob = gpu_memory / gpu_memory.sum()
cuda = str(np.random.choice(len(gpu_prob), p=gpu_prob))
print('GPU select prob: {}, Select GPU {}'.format(gpu_prob, cuda))
return cuda
| 2,888 | 29.09375 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/lr_policy.py |
"""Learning rate policies."""
import numpy as np
from pycls.config import cfg
def lr_fun_steps(cur_epoch):
"""Steps schedule (cfg.OPTIM.LR_POLICY = 'steps')."""
ind = [i for i, s in enumerate(cfg.OPTIM.STEPS) if cur_epoch >= s][-1]
return cfg.OPTIM.BASE_LR * (cfg.OPTIM.LR_MULT ** ind)
def lr_fun_exp(cur_epoch):
"""Exponential schedule (cfg.OPTIM.LR_POLICY = 'exp')."""
return cfg.OPTIM.BASE_LR * (cfg.OPTIM.GAMMA ** cur_epoch)
def lr_fun_cos(cur_epoch):
"""Cosine schedule (cfg.OPTIM.LR_POLICY = 'cos')."""
base_lr, max_epoch = cfg.OPTIM.BASE_LR, cfg.OPTIM.MAX_EPOCH
return 0.5 * base_lr * (1.0 + np.cos(np.pi * cur_epoch / max_epoch))
def get_lr_fun():
"""Retrieves the specified lr policy function"""
lr_fun = 'lr_fun_' + cfg.OPTIM.LR_POLICY
if lr_fun not in globals():
raise NotImplementedError('Unknown LR policy:' + cfg.OPTIM.LR_POLICY)
return globals()[lr_fun]
def get_epoch_lr(cur_epoch):
"""Retrieves the lr for the given epoch according to the policy."""
lr = get_lr_fun()(cur_epoch)
# Linear warmup
if cur_epoch < cfg.OPTIM.WARMUP_EPOCHS:
alpha = cur_epoch / cfg.OPTIM.WARMUP_EPOCHS
warmup_factor = cfg.OPTIM.WARMUP_FACTOR * (1.0 - alpha) + alpha
lr *= warmup_factor
return lr
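def cosine_warmup_lr_sketch(cur_epoch, base_lr=0.1, max_epoch=100,
                            warmup_epochs=5, warmup_factor=0.1):
    """Self-contained sketch of the cosine policy with linear warmup above;
    the default values here are assumptions, not cfg defaults."""
    lr = 0.5 * base_lr * (1.0 + np.cos(np.pi * cur_epoch / max_epoch))
    if cur_epoch < warmup_epochs:
        alpha = cur_epoch / warmup_epochs
        lr *= warmup_factor * (1.0 - alpha) + alpha
    return lr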
| 1,643 | 31.235294 | 107 | py |
RobDanns | RobDanns-main/deep_learning/pycls/utils/meters.py |
"""Meters."""
from collections import deque
import datetime
import numpy as np
from pycls.config import cfg
from pycls.utils.timer import Timer
import pycls.utils.logging as lu
import pycls.utils.metrics as metrics
def eta_str(eta_td):
"""Converts an eta timedelta to a fixed-width string format."""
days = eta_td.days
hrs, rem = divmod(eta_td.seconds, 3600)
mins, secs = divmod(rem, 60)
return '{0:02},{1:02}:{2:02}:{3:02}'.format(days, hrs, mins, secs)
class ScalarMeter(object):
"""Measures a scalar value (adapted from Detectron)."""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
def reset(self):
self.deque.clear()
self.total = 0.0
self.count = 0
def add_value(self, value):
self.deque.append(value)
self.count += 1
self.total += value
def get_win_median(self):
return np.median(self.deque)
def get_win_avg(self):
return np.mean(self.deque)
def get_global_avg(self):
return self.total / self.count
class TrainMeter(object):
"""Measures training stats."""
def __init__(self, epoch_iters):
self.epoch_iters = epoch_iters
self.max_iter = cfg.OPTIM.MAX_EPOCH * epoch_iters
self.iter_timer = Timer()
self.loss = ScalarMeter(cfg.LOG_PERIOD)
self.loss_total = 0.0
self.lr = None
# Current minibatch errors (smoothed over a window)
self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
# Number of misclassified examples
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
def reset(self, timer=False):
if timer:
self.iter_timer.reset()
self.loss.reset()
self.loss_total = 0.0
self.lr = None
self.mb_top1_err.reset()
self.mb_top5_err.reset()
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
def iter_tic(self):
self.iter_timer.tic()
def iter_toc(self):
self.iter_timer.toc()
def update_stats(self, top1_err, top5_err, loss, lr, mb_size):
# Current minibatch stats
self.mb_top1_err.add_value(top1_err)
self.mb_top5_err.add_value(top5_err)
self.loss.add_value(loss)
self.lr = lr
# Aggregate stats
self.num_top1_mis += top1_err * mb_size
self.num_top5_mis += top5_err * mb_size
self.loss_total += loss * mb_size
self.num_samples += mb_size
def get_iter_stats(self, cur_epoch, cur_iter):
eta_sec = self.iter_timer.average_time * (
self.max_iter - (cur_epoch * self.epoch_iters + cur_iter + 1)
)
eta_td = datetime.timedelta(seconds=int(eta_sec))
mem_usage = metrics.gpu_mem_usage()
stats = {
'_type': 'train_iter',
'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
'iter': '{}/{}'.format(cur_iter + 1, self.epoch_iters),
'time_avg': self.iter_timer.average_time,
'time_diff': self.iter_timer.diff,
'eta': eta_str(eta_td),
'top1_err': self.mb_top1_err.get_win_median(),
'top5_err': self.mb_top5_err.get_win_median(),
'loss': self.loss.get_win_median(),
'lr': self.lr,
'mem': int(np.ceil(mem_usage))
}
return stats
def log_iter_stats(self, cur_epoch, cur_iter):
if (cur_iter + 1) % cfg.LOG_PERIOD != 0:
return
stats = self.get_iter_stats(cur_epoch, cur_iter)
lu.log_json_stats(stats)
def get_epoch_stats(self, cur_epoch):
eta_sec = self.iter_timer.average_time * (
self.max_iter - (cur_epoch + 1) * self.epoch_iters
)
eta_td = datetime.timedelta(seconds=int(eta_sec))
mem_usage = metrics.gpu_mem_usage()
top1_err = self.num_top1_mis / self.num_samples
top5_err = self.num_top5_mis / self.num_samples
avg_loss = self.loss_total / self.num_samples
stats = {
'_type': 'train_epoch',
'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
'time_avg': self.iter_timer.average_time,
'eta': eta_str(eta_td),
'top1_err': top1_err,
'top5_err': top5_err,
'loss': avg_loss,
'lr': self.lr,
'mem': int(np.ceil(mem_usage))
}
return stats
def log_epoch_stats(self, cur_epoch, writer, params=0, flops=0, is_master=False):
stats = self.get_epoch_stats(cur_epoch)
lu.log_json_stats(stats, cur_epoch, writer, is_epoch=False, params=params, flops=flops, is_master=is_master)
class TestMeter(object):
"""Measures testing stats."""
def __init__(self, max_iter):
self.max_iter = max_iter
self.iter_timer = Timer()
# Current minibatch errors (smoothed over a window)
self.mb_top1_err = ScalarMeter(cfg.LOG_PERIOD)
self.mb_top5_err = ScalarMeter(cfg.LOG_PERIOD)
# Min errors (over the full test set)
self.min_top1_err = 100.0
self.min_top5_err = 100.0
# Number of misclassified examples
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
def reset(self, min_errs=False):
if min_errs:
self.min_top1_err = 100.0
self.min_top5_err = 100.0
self.iter_timer.reset()
self.mb_top1_err.reset()
self.mb_top5_err.reset()
self.num_top1_mis = 0
self.num_top5_mis = 0
self.num_samples = 0
def iter_tic(self):
self.iter_timer.tic()
def iter_toc(self):
self.iter_timer.toc()
def update_stats(self, top1_err, top5_err, mb_size):
self.mb_top1_err.add_value(top1_err)
self.mb_top5_err.add_value(top5_err)
self.num_top1_mis += top1_err * mb_size
self.num_top5_mis += top5_err * mb_size
self.num_samples += mb_size
def get_iter_stats(self, cur_epoch, cur_iter):
mem_usage = metrics.gpu_mem_usage()
iter_stats = {
'_type': 'test_iter',
'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
'iter': '{}/{}'.format(cur_iter + 1, self.max_iter),
'time_avg': self.iter_timer.average_time,
'time_diff': self.iter_timer.diff,
'top1_err': self.mb_top1_err.get_win_median(),
'top5_err': self.mb_top5_err.get_win_median(),
'mem': int(np.ceil(mem_usage))
}
return iter_stats
def log_iter_stats(self, cur_epoch, cur_iter):
if (cur_iter + 1) % cfg.LOG_PERIOD != 0:
return
stats = self.get_iter_stats(cur_epoch, cur_iter)
lu.log_json_stats(stats)
def get_epoch_stats(self, cur_epoch):
top1_err = self.num_top1_mis / self.num_samples
top5_err = self.num_top5_mis / self.num_samples
self.min_top1_err = min(self.min_top1_err, top1_err)
self.min_top5_err = min(self.min_top5_err, top5_err)
mem_usage = metrics.gpu_mem_usage()
stats = {
'_type': 'test_epoch',
'epoch': '{}/{}'.format(cur_epoch + 1, cfg.OPTIM.MAX_EPOCH),
'time_avg': self.iter_timer.average_time,
'top1_err': top1_err,
'top5_err': top5_err,
'min_top1_err': self.min_top1_err,
'min_top5_err': self.min_top5_err,
'mem': int(np.ceil(mem_usage))
}
return stats
def log_epoch_stats(self, cur_epoch, writer, params=0, flops=0, model=None, is_master=False):
stats = self.get_epoch_stats(cur_epoch)
lu.log_json_stats(stats, cur_epoch, writer, is_epoch=True, params=params, flops=flops, model=model,
is_master=is_master)
| 8,313 | 32.934694 | 116 | py |
kge_ecotox_regression | kge_ecotox_regression-main/main.py |
"""
TODO:
- Train embedding model.
- Apply embeddings to data.
- Encode data.
- Train,valid,test model
"""
from autoencoder import create_auto_encoder
from model import create_model, CorrelelatedFeatures, ApproxKerasSVM, coeff_determination
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold
from random import shuffle
from collections import defaultdict
import tensorflow as tf
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression, LinearRegression, HuberRegressor, BayesianRidge
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, VotingRegressor, BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor
from sklearn.compose import TransformedTargetRegressor
from sklearn.preprocessing import QuantileTransformer, RobustScaler
from sklearn.tree import DecisionTreeRegressor
from itertools import product
from random import choice, choices
from sklearn.pipeline import Pipeline
from tqdm import tqdm
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA,FastICA
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import RFE
from sklearn.metrics import r2_score
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_selection import VarianceThreshold
from sklearn.dummy import DummyRegressor
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import cross_val_score, LeaveOneOut
MAX_ENCODER_EPOCHS = 1000
MAX_EPOCHS = 1000
EPSILON = 1e-10
MODEL = 'ComplEx'
hidden_dim = (128,)
SEED = 42
np.random.seed(SEED)
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
import warnings
warnings.filterwarnings('ignore')
def load_fingerprints(filename):
df = pd.read_csv(filename,index_col='chemical')
l = len(df.iloc[0]['fingerprint'])
out = {}
for c in df.index:
fp = df.loc[c]['fingerprint']
v = [int(f) for f in fp]
out[c] = np.asarray(v)
return out
def load_features(filename):
df = pd.read_csv(filename,index_col='chemical')
df = df.dropna()
columns = df.columns
out = {}
for c in df.index:
v = [df.loc[c][col] for col in columns]
out[c] = np.asarray(v)
return out
def load_one_hot(entities):
all_entities = list(set(entities))
out = {}
for e in entities:
v = np.zeros((len(all_entities),))
v[all_entities.index(e)] = 1
out[e] = np.asarray(v)
return out
def load_embeddings(filename,filename_ids):
df = np.load(filename)
ids = dict(np.load(filename_ids))
return {k:df[int(ids[k])] for k in ids}
def load_data(filename,filter_chemicals=None, filter_species=None):
df = pd.read_csv(filename)
X,y = [],[]
if filter_chemicals:
to_drop = set(df.chemical) - filter_chemicals
for c in to_drop:
df = df.drop(df[df.chemical == c].index)
if filter_species:
to_drop = set(df.species) - filter_species
for s in to_drop:
df = df.drop(df[df.species == s].index)
df = df.drop(df[df.study_duration > 24*14].index)
df = df.groupby(['chemical','species'],as_index=False).mean()
X = list(zip(df['chemical'],df['species']))
y = np.log(df.concentration+EPSILON)
tmp = np.asarray(df.study_duration).reshape((-1,1))
mms = StandardScaler()
tmp = mms.fit_transform(tmp)
experimental_features = dict(zip(X,tmp.reshape(-1,1)))
y = np.asarray(y).reshape((-1,1))
#y = MinMaxScaler().fit_transform(y)
return X, y, experimental_features
def data_split(X,Y,restrictions=None,method = 1, variant = 1, prop=0.33):
"""
C_x - chemical set
S_x - species set
t,v - training,validation
1. C_t \cap C_v == Ø and S_t \cap S_v != Ø,
2. C_t \cap C_v == Ø and S_t \cap S_v == Ø,
3. C_t \cap C_v != Ø and S_t \cap S_v != Ø,
4. C_t \cap C_v != Ø and S_t \cap S_v == Ø,
Variants where C_t \cap C_v != Ø (same for S_x):
1. C_t == C_v
2. |C_t \cap C_v| < |C_t \cup C_v|
Restrictions:
        Restriction of a set, e.g. s_1 \in S_v and |S_v|=1: {'S_v':{'content':[s_1],'max_len':1}}
        (see _example_data_split below for a toy call)
"""
C_t,C_v,S_t,S_v=map(set,[[]]*4)
    restrictions = {**{'C_t':{},'C_v':{},'S_t':{},'S_v':{}},**(restrictions or {})}
def filter_restrictions(C_t,C_v,S_t,S_v):
for _set,_inv_set,k in zip([C_t,C_v,S_t,S_v],[C_v,C_t,S_v,S_t],['C_t','C_v','S_t','S_v']):
if k in restrictions:
if 'content' in restrictions[k]:
_set |= restrictions[k]['content']
if 'not content' in restrictions[k]:
_set -= restrictions[k]['not content']
if 'max_len' in restrictions[k]:
while restrictions[k]['max_len'] < len(_set):
entity = choice(list(_set))
if not ('content' in restrictions[k] and entity in restrictions[k]['content']):
_set.remove(entity)
return C_t,C_v,S_t,S_v
def check_restrictions(C_t,C_v,S_t,S_v):
for _set,k,inv_k in zip([C_t,C_v,S_t,S_v],['C_t','C_v','S_t','S_v'],['C_v','C_t','S_v','S_t']):
if k in restrictions:
if 'content' in restrictions[k] and 'not content' in restrictions[k]:
try:
assert len(restrictions[k]['content'].intersection(restrictions[k]['not content'])) < 1
except AssertionError:
raise AssertionError('Set %s content conflict.' % k)
if 'content' in restrictions[k] and 'max_len' in restrictions[k]:
try:
assert len(restrictions[k]['content']) <= restrictions[k]['max_len']
except AssertionError:
raise AssertionError('Set %s content is longer than max length' % k)
if ((method == 1 and 'C' in k) or (method == 4 and 'S' in k) or method == 2) and 'content' in restrictions[inv_k]:
try:
assert restrictions[k]['content'].intersection(restrictions[inv_k]['content']) == set()
except AssertionError:
raise AssertionError('Intersection in %s content is not allowed in method %s.' % ('chemical' if method==1 else 'species',str(method)))
if method == 3 and 'content' in restrictions[inv_k]:
try:
assert restrictions[k]['content'].intersection(restrictions[inv_k]['content']) == set()
except AssertionError:
raise AssertionError('Intersection in set content is not allowed in method 3.')
C,S = map(set,zip(*X))
if method == 1:
C_t,C_v = train_test_split(list(C),test_size=prop)
if variant == 1:
S_t,S_v = S, S
else:
S_t = choices(list(S),k=int((1-prop)*len(S)))
S_v = choices(list(S),k=int(prop*len(S)))
if method == 2:
S_t,S_v = train_test_split(list(S),test_size=prop)
C_t,C_v = train_test_split(list(C),test_size=prop)
if method == 3:
X_t, X_v = train_test_split(X,test_size=prop)
C_t,S_t = map(set,zip(*X_t))
C_v,S_v = map(set,zip(*X_v))
if method == 4:
S_t,S_v = train_test_split(list(S),test_size=prop)
if variant == 1:
C_t,C_v = C, C
else:
C_t = choices(list(C),k=int((1-prop)*len(C)))
C_v = choices(list(C),k=int(prop*len(C)))
C_t,C_v,S_t,S_v = map(set,[C_t,C_v,S_t,S_v])
C_t,C_v,S_t,S_v = filter_restrictions(C_t,C_v,S_t,S_v)
if method == 1: C_t -= C_v
if method == 2:
C_t -= C_v
S_t -= S_v
if method == 4: S_t -= S_v
if method == 1:
assert C_t.intersection(C_v) == set()
if variant == 1:
S_t = S_v
assert S_t == S_v
else:
assert len(S_t.intersection(S_v)) < len(S_t.union(S_v))
if method == 2:
assert C_t.intersection(C_v) == set() and S_t.intersection(S_v) == set()
if method == 3:
assert len(C_t.intersection(C_v)) > 0 and len(S_t.intersection(S_v)) > 0
if method == 4:
assert S_t.intersection(S_v) == set()
if variant == 1:
C_t = C_v
assert C_t == C_v
else:
assert len(C_t.intersection(C_v)) < len(C_t.union(C_v))
check_restrictions(C_t,C_v,S_t,S_v)
Xtr = []
Xte = []
ytr = []
yte = []
for x,y in zip(X,Y):
c,s = x
if c in C_t and s in S_t:
Xtr.append(x)
ytr.append(y)
if c in C_v and s in S_v:
Xte.append(x)
yte.append(y)
return Xtr,Xte,ytr,yte
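def _example_data_split():
    # Toy illustration (assumed data) of method 1: chemicals are split into
    # disjoint train/test sets while both sides share the full species set.
    X = [('c1','s1'),('c2','s1'),('c3','s2'),('c4','s2')]
    Y = [1.0,2.0,3.0,4.0]
    return data_split(X,Y,restrictions={},method=1,variant=1,prop=0.5)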
class FilterFingerprints:
def __init__(self):
pass
def fit(self,X):
idx = []
for i,a in enumerate(X.T):
if len(np.unique(a)) > 1:
idx.append(i)
self.idx = idx
def transform(self,X):
if len(X.shape) > 1:
return X[:,self.idx]
else:
return X[self.idx]
def fit_transform(self,X):
self.fit(X)
return self.transform(X)
def compile_model(model):
model.compile(optimizer='adagrad',loss='log_cosh',metrics=['mae','mse',R2(name='r2')])
import math
def lcm(a, b):
return abs(a*b) // math.gcd(a, b)
def combine(Xs):
    """Row-align arrays of different lengths by repeating each one up to the
    least common multiple of all lengths, then concatenate column-wise
    (see _combine_example below)."""
    n = map(len,Xs)
l = max(*map(lambda x: lcm(len(x[0]),len(x[1])),product(Xs,Xs)))
r = [l//a for a in n]
tmp = []
for X,a in zip(Xs,r):
tmp.append(np.repeat(X,a,axis=0))
return np.concatenate(tmp,axis=1)
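def _combine_example():
    # Illustrative shapes: arrays with 2 and 3 rows are repeated up to
    # lcm(2,3)=6 rows each, then concatenated column-wise into shape (6, 2).
    a = np.ones((2,1))
    b = np.zeros((3,1))
    return combine([a,b]).shape  # -> (6, 2)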
def list_duplicates(seq):
tally = defaultdict(list)
for i,item in enumerate(seq):
tally[item].append(i)
return ((key,locs) for key,locs in tally.items() if len(locs)>1)
def run_model(C_t,C_v,S_t,S_v,y,
experimental_features,
fingerprints,
chemical_embedding,
species_embedding,
chemical_features,
merge_species=False):
"""
Take four classes of chemicals, two pairs of siblings, test these on one-two species, combine siblings, combine cusins, see performance drop. Repeat on species side.
Repeat with embeddings for chemicals and species and see the same performance on lower levels, but imporved over baseline on higher levels.
"""
"""
5-fold validation
+ 1-fold test set
"""
keys = set(y.keys())
keys_t = keys.intersection(set(product(C_t,S_t)))
keys_v = keys.intersection(set(product(C_v,S_v)))
ytr,yte = map(lambda x:np.asarray([y[i] for i in x]),[keys_t,keys_v])
if len(yte) < 1 or len(ytr) < 1:
return None,None,None
fingerprints_train,fingerprints_test = map(lambda x:np.asarray([fingerprints[i] for i,_ in x]),[keys_t,keys_v])
chemical_embedding_train,chemical_embedding_test = map(lambda x:np.asarray([chemical_embedding[i] for i,_ in x]),[keys_t,keys_v])
chemical_features_train,chemical_features_test = map(lambda x:np.asarray([chemical_features[i] for i,_ in x]),[keys_t,keys_v])
species_embedding_train,species_embedding_test = map(lambda x:np.asarray([species_embedding[i] for _,i in x]),[keys_t,keys_v])
experimental_features_train,experimental_features_test = map(lambda x:np.asarray([experimental_features[i] for i in x]),[keys_t,keys_v])
species_one_hot_encoder = OneHotEncoder(sparse=False)
sp_t = set(list(zip(*keys_t))[1])
sp_v = set(list(zip(*keys_v))[1])
sp = np.asarray(list(sp_t|sp_v)).reshape((-1,1))
species_one_hot_encoder.fit(sp)
species_one_hot_train,species_one_hot_test = map(lambda x:species_one_hot_encoder.transform(np.asarray(list(zip(*x))[1]).reshape((-1,1))),[keys_t,keys_v])
if merge_species:
for array in [species_embedding_train,species_one_hot_train,ytr]:
for elem,loc in list_duplicates([c for c,_ in keys_t]): #i.e. mean where c is the same
array[loc] = np.mean(array[loc])
for array in [species_embedding_test,species_one_hot_test,yte]:
for elem,loc in list_duplicates([c for c,_ in keys_v]):
array[loc] = np.mean(array[loc])
n_tr = ytr.shape[1]
n_te = yte.shape[1]
train_1 = combine([fingerprints_train,chemical_features_train,species_one_hot_train,experimental_features_train,ytr])
train_2 = combine([fingerprints_train,chemical_features_train,species_embedding_train,chemical_embedding_train,experimental_features_train,ytr])
test_1 = combine([fingerprints_test,chemical_features_test,species_one_hot_test,experimental_features_test,yte])
test_2 = combine([fingerprints_test,chemical_features_test,species_embedding_test,chemical_embedding_test,experimental_features_test,yte])
Xtr_1,ytr = train_1[:,:-n_tr],train_1[:,-n_tr:]
Xtr_2,ytr = train_2[:,:-n_tr],train_2[:,-n_tr:]
Xte_1,yte = test_1[:,:-n_te],test_1[:,-n_te:]
Xte_2,yte = test_2[:,:-n_te],test_2[:,-n_te:]
res1 = np.zeros(yte.ravel().shape)
res2 = np.zeros(yte.ravel().shape)
params = {'n_neighbors':[2,5,10,25,50,100],
'weights':['uniform','distance']}
n = min(len(ytr),5)
FOLDS = 10
for Xtr,Xte,res in zip([Xtr_1,Xtr_2],[Xte_1,Xte_2],[res1,res2]):
for _ in range(FOLDS):
regr = AdaBoostRegressor(n_estimators=10,loss='square')
regr.fit(Xtr,ytr.ravel())
res += regr.predict(Xte)/FOLDS
return res1,res2,yte
from SPARQLWrapper import SPARQLWrapper, JSON
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
sparql.setReturnFormat(JSON)
def get_species_name(ncbi_id):
q = """
select ?label where {
?s wdt:P685 "%s" ;
wdt:P225 ?label .
}
""" % ncbi_id
sparql.setQuery(q)
try:
results = sparql.query().convert()
for result in results["results"]["bindings"]:
out = result["label"]["value"]
return out
except:
return ncbi_id
def encode_fingerprints(fingerprints_all):
fingerprint_encoder, fingerprint_ae = create_auto_encoder(input_size=len(fingerprints_all[0]),dense_layers=(128,),noise=0.1)
fingerprint_ae.compile(optimizer='adagrad',loss='binary_crossentropy')
fingerprint_ae.fit(fingerprints_all,fingerprints_all,
epochs=MAX_ENCODER_EPOCHS,
callbacks=[EarlyStopping('loss',min_delta=1e-5)],
verbose=0)
return fingerprint_encoder.predict(fingerprints_all)
from sklearn.cluster import KMeans
# function returns WSS score for k values from 1 to kmax
def calculate_WSS(points, kmax):
sse = []
for k in range(1, kmax+1):
kmeans = KMeans(n_clusters = k).fit(points)
centroids = kmeans.cluster_centers_
pred_clusters = kmeans.predict(points)
curr_sse = 0
# calculate square of Euclidean distance of each point from its cluster center and add to current WSS
for i in range(len(points)):
curr_center = centroids[pred_clusters[i]]
curr_sse += (points[i, 0] - curr_center[0]) ** 2 + (points[i, 1] - curr_center[1]) ** 2
sse.append(curr_sse)
return sse
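def _pick_elbow(sse):
    # Heuristic elbow pick for calculate_WSS output (assumes sse[i] is the WSS
    # for k = i+1): WSS decreases with k, so choose the k ending the largest drop.
    drops = np.diff(sse)
    return int(np.argmin(drops)) + 2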
def define_chemical_clusters(fingerprints,k=15,use_pca=True):
if not isinstance(fingerprints,list):
fingerprints = [fingerprints]
keys = set.intersection(*[set(f.keys()) for f in fingerprints])
array = np.concatenate([np.asarray([v[k] for k in keys]) for v in fingerprints],axis=1)
if use_pca:
array = PCA(2).fit_transform(array)
    if k < 0:
        sse = calculate_WSS(array,25)
        # NOTE: WSS decreases monotonically with k, so argmin effectively
        # selects the largest k tried; an elbow pick (e.g. _pick_elbow above)
        # is the usual heuristic here.
        k = np.argmin(sse) + 1
plt.plot(sse)
plt.show()
clusters = defaultdict(set)
kmeans = KMeans(n_clusters = k).fit(array)
cp = kmeans.predict(array)
for k,v in zip(keys,cp):
clusters[v].add(k)
return clusters, kmeans.cluster_centers_
def merge_closest(clusters,cluster_centers,ord=2):
dist = {}
for i,cc1 in enumerate(cluster_centers):
for j,cc2 in enumerate(cluster_centers):
if i == j: continue
dist[(i,j)] = np.linalg.norm(cc1-cc2,ord=ord)
if len(dist) > 1:
merge,_ = sorted(dist.items(),key=lambda x:x[1])[0]
else:
merge = (i,j)
k1,k2 = merge
cluster_centers[k1] = np.mean([cluster_centers[k1],cluster_centers[k2]],axis=0)
cluster_centers = np.delete(cluster_centers,k2,axis=0)
clusters[k1] |= clusters[k2]
clusters.pop(k2,None)
return clusters, cluster_centers
def filter_data(X,Y,C_t,C_v,S_t,S_v):
Xtr,Xte,ytr,yte = [],[],[],[]
for x,y in zip(X,Y):
c,s = x
if c in C_t and s in S_t:
Xtr.append(x)
ytr.append(y)
if c in C_v and s in S_v:
Xte.append(x)
yte.append(y)
return Xtr,Xte,ytr,yte
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '/media/erik/Mass/Dropbox/NIVA_GITLAB/pySMIfp')
from smiles_fingerprints import smiles_fingerprint
def load_smiles_fingerprints():
q = """
select ?chembl ?smiles where {
?c wdt:P233 ?smiles ;
wdt:P592 ?chembl .
}
"""
converter = {}
sparql.setQuery(q)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
ch = result["chembl"]["value"]
smi = result['smiles']['value']
smifp = smiles_fingerprint(smi)
converter['http://rdf.ebi.ac.uk/resource/chembl/molecule/'+ch] = smifp
return converter
def save_smiles_fingerprints(fp,filename='data/smiles_fingerprints.csv'):
a = {}
for i in range(len(smiles_fingerprint('C'))):
a['sig%s'%str(i)] = [array[i] for _,array in fp.items()]
df = pd.DataFrame(data={'chemical':list(fp.keys()),**a})
df.to_csv(filename)
def read_smiles_fingerprints(filename):
df = pd.read_csv(filename)
cols = [c for c in df.columns if 'sig' in c]
chemicals = df['chemical'].values
arrays = df[cols].values
return dict(zip(chemicals,np.asarray(arrays)))
def chemical_similarities(fingerprints):
    """Pairwise Dice similarity, 2*|A & B|/(|A|+|B|), between binary fingerprints."""
    keys = fingerprints.keys()
    array = np.asarray([i for k,i in fingerprints.items()])
    sim = []
    for a in array:
        v = a @ array.T                       # |A & B| counts for 0/1 vectors
        w = np.sum(a) + np.sum(array,axis=1)  # |A| + |B|
        sim_score = 2*v/w
        sim.append(sim_score)
    return {k:s for k,s in zip(keys,sim)}
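def _chemical_similarities_example():
    # Toy check: A=[1,1,0] and B=[1,0,1] share one on-bit, so the Dice
    # similarity is 2*1/(2+2) = 0.5.
    fps = {'a': np.array([1,1,0]), 'b': np.array([1,0,1])}
    return chemical_similarities(fps)  # sim['a'][1] == 0.5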
def main():
"""
organic = obo['CHEBI_50860']
inorganic = obo['CHEBI_24835']
"""
model = 'ComplEx'
g1_parts = [[0],[0,1],[0,1,2]]
g2_parts = [[0],[0,1]]
p = list(product(g1_parts,g2_parts))
p += [p[-1]]
ul = (False,False)
f1,f2=[],[]
for g1p,g2p,in p:
for lit,gp,fs,name in zip([*ul],[g1p,g2p],[f1,f2],['_chemical_','_taxonomy_']):
fs.append(model+name+str(hash((lit,*gp))))
if (g1p,g2p) == p[-1]:
ul = (True,True)
organic_chemicals = set()
inorganic_chemicals = set()
salts = set()
for i in range(1,10):
df = pd.read_csv('./data/chemical_group_%s.csv' % str(i),index_col='parent')
try:
organic_chemicals |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_50860','children'].split(','))
except:
pass
try:
inorganic_chemicals |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_24835','children'].split(','))
except:
pass
try:
salts |= set(df.loc['http://purl.obolibrary.org/obo/CHEBI_24866','children'].split(','))
except:
pass
print('Num organic chemicals',len(organic_chemicals))
print('Num inorganic chemicals',len(inorganic_chemicals))
print('Num salts',len(salts))
C = organic_chemicals
try:
smiles_fingerprints = read_smiles_fingerprints('./data/smiles_fingerprints.csv')
except FileNotFoundError:
smiles_fingerprints = load_smiles_fingerprints()
save_smiles_fingerprints(smiles_fingerprints,'./data/smiles_fingerprints.csv')
mms = MinMaxScaler().fit_transform(np.asarray([smiles_fingerprints[k] for k in smiles_fingerprints]))
smiles_fingerprints = dict(zip(smiles_fingerprints,mms))
X,Y,experimental_features = load_data('./data/experiments.csv',filter_chemicals=None, filter_species=None)
pubchem_fingerprints = load_fingerprints('./data/chemicals_fingerprints.csv')
Y = {k:y for k,y in zip(X,Y)}
pubchem_fingerprints = chemical_similarities(pubchem_fingerprints)
chemical_embedding = load_embeddings('./data/embeddings/%s_entity_embeddings.npy' % f1[0],
'./data/embeddings/%s_entity_ids.npy' % f1[0])
species_embedding = load_embeddings('./data/embeddings/%s_entity_embeddings.npy' % f2[0],
'./data/embeddings/%s_entity_ids.npy' % f2[0])
chemical_features = load_features('./data/chemicals_features.csv')
chemical_features = dict(zip(chemical_features,MinMaxScaler().fit_transform(np.asarray([chemical_features[k] for k in chemical_features]))))
for cf in [QuantileTransformer(n_quantiles=100,output_distribution='normal')]:
chemical_embedding = dict(zip(chemical_embedding,cf.fit_transform(np.asarray([chemical_embedding[k] for k in chemical_embedding]))))
for cf in [QuantileTransformer(n_quantiles=100,output_distribution='normal')]:
species_embedding = dict(zip(species_embedding,cf.fit_transform(np.asarray([species_embedding[k] for k in species_embedding]))))
species_divisions = defaultdict(set)
for k in range(1,2):
df = pd.read_csv('./data/species_groups_%s.csv' % str(k), index_col='parent')
for s in df.index:
species_divisions[s] |= set(df.loc[s,'children'].split(','))
species_divisions = dict(filter(lambda x:len(x[1])>5,species_divisions.items()))
#for k in species_divisions:
#print(get_species_name(k.split('/')[-1]))
#species_divisions = defaultdict(set)
#df = pd.read_csv('./data/species_divisions.csv', index_col='parent')
#for s in df.index:
#species_divisions[s] |= set(df.loc[s,'children'].split(','))
C = set.intersection(*map(lambda k:set(k.keys()),[smiles_fingerprints,pubchem_fingerprints,chemical_features,chemical_embedding]))
for d in [smiles_fingerprints,pubchem_fingerprints,chemical_embedding,chemical_features]:
for c in set(d.keys()):
if not c in C:
d.pop(c,None)
n = 7
clusters, cluster_centers = define_chemical_clusters([smiles_fingerprints],k=max(-1,n),use_pca=False)
print(*map(lambda x:len(x[1]),clusters.items()))
data = {}
all_runs = {}
TOP_K = 10
while True:
for C,S in tqdm(product(clusters,species_divisions),total=len(clusters)*len(species_divisions)):
k = [C,S]
C = list(clusters[C])
S = species_divisions[S]
k[1] = get_species_name(k[1].split('/')[-1])
loo = LeaveOneOut()
predictions = []
y_true = []
for train_index, test_index in loo.split(C):
C_t = [C[i] for i in train_index]
C_v = [C[i] for i in test_index]
r1,r2,yte = run_model(C_t,C_v,S,S,Y,
experimental_features,
pubchem_fingerprints,
chemical_embedding,
species_embedding,
chemical_features,
merge_species=True)
if r1 is None and r2 is None: continue
r1 = np.mean(r1)
r2 = np.mean(r2)
y_true.append(np.mean(yte))
predictions.append((r1,r2))
y_true, predictions = map(np.asarray,[y_true,predictions])
if len(predictions) < 10: continue
try:
if len(predictions.shape) < 2:
predictions = np.expand_dims(predictions,axis=1)
rsq_1 = r2_score(y_true,predictions[:,0])
rsq_2 = r2_score(y_true,predictions[:,1])
all_runs[tuple(k)] = (rsq_1,rsq_2)
except ValueError:
pass
all_runs = dict(sorted(all_runs.items(),key=lambda x: sum(x[1])/2,reverse=True))
print(all_runs)
data[len(cluster_centers)] = all_runs
if len(cluster_centers) > 0:
clusters, cluster_centers = merge_closest(clusters,cluster_centers)
for k in list(all_runs.keys())[:TOP_K]:
_,s = k
species_divisions.pop(k,None)
else:
break
pd.to_pickle(data,'chemical_cluster_merging.pkl')
exit()
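    # NOTE: `count` is used below but never defined in this file; the lambda
    # here is an assumed stand-in (total train+test samples) to keep it runnable.
    count = lambda xtr, xte: len(xtr) + len(xte)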
ks = set()
for k in species_divisions:
S = species_divisions[k]
still_true = True
for k_c in clusters:
C = clusters[k_c]
Xtr,Xte,ytr,yte = filter_data(X,Y,C,C,S,S)
if count(Xtr,Xte) > 100: ks.add(k)
for k in tqdm(ks):
n=6
clusters, cluster_centers = define_chemical_clusters([smiles_fingerprints],k=max(-1,n))
S = species_divisions[k]
sn = get_species_name(k.split('/')[-1])
results = defaultdict(list)
i = 0
while True:
k_c = sorted(clusters,key=lambda x:len(clusters[x]),reverse=True)[0]
C_t = clusters[k_c]
if len(C_t) < 1: continue
C_t,C_v = train_test_split(list(C_t),test_size=0.25)
S_t = S
S_v = S
Xtr,Xte,ytr,yte = filter_data(X,Y,C_t,C_v,S_t,S_v)
try:
assert count(Xtr,Xte) > 20
r1,r2 = run_model(Xtr,
Xte,
ytr,
yte,
experimental_features,
pubchem_fingerprints,
chemical_embedding,
species_embedding,
chemical_features,
merge_species=True)
except AssertionError:
r1,r2 = float('nan'), float('nan')
except np.AxisError:
r1,r2 = float('nan'), float('nan')
results[i].append((r1,r2))
clusters, cluster_centers = merge_closest(clusters,cluster_centers)
if len(cluster_centers) < 1:
break
i += 1
v0 = [[v[0] for v in results[k]] for k in results]
v1 = [[v[1] for v in results[k]] for k in results]
fig, ax = plt.subplots()
for x,color,ran in zip([v0,v1],['red','green'],[np.arange(0,len(v0)*2,2),np.arange(1,len(v1)*2,2)]):
mins = [np.nanmin(a) for a in x]
maxes = [np.nanmax(a) for a in x]
means = [np.nanmean(a) for a in x]
std = [np.nanstd(a) for a in x]
mins,maxes,means,std = map(np.asarray,[mins,maxes,means,std])
ax.bar(ran,maxes,width=0.5,color=color)
#plt.ylim(-1,1)
ax.set_xticks(np.arange(0.5,len(v0)*2,2))
ax.set_xticklabels(('%s Clusters' % str(abs(i)) for i in range(-n,0)))
plt.savefig('./plots/chemical_clusters_taxon_%s.png' % sn)
exit()
#def tqdm(x,**params):
#return x
for filter_chemicals,string,TOP_K in tqdm(zip([inorganic_chemicals | salts],['organic'],[4]),total=1,desc='Chemical Groups'):
#if string=='organic': continue
for division in tqdm(S_v,total=len(S_v),desc='Divisions'):
if not len(S_v[division]) > 1: continue
model_params={'encode':False,'train_ae_fingerprints':False,'train_ae_species':False}
results = [[]]*TOP_K
f = lambda _s: sum([1 for c,s in X if (s == _s and c in C-filter_chemicals)])
tmp_division = list(sorted(S_v[division],key=f,reverse=True))[:TOP_K]
for i,s_v in tqdm(enumerate(tmp_division),desc='Species in division %s' % division,leave=False,total=len(tmp_division)):
C_restriction = {'C_v':{'not content':filter_chemicals},'C_t':{'not content':filter_chemicals}}
configs = []
#Method 1
configs.append((1, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
configs.append((1, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
#Method 2
configs.append((2, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
#Method 3
configs.append((3, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
configs.append((3, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
#Method 4
configs.append((4, 1, {'S_v':{'content':set([s_v]),'max_len':1}}))
configs.append((4, 2, {'S_v':{'content':set([s_v]),'max_len':1}}))
tmp_res = np.zeros((len(configs),2))
for j,config in tqdm(enumerate(configs),total=len(configs),leave=False,desc='Configs'):
m,v,res = config
r1_tmp = []
r2_tmp = []
for _ in range(10):
tf.keras.backend.clear_session()
prop = 0.3
Xtr,Xte,ytr,yte = data_split(X,Y,restrictions={**res,**C_restriction},method=m,variant=v,prop=prop)
try:
r1,r2 = run_model(Xtr,
Xte,
ytr,
yte,
experimental_features,
fingerprints,
chemical_embedding,
species_embedding,
model_params=model_params)
except:
r1,r2=0,0
r1_tmp.append(r1)
r2_tmp.append(r2)
tmp_res[j,0] = np.mean(r1_tmp)
tmp_res[j,1] = np.mean(r2_tmp)
results[i] = tmp_res
fig, axs = plt.subplots(1,len(results),figsize=(40, 10))
for i,ax in enumerate(axs):
ms = results[i]
baseline = ms[:,0]
over = ms[:,1]
baseline = np.nan_to_num(baseline, nan=0.0,posinf=0.0, neginf=0.0)
over = np.nan_to_num(over, nan=0.0,posinf=0.0, neginf=0.0)
width = 0.4
ax.bar(np.arange(0,len(baseline)*2,2),baseline,width,color='red')
ax.bar(np.arange(1,len(baseline)*2,2),over,width,color='green')
ax.set_title(get_species_name(tmp_division[i].split('/')[-1]))
ax.set_xticks(np.arange(0.5,len(baseline)*2,2))
ax.set_xticklabels((str(i) for i in range(len(configs))))
ax.set_ylim(0,max(*over,*baseline)+0.1)
plt.savefig('plots/division_%s_%s.png' % (division,string))
if __name__ == '__main__':
main()
| 32,681 | 35.394209 | 169 | py |
kge_ecotox_regression | kge_ecotox_regression-main/train_rdf2vec.py |
from pyrdf2vec.graphs import KG
from pyrdf2vec.samplers import UniformSampler
from pyrdf2vec.walkers import RandomWalker
from pyrdf2vec import RDF2VecTransformer
import pandas as pd
from rdflib import Graph, URIRef
import numpy as np
from main import load_data
import rdflib
d = './data/embeddings/'
pdf = [pd.read_csv('./data/chemicals_%s.csv' % str(i)) for i in range(3)]
kg1 = pd.concat(pdf)
kg2 = pd.read_csv('./data/taxonomy.csv')
X, _, _ = load_data('./data/experiments.csv')
entities1 = list(set(map(rdflib.URIRef,list(zip(*X))[0])))
entities2 = list(set(map(rdflib.URIRef,list(zip(*X))[1])))
for kg,kg_name,entities in zip([kg1,kg2],['chemical','taxonomy'],[entities1,entities2]):
g = Graph()
for t in zip(kg['subject'],kg['predicate'],kg['object']):
g.add(tuple(map(rdflib.URIRef,t)))
g.serialize('tmp.ttl',format='ttl')
kg = KG(location="tmp.ttl",file_type='ttl')
walkers = [RandomWalker(4, 5, UniformSampler())]
transformer = RDF2VecTransformer(walkers=walkers)
embeddings = transformer.fit_transform(kg,entities)
np.save(d + 'rdf2vec_%s_entity_embeddings.csv' % kg_name, embeddings)
np.save(d + 'rdf2vec_%s_entity_ids.csv' % kg_name, np.asarray(list(enumerate(entities))))
| 1,266 | 27.795455 | 93 | py |
kge_ecotox_regression | kge_ecotox_regression-main/embedding_model.py | from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, Embedding, Dense, Dropout, Conv2D, Flatten, Concatenate, Multiply
import tensorflow as tf
def min_distance_loss(w,epsilon=1.0):
    """Penalty on embedding rows of w that lie closer than epsilon to each other."""
    # Pairwise squared distances via ||a-b||^2 = ||a||^2 - 2*a.b + ||b||^2.
    r = tf.reduce_sum(w*w, 1)
    r = tf.reshape(r, [-1, 1])
    D = r - 2*tf.matmul(w, tf.transpose(w)) + tf.transpose(r)
    # Add epsilon on the diagonal so zero self-distances are not counted below.
    D = D + tf.linalg.diag(epsilon * tf.ones(D.shape[0]))
    # Count pairs closer than epsilon (normalized by the embedding dimension).
    return tf.reduce_sum(tf.where(D<epsilon,1.0,0.0))/tf.cast(w.shape[1],tf.float32)
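def _min_distance_loss_example():
    # Quick illustrative check: duplicated rows lie closer than epsilon and get
    # penalized, while well-separated rows contribute nothing.
    close = tf.constant([[1.0, 0.0], [1.0, 0.0]])
    far = tf.constant([[1.0, 0.0], [0.0, 5.0]])
    return min_distance_loss(close), min_distance_loss(far)  # -> (1.0, 0.0)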
def TransE(entities,relations,dim=200,bias=1,lamb=1,norm_size=0.0,mdl=0.0):
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
score = bias - tf.norm(h+r-t, ord=2, axis=-1)
loss = lamb - inp_label * score
loss = tf.where(loss>0,loss,0) + \
norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
min_distance_loss(entity_embedding.weights[0]) * mdl
model = Model(inputs=[inp,inp_label],outputs=score)
model.add_loss(loss)
model.compile(optimizer='adam',loss=None)
return model
def DistMult(entities,relations,dim=200,norm_size=0.0,mdl=0.0):
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
score = tf.keras.layers.Activation('linear')(tf.reduce_sum(h*r*t,axis=-1))
model = Model(inputs=[inp,inp_label],outputs=score)
loss = lambda true,pred: tf.reduce_sum(tf.math.log(1+tf.math.exp(-true*pred))) + \
norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
           min_distance_loss(entity_embedding.weights[0]) * mdl
model.compile(optimizer='adam',loss=loss)
return model
def ComplEx(entities,relations,dim=200,norm_size=0.0,mdl=0.0):
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
h_real,h_img = tf.split(h,2,axis=-1)
r_real,r_img = tf.split(r,2,axis=-1)
t_real,t_img = tf.split(t,2,axis=-1)
    # ComplEx score: Re(<h, r, conj(t)>), expanded into real and imaginary parts.
    score = tf.reduce_sum(r_real*h_real*t_real,axis=-1) + \
tf.reduce_sum(r_real*h_img*t_img,axis=-1) + \
tf.reduce_sum(r_img*h_real*t_img,axis=-1) - \
tf.reduce_sum(r_img*h_img*t_real,axis=-1)
model = Model(inputs=[inp,inp_label],outputs=score)
loss = lambda true,pred: tf.reduce_sum(tf.math.log(1+tf.math.exp(-true*pred))) + \
norm_size * tf.norm(entity_embedding.weights[0],ord=2)**2 + \
min_distance_loss(entity_embedding.weights[0]) * mdl
model.compile(optimizer='adam',loss=loss)
return model
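def _complex_usage_sketch():
    # Minimal usage sketch with toy sizes (all values are assumptions): build
    # the model and score a batch of (subject, predicate, object) index triples.
    m = ComplEx(entities=range(10), relations=range(3), dim=8)
    triples = tf.constant([[0., 1., 2.], [3., 0., 4.]])
    labels = tf.constant([1., -1.])  # consumed by the loss only, not the score
    return m([triples, labels])      # raw scores; higher = more plausible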
def ConvE(entities,relations):
dim = 200
inp = Input((3,))
inp_label = Input(())
s,p,o = tf.unstack(inp,axis=-1)
entity_embedding = Embedding(len(entities),dim,name='entity_embedding')
relation_embedding = Embedding(len(relations),dim,name='relation_embedding')
h,r,t = entity_embedding(s),relation_embedding(p),entity_embedding(o)
h = tf.reshape(h,(-1,20,10,1))
r = tf.reshape(r,(-1,20,10,1))
x = Concatenate(axis=2)([h,r])
x = Conv2D(16,(5,5),activation='relu')(x)
x = Dropout(0.2)(x)
x = Conv2D(16,(3,3),activation='relu')(x)
x = Dropout(0.2)(x)
x = Flatten()(x)
x = Dense(dim)(x)
x = Multiply()([x,t])
x = Dense(1,activation='sigmoid')(x)
model = Model(inputs=[inp,inp_label],outputs=x)
model.compile(optimizer='adam',loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.05))
return model
| 4,177 | 31.897638 | 108 | py |
kge_ecotox_regression | kge_ecotox_regression-main/pretrained_embedding_models.py |
import sys
import os
from itertools import product
from KGEkeras import DistMult, HolE, TransE, HAKE, ConvE, ComplEx, ConvR, RotatE, pRotatE, ConvKB, CosinE
from kerastuner import RandomSearch, HyperParameters, Objective, Hyperband, BayesianOptimization
from random import choice
from collections import defaultdict
from tensorflow.keras.losses import binary_crossentropy,hinge,mean_squared_error
from tensorflow.keras import Input
from tensorflow.keras import Model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, Callback, TerminateOnNaN, ReduceLROnPlateau
from sklearn.metrics.cluster import completeness_score
from tensorflow.keras.optimizers import Adam
import json
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from KGEkeras import loss_function_lookup
from lib.utils import generate_negative, oversample_data, load_data
from tqdm import tqdm
import string
import random
from random import choices
from lib.hptuner import HPTuner
import pickle
try:
from tensorflow_addons.callbacks import TimeStopping
except:
pass
from rdflib import Graph, URIRef, Literal, Namespace
from KGEkeras import LiteralConverter
from sklearn.decomposition import PCA
SECONDS_PER_TRAIL = 600
SECONDS_TO_TERMINATE = 3600
SEARCH_MAX_EPOCHS = 10
MAX_EPOCHS = 200
MIN_EPOCHS = 50
MAX_TRIALS = 20
PATIENCE = 10
EPSILON = 10e-7
models = {
#'DistMult':DistMult,
#'TransE':TransE,
#'HolE':HolE,
'ComplEx':ComplEx,
#'HAKE':HAKE,
#'pRotatE':pRotatE,
#'RotatE':RotatE,
#'ConvE':ConvE,
#'ConvKB':ConvKB,
}
class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, kg, ns=10, batch_size=32, shuffle=True):
self.batch_size = min(batch_size,len(kg))
self.kg = kg
self.ns = ns
self.num_e = len(set([s for s,_,_ in kg])|set([o for _,_,o in kg]))
self.shuffle = shuffle
self.indices = list(range(len(kg)))
self.on_epoch_end()
def __len__(self):
return len(self.kg) // self.batch_size
def __getitem__(self, index):
index = self.index[index * self.batch_size:(index + 1) * self.batch_size]
batch = [self.indices[k] for k in index]
X, y = self.__get_data(batch)
return X, y
def on_epoch_end(self):
self.index = np.arange(len(self.indices))
if self.shuffle == True:
np.random.shuffle(self.index)
def __get_data(self, batch):
tmp_kg = np.asarray([self.kg[i] for i in batch])
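        # corrupt each positive triple with `ns` random negatives and
        # interleave positives with negatives (assumed behaviour of the
        # lib.utils helpers below)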
negative_kg = generate_negative(tmp_kg,N=self.num_e,negative=self.ns)
X = oversample_data(kgs=[tmp_kg,negative_kg])
return X, None
def build_model(hp):
params = hp.copy()
params['e_dim'] = params['dim']
params['r_dim'] = params['dim']
params['name'] = 'embedding_model'
embedding_model = models[params['embedding_model']]
embedding_model = embedding_model(**params)
triple = Input((3,))
ftriple = Input((3,))
inputs = [triple, ftriple]
score = embedding_model(triple)
fscore = embedding_model(ftriple)
loss_function = loss_function_lookup(params['loss_function'])
loss = loss_function(score,fscore,params['margin'] or 1, 1)
model = Model(inputs=inputs, outputs=loss)
model.add_loss(loss)
model.compile(optimizer=Adam(learning_rate=ExponentialDecay(params['learning_rate'],decay_steps=100000,decay_rate=0.96)),
loss=None)
return model
def optimize_model(model, kg, lit=False, name='name', hp=None):
if lit:
lc = LiteralConverter(kg)
literals = lc.fit_transform()
kg = lc.g
literals = PCA(min(len(literals[0]),100)).fit_transform(literals)
else:
literals = None
kg -= [(s,p,o) for s,p,o in kg if isinstance(o,Literal)]
entities = set(kg.subjects()) | set(kg.objects())
relations = set(kg.predicates())
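    # map entity/relation URIs to contiguous integer ids so triples can be
    # fed to the Embedding layers as a dense integer array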
me = {k:i for i,k in enumerate(entities)}
mr = {k:i for i,k in enumerate(relations)}
kg = list(map(lambda x: (me[x[0]],mr[x[1]],me[x[2]]), kg))
bs = 512
kg = np.asarray(kg)
model_name = model
N = len(me)
M = len(mr)
hptuner = HPTuner(runs=MAX_TRIALS, objectiv_direction='min')
hptuner.add_value_hp('gamma',0,21)
hptuner.add_value_hp('dim',100,401,dtype=int)
hptuner.add_value_hp('negative_samples',10,101,dtype=int)
hptuner.add_value_hp('margin',1,11,dtype=int)
hptuner.add_list_hp('loss_function',['pairwize_hinge','pairwize_logistic','pointwize_hinge','pointwize_logistic'],exhaustive=True)
hptuner.add_fixed_hp('embedding_model',model)
hptuner.add_fixed_hp('dp',0.2)
hptuner.add_fixed_hp('hidden_dp',0.2)
hptuner.add_fixed_hp('num_entities',N)
hptuner.add_fixed_hp('num_relations',M)
if hp:
for k,i in hp.items():
hptuner.add_fixed_hp(k,i)
hptuner.add_fixed_hp('num_entities',N)
hptuner.add_fixed_hp('num_relations',M)
hptuner.add_fixed_hp('learning_rate',0.001)
hptuner.add_fixed_hp('regularization',0.001)
if lit:
hptuner.add_fixed_hp('literals',literals)
hptuner.add_fixed_hp('literal_activation','tanh')
if hp:
hptuner.next_hp_config()
hptuner.add_result(0.0)
with tqdm(total=hptuner.runs, desc='Trials') as pbar:
        while hptuner.is_active and hp is None:
            # use a separate name for the trial config: assigning to `hp`
            # here (as before) made the loop exit after a single trial
            trial_hp = hptuner.next_hp_config()
            model = build_model(trial_hp)
            tr_gen = DataGenerator(kg, batch_size=bs, shuffle=True, ns=trial_hp['negative_samples'])
            hist = model.fit(tr_gen,epochs=SEARCH_MAX_EPOCHS,verbose=2, callbacks=[EarlyStopping('loss'),TerminateOnNaN()])
            score = hist.history['loss'][-1]/hist.history['loss'][0]
            hptuner.add_result(score)
            tf.keras.backend.clear_session()
            pbar.update(1)
hp = hptuner.best_config()
#if hp is None:
#with open('./pretrained_hp/%s%s_kg.json' % (model_name,name), 'w') as fp:
#json.dump(hp, fp)
model = build_model(hp)
tr_gen = DataGenerator(kg, batch_size=bs, shuffle=True, ns=hp['negative_samples'])
hist = model.fit(tr_gen,epochs=MAX_EPOCHS, verbose=2, callbacks=[EarlyStopping('loss',patience=PATIENCE), TerminateOnNaN()])
if np.isnan(hist.history['loss'][-1]):
print(model_name,'nan loss.')
return optimize_model(model_name,kg,lit,name,None)
for l in model.layers:
if isinstance(l,models[model_name]):
m = l.name
m, W1, W2 = model, model.get_layer(m).entity_embedding.get_weights()[0], model.get_layer(m).relational_embedding.get_weights()[0]
m.save_weights('pretrained_models/model/'+name)
np.save(name+'_entity_embeddings.npy', W1)
np.save(name+'_entity_ids.npy',np.asarray(list(zip(entities,range(len(entities))))))
np.save(name+'_relational_embeddings.npy', W2)
np.save(name+'_relation_ids.npy',np.asarray(list(zip(relations,range(len(relations))))))
def main():
d = './data/embeddings/'
use_literals = product([False,True],[False,True])
g1_parts = [[0],[0,1],[0,1,2]]
g2_parts = [[0],[0,1]]
p = list(product(g1_parts,g2_parts))
p += [p[-1]]
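    # the final configuration is appended twice on purpose: its second run
    # is executed with literal features enabled via the `ul` switch below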
ul = (False,False)
for g1p,g2p in tqdm(p):
g1,g2 = Graph(),Graph()
for i in g1p:
g = Graph()
g.load('./data/chemicals_%s.ttl' % str(i),format='ttl')
g1 += g
for i in g2p:
g = Graph()
g.load('./data/taxonomy_%s.ttl' % str(i),format='ttl')
g2 += g
for lit,gp,kg,name in zip([*ul],[g1p,g2p],[g1,g2],['_chemical_','_taxonomy_']):
#hp_file = '../KGE-CEP/pretrained_hp/%s%s_kg.json' % (model,name)
hp = {'e_dim':100,
'negative_samples':10,
'loss_function':'pairwize_logistic'}
model = 'ComplEx'
f = d+model+name+str(hash((lit,*gp)))
optimize_model(model,kg,lit,name=f,hp=hp)
tf.keras.backend.clear_session()
if (g1p,g2p) == p[-1]:
ul = (True,True)
if __name__ == '__main__':
main()
| 8,625 | 30.140794 | 134 | py |
kge_ecotox_regression | kge_ecotox_regression-main/create_data.py |
"""
TODO:
- Load LC50 data from ECOTOX.
- Take median per chemical species pairs.
- Defined chemical groups.
- Export files per chemical groups and each species.
- Forall chemicals and species export relevant KGs.
"""
from tera.DataAggregation import Taxonomy, Effects, Traits
from tera.DataAccess import EffectsAPI
from tera.DataIntegration import DownloadedWikidata, LogMapMapping
from tera.utils import strip_namespace, unit_conversion
from tqdm import tqdm
from rdflib import Graph, URIRef, Literal, BNode, Namespace
from rdflib.namespace import RDFS, RDF
cco = Namespace('http://rdf.ebi.ac.uk/terms/chembl#')
skos = Namespace('http://www.w3.org/2004/02/skos/core#')
obo = Namespace('http://purl.obolibrary.org/obo/')
import pandas as pd
from collections import defaultdict
import pubchempy as pcp
import numpy as np
def get_subgraph(to_visit, graph, backtracking=0):
out = Graph()
visited = set()
while to_visit:
curr = to_visit.pop()
visited.add(curr)
tmp = set(graph.triples((curr,None,None)))
for t in tmp:
out.add(t)
to_visit |= set([o for _,_,o in tmp if not isinstance(o,Literal)])
to_visit -= visited
if backtracking > 0:
tmp = set()
for s in set([s for s,_,_ in out]):
tmp |= set(graph.subjects(object=s))
for t in out:
graph.remove(t)
return out + get_subgraph(tmp, graph, backtracking-1)
return out
def load_endpoint_data():
ed = Effects(directory='../ecotox_data/',verbose=False)
species_mapping = LogMapMapping(filename='./data/final_mappings.txt')
chemicals_mappings = DownloadedWikidata(filename='./data/cas_to_chembl.csv')
species_mapping.load()
chemicals_mappings.load()
ncbi_namespace = Namespace('https://www.ncbi.nlm.nih.gov/taxonomy/')
species_mapping = [(ed.namespace['taxon/'+k],ncbi_namespace['taxon/'+i.pop(0)]) for k,i in species_mapping.mappings.items()]
ed.replace(species_mapping)
chembl_namespace = Namespace('http://rdf.ebi.ac.uk/resource/chembl/molecule/')
chemicals_mappings = [(ed.namespace['cas/'+k],chembl_namespace[i.pop(0)]) for k,i in chemicals_mappings.mappings.items()]
ed.replace(chemicals_mappings)
endpoints = EffectsAPI(dataobject=ed, verbose=True).get_endpoint(c=None, s=None)
d = defaultdict(list)
for c,s,cc,cu,ep,ef,sd,sdu in endpoints:
try:
sd = float(sd)
except:
continue
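        # normalise study duration to hours; records with unknown time
        # units are skipped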
if 'day' in str(sdu).lower():
sd *= 24
elif 'week' in str(sdu).lower():
sd *= (7*24)
elif 'hour' in str(sdu).lower():
sd *= 1
else:
continue
if ('LC50' in str(ep) or 'LD50' in str(ep) or ('EC50' in str(ep) and 'MOR' in str(ef))) and ('ncbi' in str(s) and 'chembl' in str(c)):
try:
factor = unit_conversion(str(cu),'http://qudt.org/vocab/unit#MilligramPerLitre')
except:
factor = 0
if factor > 0:
cc = float(cc)
cc = cc*factor
d['chemical'].append(str(c))
d['species'].append(str(s))
d['concentration'].append(cc)
d['study_duration'].append(sd)
df = pd.DataFrame(data=d)
df.to_csv('./data/experiments.csv')
def fingerprints():
df = pd.read_csv('./data/experiments.csv')
mapping = DownloadedWikidata(filename='./data/chembl_to_cid.csv')
    to_look_for = mapping.convert(set(df['chemical']),reverse=False,strip=True)
    to_look_for = {k: i for k, i in to_look_for.items() if i != 'no mapping'}
    out = []
    fp = []
    # iterate over the mapping itself: zipping two sets (as before) does
    # not preserve the pairing between a chemical and its PubChem CID
    for c2, cid in tqdm(to_look_for.items(), total=len(to_look_for)):
        try:
            compound = pcp.Compound.from_cid(int(cid))
            fp.append(bin(int(compound.fingerprint,16))[2:])
            out.append(c2)
        except:
            pass
df = pd.DataFrame(data={'chemical':out,'fingerprint':fp})
df.to_csv('./data/chemicals_fingerprints.csv')
def chemical_features():
df = pd.read_csv('./data/experiments.csv')
mapping = DownloadedWikidata(filename='./data/chembl_to_cid.csv')
    to_look_for = mapping.convert(set(df['chemical']),reverse=False,strip=True)
    to_look_for = {k: i for k, i in to_look_for.items() if i != 'no mapping'}
    out = defaultdict(list)
    fs = ['xlogp','exact_mass','tpsa','complexity','charge']
    # same fix as in fingerprints(): keep each chemical paired with its CID
    for c2, cid in tqdm(to_look_for.items(), total=len(to_look_for)):
        try:
            compound = pcp.Compound.from_cid(int(cid))
            tmp = compound.to_dict(fs)
            for k in tmp:
                out[k].append(tmp[k])
            out['chemical'].append(c2)
        except:
            pass
df = pd.DataFrame(data=out)
df.to_csv('./data/chemicals_features.csv')
def load_chemical_groups():
"""
Split (in)organic
CHEBI:50860 (organic)
CHEBI:24835 (inorganic)
"""
df = pd.read_csv('./data/experiments.csv')
chemicals = df['chemical']
graph = Graph()
graph.parse('../chembl/chembl_26.0_molecule_chebi_ls.ttl',format='ttl')
mapping = defaultdict()
for c in chemicals:
c = URIRef(c)
m = list(graph.objects(subject=c,predicate=skos['exactMatch']))
if m:
mapping[c]=m.pop(0)
chebi_graph = Graph()
chebi_graph.parse('../chebi/chebi.ttl',format='ttl')
chebi_graph = replace(chebi_graph,{i:k for k,i in mapping.items()})
for steps in range(1,20):
out = defaultdict(set)
desendents=set()
for c in map(lambda x:URIRef(x), chemicals):
p = ' / '.join('<'+r+'>' for r in [RDFS.subClassOf]*steps)
qres = chebi_graph.query(
"""SELECT DISTINCT ?parent
WHERE {
<%s> %s ?parent
}""" % (str(c),p))
for r in qres:
desendents.add((c,r[-1]))
for c,p in desendents:
out[p].add(c)
df = pd.DataFrame(data={'parent':list(out.keys()),'children':[','.join(out[k]) for k in out]})
df.to_csv('./data/chemical_group_%s.csv' % str(steps))
def load_species_groups():
df = pd.read_csv('./data/experiments.csv')
t = Graph()
t.load('./data/taxonomy_0.ttl',format='ttl')
species = set(df['species'])
for steps in range(0,20):
out_taxon = defaultdict(set)
out_division = defaultdict(set)
desendents = set()
for c in map(URIRef,species):
p = ' / '.join('<'+r+'>' for r in [RDF.type,*[RDFS.subClassOf]*steps])
qres = t.query(
"""SELECT DISTINCT ?parent
WHERE {
<%s> %s ?parent
}""" % (str(c),p))
for r in qres:
desendents.add((c,r[-1]))
for c,p in desendents:
if 'division' in str(p):
out_division[p].add(c)
else:
out_taxon[p].add(c)
df = pd.DataFrame(data={'parent':list(out_division.keys()),'children':[','.join(out_division[k]) for k in out_division]})
df.to_csv('./data/species_divisions.csv')
df = pd.DataFrame(data={'parent':list(out_taxon.keys()),'children':[','.join(out_taxon[k]) for k in out_taxon]})
df.to_csv('./data/species_groups_%s.csv' % str(steps))
def replace(graph,mapping):
    # snapshot the triples first: mutating an rdflib Graph while iterating
    # over it is unsafe, and replacing s, p, o one term at a time (as the
    # previous version did) could re-add a triple containing an
    # already-replaced term
    for s,p,o in list(graph):
        s2, p2, o2 = mapping.get(s,s), mapping.get(p,p), mapping.get(o,o)
        if (s2,p2,o2) != (s,p,o):
            graph.remove((s,p,o))
            graph.add((s2,p2,o2))
    return graph
def load_chemical_graph():
df = pd.read_csv('./data/experiments.csv')
mapping = DownloadedWikidata(filename='./data/chembl_to_mesh.csv')
mapping.load()
mapping = {URIRef('http://id.nlm.nih.gov/mesh/'+i.pop(0)):URIRef('http://rdf.ebi.ac.uk/resource/chembl/molecule/'+k) for k,i in mapping.mappings.items()}
mesh_graph = Graph()
mesh_graph.parse('../mesh/mesh.nt',format='nt')
mesh_graph = replace(mesh_graph,mapping)
graph = Graph()
graph.parse('../chembl/chembl_26.0_molecule_chebi_ls.ttl',format='ttl')
mapping = defaultdict()
for c in df['chemical']:
c = URIRef(c)
m = list(graph.objects(subject=c,predicate=skos['exactMatch']))
if m:
mapping[c]=m.pop(0)
chebi_graph = Graph()
chebi_graph.parse('../chebi/chebi.ttl',format='ttl')
chebi_graph = replace(chebi_graph,{i:k for k,i in mapping.items()})
chembl_graph = Graph()
for f in [#'../chembl/chembl_26.0_molecule.ttl',
'../chembl/chembl_26.0_molhierarchy.ttl',
'../chembl/chembl_26.0_target.ttl',
'../chembl/chembl_26.0_targetrel.ttl',
'../chembl/chembl_26.0_moa.ttl']:
chembl_graph.parse(f,format='ttl')
for i,g in enumerate([mesh_graph,chebi_graph,chembl_graph]):
graph = get_subgraph(set([URIRef(a) for a in set(df['chemical'])]), g, backtracking=0)
graph.serialize('./data/chemicals_%s.ttl' % str(i),format='ttl')
def load_taxonomy_graph():
df = pd.read_csv('./data/experiments.csv')
t = Taxonomy(directory='../taxdump/', verbose=True, taxon_namespace='http://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=Info&id=')
ne = DownloadedWikidata(filename='./data/ncbi_to_eol.csv', verbose=False)
n = list(set(t.graph.subjects(predicate=t.namespace['rank'],
object=t.namespace['rank/species'])))
tr = Traits(directory='../eol/', verbose=True)
conv = ne.convert(n, strip=True)
converted = [(tr.namespace[i],k) for k,i in conv.items() if i != 'no mapping']
tr.replace(converted)
for i,g in enumerate([t.graph,tr.graph]):
tmp = set([URIRef(a) for a in set(df['species'])])
graph = get_subgraph(tmp, g, backtracking=0)
graph.serialize('./data/taxonomy_%s.ttl' % str(i),format='ttl')
if __name__ == '__main__':
#load_endpoint_data()
#fingerprints()
#chemical_features()
#load_species_groups()
#load_chemical_groups()
#load_chemical_graph()
load_taxonomy_graph()
| 10,807 | 30.510204 | 157 | py |
kge_ecotox_regression | kge_ecotox_regression-main/autoencoder.py |
from tensorflow.keras.layers import Dense, GaussianNoise, Input, LayerNormalization
from tensorflow.keras.models import Model
from tensorflow import keras
def create_auto_encoder(input_size, dense_layers = (10,), noise=0):
    # build the encoder as its own model and then wrap it: the previous
    # `encoder = autoencoder` aliased a single Sequential, so the returned
    # "encoder" also contained all the decoder layers
    encoder = keras.Sequential()
    if noise > 0:
        encoder.add(GaussianNoise(noise))
    for l in dense_layers:
        encoder.add(Dense(l,activation='relu'))
    autoencoder = keras.Sequential([encoder])
    for l in dense_layers[::-1]:
        autoencoder.add(Dense(l,activation='relu'))
    autoencoder.add(Dense(input_size,activation='sigmoid'))
    return encoder, autoencoder
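
# Illustrative usage (not part of the original file; shapes and values are
# assumed for the sketch):
#   encoder, autoencoder = create_auto_encoder(128, dense_layers=(64, 32), noise=0.1)
#   autoencoder.compile(optimizer='adam', loss='mse')
#   autoencoder.fit(X, X, epochs=10)      # X: float array of shape [n, 128]
#   codes = encoder.predict(X)            # bottleneck features, shape [n, 32]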
| 613 | 33.111111 | 83 | py |
lepard | lepard-main/main.py | import os, torch, json, argparse, shutil
from easydict import EasyDict as edict
import yaml
from datasets.dataloader import get_dataloader, get_datasets
from models.pipeline import Pipeline
from lib.utils import setup_seed
from lib.tester import get_trainer
from models.loss import MatchMotionLoss
from lib.tictok import Timers
from configs.models import architectures
from torch import optim
setup_seed(0)
def join(loader, node):
seq = loader.construct_sequence(node)
return '_'.join([str(i) for i in seq])
yaml.add_constructor('!join', join)
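# '!join' lets the YAML configs build strings from lists, e.g. (illustrative)
# `exp_dir: !join [lepard, 3dmatch]` resolves to "lepard_3dmatch"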
if __name__ == '__main__':
# load configs
parser = argparse.ArgumentParser()
parser.add_argument('config', type=str, help= 'Path to the config file.')
args = parser.parse_args()
with open(args.config,'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
config['snapshot_dir'] = 'snapshot/%s/%s' % (config['dataset']+config['folder'], config['exp_dir'])
config['tboard_dir'] = 'snapshot/%s/%s/tensorboard' % (config['dataset']+config['folder'], config['exp_dir'])
config['save_dir'] = 'snapshot/%s/%s/checkpoints' % (config['dataset']+config['folder'], config['exp_dir'])
config = edict(config)
os.makedirs(config.snapshot_dir, exist_ok=True)
os.makedirs(config.save_dir, exist_ok=True)
os.makedirs(config.tboard_dir, exist_ok=True)
if config.gpu_mode:
config.device = torch.device("cuda:0")
else:
config.device = torch.device('cpu')
    # back up the code for reproducibility
if config.mode == 'train':
os.system(f'cp -r models {config.snapshot_dir}')
os.system(f'cp -r configs {config.snapshot_dir}')
os.system(f'cp -r cpp_wrappers {config.snapshot_dir}')
os.system(f'cp -r datasets {config.snapshot_dir}')
os.system(f'cp -r kernels {config.snapshot_dir}')
os.system(f'cp -r lib {config.snapshot_dir}')
shutil.copy2('main.py',config.snapshot_dir)
# model initialization
config.kpfcn_config.architecture = architectures[config.dataset]
config.model = Pipeline(config)
# config.model = KPFCNN(config)
# create optimizer
if config.optimizer == 'SGD':
config.optimizer = optim.SGD(
config.model.parameters(),
lr=config.lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
)
elif config.optimizer == 'ADAM':
config.optimizer = optim.Adam(
config.model.parameters(),
lr=config.lr,
betas=(0.9, 0.999),
weight_decay=config.weight_decay,
)
#create learning rate scheduler
if 'overfit' in config.exp_dir :
config.scheduler = optim.lr_scheduler.MultiStepLR(
config.optimizer,
milestones=[config.max_epoch-1], # fix lr during overfitting
gamma=0.1,
last_epoch=-1)
else:
config.scheduler = optim.lr_scheduler.ExponentialLR(
config.optimizer,
gamma=config.scheduler_gamma,
)
config.timers = Timers()
# create dataset and dataloader
train_set, val_set, test_set = get_datasets(config)
config.train_loader, neighborhood_limits = get_dataloader(train_set,config,shuffle=True)
config.val_loader, _ = get_dataloader(val_set, config, shuffle=False, neighborhood_limits=neighborhood_limits)
config.test_loader, _ = get_dataloader(test_set, config, shuffle=False, neighborhood_limits=neighborhood_limits)
# config.desc_loss = MetricLoss(config)
config.desc_loss = MatchMotionLoss (config['train_loss'])
trainer = get_trainer(config)
if(config.mode=='train'):
trainer.train()
else:
trainer.test()
| 3,723 | 32.54955 | 116 | py |
lepard | lepard-main/models/matching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.position_encoding import VolumetricPositionEncoding as VolPE
def log_optimal_transport(scores, alpha, iters, src_mask, tgt_mask ):
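    # Differentiable optimal transport in log-space, as in SuperGlue
    # (Sarlin et al., 2020): an extra "dustbin" row/column scored by `alpha`
    # absorbs unmatched points, and the Sinkhorn iterations below
    # alternately normalise rows and columns so exp(Z) approaches a
    # transport plan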
b, m, n = scores.shape
if src_mask is None:
ms = m
ns = n
else :
ms = src_mask.sum(dim=1, keepdim=True)
ns = tgt_mask.sum(dim=1, keepdim=True)
bins0 = alpha.expand(b, m, 1)
bins1 = alpha.expand(b, 1, n)
alpha = alpha.expand(b, 1, 1)
Z = torch.cat([torch.cat([scores, bins0], -1),
torch.cat([bins1, alpha], -1)], 1)
norm = - (ms + ns).log() # [b, 1]
    log_mu = torch.cat([norm.repeat(1, m), ns.log() + norm], dim=1)
log_nu = torch.cat([norm.repeat(1, n), ms.log() + norm], dim=1)
u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu)
for _ in range(iters):
u = log_mu - torch.logsumexp( Z + v.unsqueeze(1), dim=2)
v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1)
Z= Z + u.unsqueeze(2) + v.unsqueeze(1)
Z = Z - norm.view(-1,1,1)
return Z
class Matching(nn.Module):
def __init__(self, config):
super().__init__()
self.match_type = config['match_type']
self.confidence_threshold = config['confidence_threshold']
d_model = config['feature_dim']
self.src_proj = nn.Linear(d_model, d_model, bias=False)
self.tgt_proj = nn.Linear(d_model, d_model, bias=False)
self.entangled= config['entangled']
if self.match_type == "dual_softmax":
self.temperature = config['dsmax_temperature']
elif self.match_type == 'sinkhorn':
#sinkhorn algorithm
self.skh_init_bin_score = config['skh_init_bin_score']
self.skh_iters = config['skh_iters']
self.skh_prefilter = config['skh_prefilter']
self.bin_score = nn.Parameter(
torch.tensor( self.skh_init_bin_score, requires_grad=True))
else:
raise NotImplementedError()
@staticmethod
@torch.no_grad()
def get_match( conf_matrix, thr, mutual=True):
mask = conf_matrix > thr
#mutual nearest
if mutual:
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
#find all valid coarse matches
index = (mask==True).nonzero()
b_ind, src_ind, tgt_ind = index[:,0], index[:,1], index[:,2]
mconf = conf_matrix[b_ind, src_ind, tgt_ind]
return index, mconf, mask
@staticmethod
@torch.no_grad()
def get_topk_match( conf_matrix, thr, mutual=True):
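        # NOTE: currently identical to get_match; the top-k truncation the
        # name suggests is not implemented here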
mask = conf_matrix > thr
#mutual nearest
if mutual:
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
#find all valid coarse matches
index = (mask==True).nonzero()
b_ind, src_ind, tgt_ind = index[:,0], index[:,1], index[:,2]
mconf = conf_matrix[b_ind, src_ind, tgt_ind]
return index, mconf, mask
def forward(self, src_feats, tgt_feats, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type="rotary"):
'''
@param src_feats: [B, S, C]
@param tgt_feats: [B, T, C]
@param src_mask: [B, S]
@param tgt_mask: [B, T]
@return:
'''
src_feats = self.src_proj(src_feats)
        tgt_feats = self.tgt_proj(tgt_feats)  # was src_proj; tgt_proj was defined but never used
data["src_feats_nopos"] = src_feats
data["tgt_feats_nopos"] = tgt_feats
if not self.entangled :
src_feats = VolPE.embed_pos(pe_type, src_feats, src_pe)
tgt_feats = VolPE.embed_pos(pe_type, tgt_feats, tgt_pe)
data["src_feats"] = src_feats
data["tgt_feats"] = tgt_feats
src_feats, tgt_feats = map(lambda feat: feat / feat.shape[-1] ** .5,
[src_feats, tgt_feats])
if self.match_type == "dual_softmax":
# dual softmax matching
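            # softmax over rows times softmax over columns: a soft mutual
            # nearest-neighbour criterion that is high only where the pair
            # is confident in both matching directions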
sim_matrix_1 = torch.einsum("bsc,btc->bst", src_feats, tgt_feats) / self.temperature
if src_mask is not None:
sim_matrix_2 = sim_matrix_1.clone()
sim_matrix_1.masked_fill_(~src_mask[:, :, None], float('-inf'))
sim_matrix_2.masked_fill_(~tgt_mask[:, None, :], float('-inf'))
conf_matrix = F.softmax(sim_matrix_1, 1) * F.softmax(sim_matrix_2, 2)
else :
conf_matrix = F.softmax(sim_matrix_1, 1) * F.softmax(sim_matrix_1, 2)
elif self.match_type == "sinkhorn" :
            # optimal transport via the Sinkhorn algorithm
sim_matrix = torch.einsum("bsc,btc->bst", src_feats, tgt_feats)
if src_mask is not None:
sim_matrix.masked_fill_(
~(src_mask[..., None] * tgt_mask[:, None]).bool(), float('-inf'))
log_assign_matrix = log_optimal_transport( sim_matrix, self.bin_score, self.skh_iters, src_mask, tgt_mask)
assign_matrix = log_assign_matrix.exp()
conf_matrix = assign_matrix[:, :-1, :-1].contiguous()
coarse_match, _, _ = self.get_match(conf_matrix, self.confidence_threshold)
return conf_matrix, coarse_match
| 5,412 | 29.931429 | 118 | py |
lepard | lepard-main/models/loss.py | import torch
import torch.nn as nn
import numpy as np
import open3d as o3d
from lib.benchmark_utils import to_o3d_pcd
from lib.visualization import *
import nibabel.quaternions as nq
from sklearn.metrics import precision_recall_fscore_support
from datasets.utils import blend_scene_flow, multual_nn_correspondence, knn_point_np
from models.matching import Matching as CM
def ransac_pose_estimation(src_pcd, tgt_pcd, corrs, distance_threshold=0.05, ransac_n=3):
src_pcd = to_o3d_pcd(src_pcd)
tgt_pcd = to_o3d_pcd(tgt_pcd)
corrs = o3d.utility.Vector2iVector(np.array(corrs).T)
result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
source=src_pcd, target=tgt_pcd, corres=corrs,
max_correspondence_distance=distance_threshold,
estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
ransac_n=ransac_n,
criteria=o3d.registration.RANSACConvergenceCriteria(50000, 1000))
return result_ransac.transformation
def computeTransformationErr(trans, info):
"""
    Compute the transformation error as an approximation of the RMSE of corresponding points.
    More information at http://redwood-data.org/indoor/registration.html
    Args:
        trans (numpy array): transformation matrices [n,4,4]
        info (numpy array): covariance matrices of the gt transformation parameters [n,4,4]
Returns:
p (float): transformation error
"""
t = trans[:3, 3]
r = trans[:3, :3]
q = nq.mat2quat(r)
er = np.concatenate([t, q[1:]], axis=0)
p = er.reshape(1, 6) @ info @ er.reshape(6, 1) / info[0, 0]
return p.item()
class MatchMotionLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.focal_alpha = config['focal_alpha']
self.focal_gamma = config['focal_gamma']
self.pos_w = config['pos_weight']
self.neg_w = config['neg_weight']
self.mot_w = config['motion_weight']
self.mat_w = config['match_weight']
self.motion_loss_type = config['motion_loss_type']
self.match_type = config['match_type']
self.positioning_type = config['positioning_type']
self.registration_threshold = config['registration_threshold']
self.confidence_threshold_metric = config['confidence_threshold_metric']
self.inlier_thr = config['inlier_thr']
self.fmr_thr = config['fmr_thr']
self.mutual_nearest = config['mutual_nearest']
self.dataset = config['dataset']
def forward(self, data):
loss_info = {}
loss = self.ge_coarse_loss(data, loss_info)
loss_info.update({ 'loss': loss })
return loss_info
def ge_coarse_loss(self, data, loss_info, eval_metric=False):
if self.dataset == "4dmatch":
s2t_flow = torch.zeros_like(data['s_pcd'])
for i, cflow in enumerate(data['coarse_flow']):
s2t_flow[i][: len(cflow)] = cflow
loss = 0.
src_mask = data['src_mask']
tgt_mask = data['tgt_mask']
conf_matrix_pred = data['conf_matrix_pred']
match_gt = data['coarse_matches']
R_s2t_gt = data['batched_rot']
t_s2t_gt = data['batched_trn']
#get the overlap mask, for dense motion loss
s_overlap_mask = torch.zeros_like(src_mask).bool()
for bi, corr in enumerate (match_gt):
s_overlap_mask[bi][ corr[0] ] = True
# compute focal loss
c_weight = (src_mask[:, :, None] * tgt_mask[:, None, :]).float()
conf_matrix_gt = self.match_2_conf_matrix(match_gt, conf_matrix_pred)
data['conf_matrix_gt'] = conf_matrix_gt
focal_coarse = self.compute_correspondence_loss(conf_matrix_pred, conf_matrix_gt, weight=c_weight)
recall, precision = self.compute_match_recall( conf_matrix_gt, data['coarse_match_pred'])
loss_info.update( { "focal_coarse": focal_coarse, "recall_coarse": recall, "precision_coarse": precision } )
loss = loss + self.mat_w * focal_coarse
if recall > 0.01 and self.mot_w > 0:
R_s2t_pred = data["R_s2t_pred"]
t_s2t_pred = data["t_s2t_pred"]
            # compute predicted flow. Note: for 4DMatch, R_pred/t_pred give the best rigid fit of the deformation
src_pcd_wrapped_pred = (torch.matmul(R_s2t_pred, data['s_pcd'].transpose(1, 2)) + t_s2t_pred).transpose(1, 2)
sflow_pred = src_pcd_wrapped_pred - data['s_pcd']
if self.dataset == '4dmatch':
spcd_deformed = data['s_pcd'] + s2t_flow
src_pcd_wrapped_gt = (torch.matmul(R_s2t_gt, spcd_deformed.transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
else : # 3dmatch
src_pcd_wrapped_gt = (torch.matmul(R_s2t_gt, data['s_pcd'].transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
sflow_gt = src_pcd_wrapped_gt - data['s_pcd']
e1 = torch.sum(torch.abs(sflow_pred - sflow_gt), 2)
e1 = e1[s_overlap_mask] # [data['src_mask']]
l1_loss = torch.mean(e1)
loss = loss + self.mot_w * l1_loss
#
# if eval_metric :
#
# match_pred, _, _ = CM.get_match(data['conf_matrix_pred'], thr=self.confidence_threshold_metric, mutual=self.mutual_nearest)
#
# '''Inlier Ratio (IR)'''
        #     ir = self.compute_inlier_ratio(match_pred, data, inlier_thr=self.inlier_thr,
        #                                    s2t_flow=s2t_flow if self.dataset == "4dmatch" else None)
# loss_info.update({"Inlier Ratio": ir.mean()})
#
# if self.dataset == '3dmatch':
#
# '''Feature Matching Recall (FMR)'''
# fmr = (ir > self.fmr_thr).float().sum() / len(ir)
# loss_info.update({"Feature Matching Recall": fmr})
#
# '''Registration Recall (RR)'''
# rot_, trn_ = self.ransac_regist_coarse(data['s_pcd'], data['t_pcd'], src_mask, tgt_mask , match_pred)
        #         rot, trn = rot_.to(data['s_pcd']) , trn_.to(data['s_pcd'])
        #         rr = self.compute_registration_recall(rot, trn, data, thr=self.registration_threshold)
        #         loss_info.update({'Registration_Recall': rr})
if self.positioning_type == "procrustes":
for layer_ind in data["position_layers"]:
# compute focal loss
rpe_conf_matrix = data["position_layers"][layer_ind]["conf_matrix"]
focal_rpe = self.compute_correspondence_loss(rpe_conf_matrix, conf_matrix_gt, weight=c_weight)
recall, precision = self.compute_match_recall(conf_matrix_gt,
data["position_layers"][layer_ind]['match_pred'])
# loss_info.update({'focal_layer_%d' % layer_ind: focal_rpe, 'recall_layer_%d' % layer_ind: recall,
# 'precision_layer_%d' % layer_ind: precision})
loss = loss + self.mat_w * focal_rpe
if recall >0.01 and self.mot_w > 0:
R_s2t_pred = data["position_layers"][layer_ind]["R_s2t_pred"]
t_s2t_pred = data["position_layers"][layer_ind]["t_s2t_pred"]
src_pcd_wrapped_pred = (torch.matmul(R_s2t_pred, data['s_pcd'].transpose(1, 2)) + t_s2t_pred).transpose(1, 2)
sflow_pred = src_pcd_wrapped_pred - data['s_pcd']
if self.dataset == '4dmatch':
spcd_deformed = data['s_pcd'] + s2t_flow
src_pcd_wrapped_gt = ( torch.matmul(R_s2t_gt, spcd_deformed.transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
else: # 3dmatch
src_pcd_wrapped_gt = ( torch.matmul(R_s2t_gt, data['s_pcd'].transpose(1, 2)) + t_s2t_gt).transpose(1, 2)
sflow_gt = src_pcd_wrapped_gt - data['s_pcd']
e1 = torch.sum(torch.abs(sflow_pred - sflow_gt), 2) #[data['src_mask']]
e1 = e1[s_overlap_mask] # [data['src_mask']]
l1_loss = torch.mean(e1)
loss = loss + self.mot_w * l1_loss
return loss
@staticmethod
def compute_nrfmr(match_pred, data, recall_thr=0.04):
s_pcd, t_pcd = data['s_pcd'], data['t_pcd']
s_pcd_raw = data['src_pcd_list']
sflow_list = data['sflow_list']
metric_index_list = data['metric_index_list']
batched_rot = data['batched_rot'] # B,3,3
batched_trn = data['batched_trn']
nrfmr = 0.
for i in range(len(s_pcd_raw)):
# use the match prediction as the motion anchor
match_pred_i = match_pred[match_pred[:, 0] == i]
s_id, t_id = match_pred_i[:, 1], match_pred_i[:, 2]
s_pcd_matched = s_pcd[i][s_id]
t_pcd_matched = t_pcd[i][t_id]
motion_pred = t_pcd_matched - s_pcd_matched
if len(s_pcd_matched) >= 3 :
# get the wrapped metric points
metric_index = metric_index_list[i]
sflow = sflow_list[i]
s_pcd_raw_i = s_pcd_raw[i]
metric_pcd = s_pcd_raw_i[metric_index]
metric_sflow = sflow[metric_index]
metric_pcd_deformed = metric_pcd + metric_sflow
metric_pcd_wrapped_gt = (torch.matmul(batched_rot[i], metric_pcd_deformed.T) + batched_trn[i]).T
# blend the motion for metric points
try:
metric_motion_pred, valid_mask = MatchMotionLoss.blend_anchor_motion(
metric_pcd.cpu().numpy(), s_pcd_matched.cpu().numpy(), motion_pred.cpu().numpy(), knn=3,
search_radius=0.1)
metric_pcd_wrapped_pred = metric_pcd + torch.from_numpy(metric_motion_pred).to(metric_pcd)
dist = torch.sqrt(torch.sum((metric_pcd_wrapped_pred - metric_pcd_wrapped_gt) ** 2, dim=1))
r = (dist < recall_thr).float().sum() / len(dist)
except :
r = 0
nrfmr = nrfmr + r
debug = False
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.013
metric_pcd_wrapped_gt = metric_pcd_wrapped_gt.cpu()
metric_pcd_wrapped_pred = metric_pcd_wrapped_pred.cpu()
err = metric_pcd_wrapped_pred - metric_pcd_wrapped_gt
mlab.points3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2],
scale_factor=scale_factor, color=c_pink)
mlab.points3d(metric_pcd_wrapped_pred[:, 0], metric_pcd_wrapped_pred[:, 1],
metric_pcd_wrapped_pred[:, 2], scale_factor=scale_factor, color=c_blue)
mlab.quiver3d(metric_pcd_wrapped_gt[:, 0], metric_pcd_wrapped_gt[:, 1], metric_pcd_wrapped_gt[:, 2],
err[:, 0], err[:, 1], err[:, 2],
scale_factor=1, mode='2ddash', line_width=1.)
mlab.show()
nrfmr = nrfmr / len(s_pcd_raw)
return nrfmr
@staticmethod
def blend_anchor_motion(query_loc, reference_loc, reference_flow, knn=3, search_radius=0.1):
'''approximate flow on query points
this function assume query points are sub- or un-sampled from reference locations
@param query_loc:[m,3]
@param reference_loc:[n,3]
@param reference_flow:[n,3]
@param knn:
@return:
blended_flow:[m,3]
'''
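        # inverse-distance-weighted kNN interpolation: blend the flows of
        # the k nearest anchors; anchors outside `search_radius` get a huge
        # distance and therefore a near-zero weight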
dists, idx = knn_point_np(knn, reference_loc, query_loc)
dists[dists < 1e-10] = 1e-10
mask = dists > search_radius
dists[mask] = 1e+10
weight = 1.0 / dists
weight = weight / np.sum(weight, -1, keepdims=True) # [B,N,3]
blended_flow = np.sum(reference_flow[idx] * weight.reshape([-1, knn, 1]), axis=1, keepdims=False)
mask = mask.sum(axis=1) < 3
return blended_flow, mask
def compute_correspondence_loss(self, conf, conf_gt, weight=None):
'''
@param conf: [B, L, S]
@param conf_gt: [B, L, S]
@param weight: [B, L, S]
@return:
'''
pos_mask = conf_gt == 1
neg_mask = conf_gt == 0
pos_w, neg_w = self.pos_w, self.neg_w
        # corner case: inject one dummy (zero-weight) entry so that neither
        # mask is empty and the loss stays well-defined
if not pos_mask.any():
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
neg_w = 0.
# focal loss
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
alpha = self.focal_alpha
gamma = self.focal_gamma
if self.match_type == "dual_softmax":
pos_conf = conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss = pos_w * loss_pos.mean()
return loss
elif self.match_type == "sinkhorn":
# no supervision on dustbin row & column.
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
loss = pos_w * loss_pos.mean() + neg_w * loss_neg.mean()
return loss
def match_2_conf_matrix(self, matches_gt, matrix_pred):
matrix_gt = torch.zeros_like(matrix_pred)
for b, match in enumerate (matches_gt) :
matrix_gt [ b][ match[0], match[1] ] = 1
return matrix_gt
@staticmethod
def compute_match_recall(conf_matrix_gt, match_pred) : #, s_pcd, t_pcd, search_radius=0.3):
'''
@param conf_matrix_gt:
@param match_pred:
@return:
'''
pred_matrix = torch.zeros_like(conf_matrix_gt)
b_ind, src_ind, tgt_ind = match_pred[:, 0], match_pred[:, 1], match_pred[:, 2]
pred_matrix[b_ind, src_ind, tgt_ind] = 1.
true_positive = (pred_matrix == conf_matrix_gt) * conf_matrix_gt
recall = true_positive.sum() / conf_matrix_gt.sum()
precision = true_positive.sum() / max(len(match_pred), 1)
return recall, precision
@staticmethod
def ransac_regist_coarse(batched_src_pcd, batched_tgt_pcd, src_mask, tgt_mask, match_pred ):
s_len = src_mask.sum(dim=1).int()
t_len = tgt_mask.sum(dim=1).int()
bsize = len(batched_src_pcd)
batched_src_pcd = MatchMotionLoss.tensor2numpy( batched_src_pcd)
batched_tgt_pcd = MatchMotionLoss.tensor2numpy( batched_tgt_pcd)
match_pred = MatchMotionLoss.tensor2numpy(match_pred)
rot = []
trn = []
for i in range(bsize):
s_pcd = batched_src_pcd[i][:s_len[i]]
t_pcd = batched_tgt_pcd[i][:t_len[i]]
pair_i = match_pred[:, 0] == i
n_pts = pair_i.sum()
if n_pts < 3 :
rot.append(torch.eye(3))
trn.append(torch.zeros((3,1)))
continue
ind = match_pred[pair_i]
s_ind, t_ind = ind[:, 1], ind[:, 2]
pose = ransac_pose_estimation(s_pcd, t_pcd, [s_ind, t_ind], distance_threshold=0.05)
pose = pose.copy()
rot.append(torch.from_numpy(pose[:3,:3]))
trn.append(torch.from_numpy(pose[:3,3:]))
return torch.stack(rot, dim=0 ), torch.stack(trn , dim=0)#ndarray
@staticmethod
def compute_inlier_ratio(match_pred, data, inlier_thr, s2t_flow=None):
s_pcd, t_pcd = data['s_pcd'], data['t_pcd'] #B,N,3
batched_rot = data['batched_rot'] #B,3,3
batched_trn = data['batched_trn']
if s2t_flow is not None: # 4dmatch
s_pcd_deformed = s_pcd + s2t_flow
s_pcd_wrapped = (torch.matmul(batched_rot, s_pcd_deformed.transpose(1, 2)) + batched_trn).transpose(1,2)
else: # 3dmatch
s_pcd_wrapped = (torch.matmul(batched_rot, s_pcd.transpose(1, 2)) + batched_trn).transpose(1,2)
s_pcd_matched = s_pcd_wrapped [match_pred[:,0], match_pred[:,1]]
t_pcd_matched = t_pcd [match_pred[:,0], match_pred[:,2]]
inlier = torch.sum( (s_pcd_matched - t_pcd_matched)**2 , dim= 1) < inlier_thr**2
bsize = len(s_pcd)
IR=[]
for i in range(bsize):
pair_i = match_pred[:, 0] == i
n_match = pair_i.sum()
inlier_i = inlier[pair_i]
n_inlier = inlier_i.sum().float()
if n_match <3:
IR.append( n_match.float()*0)
else :
IR.append(n_inlier/n_match)
return torch.stack(IR, dim=0)
@staticmethod
def compute_registration_recall(R_est, t_est, data, thr=0.2):
bs = len(R_est)
success = 0.
if data['gt_cov'] is not None:
err2 = thr ** 2
gt = np.zeros( (bs, 4, 4))
gt[:, -1,-1] = 1
gt[:, :3, :3] = data['batched_rot'].cpu().numpy()
gt[:, :3, 3:] = data['batched_trn'].cpu().numpy()
pred = np.zeros((bs, 4, 4))
pred[:, -1, -1] = 1
pred[:, :3, :3] = R_est.detach().cpu().numpy()
pred[:, :3, 3:] = t_est.detach().cpu().numpy()
for i in range(bs):
p = computeTransformationErr( np.linalg.inv(gt[i]) @ pred[i], data['gt_cov'][i])
if p <= err2:
success += 1
rr = success / bs
return rr
else :
return 0.
@staticmethod
def tensor2numpy(tensor):
if tensor.requires_grad:
tensor=tensor.detach()
return tensor.cpu().numpy() | 18,271 | 38.042735 | 137 | py |
lepard | lepard-main/models/position_encoding.py | import math
import torch
from torch import nn
class VolumetricPositionEncoding(nn.Module):
def __init__(self, config):
super().__init__()
self.feature_dim = config.feature_dim
self.vol_bnds = config.vol_bnds
self.voxel_size = config.voxel_size
self.vol_origin = self.vol_bnds[0]
self.pe_type = config.pe_type
def voxelize(self, xyz):
'''
@param xyz: B,N,3
@return: B,N,3
'''
if type ( self.vol_origin ) == list :
self.vol_origin = torch.FloatTensor(self.vol_origin ).view(1, 1, -1).to( xyz.device )
return (xyz - self.vol_origin) / self.voxel_size
@staticmethod
def embed_rotary(x, cos, sin):
'''
@param x: [B,N,d]
@param cos: [B,N,d] [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
@param sin: [B,N,d] [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
@return:
'''
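        # rotate consecutive channel pairs (x_{2i}, x_{2i+1}) by the angles
        # encoded in (cos, sin): the RoFormer rotary embedding applied to
        # point features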
x2 = torch.stack([-x[..., 1::2], x[..., ::2]], dim=-1).reshape_as(x).contiguous()
x = x * cos + x2 * sin
return x
@staticmethod
def embed_pos(pe_type, x, pe):
""" combine feature and position code
"""
if pe_type == 'rotary':
return VolumetricPositionEncoding.embed_rotary(x, pe[..., 0], pe[..., 1])
elif pe_type == 'sinusoidal':
return x + pe
else:
raise KeyError()
def forward(self, XYZ):
'''
@param XYZ: [B,N,3]
@return:
'''
bsize, npoint, _ = XYZ.shape
vox = self.voxelize( XYZ)
x_position, y_position, z_position = vox[..., 0:1], vox[...,1:2], vox[...,2:3]
div_term = torch.exp( torch.arange(0, self.feature_dim // 3, 2, dtype=torch.float, device=XYZ.device) * (-math.log(10000.0) / (self.feature_dim // 3)))
div_term = div_term.view( 1,1, -1) # [1, 1, d//6]
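        # a third of the channels per spatial axis, with geometrically
        # spaced frequencies as in the standard Transformer sinusoidal
        # encoding, but driven by voxel coordinates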
sinx = torch.sin(x_position * div_term) # [B, N, d//6]
cosx = torch.cos(x_position * div_term)
siny = torch.sin(y_position * div_term)
cosy = torch.cos(y_position * div_term)
sinz = torch.sin(z_position * div_term)
cosz = torch.cos(z_position * div_term)
if self.pe_type == 'sinusoidal' :
position_code = torch.cat( [ sinx, cosx, siny, cosy, sinz, cosz] , dim=-1 )
elif self.pe_type == "rotary" :
# sin/cos [θ0,θ1,θ2......θd/6-1] -> sin/cos [θ0,θ0,θ1,θ1,θ2,θ2......θd/6-1,θd/6-1]
sinx, cosx, siny, cosy, sinz, cosz = map( lambda feat:torch.stack([feat, feat], dim=-1).view(bsize, npoint, -1),
[ sinx, cosx, siny, cosy, sinz, cosz] )
sin_pos = torch.cat([sinx,siny,sinz], dim=-1)
cos_pos = torch.cat([cosx,cosy,cosz], dim=-1)
position_code = torch.stack( [cos_pos, sin_pos] , dim=-1)
else:
raise KeyError()
if position_code.requires_grad:
position_code = position_code.detach()
return position_code | 2,989 | 33.367816 | 160 | py |
lepard | lepard-main/models/backbone.py | from models.blocks import *
import torch.nn.functional as F
import numpy as np
class KPFCN(nn.Module):
def __init__(self, config):
super(KPFCN, self).__init__()
# Parameters
layer = 0
r = config.first_subsampling_dl * config.conv_radius
in_dim = config.in_feats_dim
out_dim = config.first_feats_dim
# List Encoder blocks
self.encoder_blocks = nn.ModuleList()
self.encoder_skip_dims = []
self.encoder_skips = []
# Loop over consecutive blocks
for block_i, block in enumerate(config.architecture):
# Check equivariance
if ('equivariant' in block) and (not out_dim % 3 == 0):
                raise ValueError('Equivariant block but features dimension is not a multiple of 3')
# Detect change to next layer for skip connection
if np.any([tmp in block for tmp in ['pool', 'strided', 'upsample', 'global']]):
self.encoder_skips.append(block_i)
self.encoder_skip_dims.append(in_dim)
# Detect upsampling block to stop
if 'upsample' in block:
break
# Apply the good block function defining tf ops
self.encoder_blocks.append(block_decider(block,
r,
in_dim,
out_dim,
layer,
config))
# Update dimension of input from output
if 'simple' in block:
in_dim = out_dim // 2
else:
in_dim = out_dim
# Detect change to a subsampled layer
if 'pool' in block or 'strided' in block:
# Update radius and feature dimension for next layer
layer += 1
r *= 2
out_dim *= 2
# bottleneck output & input layer
self.coarse_out = nn.Conv1d(in_dim//2, config.coarse_feature_dim, kernel_size=1, bias=True)
coarse_in_dim = config.coarse_feature_dim
self.coarse_in = nn.Conv1d(coarse_in_dim, in_dim//2, kernel_size=1, bias=True)
# List Decoder blocks
# Save all block operations in a list of modules
self.decoder_blocks = nn.ModuleList()
self.decoder_concats = []
# Find first upsampling block
start_i = 0
for block_i, block in enumerate(config.architecture):
if 'upsample' in block:
start_i = block_i
break
# Loop over consecutive blocks
for block_i, block in enumerate(config.architecture[start_i:]):
# Add dimension of skip connection concat
if block_i > 0 and 'upsample' in config.architecture[start_i + block_i - 1]:
in_dim += self.encoder_skip_dims[layer]
self.decoder_concats.append(block_i)
# Apply the good block function defining tf ops
self.decoder_blocks.append(block_decider(block,
r,
in_dim,
out_dim,
layer,
config))
# Update dimension of input from output
in_dim = out_dim
# Detect change to a subsampled layer
if 'upsample' in block:
# Update radius and feature dimension for next layer
layer -= 1
r *= 0.5
out_dim = out_dim // 2
# fine output layer
fine_feature_dim = config.fine_feature_dim
self.fine_out = nn.Conv1d(out_dim, fine_feature_dim, kernel_size=1, bias=True)
def forward(self, batch, phase = 'encode'):
# Get input features
if phase == 'coarse' :
x = batch['features'].clone().detach()
# 1. joint encoder part
self.skip_x = []
for block_i, block_op in enumerate(self.encoder_blocks):
if block_i in self.encoder_skips:
self.skip_x.append(x)
x = block_op(x, batch) # [N,C]
for block_i, block_op in enumerate(self.decoder_blocks):
if block_i in self.decoder_concats:
x = torch.cat([x, self.skip_x.pop()], dim=1)
x = block_op(x, batch)
if block_i == 1 :
coarse_feats = x.transpose(0,1).unsqueeze(0) #[B, C, N]
coarse_feats = self.coarse_out(coarse_feats) #[B, C, N]
coarse_feats = coarse_feats.transpose(1,2).squeeze(0)
return coarse_feats #[N,C2]
#
# elif phase == "fine":
#
# coarse_feats = batch['coarse_feats']
# coarse_feats = coarse_feats.transpose(0,1).unsqueeze(0)
# coarse_feats = self.coarse_in(coarse_feats)
# x = coarse_feats.transpose(1,2).squeeze(0)
#
#
# for block_i, block_op in enumerate(self.decoder_blocks):
# if block_i > 1 :
# if block_i in self.decoder_concats:
# x = torch.cat([x, self.skip_x.pop()], dim=1)
# x = block_op(x, batch)
#
# fine_feats = x.transpose(0, 1).unsqueeze(0) # [1, C, N]
# fine_feats = self.fine_out(fine_feats) # [1, C, N]
# fine_feats = fine_feats.transpose(1, 2).squeeze(0)
#
# return fine_feats | 6,033 | 36.018405 | 100 | py |
lepard | lepard-main/models/transformer.py | import copy
import math
import torch
from torch import nn
from torch.nn import Module, Dropout
from models.position_encoding import VolumetricPositionEncoding as VolPE
from models.matching import Matching
from models.procrustes import SoftProcrustesLayer
import numpy as np
import random
from scipy.spatial.transform import Rotation
class GeometryAttentionLayer(nn.Module):
def __init__(self, config):
super(GeometryAttentionLayer, self).__init__()
d_model = config['feature_dim']
nhead = config['n_head']
self.dim = d_model // nhead
self.nhead = nhead
self.pe_type = config['pe_type']
# multi-head attention
self.q_proj = nn.Linear(d_model, d_model, bias=False)
self.k_proj = nn.Linear(d_model, d_model, bias=False)
self.v_proj = nn.Linear(d_model, d_model, bias=False)
# self.attention = Attention() #LinearAttention() if attention == 'linear' else FullAttention()
self.merge = nn.Linear(d_model, d_model, bias=False)
# feed-forward network
self.mlp = nn.Sequential(
nn.Linear(d_model*2, d_model*2, bias=False),
nn.ReLU(True),
nn.Linear(d_model*2, d_model, bias=False),
)
# norm and dropout
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, x, source, x_pe, source_pe, x_mask=None, source_mask=None):
bs = x.size(0)
q, k, v = x, source, source
qp, kvp = x_pe, source_pe
q_mask, kv_mask = x_mask, source_mask
if self.pe_type == 'sinusoidal':
#w(x+p), attention is all you need : https://arxiv.org/abs/1706.03762
            if qp is not None: # disentangled
q = q + qp
k = k + kvp
qw = self.q_proj(q).view(bs, -1, self.nhead, self.dim) # [N, L, (H, D)]
kw = self.k_proj(k).view(bs, -1, self.nhead, self.dim) # [N, S, (H, D)]
vw = self.v_proj(v).view(bs, -1, self.nhead, self.dim)
elif self.pe_type == 'rotary':
#Rwx roformer : https://arxiv.org/abs/2104.09864
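            # rotating q and k (but not v) makes the attention logits q.k
            # depend only on the relative position between the two points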
qw = self.q_proj(q)
kw = self.k_proj(k)
vw = self.v_proj(v)
            if qp is not None: # disentangled
q_cos, q_sin = qp[...,0] ,qp[...,1]
k_cos, k_sin = kvp[...,0],kvp[...,1]
qw = VolPE.embed_rotary(qw, q_cos, q_sin)
kw = VolPE.embed_rotary(kw, k_cos, k_sin)
qw = qw.view(bs, -1, self.nhead, self.dim)
kw = kw.view(bs, -1, self.nhead, self.dim)
vw = vw.view(bs, -1, self.nhead, self.dim)
else:
raise KeyError()
# attention
a = torch.einsum("nlhd,nshd->nlsh", qw, kw)
if kv_mask is not None:
a.masked_fill_( q_mask[:, :, None, None] * (~kv_mask[:, None, :, None]), float('-inf'))
a = a / qw.size(3) **0.5
a = torch.softmax(a, dim=2)
o = torch.einsum("nlsh,nshd->nlhd", a, vw).contiguous() # [N, L, (H, D)]
message = self.merge(o.view(bs, -1, self.nhead*self.dim)) # [N, L, C]
message = self.norm1(message)
# feed-forward network
message = self.mlp(torch.cat([x, message], dim=2))
message = self.norm2(message)
e = x + message
return e
class RepositioningTransformer(nn.Module):
def __init__(self, config):
super(RepositioningTransformer, self).__init__()
self.d_model = config['feature_dim']
self.nhead = config['n_head']
self.layer_types = config['layer_types']
self.positioning_type = config['positioning_type']
self.pe_type =config['pe_type']
self.entangled= config['entangled']
self.positional_encoding = VolPE(config)
encoder_layer = GeometryAttentionLayer (config)
self.layers = nn.ModuleList()
for l_type in self.layer_types:
if l_type in ['self','cross']:
self.layers.append( copy.deepcopy(encoder_layer))
elif l_type == "positioning":
if self.positioning_type == 'procrustes':
positioning_layer = nn.ModuleList()
positioning_layer.append( Matching(config['feature_matching']))
positioning_layer.append( SoftProcrustesLayer(config['procrustes']) )
self.layers.append(positioning_layer)
elif self.positioning_type in ['oracle', 'randSO3']:
self.layers.append( None)
else :
raise KeyError(self.positioning_type + " undefined positional encoding type")
else:
raise KeyError()
self._reset_parameters()
def forward(self, src_feat, tgt_feat, s_pcd, t_pcd, src_mask, tgt_mask, data, T = None, timers = None):
self.timers = timers
assert self.d_model == src_feat.size(2), "the feature number of src and transformer must be equal"
if T is not None:
R, t = T
src_pcd_wrapped = (torch.matmul(R, s_pcd.transpose(1, 2)) + t).transpose(1, 2)
tgt_pcd_wrapped = t_pcd
else:
src_pcd_wrapped = s_pcd
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding( src_pcd_wrapped)
tgt_pe = self.positional_encoding( tgt_pcd_wrapped)
if not self.entangled:
position_layer = 0
data.update({"position_layers":{}})
for layer, name in zip(self.layers, self.layer_types) :
if name == 'self':
if self.timers: self.timers.tic('self atten')
src_feat = layer(src_feat, src_feat, src_pe, src_pe, src_mask, src_mask,)
tgt_feat = layer(tgt_feat, tgt_feat, tgt_pe, tgt_pe, tgt_mask, tgt_mask)
if self.timers: self.timers.toc('self atten')
elif name == 'cross':
if self.timers: self.timers.tic('cross atten')
src_feat = layer(src_feat, tgt_feat, src_pe, tgt_pe, src_mask, tgt_mask)
tgt_feat = layer(tgt_feat, src_feat, tgt_pe, src_pe, tgt_mask, src_mask)
if self.timers: self.timers.toc('cross atten')
elif name =='positioning':
if self.positioning_type == 'procrustes':
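                        # repositioning: estimate a rigid fit from the
                        # current soft matches, then re-embed the source
                        # positions in the coarsely aligned frame so later
                        # attention layers see rotation-compensated codes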
conf_matrix, match_pred = layer[0](src_feat, tgt_feat, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type=self.pe_type)
position_layer += 1
data["position_layers"][position_layer] = {"conf_matrix": conf_matrix, "match_pred": match_pred}
if self.timers: self.timers.tic('procrustes_layer')
R, t, R_forwd, t_forwd, condition, solution_mask = layer[1] (conf_matrix, s_pcd, t_pcd, src_mask, tgt_mask)
if self.timers: self.timers.toc('procrustes_layer')
data["position_layers"][position_layer].update({
"R_s2t_pred": R,"t_s2t_pred": t, "solution_mask": solution_mask, "condition": condition})
src_pcd_wrapped = (torch.matmul(R_forwd, s_pcd.transpose(1, 2)) + t_forwd).transpose(1, 2)
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding(src_pcd_wrapped)
tgt_pe = self.positional_encoding(tgt_pcd_wrapped)
elif self.positioning_type == 'randSO3':
src_pcd_wrapped = self.rand_rot_pcd( s_pcd, src_mask)
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding(src_pcd_wrapped)
tgt_pe = self.positional_encoding(tgt_pcd_wrapped)
elif self.positioning_type == 'oracle':
#Note R,t ground truth is only available for computing oracle position encoding
rot_gt = data['batched_rot']
trn_gt = data['batched_trn']
src_pcd_wrapped = (torch.matmul(rot_gt, s_pcd.transpose(1, 2)) + trn_gt).transpose(1, 2)
tgt_pcd_wrapped = t_pcd
src_pe = self.positional_encoding(src_pcd_wrapped)
tgt_pe = self.positional_encoding(tgt_pcd_wrapped)
else:
raise KeyError(self.positioning_type + " undefined positional encoding type")
else :
raise KeyError
return src_feat, tgt_feat, src_pe, tgt_pe
        else : # position and feature entangled
position_layer = 0
data.update({"position_layers":{}})
src_feat = VolPE.embed_pos(self.pe_type, src_feat, src_pe)
tgt_feat = VolPE.embed_pos(self.pe_type, tgt_feat, tgt_pe)
for layer, name in zip(self.layers, self.layer_types):
if name == 'self':
if self.timers: self.timers.tic('self atten')
src_feat = layer(src_feat, src_feat, None, None, src_mask, src_mask, )
tgt_feat = layer(tgt_feat, tgt_feat, None, None, tgt_mask, tgt_mask)
if self.timers: self.timers.toc('self atten')
elif name == 'cross':
if self.timers: self.timers.tic('cross atten')
src_feat = layer(src_feat, tgt_feat, None, None, src_mask, tgt_mask)
tgt_feat = layer(tgt_feat, src_feat, None, None, tgt_mask, src_mask)
if self.timers: self.timers.toc('cross atten')
elif name == 'positioning':
pass
return src_feat, tgt_feat, src_pe, tgt_pe
def rand_rot_pcd (self, pcd, mask):
'''
@param pcd: B, N, 3
@param mask: B, N
@return:
'''
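        # zero the padded points, take the mean over valid points only (the
        # N/n_points factor corrects for the zero padding), then apply a
        # random Euler-angle rotation about that centroid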
pcd[~mask]=0.
N = mask.shape[1]
n_points = mask.sum(dim=1, keepdim=True).view(-1,1,1)
bs = pcd.shape[0]
euler_ab = np.random.rand(bs, 3) * np.pi * 2 # anglez, angley, anglex
rand_rot = torch.from_numpy( Rotation.from_euler('zyx', euler_ab).as_matrix() ).to(pcd)
pcd_u = pcd.mean(dim=1, keepdim=True) * N / n_points
pcd_centered = pcd - pcd_u
pcd_rand_rot = torch.matmul( rand_rot, pcd_centered.transpose(1,2) ).transpose(1,2) + pcd_u
return pcd_rand_rot
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p) | 10,666 | 36.559859 | 142 | py |
lepard | lepard-main/models/procrustes.py | import torch
import torch.nn as nn
def topk(data, num_topk):
sort, idx = data.sort(descending=True)
return sort[:num_topk], idx[:num_topk]
class SoftProcrustesLayer(nn.Module):
def __init__(self, config):
super(SoftProcrustesLayer, self).__init__()
self.sample_rate = config.sample_rate
self.max_condition_num= config.max_condition_num
@staticmethod
def batch_weighted_procrustes( X, Y, w, eps=0.0001):
'''
@param X: source frame [B, N,3]
@param Y: target frame [B, N,3]
@param w: weights [B, N,1]
@param eps:
@return:
'''
# https://ieeexplore.ieee.org/document/88573
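        # weighted Kabsch/Umeyama: centre both clouds at their weighted
        # means, build the weighted covariance Sxy, and recover R from its
        # SVD with a det correction so that R is a proper rotation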
bsize = X.shape[0]
device = X.device
W1 = torch.abs(w).sum(dim=1, keepdim=True)
w_norm = w / (W1 + eps)
mean_X = (w_norm * X).sum(dim=1, keepdim=True)
mean_Y = (w_norm * Y).sum(dim=1, keepdim=True)
Sxy = torch.matmul( (Y - mean_Y).transpose(1,2), w_norm * (X - mean_X) )
Sxy = Sxy.cpu().double()
U, D, V = Sxy.svd() # small SVD runs faster on cpu
condition = D.max(dim=1)[0] / D.min(dim=1)[0]
S = torch.eye(3)[None].repeat(bsize,1,1).double()
UV_det = U.det() * V.det()
S[:, 2:3, 2:3] = UV_det.view(-1, 1,1)
svT = torch.matmul( S, V.transpose(1,2) )
R = torch.matmul( U, svT).float().to(device)
t = mean_Y.transpose(1,2) - torch.matmul( R, mean_X.transpose(1,2) )
return R, t, condition
def forward(self, conf_matrix, src_pcd, tgt_pcd, src_mask, tgt_mask):
'''
@param conf_matrix:
@param src_pcd:
@param tgt_pcd:
@param src_mask:
@param tgt_mask:
@return:
'''
bsize, N, M = conf_matrix.shape
# subsample correspondence
src_len = src_mask.sum(dim=1)
tgt_len = tgt_mask.sum(dim=1)
entry_max, _ = torch.stack([src_len,tgt_len], dim=0).max(dim=0)
entry_max = (entry_max * self.sample_rate).int()
sample_n_points = entry_max.float().mean().int() #entry_max.max()
conf, idx = conf_matrix.view(bsize, -1).sort(descending=True,dim=1)
w = conf [:, :sample_n_points]
idx= idx[:, :sample_n_points]
idx_src = idx//M #torch.div(idx, M, rounding_mode='trunc')
idx_tgt = idx%M
b_index = torch.arange(bsize).view(-1, 1).repeat((1, sample_n_points)).view(-1)
src_pcd_sampled = src_pcd[b_index, idx_src.view(-1)].view(bsize, sample_n_points, -1)
tgt_pcd_sampled = tgt_pcd[b_index, idx_tgt.view(-1)].view(bsize, sample_n_points, -1)
w_mask = torch.arange(sample_n_points).view(1,-1).repeat(bsize,1).to(w)
w_mask = w_mask < entry_max[:,None]
w[~w_mask] = 0.
# solve
try :
R, t, condition = self.batch_weighted_procrustes(src_pcd_sampled, tgt_pcd_sampled, w[...,None])
except: # fail to get valid solution, this usually happens at the early stage of training
R = torch.eye(3)[None].repeat(bsize,1,1).type_as(conf_matrix)
t = torch.zeros(3, 1)[None].repeat(bsize,1,1).type_as(conf_matrix)
condition = torch.zeros(bsize).type_as(conf_matrix)
        # filter unreliable solutions by condition number
solution_mask = condition < self.max_condition_num
R_forwd = R.clone()
t_forwd = t.clone()
R_forwd[~solution_mask] = torch.eye(3).type_as(R)
t_forwd[~solution_mask] = torch.zeros(3, 1).type_as(R)
return R, t, R_forwd, t_forwd, condition, solution_mask | 3,591 | 37.623656 | 107 | py |
lepard | lepard-main/models/pipeline.py | from models.blocks import *
from models.backbone import KPFCN
from models.transformer import RepositioningTransformer
from models.matching import Matching
from models.procrustes import SoftProcrustesLayer
class Pipeline(nn.Module):
def __init__(self, config):
super(Pipeline, self).__init__()
self.config = config
self.backbone = KPFCN(config['kpfcn_config'])
self.pe_type = config['coarse_transformer']['pe_type']
self.positioning_type = config['coarse_transformer']['positioning_type']
self.coarse_transformer = RepositioningTransformer(config['coarse_transformer'])
self.coarse_matching = Matching(config['coarse_matching'])
self.soft_procrustes = SoftProcrustesLayer(config['coarse_transformer']['procrustes'])
def forward(self, data, timers=None):
self.timers = timers
if self.timers: self.timers.tic('kpfcn backbone encode')
coarse_feats = self.backbone(data, phase="coarse")
if self.timers: self.timers.toc('kpfcn backbone encode')
if self.timers: self.timers.tic('coarse_preprocess')
src_feats, tgt_feats, s_pcd, t_pcd, src_mask, tgt_mask = self.split_feats (coarse_feats, data)
data.update({ 's_pcd': s_pcd, 't_pcd': t_pcd })
if self.timers: self.timers.toc('coarse_preprocess')
if self.timers: self.timers.tic('coarse feature transformer')
src_feats, tgt_feats, src_pe, tgt_pe = self.coarse_transformer(src_feats, tgt_feats, s_pcd, t_pcd, src_mask, tgt_mask, data, timers=timers)
if self.timers: self.timers.toc('coarse feature transformer')
if self.timers: self.timers.tic('match feature coarse')
conf_matrix_pred, coarse_match_pred = self.coarse_matching(src_feats, tgt_feats, src_pe, tgt_pe, src_mask, tgt_mask, data, pe_type = self.pe_type)
data.update({'conf_matrix_pred': conf_matrix_pred, 'coarse_match_pred': coarse_match_pred })
if self.timers: self.timers.toc('match feature coarse')
if self.timers: self.timers.tic('procrustes_layer')
R, t, _, _, _, _ = self.soft_procrustes(conf_matrix_pred, s_pcd, t_pcd, src_mask, tgt_mask)
data.update({"R_s2t_pred": R, "t_s2t_pred": t})
if self.timers: self.timers.toc('procrustes_layer')
return data
def split_feats(self, geo_feats, data):
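        # scatter the flat coarse features/points produced by the KPConv
        # collate back into padded [B, N_max, C] tensors, using the
        # precomputed batch indices, together with the validity masks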
pcd = data['points'][self.config['kpfcn_config']['coarse_level']]
src_mask = data['src_mask']
tgt_mask = data['tgt_mask']
        src_ind_coarse_split = data['src_ind_coarse_split']
tgt_ind_coarse_split = data['tgt_ind_coarse_split']
src_ind_coarse = data['src_ind_coarse']
tgt_ind_coarse = data['tgt_ind_coarse']
b_size, src_pts_max = src_mask.shape
tgt_pts_max = tgt_mask.shape[1]
src_feats = torch.zeros([b_size * src_pts_max, geo_feats.shape[-1]]).type_as(geo_feats)
tgt_feats = torch.zeros([b_size * tgt_pts_max, geo_feats.shape[-1]]).type_as(geo_feats)
src_pcd = torch.zeros([b_size * src_pts_max, 3]).type_as(pcd)
tgt_pcd = torch.zeros([b_size * tgt_pts_max, 3]).type_as(pcd)
src_feats[src_ind_coarse_split] = geo_feats[src_ind_coarse]
tgt_feats[tgt_ind_coarse_split] = geo_feats[tgt_ind_coarse]
src_pcd[src_ind_coarse_split] = pcd[src_ind_coarse]
tgt_pcd[tgt_ind_coarse_split] = pcd[tgt_ind_coarse]
return src_feats.view( b_size , src_pts_max , -1), \
tgt_feats.view( b_size , tgt_pts_max , -1), \
src_pcd.view( b_size , src_pts_max , -1), \
tgt_pcd.view( b_size , tgt_pts_max , -1), \
src_mask, \
tgt_mask | 3,685 | 43.95122 | 154 | py |
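# For reference (derived from the collate functions in datasets/dataloader.py,
# not a new API): the `data` dict consumed by Pipeline.forward / split_feats is
# expected to carry at least these keys, as produced by collate_fn_3dmatch /
# collate_fn_4dmatch:
#   'points'        : list of [N_l, 3] point stacks, one per KPFCN level
#   'neighbors', 'pools', 'upsamples' : per-level index tensors for KPConv
#   'features'      : [N_0, 1] input features
#   'stack_lengths' : per-level (src_len, tgt_len) batch lengths
#   'src_mask'      : [B, src_pts_max] bool padding mask (likewise 'tgt_mask')
#   'src_ind_coarse_split', 'tgt_ind_coarse_split',
#   'src_ind_coarse', 'tgt_ind_coarse' : scatter indices used in split_feats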
lepard | lepard-main/models/blocks.py | import time
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.init import kaiming_uniform_
from kernels.kernel_points import load_kernels
# from lib.ply import write_ply
def gather(x, idx, method=2):
"""
implementation of a custom gather operation for faster backwards.
:param x: input with shape [N, D_1, ... D_d]
:param idx: indexing with shape [n_1, ..., n_m]
:param method: Choice of the method
:return: x[idx] with shape [n_1, ..., n_m, D_1, ... D_d]
"""
if method == 0:
return x[idx]
elif method == 1:
x = x.unsqueeze(1)
x = x.expand((-1, idx.shape[-1], -1))
idx = idx.unsqueeze(2)
idx = idx.expand((-1, -1, x.shape[-1]))
return x.gather(0, idx)
elif method == 2:
for i, ni in enumerate(idx.size()[1:]):
x = x.unsqueeze(i+1)
new_s = list(x.size())
new_s[i+1] = ni
x = x.expand(new_s)
n = len(idx.size())
for i, di in enumerate(x.size()[n:]):
idx = idx.unsqueeze(i+n)
new_s = list(idx.size())
new_s[i+n] = di
idx = idx.expand(new_s)
return x.gather(0, idx)
else:
        raise ValueError('Unknown method')
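# Usage sketch (added for illustration; not in the original file): the default
# method 2 reproduces plain fancy indexing x[idx] while allowing a cheaper
# backward pass.
def _gather_demo():
    x = torch.randn(10, 4)                # [N, D]
    idx = torch.randint(0, 10, (6, 3))    # [n_1, n_2]
    out = gather(x, idx, method=2)        # [6, 3, 4]
    assert torch.equal(out, x[idx])       # agrees with method 0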
def radius_gaussian(sq_r, sig, eps=1e-9):
"""
Compute a radius gaussian (gaussian of distance)
    :param sq_r: input squared radii [dn, ..., d1, d0]
:param sig: extents of gaussians [d1, d0] or [d0] or float
:return: gaussian of sq_r [dn, ..., d1, d0]
"""
return torch.exp(-sq_r / (2 * sig**2 + eps))
def closest_pool(x, inds):
"""
Pools features from the closest neighbors. WARNING: this function assumes the neighbors are ordered.
:param x: [n1, d] features matrix
:param inds: [n2, max_num] Only the first column is used for pooling
:return: [n2, d] pooled features matrix
"""
    # Add a last row of zero features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get features for each pooling location [n2, d]
return gather(x, inds[:, 0])
def max_pool(x, inds):
"""
Pools features with the maximum values.
:param x: [n1, d] features matrix
:param inds: [n2, max_num] pooling indices
:return: [n2, d] pooled features matrix
"""
    # Add a last row of zero features for shadow pools
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get all features for each pooling location [n2, max_num, d]
pool_features = gather(x, inds)
# Pool the maximum [n2, d]
max_features, _ = torch.max(pool_features, 1)
return max_features
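# Usage sketch (illustration only): both pooling ops append one zero "shadow"
# row to x, so an index equal to x.shape[0] selects zero features -- this is
# how padded / out-of-range neighbors are handled throughout the network.
def _pool_demo():
    x = torch.rand(5, 8) + 1.0                # 5 real points, strictly positive feats
    inds = torch.tensor([[0, 1, 5],           # index 5 -> shadow (zero) row
                         [2, 5, 5]])
    pooled = max_pool(x, inds)                # [2, 8]; zero shadow rows never win the max
    nearest = closest_pool(x, inds)           # [2, 8]; only the first column is used
    assert torch.equal(nearest[0], x[0]) and torch.equal(nearest[1], x[2])
    assert torch.equal(pooled[0], torch.max(x[:2], dim=0)[0])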
def global_average(x, batch_lengths):
"""
Block performing a global average over batch pooling
:param x: [N, D] input features
:param batch_lengths: [B] list of batch lengths
:return: [B, D] averaged features
"""
# Loop over the clouds of the batch
averaged_features = []
i0 = 0
for b_i, length in enumerate(batch_lengths):
# Average features for each batch cloud
averaged_features.append(torch.mean(x[i0:i0 + length], dim=0))
# Increment for next cloud
i0 += length
# Average features in each batch
return torch.stack(averaged_features)
# KPConv class
# \******************/
class KPConv(nn.Module):
def __init__(self, kernel_size, p_dim, in_channels, out_channels, KP_extent, radius,
fixed_kernel_points='center', KP_influence='linear', aggregation_mode='sum',
deformable=False, modulated=False):
"""
Initialize parameters for KPConvDeformable.
:param kernel_size: Number of kernel points.
:param p_dim: dimension of the point space.
:param in_channels: dimension of input features.
:param out_channels: dimension of output features.
:param KP_extent: influence radius of each kernel point.
:param radius: radius used for kernel point init. Even for deformable, use the config.conv_radius
:param fixed_kernel_points: fix position of certain kernel points ('none', 'center' or 'verticals').
:param KP_influence: influence function of the kernel points ('constant', 'linear', 'gaussian').
:param aggregation_mode: choose to sum influences, or only keep the closest ('closest', 'sum').
:param deformable: choose deformable or not
:param modulated: choose if kernel weights are modulated in addition to deformed
"""
super(KPConv, self).__init__()
# Save parameters
self.K = kernel_size
self.p_dim = p_dim
self.in_channels = in_channels
self.out_channels = out_channels
self.radius = radius
self.KP_extent = KP_extent
self.fixed_kernel_points = fixed_kernel_points
self.KP_influence = KP_influence
self.aggregation_mode = aggregation_mode
self.deformable = deformable
self.modulated = modulated
# Running variable containing deformed KP distance to input points. (used in regularization loss)
self.min_d2 = None
self.deformed_KP = None
self.offset_features = None
# Initialize weights
self.weights = Parameter(torch.zeros((self.K, in_channels, out_channels), dtype=torch.float32),
requires_grad=True)
# Initiate weights for offsets
if deformable:
if modulated:
self.offset_dim = (self.p_dim + 1) * self.K
else:
self.offset_dim = self.p_dim * self.K
self.offset_conv = KPConv(self.K,
self.p_dim,
self.in_channels,
self.offset_dim,
KP_extent,
radius,
fixed_kernel_points=fixed_kernel_points,
KP_influence=KP_influence,
aggregation_mode=aggregation_mode)
self.offset_bias = Parameter(torch.zeros(self.offset_dim, dtype=torch.float32), requires_grad=True)
else:
self.offset_dim = None
self.offset_conv = None
self.offset_bias = None
# Reset parameters
self.reset_parameters()
# Initialize kernel points
self.kernel_points = self.init_KP()
return
def reset_parameters(self):
kaiming_uniform_(self.weights, a=math.sqrt(5))
if self.deformable:
nn.init.zeros_(self.offset_bias)
return
def init_KP(self):
"""
Initialize the kernel point positions in a sphere
:return: the tensor of kernel points
"""
# Create one kernel disposition (as numpy array). Choose the KP distance to center thanks to the KP extent
K_points_numpy = load_kernels(self.radius,
self.K,
dimension=self.p_dim,
fixed=self.fixed_kernel_points)
return Parameter(torch.tensor(K_points_numpy, dtype=torch.float32),
requires_grad=False)
def forward(self, q_pts, s_pts, neighb_inds, x):
# Offset generation
if self.deformable:
# Get offsets with a KPConv that only takes part of the features
self.offset_features = self.offset_conv(q_pts, s_pts, neighb_inds, x) + self.offset_bias
if self.modulated:
# Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features[:, :self.p_dim * self.K]
unscaled_offsets = unscaled_offsets.view(-1, self.K, self.p_dim)
# Get modulations
modulations = 2 * torch.sigmoid(self.offset_features[:, self.p_dim * self.K:])
else:
# Get offset (in normalized scale) from features
unscaled_offsets = self.offset_features.view(-1, self.K, self.p_dim)
# No modulations
modulations = None
# Rescale offset for this layer
offsets = unscaled_offsets * self.KP_extent
else:
offsets = None
modulations = None
# Deformed convolution
# Add a fake point in the last row for shadow neighbors
s_pts = torch.cat((s_pts, torch.zeros_like(s_pts[:1, :]) + 1e6), 0)
# Get neighbor points [n_points, n_neighbors, dim]
neighbors = s_pts[neighb_inds, :]
# Center every neighborhood
neighbors = neighbors - q_pts.unsqueeze(1)
# Apply offsets to kernel points [n_points, n_kpoints, dim]
if self.deformable:
self.deformed_KP = offsets + self.kernel_points
deformed_K_points = self.deformed_KP.unsqueeze(1)
else:
deformed_K_points = self.kernel_points
# Get all difference matrices [n_points, n_neighbors, n_kpoints, dim]
neighbors.unsqueeze_(2)
differences = neighbors - deformed_K_points
# Get the square distances [n_points, n_neighbors, n_kpoints]
sq_distances = torch.sum(differences ** 2, dim=3)
# Optimization by ignoring points outside a deformed KP range
if self.deformable:
# Save distances for loss
self.min_d2, _ = torch.min(sq_distances, dim=1)
# Boolean of the neighbors in range of a kernel point [n_points, n_neighbors]
in_range = torch.any(sq_distances < self.KP_extent ** 2, dim=2).type(torch.int32)
# New value of max neighbors
new_max_neighb = torch.max(torch.sum(in_range, dim=1))
# For each row of neighbors, indices of the ones that are in range [n_points, new_max_neighb]
neighb_row_bool, neighb_row_inds = torch.topk(in_range, new_max_neighb.item(), dim=1)
# Gather new neighbor indices [n_points, new_max_neighb]
new_neighb_inds = neighb_inds.gather(1, neighb_row_inds, sparse_grad=False)
# Gather new distances to KP [n_points, new_max_neighb, n_kpoints]
neighb_row_inds.unsqueeze_(2)
neighb_row_inds = neighb_row_inds.expand(-1, -1, self.K)
sq_distances = sq_distances.gather(1, neighb_row_inds, sparse_grad=False)
# New shadow neighbors have to point to the last shadow point
new_neighb_inds *= neighb_row_bool
new_neighb_inds -= (neighb_row_bool.type(torch.int64) - 1) * int(s_pts.shape[0] - 1)
else:
new_neighb_inds = neighb_inds
# Get Kernel point influences [n_points, n_kpoints, n_neighbors]
if self.KP_influence == 'constant':
# Every point get an influence of 1.
all_weights = torch.ones_like(sq_distances)
all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'linear':
# Influence decrease linearly with the distance, and get to zero when d = KP_extent.
all_weights = torch.clamp(1 - torch.sqrt(sq_distances) / self.KP_extent, min=0.0)
all_weights = torch.transpose(all_weights, 1, 2)
elif self.KP_influence == 'gaussian':
# Influence in gaussian of the distance.
sigma = self.KP_extent * 0.3
all_weights = radius_gaussian(sq_distances, sigma)
all_weights = torch.transpose(all_weights, 1, 2)
else:
raise ValueError('Unknown influence function type (config.KP_influence)')
# In case of closest mode, only the closest KP can influence each point
if self.aggregation_mode == 'closest':
neighbors_1nn = torch.argmin(sq_distances, dim=2)
all_weights *= torch.transpose(nn.functional.one_hot(neighbors_1nn, self.K), 1, 2)
elif self.aggregation_mode != 'sum':
raise ValueError("Unknown convolution mode. Should be 'closest' or 'sum'")
# Add a zero feature for shadow neighbors
x = torch.cat((x, torch.zeros_like(x[:1, :])), 0)
# Get the features of each neighborhood [n_points, n_neighbors, in_fdim]
neighb_x = gather(x, new_neighb_inds)
# Apply distance weights [n_points, n_kpoints, in_fdim]
weighted_features = torch.matmul(all_weights, neighb_x)
# Apply modulations
if self.deformable and self.modulated:
weighted_features *= modulations.unsqueeze(2)
# Apply network weights [n_kpoints, n_points, out_fdim]
weighted_features = weighted_features.permute((1, 0, 2))
kernel_outputs = torch.matmul(weighted_features, self.weights)
# Convolution sum [n_points, out_fdim]
# return torch.sum(kernel_outputs, dim=0)
output_features = torch.sum(kernel_outputs, dim=0, keepdim=False)
# normalization term.
neighbor_features_sum = torch.sum(neighb_x, dim=-1)
neighbor_num = torch.sum(torch.gt(neighbor_features_sum, 0.0), dim=-1)
neighbor_num = torch.max(neighbor_num, torch.ones_like(neighbor_num))
output_features = output_features / neighbor_num.unsqueeze(1)
return output_features
def __repr__(self):
return 'KPConv(radius: {:.2f}, extent: {:.2f}, in_feat: {:d}, out_feat: {:d})'.format(self.radius, self.KP_extent,
self.in_channels,
self.out_channels)
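# Usage sketch (illustration only; shapes inferred from forward() above). Note
# that constructing a KPConv calls kernels.kernel_points.load_kernels for the
# kernel disposition, so this sketch assumes it is run inside the repo.
def _kpconv_demo():
    conv = KPConv(kernel_size=15, p_dim=3, in_channels=8, out_channels=16,
                  KP_extent=0.04, radius=0.05)
    n_q, n_s, k = 32, 64, 10
    q_pts, s_pts = torch.rand(n_q, 3), torch.rand(n_s, 3)
    # neighbor indices in [0, n_s]; the value n_s addresses the appended shadow point
    neighb_inds = torch.randint(0, n_s + 1, (n_q, k))
    x = torch.rand(n_s, 8)
    out = conv(q_pts, s_pts, neighb_inds, x)  # [n_q, 16]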
# Complex blocks
# \********************/
def block_decider(block_name,
radius,
in_dim,
out_dim,
layer_ind,
config):
if block_name == 'unary':
return UnaryBlock(in_dim, out_dim, config.use_batch_norm, config.batch_norm_momentum)
elif block_name in ['simple',
'simple_deformable',
'simple_invariant',
'simple_equivariant',
'simple_strided',
'simple_deformable_strided',
'simple_invariant_strided',
'simple_equivariant_strided']:
return SimpleBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name in ['resnetb',
'resnetb_invariant',
'resnetb_equivariant',
'resnetb_deformable',
'resnetb_strided',
'resnetb_deformable_strided',
'resnetb_equivariant_strided',
'resnetb_invariant_strided']:
return ResnetBottleneckBlock(block_name, in_dim, out_dim, radius, layer_ind, config)
elif block_name == 'max_pool' or block_name == 'max_pool_wide':
return MaxPoolBlock(layer_ind)
elif block_name == 'global_average':
return GlobalAverageBlock()
elif block_name == 'nearest_upsample':
return NearestUpsampleBlock(layer_ind)
else:
raise ValueError('Unknown block name in the architecture definition : ' + block_name)
class BatchNormBlock(nn.Module):
def __init__(self, in_dim, use_bn, bn_momentum):
"""
Initialize a batch normalization block. If network does not use batch normalization, replace with biases.
        :param in_dim: dimension of input features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(BatchNormBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.in_dim = in_dim
if self.use_bn:
#self.batch_norm = nn.BatchNorm1d(in_dim, momentum=bn_momentum)
self.batch_norm = nn.InstanceNorm1d(in_dim, momentum=bn_momentum)
else:
self.bias = Parameter(torch.zeros(in_dim, dtype=torch.float32), requires_grad=True)
return
def reset_parameters(self):
nn.init.zeros_(self.bias)
def forward(self, x):
if self.use_bn:
x = x.unsqueeze(2)
x = x.transpose(0, 2)
x = self.batch_norm(x)
x = x.transpose(0, 2)
return x.squeeze()
else:
return x + self.bias
def __repr__(self):
return 'BatchNormBlock(in_feat: {:d}, momentum: {:.3f}, only_bias: {:s})'.format(self.in_dim,
self.bn_momentum,
str(not self.use_bn))
class UnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard unary block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(UnaryBlock, self).__init__()
self.bn_momentum = bn_momentum
self.use_bn = use_bn
self.no_relu = no_relu
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
self.batch_norm = BatchNormBlock(out_dim, self.use_bn, self.bn_momentum)
if not no_relu:
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch=None):
x = self.mlp(x)
x = self.batch_norm(x)
if not self.no_relu:
x = self.leaky_relu(x)
return x
def __repr__(self):
return 'UnaryBlock(in_feat: {:d}, out_feat: {:d}, BN: {:s}, ReLU: {:s})'.format(self.in_dim,
self.out_dim,
str(self.use_bn),
str(not self.no_relu))
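# Usage sketch (illustration only): UnaryBlock is a pointwise Linear ->
# (Instance)Norm -> LeakyReLU applied to a flat [N, C] feature stack.
def _unary_demo():
    block = UnaryBlock(in_dim=32, out_dim=64, use_bn=True, bn_momentum=0.1)
    feats = torch.rand(100, 32)
    out = block(feats)                        # [100, 64]
    assert out.shape == (100, 64)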
class LastUnaryBlock(nn.Module):
def __init__(self, in_dim, out_dim, use_bn, bn_momentum, no_relu=False):
"""
Initialize a standard last_unary block without BN, ReLU.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param use_bn: boolean indicating if we use Batch Norm
:param bn_momentum: Batch norm momentum
"""
super(LastUnaryBlock, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.mlp = nn.Linear(in_dim, out_dim, bias=False)
return
def forward(self, x, batch=None):
x = self.mlp(x)
return x
def __repr__(self):
return 'LastUnaryBlock(in_feat: {:d}, out_feat: {:d})'.format(self.in_dim,
self.out_dim)
class SimpleBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
"""
Initialize a simple convolution block with its ReLU and BatchNorm.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param radius: current radius of convolution
:param config: parameters
"""
super(SimpleBlock, self).__init__()
# get KP_extent from current radius
current_extent = radius * config.KP_extent / config.conv_radius
# Get other parameters
self.bn_momentum = config.batch_norm_momentum
self.use_bn = config.use_batch_norm
self.layer_ind = layer_ind
self.block_name = block_name
self.in_dim = in_dim
self.out_dim = out_dim
# Define the KPConv class
self.KPConv = KPConv(config.num_kernel_points,
config.in_points_dim,
in_dim,
out_dim // 2,
current_extent,
radius,
fixed_kernel_points=config.fixed_kernel_points,
KP_influence=config.KP_influence,
aggregation_mode=config.aggregation_mode,
deformable='deform' in block_name,
modulated=config.modulated)
        # Other operations
self.batch_norm = BatchNormBlock(out_dim // 2, self.use_bn, self.bn_momentum)
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, x, batch):
if 'strided' in self.block_name:
q_pts = batch['points'][self.layer_ind + 1]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['pools'][self.layer_ind]
else:
q_pts = batch['points'][self.layer_ind]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['neighbors'][self.layer_ind]
x = self.KPConv(q_pts, s_pts, neighb_inds, x)
return self.leaky_relu(self.batch_norm(x))
class ResnetBottleneckBlock(nn.Module):
def __init__(self, block_name, in_dim, out_dim, radius, layer_ind, config):
"""
Initialize a resnet bottleneck block.
        :param in_dim: dimension of input features
        :param out_dim: dimension of output features
:param radius: current radius of convolution
:param config: parameters
"""
super(ResnetBottleneckBlock, self).__init__()
# get KP_extent from current radius
current_extent = radius * config.KP_extent / config.conv_radius
# Get other parameters
self.bn_momentum = config.batch_norm_momentum
self.use_bn = config.use_batch_norm
self.block_name = block_name
self.layer_ind = layer_ind
self.in_dim = in_dim
self.out_dim = out_dim
# First downscaling mlp
if in_dim != out_dim // 4:
self.unary1 = UnaryBlock(in_dim, out_dim // 4, self.use_bn, self.bn_momentum)
else:
self.unary1 = nn.Identity()
# KPConv block
self.KPConv = KPConv(config.num_kernel_points,
config.in_points_dim,
out_dim // 4,
out_dim // 4,
current_extent,
radius,
fixed_kernel_points=config.fixed_kernel_points,
KP_influence=config.KP_influence,
aggregation_mode=config.aggregation_mode,
deformable='deform' in block_name,
modulated=config.modulated)
self.batch_norm_conv = BatchNormBlock(out_dim // 4, self.use_bn, self.bn_momentum)
# Second upscaling mlp
self.unary2 = UnaryBlock(out_dim // 4, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
        # Optional shortcut mlp
if in_dim != out_dim:
self.unary_shortcut = UnaryBlock(in_dim, out_dim, self.use_bn, self.bn_momentum, no_relu=True)
else:
self.unary_shortcut = nn.Identity()
# Other operations
self.leaky_relu = nn.LeakyReLU(0.1)
return
def forward(self, features, batch):
if 'strided' in self.block_name:
q_pts = batch['points'][self.layer_ind + 1]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['pools'][self.layer_ind]
else:
q_pts = batch['points'][self.layer_ind]
s_pts = batch['points'][self.layer_ind]
neighb_inds = batch['neighbors'][self.layer_ind]
# First downscaling mlp
x = self.unary1(features)
# Convolution
x = self.KPConv(q_pts, s_pts, neighb_inds, x)
x = self.leaky_relu(self.batch_norm_conv(x))
# Second upscaling mlp
x = self.unary2(x)
# Shortcut
if 'strided' in self.block_name:
shortcut = max_pool(features, neighb_inds)
else:
shortcut = features
shortcut = self.unary_shortcut(shortcut)
return self.leaky_relu(x + shortcut)
class GlobalAverageBlock(nn.Module):
def __init__(self):
"""
        Initialize a global average block.
"""
super(GlobalAverageBlock, self).__init__()
return
def forward(self, x, batch):
return global_average(x, batch['stack_lengths'][-1])
class NearestUpsampleBlock(nn.Module):
def __init__(self, layer_ind):
"""
        Initialize a nearest upsampling block.
"""
super(NearestUpsampleBlock, self).__init__()
self.layer_ind = layer_ind
return
def forward(self, x, batch):
return closest_pool(x, batch['upsamples'][self.layer_ind - 1])
def __repr__(self):
return 'NearestUpsampleBlock(layer: {:d} -> {:d})'.format(self.layer_ind,
self.layer_ind - 1)
class MaxPoolBlock(nn.Module):
def __init__(self, layer_ind):
"""
        Initialize a max pooling block.
"""
super(MaxPoolBlock, self).__init__()
self.layer_ind = layer_ind
return
def forward(self, x, batch):
return max_pool(x, batch['pools'][self.layer_ind + 1])
| 26,090 | 35.956091 | 122 | py |
lepard | lepard-main/cpp_wrappers/cpp_neighbors/setup.py | from distutils.core import setup, Extension
import numpy.distutils.misc_util
# Adding sources of the project
# *****************************
SOURCES = ["../cpp_utils/cloud/cloud.cpp",
"neighbors/neighbors.cpp",
"wrapper.cpp"]
module = Extension(name="radius_neighbors",
sources=SOURCES,
extra_compile_args=['-std=c++11',
'-D_GLIBCXX_USE_CXX11_ABI=0'])
setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
| 619 | 20.37931 | 92 | py |
lepard | lepard-main/cpp_wrappers/cpp_subsampling/setup.py | from distutils.core import setup, Extension
import numpy.distutils.misc_util
# Adding sources of the project
# *****************************
SOURCES = ["../cpp_utils/cloud/cloud.cpp",
"grid_subsampling/grid_subsampling.cpp",
"wrapper.cpp"]
module = Extension(name="grid_subsampling",
sources=SOURCES,
extra_compile_args=['-std=c++11',
'-D_GLIBCXX_USE_CXX11_ABI=0'])
setup(ext_modules=[module], include_dirs=numpy.distutils.misc_util.get_numpy_include_dirs())
| 633 | 20.862069 | 92 | py |
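# Build note (a hedged addition, not from the repo's docs; standard distutils
# usage): both wrappers are typically compiled in place from their own
# directories, e.g.
#   cd cpp_wrappers/cpp_neighbors   && python setup.py build_ext --inplace
#   cd cpp_wrappers/cpp_subsampling && python setup.py build_ext --inplace
# after which the imports in datasets/dataloader.py
# (cpp_wrappers.cpp_subsampling.grid_subsampling,
#  cpp_wrappers.cpp_neighbors.radius_neighbors) resolve.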
lepard | lepard-main/datasets/_4dmatch.py | import os, sys, glob, torch
# sys.path.append("../")
[sys.path.append(i) for i in ['.', '..']]
import numpy as np
import torch
import random
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, KDTree_corr
from lib.utils import load_obj
HMN_intrin = np.array( [443, 256, 443, 250 ])
cam_intrin = np.array( [443, 256, 443, 250 ])
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, get_correspondences
class _4DMatch(Dataset):
def __init__(self, config, split, data_augmentation=True):
super(_4DMatch, self).__init__()
assert split in ['train','val','test']
if 'overfit' in config.exp_dir:
d_slice = config.batch_size
else :
d_slice = None
self.entries = self.read_entries( config.split[split] , config.data_root, d_slice=d_slice )
self.base_dir = config.data_root
self.data_augmentation = data_augmentation
self.config = config
self.rot_factor = 1.
self.augment_noise = config.augment_noise
self.max_points = 30000
self.overlap_radius = 0.0375
self.cache = {}
self.cache_size = 30000
    def read_entries(self, split, data_root, d_slice=None, shuffle=False):
entries = glob.glob(os.path.join(data_root, split, "*/*.npz"))
if shuffle:
random.shuffle(entries)
if d_slice:
return entries[:d_slice]
return entries
def __len__(self):
return len(self.entries )
def __getitem__(self, index, debug=False):
if index in self.cache:
entry = self.cache[index]
else :
entry = np.load(self.entries[index])
if len(self.cache) < self.cache_size:
self.cache[index] = entry
# get transformation
rot = entry['rot']
trans = entry['trans']
s2t_flow = entry['s2t_flow']
src_pcd = entry['s_pc']
tgt_pcd = entry['t_pc']
correspondences = entry['correspondences'] # obtained with search radius 0.015 m
src_pcd_deformed = src_pcd + s2t_flow
if "metric_index" in entry:
metric_index = entry['metric_index'].squeeze()
else:
metric_index = None
# if we get too many points, we do some downsampling
if (src_pcd.shape[0] > self.max_points):
idx = np.random.permutation(src_pcd.shape[0])[:self.max_points]
src_pcd = src_pcd[idx]
if (tgt_pcd.shape[0] > self.max_points):
idx = np.random.permutation(tgt_pcd.shape[0])[:self.max_points]
tgt_pcd = tgt_pcd[idx]
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.013
src_wrapped = (np.matmul( rot, src_pcd_deformed.T ) + trans ).T
mlab.points3d(src_wrapped[:, 0], src_wrapped[:, 1], src_wrapped[:, 2], scale_factor=scale_factor, color=c_pink)
mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.show()
        # data augmentation: random rigid rotation + gaussian noise
if self.data_augmentation:
# rotate the point cloud
euler_ab = np.random.rand(3) * np.pi * 2 / self.rot_factor # anglez, angley, anglex
rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
if (np.random.rand(1)[0] > 0.5):
src_pcd = np.matmul(rot_ab, src_pcd.T).T
src_pcd_deformed = np.matmul(rot_ab, src_pcd_deformed.T).T
rot = np.matmul(rot, rot_ab.T)
else:
tgt_pcd = np.matmul(rot_ab, tgt_pcd.T).T
rot = np.matmul(rot_ab, rot)
trans = np.matmul(rot_ab, trans)
src_pcd += (np.random.rand(src_pcd.shape[0], 3) - 0.5) * self.augment_noise
tgt_pcd += (np.random.rand(tgt_pcd.shape[0], 3) - 0.5) * self.augment_noise
s2t_flow = src_pcd_deformed - src_pcd
if debug:
# wrapp_src = (np.matmul(rot, src_pcd.T)+ trans).T
src_wrapped = (np.matmul( rot, src_pcd_deformed.T ) + trans ).T
mlab.points3d(src_wrapped[:, 0], src_wrapped[:, 1], src_wrapped[:, 2], scale_factor=scale_factor, color=c_red)
mlab.points3d(tgt_pcd[:, 0], tgt_pcd[:, 1], tgt_pcd[:, 2], scale_factor=scale_factor, color=c_blue)
mlab.show()
if (trans.ndim == 1):
trans = trans[:, None]
src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
rot = rot.astype(np.float32)
trans = trans.astype(np.float32)
        # R * (Ps + flow) + t = Pt
return src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trans, s2t_flow, metric_index
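# Tiny numeric check of the convention noted above (illustration only):
# the returned (rot, trans, s2t_flow) satisfy  R @ (src + flow) + t == tgt
# for true correspondences.
def _flow_convention_demo():
    src = np.random.rand(5, 3).astype(np.float32)
    flow = np.random.rand(5, 3).astype(np.float32) * 0.1
    R = Rotation.from_euler('zyx', [0.3, 0.1, -0.2]).as_matrix().astype(np.float32)
    t = np.array([[0.5], [0.0], [1.0]], dtype=np.float32)
    tgt = (R @ (src + flow).T + t).T                      # forward model
    recovered = (np.linalg.inv(R) @ (tgt.T - t)).T - src  # invert it
    assert np.allclose(recovered, flow, atol=1e-5)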
if __name__ == '__main__':
from lib.utils import load_config
from easydict import EasyDict as edict
from lib.tictok import Timers
import yaml
def join(loader, node):
seq = loader.construct_sequence(node)
return '_'.join([str(i) for i in seq])
yaml.add_constructor('!join', join)
config = "/home/liyang/workspace/Regformer/configs/train/4dmatch.yaml"
with open(config,'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
config = edict(config)
config.timers=Timers()
D = _4DMatch(config, "test")
for i in range (len(D)):
try:
if i%1000 == 0 :
print (i,"/",len(D))
D.__getitem__(i, debug=True)
except:
# print(i, "/", len(D))
pass | 5,939 | 32.75 | 123 | py |
lepard | lepard-main/datasets/dataloader.py | import numpy as np
from functools import partial
import torch
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
import cpp_wrappers.cpp_neighbors.radius_neighbors as cpp_neighbors
from datasets._3dmatch import _3DMatch
from datasets._4dmatch import _4DMatch
from datasets.utils import blend_scene_flow, multual_nn_correspondence
from lib.visualization import *
from torch.utils.data import DataLoader
def batch_grid_subsampling_kpconv(points, batches_len, features=None, labels=None, sampleDl=0.1, max_p=0, verbose=0, random_grid_orient=True):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
"""
if (features is None) and (labels is None):
s_points, s_len = cpp_subsampling.subsample_batch(points,
batches_len,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len)
elif (labels is None):
s_points, s_len, s_features = cpp_subsampling.subsample_batch(points,
batches_len,
features=features,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_features)
elif (features is None):
s_points, s_len, s_labels = cpp_subsampling.subsample_batch(points,
batches_len,
classes=labels,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_labels)
else:
s_points, s_len, s_features, s_labels = cpp_subsampling.subsample_batch(points,
batches_len,
features=features,
classes=labels,
sampleDl=sampleDl,
max_p=max_p,
verbose=verbose)
return torch.from_numpy(s_points), torch.from_numpy(s_len), torch.from_numpy(s_features), torch.from_numpy(s_labels)
def batch_neighbors_kpconv(queries, supports, q_batches, s_batches, radius, max_neighbors):
"""
Computes neighbors for a batch of queries and supports, apply radius search
:param queries: (N1, 3) the query points
:param supports: (N2, 3) the support points
:param q_batches: (B) the list of lengths of batch elements in queries
    :param s_batches: (B) the list of lengths of batch elements in supports
:param radius: float32
:return: neighbors indices
"""
neighbors = cpp_neighbors.batch_query(queries, supports, q_batches, s_batches, radius=radius)
if max_neighbors > 0:
return torch.from_numpy(neighbors[:, :max_neighbors])
else:
return torch.from_numpy(neighbors)
def collate_fn_3dmatch(list_data, config, neighborhood_limits ):
batched_points_list = []
batched_features_list = []
batched_lengths_list = []
correspondences_list = []
src_pcd_list = []
tgt_pcd_list = []
batched_rot = []
batched_trn = []
gt_cov_list = []
for ind, ( src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trn, gt_cov) in enumerate(list_data):
correspondences_list.append(correspondences )
src_pcd_list.append(torch.from_numpy(src_pcd) )
tgt_pcd_list.append(torch.from_numpy(tgt_pcd) )
batched_points_list.append(src_pcd)
batched_points_list.append(tgt_pcd)
batched_features_list.append(src_feats)
batched_features_list.append(tgt_feats)
batched_lengths_list.append(len(src_pcd))
batched_lengths_list.append(len(tgt_pcd))
batched_rot.append( torch.from_numpy(rot).float())
batched_trn.append( torch.from_numpy(trn).float())
gt_cov_list.append(gt_cov)
gt_cov_list = None if gt_cov_list[0] is None \
else np.stack(gt_cov_list, axis=0)
# if timers: cnter['collate_load_batch'] = time.time() - st
batched_features = torch.from_numpy(np.concatenate(batched_features_list, axis=0))
batched_points = torch.from_numpy(np.concatenate(batched_points_list, axis=0))
batched_lengths = torch.from_numpy(np.array(batched_lengths_list)).int()
batched_rot = torch.stack(batched_rot,dim=0)
batched_trn = torch.stack(batched_trn,dim=0)
# Starting radius of convolutions
r_normal = config.first_subsampling_dl * config.conv_radius
# Starting layer
layer_blocks = []
layer = 0
# Lists of inputs
input_points = []
input_neighbors = []
input_pools = []
input_upsamples = []
input_batches_len = []
    # construct kpfcn indices
for block_i, block in enumerate(config.architecture):
# Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block:
break
# Get all blocks of the layer
if not ('pool' in block or 'strided' in block):
layer_blocks += [block]
if block_i < len(config.architecture) - 1 and not ('upsample' in config.architecture[block_i + 1]):
continue
# Convolution neighbors indices
# *****************************
if layer_blocks:
            # Convolutions are done in this layer, compute the neighbors with the appropriate radius
if np.any(['deformable' in blck for blck in layer_blocks[:-1]]):
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
conv_i = batch_neighbors_kpconv(batched_points, batched_points, batched_lengths, batched_lengths, r,
neighborhood_limits[layer])
else:
            # This layer only performs pooling, no neighbors required
conv_i = torch.zeros((0, 1), dtype=torch.int64)
# Pooling neighbors indices
# *************************
# If end of layer is a pooling operation
if 'pool' in block or 'strided' in block:
# New subsampling length
dl = 2 * r_normal / config.conv_radius
# Subsampled points
pool_p, pool_b = batch_grid_subsampling_kpconv(batched_points, batched_lengths, sampleDl=dl)
# Radius of pooled neighbors
if 'deformable' in block:
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
# Subsample indices
pool_i = batch_neighbors_kpconv(pool_p, batched_points, pool_b, batched_lengths, r,
neighborhood_limits[layer])
# Upsample indices (with the radius of the next layer to keep wanted density)
up_i = batch_neighbors_kpconv(batched_points, pool_p, batched_lengths, pool_b, 2 * r,
neighborhood_limits[layer])
else:
            # No pooling at the end of this layer, no pooling indices required
pool_i = torch.zeros((0, 1), dtype=torch.int64)
pool_p = torch.zeros((0, 3), dtype=torch.float32)
pool_b = torch.zeros((0,), dtype=torch.int64)
up_i = torch.zeros((0, 1), dtype=torch.int64)
# Updating input lists
input_points += [batched_points.float()]
input_neighbors += [conv_i.long()]
input_pools += [pool_i.long()]
input_upsamples += [up_i.long()]
input_batches_len += [batched_lengths]
# New points for next layer
batched_points = pool_p
batched_lengths = pool_b
# Update radius and reset blocks
r_normal *= 2
layer += 1
layer_blocks = []
    # coarse information
coarse_level = config.coarse_level
pts_num_coarse = input_batches_len[coarse_level].view(-1, 2)
b_size = pts_num_coarse.shape[0]
src_pts_max, tgt_pts_max = pts_num_coarse.amax(dim=0)
coarse_pcd = input_points[coarse_level] # .numpy()
coarse_matches= []
    src_ind_coarse_split = []  # src_feats shape: [b_size * src_pts_max]
src_ind_coarse = []
tgt_ind_coarse_split= []
tgt_ind_coarse = []
accumu = 0
src_mask = torch.zeros([b_size, src_pts_max], dtype=torch.bool)
tgt_mask = torch.zeros([b_size, tgt_pts_max], dtype=torch.bool)
    # grid subsample fine level points for differentiable matching
    fine_pts, fine_length = batch_grid_subsampling_kpconv(input_points[0], input_batches_len[0], sampleDl=dl * 0.5 * 0.85)
    fine_ind = batch_neighbors_kpconv(fine_pts, input_points[0], fine_length, input_batches_len[0], dl * 0.5 * 0.85, 1).squeeze().long()
for entry_id, cnt in enumerate( pts_num_coarse ): #input_batches_len[-1].numpy().reshape(-1,2)) :
n_s_pts, n_t_pts = cnt
        '''split mask for bottleneck feats'''
src_mask[entry_id][:n_s_pts] = 1
tgt_mask[entry_id][:n_t_pts] = 1
'''split indices of bottleneck feats'''
src_ind_coarse_split.append( torch.arange( n_s_pts ) + entry_id * src_pts_max )
tgt_ind_coarse_split.append( torch.arange( n_t_pts ) + entry_id * tgt_pts_max )
src_ind_coarse.append( torch.arange( n_s_pts ) + accumu )
tgt_ind_coarse.append( torch.arange( n_t_pts ) + accumu + n_s_pts )
'''get match at coarse level'''
c_src_pcd = coarse_pcd[accumu : accumu + n_s_pts]
c_tgt_pcd = coarse_pcd[accumu + n_s_pts: accumu + n_s_pts + n_t_pts]
s_pc_wrapped = (torch.matmul( batched_rot[entry_id], c_src_pcd.T ) + batched_trn [entry_id]).T
coarse_match_gt = torch.from_numpy( multual_nn_correspondence(s_pc_wrapped.numpy(), c_tgt_pcd.numpy(), search_radius=config['coarse_match_radius']) )# 0.1m scaled
coarse_matches.append(coarse_match_gt)
accumu = accumu + n_s_pts + n_t_pts
vis=False # for debug
if vis :
viz_coarse_nn_correspondence_mayavi(c_src_pcd, c_tgt_pcd, coarse_match_gt, scale_factor=0.04)
vis=False # for debug
if vis :
pass
import mayavi.mlab as mlab
# src_nei_valid = src_nei_mask[coarse_match_gt[0]].view(-1)
# tgt_nei_valid = tgt_nei_mask[coarse_match_gt[1]].view(-1)
#
# f_src_pcd = src_m_nei_pts.view(-1, 3)[src_nei_valid]
# f_tgt_pcd = tgt_m_nei_pts.view(-1,3)[tgt_nei_valid]
#
# mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=0.02,color=c_gray1)
# mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=0.02,color=c_gray2)
#
# src_m_nn_pts =src_m_nn_pts.view(-1, 3)
# src_m_nn_pts_wrapped = src_m_nn_pts_wrapped.view(-1,3)
# tgt_m_nn_pts = tgt_m_nei_pts [ torch.arange(tgt_m_nei_pts.shape[0]), nni.view(-1), ... ]
# mlab.points3d(src_m_nn_pts[:, 0], src_m_nn_pts[:, 1], src_m_nn_pts[:, 2], scale_factor=0.04,color=c_red)
# mlab.points3d(src_m_nn_pts_wrapped[:, 0], src_m_nn_pts_wrapped[:, 1], src_m_nn_pts_wrapped[:, 2], scale_factor=0.04,color=c_red)
# mlab.points3d(tgt_m_nn_pts[:, 0], tgt_m_nn_pts[:, 1], tgt_m_nn_pts[:, 2], scale_factor=0.04 ,color=c_blue)
# mlab.show()
# viz_coarse_nn_correspondence_mayavi(c_src_pcd, c_tgt_pcd, coarse_match_gt,
# f_src_pcd=src_m_nei_pts.view(-1,3)[src_nei_valid],
# f_tgt_pcd=tgt_m_nei_pts.view(-1,3)[tgt_nei_valid], scale_factor=0.08)
src_ind_coarse_split = torch.cat(src_ind_coarse_split)
tgt_ind_coarse_split = torch.cat(tgt_ind_coarse_split)
src_ind_coarse = torch.cat(src_ind_coarse)
tgt_ind_coarse = torch.cat(tgt_ind_coarse)
dict_inputs = {
'src_pcd_list': src_pcd_list,
'tgt_pcd_list': tgt_pcd_list,
'points': input_points,
'neighbors': input_neighbors,
'pools': input_pools,
'upsamples': input_upsamples,
'features': batched_features.float(),
'stack_lengths': input_batches_len,
'coarse_matches': coarse_matches,
'src_mask': src_mask,
'tgt_mask': tgt_mask,
'src_ind_coarse_split': src_ind_coarse_split,
'tgt_ind_coarse_split': tgt_ind_coarse_split,
'src_ind_coarse': src_ind_coarse,
'tgt_ind_coarse': tgt_ind_coarse,
'batched_rot': batched_rot,
'batched_trn': batched_trn,
'gt_cov': gt_cov_list,
#for refine
'correspondences_list': correspondences_list,
'fine_ind': fine_ind,
'fine_pts': fine_pts,
'fine_length': fine_length
}
return dict_inputs
def collate_fn_4dmatch(list_data, config, neighborhood_limits ):
batched_points_list = []
batched_features_list = []
batched_lengths_list = []
correspondences_list = []
src_pcd_list = []
tgt_pcd_list = []
batched_rot = []
batched_trn = []
sflow_list = []
    metric_index_list = []  # for feature matching recall computation
for ind, ( src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trn, s2t_flow, metric_index) in enumerate(list_data):
correspondences_list.append(correspondences )
src_pcd_list.append(torch.from_numpy(src_pcd) )
tgt_pcd_list.append(torch.from_numpy(tgt_pcd) )
batched_points_list.append(src_pcd)
batched_points_list.append(tgt_pcd)
batched_features_list.append(src_feats)
batched_features_list.append(tgt_feats)
batched_lengths_list.append(len(src_pcd))
batched_lengths_list.append(len(tgt_pcd))
batched_rot.append( torch.from_numpy(rot).float())
batched_trn.append( torch.from_numpy(trn).float())
# gt_cov_list.append(gt_cov)
sflow_list.append( torch.from_numpy(s2t_flow).float() )
if metric_index is None:
metric_index_list = None
else :
metric_index_list.append ( torch.from_numpy(metric_index))
# if timers: cnter['collate_load_batch'] = time.time() - st
batched_features = torch.from_numpy(np.concatenate(batched_features_list, axis=0))
batched_points = torch.from_numpy(np.concatenate(batched_points_list, axis=0))
batched_lengths = torch.from_numpy(np.array(batched_lengths_list)).int()
batched_rot = torch.stack(batched_rot,dim=0)
batched_trn = torch.stack(batched_trn,dim=0)
# Starting radius of convolutions
r_normal = config.first_subsampling_dl * config.conv_radius
# Starting layer
layer_blocks = []
layer = 0
# Lists of inputs
input_points = []
input_neighbors = []
input_pools = []
input_upsamples = []
input_batches_len = []
    # construct kpfcn indices
for block_i, block in enumerate(config.architecture):
# Stop when meeting a global pooling or upsampling
if 'global' in block or 'upsample' in block:
break
# Get all blocks of the layer
if not ('pool' in block or 'strided' in block):
layer_blocks += [block]
if block_i < len(config.architecture) - 1 and not ('upsample' in config.architecture[block_i + 1]):
continue
# Convolution neighbors indices
# *****************************
if layer_blocks:
            # Convolutions are done in this layer, compute the neighbors with the appropriate radius
if np.any(['deformable' in blck for blck in layer_blocks[:-1]]):
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
conv_i = batch_neighbors_kpconv(batched_points, batched_points, batched_lengths, batched_lengths, r,
neighborhood_limits[layer])
else:
            # This layer only performs pooling, no neighbors required
conv_i = torch.zeros((0, 1), dtype=torch.int64)
# Pooling neighbors indices
# *************************
# If end of layer is a pooling operation
if 'pool' in block or 'strided' in block:
# New subsampling length
dl = 2 * r_normal / config.conv_radius
# Subsampled points
pool_p, pool_b = batch_grid_subsampling_kpconv(batched_points, batched_lengths, sampleDl=dl)
# Radius of pooled neighbors
if 'deformable' in block:
r = r_normal * config.deform_radius / config.conv_radius
else:
r = r_normal
# Subsample indices
pool_i = batch_neighbors_kpconv(pool_p, batched_points, pool_b, batched_lengths, r,
neighborhood_limits[layer])
# Upsample indices (with the radius of the next layer to keep wanted density)
up_i = batch_neighbors_kpconv(batched_points, pool_p, batched_lengths, pool_b, 2 * r,
neighborhood_limits[layer])
else:
            # No pooling at the end of this layer, no pooling indices required
pool_i = torch.zeros((0, 1), dtype=torch.int64)
pool_p = torch.zeros((0, 3), dtype=torch.float32)
pool_b = torch.zeros((0,), dtype=torch.int64)
up_i = torch.zeros((0, 1), dtype=torch.int64)
# Updating input lists
input_points += [batched_points.float()]
input_neighbors += [conv_i.long()]
input_pools += [pool_i.long()]
input_upsamples += [up_i.long()]
input_batches_len += [batched_lengths]
# New points for next layer
batched_points = pool_p
batched_lengths = pool_b
# Update radius and reset blocks
r_normal *= 2
layer += 1
layer_blocks = []
    # coarse information
coarse_level = config.coarse_level
pts_num_coarse = input_batches_len[coarse_level].view(-1, 2)
b_size = pts_num_coarse.shape[0]
src_pts_max, tgt_pts_max = pts_num_coarse.amax(dim=0)
coarse_pcd = input_points[coarse_level] # .numpy()
coarse_matches= []
coarse_flow = []
    src_ind_coarse_split = []  # src_feats shape: [b_size * src_pts_max]
src_ind_coarse = []
tgt_ind_coarse_split= []
tgt_ind_coarse = []
accumu = 0
src_mask = torch.zeros([b_size, src_pts_max], dtype=torch.bool)
tgt_mask = torch.zeros([b_size, tgt_pts_max], dtype=torch.bool)
for entry_id, cnt in enumerate( pts_num_coarse ): #input_batches_len[-1].numpy().reshape(-1,2)) :
n_s_pts, n_t_pts = cnt
        '''split mask for bottleneck feats'''
src_mask[entry_id][:n_s_pts] = 1
tgt_mask[entry_id][:n_t_pts] = 1
'''split indices of bottleneck feats'''
src_ind_coarse_split.append( torch.arange( n_s_pts ) + entry_id * src_pts_max )
tgt_ind_coarse_split.append( torch.arange( n_t_pts ) + entry_id * tgt_pts_max )
src_ind_coarse.append( torch.arange( n_s_pts ) + accumu )
tgt_ind_coarse.append( torch.arange( n_t_pts ) + accumu + n_s_pts )
'''get match at coarse level'''
c_src_pcd_np = coarse_pcd[accumu : accumu + n_s_pts].numpy()
c_tgt_pcd_np = coarse_pcd[accumu + n_s_pts: accumu + n_s_pts + n_t_pts].numpy()
        # interpolate flow
f_src_pcd = batched_points_list[entry_id * 2]
c_flow = blend_scene_flow( c_src_pcd_np, f_src_pcd, sflow_list[entry_id].numpy(), knn=3)
c_src_pcd_deformed = c_src_pcd_np + c_flow
s_pc_wrapped = (np.matmul( batched_rot[entry_id].numpy(), c_src_pcd_deformed.T ) + batched_trn [entry_id].numpy()).T
coarse_match_gt = torch.from_numpy( multual_nn_correspondence(s_pc_wrapped , c_tgt_pcd_np , search_radius=config['coarse_match_radius']) )# 0.1m scaled
coarse_matches.append(coarse_match_gt)
coarse_flow.append(torch.from_numpy(c_flow) )
accumu = accumu + n_s_pts + n_t_pts
vis=False # for debug
if vis :
viz_coarse_nn_correspondence_mayavi(c_src_pcd_np, c_tgt_pcd_np, coarse_match_gt, scale_factor=0.02)
src_ind_coarse_split = torch.cat(src_ind_coarse_split)
tgt_ind_coarse_split = torch.cat(tgt_ind_coarse_split)
src_ind_coarse = torch.cat(src_ind_coarse)
tgt_ind_coarse = torch.cat(tgt_ind_coarse)
dict_inputs = {
'src_pcd_list': src_pcd_list,
'tgt_pcd_list': tgt_pcd_list,
'points': input_points,
'neighbors': input_neighbors,
'pools': input_pools,
'upsamples': input_upsamples,
'features': batched_features.float(),
'stack_lengths': input_batches_len,
'coarse_matches': coarse_matches,
'coarse_flow' : coarse_flow,
'src_mask': src_mask,
'tgt_mask': tgt_mask,
'src_ind_coarse_split': src_ind_coarse_split,
'tgt_ind_coarse_split': tgt_ind_coarse_split,
'src_ind_coarse': src_ind_coarse,
'tgt_ind_coarse': tgt_ind_coarse,
'batched_rot': batched_rot,
'batched_trn': batched_trn,
'sflow_list': sflow_list,
"metric_index_list": metric_index_list
}
return dict_inputs
def calibrate_neighbors(dataset, config, collate_fn, keep_ratio=0.8, samples_threshold=2000):
# From config parameter, compute higher bound of neighbors number in a neighborhood
hist_n = int(np.ceil(4 / 3 * np.pi * (config.deform_radius + 1) ** 3))
neighb_hists = np.zeros((config.num_layers, hist_n), dtype=np.int32)
    # Get histogram of neighborhood sizes in 1 epoch max.
for i in range(len(dataset)):
batched_input = collate_fn([dataset[i]], config, neighborhood_limits=[hist_n] * 5)
# update histogram
counts = [torch.sum(neighb_mat < neighb_mat.shape[0], dim=1).numpy() for neighb_mat in batched_input['neighbors']]
hists = [np.bincount(c, minlength=hist_n)[:hist_n] for c in counts]
neighb_hists += np.vstack(hists)
# if timer.total_time - last_display > 0.1:
# last_display = timer.total_time
# print(f"Calib Neighbors {i:08d}: timings {timer.total_time:4.2f}s")
if np.min(np.sum(neighb_hists, axis=1)) > samples_threshold:
break
cumsum = np.cumsum(neighb_hists.T, axis=0)
percentiles = np.sum(cumsum < (keep_ratio * cumsum[hist_n - 1, :]), axis=0)
neighborhood_limits = percentiles
print('\n')
return neighborhood_limits
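# Worked toy example of the percentile trick above (illustration only): for one
# layer whose neighborhood-size histogram is [0, 2, 5, 2, 1] (sizes 0..4),
# cumsum = [0, 2, 7, 9, 10]; with keep_ratio = 0.8 the threshold is 8 and three
# cumsum entries lie below it, so a cap of 3 neighbors covers >= 80% of the
# observed neighborhoods.
def _calibrate_demo():
    hist = np.array([[0, 2, 5, 2, 1]])                    # [num_layers, hist_n]
    cumsum = np.cumsum(hist.T, axis=0)
    limit = np.sum(cumsum < (0.8 * cumsum[-1, :]), axis=0)
    assert limit[0] == 3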
def get_datasets(config):
if (config.dataset == '3dmatch'):
train_set = _3DMatch(config, 'train', data_augmentation=True)
val_set = _3DMatch(config, 'val', data_augmentation=False)
test_set = _3DMatch(config, 'test', data_augmentation=False)
elif(config.dataset == '4dmatch'):
train_set = _4DMatch(config, 'train', data_augmentation=True)
val_set = _4DMatch(config, 'val', data_augmentation=False)
test_set = _4DMatch(config, 'test', data_augmentation=False)
else:
raise NotImplementedError
return train_set, val_set, test_set
def get_dataloader(dataset, config, shuffle=True, neighborhood_limits=None):
if config.dataset=='4dmatch':
collate_fn = collate_fn_4dmatch
elif config.dataset == '3dmatch':
collate_fn = collate_fn_3dmatch
else:
raise NotImplementedError()
if neighborhood_limits is None:
neighborhood_limits = calibrate_neighbors(dataset, config['kpfcn_config'], collate_fn=collate_fn)
print("neighborhood:", neighborhood_limits)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=config['batch_size'],
shuffle=shuffle,
num_workers=config['num_workers'],
collate_fn=partial(collate_fn, config=config['kpfcn_config'], neighborhood_limits=neighborhood_limits ),
drop_last=False
)
return dataloader, neighborhood_limits
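# End-to-end usage sketch (mirrors the __main__ blocks of datasets/_3dmatch.py
# and _4dmatch.py; the config path is a placeholder, not part of this file):
#   import yaml
#   from easydict import EasyDict as edict
#   with open('configs/train/3dmatch.yaml', 'r') as f:    # hypothetical path
#       config = edict(yaml.load(f, Loader=yaml.Loader))
#   train_set, val_set, test_set = get_datasets(config)
#   train_loader, neighborhood_limits = get_dataloader(train_set, config, shuffle=True)
#   val_loader, _ = get_dataloader(val_set, config, shuffle=False,
#                                  neighborhood_limits=neighborhood_limits)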
if __name__ == '__main__':
pass
| 24,996 | 37.875583 | 171 | py |
lepard | lepard-main/datasets/utils.py | import numpy as np
# from lib.benchmark_utils import to_o3d_pcd, KDTree_corr
def partition_arg_topK(matrix, K, axis=0):
""" find index of K smallest entries along a axis
perform topK based on np.argpartition
:param matrix: to be sorted
:param K: select and sort the top K items
:param axis: 0 or 1. dimension to be sorted.
:return:
"""
a_part = np.argpartition(matrix, K, axis=axis)
if axis == 0:
row_index = np.arange(matrix.shape[1 - axis])
a_sec_argsort_K = np.argsort(matrix[a_part[0:K, :], row_index], axis=axis)
return a_part[0:K, :][a_sec_argsort_K, row_index]
else:
column_index = np.arange(matrix.shape[1 - axis])[:, None]
a_sec_argsort_K = np.argsort(matrix[column_index, a_part[:, 0:K]], axis=axis)
return a_part[:, 0:K][column_index, a_sec_argsort_K]
def knn_point_np(k, reference_pts, query_pts):
    '''
    :param k: number of neighbors in the k-nn search
    :param reference_pts: (N, 3) float32 array, input points
    :param query_pts: (M, 3) float32 array, query points
    :return:
        val: (M, k) float32 array, Euclidean distances
        idx: (M, k) int32 array, indices into reference_pts
    '''
N, _ = reference_pts.shape
M, _ = query_pts.shape
reference_pts = reference_pts.reshape(1, N, -1).repeat(M, axis=0)
query_pts = query_pts.reshape(M, 1, -1).repeat(N, axis=1)
dist = np.sum((reference_pts - query_pts) ** 2, -1)
idx = partition_arg_topK(dist, K=k, axis=1)
val = np.take_along_axis ( dist , idx, axis=1)
return np.sqrt(val), idx
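# Usage sketch (illustration only): nearest neighbor of each query among the
# reference points, with Euclidean distances.
def _knn_demo():
    ref = np.array([[0., 0., 0.], [1., 0., 0.], [0., 2., 0.]], dtype=np.float32)
    query = np.array([[0.9, 0., 0.]], dtype=np.float32)
    dist, idx = knn_point_np(1, ref, query)
    assert idx[0, 0] == 1 and np.isclose(dist[0, 0], 0.1)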
def blend_scene_flow(query_loc, reference_loc, reference_flow, knn=3):
'''approximate flow on query points
    this function assumes query points are sub-/un-sampled from reference locations
@param query_loc:[m,3]
@param reference_loc:[n,3]
@param reference_flow:[n,3]
@param knn:
@return:
blended_flow:[m,3]
'''
    dists, idx = knn_point_np(knn, reference_loc, query_loc)
    dists[dists < 1e-10] = 1e-10
    weight = 1.0 / dists
    weight = weight / np.sum(weight, -1, keepdims=True)  # [m, knn]
blended_flow = np.sum (reference_flow [idx] * weight.reshape ([-1, knn, 1]), axis=1, keepdims=False)
return blended_flow
def multual_nn_correspondence(src_pcd_deformed, tgt_pcd, search_radius=0.3, knn=1):
src_idx = np.arange(src_pcd_deformed.shape[0])
s2t_dists, ref_tgt_idx = knn_point_np (knn, tgt_pcd, src_pcd_deformed)
s2t_dists, ref_tgt_idx = s2t_dists[:,0], ref_tgt_idx [:, 0]
valid_distance = s2t_dists < search_radius
_, ref_src_idx = knn_point_np (knn, src_pcd_deformed, tgt_pcd)
    ref_src_idx = ref_src_idx[:, 0]
cycle_src_idx = ref_src_idx [ ref_tgt_idx ]
is_mutual_nn = cycle_src_idx == src_idx
mutual_nn = np.logical_and( is_mutual_nn, valid_distance)
correspondences = np.stack([src_idx [ mutual_nn ], ref_tgt_idx[mutual_nn] ] , axis=0)
return correspondences | 2,983 | 36.3 | 104 | py |
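# Usage sketch (illustration only): with two identical 3-point clouds, every
# point is its own mutual nearest neighbor within the search radius.
def _mutual_nn_demo():
    pts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]], dtype=np.float32)
    corr = multual_nn_correspondence(pts, pts.copy(), search_radius=0.1)
    assert np.array_equal(corr, np.stack([np.arange(3), np.arange(3)]))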
lepard | lepard-main/datasets/_3dmatch.py | import os, sys, glob, torch
# sys.path.append("../")
[sys.path.append(i) for i in ['.', '..']]
import numpy as np
import torch
import random
from scipy.spatial.transform import Rotation
from torch.utils.data import Dataset
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, KDTree_corr
from lib.utils import load_obj
from lib.benchmark_utils import to_o3d_pcd, to_tsfm, get_correspondences
class _3DMatch(Dataset):
def __init__(self, config,split, data_augmentation=True):
super(_3DMatch, self).__init__()
assert split in ['train','val','test']
if 'overfit' in config.exp_dir:
d_slice = config.batch_size
else :
d_slice = None
self.infos = self.read_entries( config.split[split] , config.data_root, d_slice=d_slice )
self.base_dir = config.data_root
self.data_augmentation = data_augmentation
self.config = config
self.rot_factor = 1.
self.augment_noise = config.augment_noise
self.max_points = 30000
self.overlap_radius = 0.0375
    def read_entries(self, split, data_root, d_slice=None, shuffle=True):
infos = load_obj(split) # we use the split prepared by Predator
if d_slice:
for k, v in infos.items():
infos[k] = v[:d_slice]
return infos
def __len__(self):
return len(self.infos['rot'])
def __getitem__(self, item, debug=False):
# get transformation
rot = self.infos['rot'][item]
trans = self.infos['trans'][item]
if 'gt_cov' in self.infos:
gt_cov = self.infos['gt_cov'][item]
else :
gt_cov = None
# get pointcloud
src_path = os.path.join(self.base_dir, self.infos['src'][item])
tgt_path = os.path.join(self.base_dir, self.infos['tgt'][item])
src_pcd = torch.load(src_path)
tgt_pcd = torch.load(tgt_path)
# if we get too many points, we do some downsampling
if (src_pcd.shape[0] > self.max_points):
idx = np.random.permutation(src_pcd.shape[0])[:self.max_points]
src_pcd = src_pcd[idx]
if (tgt_pcd.shape[0] > self.max_points):
idx = np.random.permutation(tgt_pcd.shape[0])[:self.max_points]
tgt_pcd = tgt_pcd[idx]
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.02
# mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.show()
# add gaussian noise
if self.data_augmentation:
# rotate the point cloud
euler_ab = np.random.rand(3) * np.pi * 2 / self.rot_factor # anglez, angley, anglex
rot_ab = Rotation.from_euler('zyx', euler_ab).as_matrix()
if (np.random.rand(1)[0] > 0.5):
src_pcd = np.matmul(rot_ab, src_pcd.T).T
rot = np.matmul(rot, rot_ab.T)
else:
tgt_pcd = np.matmul(rot_ab, tgt_pcd.T).T
rot = np.matmul(rot_ab, rot)
trans = np.matmul(rot_ab, trans)
src_pcd += (np.random.rand(src_pcd.shape[0], 3) - 0.5) * self.augment_noise
tgt_pcd += (np.random.rand(tgt_pcd.shape[0], 3) - 0.5) * self.augment_noise
# get correspondence at fine level
tsfm = to_tsfm(rot, trans)
correspondences = get_correspondences(to_o3d_pcd(src_pcd), to_o3d_pcd(tgt_pcd), tsfm,self.overlap_radius)
if debug:
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
scale_factor = 0.02
# mlab.points3d(s_pc[ :, 0] , s_pc[ :, 1], s_pc[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.points3d(src_pcd[ :, 0] , src_pcd[ :, 1], src_pcd[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(tgt_pcd[ :, 0] , tgt_pcd[ :, 1], tgt_pcd[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.show()
if (trans.ndim == 1):
trans = trans[:, None]
src_feats = np.ones_like(src_pcd[:, :1]).astype(np.float32)
tgt_feats = np.ones_like(tgt_pcd[:, :1]).astype(np.float32)
rot = rot.astype(np.float32)
trans = trans.astype(np.float32)
return src_pcd, tgt_pcd, src_feats, tgt_feats, correspondences, rot, trans, gt_cov
if __name__ == '__main__':
from lib.utils import load_config
from easydict import EasyDict as edict
from lib.tictok import Timers
import yaml
def join(loader, node):
seq = loader.construct_sequence(node)
return '_'.join([str(i) for i in seq])
yaml.add_constructor('!join', join)
config = "/home/liyang/workspace/Regformer/configs/train/3dmatch.yaml"
with open(config,'r') as f:
config = yaml.load(f, Loader=yaml.Loader)
config = edict(config)
config.timers=Timers()
D = _3DMatch(config, "test")
for i in range (len(D)):
try:
if i%1000 == 0 :
print (i,"/",len(D))
D.__getitem__(i, debug=True)
except:
pass
# print ( D.data_entries[i] )
# print (os.remove(D.data_entries[i]) )
| 5,766 | 33.327381 | 116 | py |
lepard | lepard-main/lib/visualization.py |
c_red = (224. / 255., 0 / 255., 125 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
c_green = (0. / 255., 255. / 255., 0. / 255.)
c_gray1 = (100. / 255., 100. / 255., 100. / 255.)
c_gray2 = (175. / 255., 175. / 255., 175. / 255.)
def viz_flow_mayavi( s_pc,flow = None, s_pc_deformed=None, t_pc=None, scale_factor = 0.02):
import mayavi.mlab as mlab
mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor, color=c_red)
if flow is not None:
mlab.quiver3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2],
flow[:, 0], flow[:, 1], flow[:, 2], scale_factor=1)
if t_pc is not None:
mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor, color=c_blue)
if s_pc_deformed is not None:
mlab.points3d(s_pc_deformed[:, 0], s_pc_deformed[:, 1], s_pc_deformed[:, 2], scale_factor=scale_factor, color=c_green)
mlab.show()
def viz_coarse_nn_correspondence_mayavi(s_pc, t_pc, correspondence, f_src_pcd=None, f_tgt_pcd=None, scale_factor = 0.02):
'''
@param s_pc: [S,3]
@param t_pc: [T,3]
@param correspondence: [2,K]
@param f_src_pcd: [S1,3]
@param f_tgt_pcd: [T1,3]
@param scale_factor:
@return:
'''
import mayavi
import mayavi.mlab as mlab
if f_src_pcd is not None:
mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=scale_factor * 0.25, color=c_gray1)
else:
mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor*0.75, color=c_gray1)
if f_tgt_pcd is not None:
mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=scale_factor * 0.25, color=c_gray2)
else :
mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor*0.75, color=c_gray2)
s_cpts = s_pc[correspondence[0]]
t_cpts = t_pc[correspondence[1]]
flow = t_cpts-s_cpts
mlab.points3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], scale_factor=scale_factor , color=c_red)
mlab.points3d(t_cpts[:, 0], t_cpts[:, 1], t_cpts[:, 2], scale_factor=scale_factor , color=c_blue)
mlab.quiver3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], flow[:, 0], flow[:, 1], flow[:, 2],
scale_factor=1, mode='2ddash', line_width=1.)
mlab.show()
| 2,352 | 36.951613 | 126 | py |
lepard | lepard-main/lib/timer.py | import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0.0
self.sq_sum = 0.0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
self.sq_sum += val ** 2 * n
self.var = self.sq_sum / self.count - self.avg ** 2
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.avg = 0.
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
self.avg = 0
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.avg = self.total_time / self.calls
if average:
return self.avg
else:
return self.diff
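# Usage sketch for Timer (illustrative addition): tic() starts the clock and
# toc() accumulates elapsed time; with average=False it returns the last diff.
def _demo_timer():
    t = Timer()
    for _ in range(3):
        t.tic()
        sum(range(10000))  # placeholder work to time
        t.toc(average=False)
    return t.avg, t.total_time, t.calls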
| 1,335 | 22.438596 | 71 | py |
lepard | lepard-main/lib/benchmark_utils.py | import os,re,sys,json,yaml,random, glob, argparse, torch, pickle
from tqdm import tqdm
import numpy as np
from scipy.spatial.transform import Rotation
import open3d as o3d
_EPS = 1e-7 # To prevent division by zero
def viz_coarse_nn_correspondence_mayavi(s_pc, t_pc, good_c, bad_c, f_src_pcd=None, f_tgt_pcd=None, scale_factor=0.02):
'''
@param s_pc: [S,3]
@param t_pc: [T,3]
    @param good_c: [2,K] inlier correspondences
    @param bad_c: [2,K] outlier correspondences
@param f_src_pcd: [S1,3]
@param f_tgt_pcd: [T1,3]
@param scale_factor:
@return:
'''
import mayavi.mlab as mlab
c_red = (224. / 255., 0 / 255., 0 / 255.)
c_pink = (224. / 255., 75. / 255., 232. / 255.)
c_blue = (0. / 255., 0. / 255., 255. / 255.)
c_green = (0. / 255., 255. / 255., 0. / 255.)
c_gray1 = (255 / 255., 255 / 255., 125 / 255.)
c_gray2 = (125. / 255., 125. / 255., 255. / 255.)
if f_src_pcd is not None:
mlab.points3d(f_src_pcd[:, 0], f_src_pcd[:, 1], f_src_pcd[:, 2], scale_factor=scale_factor * 0.25,
color=c_gray1)
else:
mlab.points3d(s_pc[:, 0], s_pc[:, 1], s_pc[:, 2], scale_factor=scale_factor * 0.75, color=c_gray1)
if f_tgt_pcd is not None:
mlab.points3d(f_tgt_pcd[:, 0], f_tgt_pcd[:, 1], f_tgt_pcd[:, 2], scale_factor=scale_factor * 0.25,
color=c_gray2)
else:
mlab.points3d(t_pc[:, 0], t_pc[:, 1], t_pc[:, 2], scale_factor=scale_factor * 0.75, color=c_gray2)
s_cpts_god = s_pc[good_c[0]]
t_cpts_god = t_pc[good_c[1]]
flow_good = t_cpts_god - s_cpts_god
s_cpts_bd = s_pc[bad_c[0]]
t_cpts_bd = t_pc[bad_c[1]]
flow_bad = t_cpts_bd - s_cpts_bd
def match_draw(s_cpts, t_cpts, flow, color):
mlab.points3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], scale_factor=scale_factor * 0.35, color=c_blue)
mlab.points3d(t_cpts[:, 0], t_cpts[:, 1], t_cpts[:, 2], scale_factor=scale_factor * 0.35, color=c_pink)
mlab.quiver3d(s_cpts[:, 0], s_cpts[:, 1], s_cpts[:, 2], flow[:, 0], flow[:, 1], flow[:, 2],
scale_factor=1, mode='2ddash', line_width=1., color=color)
match_draw(s_cpts_god, t_cpts_god, flow_good, c_green)
match_draw(s_cpts_bd, t_cpts_bd, flow_bad, c_red)
mlab.show()
def correspondence_viz(src_raw, tgt_raw, src_pcd, tgt_pcd, corrs, inlier_mask, max=200):
perm = np.random.permutation(corrs.shape[1])
ind = perm[:max]
corrs = corrs[:, ind]
inlier_mask = inlier_mask[ind]
good_c = corrs[:, inlier_mask]
bad_c = corrs[:, ~inlier_mask]
offset = np.array([[1.45, 0, 0]])
# src_pcd = src_pcd + offset
# src_raw = src_raw + offset
tgt_pcd = tgt_pcd + offset
tgt_raw = tgt_raw + offset
viz_coarse_nn_correspondence_mayavi(src_pcd, tgt_pcd, good_c, bad_c, src_raw, tgt_raw, scale_factor=0.07)
def fmr_wrt_distance(data,split,inlier_ratio_threshold=0.05):
"""
calculate feature match recall wrt distance threshold
"""
fmr_wrt_distance =[]
for distance_threshold in range(1,21):
inlier_ratios =[]
distance_threshold /=100.0
for idx in range(data.shape[0]):
inlier_ratio = (data[idx] < distance_threshold).mean()
inlier_ratios.append(inlier_ratio)
fmr = 0
for ele in split:
fmr += (np.array(inlier_ratios[ele[0]:ele[1]]) > inlier_ratio_threshold).mean()
        fmr /= 8  # average over the 8 scenes of the 3DMatch test benchmark
fmr_wrt_distance.append(fmr*100)
return fmr_wrt_distance
def fmr_wrt_inlier_ratio(data, split, distance_threshold=0.1):
"""
calculate feature match recall wrt inlier ratio threshold
"""
fmr_wrt_inlier =[]
for inlier_ratio_threshold in range(1,21):
inlier_ratios =[]
inlier_ratio_threshold /=100.0
for idx in range(data.shape[0]):
inlier_ratio = (data[idx] < distance_threshold).mean()
inlier_ratios.append(inlier_ratio)
fmr = 0
for ele in split:
fmr += (np.array(inlier_ratios[ele[0]:ele[1]]) > inlier_ratio_threshold).mean()
        fmr /= 8  # average over the 8 scenes of the 3DMatch test benchmark
fmr_wrt_inlier.append(fmr*100)
return fmr_wrt_inlier
def to_tensor(array):
"""
Convert array to tensor
"""
if(not isinstance(array,torch.Tensor)):
return torch.from_numpy(array).float()
else:
return array
def to_array(tensor):
"""
    Convert tensor to array
"""
if(not isinstance(tensor,np.ndarray)):
if(tensor.device == torch.device('cpu')):
return tensor.numpy()
else:
return tensor.cpu().numpy()
else:
return tensor
def to_tsfm(rot,trans):
tsfm = np.eye(4)
tsfm[:3,:3]=rot
tsfm[:3,3]=trans.flatten()
return tsfm
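# Illustrative check for to_tsfm (added): packing R and t into a 4x4
# homogeneous matrix makes T @ [p, 1] equal R @ p + t. Inputs are arbitrary.
def _demo_to_tsfm():
    R = np.eye(3)
    t = np.array([1.0, 2.0, 3.0])
    T = to_tsfm(R, t)
    p = np.array([0.5, 0.5, 0.5, 1.0])  # homogeneous point
    assert np.allclose((T @ p)[:3], p[:3] + t)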
def to_o3d_pcd(xyz):
"""
Convert tensor/array to open3d PointCloud
xyz: [N, 3]
"""
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(to_array(xyz))
return pcd
def to_o3d_feats(embedding):
"""
Convert tensor/array to open3d features
embedding: [N, 3]
"""
feats = o3d.registration.Feature()
feats.data = to_array(embedding).T
return feats
def get_correspondences(src_pcd, tgt_pcd, trans, search_voxel_size, K=None):
src_pcd.transform(trans)
    correspondences = KDTree_corr(src_pcd, tgt_pcd, search_voxel_size, K=K)
correspondences = torch.from_numpy(correspondences)
return correspondences
def KDTree_corr ( src_pcd_transformed, tgt_pcd, search_voxel_size, K=None):
pcd_tree = o3d.geometry.KDTreeFlann(tgt_pcd)
correspondences = []
for i, point in enumerate(src_pcd_transformed.points):
[count, idx, _] = pcd_tree.search_radius_vector_3d(point, search_voxel_size)
if K is not None:
idx = idx[:K]
for j in idx:
correspondences.append([i, j])
correspondences = np.array(correspondences)
return correspondences
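# Usage sketch (added): radius-search correspondences between two clouds.
# The clouds below are synthetic; in the real pipeline the source cloud is
# first transformed into the target frame with the ground-truth pose.
def _demo_kdtree_corr():
    src = to_o3d_pcd(np.random.rand(50, 3))
    tgt = to_o3d_pcd(np.random.rand(50, 3))
    corr = KDTree_corr(src, tgt, search_voxel_size=0.1)
    return corr  # [M, 2] array of (src_idx, tgt_idx) pairs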
def get_blue():
"""
Get color blue for rendering
"""
return [0, 0.651, 0.929]
def get_yellow():
"""
Get color yellow for rendering
"""
return [1, 0.706, 0]
def random_sample(pcd, feats, N):
"""
Do random sampling to get exact N points and associated features
pcd: [N,3]
feats: [N,C]
"""
if(isinstance(pcd,torch.Tensor)):
n1 = pcd.size(0)
elif(isinstance(pcd, np.ndarray)):
n1 = pcd.shape[0]
if n1 == N:
return pcd, feats
if n1 > N:
choice = np.random.permutation(n1)[:N]
else:
choice = np.random.choice(n1, N)
return pcd[choice], feats[choice]
def get_angle_deviation(R_pred,R_gt):
"""
    Calculate the angle deviation between two rotation matrices
The rotation error is between [0,180]
Input:
R_pred: [B,3,3]
R_gt : [B,3,3]
Return:
degs: [B]
"""
R=np.matmul(R_pred,R_gt.transpose(0,2,1))
tr=np.trace(R,0,1,2)
rads=np.arccos(np.clip((tr-1)/2,-1,1)) # clip to valid range
degs=rads/np.pi*180
return degs
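# Worked example for get_angle_deviation (added, assumes scipy >= 1.4 for
# Rotation.as_matrix): a 90 degree rotation about z compared against the
# identity should report 90.
def _demo_angle_deviation():
    Rz90 = Rotation.from_euler('z', 90, degrees=True).as_matrix()[None]
    identity = np.eye(3)[None]
    assert np.allclose(get_angle_deviation(Rz90, identity), 90.0, atol=1e-5)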
def ransac_pose_estimation(src_pcd, tgt_pcd, src_feat, tgt_feat, mutual = False, distance_threshold = 0.05, ransac_n = 3):
"""
RANSAC pose estimation with two checkers
We follow D3Feat to set ransac_n = 3 for 3DMatch and ransac_n = 4 for KITTI.
For 3DMatch dataset, we observe significant improvement after changing ransac_n from 4 to 3.
"""
if(mutual):
if(torch.cuda.device_count()>=1):
device = torch.device('cuda')
else:
device = torch.device('cpu')
src_feat, tgt_feat = to_tensor(src_feat), to_tensor(tgt_feat)
scores = torch.matmul(src_feat.to(device), tgt_feat.transpose(0,1).to(device)).cpu()
selection = mutual_selection(scores[None,:,:])[0]
row_sel, col_sel = np.where(selection)
corrs = o3d.utility.Vector2iVector(np.array([row_sel,col_sel]).T)
src_pcd = to_o3d_pcd(src_pcd)
tgt_pcd = to_o3d_pcd(tgt_pcd)
result_ransac = o3d.registration.registration_ransac_based_on_correspondence(
source=src_pcd, target=tgt_pcd,corres=corrs,
max_correspondence_distance=distance_threshold,
estimation_method=o3d.registration.TransformationEstimationPointToPoint(False),
ransac_n=4,
criteria=o3d.registration.RANSACConvergenceCriteria(50000, 1000))
else:
src_pcd = to_o3d_pcd(src_pcd)
tgt_pcd = to_o3d_pcd(tgt_pcd)
src_feats = to_o3d_feats(src_feat)
tgt_feats = to_o3d_feats(tgt_feat)
result_ransac = o3d.registration.registration_ransac_based_on_feature_matching(
src_pcd, tgt_pcd, src_feats, tgt_feats,distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), ransac_n,
[o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
o3d.registration.RANSACConvergenceCriteria(50000, 1000))
return result_ransac.transformation
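# Usage sketch (added): estimate a rigid pose from learned descriptors with
# RANSAC, then score it against the ground-truth rotation. All inputs are
# placeholders; src_feat / tgt_feat are [N, C] descriptor arrays.
def _demo_ransac(src_pcd, tgt_pcd, src_feat, tgt_feat, rot_gt):
    tsfm = ransac_pose_estimation(src_pcd, tgt_pcd, src_feat, tgt_feat,
                                  distance_threshold=0.05, ransac_n=3)
    deg = get_angle_deviation(tsfm[None, :3, :3], rot_gt[None])
    return tsfm, deg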
def get_inlier_ratio(src_pcd, tgt_pcd, src_feat, tgt_feat, rot, trans, inlier_distance_threshold = 0.1):
"""
Compute inlier ratios with and without mutual check, return both
"""
src_pcd = to_tensor(src_pcd)
tgt_pcd = to_tensor(tgt_pcd)
src_feat = to_tensor(src_feat)
tgt_feat = to_tensor(tgt_feat)
rot, trans = to_tensor(rot), to_tensor(trans)
results =dict()
results['w']=dict()
results['wo']=dict()
if(torch.cuda.device_count()>=1):
device = torch.device('cuda')
else:
device = torch.device('cpu')
src_pcd = (torch.matmul(rot, src_pcd.transpose(0,1)) + trans).transpose(0,1)
scores = torch.matmul(src_feat.to(device), tgt_feat.transpose(0,1).to(device)).cpu()
# 1. calculate inlier ratios wo mutual check
_, idx = scores.max(-1)
dist = torch.norm(src_pcd- tgt_pcd[idx],dim=1)
results['wo']['distance'] = dist.numpy()
c_inlier_ratio = (dist < inlier_distance_threshold).float().mean()
results['wo']['inlier_ratio'] = c_inlier_ratio
# 2. calculate inlier ratios w mutual check
selection = mutual_selection(scores[None,:,:])[0]
row_sel, col_sel = np.where(selection)
dist = torch.norm(src_pcd[row_sel]- tgt_pcd[col_sel],dim=1)
results['w']['distance'] = dist.numpy()
c_inlier_ratio = (dist < inlier_distance_threshold).float().mean()
results['w']['inlier_ratio'] = c_inlier_ratio
return results
def mutual_selection(score_mat):
"""
Return a {0,1} matrix, the element is 1 if and only if it's maximum along both row and column
Args: np.array()
score_mat: [B,N,N]
Return:
mutuals: [B,N,N]
"""
score_mat=to_array(score_mat)
if(score_mat.ndim==2):
score_mat=score_mat[None,:,:]
mutuals=np.zeros_like(score_mat)
for i in range(score_mat.shape[0]): # loop through the batch
c_mat=score_mat[i]
flag_row=np.zeros_like(c_mat)
flag_column=np.zeros_like(c_mat)
max_along_row=np.argmax(c_mat,1)[:,None]
max_along_column=np.argmax(c_mat,0)[None,:]
np.put_along_axis(flag_row,max_along_row,1,1)
np.put_along_axis(flag_column,max_along_column,1,0)
        mutuals[i] = flag_row.astype(bool) & flag_column.astype(bool)
    return mutuals.astype(bool)
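# Small illustration for mutual_selection (added): entry (i, j) survives only
# if it is the argmax of row i AND the argmax of column j.
def _demo_mutual_selection():
    scores = np.array([[0.9, 0.1],
                       [0.8, 0.2]])  # both rows prefer column 0
    mut = mutual_selection(scores)[0]
    assert mut[0, 0] and not mut[1, 0]  # column 0's best row is row 0 only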
| 11,442 | 31.882184 | 122 | py |
lepard | lepard-main/lib/utils.py | import os,re,sys,json,yaml,random, argparse, torch, pickle
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from scipy.spatial.transform import Rotation
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import minkowski
_EPS = 1e-7 # To prevent division by zero
class Logger:
def __init__(self, path):
self.path = path
log_path = self.path + '/log'
if os.path.exists(log_path):
os.remove(log_path)
self.fw = open(log_path,'a')
def write(self, text):
self.fw.write(text)
self.fw.flush()
def close(self):
self.fw.close()
def save_obj(obj, path ):
"""
save a dictionary to a pickle file
"""
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load_obj(path):
"""
read a dictionary from a pickle file
"""
with open(path, 'rb') as f:
return pickle.load(f)
def load_config(path):
"""
Loads config file:
Args:
path (str): path to the config file
Returns:
config (dict): dictionary of the configuration parameters, merge sub_dicts
"""
with open(path,'r') as f:
cfg = yaml.safe_load(f)
config = dict()
for key, value in cfg.items():
for k,v in value.items():
config[k] = v
return config
def setup_seed(seed):
"""
fix random seed for deterministic training
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def square_distance(src, dst, normalised = False):
"""
Calculate Euclid distance between each two points.
Args:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Returns:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
if(normalised):
dist += 2
else:
dist += torch.sum(src ** 2, dim=-1)[:, :, None]
dist += torch.sum(dst ** 2, dim=-1)[:, None, :]
dist = torch.clamp(dist, min=1e-12, max=None)
return dist
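# Sanity check for square_distance (added): it should agree with the square
# of torch.cdist up to the 1e-12 clamp. Shapes here are arbitrary.
def _demo_square_distance():
    src = torch.rand(2, 5, 3)
    dst = torch.rand(2, 7, 3)
    assert torch.allclose(square_distance(src, dst),
                          torch.cdist(src, dst) ** 2, atol=1e-5)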
def validate_gradient(model):
"""
Confirm all the gradients are non-nan and non-inf
"""
for name, param in model.named_parameters():
if param.grad is not None:
if torch.any(torch.isnan(param.grad)):
return False
if torch.any(torch.isinf(param.grad)):
return False
return True
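# Usage sketch (added): in a training loop the gradients are validated right
# before optimizer.step() so a NaN/Inf batch is skipped instead of corrupting
# the weights. `model` and `optimizer` are placeholders.
def _demo_validate_gradient(model, optimizer):
    if validate_gradient(model):
        optimizer.step()
    optimizer.zero_grad()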
def natural_key(string_):
"""
Sort strings by numbers in the name
"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)] | 2,759 | 23.424779 | 82 | py |
lepard | lepard-main/lib/ply.py | # | PLY files reader/writer |
# function to read/write .ply files
# Hugues THOMAS - 10/02/2017
# Imports and global variables
# \**********************************/
# Basic libs
import numpy as np
import sys
# Define PLY types
ply_dtypes = dict([
(b'int8', 'i1'),
(b'char', 'i1'),
(b'uint8', 'u1'),
(b'uchar', 'u1'),
(b'int16', 'i2'),
(b'short', 'i2'),
(b'uint16', 'u2'),
(b'ushort', 'u2'),
(b'int32', 'i4'),
(b'int', 'i4'),
(b'uint32', 'u4'),
(b'uint', 'u4'),
(b'float32', 'f4'),
(b'float', 'f4'),
(b'float64', 'f8'),
(b'double', 'f8')
])
# Numpy reader format
valid_formats = {'ascii': '', 'binary_big_endian': '>',
'binary_little_endian': '<'}
# Functions
# \***************/
def parse_header(plyfile, ext):
# Variables
line = []
properties = []
num_points = None
while b'end_header' not in line and line != b'':
line = plyfile.readline()
if b'element' in line:
line = line.split()
num_points = int(line[2])
elif b'property' in line:
line = line.split()
properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
return num_points, properties
def parse_mesh_header(plyfile, ext):
# Variables
line = []
vertex_properties = []
num_points = None
num_faces = None
current_element = None
while b'end_header' not in line and line != b'':
line = plyfile.readline()
# Find point element
if b'element vertex' in line:
current_element = 'vertex'
line = line.split()
num_points = int(line[2])
elif b'element face' in line:
current_element = 'face'
line = line.split()
num_faces = int(line[2])
elif b'property' in line:
if current_element == 'vertex':
line = line.split()
vertex_properties.append((line[2].decode(), ext + ply_dtypes[line[1]]))
            elif current_element == 'face':
                if not line.startswith(b'property list uchar int'):
                    raise ValueError('Unsupported faces property : ' + line.decode())
return num_points, num_faces, vertex_properties
def read_ply(filename, triangular_mesh=False):
"""
Read ".ply" files
Parameters
----------
filename : string
the name of the file to read.
Returns
-------
result : array
data stored in the file
Examples
--------
Store data in file
>>> points = np.random.rand(5, 3)
    >>> values = np.random.randint(2, size=5)
>>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])
Read the file
>>> data = read_ply('example.ply')
>>> values = data['values']
array([0, 0, 1, 1, 0])
>>> points = np.vstack((data['x'], data['y'], data['z'])).T
array([[ 0.466 0.595 0.324]
[ 0.538 0.407 0.654]
[ 0.850 0.018 0.988]
[ 0.395 0.394 0.363]
[ 0.873 0.996 0.092]])
"""
with open(filename, 'rb') as plyfile:
# Check if the file start with ply
if b'ply' not in plyfile.readline():
            raise ValueError('The file does not start with the word ply')
# get binary_little/big or ascii
fmt = plyfile.readline().split()[1].decode()
if fmt == "ascii":
raise ValueError('The file is not binary')
# get extension for building the numpy dtypes
ext = valid_formats[fmt]
# PointCloud reader vs mesh reader
if triangular_mesh:
# Parse header
num_points, num_faces, properties = parse_mesh_header(plyfile, ext)
# Get point data
vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)
# Get face data
face_properties = [('k', ext + 'u1'),
('v1', ext + 'i4'),
('v2', ext + 'i4'),
('v3', ext + 'i4')]
faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)
# Return vertex data and concatenated faces
faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T
data = [vertex_data, faces]
else:
# Parse header
num_points, properties = parse_header(plyfile, ext)
# Get data
data = np.fromfile(plyfile, dtype=properties, count=num_points)
return data
def header_properties(field_list, field_names):
# List of lines to write
lines = []
# First line describing element vertex
lines.append('element vertex %d' % field_list[0].shape[0])
# Properties lines
i = 0
for fields in field_list:
for field in fields.T:
lines.append('property %s %s' % (field.dtype.name, field_names[i]))
i += 1
return lines
def write_ply(filename, field_list, field_names, triangular_faces=None):
"""
Write ".ply" files
Parameters
----------
filename : string
the name of the file to which the data is saved. A '.ply' extension will be appended to the
        file name if it does not already have one.
field_list : list, tuple, numpy array
the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a
tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered
as one field.
field_names : list
the name of each fields as a list of strings. Has to be the same length as the number of
fields.
Examples
--------
>>> points = np.random.rand(10, 3)
>>> write_ply('example1.ply', points, ['x', 'y', 'z'])
>>> values = np.random.randint(2, size=10)
>>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])
>>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)
    >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', 'values']
>>> write_ply('example3.ply', [points, colors, values], field_names)
"""
# Format list input to the right form
field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))
for i, field in enumerate(field_list):
if field.ndim < 2:
field_list[i] = field.reshape(-1, 1)
if field.ndim > 2:
print('fields have more than 2 dimensions')
return False
# check all fields have the same number of data
n_points = [field.shape[0] for field in field_list]
if not np.all(np.equal(n_points, n_points[0])):
print('wrong field dimensions')
return False
# Check if field_names and field_list have same nb of column
n_fields = np.sum([field.shape[1] for field in field_list])
if (n_fields != len(field_names)):
print('wrong number of field names')
return False
# Add extension if not there
if not filename.endswith('.ply'):
filename += '.ply'
# open in text mode to write the header
with open(filename, 'w') as plyfile:
# First magical word
header = ['ply']
# Encoding format
header.append('format binary_' + sys.byteorder + '_endian 1.0')
# Points properties description
header.extend(header_properties(field_list, field_names))
        # Add faces if needed
if triangular_faces is not None:
header.append('element face {:d}'.format(triangular_faces.shape[0]))
header.append('property list uchar int vertex_indices')
# End of header
header.append('end_header')
# Write all lines
for line in header:
plyfile.write("%s\n" % line)
# open in binary/append to use tofile
with open(filename, 'ab') as plyfile:
# Create a structured array
i = 0
type_list = []
for fields in field_list:
for field in fields.T:
type_list += [(field_names[i], field.dtype.str)]
i += 1
data = np.empty(field_list[0].shape[0], dtype=type_list)
i = 0
for fields in field_list:
for field in fields.T:
data[field_names[i]] = field
i += 1
data.tofile(plyfile)
if triangular_faces is not None:
triangular_faces = triangular_faces.astype(np.int32)
type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]
data = np.empty(triangular_faces.shape[0], dtype=type_list)
data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)
data['0'] = triangular_faces[:, 0]
data['1'] = triangular_faces[:, 1]
data['2'] = triangular_faces[:, 2]
data.tofile(plyfile)
return True
def describe_element(name, df):
""" Takes the columns of the dataframe and builds a ply-like description
Parameters
----------
name: str
df: pandas DataFrame
Returns
-------
element: list[str]
"""
property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
element = ['element ' + name + ' ' + str(len(df))]
if name == 'face':
element.append("property list uchar int points_indices")
else:
for i in range(len(df.columns)):
# get first letter of dtype to infer format
f = property_formats[str(df.dtypes[i])[0]]
element.append('property ' + f + ' ' + df.columns.values[i])
return element
| 10,301 | 28.267045 | 120 | py |
lepard | lepard-main/lib/tictok.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import numpy as np
from collections import defaultdict
class Timer(object):
def __init__(self):
self.reset()
def tic(self):
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
def tictoc(self, diff):
self.diff = diff
self.total_time += diff
self.calls += 1
def total(self):
""" return the total amount of time """
return self.total_time
def avg(self):
""" return the average amount of time """
return self.total_time / float(self.calls)
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
class Timers(object):
def __init__(self):
self.timers = defaultdict(Timer)
def tic(self, key):
self.timers[key].tic()
def toc(self, key):
self.timers[key].toc()
def tictoc(self, key, diff):
self.timers[key].tictoc( diff)
def print(self, key=None):
if key is None:
# print all time
for k, v in self.timers.items():
print("{:}: \t average {:.4f}, total {:.4f} ,\t calls {:}".format(k.ljust(30), v.avg(), v.total_time, v.calls))
else:
print("Average time for {:}: {:}".format(key, self.timers[key].avg()))
def get_avg(self, key):
return self.timers[key].avg() | 1,644 | 24.307692 | 130 | py |
lepard | lepard-main/lib/trainer.py | import gc
import os
import torch
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from tqdm import tqdm
from lib.timer import AverageMeter
from lib.utils import Logger, validate_gradient
from lib.tictok import Timers
class Trainer(object):
def __init__(self, args):
self.config = args
# parameters
self.start_epoch = 1
self.max_epoch = args.max_epoch
self.save_dir = args.save_dir
self.device = args.device
self.verbose = args.verbose
self.model = args.model
self.model = self.model.to(self.device)
self.optimizer = args.optimizer
self.scheduler = args.scheduler
self.scheduler_freq = args.scheduler_freq
self.snapshot_dir = args.snapshot_dir
self.iter_size = args.iter_size
self.verbose_freq = args.verbose_freq // args.batch_size + 1
if 'overfit' in self.config.exp_dir:
self.verbose_freq = 1
self.loss = args.desc_loss
self.best_loss = 1e5
self.best_recall = -1e5
self.summary_writer = SummaryWriter(log_dir=args.tboard_dir)
self.logger = Logger(args.snapshot_dir)
self.logger.write(f'#parameters {sum([x.nelement() for x in self.model.parameters()]) / 1000000.} M\n')
if (args.pretrain != ''):
self._load_pretrain(args.pretrain)
self.loader = dict()
self.loader['train'] = args.train_loader
self.loader['val'] = args.val_loader
self.loader['test'] = args.test_loader
self.timers = args.timers
        with open(f'{args.snapshot_dir}/model', 'w') as f:
            f.write(str(self.model))
def _snapshot(self, epoch, name=None):
state = {
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'best_loss': self.best_loss,
'best_recall': self.best_recall
}
if name is None:
filename = os.path.join(self.save_dir, f'model_{epoch}.pth')
else:
filename = os.path.join(self.save_dir, f'model_{name}.pth')
self.logger.write(f"Save model to {filename}\n")
torch.save(state, filename, _use_new_zipfile_serialization=False)
def _load_pretrain(self, resume):
print ("loading pretrained", resume)
if os.path.isfile(resume):
state = torch.load(resume)
self.model.load_state_dict(state['state_dict'])
self.start_epoch = state['epoch']
self.scheduler.load_state_dict(state['scheduler'])
self.optimizer.load_state_dict(state['optimizer'])
self.best_loss = state['best_loss']
self.best_recall = state['best_recall']
self.logger.write(f'Successfully load pretrained model from {resume}!\n')
self.logger.write(f'Current best loss {self.best_loss}\n')
self.logger.write(f'Current best recall {self.best_recall}\n')
else:
raise ValueError(f"=> no checkpoint found at '{resume}'")
def _get_lr(self, group=0):
return self.optimizer.param_groups[group]['lr']
def inference_one_batch(self, inputs, phase):
assert phase in ['train', 'val', 'test']
inputs ['phase'] = phase
if (phase == 'train'):
self.model.train()
if self.timers: self.timers.tic('forward pass')
data = self.model(inputs, timers=self.timers) # [N1, C1], [N2, C2]
if self.timers: self.timers.toc('forward pass')
if self.timers: self.timers.tic('compute loss')
loss_info = self.loss( data)
if self.timers: self.timers.toc('compute loss')
if self.timers: self.timers.tic('backprop')
loss_info['loss'].backward()
if self.timers: self.timers.toc('backprop')
else:
self.model.eval()
with torch.no_grad():
data = self.model(inputs, timers=self.timers) # [N1, C1], [N2, C2]
loss_info = self.loss(data)
return loss_info
def inference_one_epoch(self, epoch, phase):
gc.collect()
assert phase in ['train', 'val', 'test']
# init stats meter
stats_meter = None # self.stats_meter()
num_iter = int(len(self.loader[phase].dataset) // self.loader[phase].batch_size) # drop last incomplete batch
c_loader_iter = self.loader[phase].__iter__()
self.optimizer.zero_grad()
for c_iter in tqdm(range(num_iter)): # loop through this epoch
if self.timers: self.timers.tic('one_iteration')
if self.timers: self.timers.tic('load batch')
            inputs = next(c_loader_iter)
# for gpu_div_i, _ in enumerate(inputs):
for k, v in inputs.items():
if type(v) == list:
inputs [k] = [item.to(self.device) for item in v]
elif type(v) in [ dict, float, type(None), np.ndarray]:
pass
else:
inputs [k] = v.to(self.device)
if self.timers: self.timers.toc('load batch')
if self.timers: self.timers.tic('inference_one_batch')
loss_info = self.inference_one_batch(inputs, phase)
if self.timers: self.timers.toc('inference_one_batch')
# run optimisation
# if self.timers: self.timers.tic('run optimisation')
if ((c_iter + 1) % self.iter_size == 0 and phase == 'train'):
gradient_valid = validate_gradient(self.model)
if (gradient_valid):
self.optimizer.step()
else:
self.logger.write('gradient not valid\n')
self.optimizer.zero_grad()
# if self.timers: self.timers.toc('run optimisation')
torch.cuda.empty_cache()
if stats_meter is None:
stats_meter = dict()
for key, _ in loss_info.items():
stats_meter[key] = AverageMeter()
for key, value in loss_info.items():
stats_meter[key].update(value)
if phase == 'train' :
if (c_iter + 1) % self.verbose_freq == 0 and self.verbose :
curr_iter = num_iter * (epoch - 1) + c_iter
for key, value in stats_meter.items():
self.summary_writer.add_scalar(f'{phase}/{key}', value.avg, curr_iter)
dump_mess=True
if dump_mess:
message = f'{phase} Epoch: {epoch} [{c_iter + 1:4d}/{num_iter}]'
for key, value in stats_meter.items():
message += f'{key}: {value.avg:.2f}\t'
self.logger.write(message + '\n')
if self.timers: self.timers.toc('one_iteration')
# report evaluation score at end of each epoch
if phase in ['val', 'test']:
for key, value in stats_meter.items():
self.summary_writer.add_scalar(f'{phase}/{key}', value.avg, epoch)
message = f'{phase} Epoch: {epoch}'
for key, value in stats_meter.items():
message += f'{key}: {value.avg:.2f}\t'
self.logger.write(message + '\n')
return stats_meter
def train(self):
print('start training...')
for epoch in range(self.start_epoch, self.max_epoch):
with torch.autograd.set_detect_anomaly(True):
if self.timers: self.timers.tic('run one epoch')
stats_meter = self.inference_one_epoch(epoch, 'train')
if self.timers: self.timers.toc('run one epoch')
self.scheduler.step()
if 'overfit' in self.config.exp_dir :
if stats_meter['loss'].avg < self.best_loss:
self.best_loss = stats_meter['loss'].avg
self._snapshot(epoch, 'best_loss')
if self.timers: self.timers.print()
else : # no validation step for overfitting
if self.config.do_valid:
stats_meter = self.inference_one_epoch(epoch, 'val')
if stats_meter['loss'].avg < self.best_loss:
self.best_loss = stats_meter['loss'].avg
self._snapshot(epoch, 'best_loss')
if self.timers: self.timers.print()
# finish all epoch
print("Training finish!") | 8,861 | 34.590361 | 117 | py |
lepard | lepard-main/kernels/kernel_points.py |
# | Kernel Point Convolutions |
# Functions handling the disposition of kernel points.
# Hugues THOMAS - 11/06/2018
import time
import numpy as np
from os import makedirs
from os.path import join, exists
from lib.ply import read_ply, write_ply
import matplotlib.pyplot as plt  # needed by the verbose plotting branches below
# Functions
# \***************/
def create_3D_rotations(axis, angle):
"""
Create rotation matrices from a list of axes and angles. Code from wikipedia on quaternions
:param axis: float32[N, 3]
:param angle: float32[N,]
:return: float32[N, 3, 3]
"""
t1 = np.cos(angle)
t2 = 1 - t1
t3 = axis[:, 0] * axis[:, 0]
t6 = t2 * axis[:, 0]
t7 = t6 * axis[:, 1]
t8 = np.sin(angle)
t9 = t8 * axis[:, 2]
t11 = t6 * axis[:, 2]
t12 = t8 * axis[:, 1]
t15 = axis[:, 1] * axis[:, 1]
t19 = t2 * axis[:, 1] * axis[:, 2]
t20 = t8 * axis[:, 0]
t24 = axis[:, 2] * axis[:, 2]
R = np.stack([t1 + t2 * t3,
t7 - t9,
t11 + t12,
t7 + t9,
t1 + t2 * t15,
t19 - t20,
t11 - t12,
t19 + t20,
t1 + t2 * t24], axis=1)
return np.reshape(R, (-1, 3, 3))
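# Quick check for create_3D_rotations (added): rotating the x axis by 90
# degrees around z should give the y axis.
def _demo_create_3D_rotations():
    axis = np.array([[0.0, 0.0, 1.0]])
    angle = np.array([np.pi / 2])
    R = create_3D_rotations(axis, angle)[0]
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-6)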
def spherical_Lloyd(radius, num_cells, dimension=3, fixed='center', approximation='monte-carlo',
approx_n=5000, max_iter=500, momentum=0.9, verbose=0):
"""
Creation of kernel point via Lloyd algorithm. We use an approximation of the algorithm, and compute the Voronoi
cell centers with discretization of space. The exact formula is not trivial with part of the sphere as sides.
:param radius: Radius of the kernels
:param num_cells: Number of cell (kernel points) in the Voronoi diagram.
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param approximation: Approximation method for Lloyd's algorithm ('discretization', 'monte-carlo')
:param approx_n: Number of point used for approximation.
    :param max_iter: Maximum number of iterations for the algorithm.
:param momentum: Momentum of the low pass filter smoothing kernel point positions
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
# Parameters definition
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1.0
# Kernel initialization
# Random kernel points (Uniform distribution in a sphere)
kernel_points = np.zeros((0, dimension))
while kernel_points.shape[0] < num_cells:
new_points = np.random.rand(num_cells, dimension) * 2 * radius0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[np.logical_and(d2 < radius0 ** 2, (0.9 * radius0) ** 2 < d2), :]
kernel_points = kernel_points[:num_cells, :].reshape((num_cells, -1))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[:3, :] *= 0
kernel_points[1, -1] += 2 * radius0 / 3
kernel_points[2, -1] -= 2 * radius0 / 3
# Approximation initialization
# Initialize figure
if verbose > 1:
fig = plt.figure()
    # Initialize discretization if this method is chosen
if approximation == 'discretization':
side_n = int(np.floor(approx_n ** (1. / dimension)))
dl = 2 * radius0 / side_n
coords = np.arange(-radius0 + dl/2, radius0, dl)
if dimension == 2:
x, y = np.meshgrid(coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y))).T
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z))).T
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
X = np.vstack((np.ravel(x), np.ravel(y), np.ravel(z), np.ravel(t))).T
else:
raise ValueError('Unsupported dimension (max is 4)')
elif approximation == 'monte-carlo':
X = np.zeros((0, dimension))
else:
raise ValueError('Wrong approximation method chosen: "{:s}"'.format(approximation))
# Only points inside the sphere are used
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
# Kernel optimization
# Warning if at least one kernel point has no cell
warning = False
# moving vectors of kernel points saved to detect convergence
max_moves = np.zeros((0,))
for iter in range(max_iter):
# In the case of monte-carlo, renew the sampled points
if approximation == 'monte-carlo':
X = np.random.rand(approx_n, dimension) * 2 * radius0 - radius0
d2 = np.sum(np.power(X, 2), axis=1)
X = X[d2 < radius0 * radius0, :]
# Get the distances matrix [n_approx, K, dim]
differences = np.expand_dims(X, 1) - kernel_points
sq_distances = np.sum(np.square(differences), axis=2)
# Compute cell centers
cell_inds = np.argmin(sq_distances, axis=1)
centers = []
for c in range(num_cells):
bool_c = (cell_inds == c)
num_c = np.sum(bool_c.astype(np.int32))
if num_c > 0:
centers.append(np.sum(X[bool_c, :], axis=0) / num_c)
else:
warning = True
centers.append(kernel_points[c])
        # Update kernel points with a low-pass filter to smooth the monte-carlo estimate
centers = np.vstack(centers)
moves = (1 - momentum) * (centers - kernel_points)
kernel_points += moves
# Check moves for convergence
max_moves = np.append(max_moves, np.max(np.linalg.norm(moves, axis=1)))
# Optional fixing
if fixed == 'center':
kernel_points[0, :] *= 0
if fixed == 'verticals':
kernel_points[0, :] *= 0
kernel_points[:3, :-1] *= 0
if verbose:
print('iter {:5d} / max move = {:f}'.format(iter, np.max(np.linalg.norm(moves, axis=1))))
if warning:
                print('WARNING: at least one point has no cell')
if verbose > 1:
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
marker='.', cmap=plt.get_cmap('tab20'))
#plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
plt.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_ylim((-radius0 * 1.1, radius0 * 1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
# User verification
# Show the convergence to ask user if this kernel is correct
if verbose:
if dimension == 2:
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[10.4, 4.8])
ax1.plot(max_moves)
ax2.scatter(X[:, 0], X[:, 1], c=cell_inds, s=20.0,
marker='.', cmap=plt.get_cmap('tab20'))
# plt.scatter(kernel_points[:, 0], kernel_points[:, 1], c=np.arange(num_cells), s=100.0,
# marker='+', cmap=plt.get_cmap('tab20'))
ax2.plot(kernel_points[:, 0], kernel_points[:, 1], 'k+')
circle = plt.Circle((0, 0), radius0, color='r', fill=False)
ax2.add_artist(circle)
ax2.set_xlim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_ylim((-radius0 * 1.1, radius0 * 1.1))
ax2.set_aspect('equal')
plt.title('Check if kernel is correct.')
plt.draw()
plt.show()
if dimension > 2:
plt.figure()
plt.plot(max_moves)
plt.title('Check if kernel is correct.')
plt.show()
# Rescale kernels with real radius
return kernel_points * radius
def kernel_point_optimization_debug(radius, num_points, num_kernels=1, dimension=3,
fixed='center', ratio=0.66, verbose=0):
"""
Creation of kernel point via optimization of potentials.
:param radius: Radius of the kernels
:param num_points: points composing kernels
:param num_kernels: number of wanted kernels
:param dimension: dimension of the space
:param fixed: fix position of certain kernel points ('none', 'center' or 'verticals')
:param ratio: ratio of the radius where you want the kernels points to be placed
:param verbose: display option
:return: points [num_kernels, num_points, dimension]
"""
# Parameters definition
# Radius used for optimization (points are rescaled afterwards)
radius0 = 1
diameter0 = 2
    # Factor multiplying gradients for moving points (~learning rate)
moving_factor = 1e-2
continuous_moving_decay = 0.9995
# Gradient threshold to stop optimization
thresh = 1e-5
# Gradient clipping value
clip = 0.05 * radius0
# Kernel initialization
# Random kernel points
kernel_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
while (kernel_points.shape[0] < num_kernels * num_points):
new_points = np.random.rand(num_kernels * num_points - 1, dimension) * diameter0 - radius0
kernel_points = np.vstack((kernel_points, new_points))
d2 = np.sum(np.power(kernel_points, 2), axis=1)
kernel_points = kernel_points[d2 < 0.5 * radius0 * radius0, :]
kernel_points = kernel_points[:num_kernels * num_points, :].reshape((num_kernels, num_points, -1))
    # Optional fixing
if fixed == 'center':
kernel_points[:, 0, :] *= 0
if fixed == 'verticals':
kernel_points[:, :3, :] *= 0
kernel_points[:, 1, -1] += 2 * radius0 / 3
kernel_points[:, 2, -1] -= 2 * radius0 / 3
# Kernel optimization
# Initialize figure
if verbose>1:
fig = plt.figure()
saved_gradient_norms = np.zeros((10000, num_kernels))
old_gradient_norms = np.zeros((num_kernels, num_points))
for iter in range(10000):
# Compute gradients
# *****************
# Derivative of the sum of potentials of all points
A = np.expand_dims(kernel_points, axis=2)
B = np.expand_dims(kernel_points, axis=1)
interd2 = np.sum(np.power(A - B, 2), axis=-1)
inter_grads = (A - B) / (np.power(np.expand_dims(interd2, -1), 3/2) + 1e-6)
inter_grads = np.sum(inter_grads, axis=1)
# Derivative of the radius potential
circle_grads = 10*kernel_points
# All gradients
gradients = inter_grads + circle_grads
if fixed == 'verticals':
gradients[:, 1:3, :-1] = 0
# Stop condition
# **************
# Compute norm of gradients
gradients_norms = np.sqrt(np.sum(np.power(gradients, 2), axis=-1))
saved_gradient_norms[iter, :] = np.max(gradients_norms, axis=1)
        # Stop if the gradients of all moving points have settled (small change in norms)
if fixed == 'center' and np.max(np.abs(old_gradient_norms[:, 1:] - gradients_norms[:, 1:])) < thresh:
break
elif fixed == 'verticals' and np.max(np.abs(old_gradient_norms[:, 3:] - gradients_norms[:, 3:])) < thresh:
break
elif np.max(np.abs(old_gradient_norms - gradients_norms)) < thresh:
break
old_gradient_norms = gradients_norms
# Move points
# ***********
# Clip gradient to get moving dists
moving_dists = np.minimum(moving_factor * gradients_norms, clip)
# Fix central point
if fixed == 'center':
moving_dists[:, 0] = 0
if fixed == 'verticals':
moving_dists[:, 0] = 0
# Move points
kernel_points -= np.expand_dims(moving_dists, -1) * gradients / np.expand_dims(gradients_norms + 1e-6, -1)
if verbose:
print('iter {:5d} / max grad = {:f}'.format(iter, np.max(gradients_norms[:, 3:])))
if verbose > 1:
plt.clf()
plt.plot(kernel_points[0, :, 0], kernel_points[0, :, 1], '.')
circle = plt.Circle((0, 0), radius, color='r', fill=False)
fig.axes[0].add_artist(circle)
fig.axes[0].set_xlim((-radius*1.1, radius*1.1))
fig.axes[0].set_ylim((-radius*1.1, radius*1.1))
fig.axes[0].set_aspect('equal')
plt.draw()
plt.pause(0.001)
plt.show(block=False)
print(moving_factor)
# moving factor decay
moving_factor *= continuous_moving_decay
# Rescale radius to fit the wanted ratio of radius
r = np.sqrt(np.sum(np.power(kernel_points, 2), axis=-1))
kernel_points *= ratio / np.mean(r[:, 1:])
# Rescale kernels with real radius
return kernel_points * radius, saved_gradient_norms
def load_kernels(radius, num_kpoints, dimension, fixed, lloyd=False):
# Kernel directory
kernel_dir = 'kernels/dispositions'
if not exists(kernel_dir):
makedirs(kernel_dir)
    # Too many points: switch to Lloyd's algorithm
if num_kpoints > 30:
lloyd = True
# Kernel_file
kernel_file = join(kernel_dir, 'k_{:03d}_{:s}_{:d}D.ply'.format(num_kpoints, fixed, dimension))
# Check if already done
if not exists(kernel_file):
if lloyd:
# Create kernels
kernel_points = spherical_Lloyd(1.0,
num_kpoints,
dimension=dimension,
fixed=fixed,
verbose=0)
else:
# Create kernels
kernel_points, grad_norms = kernel_point_optimization_debug(1.0,
num_kpoints,
num_kernels=100,
dimension=dimension,
fixed=fixed,
verbose=0)
# Find best candidate
best_k = np.argmin(grad_norms[-1, :])
# Save points
kernel_points = kernel_points[best_k, :, :]
write_ply(kernel_file, kernel_points, ['x', 'y', 'z'])
else:
data = read_ply(kernel_file)
kernel_points = np.vstack((data['x'], data['y'], data['z'])).T
    # Random rotations for the kernel
# N.B. 4D random rotations not supported yet
R = np.eye(dimension)
theta = np.random.rand() * 2 * np.pi
if dimension == 2:
if fixed != 'vertical':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s], [s, c]], dtype=np.float32)
elif dimension == 3:
if fixed != 'vertical':
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]], dtype=np.float32)
else:
phi = (np.random.rand() - 0.5) * np.pi
            # Create the first vector in cartesian coordinates
u = np.array([np.cos(theta) * np.cos(phi), np.sin(theta) * np.cos(phi), np.sin(phi)])
# Choose a random rotation angle
alpha = np.random.rand() * 2 * np.pi
# Create the rotation matrix with this vector and angle
R = create_3D_rotations(np.reshape(u, (1, -1)), np.reshape(alpha, (1, -1)))[0]
R = R.astype(np.float32)
# Add a small noise
kernel_points = kernel_points + np.random.normal(scale=0.01, size=kernel_points.shape)
# Scale kernels
kernel_points = radius * kernel_points
# Rotate kernels
kernel_points = np.matmul(kernel_points, R)
return kernel_points.astype(np.float32)
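# Usage sketch (added): fetch kernel point dispositions for a KPConv layer.
# The first call optimizes and caches a .ply under kernels/dispositions;
# later calls reload it and only re-randomize the rotation and noise.
def _demo_load_kernels():
    kp = load_kernels(radius=0.05, num_kpoints=15, dimension=3, fixed='center')
    return kp  # float32 array of shape [15, 3]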
| 17,189 | 35.496815 | 120 | py |
sngan.pytorch | sngan.pytorch-master/functions.py | # @Date : 2019-07-25
# @Link : None
# @Version : 0.0
import os
import numpy as np
import torch
import torch.nn as nn
from torchvision.utils import make_grid
from imageio import imsave
from tqdm import tqdm
from copy import deepcopy
import logging
from utils.inception_score import get_inception_score
from utils.fid_score import calculate_fid_given_paths
logger = logging.getLogger(__name__)
def train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,
writer_dict, schedulers=None):
writer = writer_dict['writer']
gen_step = 0
# train mode
gen_net = gen_net.train()
dis_net = dis_net.train()
for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
global_steps = writer_dict['train_global_steps']
# Adversarial ground truths
real_imgs = imgs.type(torch.cuda.FloatTensor)
# Sample noise as generator input
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
# Train Discriminator
dis_optimizer.zero_grad()
real_validity = dis_net(real_imgs)
fake_imgs = gen_net(z).detach()
assert fake_imgs.size() == real_imgs.size()
fake_validity = dis_net(fake_imgs)
        # discriminator hinge loss (SNGAN objective)
d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \
torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))
d_loss.backward()
dis_optimizer.step()
writer.add_scalar('d_loss', d_loss.item(), global_steps)
# Train Generator
if global_steps % args.n_critic == 0:
gen_optimizer.zero_grad()
gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
gen_imgs = gen_net(gen_z)
fake_validity = dis_net(gen_imgs)
            # generator hinge loss: maximize the critic score on fakes
g_loss = -torch.mean(fake_validity)
g_loss.backward()
gen_optimizer.step()
# adjust learning rate
if schedulers:
gen_scheduler, dis_scheduler = schedulers
g_lr = gen_scheduler.step(global_steps)
d_lr = dis_scheduler.step(global_steps)
writer.add_scalar('LR/g_lr', g_lr, global_steps)
writer.add_scalar('LR/d_lr', d_lr, global_steps)
# moving average weight
for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
                avg_p.mul_(0.999).add_(p.data, alpha=0.001)
writer.add_scalar('g_loss', g_loss.item(), global_steps)
gen_step += 1
# verbose
if gen_step and iter_idx % args.print_freq == 0:
tqdm.write(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
(epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))
writer_dict['train_global_steps'] = global_steps + 1
def validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict):
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
# eval mode
gen_net = gen_net.eval()
# generate images
sample_imgs = gen_net(fixed_z)
img_grid = make_grid(sample_imgs, nrow=5, normalize=True, scale_each=True)
# get fid and inception score
fid_buffer_dir = os.path.join(args.path_helper['sample_path'], 'fid_buffer')
os.makedirs(fid_buffer_dir)
eval_iter = args.num_eval_imgs // args.eval_batch_size
img_list = list()
for iter_idx in tqdm(range(eval_iter), desc='sample images'):
z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))
# Generate a batch of images
gen_imgs = gen_net(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()
for img_idx, img in enumerate(gen_imgs):
file_name = os.path.join(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')
imsave(file_name, img)
img_list.extend(list(gen_imgs))
# get inception score
logger.info('=> calculate inception score')
mean, std = get_inception_score(img_list)
# get fid score
logger.info('=> calculate fid score')
fid_score = calculate_fid_given_paths([fid_buffer_dir, fid_stat], inception_path=None)
os.system('rm -r {}'.format(fid_buffer_dir))
writer.add_image('sampled_images', img_grid, global_steps)
writer.add_scalar('Inception_score/mean', mean, global_steps)
writer.add_scalar('Inception_score/std', std, global_steps)
writer.add_scalar('FID_score', fid_score, global_steps)
writer_dict['valid_global_steps'] = global_steps + 1
return mean, fid_score
class LinearLrDecay(object):
def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):
assert start_lr > end_lr
self.optimizer = optimizer
self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)
self.decay_start_step = decay_start_step
self.decay_end_step = decay_end_step
self.start_lr = start_lr
self.end_lr = end_lr
def step(self, current_step):
if current_step <= self.decay_start_step:
lr = self.start_lr
elif current_step >= self.decay_end_step:
lr = self.end_lr
else:
lr = self.start_lr - self.delta * (current_step - self.decay_start_step)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
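# Usage sketch for LinearLrDecay (added): the LR holds at start_lr until
# decay_start_step, then falls linearly to end_lr. The optimizer argument is
# a placeholder.
def _demo_linear_lr_decay(optimizer):
    sched = LinearLrDecay(optimizer, 2e-4, 0.0,
                          decay_start_step=0, decay_end_step=100000)
    return sched.step(50000)  # -> 1e-4, halfway through the decay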
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
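# Sketch of the EMA pattern used in train.py (added): swap in the averaged
# generator weights for evaluation, then restore the raw training weights.
# `gen_net` and `gen_avg_param` are placeholders.
def _demo_ema_swap(gen_net, gen_avg_param):
    backup = copy_params(gen_net)
    load_params(gen_net, gen_avg_param)  # evaluate with averaged weights
    # ... run validation here ...
    load_params(gen_net, backup)  # back to the raw training weights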
| 6,022 | 32.837079 | 123 | py |
sngan.pytorch | sngan.pytorch-master/cfg.py | # @Date : 2019-07-25
# @Link : None
# @Version : 0.0
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--max_epoch',
type=int,
default=200,
help='number of epochs of training')
parser.add_argument(
'--max_iter',
type=int,
default=None,
help='set the max iteration number')
parser.add_argument(
'-gen_bs',
'--gen_batch_size',
type=int,
default=64,
help='size of the batches')
parser.add_argument(
'-dis_bs',
'--dis_batch_size',
type=int,
default=64,
help='size of the batches')
parser.add_argument(
'--g_lr',
type=float,
default=0.0002,
help='adam: gen learning rate')
parser.add_argument(
'--d_lr',
type=float,
default=0.0002,
help='adam: disc learning rate')
parser.add_argument(
'--lr_decay',
action='store_true',
help='learning rate decay or not')
parser.add_argument(
'--beta1',
type=float,
default=0.0,
help='adam: decay of first order momentum of gradient')
parser.add_argument(
'--beta2',
type=float,
default=0.9,
        help='adam: decay of second order momentum of gradient')
parser.add_argument(
'--num_workers',
type=int,
default=8,
help='number of cpu threads to use during batch generation')
parser.add_argument(
'--latent_dim',
type=int,
default=128,
help='dimensionality of the latent space')
parser.add_argument(
'--img_size',
type=int,
default=32,
help='size of each image dimension')
parser.add_argument(
'--channels',
type=int,
default=3,
help='number of image channels')
parser.add_argument(
'--n_critic',
type=int,
default=1,
help='number of training steps for discriminator per iter')
parser.add_argument(
'--val_freq',
type=int,
default=20,
help='interval between each validation')
parser.add_argument(
'--print_freq',
type=int,
default=50,
help='interval between each verbose')
parser.add_argument(
'--load_path',
type=str,
help='The reload model path')
parser.add_argument(
'--exp_name',
type=str,
help='The name of exp')
parser.add_argument(
'--d_spectral_norm',
type=str2bool,
default=False,
help='add spectral_norm on discriminator?')
parser.add_argument(
'--g_spectral_norm',
type=str2bool,
default=False,
help='add spectral_norm on generator?')
parser.add_argument(
'--dataset',
type=str,
default='cifar10',
help='dataset type')
parser.add_argument(
'--data_path',
type=str,
default='./data',
help='The path of data set')
parser.add_argument('--init_type', type=str, default='normal',
choices=['normal', 'orth', 'xavier_uniform', 'false'],
help='The init type')
parser.add_argument('--gf_dim', type=int, default=64,
help='The base channel num of gen')
parser.add_argument('--df_dim', type=int, default=64,
help='The base channel num of disc')
parser.add_argument(
'--model',
type=str,
default='sngan_cifar10',
help='path of model')
parser.add_argument('--eval_batch_size', type=int, default=100)
parser.add_argument('--num_eval_imgs', type=int, default=50000)
parser.add_argument(
'--bottom_width',
type=int,
default=4,
help="the base resolution of the GAN")
parser.add_argument('--random_seed', type=int, default=12345)
opt = parser.parse_args()
return opt
| 4,330 | 27.30719 | 78 | py |
sngan.pytorch | sngan.pytorch-master/datasets.py | import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import Dataset
class ImageDataset(object):
def __init__(self, args):
if args.dataset.lower() == 'cifar10':
Dt = datasets.CIFAR10
transform = transforms.Compose([
transforms.Resize(args.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
args.n_classes = 10
elif args.dataset.lower() == 'stl10':
Dt = datasets.STL10
transform = transforms.Compose([
transforms.Resize(args.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
else:
raise NotImplementedError('Unknown dataset: {}'.format(args.dataset))
if args.dataset.lower() == 'stl10':
self.train = torch.utils.data.DataLoader(
Dt(root=args.data_path, split='train+unlabeled', transform=transform, download=True),
batch_size=args.dis_batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, split='test', transform=transform),
batch_size=args.dis_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
self.test = self.valid
else:
self.train = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=True, transform=transform, download=True),
batch_size=args.dis_batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
self.valid = torch.utils.data.DataLoader(
Dt(root=args.data_path, train=False, transform=transform),
batch_size=args.dis_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
self.test = self.valid
| 2,115 | 40.490196 | 101 | py |
sngan.pytorch | sngan.pytorch-master/train.py | # @Date : 2019-07-25
# @Link : None
# @Version : 0.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cfg
import models
import datasets
from functions import train, validate, LinearLrDecay, load_params, copy_params
from utils.utils import set_log_dir, save_checkpoint, create_logger
from utils.inception_score import _init_inception
from utils.fid_score import create_inception_graph, check_or_download_inception
import torch
import os
import numpy as np
import torch.nn as nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from copy import deepcopy
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def main():
args = cfg.parse_args()
torch.cuda.manual_seed(args.random_seed)
# set tf env
_init_inception()
inception_path = check_or_download_inception(None)
create_inception_graph(inception_path)
# import network
gen_net = eval('models.'+args.model+'.Generator')(args=args).cuda()
dis_net = eval('models.'+args.model+'.Discriminator')(args=args).cuda()
# weight init
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
if args.init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif args.init_type == 'orth':
nn.init.orthogonal_(m.weight.data)
elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform_(m.weight.data, 1.)
else:
                raise NotImplementedError('{} unknown initial type'.format(args.init_type))
elif classname.find('BatchNorm2d') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0.0)
gen_net.apply(weights_init)
dis_net.apply(weights_init)
# set optimizer
gen_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, gen_net.parameters()),
args.g_lr, (args.beta1, args.beta2))
dis_optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, dis_net.parameters()),
args.d_lr, (args.beta1, args.beta2))
gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, args.max_iter * args.n_critic)
dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, args.max_iter * args.n_critic)
# set up data_loader
dataset = datasets.ImageDataset(args)
train_loader = dataset.train
# fid stat
if args.dataset.lower() == 'cifar10':
fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
elif args.dataset.lower() == 'stl10':
fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
else:
raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
assert os.path.exists(fid_stat)
# epoch number for dis_net
args.max_epoch = args.max_epoch * args.n_critic
if args.max_iter:
args.max_epoch = np.ceil(args.max_iter * args.n_critic / len(train_loader))
# initial
fixed_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (25, args.latent_dim)))
gen_avg_param = copy_params(gen_net)
start_epoch = 0
best_fid = 1e4
# set writer
if args.load_path:
print(f'=> resuming from {args.load_path}')
assert os.path.exists(args.load_path)
checkpoint_file = os.path.join(args.load_path, 'Model', 'checkpoint.pth')
assert os.path.exists(checkpoint_file)
checkpoint = torch.load(checkpoint_file)
start_epoch = checkpoint['epoch']
best_fid = checkpoint['best_fid']
gen_net.load_state_dict(checkpoint['gen_state_dict'])
dis_net.load_state_dict(checkpoint['dis_state_dict'])
gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
avg_gen_net = deepcopy(gen_net)
avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
gen_avg_param = copy_params(avg_gen_net)
del avg_gen_net
args.path_helper = checkpoint['path_helper']
logger = create_logger(args.path_helper['log_path'])
logger.info(f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
else:
# create new log dir
assert args.exp_name
args.path_helper = set_log_dir('logs', args.exp_name)
logger = create_logger(args.path_helper['log_path'])
logger.info(args)
writer_dict = {
'writer': SummaryWriter(args.path_helper['log_path']),
'train_global_steps': start_epoch * len(train_loader),
'valid_global_steps': start_epoch // args.val_freq,
}
# train loop
lr_schedulers = (gen_scheduler, dis_scheduler) if args.lr_decay else None
for epoch in tqdm(range(int(start_epoch), int(args.max_epoch)), desc='total progress'):
train(args, gen_net, dis_net, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch, writer_dict,
lr_schedulers)
if epoch and epoch % args.val_freq == 0 or epoch == int(args.max_epoch)-1:
backup_param = copy_params(gen_net)
load_params(gen_net, gen_avg_param)
inception_score, fid_score = validate(args, fixed_z, fid_stat, gen_net, writer_dict)
logger.info(f'Inception score: {inception_score}, FID score: {fid_score} || @ epoch {epoch}.')
load_params(gen_net, backup_param)
if fid_score < best_fid:
best_fid = fid_score
is_best = True
else:
is_best = False
else:
is_best = False
avg_gen_net = deepcopy(gen_net)
load_params(avg_gen_net, gen_avg_param)
save_checkpoint({
'epoch': epoch + 1,
'model': args.model,
'gen_state_dict': gen_net.state_dict(),
'dis_state_dict': dis_net.state_dict(),
'avg_gen_state_dict': avg_gen_net.state_dict(),
'gen_optimizer': gen_optimizer.state_dict(),
'dis_optimizer': dis_optimizer.state_dict(),
'best_fid': best_fid,
'path_helper': args.path_helper
}, is_best, args.path_helper['ckpt_path'])
del avg_gen_net
if __name__ == '__main__':
main()
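# Hypothetical invocation (flag names inferred from the args attributes used
# above; check the project's argparse/cfg module for the exact spellings):
#   python train.py --dataset cifar10 --exp_name sngan_cifar10 \
#       --max_iter 50000 --n_critic 5 --val_freq 20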
| 6,393 | 37.287425 | 116 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_64.py | import torch.nn as nn
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), upsample=False, n_classes=0):
super(GenBlock, self).__init__()
self.activation = activation
self.upsample = upsample
self.learnable_sc = in_channels != out_channels or upsample
hidden_channels = out_channels if hidden_channels is None else hidden_channels
self.n_classes = n_classes
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
self.b1 = nn.BatchNorm2d(in_channels)
self.b2 = nn.BatchNorm2d(hidden_channels)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
def upsample_conv(self, x, conv):
return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
def residual(self, x):
h = x
h = self.b1(h)
h = self.activation(h)
h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
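# Shape sketch (assumed channel counts): with in_channels=out_channels=256 and
# upsample=True, a (N, 256, 8, 8) input becomes (N, 256, 16, 16); residual and
# shortcut paths are upsampled identically before being summed.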
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
# Downsample by a factor of 2 (2x2 average pooling)
return nn.AvgPool2d(kernel_size=2)(x)
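# e.g. a (N, C, 32, 32) feature map becomes (N, C, 16, 16): 2x2 average
# pooling halves each spatial dimension.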
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.l5 = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.l5 = nn.utils.spectral_norm(self.l5)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
# Global sum pooling over the spatial dimensions
h = h.sum(2).sum(2)
output = self.l5(h)
return output
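# Usage sketch (hypothetical args object; df_dim and d_spectral_norm are the
# only fields this discriminator reads):
#   from types import SimpleNamespace
#   import torch
#   d = Discriminator(SimpleNamespace(df_dim=128, d_spectral_norm=True))
#   logits = d(torch.randn(4, 3, 64, 64))  # -> shape (4, 1)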
| 6,296 | 34.778409 | 107 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_stl10.py | import torch.nn as nn
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), upsample=False, n_classes=0):
super(GenBlock, self).__init__()
self.activation = activation
self.upsample = upsample
self.learnable_sc = in_channels != out_channels or upsample
hidden_channels = out_channels if hidden_channels is None else hidden_channels
self.n_classes = n_classes
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
self.b1 = nn.BatchNorm2d(in_channels)
self.b2 = nn.BatchNorm2d(hidden_channels)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
def upsample_conv(self, x, conv):
return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
def residual(self, x):
h = x
h = self.b1(h)
h = self.activation(h)
h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = 512
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(512, 256, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(256, 128, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(128, 64, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(64)
self.c5 = nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
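# Usage sketch (hypothetical args; bottom_width=6 and latent_dim=128 match the
# common 48x48 STL-10 setup):
#   from types import SimpleNamespace
#   import torch
#   g = Generator(SimpleNamespace(bottom_width=6, latent_dim=128))
#   imgs = g(torch.randn(4, 128))  # -> (4, 3, 48, 48) after three 2x upsamples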
"""Discriminator"""
def _downsample(x):
# Downsample by a factor of 2 (2x2 average pooling)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, 64)
self.block2 = DisBlock(args, 64, 128, activation=activation, downsample=True)
self.block3 = DisBlock(args, 128, 256, activation=activation, downsample=True)
self.block4 = DisBlock(args, 256, 512, activation=activation, downsample=True)
self.block5 = DisBlock(args, 512, 1024, activation=activation, downsample=False)
self.l6 = nn.Linear(1024, 1, bias=False)
if args.d_spectral_norm:
self.l6 = nn.utils.spectral_norm(self.l6)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.block5(h)
h = self.activation(h)
# Global sum pooling over the spatial dimensions
h = h.sum(2).sum(2)
output = self.l6(h)
return output
| 6,305 | 34.426966 | 99 | py |
sngan.pytorch | sngan.pytorch-master/models/sngan_cifar10.py | import torch.nn as nn
from .gen_resblock import GenBlock
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
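# e.g. with bottom_width=4, a latent batch of shape (N, latent_dim) maps to
# (N, 3, 32, 32) images: the three upsampling blocks give 4 -> 8 -> 16 -> 32.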
"""Discriminator"""
def _downsample(x):
# Downsample by a factor of 2 (2x2 average pooling)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU()):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.l5 = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.l5 = nn.utils.spectral_norm(self.l5)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
# Global sum pooling over the spatial dimensions
h = h.sum(2).sum(2)
output = self.l5(h)
return output
| 4,805 | 34.338235 | 107 | py |
sngan.pytorch | sngan.pytorch-master/models/gen_resblock.py | # @Date : 3/26/20
# @Link : None
# @Version : 0.0
import torch.nn as nn
class GenBlock(nn.Module):
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), upsample=False, n_classes=0):
super(GenBlock, self).__init__()
self.activation = activation
self.upsample = upsample
self.learnable_sc = in_channels != out_channels or upsample
hidden_channels = out_channels if hidden_channels is None else hidden_channels
self.n_classes = n_classes
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
self.b1 = nn.BatchNorm2d(in_channels)
self.b2 = nn.BatchNorm2d(hidden_channels)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
def upsample_conv(self, x, conv):
return conv(nn.UpsamplingNearest2d(scale_factor=2)(x))
def residual(self, x):
h = x
h = self.b1(h)
h = self.activation(h)
h = self.upsample_conv(h, self.c1) if self.upsample else self.c1(h)
h = self.b2(h)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.upsample_conv(x, self.c_sc) if self.upsample else self.c_sc(x)
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x) | 1,671 | 33.833333 | 90 | py |
sngan.pytorch | sngan.pytorch-master/utils/cal_fid_stat.py | # @Date : 2019-07-26
# @Link : None
# @Version : 0.0
import os
import glob
import argparse
import numpy as np
from imageio import imread
import tensorflow as tf
import utils.fid_score as fid
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
required=True,
help='set path to training set jpg images dir')
parser.add_argument(
'--output_file',
type=str,
default='fid_stat/fid_stats_cifar10_train.npz',
help='path for where to store the statistics')
opt = parser.parse_args()
print(opt)
return opt
def main():
args = parse_args()
# PATHS
data_path = args.data_path
output_path = args.output_file
# if you have downloaded and extracted
# http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
# set this path to the directory where the extracted files are, otherwise
# just set it to None and the script will later download the files for you
inception_path = None
print("check for inception model..", end=" ", flush=True)
inception_path = fid.check_or_download_inception(inception_path) # download inception if necessary
print("ok")
# loads all images into memory (this might require a lot of RAM!)
print("load images..", end=" ", flush=True)
image_list = glob.glob(os.path.join(data_path, '*.jpg'))
images = np.array([imread(str(fn)).astype(np.float32) for fn in image_list])
print("%d images found and loaded" % len(images))
print("create inception graph..", end=" ", flush=True)
fid.create_inception_graph(inception_path) # load the graph into the current TF graph
print("ok")
print("calculte FID stats..", end=" ", flush=True)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
mu, sigma = fid.calculate_activation_statistics(images, sess, batch_size=100)
np.savez_compressed(output_path, mu=mu, sigma=sigma)
print("finished")
if __name__ == '__main__':
main()
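# Example invocation (paths are placeholders):
#   python utils/cal_fid_stat.py --data_path /data/cifar10/train_jpgs \
#       --output_file fid_stat/fid_stats_cifar10_train.npz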
| 2,264 | 29.2 | 103 | py |
sngan.pytorch | sngan.pytorch-master/utils/utils.py | # @Date : 2019-07-25
# @Link : None
# @Version : 0.0
import os
import torch
import dateutil.tz
from datetime import datetime
import time
import logging
def create_logger(log_dir, phase='train'):
time_str = time.strftime('%Y-%m-%d-%H-%M')
log_file = '{}_{}.log'.format(time_str, phase)
final_log_file = os.path.join(log_dir, log_file)
head = '%(asctime)-15s %(message)s'
logging.basicConfig(filename=str(final_log_file),
format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
return logger
def set_log_dir(root_dir, exp_name):
path_dict = {}
os.makedirs(root_dir, exist_ok=True)
# set log path
exp_path = os.path.join(root_dir, exp_name)
now = datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
prefix = exp_path + '_' + timestamp
os.makedirs(prefix)
path_dict['prefix'] = prefix
# set checkpoint path
ckpt_path = os.path.join(prefix, 'Model')
os.makedirs(ckpt_path)
path_dict['ckpt_path'] = ckpt_path
log_path = os.path.join(prefix, 'Log')
os.makedirs(log_path)
path_dict['log_path'] = log_path
# set sample image path for fid calculation
sample_path = os.path.join(prefix, 'Samples')
os.makedirs(sample_path)
path_dict['sample_path'] = sample_path
return path_dict
def save_checkpoint(states, is_best, output_dir,
filename='checkpoint.pth'):
torch.save(states, os.path.join(output_dir, filename))
if is_best:
torch.save(states, os.path.join(output_dir, 'checkpoint_best.pth'))
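# Usage sketch: set_log_dir builds the directory layout that the training
# script's path_helper expects, e.g.
#   path_dict = set_log_dir('logs', 'sngan_cifar10')
#   # keys: 'prefix', 'ckpt_path', 'log_path', 'sample_path'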
| 1,772 | 26.703125 | 75 | py |
sngan.pytorch | sngan.pytorch-master/utils/inception_score.py | # Code derived from tensorflow/tensorflow/models/image/imagenet/classify_image.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tqdm import tqdm
import os.path
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import math
import sys
MODEL_DIR = '/tmp/imagenet'
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
softmax = None
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Call this function with a list of images. Each element should be a
# numpy array with values ranging from 0 to 255.
def get_inception_score(images, splits=10):
assert (type(images) == list)
assert (type(images[0]) == np.ndarray)
assert (len(images[0].shape) == 3)
assert (np.max(images[0]) > 10)
assert (np.min(images[0]) >= 0.0)
inps = []
for img in images:
img = img.astype(np.float32)
inps.append(np.expand_dims(img, 0))
bs = 100
with tf.Session(config=config) as sess:
preds = []
n_batches = int(math.ceil(float(len(inps)) / float(bs)))
for i in tqdm(range(n_batches), desc="Calculate inception score"):
sys.stdout.flush()
inp = inps[(i * bs):min((i + 1) * bs, len(inps))]
inp = np.concatenate(inp, 0)
pred = sess.run(softmax, {'ExpandDims:0': inp})
preds.append(pred)
preds = np.concatenate(preds, 0)
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
sess.close()
return np.mean(scores), np.std(scores)
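# Usage sketch: _init_inception() below must run once first to populate
# `softmax`; `images` is a list of HxWx3 arrays in the 0-255 range, e.g.
#   mean_is, std_is = get_inception_score(list(fake_imgs), splits=10)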
# This function is called automatically.
def _init_inception():
global softmax
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(MODEL_DIR, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(MODEL_DIR)
with tf.gfile.FastGFile(os.path.join(
MODEL_DIR, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
# Works with an arbitrary minibatch size.
with tf.Session(config=config) as sess:
pool3 = sess.graph.get_tensor_by_name('pool_3:0')
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
w = sess.graph.get_operation_by_name("softmax/logits/MatMul").inputs[1]
logits = tf.matmul(tf.squeeze(pool3, [1, 2]), w)
softmax = tf.nn.softmax(logits)
sess.close()
| 3,818 | 36.07767 | 96 | py |
sngan.pytorch | sngan.pytorch-master/utils/fid_score.py | """ Calculates the Frechet Inception Distance (FID) to evaluate GANs.
The FID metric calculates the distance between two distributions of images.
Typically, we have summary statistics (mean & covariance matrix) of one
of these distributions, while the 2nd distribution is given by a GAN.
When run as a stand-alone program, it compares the distribution of
images that are stored as PNG/JPEG at a specified location with a
distribution given by summary statistics (in pickle format).
The FID is calculated by assuming that X_1 and X_2 are the activations of
the pool_3 layer of the inception net for generated samples and real world
samples respectively.
See --help to see further details.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import tensorflow as tf
from imageio import imread
from scipy import linalg
import pathlib
import warnings
class InvalidFIDException(Exception):
pass
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(pth, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
# code for handling the inception net (derived from an earlier implementation; source link omitted)
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
return pool3
def get_activations(images, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
d0 = images.shape[0]
if batch_size > d0:
print("warning: batch size is bigger than the data size. setting batch size to data size")
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, 2048))
for i in range(n_batches):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches), end="", flush=True)
start = i * batch_size
end = start + batch_size
batch = images[start:end]
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(" done")
return pred_arr
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations of the pool_3 layer, precalculated
on a representative data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean
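# Sanity check: identical statistics yield a distance of zero, e.g.
#   mu, sigma = np.zeros(3), np.eye(3)
#   calculate_frechet_distance(mu, sigma, mu, sigma)  # -> 0.0 (up to rounding)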
def calculate_activation_statistics(images, sess, batch_size=50, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# The following methods are implemented to obtain a batched version of the activations.
# This has the advantage of reducing memory requirements, at the cost of slightly reduced efficiency.
# - Pyrestone
def load_image_batch(files):
"""Convenience method for batch-loading images
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
Returns:
-- A numpy array of dimensions (num_images,hi, wi, 3) representing the image pixel values.
"""
return np.array([imread(str(fn)).astype(np.float32) for fn in files])
def get_activations_from_files(files, sess, batch_size=50, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
d0 = len(files)
if batch_size > d0:
print("warning: batch size is bigger than the data size. setting batch size to data size")
batch_size = d0
n_batches = d0 // batch_size
n_used_imgs = n_batches * batch_size
pred_arr = np.empty((n_used_imgs, 2048))
for i in range(n_batches):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches), end="", flush=True)
start = i * batch_size
end = start + batch_size
batch = load_image_batch(files[start:end])
pred = sess.run(inception_layer, {'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
del batch # clean up memory
if verbose:
print(" done")
return pred_arr
def calculate_activation_statistics_from_files(files, sess, batch_size=50, verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- files : list of paths to image files. Images need to have same dimensions for all files.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
the inception model.
"""
act = get_activations_from_files(files, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# The following functions aren't needed for calculating the FID
# they're just here to make this module work as a stand-alone script
# for calculating FID scores
def check_or_download_inception(inception_path):
""" Checks if the path to the inception file is valid, or downloads
the file if it is not present. """
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def _handle_path(path, sess, low_profile=False):
if path.endswith('.npz'):
f = np.load(path)
m, s = f['mu'][:], f['sigma'][:]
f.close()
else:
path = pathlib.Path(path)
files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
if low_profile:
m, s = calculate_activation_statistics_from_files(files, sess)
else:
x = np.array([imread(str(fn)).astype(np.float32) for fn in files])
m, s = calculate_activation_statistics(x, sess)
del x # clean up memory
return m, s
def calculate_fid_given_paths(paths, inception_path, low_profile=False):
""" Calculates the FID of two paths. """
# inception_path = check_or_download_inception(inception_path)
for p in paths:
if not os.path.exists(p):
raise RuntimeError("Invalid path: %s" % p)
# from utils import memory
# memory()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
m1, s1 = _handle_path(paths[0], sess, low_profile=low_profile)
m2, s2 = _handle_path(paths[1], sess, low_profile=low_profile)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
sess.close()
del m1, s1, m2, s2
return fid_value
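# Usage sketch (placeholder paths; create_inception_graph must have been
# called first so FID_Inception_Net exists in the default graph):
#   fid = calculate_fid_given_paths(
#       ['fid_stat/fid_stats_cifar10_train.npz', 'logs/run/Samples'],
#       inception_path=None)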
| 13,063 | 39.196923 | 103 | py |