repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
TiKick | TiKick-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
from setuptools import setup, find_packages
import setuptools
def get_version() -> str:
# https://packaging.python.org/guides/single-sourcing-package-version/
    with open(os.path.join("tmarl", "__init__.py"), "r") as f:
        init = f.read().split()
    return init[init.index("__version__") + 2][1:-1]
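# Note on get_version(): it assumes tmarl/__init__.py contains a whitespace-separated
# assignment of the form below (the value "0.1.0" is only a hypothetical example):
#     __version__ = "0.1.0"
# index("__version__") + 2 skips the "=" token and [1:-1] strips the surrounding quotes.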
setup(
name="tmarl", # Replace with your own username
version=get_version(),
description="marl algorithms",
long_description=open("README.md", encoding="utf8").read(),
long_description_content_type="text/markdown",
author="tmarl",
author_email="tmarl_contact@tartrl.cn",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
keywords="multi-agent reinforcement learning algorithms pytorch",
python_requires='>=3.6',
)
| 1,788 | 35.510204 | 74 | py |
TiKick | TiKick-main/tmarl/networks/policy_network.py |
import torch
import torch.nn as nn
from tmarl.networks.utils.util import init, check
from tmarl.networks.utils.mlp import MLPBase, MLPLayer
from tmarl.networks.utils.rnn import RNNLayer
from tmarl.networks.utils.act import ACTLayer
from tmarl.networks.utils.popart import PopArt
from tmarl.utils.util import get_shape_from_obs_space
# networks are defined here
class PolicyNetwork(nn.Module):
def __init__(self, args, obs_space, action_space, device=torch.device("cpu")):
super(PolicyNetwork, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_policy_active_masks = args.use_policy_active_masks
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._use_influence_policy = args.use_influence_policy
self._influence_layer_N = args.influence_layer_N
self._use_policy_vhead = args.use_policy_vhead
        self._recurrent_N = args.recurrent_N
        self._use_popart = args.use_popart
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
self._mixed_obs = False
self.base = MLPBase(args, obs_shape, use_attn_internal=False, use_cat_self=True)
input_size = self.base.output_size
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(input_size, self.hidden_size, self._recurrent_N, self._use_orthogonal)
input_size = self.hidden_size
if self._use_influence_policy:
self.mlp = MLPLayer(obs_shape[0], self.hidden_size,
self._influence_layer_N, self._use_orthogonal, self._activation_id)
input_size += self.hidden_size
self.act = ACTLayer(action_space, input_size, self._use_orthogonal, self._gain)
if self._use_policy_vhead:
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][self._use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
if self._use_popart:
self.v_out = init_(PopArt(input_size, 1, device=device))
else:
self.v_out = init_(nn.Linear(input_size, 1))
self.to(device)
def forward(self, obs, rnn_states, masks, available_actions=None, deterministic=False):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
actions, action_log_probs = self.act(actor_features, available_actions, deterministic)
return actions, action_log_probs, rnn_states
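    # Shape conventions assumed by forward() above (a sketch based on how the
    # drivers and replay buffer in this repo call it), with N parallel inputs:
    #   obs:               (N, obs_dim)
    #   rnn_states:        (N, recurrent_N, hidden_size)
    #   masks:             (N, 1)   -- 0.0 resets the recurrent state for that entry
    #   available_actions: (N, n_actions) or None (discrete action spaces only)
    # It returns sampled (or mode) actions, their log-probabilities, and the updated
    # rnn_states with the same shape as the input rnn_states.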
def evaluate_actions(self, obs, rnn_states, action, masks, available_actions=None, active_masks=None):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
action = check(action).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
if active_masks is not None:
active_masks = check(active_masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
action_log_probs, dist_entropy = self.act.evaluate_actions(actor_features, action, available_actions, active_masks = active_masks if self._use_policy_active_masks else None)
values = self.v_out(actor_features) if self._use_policy_vhead else None
return action_log_probs, dist_entropy, values
def get_policy_values(self, obs, rnn_states, masks):
if self._mixed_obs:
for key in obs.keys():
obs[key] = check(obs[key]).to(**self.tpdv)
else:
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states, masks)
if self._use_influence_policy:
mlp_obs = self.mlp(obs)
actor_features = torch.cat([actor_features, mlp_obs], dim=1)
values = self.v_out(actor_features)
        return values
| 5,558 | 41.113636 | 181 | py |
TiKick | TiKick-main/tmarl/networks/utils/distributions.py | import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
def log_probs(self, actions):
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Categorical, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x, available_actions=None):
x = self.linear(x)
if available_actions is not None:
x[available_actions == 0] = -1e10
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(DiagGaussian, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
def __init__(self, num_inputs, num_outputs, use_orthogonal=True, gain=0.01):
super(Bernoulli, self).__init__()
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedBernoulli(logits=x)
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
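# A small, self-contained sanity sketch (illustrative only): it exercises the
# Categorical head defined above and shows the shape conventions of the wrappers,
# including available-action masking, where masked logits are pushed to -1e10 so
# the corresponding actions end up with (numerically) zero probability.
if __name__ == "__main__":
    torch.manual_seed(0)
    head = Categorical(num_inputs=8, num_outputs=4)
    features = torch.randn(5, 8)
    available_actions = torch.ones(5, 4)
    available_actions[:, 2] = 0  # action 2 is unavailable in every state
    dist = head(features, available_actions=available_actions)
    actions = dist.sample()              # shape (5, 1)
    log_probs = dist.log_probs(actions)  # shape (5, 1)
    print(actions.squeeze(-1).tolist(), log_probs.shape, dist.mode().shape)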
| 3,466 | 27.891667 | 86 | py |
TiKick | TiKick-main/tmarl/networks/utils/mlp.py |
import torch.nn as nn
from .util import init, get_clones
class MLPLayer(nn.Module):
def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal, activation_id):
super(MLPLayer, self).__init__()
self._layer_N = layer_N
active_func = [nn.Tanh(), nn.ReLU(), nn.LeakyReLU(), nn.ELU()][activation_id]
init_method = [nn.init.xavier_uniform_, nn.init.orthogonal_][use_orthogonal]
gain = nn.init.calculate_gain(['tanh', 'relu', 'leaky_relu', 'leaky_relu'][activation_id])
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0), gain=gain)
self.fc1 = nn.Sequential(
init_(nn.Linear(input_dim, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc_h = nn.Sequential(init_(
nn.Linear(hidden_size, hidden_size)), active_func, nn.LayerNorm(hidden_size))
self.fc2 = get_clones(self.fc_h, self._layer_N)
def forward(self, x):
x = self.fc1(x)
for i in range(self._layer_N):
x = self.fc2[i](x)
return x
class MLPBase(nn.Module):
def __init__(self, args, obs_shape, use_attn_internal=False, use_cat_self=True):
super(MLPBase, self).__init__()
self._use_feature_normalization = args.use_feature_normalization
self._use_orthogonal = args.use_orthogonal
self._activation_id = args.activation_id
self._use_conv1d = args.use_conv1d
self._stacked_frames = args.stacked_frames
self._layer_N = args.layer_N
self.hidden_size = args.hidden_size
obs_dim = obs_shape[0]
inputs_dim = obs_dim
if self._use_feature_normalization:
self.feature_norm = nn.LayerNorm(obs_dim)
self.mlp = MLPLayer(inputs_dim, self.hidden_size,
self._layer_N, self._use_orthogonal, self._activation_id)
def forward(self, x):
if self._use_feature_normalization:
x = self.feature_norm(x)
x = self.mlp(x)
return x
@property
def output_size(self):
        return self.hidden_size
| 2,116 | 32.603175 | 98 | py |
TiKick | TiKick-main/tmarl/networks/utils/popart.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
def __init__(self, input_shape, output_shape, norm_axes=1, beta=0.99999, epsilon=1e-5, device=torch.device("cpu")):
super(PopArt, self).__init__()
self.beta = beta
self.epsilon = epsilon
self.norm_axes = norm_axes
self.tpdv = dict(dtype=torch.float32, device=device)
self.input_shape = input_shape
self.output_shape = output_shape
self.weight = nn.Parameter(torch.Tensor(output_shape, input_shape)).to(**self.tpdv)
self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)
self.stddev = nn.Parameter(torch.ones(output_shape), requires_grad=False).to(**self.tpdv)
self.mean = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.mean_sq = nn.Parameter(torch.zeros(output_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
self.mean.zero_()
self.mean_sq.zero_()
self.debiasing_term.zero_()
def forward(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
return F.linear(input_vector, self.weight, self.bias)
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
old_mean, old_stddev = self.mean, self.stddev
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
self.stddev = (self.mean_sq - self.mean ** 2).sqrt().clamp(min=1e-4)
self.weight = self.weight * old_stddev / self.stddev
self.bias = (old_stddev * self.bias + old_mean - self.mean) / self.stddev
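        # Rescaling note (a sketch of the PopArt idea, not in the original comments):
        # the two assignments above pick the new weight/bias so that, for any input h,
        #   new_stddev * (W_new h + b_new) + new_mean == old_stddev * (W_old h + b_old) + old_mean
        # i.e. the denormalized value prediction is preserved (up to the debiasing
        # term) while the normalization statistics are updated.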
def debiased_mean_var(self):
debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
def normalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
| 3,796 | 38.968421 | 119 | py |
TiKick | TiKick-main/tmarl/networks/utils/util.py |
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def check(input):
output = torch.from_numpy(input) if type(input) == np.ndarray else input
return output
| 426 | 21.473684 | 76 | py |
TiKick | TiKick-main/tmarl/networks/utils/act.py |
from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
def __init__(self, action_space, inputs_dim, use_orthogonal, gain):
super(ACTLayer, self).__init__()
self.multidiscrete_action = False
self.continuous_action = False
self.mixed_action = False
if action_space.__class__.__name__ == "Discrete":
action_dim = action_space.n
self.action_out = Categorical(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "Box":
self.continuous_action = True
action_dim = action_space.shape[0]
self.action_out = DiagGaussian(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiBinary":
action_dim = action_space.shape[0]
self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal, gain)
elif action_space.__class__.__name__ == "MultiDiscrete":
self.multidiscrete_action = True
action_dims = action_space.high - action_space.low + 1
self.action_outs = []
for action_dim in action_dims:
self.action_outs.append(Categorical(inputs_dim, action_dim, use_orthogonal, gain))
self.action_outs = nn.ModuleList(self.action_outs)
        else:  # discrete + continuous
self.mixed_action = True
continous_dim = action_space[0].shape[0]
discrete_dim = action_space[1].n
self.action_outs = nn.ModuleList([DiagGaussian(inputs_dim, continous_dim, use_orthogonal, gain), Categorical(
inputs_dim, discrete_dim, use_orthogonal, gain)])
def forward(self, x, available_actions=None, deterministic=False):
if self.mixed_action :
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action.float())
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
elif self.multidiscrete_action:
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode() if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action)
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.cat(action_log_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
else:
action_logits = self.action_out(x, available_actions)
actions = action_logits.mode() if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
return actions, action_log_probs
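    # Branch summary for forward() above (descriptive note):
    #   mixed action   -> sample each head, concatenate actions, sum log-probs to (N, 1)
    #   MultiDiscrete  -> sample each Categorical head, concatenate actions and log-probs
    #   Box            -> single DiagGaussian head
    #   otherwise      -> single head called with available_actions (the Discrete case)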
def get_probs(self, x, available_actions=None):
if self.mixed_action or self.multidiscrete_action:
action_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action_prob = action_logit.probs
action_probs.append(action_prob)
action_probs = torch.cat(action_probs, -1)
elif self.continuous_action:
action_logits = self.action_out(x)
action_probs = action_logits.probs
else:
action_logits = self.action_out(x, available_actions)
action_probs = action_logits.probs
return action_probs
def get_log_1mp(self, x, action, available_actions=None, active_masks=None):
action_logits = self.action_out(x, available_actions)
action_prob = torch.gather(action_logits.probs, 1, action.long())
action_prob = torch.clamp(action_prob, 0, 1-1e-6)
action_log_1mp = torch.log(1 - action_prob)
return action_log_1mp
def evaluate_actions(self, x, action, available_actions=None, active_masks=None):
if self.mixed_action:
a, b = action.split((2, 1), -1)
b = b.long()
action = [a, b]
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
if len(action_logit.entropy().shape) == len(active_masks.shape):
dist_entropy.append((action_logit.entropy() * active_masks).sum()/active_masks.sum())
else:
dist_entropy.append((action_logit.entropy() * active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.sum(torch.cat(action_log_probs, -1), -1, keepdim=True)
dist_entropy = dist_entropy[0] * 0.0025 + dist_entropy[1] * 0.01
elif self.multidiscrete_action:
action = torch.transpose(action, 0, 1)
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
dist_entropy.append((action_logit.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.cat(action_log_probs, -1) # ! could be wrong
dist_entropy = torch.tensor(dist_entropy).mean()
elif self.continuous_action:
action_logits = self.action_out(x)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
else:
action_logits = self.action_out(x, available_actions)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (action_logits.entropy()*active_masks.squeeze(-1)).sum()/active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
        return action_log_probs, dist_entropy
| 7,195 | 46.342105 | 121 | py |
TiKick | TiKick-main/tmarl/networks/utils/rnn.py |
import torch
import torch.nn as nn
class RNNLayer(nn.Module):
def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal):
super(RNNLayer, self).__init__()
self._recurrent_N = recurrent_N
self._use_orthogonal = use_orthogonal
self.rnn = nn.GRU(inputs_dim, outputs_dim, num_layers=self._recurrent_N)
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
if self._use_orthogonal:
nn.init.orthogonal_(param)
else:
nn.init.xavier_uniform_(param)
self.norm = nn.LayerNorm(outputs_dim)
def forward(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.rnn(x.unsqueeze(0), (hxs * masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(0, 1).contiguous())
x = x.squeeze(0)
hxs = hxs.transpose(0, 1)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0)
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.transpose(0, 1)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(self._recurrent_N, 1, 1)).contiguous()
rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.reshape(T * N, -1)
hxs = hxs.transpose(0, 1)
x = self.norm(x)
return x, hxs
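# Minimal self-check sketch (illustrative; shapes follow the buffer layout used in
# this repo). masks of 0.0 mark environment resets: the chunked branch above splits
# the (T, N, ...) sequence at those steps so the GRU never carries state across a reset.
if __name__ == "__main__":
    T, N, D, H = 4, 3, 6, 8
    layer = RNNLayer(inputs_dim=D, outputs_dim=H, recurrent_N=1, use_orthogonal=True)
    x = torch.randn(T * N, D)
    hxs = torch.zeros(N, 1, H)
    masks = torch.ones(T * N, 1)
    masks[N:2 * N] = 0.0  # pretend every env was reset at t=1
    out, new_hxs = layer(x, hxs, masks)
    print(out.shape, new_hxs.shape)  # torch.Size([12, 8]) torch.Size([3, 1, 8])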
| 2,816 | 34.2125 | 132 | py |
TiKick | TiKick-main/tmarl/drivers/shared_distributed/base_driver.py | import numpy as np
import torch
def _t2n(x):
return x.detach().cpu().numpy()
class Driver(object):
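    # Expected `config` layout (a sketch inferred from __init__ below): a dict with
    # keys 'all_args' (the parsed argparse namespace), 'envs', 'eval_envs', 'device',
    # 'num_agents', and optionally 'signal' (an object carrying actor_id and
    # weight_ids for distributed rollout workers).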
def __init__(self, config, client=None):
self.all_args = config['all_args']
self.envs = config['envs']
self.eval_envs = config['eval_envs']
self.device = config['device']
self.num_agents = config['num_agents']
if 'signal' in config:
self.actor_id = config['signal'].actor_id
self.weight_ids = config['signal'].weight_ids
else:
self.actor_id = 0
self.weight_ids = [0]
# parameters
self.env_name = self.all_args.env_name
self.algorithm_name = self.all_args.algorithm_name
self.experiment_name = self.all_args.experiment_name
self.use_centralized_V = self.all_args.use_centralized_V
self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
self.num_env_steps = self.all_args.num_env_steps if hasattr(self.all_args,'num_env_steps') else self.all_args.eval_num
self.episode_length = self.all_args.episode_length
self.n_rollout_threads = self.all_args.n_rollout_threads
self.learner_n_rollout_threads = self.all_args.n_rollout_threads
self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
self.hidden_size = self.all_args.hidden_size
self.recurrent_N = self.all_args.recurrent_N
# interval
self.save_interval = self.all_args.save_interval
self.use_eval = self.all_args.use_eval
self.eval_interval = self.all_args.eval_interval
self.log_interval = self.all_args.log_interval
# dir
self.model_dir = self.all_args.model_dir
if self.algorithm_name == "rmappo":
from tmarl.algorithms.r_mappo_distributed.mappo_algorithm import MAPPOAlgorithm as TrainAlgo
from tmarl.algorithms.r_mappo_distributed.mappo_module import MAPPOModule as AlgoModule
else:
raise NotImplementedError
if self.envs:
share_observation_space = self.envs.share_observation_space[0] \
if self.use_centralized_V else self.envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.envs.observation_space[0],
share_observation_space,
self.envs.action_space[0],
device=self.device)
else:
share_observation_space = self.eval_envs.share_observation_space[0] \
if self.use_centralized_V else self.eval_envs.observation_space[0]
# policy network
self.algo_module = AlgoModule(self.all_args,
self.eval_envs.observation_space[0],
share_observation_space,
self.eval_envs.action_space[0],
device=self.device)
if self.model_dir is not None:
self.restore()
# algorithm
self.trainer = TrainAlgo(self.all_args, self.algo_module, device=self.device)
# buffer
from tmarl.replay_buffers.normal.shared_buffer import SharedReplayBuffer
self.buffer = SharedReplayBuffer(self.all_args,
self.num_agents,
self.envs.observation_space[0] if self.envs else self.eval_envs.observation_space[0],
share_observation_space,
self.envs.action_space[0] if self.envs else self.eval_envs.action_space[0])
def run(self):
raise NotImplementedError
def warmup(self):
raise NotImplementedError
def collect(self, step):
raise NotImplementedError
def insert(self, data):
raise NotImplementedError
def restore(self):
policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt', map_location=self.device)
self.algo_module.actor.load_state_dict(policy_actor_state_dict)
| 4,244 | 39.04717 | 126 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_algorithm.py | import torch
from tmarl.utils.valuenorm import ValueNorm
# implement the loss of the MAPPO here
class MAPPOAlgorithm():
def __init__(self,
args,
init_module,
device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.algo_module = init_module
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.policy_value_loss_coef = args.policy_value_loss_coef
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
self._use_policy_vhead = args.use_policy_vhead
assert (self._use_popart and self._use_valuenorm) == False, ("self._use_popart and self._use_valuenorm can not be set True simultaneously")
if self._use_popart:
self.value_normalizer = self.algo_module.critic.v_out
if self._use_policy_vhead:
self.policy_value_normalizer = self.algo_module.actor.v_out
elif self._use_valuenorm:
self.value_normalizer = ValueNorm(1, device = self.device)
if self._use_policy_vhead:
self.policy_value_normalizer = ValueNorm(1, device = self.device)
else:
self.value_normalizer = None
if self._use_policy_vhead:
self.policy_value_normalizer = None
def prep_rollout(self):
self.algo_module.actor.eval()
| 2,234 | 38.210526 | 147 | py |
TiKick | TiKick-main/tmarl/algorithms/r_mappo_distributed/mappo_module.py | import torch
from tmarl.networks.policy_network import PolicyNetwork
class MAPPOModule:
def __init__(self, args, obs_space, share_obs_space, act_space, device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = share_obs_space
self.act_space = act_space
self.actor = PolicyNetwork(args, self.obs_space, self.act_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr, eps=self.opti_eps, weight_decay=self.weight_decay)
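    # Note: this distributed module only builds the actor; get_actions() below keeps
    # the (values, actions, action_log_probs, rnn_states_actor, rnn_states_critic)
    # tuple layout of the full MAPPO policy but returns None for the critic slots.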
def get_actions(self, share_obs, obs, rnn_states_actor, rnn_states_critic, masks, available_actions=None, deterministic=False):
actions, action_log_probs, rnn_states_actor = self.actor(obs, rnn_states_actor, masks, available_actions, deterministic)
        return None, actions, action_log_probs, rnn_states_actor, None
| 1,050 | 41.04 | 135 | py |
TiKick | TiKick-main/tmarl/replay_buffers/normal/shared_buffer.py | import torch
import numpy as np
from collections import defaultdict
from tmarl.utils.util import check,get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])
class SharedReplayBuffer(object):
def __init__(self, args, num_agents, obs_space, share_obs_space, act_space):
self.episode_length = args.episode_length
self.n_rollout_threads = args.n_rollout_threads
self.hidden_size = args.hidden_size
self.recurrent_N = args.recurrent_N
self.gamma = args.gamma
self.gae_lambda = args.gae_lambda
self._use_gae = args.use_gae
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_proper_time_limits = args.use_proper_time_limits
self._mixed_obs = False # for mixed observation
obs_shape = get_shape_from_obs_space(obs_space)
share_obs_shape = get_shape_from_obs_space(share_obs_space)
# for mixed observation
if 'Dict' in obs_shape.__class__.__name__:
self._mixed_obs = True
self.obs = {}
self.share_obs = {}
for key in obs_shape:
self.obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape[key].shape), dtype=np.float32)
for key in share_obs_shape:
self.share_obs[key] = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape[key].shape), dtype=np.float32)
else:
# deal with special attn format
if type(obs_shape[-1]) == list:
obs_shape = obs_shape[:1]
if type(share_obs_shape[-1]) == list:
share_obs_shape = share_obs_shape[:1]
self.share_obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *share_obs_shape), dtype=np.float32)
self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, *obs_shape), dtype=np.float32)
self.rnn_states = np.zeros((self.episode_length + 1, self.n_rollout_threads, num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
self.rnn_states_critic = np.zeros_like(self.rnn_states)
self.value_preds = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.returns = np.zeros_like(self.value_preds)
if act_space.__class__.__name__ == 'Discrete':
self.available_actions = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, act_space.n), dtype=np.float32)
else:
self.available_actions = None
act_shape = get_shape_from_act_space(act_space)
self.actions = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.action_log_probs = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, act_shape), dtype=np.float32)
self.rewards = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.masks = np.ones((self.episode_length + 1, self.n_rollout_threads, num_agents, 1), dtype=np.float32)
self.bad_masks = np.ones_like(self.masks)
self.active_masks = np.ones_like(self.masks)
self.step = 0
def insert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
if self._mixed_obs:
for key in self.share_obs.keys():
self.share_obs[key][self.step + 1] = share_obs[key].copy()
for key in self.obs.keys():
self.obs[key][self.step + 1] = obs[key].copy()
else:
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def init_buffer(self,share_obs,obs):
self.share_obs[0] = share_obs
self.obs[0] = obs
def chooseinsert(self, share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs,
value_preds, rewards, masks, bad_masks=None, active_masks=None, available_actions=None):
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
if self._mixed_obs:
for key in self.share_obs.keys():
self.share_obs[key][0] = self.share_obs[key][-1].copy()
for key in self.obs.keys():
self.obs[key][0] = self.obs[key][-1].copy()
else:
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
if self._use_proper_time_limits:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
# step + 1
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * gae * self.masks[step + 1]
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])
else:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * self.value_preds[step]
else:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + value_normalizer.denormalize(self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]
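    # The recursion implemented above is standard GAE (a descriptive note):
    #   delta_t = r_t + gamma * V(s_{t+1}) * m_{t+1} - V(s_t)
    #   A_t     = delta_t + gamma * lambda * m_{t+1} * A_{t+1}
    #   R_t     = A_t + V(s_t)
    # where m_{t+1} are the masks (0 at episode ends). With PopArt/ValueNorm the stored
    # value predictions are first denormalized, and with proper time limits bad_masks
    # additionally zero out the bootstrap across time-limit truncations.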
def feed_forward_generator(self, advantages, num_mini_batch=None, mini_batch_size=None):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) * number of agents ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length, num_agents, n_rollout_threads * episode_length * num_agents,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
share_obs[key] = self.share_obs[key][:-1].reshape(-1, *self.share_obs[key].shape[3:])
for key in self.obs.keys():
obs[key] = self.obs[key][:-1].reshape(-1, *self.obs[key].shape[3:])
else:
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
rnn_states = self.rnn_states[:-1].reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(-1, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
if self._mixed_obs:
share_obs_batch = {}
obs_batch = {}
for key in share_obs.keys():
share_obs_batch[key] = share_obs[key][indices]
for key in obs.keys():
obs_batch[key] = obs[key][indices]
else:
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads*num_agents
assert n_rollout_threads*num_agents >= num_mini_batch, (
"PPO requires the number of processes ({})* number of agents ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(n_rollout_threads, num_agents, num_mini_batch))
num_envs_per_batch = batch_size // num_mini_batch
perm = torch.randperm(batch_size).numpy()
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
share_obs[key] = self.share_obs[key].reshape(-1, batch_size, *self.share_obs[key].shape[3:])
for key in self.obs.keys():
obs[key] = self.obs[key].reshape(-1, batch_size, *self.obs[key].shape[3:])
else:
share_obs = self.share_obs.reshape(-1, batch_size, *self.share_obs.shape[3:])
obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
rnn_states = self.rnn_states.reshape(-1, batch_size, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic.reshape(-1, batch_size, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions.reshape(-1, batch_size, self.available_actions.shape[-1])
value_preds = self.value_preds.reshape(-1, batch_size, 1)
returns = self.returns.reshape(-1, batch_size, 1)
masks = self.masks.reshape(-1, batch_size, 1)
active_masks = self.active_masks.reshape(-1, batch_size, 1)
action_log_probs = self.action_log_probs.reshape(-1, batch_size, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, batch_size, 1)
for start_ind in range(0, batch_size, num_envs_per_batch):
if self._mixed_obs:
share_obs_batch = defaultdict(list)
obs_batch = defaultdict(list)
else:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
if self._mixed_obs:
for key in share_obs.keys():
share_obs_batch[key].append(share_obs[key][:-1, ind])
for key in obs.keys():
obs_batch[key].append(obs[key][:-1, ind])
else:
share_obs_batch.append(share_obs[:-1, ind])
obs_batch.append(obs[:-1, ind])
rnn_states_batch.append(rnn_states[0:1, ind])
rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
actions_batch.append(actions[:, ind])
if self.available_actions is not None:
available_actions_batch.append(available_actions[:-1, ind])
value_preds_batch.append(value_preds[:-1, ind])
return_batch.append(returns[:-1, ind])
masks_batch.append(masks[:-1, ind])
active_masks_batch.append(active_masks[:-1, ind])
old_action_log_probs_batch.append(action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
# [N[T, dim]]
T, N = self.episode_length, num_envs_per_batch
# These are all from_numpys of size (T, N, -1)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = np.stack(share_obs_batch[key], 1)
for key in obs_batch.keys():
obs_batch[key] = np.stack(obs_batch[key], 1)
else:
share_obs_batch = np.stack(share_obs_batch, 1)
obs_batch = np.stack(obs_batch, 1)
actions_batch = np.stack(actions_batch, 1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, 1)
value_preds_batch = np.stack(value_preds_batch, 1)
return_batch = np.stack(return_batch, 1)
masks_batch = np.stack(masks_batch, 1)
active_masks_batch = np.stack(active_masks_batch, 1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, 1)
adv_targ = np.stack(adv_targ, 1)
# States is just a (N, dim) from_numpy [N[1,dim]]
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (T, N, ...) from_numpys to (T * N, ...)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = _flatten(T, N, share_obs_batch[key])
for key in obs_batch.keys():
obs_batch[key] = _flatten(T, N, obs_batch[key])
else:
share_obs_batch = _flatten(T, N, share_obs_batch)
obs_batch = _flatten(T, N, obs_batch)
actions_batch = _flatten(T, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(T, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(T, N, value_preds_batch)
return_batch = _flatten(T, N, return_batch)
masks_batch = _flatten(T, N, masks_batch)
active_masks_batch = _flatten(T, N, active_masks_batch)
old_action_log_probs_batch = _flatten(T, N, old_action_log_probs_batch)
adv_targ = _flatten(T, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch, data_chunk_length):
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
data_chunks = batch_size // data_chunk_length # [C=r*T*M/L]
mini_batch_size = data_chunks // num_mini_batch
assert n_rollout_threads * episode_length * num_agents >= data_chunk_length, (
"PPO requires the number of processes ({})* number of agents ({}) * episode length ({}) "
"to be greater than or equal to the number of "
"data chunk length ({}).".format(n_rollout_threads, num_agents, episode_length ,data_chunk_length))
rand = torch.randperm(data_chunks).numpy()
sampler = [rand[i*mini_batch_size:(i+1)*mini_batch_size] for i in range(num_mini_batch)]
if self._mixed_obs:
share_obs = {}
obs = {}
for key in self.share_obs.keys():
if len(self.share_obs[key].shape) == 6:
share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs[key].shape[3:])
elif len(self.share_obs[key].shape) == 5:
share_obs[key] = self.share_obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.share_obs[key].shape[3:])
else:
share_obs[key] = _cast(self.share_obs[key][:-1])
for key in self.obs.keys():
if len(self.obs[key].shape) == 6:
obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs[key].shape[3:])
elif len(self.obs[key].shape) == 5:
obs[key] = self.obs[key][:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.obs[key].shape[3:])
else:
obs[key] = _cast(self.obs[key][:-1])
else:
if len(self.share_obs.shape) > 4:
share_obs = self.share_obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].transpose(1, 2, 0, 3, 4, 5).reshape(-1, *self.obs.shape[3:])
else:
share_obs = _cast(self.share_obs[:-1])
obs = _cast(self.obs[:-1])
actions = _cast(self.actions)
action_log_probs = _cast(self.action_log_probs)
advantages = _cast(advantages)
value_preds = _cast(self.value_preds[:-1])
returns = _cast(self.returns[:-1])
masks = _cast(self.masks[:-1])
active_masks = _cast(self.active_masks[:-1])
# rnn_states = _cast(self.rnn_states[:-1])
# rnn_states_critic = _cast(self.rnn_states_critic[:-1])
rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].transpose(1, 2, 0, 3, 4).reshape(-1, *self.rnn_states_critic.shape[3:])
if self.available_actions is not None:
available_actions = _cast(self.available_actions[:-1])
for indices in sampler:
if self._mixed_obs:
share_obs_batch = defaultdict(list)
obs_batch = defaultdict(list)
else:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for index in indices:
ind = index * data_chunk_length
# size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
if self._mixed_obs:
for key in share_obs.keys():
share_obs_batch[key].append(share_obs[key][ind:ind+data_chunk_length])
for key in obs.keys():
obs_batch[key].append(obs[key][ind:ind+data_chunk_length])
else:
share_obs_batch.append(share_obs[ind:ind+data_chunk_length])
obs_batch.append(obs[ind:ind+data_chunk_length])
actions_batch.append(actions[ind:ind+data_chunk_length])
if self.available_actions is not None:
available_actions_batch.append(available_actions[ind:ind+data_chunk_length])
value_preds_batch.append(value_preds[ind:ind+data_chunk_length])
return_batch.append(returns[ind:ind+data_chunk_length])
masks_batch.append(masks[ind:ind+data_chunk_length])
active_masks_batch.append(active_masks[ind:ind+data_chunk_length])
old_action_log_probs_batch.append(action_log_probs[ind:ind+data_chunk_length])
adv_targ.append(advantages[ind:ind+data_chunk_length])
# size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
rnn_states_batch.append(rnn_states[ind])
rnn_states_critic_batch.append(rnn_states_critic[ind])
L, N = data_chunk_length, mini_batch_size
# These are all from_numpys of size (L, N, Dim)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = np.stack(share_obs_batch[key], axis=1)
for key in obs_batch.keys():
obs_batch[key] = np.stack(obs_batch[key], axis=1)
else:
share_obs_batch = np.stack(share_obs_batch, axis=1)
obs_batch = np.stack(obs_batch, axis=1)
actions_batch = np.stack(actions_batch, axis=1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, axis=1)
value_preds_batch = np.stack(value_preds_batch, axis=1)
return_batch = np.stack(return_batch, axis=1)
masks_batch = np.stack(masks_batch, axis=1)
active_masks_batch = np.stack(active_masks_batch, axis=1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch, axis=1)
adv_targ = np.stack(adv_targ, axis=1)
# States is just a (N, -1) from_numpy
rnn_states_batch = np.stack(rnn_states_batch).reshape(N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(rnn_states_critic_batch).reshape(N, *self.rnn_states_critic.shape[3:])
# Flatten the (L, N, ...) from_numpys to (L * N, ...)
if self._mixed_obs:
for key in share_obs_batch.keys():
share_obs_batch[key] = _flatten(L, N, share_obs_batch[key])
for key in obs_batch.keys():
obs_batch[key] = _flatten(L, N, obs_batch[key])
else:
share_obs_batch = _flatten(L, N, share_obs_batch)
obs_batch = _flatten(L, N, obs_batch)
actions_batch = _flatten(L, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(L, N, available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(L, N, value_preds_batch)
return_batch = _flatten(L, N, return_batch)
masks_batch = _flatten(L, N, masks_batch)
active_masks_batch = _flatten(L, N, active_masks_batch)
old_action_log_probs_batch = _flatten(L, N, old_action_log_probs_batch)
adv_targ = _flatten(L, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
| 28,769 | 52.081181 | 231 | py |
TiKick | TiKick-main/tmarl/configs/config.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import argparse
def get_config():
parser = argparse.ArgumentParser(
description='TiKick', formatter_class=argparse.RawDescriptionHelpFormatter)
# prepare parameters
parser.add_argument("--algorithm_name", type=str,
default='rmappo', choices=["rmappo"])
parser.add_argument("--experiment_name", type=str, default="check",
help="an identifier to distinguish different experiment.")
parser.add_argument("--seed", type=int, default=1,
help="Random seed for numpy/torch")
parser.add_argument("--disable_cuda", action='store_true', default=False,
help="by default False, will use GPU to train; or else will use CPU;")
parser.add_argument("--cuda_deterministic",
action='store_false', default=True,
help="by default, make sure random seed effective. if set, bypass such function.")
parser.add_argument("--n_rollout_threads", type=int, default=2,
help="Number of parallel envs for training rollout")
parser.add_argument("--n_eval_rollout_threads", type=int, default=1,
help="Number of parallel envs for evaluating rollout")
parser.add_argument("--n_render_rollout_threads", type=int, default=1,
help="Number of parallel envs for rendering rollout")
parser.add_argument("--eval_num", type=int, default=1,
help='Number of environment steps to evaluate (default: 1)')
# env parameters
parser.add_argument("--env_name", type=str, default='StarCraft2',
help="specify the name of environment")
parser.add_argument("--use_obs_instead_of_state", action='store_true',
default=False, help="Whether to use global state or concatenated obs")
# replay buffer parameters
parser.add_argument("--episode_length", type=int,
default=200, help="Max length for any episode")
# network parameters
parser.add_argument("--separate_policy", action='store_true',
                        default=False, help='Whether the agent uses a separate policy')
parser.add_argument("--use_centralized_V", action='store_false',
default=True, help="Whether to use centralized V function")
parser.add_argument("--use_conv1d", action='store_true',
default=False, help="Whether to use conv1d")
parser.add_argument("--stacked_frames", type=int, default=1,
help="Dimension of hidden layers for actor/critic networks")
parser.add_argument("--use_stacked_frames", action='store_true',
default=False, help="Whether to use stacked_frames")
parser.add_argument("--hidden_size", type=int, default=256,
help="Dimension of hidden layers for actor/critic networks") # TODO @zoeyuchao. The same comment might in need of change.
parser.add_argument("--layer_N", type=int, default=3,
help="Number of layers for actor/critic networks")
parser.add_argument("--activation_id", type=int,
default=1, help="choose 0 to use tanh, 1 to use relu, 2 to use leaky relu, 3 to use elu")
parser.add_argument("--use_popart", action='store_true', default=False,
help="by default False, use PopArt to normalize rewards.")
parser.add_argument("--use_valuenorm", action='store_false', default=True,
help="by default True, use running mean and std to normalize rewards.")
parser.add_argument("--use_feature_normalization", action='store_false',
default=True, help="Whether to apply layernorm to the inputs")
parser.add_argument("--use_orthogonal", action='store_false', default=True,
help="Whether to use Orthogonal initialization for weights and 0 initialization for biases")
parser.add_argument("--gain", type=float, default=0.01,
help="The gain # of last action layer")
parser.add_argument("--cnn_layers_params", type=str, default=None,
help="The parameters of cnn layer")
parser.add_argument("--use_maxpool2d", action='store_true',
default=False, help="Whether to apply layernorm to the inputs")
# recurrent parameters
parser.add_argument("--use_naive_recurrent_policy", action='store_true',
default=False, help='Whether to use a naive recurrent policy')
parser.add_argument("--use_recurrent_policy", action='store_false',
default=True, help='use a recurrent policy')
parser.add_argument("--recurrent_N", type=int, default=1,
help="The number of recurrent layers.")
parser.add_argument("--data_chunk_length", type=int, default=25,
help="Time length of chunks used to train a recurrent_policy")
parser.add_argument("--use_influence_policy", action='store_true',
                        default=False, help='use the influence policy')
parser.add_argument("--influence_layer_N", type=int, default=1,
help="Number of layers for actor/critic networks")
# optimizer parameters
parser.add_argument("--lr", type=float, default=5e-4,
help='learning rate (default: 5e-4)')
parser.add_argument("--tau", type=float, default=0.995,
help='soft update polyak (default: 0.995)')
parser.add_argument("--critic_lr", type=float, default=5e-4,
help='critic learning rate (default: 5e-4)')
parser.add_argument("--opti_eps", type=float, default=1e-5,
help='RMSprop optimizer epsilon (default: 1e-5)')
parser.add_argument("--weight_decay", type=float, default=0)
# ppo parameters
parser.add_argument("--ppo_epoch", type=int, default=15,
help='number of ppo epochs (default: 15)')
parser.add_argument("--use_policy_vhead",
action='store_true', default=False,
help="by default, do not use policy vhead. if set, use policy vhead.")
parser.add_argument("--use_clipped_value_loss",
action='store_false', default=True,
help="by default, clip loss value. If set, do not clip loss value.")
parser.add_argument("--clip_param", type=float, default=0.2,
help='ppo clip parameter (default: 0.2)')
parser.add_argument("--num_mini_batch", type=int, default=1,
help='number of batches for ppo (default: 1)')
parser.add_argument("--policy_value_loss_coef", type=float,
                        default=1, help='policy value loss coefficient (default: 1)')
parser.add_argument("--entropy_coef", type=float, default=0.01,
help='entropy term coefficient (default: 0.01)')
parser.add_argument("--value_loss_coef", type=float,
                        default=1, help='value loss coefficient (default: 1)')
parser.add_argument("--use_max_grad_norm",
action='store_false', default=True,
help="by default, use max norm of gradients. If set, do not use.")
parser.add_argument("--max_grad_norm", type=float, default=10.0,
                        help='max norm of gradients (default: 10.0)')
parser.add_argument("--use_gae", action='store_false',
default=True, help='use generalized advantage estimation')
parser.add_argument("--gamma", type=float, default=0.99,
help='discount factor for rewards (default: 0.99)')
parser.add_argument("--gae_lambda", type=float, default=0.95,
help='gae lambda parameter (default: 0.95)')
parser.add_argument("--use_proper_time_limits", action='store_true',
default=False, help='compute returns taking into account time limits')
parser.add_argument("--use_huber_loss", action='store_false', default=True,
help="by default, use huber loss. If set, do not use huber loss.")
parser.add_argument("--use_value_active_masks",
action='store_false', default=True,
help="by default True, whether to mask useless data in value loss.")
parser.add_argument("--use_policy_active_masks",
action='store_false', default=True,
help="by default True, whether to mask useless data in policy loss.")
parser.add_argument("--huber_delta", type=float,
default=10.0, help=" coefficience of huber loss.")
# save parameters
parser.add_argument("--save_interval", type=int, default=1,
help="time duration between contiunous twice models saving.")
# log parameters
parser.add_argument("--log_interval", type=int, default=5,
help="time duration between contiunous twice log printing.")
# eval parameters
parser.add_argument("--use_eval", action='store_true', default=False,
help="by default, do not start evaluation. If set`, start evaluation alongside with training.")
parser.add_argument("--eval_interval", type=int, default=25,
help="time duration between contiunous twice evaluation progress.")
parser.add_argument("--eval_episodes", type=int, default=64,
help="number of episodes of a single evaluation.")
# pretrained parameters
parser.add_argument("--model_dir", type=str, default=None,
help="by default None. set the path to pretrained model.")
parser.add_argument("--replay_save_dir", type=str, default=None,
help="replay file save dir")
# replay buffer parameters
return parser
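# Hypothetical usage sketch (not part of the original file): the parser assembled above
# is fetched by the runners (e.g. tmarl.runners.base_evaluator calls get_config()) and
# parsed against the command line. parse_known_args is used here only so that any
# algorithm-specific extra flags are tolerated; the real entry points may differ.
def _example_parse_config(argv):
    parser = get_config()
    all_args, _unknown = parser.parse_known_args(argv)
    return all_args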
| 10,665 | 55.734043 | 146 | py |
TiKick | TiKick-main/tmarl/runners/base_evaluator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import random
import numpy as np
import torch
from tmarl.configs.config import get_config
from tmarl.runners.base_runner import Runner
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class Evaluator(Runner):
    def __init__(self, argv, program_type=None, client=None):
super().__init__(argv)
parser = get_config()
all_args = self.extra_args_func(argv, parser)
all_args.cuda = not all_args.disable_cuda
self.algorithm_name = all_args.algorithm_name
# cuda
if not all_args.disable_cuda and torch.cuda.is_available():
device = torch.device("cuda:0")
if all_args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
else:
print("choose to use cpu...")
device = torch.device("cpu")
# run dir
run_dir = self.setup_run_dir(all_args)
# env init
Env_Class, SubprocVecEnv, DummyVecEnv = self.get_env()
eval_envs = self.env_init(
all_args, Env_Class, SubprocVecEnv, DummyVecEnv)
num_agents = all_args.num_agents
config = {
"all_args": all_args,
"envs": None,
"eval_envs": eval_envs,
"num_agents": num_agents,
"device": device,
"run_dir": run_dir,
}
self.all_args, self.envs, self.eval_envs, self.config \
= all_args, None, eval_envs, config
self.driver = self.init_driver()
def run(self):
# run experiments
self.driver.run()
self.stop()
def stop(self):
pass
def extra_args_func(self, argv, parser):
raise NotImplementedError
def get_env(self):
raise NotImplementedError
def init_driver(self):
raise NotImplementedError
def make_eval_env(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
def get_env_fn(rank):
def init_env():
env = Env_Class(all_args)
env.seed(all_args.seed * 50000 + rank * 10000)
return env
return init_env
if all_args.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv([get_env_fn(i) for i in range(all_args.n_eval_rollout_threads)])
def env_init(self, all_args, Env_Class, SubprocVecEnv, DummyVecEnv):
eval_envs = self.make_eval_env(
all_args, Env_Class, SubprocVecEnv, DummyVecEnv) if all_args.use_eval else None
return eval_envs
def setup_run_dir(self, all_args):
return None
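# Hypothetical usage sketch (not part of the original file): a concrete evaluator is
# expected to subclass Evaluator and fill in the hooks that raise NotImplementedError
# above. The class below is illustrative only and keeps the environment- and
# algorithm-specific pieces as placeholders.
class _ExampleEvaluator(Evaluator):
    def extra_args_func(self, argv, parser):
        # merge command-line overrides into the shared config parser
        return parser.parse_known_args(argv)[0]
    def get_env(self):
        # should return (Env_Class, SubprocVecEnv, DummyVecEnv) for the target environment
        raise NotImplementedError
    def init_driver(self):
        # should build and return an object whose .run() performs the evaluation rollouts
        raise NotImplementedError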
| 3,402 | 28.08547 | 97 | py |
TiKick | TiKick-main/tmarl/runners/base_runner.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 The TARTRL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""""
import os
import random
import socket
import setproctitle
import numpy as np
from pathlib import Path
import torch
from tmarl.configs.config import get_config
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
class Runner:
def __init__(self, argv):
self.argv = argv
def run(self):
# main run
raise NotImplementedError | 1,079 | 22.478261 | 74 | py |
TiKick | TiKick-main/tmarl/utils/valuenorm.py |
import numpy as np
import torch
import torch.nn as nn
class ValueNorm(nn.Module):
""" Normalize a vector of observations - across the first norm_axes dimensions"""
def __init__(self, input_shape, norm_axes=1, beta=0.99999, per_element_update=False, epsilon=1e-5, device=torch.device("cpu")):
super(ValueNorm, self).__init__()
self.input_shape = input_shape
self.norm_axes = norm_axes
self.epsilon = epsilon
self.beta = beta
self.per_element_update = per_element_update
self.tpdv = dict(dtype=torch.float32, device=device)
self.running_mean = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.running_mean_sq = nn.Parameter(torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0), requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
self.running_mean.zero_()
self.running_mean_sq.zero_()
self.debiasing_term.zero_()
def running_mean_var(self):
debiased_mean = self.running_mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean ** 2).clamp(min=1e-2)
return debiased_mean, debiased_var
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector ** 2).mean(dim=tuple(range(self.norm_axes)))
if self.per_element_update:
batch_size = np.prod(input_vector.size()[:self.norm_axes])
weight = self.beta ** batch_size
else:
weight = self.beta
self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))
def normalize(self, input_vector):
# Make sure input is float32
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = (input_vector - mean[(None,) * self.norm_axes]) / torch.sqrt(var)[(None,) * self.norm_axes]
return out
def denormalize(self, input_vector):
""" Transform normalized data back into original distribution """
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = input_vector * torch.sqrt(var)[(None,) * self.norm_axes] + mean[(None,) * self.norm_axes]
out = out.cpu().numpy()
return out
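# Hypothetical usage sketch (not part of the original file): a typical value-normalization
# round trip. update() tracks running statistics, normalize() whitens a batch of returns
# (returning a torch.Tensor), and denormalize() maps normalized value predictions back to
# the reward scale (returning a numpy array).
def _example_value_norm():
    value_normalizer = ValueNorm(input_shape=1)
    returns = np.random.randn(32, 1).astype(np.float32)
    value_normalizer.update(returns)
    normalized = value_normalizer.normalize(returns)
    restored = value_normalizer.denormalize(normalized)
    return normalized, restored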
| 3,110 | 37.8875 | 131 | py |
TiKick | TiKick-main/tmarl/utils/util.py |
import copy
import numpy as np
import math
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Variable
from gym.spaces import Box, Discrete, Tuple
def check(input):
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
def get_gard_norm(it):
sum_grad = 0
for x in it:
if x.grad is None:
continue
sum_grad += x.grad.norm() ** 2
return math.sqrt(sum_grad)
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
"""Decreases the learning rate linearly"""
lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def huber_loss(e, d):
a = (abs(e) <= d).float()
b = (e > d).float()
return a*e**2/2 + b*d*(abs(e)-d/2)
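# Hypothetical usage sketch (not part of the original file): with delta d=1.0 the huber
# loss above is quadratic for small errors (0.5**2 / 2 = 0.125) and linear for large
# ones (1.0 * (3.0 - 0.5) = 2.5).
def _example_huber_loss(d=1.0):
    errors = torch.tensor([0.5, 3.0])
    return huber_loss(errors, d)  # tensor([0.1250, 2.5000])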
def mse_loss(e):
return e**2/2
def get_shape_from_obs_space(obs_space):
if obs_space.__class__.__name__ == 'Box':
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == 'list':
obs_shape = obs_space
elif obs_space.__class__.__name__ == 'Dict':
obs_shape = obs_space.spaces
else:
raise NotImplementedError
return obs_shape
def get_shape_from_act_space(act_space):
if act_space.__class__.__name__ == 'Discrete':
act_shape = 1
elif act_space.__class__.__name__ == "MultiDiscrete":
act_shape = act_space.shape
elif act_space.__class__.__name__ == "Box":
act_shape = act_space.shape[0]
elif act_space.__class__.__name__ == "MultiBinary":
act_shape = act_space.shape[0]
else: # agar
act_shape = act_space[0].shape[0] + 1
return act_shape
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(
list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
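# Hypothetical usage sketch (not part of the original file): tiling 7 RGB frames of size
# 48x64 pads the batch with blank frames to a 3x3 grid, so the stitched image has shape
# (3*48, 3*64, 3).
def _example_tile_images():
    frames = np.zeros((7, 48, 64, 3), dtype=np.uint8)
    big_image = tile_images(frames)
    assert big_image.shape == (144, 192, 3)
    return big_image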
def to_torch(input):
return torch.from_numpy(input) if type(input) == np.ndarray else input
def to_numpy(x):
return x.detach().cpu().numpy()
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample()
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space
where the discrete action space can take any integers from `min` to `max` (both inclusive)
        Note: A value of 0 always needs to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
self.n = np.sum(self.high) + 2
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
random_array = np.random.rand(self.num_discrete_space)
return [int(x) for x in np.floor(np.multiply((self.high - self.low + 1.), random_array) + self.low)]
def contains(self, x):
return len(x) == self.num_discrete_space and (np.array(x) >= self.low).all() and (np.array(x) <= self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(self.high, other.high)
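# Hypothetical usage sketch (not part of the original file): the Nintendo-controller space
# from the docstring above. sample() draws one integer per sub-space and contains() checks
# that a joint action stays within the per-dimension [min, max] bounds.
def _example_multi_discrete():
    controller = MultiDiscrete([[0, 4], [0, 1], [0, 1]])
    action = controller.sample()  # e.g. [3, 0, 1]
    assert controller.contains(action)
    return action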
class DecayThenFlatSchedule():
def __init__(self,
start,
finish,
time_length,
decay="exp"):
self.start = start
self.finish = finish
self.time_length = time_length
self.delta = (self.start - self.finish) / self.time_length
self.decay = decay
if self.decay in ["exp"]:
self.exp_scaling = (-1) * self.time_length / \
np.log(self.finish) if self.finish > 0 else 1
def eval(self, T):
if self.decay in ["linear"]:
return max(self.finish, self.start - self.delta * T)
elif self.decay in ["exp"]:
return min(self.start, max(self.finish, np.exp(- T / self.exp_scaling)))
pass
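# Hypothetical usage sketch (not part of the original file): a linear epsilon schedule that
# decays from 1.0 to 0.05 over 50000 steps and then stays flat.
def _example_epsilon_schedule():
    schedule = DecayThenFlatSchedule(start=1.0, finish=0.05, time_length=50000, decay="linear")
    eps_start = schedule.eval(0)      # 1.0
    eps_late = schedule.eval(100000)  # clipped at the flat value 0.05
    return eps_start, eps_late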
def huber_loss(e, d):
a = (abs(e) <= d).float()
b = (e > d).float()
return a*e**2/2 + b*d*(abs(e)-d/2)
def mse_loss(e):
return e**2
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L11
def soft_update(target, source, tau):
"""
Perform DDPG soft update (move target params toward source based on weight
factor tau)
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
tau (float, 0 < x < 1): Weight factor for update
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - tau) + param.data * tau)
# https://github.com/ikostrikov/pytorch-ddpg-naf/blob/master/ddpg.py#L15
def hard_update(target, source):
"""
Copy network parameters from source to target
Inputs:
target (torch.nn.Module): Net to copy parameters to
source (torch.nn.Module): Net whose parameters to copy
"""
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
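# Hypothetical usage sketch (not part of the original file): Polyak-averaging a target
# network toward an online network, as in DDPG-style algorithms. In this implementation
# tau weights the online (source) parameters, so a small tau moves the target slowly.
def _example_target_update():
    online = nn.Linear(4, 2)
    target = nn.Linear(4, 2)
    hard_update(target, online)            # start from identical weights
    soft_update(target, online, tau=0.01)  # then track the online net slowly each step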
# https://github.com/seba-1511/dist_tuto.pth/blob/gh-pages/train_dist.py
def average_gradients(model):
""" Gradient averaging. """
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM, group=0)
param.grad.data /= size
def onehot_from_logits(logits, avail_logits=None, eps=0.0):
"""
Given batch of logits, return one-hot sample using epsilon greedy strategy
(based on given epsilon)
"""
# get best (according to current policy) actions in one-hot form
logits = to_torch(logits)
dim = len(logits.shape) - 1
if avail_logits is not None:
avail_logits = to_torch(avail_logits)
logits[avail_logits == 0] = -1e10
argmax_acs = (logits == logits.max(dim, keepdim=True)[0]).float()
if eps == 0.0:
return argmax_acs
# get random actions in one-hot form
rand_acs = Variable(torch.eye(logits.shape[1])[[np.random.choice(
range(logits.shape[1]), size=logits.shape[0])]], requires_grad=False)
# chooses between best and random actions using epsilon greedy
return torch.stack([argmax_acs[i] if r > eps else rand_acs[i] for i, r in
enumerate(torch.rand(logits.shape[0]))])
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def sample_gumbel(shape, eps=1e-20, tens_type=torch.FloatTensor):
"""Sample from Gumbel(0, 1)"""
U = Variable(tens_type(*shape).uniform_(), requires_grad=False)
return -torch.log(-torch.log(U + eps) + eps)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax_sample(logits, avail_logits, temperature, device=torch.device('cpu')):
""" Draw a sample from the Gumbel-Softmax distribution"""
if str(device) == 'cpu':
y = logits + sample_gumbel(logits.shape, tens_type=type(logits.data))
else:
y = (logits.cpu() + sample_gumbel(logits.shape,
tens_type=type(logits.data))).cuda()
dim = len(logits.shape) - 1
if avail_logits is not None:
avail_logits = to_torch(avail_logits).to(device)
y[avail_logits == 0] = -1e10
return F.softmax(y / temperature, dim=dim)
# modified for PyTorch from https://github.com/ericjang/gumbel-softmax/blob/master/Categorical%20VAE.ipynb
def gumbel_softmax(logits, avail_logits=None, temperature=1.0, hard=False, device=torch.device('cpu')):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
      be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, avail_logits, temperature, device)
if hard:
y_hard = onehot_from_logits(y)
y = (y_hard - y).detach() + y
return y
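# Hypothetical usage sketch (not part of the original file): drawing a straight-through
# one-hot action from unnormalized logits; with hard=True the forward pass is one-hot while
# gradients flow through the underlying soft sample.
def _example_gumbel_softmax():
    logits = torch.randn(8, 5, requires_grad=True)
    onehot_actions = gumbel_softmax(logits, temperature=1.0, hard=True)
    assert onehot_actions.shape == (8, 5)
    return onehot_actions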
def gaussian_noise(shape, std):
return torch.empty(shape).normal_(mean=0, std=std)
def get_obs_shape(obs_space):
if obs_space.__class__.__name__ == "Box":
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == "list":
obs_shape = obs_space
else:
raise NotImplementedError
return obs_shape
def get_dim_from_space(space):
if isinstance(space, Box):
dim = space.shape[0]
elif isinstance(space, Discrete):
dim = space.n
elif isinstance(space, Tuple):
dim = sum([get_dim_from_space(sp) for sp in space])
elif "MultiDiscrete" in space.__class__.__name__:
return (space.high - space.low) + 1
elif isinstance(space, list):
dim = space[0]
else:
raise Exception("Unrecognized space: ", type(space))
return dim
def get_state_dim(observation_dict, action_dict):
combined_obs_dim = sum([get_dim_from_space(space)
for space in observation_dict.values()])
combined_act_dim = 0
for space in action_dict.values():
dim = get_dim_from_space(space)
if isinstance(dim, np.ndarray):
combined_act_dim += int(sum(dim))
else:
combined_act_dim += dim
return combined_obs_dim, combined_act_dim, combined_obs_dim+combined_act_dim
def get_cent_act_dim(action_space):
cent_act_dim = 0
for space in action_space:
dim = get_dim_from_space(space)
if isinstance(dim, np.ndarray):
cent_act_dim += int(sum(dim))
else:
cent_act_dim += dim
return cent_act_dim
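# Hypothetical usage sketch (not part of the original file): the centralized action dimension
# for two agents with a Discrete(5) and a Box(2,) action space is 5 + 2 = 7.
def _example_cent_act_dim():
    action_space = [Discrete(5), Box(low=-1.0, high=1.0, shape=(2,))]
    return get_cent_act_dim(action_space)  # 7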
def is_discrete(space):
if isinstance(space, Discrete) or "MultiDiscrete" in space.__class__.__name__:
return True
else:
return False
def is_multidiscrete(space):
if "MultiDiscrete" in space.__class__.__name__:
return True
else:
return False
def make_onehot(int_action, action_dim, seq_len=None):
if type(int_action) == torch.Tensor:
int_action = int_action.cpu().numpy()
if not seq_len:
return np.eye(action_dim)[int_action]
if seq_len:
onehot_actions = []
for i in range(seq_len):
onehot_action = np.eye(action_dim)[int_action[i]]
onehot_actions.append(onehot_action)
return np.stack(onehot_actions)
def avail_choose(x, avail_x=None):
x = to_torch(x)
if avail_x is not None:
avail_x = to_torch(avail_x)
x[avail_x == 0] = -1e10
return x # FixedCategorical(logits=x)
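# Hypothetical usage sketch (not part of the original file): masking unavailable actions
# before sampling. Note that avail_choose writes -1e10 into the logits tensor in place.
def _example_avail_choose():
    logits = torch.zeros(3, 5)
    avail_actions = torch.tensor([[1, 1, 0, 0, 1]] * 3)
    masked_logits = avail_choose(logits, avail_actions)
    return masked_logits  # entries for unavailable actions are -1e10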
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
(P,Q) are chosen to be as close as possible, and if N
is square, then P=Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N)/H))
img_nhwc = np.array(
list(img_nhwc) + [img_nhwc[0]*0 for _ in range(N, H*W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H*h, W*w, c)
return img_Hh_Ww_c
| 13,893 | 31.846336 | 122 | py |
TiKick | TiKick-main/tmarl/utils/gpu_mem_track.py | # code from https://github.com/Oldpan/Pytorch-Memory-Utils
import gc
import datetime
import inspect
import torch
import numpy as np
dtype_memory_size_dict = {
torch.float64: 64/8,
torch.double: 64/8,
torch.float32: 32/8,
torch.float: 32/8,
torch.float16: 16/8,
torch.half: 16/8,
torch.int64: 64/8,
torch.long: 64/8,
torch.int32: 32/8,
torch.int: 32/8,
torch.int16: 16/8,
    torch.short: 16/8,
torch.uint8: 8/8,
torch.int8: 8/8,
}
# compatibility of torch1.0
if getattr(torch, "bfloat16", None) is not None:
dtype_memory_size_dict[torch.bfloat16] = 16/8
if getattr(torch, "bool", None) is not None:
    dtype_memory_size_dict[torch.bool] = 8/8 # pytorch uses 1 byte for a bool, see https://github.com/pytorch/pytorch/issues/41571
def get_mem_space(x):
try:
ret = dtype_memory_size_dict[x]
except KeyError:
print(f"dtype {x} is not supported!")
return ret
class MemTracker(object):
"""
Class used to track pytorch memory usage
Arguments:
        detail(bool, default True): whether to log detailed per-tensor gpu memory usage
        path(str): where to save the log file
        verbose(bool, default False): whether to print trivial exceptions
device(int): GPU number, default is 0
"""
def __init__(self, detail=True, path='', verbose=False, device=0):
self.print_detail = detail
self.last_tensor_sizes = set()
self.gpu_profile_fn = path + f'{datetime.datetime.now():%d-%b-%y-%H:%M:%S}-gpu_mem_track.txt'
self.verbose = verbose
self.begin = True
self.device = device
def get_tensors(self):
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
tensor = obj
else:
continue
if tensor.is_cuda:
yield tensor
except Exception as e:
if self.verbose:
                    print('A trivial exception occurred: {}'.format(e))
def get_tensor_usage(self):
sizes = [np.prod(np.array(tensor.size())) * get_mem_space(tensor.dtype) for tensor in self.get_tensors()]
return np.sum(sizes) / 1024**2
def get_allocate_usage(self):
return torch.cuda.memory_allocated() / 1024**2
def clear_cache(self):
gc.collect()
torch.cuda.empty_cache()
def print_all_gpu_tensor(self, file=None):
for x in self.get_tensors():
print(x.size(), x.dtype, np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2, file=file)
def track(self):
"""
Track the GPU memory usage
"""
frameinfo = inspect.stack()[1]
where_str = frameinfo.filename + ' line ' + str(frameinfo.lineno) + ': ' + frameinfo.function
with open(self.gpu_profile_fn, 'a+') as f:
if self.begin:
f.write(f"GPU Memory Track | {datetime.datetime.now():%d-%b-%y-%H:%M:%S} |"
f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
self.begin = False
if self.print_detail is True:
ts_list = [(tensor.size(), tensor.dtype) for tensor in self.get_tensors()]
new_tensor_sizes = {(type(x),
tuple(x.size()),
ts_list.count((x.size(), x.dtype)),
np.prod(np.array(x.size()))*get_mem_space(x.dtype)/1024**2,
x.dtype) for x in self.get_tensors()}
for t, s, n, m, data_type in new_tensor_sizes - self.last_tensor_sizes:
f.write(f'+ | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
for t, s, n, m, data_type in self.last_tensor_sizes - new_tensor_sizes:
f.write(f'- | {str(n)} * Size:{str(s):<20} | Memory: {str(m*n)[:6]} M | {str(t):<20} | {data_type}\n')
self.last_tensor_sizes = new_tensor_sizes
f.write(f"\nAt {where_str:<50}"
f" Total Tensor Used Memory:{self.get_tensor_usage():<7.1f}Mb"
f" Total Allocated Memory:{self.get_allocate_usage():<7.1f}Mb\n\n")
| 4,432 | 36.888889 | 129 | py |
TiKick | TiKick-main/tmarl/utils/modelsize_estimate.py | # code from https://github.com/Oldpan/Pytorch-Memory-Utils
import torch.nn as nn
import numpy as np
def modelsize(model, input, type_size=4):
para = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Model {} : Number of params: {}'.format(model._get_name(), para))
print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))
input_ = input.clone()
input_.requires_grad_(requires_grad=False)
mods = list(model.modules())
out_sizes = []
for i in range(1, len(mods)):
m = mods[i]
if isinstance(m, nn.ReLU):
if m.inplace:
continue
out = m(input_)
out_sizes.append(np.array(out.size()))
input_ = out
total_nums = 0
for i in range(len(out_sizes)):
s = out_sizes[i]
nums = np.prod(np.array(s))
total_nums += nums
    # print('Model {} : Number of intermediate variables without backward: {}'.format(model._get_name(), total_nums))
    # print('Model {} : Number of intermediate variables with backward: {}'.format(model._get_name(), total_nums*2))
    print('Model {} : intermediate variables: {:3f} M (without backward)'
          .format(model._get_name(), total_nums * type_size / 1000 / 1000))
    print('Model {} : intermediate variables: {:3f} M (with backward)'
          .format(model._get_name(), total_nums * type_size*2 / 1000 / 1000))
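# Hypothetical usage sketch (not part of the original file): estimating parameter and
# intermediate-activation memory for a small sequential CNN on a single 32x32 RGB input.
# Note that torch itself is not imported at module level above, so it is imported here.
def _example_modelsize():
    import torch
    model = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, kernel_size=3, padding=1),
    )
    dummy_input = torch.rand(1, 3, 32, 32)
    modelsize(model, dummy_input, type_size=4)  # 4 bytes per float32 element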
| 1,428 | 34.725 | 116 | py |
RobDanns | RobDanns-main/deep_learning/tools/corruptions-inference-tinyimagenet.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
from torchvision.utils import save_image
from skimage.util import random_noise
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST(VAL) DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
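# Hypothetical usage sketch (not part of the original file): each line of TinyImageNet's
# val_annotations.txt starts with "<image filename>\t<wordnet class id>\t<bbox...>", so
# parseClasses returns the image names and their class ids in file order.
def _example_parse_classes(path='val_annotations_example.txt'):
    with open(path, 'w') as f:
        f.write("val_0.JPEG\tn01443537\t0\t0\t62\t62\n")
        f.write("val_1.JPEG\tn02123045\t12\t8\t54\t58\n")
    filenames, classes = parseClasses(path)
    return filenames, classes  # (['val_0.JPEG', 'val_1.JPEG'], ['n01443537', 'n02123045'])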
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
#if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
# return eval_stats
def save_noisy_image(img, name):
if img.size(2) == 32:
img = img.view(img.size(0), 3, 32, 32)
save_image(img, name)
if img.size(2) == 64:
img = img.view(img.size(0), 3, 64, 64)
save_image(img, name)
else:
img = img.view(img.size(0), 3, 224, 224)
save_image(img, name)
## Functions to save noisy images.
# def gaussian_noise(test_loader):
# print("Adding gaussian_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# gaussian_img_05 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.05, clip=True))
# gaussian_img_2 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.2, clip=True))
# gaussian_img_4 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.4, clip=True))
# gaussian_img_6 = torch.tensor(random_noise(img, mode='gaussian', mean=0, var=0.6, clip=True))
# save_noisy_image(gaussian_img_05, r"noisy-images/gaussian_05.png")
# save_noisy_image(gaussian_img_2, r"noisy-images/gaussian_2.png")
# save_noisy_image(gaussian_img_4, r"noisy-images/gaussian_4.png")
# save_noisy_image(gaussian_img_6, r"noisy-images/gaussian_6.png")
# break
# def salt_pepper_noise(test_loader):
# print("Adding salt_pepper_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# s_vs_p_5 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.5, clip=True))
# s_vs_p_6 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.6, clip=True))
# s_vs_p_7 = torch.tensor(random_noise(img, mode='s&p', salt_vs_pepper=0.7, clip=True))
# save_noisy_image(s_vs_p_5, r"noisy-images/s&p_5.png")
# break
# def speckle_noise(test_loader):
# print("Adding speckle_noise")
# for data in test_loader:
# img, _ = data[0], data[1]
# speckle_img_05 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.05, clip=True))
# speckle_img_2 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.2, clip=True))
# speckle_img_4 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.4, clip=True))
# speckle_img_6 = torch.tensor(random_noise(img, mode='speckle', mean=0, var=0.6, clip=True))
# save_noisy_image(speckle_img_05, r"noisy-images/speckle_05.png")
# save_noisy_image(speckle_img_2, r"noisy-images/speckle_2.png")
# save_noisy_image(speckle_img_4, r"noisy-images/speckle_4.png")
# save_noisy_image(speckle_img_6, r"noisy-images/speckle_6.png")
# break
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cu.had_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
data_path = dp.get_data_path(cfg.TRAIN.DATASET) # Retrieve the data path for the dataset
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
test_meter = TestMeter(len(test_loader))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
noise_mode = ['gaussian', 'speckle', 's&p']
noise_std = [0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6] # change the variance values as desired.
model.eval()
accuracies_gaussian = []
accuracies_saltpepper = []
accuracies_speckle = []
for mode in noise_mode:
for level in noise_std:
print("Adding noise={} at level={} to images".format(mode, level))
ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader):
if not 's&p' in mode:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, mean=0, var=level, clip=True))
else:
noisy_img = torch.tensor(random_noise(inputs, mode=mode, salt_vs_pepper=0.5, clip=True))
noisy_img, labels = noisy_img.cuda(), labels.cuda(non_blocking=True)
outputs = model(noisy_img.float())
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += labels.size(0)
correct += (predicted == labels).sum()
if total > X: # replace X with the number of images to be generated for adversarial attacks.
break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
if 'gaussian' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_gaussian.append(round(acc, 2))
print("Guassian Accuracies after append :", accuracies_gaussian)
elif 'speckle' in mode:
print('Robust Accuracy = {:.3f} with level = {:.2f}'.format(acc, level))
accuracies_speckle.append(round(acc, 2))
print("Speckle Accuracies after append :", accuracies_speckle)
elif 's&p' in mode:
print('Robust Accuracy = {:.3f} for S&P noise'.format(acc))
accuracies_saltpepper.append(round(acc, 2))
print("Salt&Pepper Accuracies after append :", accuracies_saltpepper)
break
else:
print("noise mode not supported")
# gaussian_noise(test_loader)
# salt_pepper_noise(test_loader)
# speckle_noise(test_loader)
        # Change the number of variables below to match the number of noise levels evaluated.
gaus_001, gaus_01, gaus_05, gaus_1, gaus_2, gaus_3, gaus_4, gaus_5, gaus_6 = (items for items in accuracies_gaussian)
speck_001, speck_01, speck_05, speck_1, speck_2, speck_3, speck_4, speck_5, speck_6 = (items for items in accuracies_speckle)
saltpepper = accuracies_saltpepper[0]
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_gaussian = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(gaus_001), str(gaus_01), str(gaus_05), str(gaus_1), str(gaus_2), str(gaus_3), str(gaus_4), str(gaus_5), str(gaus_6)])
result_speck = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(speck_001), str(speck_01), str(speck_05), str(speck_1), str(speck_2), str(speck_3), str(speck_4), str(speck_5), str(speck_6)])
result_sp = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(saltpepper)])
with open("{}/gaus_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Gaussian:{} ".format(accuracies_gaussian))
text_file.write(result_gaussian + '\n')
with open("{}/saltpepper_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Salt & Pepper:{} ".format(accuracies_saltpepper))
text_file.write(result_sp + '\n')
with open("{}/speckle_noise_stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies Speckle:{} ".format(accuracies_speckle))
text_file.write(result_speck + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 25,928 | 41.092532 | 139 | py |
RobDanns | RobDanns-main/deep_learning/tools/train_resnet18_on_tinyimagenet200.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
logger = lu.get_logger(__name__)
print("Let's use GPU :", torch.cuda.current_device())
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
#if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
# logger.info('Number of images: {}'.format(len(self.imgs)))
# logger.info('Number of classes: {}'.format(len(self.classnames)))
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
def train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch, writer_train=None, params=0, flops=0, is_master=False):
"""Performs one epoch of training."""
# Shuffle the data
loader.shuffle(train_loader, cur_epoch)
# Update the learning rate
lr = optim.get_epoch_lr(cur_epoch)
optim.set_lr(optimizer, lr)
# Enable training mode
model.train()
train_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(train_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Perform the forward pass
preds = model(inputs)
# Compute the loss
loss = loss_fun(preds, labels)
# Perform the backward pass
optimizer.zero_grad()
loss.backward()
# Update the parameters
optimizer.step()
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the stats across the GPUs
if cfg.NUM_GPUS > 1:
loss, top1_err, top5_err = du.scaled_all_reduce(
[loss, top1_err, top5_err]
)
# Copy the stats from GPU to CPU (sync point)
loss, top1_err, top5_err = loss.item(), top1_err.item(), top5_err.item()
train_meter.iter_toc()
# Update and log stats
train_meter.update_stats(
top1_err, top5_err, loss, lr, inputs.size(0) * cfg.NUM_GPUS
)
train_meter.log_iter_stats(cur_epoch, cur_iter)
train_meter.iter_tic()
# Log epoch stats
train_meter.log_epoch_stats(cur_epoch, writer_train, params, flops, is_master=is_master)
trg_stats = train_meter.get_epoch_stats(cur_epoch)
train_meter.reset()
return trg_stats
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
return eval_stats
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet':
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
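        # At this point cfg.RGRAPH.DIM_LIST has been rescaled so that the generated
        # model's flops/params land as close to stats_baseline as possible (kept just
        # below the baseline when cfg.RGRAPH.UPPER is False, just above it otherwise).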
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
if cfg.TRAIN.AUTO_RESUME and cu.has_checkpoint():
last_checkpoint = cu.get_checkpoint_last()
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
# Create data loaders
# Retrieve the data path for the dataset
data_path = dp.get_data_path(cfg.TRAIN.DATASET)
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
train_meter = TrainMeter(len(train_loader))
test_meter = TestMeter(len(test_loader))
    # Create meters for fgsm
    # Note: this script builds no adversarial loader, so the fgsm meter is left disabled here
    # test_meter_fgsm = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
# do eval at initialization
initial_eval_stats = eval_epoch(test_loader, model, test_meter, -1,
writer_eval, params, flops, is_master=is_master)
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
last_epoch_eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
else:
for cur_epoch in range(start_epoch, cfg.OPTIM.MAX_EPOCH):
print('Epoch {} Started'.format(cur_epoch))
# Train for one epoch
trg_stats = train_epoch(
train_loader, model, loss_fun, optimizer, train_meter, cur_epoch,
writer_train, is_master=is_master
)
# Compute precise BN stats
if cfg.BN.USE_PRECISE_STATS:
nu.compute_precise_bn_stats(model, train_loader)
# Save a checkpoint
if cu.is_checkpoint_epoch(cur_epoch):
checkpoint_file = cu.save_checkpoint(model, optimizer, cur_epoch)
logger.info('Wrote checkpoint to: {}'.format(checkpoint_file))
# Evaluate the model
if is_eval_epoch(cur_epoch):
eval_stats = eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Train the model
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
if cfg.NUM_GPUS > 1:
mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
else:
single_proc_train()
else:
print('Seed {} exists, skip!'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 21,617 | 37.741935 | 129 | py |
RobDanns | RobDanns-main/deep_learning/tools/adversarial-inference-tinyimagenet200.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the original graph2nn github repo.
# File modifications and additions by Rowan AI Lab, licensed under the Creative Commons Zero v1.0 Universal
# LICENSE file in the root directory of this source tree.
"""Train a classification model."""
from __future__ import print_function
import argparse
import numpy as np
import os
import sys
import torch
import multiprocessing as mp
import math
import pdb
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from pycls.config import assert_cfg
from pycls.config import cfg
from pycls.config import dump_cfg
from pycls.datasets import loader
from pycls.models import model_builder
from pycls.utils.meters import TestMeter
from pycls.utils.meters import TrainMeter
from PIL import Image
import pycls.models.losses as losses
import pycls.models.optimizer as optim
import pycls.utils.checkpoint as cu
import pycls.utils.distributed as du
import pycls.utils.logging as lu
import pycls.utils.metrics as mu
import pycls.utils.multiprocessing as mpu
import pycls.utils.net as nu
import pycls.datasets.paths as dp
import time
from datetime import datetime
from tensorboardX import SummaryWriter
import torchattacks
print("Let's use GPU :", torch.cuda.current_device())
logger = lu.get_logger(__name__)
def parse_args():
"""Parses the arguments."""
parser = argparse.ArgumentParser(
description='Train a classification model'
)
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file',
required=True,
type=str
)
parser.add_argument(
'opts',
help='See pycls/core/config.py for all options',
default=None,
nargs=argparse.REMAINDER
)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
# TEST/VAL DATA_LOADER FOR TINY_IMAGENET200
def parseClasses(file):
classes = []
filenames = []
with open(file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
for x in range(0, len(lines)):
tokens = lines[x].split()
classes.append(tokens[1])
filenames.append(tokens[0])
return filenames, classes
def load_allimages(dir):
images = []
if not os.path.isdir(dir):
sys.exit(-1)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
# if datasets.folder.is_image_file(fname):
if datasets.folder.has_file_allowed_extension(fname,['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']):
path = os.path.join(root, fname)
item = path
images.append(item)
return images
class TinyImageNet(torch.utils.data.Dataset):
""" TinyImageNet200 validation dataloader."""
def __init__(self, img_path, gt_path, class_to_idx=None, transform=None):
self.img_path = img_path
self.transform = transform
self.gt_path = gt_path
self.class_to_idx = class_to_idx
self.classidx = []
self.imgs, self.classnames = parseClasses(gt_path)
for classname in self.classnames:
self.classidx.append(self.class_to_idx[classname])
def __getitem__(self, index):
"""inputs: Index, retrns: tuple(im, label)"""
img = None
with open(os.path.join(self.img_path, self.imgs[index]), 'rb') as f:
img = Image.open(f)
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
label = self.classidx[index]
return img, label
def __len__(self):
return len(self.imgs)
def is_eval_epoch(cur_epoch):
"""Determines if the model should be evaluated at the current epoch."""
return (
(cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
(cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
)
def log_model_info(model, writer_eval=None):
"""Logs model info"""
logger.info('Model:\n{}'.format(model))
params = mu.params_count(model)
flops = mu.flops_count(model)
logger.info('Params: {:,}'.format(params))
logger.info('Flops: {:,}'.format(flops))
    logger.info('Number of nodes: {:,}'.format(cfg.RGRAPH.GROUP_NUM))
# logger.info('{}, {}'.format(params,flops))
if writer_eval is not None:
writer_eval.add_scalar('Params', params, 1)
writer_eval.add_scalar('Flops', flops, 1)
return params, flops
@torch.no_grad()
def eval_epoch(test_loader, model, test_meter, cur_epoch, writer_eval=None, params=0, flops=0, is_master=False):
"""Evaluates the model on the test set."""
# Enable eval mode
model.eval()
test_meter.iter_tic()
for cur_iter, (inputs, labels) in enumerate(test_loader):
# Transfer the data to the current GPU device
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
# Compute the predictions
preds = model(inputs)
# Compute the errors
top1_err, top5_err = mu.topk_errors(preds, labels, [1, 5])
# Combine the errors across the GPUs
if cfg.NUM_GPUS > 1:
top1_err, top5_err = du.scaled_all_reduce([top1_err, top5_err])
# Copy the errors from GPU to CPU (sync point)
top1_err, top5_err = top1_err.item(), top5_err.item()
test_meter.iter_toc()
# Update and log stats
test_meter.update_stats(
top1_err, top5_err, inputs.size(0) * cfg.NUM_GPUS
)
test_meter.log_iter_stats(cur_epoch, cur_iter)
test_meter.iter_tic()
# Log epoch stats
# test_meter.log_epoch_stats(cur_epoch,writer_eval,params,flops)
test_meter.log_epoch_stats(cur_epoch, writer_eval, params, flops, model, is_master=is_master)
eval_stats = test_meter.get_epoch_stats(cur_epoch)
test_meter.reset()
if cfg.RGRAPH.SAVE_GRAPH:
adj_dict = nu.model2adj(model)
adj_dict = {**adj_dict, 'top1_err': eval_stats['top1_err']}
os.makedirs('{}/graphs/{}'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN), exist_ok=True)
np.savez('{}/graphs/{}/{}.npz'.format(cfg.OUT_DIR, cfg.RGRAPH.SEED_TRAIN, cur_epoch), **adj_dict)
# return eval_stats
class Normalize(torch.nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1,3,1,1)
std = self.std.reshape(1,3,1,1)
norm_img = (input - mean) / std
return norm_img
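# Usage note (mirrors what train_model does below): the layer is prepended to the network,
# e.g. net = torch.nn.Sequential(Normalize(mean, std), model), so adversarial attacks
# operate on raw [0, 1] images while the wrapped model still sees normalized inputs.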
# Helper class for printing model layers
class PrintLayer(torch.nn.Module):
def __init__(self):
super(PrintLayer, self).__init__()
def forward(self, x):
# Do your print / debug stuff here
print(x)
return x
def train_model(writer_train=None, writer_eval=None, is_master=False):
"""Trains the model."""
# Fit flops/params
if cfg.TRAIN.AUTO_MATCH and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
mode = 'flops' # flops or params
if cfg.TRAIN.DATASET == 'cifar10':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 64:
stats_baseline = 48957952
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'cifar100':
pre_repeat = 15
if cfg.MODEL.TYPE == 'resnet': # ResNet20
stats_baseline = 40813184
elif cfg.MODEL.TYPE == 'mlpnet': # 5-layer MLP. cfg.MODEL.LAYERS exclude stem and head layers
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 256:
stats_baseline = 985600
elif cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 2364416
elif cfg.RGRAPH.DIM_LIST[0] == 1024:
stats_baseline = 6301696
elif cfg.MODEL.TYPE == 'cnn':
if cfg.MODEL.LAYERS == 3:
if cfg.RGRAPH.DIM_LIST[0] == 512:
stats_baseline = 806884352
elif cfg.RGRAPH.DIM_LIST[0] == 16:
stats_baseline = 1216672
elif cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 48957952
elif '16d' in cfg.OUT_DIR:
stats_baseline = 3392128
elif cfg.TRAIN.DATASET == 'tinyimagenet200':
pre_repeat = 9
if cfg.MODEL.TYPE == 'resnet':
if 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 18: # ResNet18
stats_baseline = 1820000000
elif 'basic' in cfg.RESNET.TRANS_FUN and cfg.MODEL.DEPTH == 34: # ResNet34
stats_baseline = 3663761408
elif 'sep' in cfg.RESNET.TRANS_FUN: # ResNet34-sep
stats_baseline = 553614592
elif 'bottleneck' in cfg.RESNET.TRANS_FUN: # ResNet50
stats_baseline = 4089184256
elif cfg.MODEL.TYPE == 'efficientnet': # EfficientNet
stats_baseline = 385824092
elif cfg.MODEL.TYPE == 'cnn': # CNN
if cfg.MODEL.LAYERS == 6:
if '64d' in cfg.OUT_DIR:
stats_baseline = 166438912
cfg.defrost()
stats = model_builder.build_model_stats(mode)
if stats != stats_baseline:
# 1st round: set first stage dim
for i in range(pre_repeat):
scale = round(math.sqrt(stats_baseline / stats), 2)
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first = int(round(first * scale))
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
step = 1
while True:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first += flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if stats == stats_baseline:
break
if flag != flag_init:
if cfg.RGRAPH.UPPER == False: # make sure the stats is SMALLER than baseline
if flag < 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
else:
if flag > 0:
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [dim / first for dim in cfg.RGRAPH.DIM_LIST]
first -= flag_init * step
cfg.RGRAPH.DIM_LIST = [int(round(first * ratio)) for ratio in ratio_list]
break
# 2nd round: set other stage dim
first = cfg.RGRAPH.DIM_LIST[0]
ratio_list = [int(round(dim / first)) for dim in cfg.RGRAPH.DIM_LIST]
stats = model_builder.build_model_stats(mode)
flag_init = 1 if stats < stats_baseline else -1
if 'share' not in cfg.RESNET.TRANS_FUN:
for i in range(1, len(cfg.RGRAPH.DIM_LIST)):
for j in range(ratio_list[i]):
cfg.RGRAPH.DIM_LIST[i] += flag_init
stats = model_builder.build_model_stats(mode)
flag = 1 if stats < stats_baseline else -1
if flag_init != flag:
cfg.RGRAPH.DIM_LIST[i] -= flag_init
break
stats = model_builder.build_model_stats(mode)
print('FINAL', cfg.RGRAPH.GROUP_NUM, cfg.RGRAPH.DIM_LIST, stats, stats_baseline, stats < stats_baseline)
# Build the model (before the loaders to ease debugging)
model = model_builder.build_model()
params, flops = log_model_info(model, writer_eval)
# for name, param in model.named_parameters():
# print(name, param.shape)
# Define the loss function
loss_fun = losses.get_loss_fun()
# Construct the optimizer
optimizer = optim.construct_optimizer(model)
# Load a checkpoint if applicable
start_epoch = 0
    if cu.has_checkpoint():
print("Checking for a checkpoint")
last_checkpoint = cu.get_checkpoint_last()
print("Last Checkpoint : ", last_checkpoint)
checkpoint_epoch = cu.load_checkpoint(last_checkpoint, model, optimizer)
logger.info('Loaded checkpoint from: {}'.format(last_checkpoint))
if checkpoint_epoch == cfg.OPTIM.MAX_EPOCH:
exit()
start_epoch = checkpoint_epoch
else:
start_epoch = checkpoint_epoch + 1
print("Epoch = ", start_epoch)
# Create data loaders
data_path = dp.get_data_path(cfg.TRAIN.DATASET) # Retrieve the data path for the dataset
traindir = os.path.join(data_path, cfg.TRAIN.SPLIT)
valdir = os.path.join(data_path, cfg.TEST.SPLIT, 'images')
valgtfile = os.path.join(data_path, cfg.TEST.SPLIT, 'val_annotations.txt')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # needed by the train/test transforms below
# create training dataset and loader
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.Resize(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=True)
# create validation dataset
test_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor(),
normalize]))
# create validation loader
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=int(cfg.TEST.BATCH_SIZE / cfg.NUM_GPUS),
shuffle=False,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# create adversarial dataset
adv_dataset = TinyImageNet(
valdir,
valgtfile,
class_to_idx=train_loader.dataset.class_to_idx.copy(),
transform=transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()]))
# create adversarial loader
test_loader_adv = torch.utils.data.DataLoader(
adv_dataset,
batch_size=1,
shuffle=True,
num_workers=cfg.DATA_LOADER.NUM_WORKERS,
pin_memory=cfg.DATA_LOADER.PIN_MEMORY,
drop_last=False)
# Create meters
test_meter = TestMeter(len(test_loader))
test_meter_adv = TestMeter(len(test_loader_adv))
if cfg.ONLINE_FLOPS:
model_dummy = model_builder.build_model()
IMAGE_SIZE = 224
n_flops, n_params = mu.measure_model(model_dummy, IMAGE_SIZE, IMAGE_SIZE)
logger.info('FLOPs: %.2fM, Params: %.2fM' % (n_flops / 1e6, n_params / 1e6))
del (model_dummy)
# Perform the training loop
logger.info('Start epoch: {}'.format(start_epoch + 1))
if start_epoch == cfg.OPTIM.MAX_EPOCH:
cur_epoch = start_epoch - 1
eval_epoch(test_loader, model, test_meter, cur_epoch,
writer_eval, params, flops, is_master=is_master)
    # epsilon == 0 --> run PGD, epsilon == 1 --> run CW, any other value --> run FGSM with that epsilon;
    # replace eps1, eps2, ..., epsN below with the required FGSM epsilon values
epsilons = [0, eps1, eps2, ... epsN, 1]
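    # Illustrative only (not part of the original script): a concrete choice could be
    # epsilons = [0, 1/255, 2/255, 4/255, 6/255, 8/255, 16/255, 32/255, 1]
    # i.e. the PGD sentinel, seven FGSM strengths, and the CW sentinel, which matches
    # the nine accuracy variables unpacked further below.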
    # Per-channel mean and SD values (RGB order) for the TinyImageNet dataset
tinyimagenet_MEAN = [0.485, 0.456, 0.406]
tinyimagenet_SD = [0.229, 0.224, 0.225]
accuracies = []
# add normalization layer to the model
norm_layer = Normalize(mean=tinyimagenet_MEAN, std=tinyimagenet_SD)
net = torch.nn.Sequential(norm_layer, model).cuda()
net = net.eval()
for epsilon in epsilons:
if epsilon == 0:
print("Running PGD Attack")
atk = torchattacks.PGD(net, eps=1/510, alpha=2/225, steps=7) # for relevant dataset, use parameters from torchattacks official notebook
elif epsilon == 1:
print("Running CW Attack")
atk = torchattacks.CW(net, c=0.1, kappa=0, steps=100, lr=0.01) # choose suitable values for c, kappa, steps, and lr.
else:
print("Running FGSM Attacks on epsilon :", epsilon)
atk = torchattacks.FGSM(net, eps=epsilon)
ctr = 0
correct = 0
total = 0
for cur_iter, (inputs, labels) in enumerate(test_loader_adv):
inputs, labels = inputs.cuda(), labels.cuda(non_blocking=True)
adv_images = atk(inputs, labels)
outputs = net(adv_images)
_, predicted = torch.max(outputs.data, 1)
ctr += 1
total += 1
correct += (predicted == labels).sum()
if ctr > X: # replace X with the number of images to be generated for adversarial attacks.
print(ctr, " images done for epsilon:", epsilon)
break
acc = 100 * float(correct) / total
print("acc =", round(acc, 2), "correct =", float(correct), "total =", total)
accuracies.append(round(acc, 2))
print('Attack Accuracy = {:.3f} with epsilon = {:.4f}'.format(acc, epsilon))
print("accuracies after apend :", accuracies)
    # unpack the accuracies list into separate float variables; keep the number of variables in sync with len(epsilons)
accPGD, accFGSM1, accFGSM2, accFGSM3, accFGSM4, accFGSM5, accFGSM6, accFGSM7, accCW = (items for items in accuracies)
# load the top1 error and top5 error from the evaluation results
f = open("{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH), "r")
c_ids = []
for i in f.readlines():
sub_id = list(map(float, i.split(",")))
c_ids.append(sub_id[3:5])
topK_errors = [sum(i) / len(c_ids) for i in zip(*c_ids)]
top1_error, top5_error = topK_errors[0], topK_errors[1]
result_info = ', '.join(
[str(cfg.RGRAPH.GROUP_NUM), str(cfg.RGRAPH.P), str(cfg.RGRAPH.SPARSITY),
'{:.3f}'.format(top1_error), '{:.3f}'.format(top5_error),
str(accPGD), str(accFGSM1), str(accFGSM2), str(accFGSM3), str(accFGSM4), str(accFGSM5),
str(accFGSM6), str(accFGSM7), str(accCW)])
with open("{}/stats.txt".format(cfg.OUT_DIR), "a") as text_file:
print(" Writing Text File with accuracies {} ".format(accuracies))
text_file.write(result_info + '\n')
def single_proc_train():
"""Performs single process training."""
# Setup logging
lu.setup_logging()
# Show the config
logger.info('Config:\n{}'.format(cfg))
# Setup tensorboard if provided
writer_train = None
writer_eval = None
## If use tensorboard
if cfg.TENSORBOARD and du.is_master_proc() and cfg.RGRAPH.SEED_TRAIN == cfg.RGRAPH.SEED_TRAIN_START:
comment = ''
current_time = datetime.now().strftime('%b%d_%H-%M-%S')
logdir_train = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_train')
logdir_eval = os.path.join(cfg.OUT_DIR,
'runs', current_time + comment + '_eval')
if not os.path.exists(logdir_train):
os.makedirs(logdir_train)
if not os.path.exists(logdir_eval):
os.makedirs(logdir_eval)
writer_train = SummaryWriter(logdir_train)
writer_eval = SummaryWriter(logdir_eval)
# Fix the RNG seeds (see RNG comment in core/config.py for discussion)
np.random.seed(cfg.RGRAPH.SEED_TRAIN)
torch.manual_seed(cfg.RGRAPH.SEED_TRAIN)
# Configure the CUDNN backend
torch.backends.cudnn.benchmark = cfg.CUDNN.BENCHMARK
# Launch inference + adversarial run
train_model(writer_train, writer_eval, is_master=du.is_master_proc())
if writer_train is not None and writer_eval is not None:
writer_train.close()
writer_eval.close()
def check_seed_exists(i):
fname = "{}/results_epoch{}.txt".format(cfg.OUT_DIR, cfg.OPTIM.MAX_EPOCH)
if os.path.isfile(fname):
with open(fname, 'r') as f:
lines = f.readlines()
if len(lines) > i:
return True
return False
def main():
# Parse cmd line args
args = parse_args()
# Load config options
cfg.merge_from_file(args.cfg_file)
cfg.merge_from_list(args.opts)
assert_cfg()
# cfg.freeze()
# Ensure that the output dir exists
os.makedirs(cfg.OUT_DIR, exist_ok=True)
# Save the config
dump_cfg()
for i, cfg.RGRAPH.SEED_TRAIN in enumerate(range(cfg.RGRAPH.SEED_TRAIN_START, cfg.RGRAPH.SEED_TRAIN_END)):
# check if a seed has been run
if not check_seed_exists(i):
print("Launching inference for seed {}".format(i))
single_proc_train()
else:
print('Inference seed {} already exists, stopping inference'.format(cfg.RGRAPH.SEED_TRAIN))
if __name__ == '__main__':
main()
| 23,184 | 38.768439 | 147 | py |
Dataset Card for "AlgorithmicResearchGroup/arxiv_deep_learning_python_research_code"
Dataset Description
https://huggingface.co/datasets/AlgorithmicResearchGroup/arxiv_deep_learning_python_research_code
Dataset Summary
AlgorithmicResearchGroup/arxiv_deep_learning_python_research_code contains over 1.49GB of source code from files referenced in ArXiv papers. The dataset serves as a curated corpus for Code LLMs.
How to use it
from datasets import load_dataset
# full dataset (1.49GB of data)
ds = load_dataset("ArtifactAI/arxiv_deep_learning_python_research_code", split="train")
# dataset streaming (will only download the data as needed)
ds = load_dataset("ArtifactAI/arxiv_deep_learning_python_research_code", streaming=True, split="train")
for sample in iter(ds):
    print(sample["code"])
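The metadata fields can also be used to narrow a streamed pass. The snippet below is not from the original card; it keeps only files from one repository that appears in the dataset (RobDanns, shown elsewhere on this page):
# stream the split and keep files from a single repository (illustrative)
ds = load_dataset("ArtifactAI/arxiv_deep_learning_python_research_code", streaming=True, split="train")
for sample in iter(ds):
    if sample["repo"] == "RobDanns":
        print(sample["file"], sample["file_length"])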
Dataset Structure
Data Instances
Each data instance corresponds to one file. The content of the file is in the code feature, and the other features (repo, file, etc.) provide some metadata.
Data Fields
- repo (string): code repository name.
- file (string): file path in the repository.
- code (string): code within the file.
- file_length (integer): number of characters in the file.
- avg_line_length (float): the average line length of the file.
- max_line_length (integer): the maximum line length of the file.
- extension_type (string): file extension.
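For orientation, the RobDanns row shown earlier on this page would surface as a record shaped roughly like this (the code value is truncated here for readability):
sample = {
    "repo": "RobDanns",
    "file": "RobDanns-main/deep_learning/tools/adversarial-inference-tinyimagenet200.py",
    "code": "#!/usr/bin/env python3\n...",  # full file contents, truncated in this example
    "file_length": 23184,
    "avg_line_length": 38.768439,
    "max_line_length": 147,
    "extension_type": "py",
}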
Data Splits
The dataset has no splits and all data is loaded as train split by default.
Dataset Creation
Source Data
Initial Data Collection and Normalization
34,099 active GitHub repository names were extracted from ArXiv papers published from the archive's inception through July 21st, 2023, totaling 773GB of compressed GitHub repositories.
These repositories were then filtered, and the code from every file that mentions any of ["torch", "jax", "flax", "stax", "haiku", "keras", "fastai", "xgboost", "caffe", "mxnet"] was extracted, yielding 1.4 million files.
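A minimal sketch of this kind of keyword filter, written as an assumption about the collection pipeline rather than the actual code used:
import os

FRAMEWORK_KEYWORDS = ["torch", "jax", "flax", "stax", "haiku", "keras", "fastai", "xgboost", "caffe", "mxnet"]

def mentions_framework(path):
    # True if the file's text mentions any target deep learning framework
    try:
        with open(path, errors="ignore") as f:
            text = f.read()
    except OSError:
        return False
    return any(keyword in text for keyword in FRAMEWORK_KEYWORDS)

def collect_python_files(repo_dir):
    # walk a cloned repository and keep .py files that mention a framework
    kept = []
    for root, _, fnames in os.walk(repo_dir):
        for fname in fnames:
            path = os.path.join(root, fname)
            if fname.endswith(".py") and mentions_framework(path):
                kept.append(path)
    return kept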
Who are the source language producers?
The source (code) language producers are the GitHub users who created each unique repository.
Personal and Sensitive Information
The released dataset may contain sensitive information such as emails, IP addresses, and API/ssh keys that have previously been published to public repositories on GitHub.
Additional Information
Dataset Curators
Matthew Kenney, AlgorithmicResearchGroup, matt@algorithmicresearchgroup.com
Citation Information
@misc{arxiv_deep_learning_python_research_code,
title={arxiv_deep_learning_python_research_code},
author={Matthew Kenney},
year={2023}
}