#!/usr/bin/env python
# encoding: utf-8
'''
@author: wangjianrong
@software: pycharm
@file: pytorch常用代码段.py
@time: 2020/9/30 9:43
@desc:
https://zhuanlan.zhihu.com/p/104019160
https://zhuanlan.zhihu.com/p/205407928
'''

#导入包和版本查询
import torch
import torch.nn as nn
import torchvision
# print(torch.__version__)
# print(torch.version.cuda)
# print(torch.backends.cudnn.version())
# print(torch.cuda.get_device_name(0))

#可复现性
import numpy as np
import random
import os
# def init_seed(seed):
#   random.seed(seed)
#   os.environ['PYTHONHASHSEED'] =str(seed)
#   np.random.seed(seed)
#   torch.manual_seed(seed)
#   torch.cuda.manual_seed_all(seed)
#   torch.backends.cudnn.deterministic =True
#   torch.backends.cudnn.benchmark = False

# Reproducibility: seed every RNG this code may touch and make cuDNN
# deterministic (at the cost of disabling its benchmark autotuner).
def seed_everything(seed=0):
    """Seed Python, NumPy and PyTorch (CPU + all GPUs) RNGs with ``seed``.

    Also forces cuDNN into deterministic mode and disables its benchmark
    autotuner so repeated runs produce identical results.

    Args:
        seed (int): value used to seed all random number generators.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op on CPU-only machines
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


seed_everything(0)


# Release cached GPU memory back to the driver
# (no-op if CUDA was never initialized).
torch.cuda.empty_cache()

# 在PyTorch 1.3之前，需要使用注释
# Tensor[N, C, H, W]
# images = torch.randn(32, 3, 56, 56)
# images.sum(dim=1)
# images.select(dim=1, index=0)

# PyTorch 1.3之后
# NCHW = ['N', 'C', 'H', 'W']
# images = torch.randn(32, 3, 56, 56, names=NCHW)
# images.sum('C')
# images.select('C', index=0)
# 也可以这么设置
# tensor = torch.rand(3,4,1,2,names=('C', 'N', 'H', 'W'))
# 使用align_to可以对维度方便地排序
# tensor = tensor.align_to('N', 'C', 'H', 'W')


# 设置默认类型，pytorch中的FloatTensor远远快于DoubleTensor
torch.set_default_tensor_type(torch.FloatTensor)

# 类型转换
# tensor = tensor.cuda()
# tensor = tensor.cpu()
# tensor = tensor.float()
# tensor = tensor.long()

#torch.Tensor与np.ndarray转换
# ndarray = tensor.cpu().numpy()
# tensor = torch.from_numpy(ndarray).float()
# tensor = torch.from_numpy(ndarray.copy()).float() # If ndarray has negative stride.

import PIL
#Torch.tensor与PIL.Image转换
# pytorch中的张量默认采用[N, C, H, W]的顺序，并且数据范围在[0,1]，需要进行转置和规范化
# torch.Tensor -> PIL.Image
# image = PIL.Image.fromarray(torch.clamp(tensor*255, min=0, max=255).byte().permute(1,2,0).cpu().numpy())
# image = torchvision.transforms.functional.to_pil_image(tensor)  # Equivalently way

# PIL.Image -> torch.Tensor
# path = r'./figure.jpg'
# tensor = torch.from_numpy(np.asarray(PIL.Image.open(path))).permute(2,0,1).float() / 255
# tensor = torchvision.transforms.functional.to_tensor(PIL.Image.open(path)) # Equivalently way

#np.ndarray与PIL.Image的转换
# image = PIL.Image.fromarray(ndarray.astype(np.uint8))

# ndarray = np.asarray(PIL.Image.open(path))

#打乱顺序
# tensor = tensor[torch.randperm(tensor.size(0))]  # 打乱第一个维度


#水平翻转
# pytorch不支持tensor[::-1]这样的负步长操作，水平翻转可以通过张量索引实现
# 假设张量的维度为[N, D, H, W].
# tensor = tensor[:,:,:,torch.arange(tensor.size(3) - 1, -1, -1).long()]

#复制张量
# Operation                 |  New/Shared memory | Still in computation graph |
# tensor.clone()            # |        New         |          Yes               |
# tensor.detach()           # |      Shared        |          No                |
# tensor.detach().clone()  # |        New         |          No                |

#张量拼接
'''
注意torch.cat和torch.stack的区别在于torch.cat沿着给定的维度拼接，
而torch.stack会新增一维。例如当参数是3个10x5的张量，torch.cat的结果是30x5的张量，
而torch.stack的结果是3x10x5的张量。
'''
# list_of_tensors = [torch.randn(4,5) for i in range(3)]
# tensor = torch.cat(list_of_tensors, dim=1)
# print(tensor.size())
# tensor = torch.stack(list_of_tensors, dim=1)
# print(tensor.size())

#将整数标签转为one-hot编码
# pytorch的标记默认从0开始
# tensor = torch.tensor([0, 2, 1, 3])
# N = tensor.size(0)
# num_classes = 4
# one_hot = torch.zeros(N, num_classes).long()
# one_hot.scatter_(dim=1, index=torch.unsqueeze(tensor, dim=1), src=torch.ones(N, num_classes).long())
# print(torch.nn.functional.one_hot(tensor,num_classes))

#判断两个张量相等
# torch.allclose(tensor1, tensor2)  # float tensor
# torch.equal(tensor1, tensor2)     # int tensor

#张量扩展
# Expand tensor of shape 64*512 to shape 64*512*7*7.
# tensor = torch.rand(64,512)
# torch.reshape(tensor, (64, 512, 1, 1)).expand(64, 512, 7, 7)

#矩阵乘法
# tensor1 = torch.randn(4,5)
# tensor2 = torch.randn(5,3)
# Matrix multiplication: (m*n) * (n*p) -> (m*p).
# result = torch.mm(tensor1, tensor2)
# print(result.size())

# Batch matrix multiplication: (b*m*n) * (b*n*p) -> (b*m*p)
# tensor1 = torch.randn(3,4,5)
# tensor2 = torch.randn(3,5,3)
# result = torch.bmm(tensor1, tensor2)
# print(result.size())


# Element-wise multiplication.
# result = tensor1 * tensor2

#计算两组数据之间的两两欧式距离
#利用broadcast机制
# dist = torch.sqrt(torch.sum((X1[:,None,:] - X2) ** 2, dim=2))


#双线性汇合（bilinear pooling）
# N=4
# D=3
# H=4
# W=4
# X=torch.randn(N,D,H,W)
# X = torch.reshape(X, (N, D, H * W))                  # Assume X has shape N*D*H*W
# X = torch.bmm(X, torch.transpose(X, 1, 2)) / (H * W)  # Bilinear pooling
# assert X.size() == (N, D, D)
# X = torch.reshape(X, (N, D * D))
# X = torch.sign(X) * torch.sqrt(torch.abs(X) + 1e-5)   # Signed-sqrt normalization
# X = torch.nn.functional.normalize(X)



#sync_bn = torch.nn.SyncBatchNorm(num_features, eps=1e-05, momentum=0.1, affine=True,
                                 # track_running_stats=True)

#将已有网络的所有BN层改为同步BN层 affine定义了BN层的参数γ和β是否是可学习的(不可学习默认是常数1和0)。
# def convertBNtoSyncBN(module, process_group=None):
#     '''Recursively replace all BN layers to SyncBN layer.
#
#     Args:
#         module[torch.nn.Module]. Network
#     '''
#     if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
#         sync_bn = torch.nn.SyncBatchNorm(module.num_features, module.eps, module.momentum,
#                                          module.affine, module.track_running_stats, process_group)
#         sync_bn.running_mean = module.running_mean
#         sync_bn.running_var = module.running_var
#         if module.affine:
#             sync_bn.weight = module.weight.clone().detach()
#             sync_bn.bias = module.bias.clone().detach()
#         return sync_bn
#     else:
#         for name, child_module in module.named_children():
#             setattr(module, name, convertBNtoSyncBN(child_module, process_group=process_group))
#         return module

# 类似 BN 滑动平均
# 如果要实现类似 BN 滑动平均的操作，在 forward 函数中要使用原地（inplace）操作给滑动平均赋值。

# class BN(torch.nn.Module):
#     def __init__(self):
#         ...
#         self.register_buffer('running_mean', torch.zeros(num_features))
#
#     def forward(self, X):
#         ...
#         self.running_mean += momentum * (current - self.running_mean)

#计算模型整体参数量
# num_parameters = sum(torch.numel(parameter) for parameter in model.parameters())


#注意 model.modules() 和 model.children() 的区别：model.modules() 会迭代地遍历模型的所有子层，而 model.children() 只会遍历模型下的一层。
# model = torchvision.models.resnet18(False)
# num = 0
# for i,*_ in enumerate(model.children()):
#     num += 1
# print(num)
#
# num = 0
# for i,*_ in enumerate(model.named_parameters()):
#     num += 1
# print(num)
#
# num = 0
# for i,*_ in enumerate(model.modules()):
#     num += 1
# print(num)

#模型权重初始化
# Common practise for initialization.
# Common practice for weight initialization:
#   - He (Kaiming) init for conv layers that feed ReLU activations,
#   - constant 1 / 0 for BatchNorm scale / shift,
#   - Xavier init for fully-connected layers.
def init_weights(layer):
    """Initialize a single layer in place; pass to ``model.apply``.

    Args:
        layer (torch.nn.Module): submodule to initialize. Layers other
            than Conv2d / BatchNorm2d / Linear are left untouched.
    """
    if isinstance(layer, torch.nn.Conv2d):
        torch.nn.init.kaiming_normal_(layer.weight, mode='fan_out',
                                      nonlinearity='relu')
        if layer.bias is not None:  # resnet convs are created with bias=False
            torch.nn.init.constant_(layer.bias, val=0.0)
    elif isinstance(layer, torch.nn.BatchNorm2d):
        torch.nn.init.constant_(layer.weight, val=1.0)
        torch.nn.init.constant_(layer.bias, val=0.0)
    elif isinstance(layer, torch.nn.Linear):
        torch.nn.init.xavier_normal_(layer.weight)
        if layer.bias is not None:
            torch.nn.init.constant_(layer.bias, val=0.0)


model = torchvision.models.resnet18(False)
# model.apply recursively visits every submodule, same coverage as
# iterating model.modules().
model.apply(init_weights)

# Initialization with given tensor.
# layer.weight = torch.nn.Parameter(tensor)

# 取模型中的前两层
# tmp = list(model.children())
# conv_model = torch.nn.Sequential()
# new_model = nn.Sequential(*list(model.children())[:2])
# 如果希望提取出模型中的所有卷积层，可以像下面这样操作：
# for layer in model.named_modules():
#     if isinstance(layer[1],nn.Conv2d):
#          conv_model.add_module(layer[0].replace('.','_'),layer[1])
# print(conv_model)

# 导入另一个模型的相同部分到新的模型
# 模型导入参数时，如果两个模型结构不一致，则直接导入参数会报错。用下面方法可以把另一个模型的相同的部分导入到新的模型中。

# model_new代表新的模型
# model_saved代表其他模型，比如用torch.load导入的已保存的模型
# model_new_dict = model_new.state_dict()
# model_common_dict = {k:v for k, v in model_saved.items() if k in model_new_dict.keys()}
# model_new_dict.update(model_common_dict)
# model_new.load_state_dict(model_new_dict)

#label smoothing
# import torch
# import torch.nn as nn
# class LSR(nn.Module):
#
#     def __init__(self, e=0.1, reduction='mean'):
#         super().__init__()
#
#         self.log_softmax = nn.LogSoftmax(dim=1)
#         self.e = e
#         self.reduction = reduction
#
#     def _one_hot(self, labels, classes, value=1):
#         """
#             Convert labels to one hot vectors
#
#         Args:
#             labels: torch tensor in format [label1, label2, label3, ...]
#             classes: int, number of classes
#             value: label value in one hot vector, default to 1
#
#         Returns:
#             return one hot format labels in shape [batchsize, classes]
#         """
#         one_hot = torch.nn.functional.one_hot(labels,classes)
#         one_hot *= value
#         # one_hot = torch.zeros(labels.size(0), classes)
#         #
#         # # labels and value_added  size must match
#         # labels = labels.view(labels.size(0), -1)
#         # value_added = torch.Tensor(labels.size(0), 1).fill_(value)
#         #
#         # value_added = value_added.to(labels.device)
#         # one_hot = one_hot.to(labels.device)
#
#         # one_hot.scatter_add_(1, labels, value_added)
#
#         return one_hot
#
#     def _smooth_label(self, target, length, smooth_factor):
#         """convert targets to one-hot format, and smooth
#         them.
#         Args:
#             target: target in form with [label1, label2, label_batchsize]
#             length: length of one-hot format(number of classes)
#             smooth_factor: smooth factor for label smooth
#
#         Returns:
#             smoothed labels in one hot format
#         """
#         one_hot = self._one_hot(target, length, value=1 - smooth_factor)
#         # one_hot += smooth_factor / (length - 1)
#         one_hot += smooth_factor / length
#
#         return one_hot.to(target.device)
#
#     def forward(self, x, target):
#
#         if x.size(0) != target.size(0):
#             raise ValueError('Expected input batchsize ({}) to match target batch_size({})'
#                              .format(x.size(0), target.size(0)))
#
#         if x.dim() < 2:
#             raise ValueError('Expected input tensor to have least 2 dimensions(got {})'
#                              .format(x.size(0)))
#
#         if x.dim() != 2:
#             raise ValueError('Only 2 dimension tensor are implemented, (got {})'
#                              .format(x.size()))
#
#         smoothed_target = self._smooth_label(target, x.size(1), self.e)
#         x = self.log_softmax(x)
#         loss = torch.sum(- x * smoothed_target, dim=1)
#
#         if self.reduction == 'none':
#             return loss
#
#         elif self.reduction == 'sum':
#             return torch.sum(loss)
#
#         elif self.reduction == 'mean':
#             return torch.mean(loss)
#
#         else:
#             raise ValueError('unrecognized option, expect reduction to be one of none, mean, sum')


# 不对偏置项进行权重衰减（weight decay）
# pytorch里的weight decay相当于l2正则

# bias_list = (param for name, param in model.named_parameters() if name[-4:] == 'bias')
# others_list = (param for name, param in model.named_parameters() if name[-4:] != 'bias')
# parameters = [{'params': bias_list, 'weight_decay': 0},
#               {'params': others_list}]
# optimizer = torch.optim.SGD(parameters, lr=1e-2, momentum=0.9, weight_decay=1e-4)


# 梯度裁剪（gradient clipping）
# torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=20)

#
# from torch.utils.tensorboard import SummaryWriter
# import numpy as np
#
# writer = SummaryWriter()
#
# for n_iter in range(100):
#     writer.add_scalar('Loss/train', np.random.random(), n_iter)
#     writer.add_scalar('Loss/test', np.random.random(), n_iter)
#     writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
#     writer.add_scalar('Accuracy/test', np.random.random(), n_iter)


# model = torchvision.models.resnet18(False)
# for n,m in model.named_children():
#     print(n)

# 微调全连接层
# model = torchvision.models.resnet18(pretrained=True)
# for param in model.parameters():
#     param.requires_grad = False
# model.fc = nn.Linear(512, 100)  # Replace the last fc layer
# optimizer = torch.optim.SGD(model.fc.parameters(), lr=1e-2, momentum=0.9, weight_decay=1e-4)

# 以较大学习率微调全连接层，较小学习率微调卷积层
# model = torchvision.models.resnet18(pretrained=True)
# finetuned_parameters = list(map(id, model.fc.parameters()))
# conv_parameters = (p for p in model.parameters() if id(p) not in finetuned_parameters)
# parameters = [{'params': conv_parameters, 'lr': 1e-3},
#               {'params': model.fc.parameters()}]
# optimizer = torch.optim.SGD(parameters, lr=1e-2, momentum=0.9, weight_decay=1e-4)

# model.eval() 和 torch.no_grad() 的区别在于，model.eval() 是将网络切换为测试状态，例如 BN 和dropout在训练和测试阶段使用不同的计算方法。torch.no_grad() 是关闭 PyTorch 张量的自动求导机制，以减少存储使用和加速计算，得到的结果无法进行 loss.backward()。
# model.zero_grad()会把整个模型的参数的梯度都归零, 而optimizer.zero_grad()只会把传入其中的参数的梯度归零.














