# -*- coding: utf-8 -*-
import argparse
import MainCode.TOOL.ToolClass as tool
from torch.autograd import Variable
from torchvision import transforms
import re
from torchviz import make_dot
from Datasets import MyDatasets
import os, glob, datetime, time
import numpy as np
# import tensorwatch as tw
import torch
import torch.nn as nn
from torch.nn.modules.loss import _Loss
import torch.nn.init as init
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR
from skimage.metrics import structural_similarity as sk_cpt_ssim


# Command-line arguments
parser = argparse.ArgumentParser(description='PyTorch IMPFCN')
parser.add_argument('--model', default='Ipr_FCN', type=str, help='choose a type of model')
parser.add_argument('--data_inchannels', default=1, type=int, help=r'the number of input-data channels ')
parser.add_argument('--model_midchannels', default=128, type=int, help=r'the number of model-head-layer channels ')
# nargs='+' lets the CLI override the list default; with `type=int` alone a
# user-supplied value would parse as a single int, not a list.
parser.add_argument('--dense_block_layers', default=[6, 12, 24, 40, 12], nargs='+', type=int,
                    help='number of layers in each dense block')
parser.add_argument('--train_data_dir', default=r'..\Data\Numpy_DATA\AddNoise', type=str, help='path of train data')
parser.add_argument('--target_data_dir', default=r'..\Data\Numpy_DATA\Original', type=str, help='path of target data')
parser.add_argument('--test_data_dir', default=r'..\Data\Numpy_DATA\test', type=str, help='path of test data')
# nargs=2 so a (height, width) pair can be supplied on the command line;
# the default tuple is kept unchanged.
parser.add_argument('--input_data_size', default=(1280, 192), nargs=2, type=int, help='size of input data')
parser.add_argument('--epoch', default=20, type=int, help='number of train epoches')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate for Adam')
parser.add_argument('--batch_size', default=1, type=int, help='batch size')
parser.add_argument('--patch_size', default=(64, 64), nargs=2, type=int, help='patch size')
args = parser.parse_args()


# DnCNN residual denoising network (stacked conv / batch-norm / ReLU layers).
class DnCNNNet(nn.Module):
    # A conv layer's in_channels equals the previous layer's out_channels
    # (or the image channel count for the first layer); out_channels is the
    # number of filters in the layer.
    def __init__(self, depth=17, n_channels=64, image_channels=1, use_bnorm=True, kernel_size=3):
        """Build a DnCNN model.

        Args:
            depth: total number of convolutional layers (>= 2).
            n_channels: feature channels in the hidden layers.
            image_channels: channels of the input/output image (1 = grayscale).
            use_bnorm: insert BatchNorm2d after each hidden convolution.
            kernel_size: side length of the (square, odd) convolution kernels.
        """
        super(DnCNNNet, self).__init__()

        # "Same" padding for an odd kernel, so spatial size is preserved.
        # Bug fix: the original re-assigned kernel_size = 3 here, silently
        # ignoring the constructor argument.
        padding = kernel_size // 2
        # All layers collected in order, then chained with nn.Sequential.
        layers = []

        # Input layer: conv + ReLU (bias on, no batch norm).
        layers.append(
            nn.Conv2d(in_channels=image_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding,
                      bias=True))
        layers.append(nn.ReLU(inplace=True))

        # Hidden layers: conv (+ optional batch norm) + ReLU.
        # Bug fix: the original always added BatchNorm2d, ignoring use_bnorm.
        for _ in range(depth - 2):
            layers.append(
                nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding,
                          bias=False))
            if use_bnorm:
                layers.append(nn.BatchNorm2d(n_channels, eps=0.0001, momentum=0.95))
            layers.append(nn.ReLU(inplace=True))

        # Output layer: conv mapping back to image_channels; it predicts the noise.
        layers.append(
            nn.Conv2d(in_channels=n_channels, out_channels=image_channels, kernel_size=kernel_size, padding=padding,
                      bias=False))

        # Sequential is a plain linear (non-branching) stack of the layers above.
        self.dncnn = nn.Sequential(*layers)
        # Initialize the weights of all layers.
        self._initialize_weights()

    def forward(self, x):
        """Residual forward pass.

        The stack predicts the noise component; the returned value is the
        denoised signal: input minus predicted noise.
        """
        noise = self.dncnn(x)
        return x - noise

    def _initialize_weights(self):
        """Orthogonal init for conv weights, zeros for conv biases, and
        (weight=1, bias=0) for BatchNorm layers."""
        # modules() iterates over every sub-module of the model.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.orthogonal_(m.weight)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
        print('init weight')


if __name__ == '__main__':

    # Pull the run configuration from the parsed CLI arguments.
    block_layers = args.dense_block_layers
    Input_root_dir = args.train_data_dir
    Target_root_dir = args.target_data_dir
    Data_size = args.patch_size
    InputDataSize = args.input_data_size
    batch_size = args.batch_size

    cuda = torch.cuda.is_available()

    # Model selection
    print('===> Building model')
    model = DnCNNNet()
    print(model)

    # Bug fix: choose the device based on availability so the script also
    # runs on CPU-only machines (the original unconditionally built a CUDA
    # device and crashed without a GPU).
    device = torch.device('cuda' if cuda else 'cpu')

    # Sum-reduced MSE. `reduce=True, size_average=False` is the deprecated
    # spelling of reduction='sum' and is rejected by current PyTorch.
    criterion = nn.MSELoss(reduction='sum').to(device)

    # Move the model to the selected device (a no-op on CPU).
    model = model.to(device)

    # Use the CLI learning rate instead of a duplicated hard-coded 1e-3.
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # MultiStepLR multiplies the learning rate by gamma at each milestone
    # epoch, e.g. lr -> lr*0.2 at epoch 30, again at 60 and 90.
    scheduler = MultiStepLR(optimizer, milestones=[30, 60, 90], gamma=0.2)

    data_trans = transforms.Compose([transforms.ToTensor()])

    data = MyDatasets(Input_root_dir=Input_root_dir, Target_root_dir=Target_root_dir, input_data_size=InputDataSize,
                      transform=data_trans, Data_size=Data_size)
    dataloader = DataLoader(data, batch_size=batch_size, shuffle=True)

    step = 0
    train_n = args.epoch

    time_open = time.time()
    for epoch in range(train_n):
        # Bug fix: the original concatenated str + int ('epoch:' + epoch),
        # which raises TypeError on the first iteration.
        print('epoch:{}'.format(epoch))

        # Switch to training mode (the original called model.train() twice).
        model.train()
        # Accumulated sum-of-squares loss over the epoch.
        running_loss = 0.0
        epoch_loss = 0.0

        for batch, (Input, Target, Name, NoiseLevel, inputPoints) in enumerate(dataloader, 1):
            # Move the batch to the training device. Variable() is a no-op
            # since PyTorch 0.4 and has been removed from the hot path.
            X = Input.to(device).type(torch.float32)
            Y = Target.to(device).type(torch.float32)

            # Forward pass: model output is the denoised prediction.
            y_pred = model(X)
            # Clear stale gradients before backprop.
            optimizer.zero_grad()
            loss = criterion(y_pred, Y)

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            # Mean loss per sample seen so far in this epoch.
            epoch_loss = running_loss * batch_size / len(data)
            step += batch_size
            print('\rLoss:{:.4f} step:{} '.format(epoch_loss, step), end=' ', flush=True)

        # Advance the LR schedule once per epoch, after the optimizer steps.
        # Fix: scheduler.step(epoch) before training is the deprecated order
        # and triggers a warning in current PyTorch.
        scheduler.step()

        # Evaluation on a fixed noisy/clean test pair.
        model.eval()

        data_path = r'..\Data\Numpy_DATA\test\theoretical_1NoiseData1.npy'
        original_data_path = r'..\Data\Numpy_DATA\Original\theoreticalData1.npy'
        # Load the noisy input and crop it to half the configured size.
        data_input_z = (np.load(data_path, allow_pickle=True))[0:int(InputDataSize[0] // 2),
                       0:int(InputDataSize[1]) // 2]
        # Convert to a (1, C, H, W) float tensor on the training device.
        data_input_z_tensor = data_trans(data_input_z).unsqueeze(0)
        data_input_z_tensor = data_input_z_tensor.to(device).type(torch.float32)
        # Matching clean reference, cropped the same way.
        data_input_s = (np.load(original_data_path, allow_pickle=True))[0:int(InputDataSize[0] // 2),
                       0:int(InputDataSize[1]) // 2]

        # Inference only: no gradient tracking needed.
        with torch.no_grad():
            y_pred = model(data_input_z_tensor)
        # Drop batch and channel dims and move to CPU (the original did a
        # pointless .cuda().data.cpu() round-trip here).
        out_data = y_pred.squeeze(0).squeeze(0).cpu().numpy()

        print()
        print('去噪前, 峰值信噪比为:{}'.format(tool.__mtx_similar2__(data_input_z, data_input_s)))
        print('去噪后, 峰值信噪比为:{}'.format(tool.__mtx_similar2__(out_data, data_input_s)))
        print('去噪前, 信噪比为:{}'.format(tool.__SNR__(data_input_z, data_input_s)))
        print('去噪后, 信噪比为:{}'.format(tool.__SNR__(out_data, data_input_s)))
        # NOTE(review): recent scikit-image requires data_range= for float
        # inputs — confirm the installed version supports this call.
        print('去噪前, 结构相似性：{}'.format(sk_cpt_ssim(data_input_z, data_input_s)))
        print('去噪后, 结构相似性：{}'.format(sk_cpt_ssim(out_data, data_input_s)))

    time_end = time.time() - time_open








