import os

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from config import parser
from datalist2 import Dataset
from utils import myLoss
from vision_transformer import VisionTransformer

'''
Fineness classification
'''
'''
DDP mode, intended for single-machine multi-GPU training; it is faster.

cmd: python -m torch.distributed.launch --nproc_per_node 4 trainDDP.py

Important: this launch command sets args.local_rank, so make absolutely sure
that local_rank is declared in args.
'''
best_acc = 0


class train(object):
    '''
    End-to-end trainer for the fineness-classification task using a
    Vision Transformer.

    NOTE(review): the lowercase class name breaks PEP 8 but is kept because
    the script entry point instantiates ``train()``.  Constructing the object
    builds the data loaders, model, loss/optimizer/scheduler, and then runs
    the whole train/eval loop as a side effect of ``__init__``.
    '''

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-----------")

        # Seed the RNGs so runs are reproducible on the chosen device.
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)      # seed the current GPU
            torch.cuda.manual_seed_all(self.args.seed)  # seed all GPUs
        else:
            torch.manual_seed(self.args.seed)           # seed the CPU RNG

        self.device = torch.device('cuda' if use_cuda else 'cpu')

        # ------------------------------------------------------------------
        # DataLoaders
        # ------------------------------------------------------------------
        print("Create Dataloader")
        # pin_memory only helps when batches are copied to the GPU.
        kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        self.train_loader = DataLoader(
            Dataset("train"),
            batch_size=self.args.train_batch_size, shuffle=True, **kwargs)
        self.test_loader = DataLoader(
            Dataset("val"),
            batch_size=self.args.test_batch_size, shuffle=True, **kwargs)

        # ------------------------------------------------------------------
        # Model
        # ------------------------------------------------------------------
        print('Create Model')
        self.model = VisionTransformer(
            image_size=(self.args.image_size, self.args.image_size),
            patch_size=(self.args.patch_size, self.args.patch_size),
            emb_dim=self.args.emb_dim,
            mlp_dim=self.args.mlp_dim,
            num_heads=self.args.num_heads,
            num_layers=self.args.num_layers,
            num_classes=self.args.num_class,
            attn_dropout_rate=self.args.attn_dropout_rate,
            dropout_rate=self.args.dropout_rate
        ).to(self.device)

        # Re-initialise the classification head before loading weights.
        self.model.classifier.weight.data.normal_(0, 0.1)

        # ------------------------------------------------------------------
        # Resume weights.
        # BUGFIX: the original loaded the ImageNet-21k ViT checkpoint and then
        # immediately overwrote it with "weight/best.pth", so only the latter
        # ever took effect; the dead first load (and the unreachable
        # from-scratch branch behind "if True") is removed.
        # ------------------------------------------------------------------
        checkpoint = torch.load("weight/best.pth")
        self.model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        print("Restoring the weight from pretrained-weight file \nFinished to load the weight")

        # ------------------------------------------------------------------
        # Loss, optimizer and learning-rate schedule
        # ------------------------------------------------------------------
        print("Establish the loss, optimizer and learning_rate function")
        self.criterion = nn.CrossEntropyLoss().to(self.device)
        self.optimizer = optim.Adam(params=self.model.parameters(),
                                    lr=self.args.lr,
                                    weight_decay=0.1)  # guards against overfitting
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=5, eta_min=1e-6)

        # ------------------------------------------------------------------
        # Train / eval loop.  Evaluation runs before each training pass,
        # mirroring the original order (the always-true `epoch % 1 == 0`
        # guard has been dropped).
        # ------------------------------------------------------------------
        print("Start training")
        for epoch in range(1, self.args.epoches + 1):
            self.test(epoch)
            self.train(epoch)
        torch.cuda.empty_cache()  # release cached GPU memory
        print("finish model training")

    def train(self, epoch):
        '''Run one optimisation epoch over self.train_loader.

        :param epoch: 1-based epoch index, used only for progress display.
        '''
        self.model.train()

        average_loss = []
        pbar = tqdm(self.train_loader, desc=f'Train Epoch {epoch}/{self.args.epoches}')

        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()  # clear accumulated gradients
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            average_loss.append(loss.item())
            self.optimizer.step()
            pbar.set_description(f'Train Epoch: {epoch}/{self.args.epoches} loss: {round(np.mean(average_loss), 6)}')
        # Step the LR schedule once per epoch, after all optimiser steps.
        self.scheduler.step()

    def test(self, epoch):
        '''Evaluate on self.test_loader; checkpoint when accuracy improves.

        :param epoch: 1-based epoch index, forwarded to save_model().
        '''
        global best_acc

        self.model.eval()
        # BUGFIX: use self.device instead of the hard-coded .cuda() calls so
        # evaluation also works on CPU-only machines.
        correct = torch.zeros(1).squeeze().to(self.device)
        total = torch.zeros(1).squeeze().to(self.device)
        average_loss = []
        # BUGFIX: predefine so an empty loader cannot raise NameError below.
        predict_acc = torch.zeros(1).squeeze().to(self.device)
        pbar = tqdm(self.test_loader, desc=f'Test Epoch{epoch}/{self.args.epoches}')
        with torch.no_grad():
            for data, target in pbar:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                test_loss = self.criterion(output, target).item()  # sum up batch loss
                average_loss.append(test_loss)
                pred = torch.max(output, 1)[1]  # class index with highest score
                correct += (pred == target).sum()
                total += len(target)
                pbar.set_description(
                    f'Test  Epoch: {epoch}/{self.args.epoches} '
                    f'Acc :{correct / total}'
                )
                predict_acc = correct / total

        if self.args.save and predict_acc > best_acc:
            best_acc = predict_acc
            self.save_model(epoch, average_loss, predict_acc, correct, total)

    def get_image_label(self):
        '''Pair image names with labels read from two parallel list files.

        NOTE(review): relies on self.images_path / self.labels_path, which are
        never assigned anywhere in this file — calling this method as-is
        raises AttributeError; confirm against the rest of the project.

        :return: list of "./weight/<image>*<label>" strings.
        '''
        images = []
        labels = []
        with open(self.images_path) as f:
            # Last whitespace-separated token on each line is the image name.
            for line in f.readlines():
                images.append(line.split()[-1])
        with open(self.labels_path) as f:
            for line in f.readlines():
                labels.append(line.split()[-1])

        lines = []
        for image, label in zip(images, labels):
            # ToDo
            lines.append(
                "./weight/" + str(image) + '*' + str(label))

        return lines

    def weights_init(self, m):
        '''Layer-type-aware weight initialiser, for use with model.apply().

        :param m: a single nn.Module submodule.
        '''
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Conv2d):
            # He initialisation suits ReLU-activated conv layers.
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    def save_model(self, epoch, average_loss, predict_acc, correct, total):
        '''Persist model + optimizer state to weight/best.pth.

        NOTE(review): the directory check uses saved_model + project_name
        while the checkpoint is written to the hard-coded "weight/best.pth" —
        confirm which location is actually intended.
        '''
        save_dir = self.args.saved_model + self.args.project_name
        if self.args.is_save and not os.path.isdir(save_dir):
            # BUGFIX: makedirs (not mkdir) so missing parent dirs don't fail.
            os.makedirs(save_dir, exist_ok=True)
        torch.save({
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'loss': round(np.mean(average_loss), 2)
        },
           'weight/best.pth')
        # BUGFIX: scale to percent before rounding; rounding first leaves
        # float artefacts (e.g. round(0.98765, 4) * 100 == 98.76500000000001).
        percentage = round(predict_acc.item() * 100, 2)
        print(
            f"\n预测准确率:{percentage}% "
            f"预测数量:{correct}/{total},"
            f"保存路径:./weight/best.pth")


if __name__ == "__main__":
    # Instantiating train() runs the entire training pipeline inside __init__.
    train()
