import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
# from pytorch_network.MobileNetV3 import MobileNetV3
from torchvision.models import resnet18
from tqdm import tqdm

from config import parser
from datalist import Dataset
from utils import Color_print

'''
细度分类 (fineness classification)
'''

best_acc = 0
f=open("train_log.txt",'a',encoding="utf-8")

class train(object):
    """End-to-end trainer for a 10-class image classifier (fineness grading).

    Instantiating this class parses CLI args, builds the train/test
    DataLoaders, constructs a ResNet-18 with a fresh 10-way head,
    optionally resumes from ./weights/best.pth, and immediately runs the
    full train/test loop. Ctrl-C saves a checkpoint before exiting.
    """

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        # Seed both the CPU and (when available) the GPU RNGs for
        # reproducibility. The original seeded only one of the two, so
        # CPU-side randomness (weight init, augmentation) was unseeded
        # on CUDA runs.
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        torch.manual_seed(self.args.seed)
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)

        self.device = torch.device("cuda" if use_cuda else "cpu")
        # num_workers > 0 easily breaks debugging; on Windows it must be 0.
        kwargs = {'num_workers': 6,
                  'pin_memory': True} if use_cuda else {}

        # Build the DataLoaders from the image-list text files.
        with open("dog/test.txt", 'r', encoding='utf-8') as list_file:
            self.testlines = list_file.readlines()
        with open("dog/train.txt", 'r', encoding='utf-8') as list_file:
            self.trainlines = list_file.readlines()

        np.random.seed(10101)  # fixed seed -> reproducible shuffle order
        np.random.shuffle(self.testlines)
        np.random.shuffle(self.trainlines)
        np.random.seed(None)

        self.train_loader = DataLoader(
            Dataset(self.trainlines, type="train"),
            batch_size=self.args.train_batch_size, shuffle=False, **kwargs)
        self.test_loader = DataLoader(
            Dataset(self.testlines, type="test"),
            batch_size=self.args.test_batch_size, shuffle=False, **kwargs)

        # Replace the classification head BEFORE moving to the device.
        # The original called .to(device) first, so the freshly created
        # fc layer stayed on the CPU -> device-mismatch crash under CUDA.
        self.model = resnet18(pretrained=True)
        self.model.fc = nn.Linear(self.model.fc.in_features, 10)
        self.model = self.model.to(self.device)

        # Optionally resume from a checkpoint written by test() below.
        if self.args.resume:
            try:
                print("load the weight from pretrained-weight file")
                model_dict = self.model.state_dict()
                pretrained_dict = torch.load("./weights/best.pth", map_location=self.device)['model_state_dict']
                # Checkpoints are saved from a DataParallel model, so each
                # key carries a 'module.' prefix (7 chars) to strip; any
                # weight whose shape no longer matches is skipped.
                pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if
                                   np.shape(model_dict[k[7:]]) == np.shape(v)}
                model_dict.update(pretrained_dict)
                self.model.load_state_dict(model_dict, strict=True)
                print("Finished to load the weight")
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); resume stays best-effort.
                print("can not load weight \n train the model from scratch")
                self.model.apply(self.weights_init)
        else:
            print("train from beginning")

        # CUDA acceleration: data-parallel across all visible GPUs.
        if use_cuda:
            self.model = torch.nn.DataParallel(
                self.model,
                device_ids=range(torch.cuda.device_count()))
            cudnn.benchmark = True  # autotune conv algorithms (slight speedup)
            cudnn.enabled = True

        # Loss, optimizer and LR schedule (decay by 0.8 every 3 epochs).
        self.criterion = nn.CrossEntropyLoss().to(self.device)
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.args.lr, weight_decay=1e-4)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=3, gamma=0.8)

        # Run the full loop; Ctrl-C checkpoints before exiting.
        try:
            for epoch in range(1, self.args.epochs + 1):
                self.train(epoch)
                self.test(epoch)  # evaluate after every epoch
            torch.cuda.empty_cache()
            Color_print("finish model training")
        except KeyboardInterrupt:
            torch.save({
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
            }, 'weights/best.pth')
            print("\nmodel saved, the performance if better than before")

    def train(self, epoch):
        """Run one training epoch, then step the LR scheduler.

        Args:
            epoch: 1-based epoch index, used only for progress display.
        """
        self.model.train()
        average_loss = []
        pbar = tqdm(self.train_loader,
                    desc=f'Train Epoch{epoch}/{self.args.epochs}')
        # Accumulators live on self.device; the original used .cuda()
        # unconditionally, which crashes on CPU-only machines.
        correct = torch.zeros(1, device=self.device).squeeze()
        total = torch.zeros(1, device=self.device).squeeze()

        for data, target in pbar:
            self.optimizer.zero_grad()
            data, target = data.to(self.device), target.to(self.device)
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            average_loss.append(loss.item())
            pred = torch.argmax(output, 1)
            correct += (pred == target).sum().float()
            total += len(target)
            predict_acc = correct / total
            pbar.set_description(
                f'Train Epoch:{epoch}/{self.args.epochs} train_loss:{round(np.mean(average_loss), 10)} acc:{np.round(np.array(predict_acc.cpu()), 6)} lr: {self.optimizer.param_groups[0]["lr"]}')
        self.scheduler.step()

    def test(self, epoch):
        """Evaluate on the test set; checkpoint when accuracy improves.

        Appends "<epoch>--><acc>" to the module-level log file and, when
        args.save is set and accuracy beats the global best_acc, writes
        weights/best.pth with model/optimizer state.
        """
        global best_acc

        self.model.eval()
        correct = torch.zeros(1, device=self.device).squeeze()
        total = torch.zeros(1, device=self.device).squeeze()
        average_loss = []
        # Defined up front so an empty test loader cannot leave it unbound
        # when it is logged / compared below.
        predict_acc = torch.zeros((), device=self.device)

        pbar = tqdm(self.test_loader,
                    desc=f'Test Epoch{epoch}/{self.args.epochs}',
                    mininterval=0.3)
        for data, target in pbar:
            with torch.no_grad():
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                # Compute the loss once (the original evaluated the
                # criterion twice per batch, and outside no_grad).
                loss = self.criterion(output, target)
            average_loss.append(loss.item())
            pred = torch.argmax(output, 1)
            correct += (pred == target).sum().float()
            total += len(target)
            predict_acc = correct / total
            pbar.set_description(
                f'Test Epoch:{epoch}/{self.args.epochs} test_loss:{round(np.mean(average_loss), 2)} acc:{predict_acc}')

        f.write(str(epoch) + "-->" + str(predict_acc) + '\n')

        if self.args.save and predict_acc > best_acc:
            best_acc = predict_acc
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'acc': best_acc,
                'loss': round(np.mean(average_loss), 2)
            },
                'weights/best.pth')
            # Multiply before rounding; the original rounded first and
            # reported values like 12.339999999999998.
            percentage = round(predict_acc.item() * 100, 2)
            Color_print(
                f"\n预测准确率:{percentage}% "
                f"预测数量:{correct}/{total}"
            )

    def weights_init(self, m):
        """Layer-wise init: Xavier for Linear, He for Conv2d, unit BatchNorm.

        Intended to be applied via model.apply(); modules of other types
        are left untouched.
        """
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)


# Script entry point: constructing the trainer runs the whole pipeline.
if __name__ == "__main__":
    trainer = train()
