import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch.nn as nn
from config import parser
from datalist import VideoDataset2
from model import C3D
from utils import FocalLoss

# writer = SummaryWriter()

# Highest test accuracy observed so far across epochs; read and updated by
# train.test() (via `global best_acc`) to decide when to save a checkpoint.
best_acc = 0
class train(object):
    """End-to-end training driver for a 2-class C3D video classifier.

    Instantiating this class parses CLI arguments, builds the train/test
    DataLoaders, constructs the model, loss, optimizer and LR scheduler,
    optionally resumes from a checkpoint, and then immediately runs the
    full train/test loop for ``args.epoches`` epochs.

    NOTE(review): running the whole training loop inside ``__init__`` is
    unusual, but it is kept because the script's entry point relies on it
    (``train()`` at the bottom of the file).
    """

    def __init__(self):
        self.args = parser.parse_args()
        print(f"-----------{self.args.project_name}-------------")

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        # Seed BOTH the CPU RNG and (when available) the GPU RNG for
        # reproducibility. The original seeded only one of the two depending
        # on `use_cuda`, so GPU runs left the CPU RNG (used e.g. by the
        # DataLoader shuffle) unseeded.
        torch.manual_seed(self.args.seed)
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # pin_memory only helps (and num_workers config only matters) on GPU.
        loader_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        # Build the train/test DataLoaders.
        self.train_dataloader = DataLoader(VideoDataset2(self.args.train_dir),
                                           batch_size=self.args.train_batch_size,
                                           shuffle=True, **loader_kwargs)
        self.test_dataloader = DataLoader(VideoDataset2(self.args.test_dir),
                                          batch_size=self.args.test_batch_size,
                                          shuffle=True, **loader_kwargs)

        self.model = C3D(2).to(self.device)

        if use_cuda:
            # Wrap for multi-GPU; note this prefixes parameter names with
            # "module.", so checkpoints are only interchangeable between runs
            # with the same wrapping.
            self.model = torch.nn.DataParallel(
                self.model, device_ids=range(torch.cuda.device_count()))

        # Loss, optimizer and LR schedule.
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.model.parameters(), lr=self.args.lr,
                                   momentum=self.args.momentum)
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=5, eta_min=1e-5)

        # Resume model AND optimizer state together, and only when requested.
        # BUG FIX: the optimizer state was previously loaded unconditionally
        # (outside the resume branch and outside any try), which crashed every
        # fresh, non-resume run where the checkpoint file did not exist.
        if self.args.resume:
            try:
                print("load the weight from pretrained-weight file")
                checkpoint = torch.load(self.args.pretrained_weight,
                                        map_location=self.device)
                self.model.load_state_dict(checkpoint['model_state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                print("Finished to load the weight")
            except (OSError, KeyError, RuntimeError) as err:
                # Narrowed from a bare `except:` so programming errors are no
                # longer silently swallowed; the failure reason is reported.
                print(err)
                print("can not load weight \n train the model from stratch")

        for epoch in range(1, self.args.epoches + 1):
            self.train(epoch)
            if epoch % 1 == 0:
                self.test(epoch)

        torch.cuda.empty_cache()
        print("finish model training")

    def train(self, epoch):
        """Run one training epoch over ``self.train_dataloader``.

        Steps the optimizer once per batch and the LR scheduler once per
        epoch; reports running mean loss / accuracy / LR on the progress bar.
        """
        self.model.train()
        average_loss = []
        pbar = tqdm(self.train_dataloader,
                    desc=f'Train Epoch {epoch}/{self.args.epoches}')

        # BUG FIX: these accumulators previously used `.cuda()` unconditionally,
        # which crashed CPU-only runs; allocate on the active device instead.
        correct = torch.zeros(1, device=self.device).squeeze()
        total = torch.zeros(1, device=self.device).squeeze()
        predict_acc = 0.0  # defined even if the loader yields no batches

        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            self.optimizer.zero_grad()  # clear stale gradients before backward
            output = self.model(data)

            loss = self.criterion(output, target)
            loss.backward()
            pred = torch.argmax(output, 1)

            average_loss.append(loss.item())
            correct += (pred == target).sum().float()
            total += len(target)
            predict_acc = correct / total

            self.optimizer.step()
            pbar.set_description(
                f'Train Epoch:{epoch}/{self.args.epoches} '
                f'train_loss:{np.mean(average_loss)} '
                f'acc:{predict_acc} '
                f'lr:{self.optimizer.param_groups[0]["lr"]}')
        self.scheduler.step()

        # writer.add_scalar('train/loss', np.mean(average_loss), epoch)
        # writer.add_scalar('train/acc', predict_acc, epoch)

    def test(self, epoch):
        """Evaluate on ``self.test_dataloader``; checkpoint on a new best.

        Updates the module-level ``best_acc`` and, when ``args.save`` is set
        and accuracy improved, saves model + optimizer state to ./weights_flow.
        """
        global best_acc

        self.model.eval()

        # BUG FIX: device-aware accumulators instead of unconditional `.cuda()`.
        correct = torch.zeros(1, device=self.device).squeeze()
        total = torch.zeros(1, device=self.device).squeeze()
        predict_acc = 0.0  # defined even if the loader yields no batches

        average_loss = []

        pbar = tqdm(self.test_dataloader,
                    desc=f'Test Epoch {epoch}/{self.args.epoches}',
                    mininterval=0.3)
        for data, target in pbar:
            data, target = data.to(self.device), target.to(self.device)
            with torch.no_grad():  # no gradients needed for evaluation
                output = self.model(data)
                loss = self.criterion(output, target)
                average_loss.append(loss.item())
                pred = torch.argmax(output, 1)
                correct += (pred == target).sum().float()
                total += len(target)
                predict_acc = correct / total
            pbar.set_description(
                f'Test Epoch:{epoch}/{self.args.epoches} acc:{predict_acc}')

        # writer.add_scalar('test/loss', np.mean(average_loss), epoch)
        # writer.add_scalar('test/acc', predict_acc, epoch)

        # Guard on `average_loss` so an empty test set never checkpoints with
        # an undefined accuracy / NaN mean loss.
        if self.args.save and average_loss and predict_acc > best_acc:
            best_acc = predict_acc

            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'optimizer_state_dict': self.optimizer.state_dict(),
                'loss': round(np.mean(average_loss), 2)
            },
                "./weights_flow" + f'/Epoch-{epoch}-best_acc-{best_acc}_loss_{round(np.mean(average_loss), 2)}.pth')
            print("model saved")


if __name__ == "__main__":
    # Instantiating `train` runs the entire training loop inside __init__.
    # BUG FIX: the instance was previously bound to the name `train`,
    # shadowing the class itself; use a distinct variable name.
    trainer = train()
