import torch
import math
import sys
import os
from tqdm import tqdm
from hooks import SummaryHook, DefaultHook
from torch.utils.tensorboard import SummaryWriter


class Runner:
    """Drives the train/val work flow for a model: optimizer, LR schedule,
    checkpointing, TensorBoard summaries and hook callbacks.

    The model is expected to expose ``loss(**instance)`` (returns a scalar
    loss tensor) and ``predict(**instance)`` (returns ``(loss, correct_num)``).
    """

    def __init__(self, arg, model, device):
        """
        Args:
            arg: config namespace; fields used here: ``lr``, ``lrf``, ``epochs``,
                ``load_from``, ``work_dir``, ``work_flow``, ``save_epoch``.
            model: the network to train/evaluate.
            device: torch device the model and data are moved to.
        """
        self.arg = arg
        # Build the optimizer over trainable parameters only.
        params = [p for p in model.parameters() if p.requires_grad]
        self.optimizer = torch.optim.SGD(params=params, lr=arg.lr, momentum=0.9, weight_decay=5E-5)
        # Cosine decay from lr down to lr * arg.lrf over arg.epochs epochs.
        lf = lambda x: ((1 + math.cos(x * math.pi / arg.epochs)) / 2) * (1 - arg.lrf) + arg.lrf  # cosine
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lf)
        self.model = model.to(device)
        self.device = device
        if arg.load_from is not None and arg.load_from != '':
            # Safe to load after the optimizer was built: load_state_dict copies
            # tensor data in place, so the parameter objects are unchanged.
            weight_dict = torch.load(arg.load_from, map_location=device)
            self.model.load_state_dict(weight_dict)

        self.is_master = True  # default to master node
        # Current training epoch (updated by run(); exposed via .epoch for hooks).
        self._epoch = 0
        # Global train-iteration counter: +1 per train iter, val iters excluded.
        self._iter = 0
        # Iteration index within the current train/val epoch; reset every epoch.
        self._inner_iter = 0

        if self.is_master:  # only the master node writes summaries
            summary_path = os.path.join(arg.work_dir, 'summary')
            self.tbwriter = SummaryWriter(summary_path)

        self.hooks = []
        hook0 = DefaultHook()
        hook = SummaryHook()
        self.hooks.append(hook0)
        self.hooks.append(hook)
        # Hooks are assumed to define ordering (e.g. by priority) for sort().
        self.hooks.sort()

    def call_hook(self, name):
        """Invoke the method ``name`` on every registered hook, passing this runner."""
        for hook in self.hooks:
            getattr(hook, name)(self)

    def run(self, dataloaders: dict):
        """Run the configured work flow until ``self.arg.epochs`` train epochs finish.

        Args:
            dataloaders: mapping with a 'train' loader and, if the work flow
                contains a 'val' task, a 'val' loader. Each yields (img, label).
        """
        assert 'train' in self.arg.work_flow, '必须要用训练任务'
        epoch = 0
        best_accuracy = 0.0
        self.call_hook('before_run')
        while epoch < self.arg.epochs:
            for task, times in self.arg.work_flow.items():
                if task == 'train':  # training task
                    for _ in range(times):
                        epoch += 1  # only training rounds advance the epoch count
                        self._epoch = epoch  # keep hook-visible epoch in sync (was never updated)
                        self.model.train()
                        self.call_hook('before_train_epoch')
                        loss_sum = 0.0
                        data_loader = tqdm(dataloaders['train'], file=sys.stdout)
                        step = -1  # so an empty loader cannot leave 'step' undefined
                        for step, (img, label) in enumerate(data_loader):
                            instance = {
                                'data': img.to(self.device),
                                'label': label.to(self.device)
                            }
                            self._inner_iter = step
                            self.call_hook('before_train_iter')
                            loss = self.model.loss(**instance)
                            loss_sum += loss.detach()  # detach: never grow the autograd graph
                            loss.backward()
                            self.optimizer.step()
                            self.optimizer.zero_grad()
                            data_loader.desc = "[train epoch {}] loss: {:.3f}". \
                                format(epoch, loss_sum.item() / (step + 1))
                            self._iter += 1  # global train-iter counter (was never incremented)
                            self.call_hook('after_train_iter')
                        self.scheduler.step()
                        self.outputs = {
                            'loss': loss_sum / (step + 1.0)
                        }
                        print('train: epoch={}, loss={}'.format(epoch, self.outputs['loss']))
                        self.call_hook('after_train_epoch')

                elif task == 'val':  # validation task
                    for _ in range(times):
                        self.model.eval()
                        self.call_hook('before_val_epoch')
                        with torch.no_grad():
                            loss_sum = 0.0
                            sample_sum = 0.0
                            correct_sum = 0.0
                            # Wrap in tqdm like the train loop: setting .desc on a
                            # raw DataLoader was a silent no-op (nothing displayed).
                            val_loader = tqdm(dataloaders['val'], file=sys.stdout)
                            step = -1  # guard against an empty val loader
                            for step, (img, label) in enumerate(val_loader):
                                instance = {
                                    'data': img.to(self.device),
                                    'label': label.to(self.device)
                                }
                                self._inner_iter = step
                                self.call_hook('before_val_iter')
                                loss, correct_num = self.model.predict(**instance)
                                loss_sum += loss.detach()
                                sample_sum += len(img)
                                correct_sum += correct_num
                                val_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}". \
                                    format(epoch, loss_sum / (step + 1.0), float(correct_sum) / float(sample_sum))
                                self.call_hook('after_val_iter')

                        self.outputs = {
                            'loss': loss_sum / (step + 1.0),
                            'accuracy': float(correct_sum) / float(sample_sum)
                        }
                        print('val: epoch={}, loss={}, accuracy={}'.format
                                  (epoch, self.outputs['loss'], self.outputs['accuracy']))
                        self.call_hook('after_val_epoch')
                        if self.outputs['accuracy'] > best_accuracy:
                            best_accuracy = self.outputs['accuracy']
                            if epoch >= self.arg.save_epoch:
                                # Save under the configured work dir; the previous
                                # hard-coded cwd/'work_dir' path was inconsistent
                                # with the summary path built from arg.work_dir.
                                os.makedirs(self.arg.work_dir, exist_ok=True)
                                path = os.path.join(self.arg.work_dir, 'vit.pth')
                                torch.save(self.model.state_dict(), path)
                                print('save model to {}'.format(path))

                else:
                    raise ValueError('task must be in [train, val, test]')
        self.call_hook('after_run')

    @property
    def epoch(self):
        """int: Current epoch."""
        return self._epoch

    @property
    def iter(self):
        """int: Current iteration."""
        return self._iter

    @property
    def inner_iter(self):
        """int: Iteration in an epoch."""
        return self._inner_iter