from distutils.command.config import config
from http.client import IM_USED
from pytorch_lightning import LightningDataModule, LightningModule, Trainer
from pytorch_lightning.callbacks.progress.tqdm_progress import TQDMProgressBar
from torch import nn
import torch
from torch.nn import functional as F
from torchmetrics import Accuracy
from model.arch import ModelLoader
from model.module.activation import act_layers
from model.loss import loss_function
import copy
import warnings
from typing import Any, List
import os
from os import mkdir


class ClassTask(LightningModule):
    """LightningModule wrapping an image-classification model.

    Builds the network, loss function and accuracy metrics from *config*,
    implements the training/validation steps, and constructs the optimizer
    and LR scheduler from ``config.schedule``.
    """

    def __init__(self, config, *args, **kwargs) -> None:
        """
        Args:
            config: experiment config; must expose ``config.model`` (with
                ``arch.loss.name``) and ``config.schedule`` (with
                ``optimizer`` and ``lr_schedule`` sub-configs).
        """
        super().__init__(*args, **kwargs)
        self.cfg = config
        # NOTE(review): Lightning normally manages device placement itself;
        # the explicit device / .to(...) calls below are redundant but harmless.
        self.task_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

        self.model = ModelLoader(config.model).to(self.task_device)
        self.loss_func = loss_function(config.model.arch.loss.name)
        # Separate metric objects so training and validation state never mix.
        self.val_accuracy = Accuracy().to(self.task_device)
        self.train_accuracy = Accuracy().to(self.task_device)

    def forward(self, x):
        """Run a forward pass through the underlying model."""
        return self.model(x)

    def training_step(self, batch, batch_idx):
        """One optimization step: compute loss and update training accuracy.

        Args:
            batch: ``(image, label)`` pair produced by the train dataloader.
            batch_idx: index of the batch within the epoch (unused).

        Returns:
            Dict with the loss tensor (required by Lightning) and the
            train-accuracy metric object.
        """
        image, label = batch
        logits = self.model(image)
        loss = self.loss_func(logits, label)
        preds = torch.argmax(logits, dim=1)
        self.train_accuracy.update(preds, label)
        self.log('train_loss', loss, prog_bar=True)
        # Logging the metric object lets Lightning compute/reset it per epoch.
        self.log('train_acc', self.train_accuracy, prog_bar=True)
        return {'loss': loss, 'train_acc': self.train_accuracy}

    def training_epoch_end(self, outputs: List[Any]) -> None:
        """Step the LR scheduler once per epoch.

        The scheduler is managed manually (see ``configure_optimizers``),
        so it is advanced here instead of by Lightning.
        """
        print("调整学习率")  # "adjusting learning rate"
        self.lr_scheduler.step()

    def validation_step(self, batch, batch_idx):
        """One validation step: compute loss and update validation accuracy.

        Args:
            batch: ``(image, label)`` pair produced by the val dataloader.
            batch_idx: index of the batch within the epoch (unused).

        Returns:
            Dict with the validation loss and the val-accuracy metric object.
        """
        image, label = batch
        logits = self.model(image)
        loss = self.loss_func(logits, label)

        preds = torch.argmax(logits, dim=1)
        self.val_accuracy.update(preds, label)
        self.log('val_loss', loss, prog_bar=True)
        self.log('val_acc', self.val_accuracy, prog_bar=True)
        # BUG FIX: previously returned self.train_accuracy under 'val_acc'.
        return {'val_loss': loss, 'val_acc': self.val_accuracy}

    def configure_optimizers(self):
        """Build the optimizer and LR scheduler from ``cfg.schedule``.

        The optimizer class is looked up by name in ``torch.optim`` and the
        scheduler in ``torch.optim.lr_scheduler``; remaining config keys are
        forwarded as constructor kwargs. The scheduler is stored on ``self``
        and stepped manually in ``training_epoch_end``, so only the
        optimizer is returned to Lightning.
        """
        optimizer_cfg = copy.deepcopy(self.cfg.schedule.optimizer)
        build_optimizer = getattr(torch.optim, optimizer_cfg.pop('name'))
        optimizer = build_optimizer(params=self.parameters(), **optimizer_cfg)

        schedule_cfg = copy.deepcopy(self.cfg.schedule.lr_schedule)
        build_scheduler = getattr(torch.optim.lr_scheduler, schedule_cfg.pop('name'))
        self.lr_scheduler = build_scheduler(
            optimizer=optimizer, **schedule_cfg)

        return optimizer
