import os
from pathlib import Path
import time
from tqdm import tqdm

import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
import torchvision.transforms as transforms

from dataset import *
from utils import *

# Module-level logger shared by the whole file; `Logger` comes from the
# star import of `utils` and writes to ./log/log_sketch.txt.
log = Logger(os.path.join('.', 'log', 'log_sketch.txt'))


class StudentTrainer:
    """Training loop for the student (sketch) branch of the architecture.

    Attributes:
        backbone: Backbone model with projection head and prediction head.
        optimizer: Optimizer updating the backbone parameters.
        scheduler: Learning-rate scheduler, stepped once per epoch.
        device: 'cuda' or 'cpu'.
        epochs: Total number of training epochs (taken from params).
        classes: Total number of classes.
        batch_size: Training batch size (taken from params).
        save_model: Whether to write checkpoints when accuracy improves.
        tb: tensorboardX SummaryWriter logging to ./events_sketch.

    """
    def __init__(self, backbone: nn.Module, optimizer: torch.optim.Optimizer, scheduler, device: str, classes: int,
                 params) -> None:
        log.logger.info(params)
        self.backbone = backbone
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.epochs = params.epochs
        self.classes = classes
        self.batch_size = params.batch_size
        self.save_model = params.save_model
        self.tb = SummaryWriter(os.path.join('.', 'events_sketch'))

    def train(self, num_workers: int = 0):
        """Train the student model on the sketch training split.

        Args:
            num_workers: How many subprocesses to use for data loading.
                0 means that the data will be loaded in the main process.

        """
        transform_train = transforms.Compose([
            transforms.RandomChoice([transforms.RandomCrop(224),
                                     transforms.RandomRotation(degrees=(0, 180), fill=255)]),
            transforms.RandomHorizontalFlip(p=0.5),
            transforms.RandomVerticalFlip(p=0.5),
            transforms.ToTensor(),
            # Dataset-specific grayscale statistics (sketches are mostly white background).
            transforms.Normalize(mean=0.9798703193664551, std=0.10752718895673752),
        ])
        log.logger.info("----- Start loading data -----")
        sk_train_data = SketchDataset(path=sk_load, transform=transform_train, split='train')
        sk_train_loader = DataLoader(dataset=sk_train_data,
                                     batch_size=self.batch_size,
                                     shuffle=True,
                                     num_workers=num_workers)
        log.logger.info("----- Loading data completed -----")

        # Checkpoints are only written once accuracy beats this baseline;
        # it then tracks the best accuracy seen so far.
        max_acc = 98
        metrics = MetricFactory(self.classes, self.device)
        log.logger.info("----- Start training -----")
        for epoch in range(self.epochs):
            loss_rec = AverageMeter()
            loss_cls_rec = AverageMeter()
            start = time.time()
            # The id element of each batch is unused during training.
            for query, label, _ in sk_train_loader:
                query = query.to(self.device)
                label = label.to(self.device)

                loss, loss_cls = self.update(query, label, metrics)

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                # Record plain Python floats (.item()) so the meters do not
                # retain autograd graph references for the whole epoch.
                loss_rec.update(loss.item(), self.batch_size)
                loss_cls_rec.update(loss_cls.item(), self.batch_size)

            run_time = time.time() - start
            self.log_record(epoch + 1, run_time, loss_cls_rec.avg, loss_rec.avg, metrics, 'train')

            # Save before updating max_acc: store_model compares against the
            # previous best so a new best triggers a checkpoint.
            self.store_model(metrics.get('ACC'), max_acc, epoch + 1)
            max_acc = max(metrics.get('ACC'), max_acc)

            metrics.reset()

            self.scheduler.step()

        log.logger.info("Max ACC: {}".format(max_acc))
        log.logger.info("----- Complete the training -----")

    def update(self, query: torch.Tensor, label: torch.Tensor, metrics: MetricFactory):
        """Run one forward pass, compute the loss, and update metrics.

        Args:
            query: Query sketches, tensor of shape (b, 1, W, H).
            label: Labels corresponding to query, tensor of shape (b,).
            metrics: Metric accumulator (ACC, NN, FT, ST, E, DCG, mAP).

        Returns:
            Tuple (loss, loss_cls). Currently identical, since the
            classification loss is the only active loss term.

        """
        predictions, features = self.backbone(x=query, return_features=True)
        loss_cls = cross_entropy_loss(predictions, label)
        loss = loss_cls
        metrics.update(predictions, label)

        return loss, loss_cls

    def log_record(self, epoch: int, run_time: float, loss_cls: float, loss: float, metrics: MetricFactory,
                   mode: str):
        """Print logs to the console, record logs to the log file, and record loss/metrics to tensorboardX.

        Args:
            epoch: Current epoch.
            run_time: Run time of the current epoch in seconds.
            loss_cls: Average classification loss for the epoch.
            loss: Average total loss for the epoch.
            metrics: Metric accumulator; compute() yields ACC, NN, FT, ST, ...
            mode: 'train' or 'test'.

        """
        m = metrics.compute()
        info = "Epoch-{}: {:03d}/{:03d}\t run_time:{:.4f}\tloss: {:.4f}\tloss_cls: {:.4f}\tACC:{:.4f}\tNN: {:.4f}\tFT: {:.4f}\tST: {:.4f}".format(
            mode, epoch, self.epochs, run_time, loss, loss_cls, m['ACC'], m['NN'], m['FT'], m['ST'])
        if mode == 'test':
            # Blank line after test epochs to visually separate epoch pairs.
            info += '\n'
        log.logger.info(info)
        self.tb.add_scalar(mode + '/loss_cls', loss_cls, epoch)
        self.tb.add_scalar(mode + '/loss', loss, epoch)
        self.tb.add_scalar(mode + '/ACC', m['ACC'], epoch)
        self.tb.add_scalar(mode + '/NN', m['NN'], epoch)
        self.tb.add_scalar(mode + '/FT', m['FT'], epoch)
        self.tb.add_scalar(mode + '/ST', m['ST'], epoch)

    def store_model(self, acc: float, max_acc: float, epoch: int):
        """Save the model as a .pth file when accuracy beats the previous best.

        Args:
            acc: Accuracy of the current epoch.
            max_acc: Max accuracy seen so far.
            epoch: Current epoch.

        """
        if (self.save_model and acc > max_acc):
            Path("checkpoints").mkdir(parents=True, exist_ok=True)
            torch.save(
                {
                    'sketch_backbone_state_dict': self.backbone.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                },
                os.path.join('.', 'checkpoints',
                             str(epoch) + '_' + str(acc) + str(time.strftime('%Y%m%d%H%M%S')) + '_sketch.pth'))

    def save_features(self, backbone: str = 'resnet-50', num_workers: int = 0, pretrained: bool = True):
        """Save features of sketches to file.

        Args:
            backbone: Backbone of the network; only 'resnet-50' is supported here.
            num_workers: How many subprocesses to use for data loading. 0 means that the data will be loaded in the main process.
            pretrained: Whether to load the pretrained checkpoint before extraction.

        """
        features_path = os.path.join('.', 'features')
        # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
        os.makedirs(features_path, exist_ok=True)
        if pretrained:
            if backbone == 'resnet-50':
                pth = torch.load(os.path.join('.', 'pth', 'part-shrec14', 'part-sketch_resnet50_79.7917_model.pth'))
            else:
                log.logger.info("----- Wrong backbone input -----")
                return
            self.backbone.load_state_dict(pth['sketch_backbone_state_dict'])

        log.logger.info("----- Start loading data -----")
        transform = transforms.Compose([
            transforms.ToTensor(),
            # shrec13
            # transforms.Normalize(mean=0.9798703193664551, std=0.10752718895673752),
            # shrec14
            # transforms.Normalize(mean=0.9804596304893494, std=0.10598362982273102),
            # part-shrec14
            transforms.Normalize(mean=0.9791669249534607, std=0.10922393947839737),
        ])
        sk_train_data = SketchDataset(path=sk_load, transform=transform, split='train')
        sk_test_data = SketchDataset(path=sk_load, transform=transform, split='test')
        sk_train_loader = DataLoader(dataset=sk_train_data, batch_size=1, shuffle=True, num_workers=num_workers)
        sk_test_loader = DataLoader(dataset=sk_test_data, batch_size=1, shuffle=True, num_workers=num_workers)
        log.logger.info("----- Loading data completed -----")

        log.logger.info("----- Start saving features -----")
        features_memory = {'train': {}, 'test': {}}
        self.push_features_memory(sk_train_loader, features_memory, 'train')
        self.push_features_memory(sk_test_loader, features_memory, 'test')
        filename = 'by_part_shrec14_part_sketch_features_resnet50.pth'
        torch.save(features_memory, os.path.join(features_path, filename))
        log.logger.info("----- Saving features completed -----")

    @torch.no_grad()
    def push_features_memory(self, dataLoader: DataLoader, features_memory: dict, split: str):
        """Extract backbone features from dataLoader into features_memory.

        Args:
            dataLoader: Loader yielding (query, label, id) with batch_size == 1.
            features_memory: Dict with 'train'/'test' sub-dicts mapping label
                strings to lists of {id: features} entries; filled in place.
            split: 'train' or 'test'.

        """
        # NOTE(review): query is not moved to self.device here — confirm the
        # backbone is on the CPU (or that loaders yield device tensors) when
        # exporting features.
        for query, label, sample_id in tqdm(dataLoader):
            _, features = self.backbone(query, True)
            key = str(label.item())
            # setdefault replaces the explicit "if key not in dict" guard.
            # batch_size = 1, so sample_id[0] is the true id.
            features_memory[split].setdefault(key, []).append({sample_id[0]: features})