import os
import logging
import time
import torch
import cv2
import numpy as np
from torch.utils.data import DataLoader

from models import MODEL
from loss import loss
from utils.model_utils import *
from torchvision import transforms
from loss import LOSS
from datasets.listDataSet import FileDataSet
from reader.read_utils import *
from reader import Struct
from tqdm import tqdm


# from datetime import datetime
# Disable cuDNN globally — presumably to avoid non-determinism or an op
# unsupported by cuDNN; this costs GPU speed. TODO(review): confirm why.
torch.backends.cudnn.enabled = False

class trainEngine(object):
    """Training driver for the landmark regression model.

    Builds the model, loss, optimizer and dataloader from ``config``,
    then runs the epoch loop with per-epoch evaluation (via ``testEngine``),
    checkpointing, and best-NME tracking.

    Args:
        config: project config object (attribute-style access: ``config.model``,
            ``config.train``, ...).
        transform: unused; kept for interface compatibility.
    """

    def __init__(self, config, transform=None):
        self.config = config
        # Model factory keyed by name; output size = 2 coordinates per landmark.
        self.model = MODEL[config.model.model_name](in_channels=config.model.in_channels,
                                                    num_classes=config.model.num_pts * 2)
        if config.train.checkpoint:
            load_checkpoint(self.model, config.train.checkpoint)
        if config.model.gpus:
            self.model.cuda()

        logging.info(f'using {config.model.loss_name} loss function')
        self.criterion = LOSS[config.model.loss_name]()
        self.optimizer = get_optim(self.model, config)

        self.dataset = FileDataSet(config)  # NOTE: no data augmentation applied
        self.numbatchs = self.dataset.numbatchs

        self.dataloader = DataLoader(dataset=self.dataset,
                                     batch_size=config.train.batch_size,
                                     shuffle=config.train_flag,
                                     num_workers=config.train.num_workers,
                                     collate_fn=self.dataset.collate_fn)

        if config.model.mean_shape:
            # Mean shape is subtracted from GT landmarks so the network
            # regresses residuals (see train_epoch / testEngine.evolution).
            self.mean_shape = np.load(config.model.mean_shape).reshape(1, -1)
            device = 'cuda' if config.model.gpus else 'cpu'
            self.mean_shape = torch.tensor(self.mean_shape, dtype=torch.float32, device=device)

        self.test_engine = testEngine(self.config)

    def train(self):
        """Run the full training loop; saves a checkpoint per epoch and a
        'best' checkpoint whenever the evaluation NME improves."""
        logging.info("Start train network")
        # Baseline evaluation before any training (result is only logged).
        self.test_engine.model = self.model
        self.test_engine.evolution(epoch=self.config.train.start_epoch)

        # was 1; inf guarantees the first evaluated epoch becomes 'best'
        # even if its NME exceeds 1.
        best_nme = float('inf')
        for epoch in range(self.config.train.start_epoch, self.config.train.end_epoch + 1):
            self.train_epoch(epoch)
            save_checkpoint(self.model, config=self.config, epoch=epoch)

            # Evaluate and keep the best model by NME.
            self.test_engine.model = self.model
            nme, failure_rate = self.test_engine.evolution(epoch)
            if nme < best_nme:
                save_checkpoint(self.model, self.config, 'best')
                best_nme = nme
            adjust_lr(self.optimizer, self.config, epoch)

    def train_epoch(self, epoch):
        """Train one epoch over ``self.dataloader``, logging average loss and
        throughput every ``config.train.print_interval`` batches."""
        self.model.train()
        loss_meter = AverageMeter()
        batch_time = AverageMeter()

        end = time.time()
        for step, data in enumerate(self.dataloader, start=1):
            if isinstance(data, list):
                # Drop the leading element (presumably image paths from the
                # collate_fn — TODO confirm) so indices below line up.
                data = data[1:]

            if self.config.test.show:
                showIMG_in_train(data)

            images = data[1]
            landmark_gt = data[2]
            landmark_gt = landmark_gt.reshape(landmark_gt.shape[0], -1)
            if self.config.model.gpus:
                images = images.cuda()
                landmark_gt = landmark_gt.cuda()
            if self.config.model.mean_shape:
                # Residual target, scaled by 2 (inverse of the *0.5 applied
                # at inference in testEngine).
                landmark_gt = (landmark_gt - self.mean_shape) * 2

            images = images.permute(0, 3, 1, 2)  # NHWC -> NCHW
            landmarks = self.model(images)

            # Renamed from `loss`: that name shadowed the imported loss module.
            batch_loss = self.criterion(landmarks, landmark_gt)

            self.optimizer.zero_grad()
            batch_loss.backward()
            self.optimizer.step()

            loss_meter.updata(batch_loss.item(), 1)  # (sic) project AverageMeter API
            batch_time.updata(time.time() - end)
            # Bug fix: refresh the timestamp each batch; previously `end` was
            # never updated, so batch_time measured time-since-epoch-start and
            # the logged Time/Speed figures were wrong.
            end = time.time()

            if step % self.config.train.print_interval == 0 or step == self.numbatchs:
                message = f'Epoch:{epoch}/{self.config.train.end_epoch}[{step}/{self.numbatchs}] ' \
                          f'Time:{round(batch_time.sum, 4)}s Speed:{int(self.config.train.batch_size / batch_time.avg)}sample/s ' \
                          f'Loss:{round(loss_meter.avg, 4)}, lr:{self.config.train.lr}'

                logging.info(message)
                loss_meter.reset()
                batch_time.reset()


class testEngine(object):
    """Inference/evaluation driver for the landmark model.

    Owns its own copy of the config (with ``train_flag`` forced off), its own
    dataset/dataloader, and the optional mean shape used to de-normalize the
    network's residual predictions.
    """

    def __init__(self, config, transform=None):
        # Copy so test-side mutations (train_flag=False) don't leak back
        # into the caller's training config.
        self.config = Struct(config.copy())
        self.config.train_flag = False
        self.model = MODEL[self.config.model.model_name](in_channels=self.config.model.in_channels,
                                                         num_classes=self.config.model.num_pts * 2)
        if self.config.test.checkpoint:
            load_checkpoint(self.model, self.config.test.checkpoint)
        if config.model.gpus:
            self.model.cuda()


        self.dataset = FileDataSet(self.config)
        self.dataloader = DataLoader(dataset=self.dataset, batch_size=self.config.test.batch_size,
                                     shuffle=self.config.test.shuffle,
                                     num_workers=self.config.test.num_workers, collate_fn=self.dataset.collate_fn)

        if self.config.model.mean_shape:
            # NOTE(review): unlike trainEngine, no .reshape(1, -1) here —
            # presumably the file is already flat / broadcastable; confirm
            # against the saved mean-shape array.
            self.mean_shape = np.load(self.config.model.mean_shape)  # .reshape(-1, 2)
            if config.model.gpus:
                self.mean_shape = torch.tensor(self.mean_shape, dtype=torch.float32, device='cuda')
            else:
                self.mean_shape = torch.tensor(self.mean_shape, dtype=torch.float32, device='cpu')

    def test(self):
        """Run inference over the test file list; optionally save predicted
        landmarks to disk and/or draw them onto the cropped images."""
        self.model.eval()
        # Switch the dataset to the test file list, then rebuild the loader
        # so it iterates the new samples.
        self.dataset.getTestFileList(self.dataset.config)
        self.dataloader = DataLoader(dataset=self.dataset, batch_size=self.config.test.batch_size,
                                     shuffle=self.config.test.shuffle,
                                     num_workers=self.config.test.num_workers, collate_fn=self.dataset.collate_fn)
        with torch.no_grad():
            for data in tqdm(self.dataloader):
                imagePath, images, lm, boxs = data
                if self.config.model.gpus:
                    images = images.cuda()

                images = images.permute(0, 3, 1, 2)  # NHWC -> NCHW
                landmarks_pre = self.model(images)
                if self.config.model.mean_shape:
                    # Undo training normalization: prediction is 2*(gt - mean),
                    # so gt = pred * 0.5 + mean.
                    landmarks_pre = landmarks_pre * 0.5 + self.mean_shape

                landmarks_pre = landmarks_pre.reshape(landmarks_pre.shape[0], -1, 2)
                if self.config.model.gpus:
                    landmarks_pre = landmarks_pre.cpu()

                images = images.permute(0, 2, 3, 1)  # back to NHWC for drawing
                for id, img in enumerate(images):
                    # Scale normalized coords by the box size (boxs[id][2:] is
                    # presumably [w, h] — TODO confirm against FileDataSet).
                    landM = landmarks_pre[id] * boxs[id][2:]
                    landM = landM.numpy().tolist()

                    if self.config.test.save_lm:
                        self.save_landmark(landM, imagePath[id], boxs[id])
                    if self.config.test.show:
                        img = self.draw_image(imagePath[id], boxs[id].numpy().tolist(), landM)
                        self.save_img(imagePath[id], img)

    def draw_image(self, imgpath, box, landM):
        """Reload the original image, crop to `box`, and draw `landM` points."""
        img = cv2.imread(imgpath)
        img = self.dataset.get_img_box(box, img)
        img = drawPoints(img.copy(), landM, color=(0, 255, 0), idColor=(0, 0, 255), drawID=False)
        return img
    def save_img(self, imgpath, img):
        """Write `img` into config.test.out_dir under the source image's basename."""
        save_name = os.path.join(self.config.test.out_dir, os.path.basename(imgpath))
        cv2.imwrite(save_name, img)
    def save_landmark(self, landM, imgpath, box):
        """Write predicted points ("x y" per line) next to the source image."""
        savepath = os.path.splitext(imgpath)[0] + '.bigmodel_djw'
        with open(savepath, 'w') as f:
            for p in landM:
                print("{0} {1}".format(p[0], p[1]), file=f)

    def evolution(self, epoch=0):
        """Evaluate on self.dataloader.

        Returns:
            (nme, failure_010_rate): mean normalized mean error and the
            fraction of samples with NME > 0.1. Also logs the 0.08/0.1
            failure rates and computes the AUC curve.
        """
        self.model.eval()

        nme_sum = 0
        count_nme = 0
        count_failure_008 = 0
        count_failure_010 = 0
        nme_list = []

        img_size = self.config.test.img_size

        with torch.no_grad():
            for data in self.dataloader:
                if isinstance(data, list):
                    data = data[1:]  # drop leading element (presumably paths)
                image = data[1]
                landmark_gt = data[2]
                image = image.permute([0, 3, 1, 2])  # NHWC -> NCHW
                if self.config.model.gpus:
                    image = image.cuda()
                    landmark_gt = landmark_gt.cuda()
                landmark = self.model(image)
                if self.config.model.mean_shape:
                    # De-normalize residual prediction (inverse of training's *2).
                    landmark = landmark * 0.5 + self.mean_shape
                landmark = landmark.reshape(landmark.shape[0], -1, 2)
                # NOTE(review): l2_distant is overwritten every batch; the log
                # below reports only the LAST batch's value, and this raises
                # NameError if the dataloader is empty.
                l2_distant = torch.mean(torch.sum((landmark_gt - landmark) * (landmark_gt - landmark), dim=1))

                nme_temp = compute_nme(landmark_gt, landmark)

                count_failure_008 += np.sum(nme_temp > 0.08)
                count_failure_010 += np.sum(nme_temp > 0.1)
                count_nme += nme_temp.shape[0]
                nme_sum += np.sum(nme_temp)
                nme_list += nme_temp.tolist()
        nme = nme_sum / count_nme
        failure_008_rate = count_failure_008 / count_nme
        failure_010_rate = count_failure_010 / count_nme
        logging.info(
            f'Evaluate: epoch: {epoch}, nme: {round(nme, 4)} failure_010_rate: {failure_010_rate} l2_loss: {l2_distant.item()}')
        compute_auc(nme_list, self.config.test.failure_threshold, show_curve=self.config.test.show_auc)
        return nme, failure_010_rate

