import os
import pickle
from copy import deepcopy

import numpy as np
import torch
from torch.optim import *
from accelerate import Accelerator
from tqdm.auto import tqdm
import pandas as pd
from matplotlib import pyplot as plt
import torch.nn.functional as F
from torchvision import transforms as T
from PIL import Image, ImageDraw, ImageFont
import matplotlib.font_manager as fm
from transformers import get_cosine_schedule_with_warmup

from ylcls.models import ClsModel
from ylcls.utils import LOGGER, colorstr, time_sync, smart_inference_mode


class Classification:
    """End-to-end image-classification pipeline: training, validation,
    single-image inference, and checkpoint/metric persistence.

    A single ``config`` object supplies the model, dataloaders and every
    hyper-parameter.  Device placement and mixed precision are delegated to
    ``accelerate.Accelerator``.  Passing ``config=None`` builds an empty
    shell that can later be populated via :meth:`load`.
    """
    def __init__(self, config=None):
        # config=None leaves the instance empty so that load() can restore it.
        if config:
            self.init_base(config)
            self.init_dataloader(config)
            self.init_model(config)
    def init_base(self, config):
        """Keep a reference to the config and reset the metric bookkeeping."""
        self.config = config
        self.index2label = config.index2label
        # Running sums (reset every epoch) plus per-epoch history lists.
        self.logger = dict(
            train_loss=0, train_total_num=0, train_t_num=0,
            train_loss_list=[], valid_loss_list=[],
            valid_loss=0, valid_total_num=0, valid_t_num=0,
            train_acc_list=[], valid_acc_list=[],
            valid_best_acc=0
        )
    def init_dataloader(self, config):
        """Attach the pre-built train / valid dataloaders from the config."""
        self.train_dataloader = config.train_dataloader
        self.valid_dataloader = config.valid_dataloader
    def init_model(self, config):
        """Copy the model, build the optimizer + cosine LR schedule, then wrap
        everything with ``accelerate`` for device / precision handling."""
        self.model = deepcopy(config.model)
        self.get_optimizer(config)
        # Warm up for `warm_epochs` worth of optimizer steps.
        lr_warmup_steps = len(self.train_dataloader) * config.warm_epochs
        self.lr_scheduler = get_cosine_schedule_with_warmup(
            optimizer=self.optimizer,
            num_warmup_steps=lr_warmup_steps,
            num_training_steps=len(self.train_dataloader) * config.num_epochs)
        self.accelerator = Accelerator(mixed_precision=config.mixed_precision)
        self.device = self.accelerator.device
        self.model = self.model.to(self.device)
        (self.model, self.optimizer, self.train_dataloader,
         self.valid_dataloader, self.lr_scheduler) = self.accelerator.prepare(
            self.model,
            self.optimizer,
            self.train_dataloader,
            self.valid_dataloader,
            self.lr_scheduler)
    def get_optimizer(self, config):
        """Instantiate ``config.optimizer`` (an optimizer *class*) over the
        model parameters.

        Bug fix: the original tested ``isinstance(config.optimizer, SGD)``,
        which is always False for a class object (both branches then call it
        as a constructor), so SGD was silently created without ``momentum``.
        Check the class itself instead.
        """
        optimizer_cls = config.optimizer
        if isinstance(optimizer_cls, type) and issubclass(optimizer_cls, SGD):
            self.optimizer = optimizer_cls(
                self.model.parameters(), lr=config.learning_rate,
                momentum=config.momentum, weight_decay=config.weight_decay)
        else:
            self.optimizer = optimizer_cls(
                self.model.parameters(), lr=config.learning_rate,
                weight_decay=config.weight_decay)
    def train(self):
        """Train for ``config.num_epochs`` epochs, validating (and saving the
        best checkpoint) after every epoch; finally dump the metric CSV and
        loss/accuracy curves to ``config.output_dir``."""
        os.makedirs(self.config.output_dir, exist_ok=True)
        prefix = colorstr('train: ')
        self.model.train()
        LOGGER.info("🚀yl-classification training starts!")
        global_step = 0
        t1 = time_sync()
        for epoch in range(self.config.num_epochs):
            # Reset the per-epoch accumulators.
            self.logger['train_loss'] = 0
            self.logger['train_total_num'] = 0
            self.logger['train_t_num'] = 0
            with tqdm(total = len(self.train_dataloader), desc=f'train : Epoch [{epoch + 1}/{self.config.num_epochs}]', postfix=dict,mininterval=0.3) as pbar:
                for images, labels in self.train_dataloader:
                    images = images.to(self.device)
                    labels = labels.to(self.device)
                    output = self.model(images)
                    loss = F.cross_entropy(output, labels)
                    self.logger['train_loss'] += loss.detach().item()
                    self.logger['train_total_num'] += images.shape[0]
                    # Vectorized tensor .sum() instead of the Python builtin
                    # sum() over the batch: same result, one op.
                    self.logger['train_t_num'] += (output.argmax(-1) == labels).sum().item()
                    mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                    self.accelerator.backward(loss)
                    self.optimizer.step()
                    self.lr_scheduler.step()
                    self.optimizer.zero_grad()
                    logs = {'mem' : mem,
                            "loss": loss.detach().item(),
                            "lr": self.lr_scheduler.get_last_lr()[0],
                            "step": global_step}
                    pbar.set_postfix(**logs)
                    pbar.update(1)
                    global_step += 1
            # NOTE(review): "mean loss" is (sum of batch-mean losses) / n_images,
            # not a true per-sample mean; kept as-is for continuity with old logs.
            train_mean_loss = round(self.logger['train_loss'] / self.logger['train_total_num'], 4)
            self.logger['train_loss_list'].append(train_mean_loss)
            train_acc = round((self.logger['train_t_num'] / self.logger['train_total_num']) * 100, 2)
            self.logger['train_acc_list'].append(train_acc)
            LOGGER.info(f'{prefix} Epoch [{epoch + 1}/{self.config.num_epochs}] total_mean_loss:{train_mean_loss} acc:{train_acc}')
            self.val(epoch, save = True)
        del images, labels, output, loss  # free training tensors to save GPU memory
        self.save_loss_acc_csv()
        self.save_loss_acc_curve()
        t2 = time_sync()
        LOGGER.info(f"🚀yl-classification training ends! total time:{(t2 - t1) / 3600:.3f} hours!")
        torch.cuda.empty_cache()
    @smart_inference_mode()
    def val(self, epoch = None, save = False):
        """Run one validation pass (fp16 off-CPU) and, when ``save`` is True,
        persist the model whenever validation accuracy beats the best so far.

        Args:
            epoch: current epoch index for logging; None means a standalone
                call, which is labelled as the final epoch.
            save: save the model on a new best validation accuracy.
        """
        os.makedirs(self.config.output_dir, exist_ok=True)
        # Bug fix: compare the device *type*; `torch.device != 'cpu'` string
        # comparison is not reliable on every torch version.
        half = self.device.type != 'cpu'
        self.model = self.model.half() if half else self.model.float()
        self.model.eval()
        if epoch is None:
            epoch = self.config.num_epochs - 1
        prefix = colorstr('valid: ')
        self.logger['valid_loss'] = 0
        self.logger['valid_total_num'] = 0
        self.logger['valid_t_num'] = 0
        global_step = 0
        with tqdm(total = len(self.valid_dataloader), desc=f'valid : Epoch [{epoch + 1}/{self.config.num_epochs}]', postfix=dict,mininterval=0.3) as pbar:
            for images, labels in self.valid_dataloader:
                images = images.to(self.device)
                labels = labels.to(self.device)
                images = images.half() if half else images
                output = self.model(images)
                # Compute the loss in fp32 regardless of model precision.
                output = output.float()
                loss = F.cross_entropy(output, labels)
                self.logger['valid_loss'] += loss.detach().item()
                self.logger['valid_total_num'] += images.shape[0]
                self.logger['valid_t_num'] += (output.argmax(-1) == labels).sum().item()
                mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G'  # (GB)
                logs = {'mem' : mem,
                        "loss": loss.detach().item(),
                        "lr": self.lr_scheduler.get_last_lr()[0],
                        "step": global_step}
                pbar.set_postfix(**logs)
                pbar.update(1)
                global_step += 1
        del images, labels, output, loss  # free validation tensors to save GPU memory
        valid_mean_loss = round(self.logger['valid_loss'] / self.logger['valid_total_num'], 4)
        self.logger['valid_loss_list'].append(valid_mean_loss)
        valid_acc = round((self.logger['valid_t_num'] / self.logger['valid_total_num']) * 100, 2)
        self.logger['valid_acc_list'].append(valid_acc)
        if valid_acc > self.logger['valid_best_acc'] and save:
            self.logger['valid_best_acc'] = valid_acc
            self.save_model()
            LOGGER.info('🚀yl-classification best model has saved!')
        LOGGER.info(f'{prefix}Epoch [{epoch + 1}/{self.config.num_epochs}] total_mean_loss:{valid_mean_loss} acc:{valid_acc}')
        # Restore training precision and mode before returning to the train loop.
        self.model.float()
        self.model.train()
    @smart_inference_mode()
    def detect_one(self, model, image_path, image_save_path_dir, save_img, trans):
        """Classify one image, draw label + confidence onto it, and return the
        forward-pass time in milliseconds."""
        img = Image.open(image_path)
        img_name = os.path.split(image_path)[-1]
        image_save_path = os.path.join(image_save_path_dir, img_name)
        img_tensor = trans(img).unsqueeze(0).to(self.device)
        # Match the model precision chosen in detect() (fp16 off-CPU).
        img_tensor = img_tensor.half() if self.device.type != 'cpu' else img_tensor.float()
        t1 = time_sync()
        out = model(img_tensor).softmax(-1).squeeze().cpu()
        t2 = time_sync()
        t = int((t2 - t1) * 1000)
        arg = out.argmax().item()
        conf = round(out[arg].item() * 100, 2)
        draw = ImageDraw.Draw(img)
        font_type = ImageFont.truetype(fm.findfont(fm.FontProperties(family='DejaVu Sans')), 25)
        draw.text((10, 10), f'label:{self.index2label[arg]}', font = font_type, fill=(255, 255, 255))
        draw.text((10, 40), f'conf:{conf}', font = font_type, fill=(255, 255, 255))
        LOGGER.info(f'image_name:{img_name} detect_time:{t}ms label:{self.index2label[arg]} conf:{conf}')
        if save_img:
            img.save(image_save_path)
        return t
    @smart_inference_mode()
    def detect(self, image_path=None, save_img=True):
        """Run inference on a single file, a directory of images, or an
        iterable of paths; falls back to ``config.test_images_file_list``
        when no path is given.  Logs the average per-image forward time."""
        if self.config.test_images_file_list is not None and image_path is None:
            image_path = self.config.test_images_file_list
        # Work on a copy so fuse()/half() never touch the live training model.
        model = deepcopy(self.model)
        if isinstance(model, ClsModel):
            model.fuse()
        model.half() if self.device.type != 'cpu' else model.float()
        image_save_path_dir = os.path.join(self.config.output_dir, "detect_images")
        os.makedirs(image_save_path_dir, exist_ok=True)
        LOGGER.info('🚀yl-classification detection starting!')
        t = 0
        trans = T.Compose(
                [
                    T.Resize((self.config.image_size, self.config.image_size)),
                    T.ToTensor(),
                    T.Normalize([0.5], [0.5]),
                ]
        )
        if isinstance(image_path, str):
            if os.path.isfile(image_path):
                t += self.detect_one(model, image_path, image_save_path_dir, save_img, trans)
                t_per = t
            else:
                image_list = os.listdir(image_path)
                image_path_list = [os.path.join(image_path, image) for image in image_list]
                for image in tqdm(image_path_list):
                    t += self.detect_one(model, image, image_save_path_dir, save_img, trans)
                # Bug fix: the original divided by len(image_path) -- the
                # length of the directory *string* -- not the image count.
                # max(..., 1) also guards against an empty directory.
                t_per = int(t / max(len(image_path_list), 1))
        else:
            for image in tqdm(image_path):
                t += self.detect_one(model, image, image_save_path_dir, save_img, trans)
            t_per = int(t / max(len(image_path), 1))
        LOGGER.info('🚀yl-classification detection end!')
        LOGGER.info(f'detection time per image:{t_per}ms')

    def save_loss_acc_csv(self):
        """Dump per-epoch loss/accuracy history to ``loss_acc.csv``."""
        loss_df = pd.DataFrame({'train_loss' : self.logger['train_loss_list'],
                                'valid_loss' : self.logger['valid_loss_list'],
                                'train_acc' : self.logger['train_acc_list'],
                                'valid_acc' : self.logger['valid_acc_list']})
        loss_df.to_csv(os.path.join(self.config.output_dir,'loss_acc.csv'), index=False)
    def save_loss_acc_curve(self):
        """Plot loss and accuracy curves and save them as a PNG."""
        fig, ax = plt.subplots(1, 2, figsize=(12, 6))
        ax[0].set_title('loss_curve', fontsize = 15, fontweight ='bold')
        ax[1].set_title('acc_curve', fontsize=15, fontweight='bold')
        # Show grid lines.
        ax[0].grid()
        ax[1].grid()
        # x-axis labels.
        ax[0].set_xlabel('epochs',fontsize = 15, fontweight ='bold')
        ax[1].set_xlabel('epochs',fontsize = 15, fontweight ='bold')
        # y-axis labels.
        ax[0].set_ylabel('loss',fontsize = 15, fontweight ='bold')
        ax[1].set_ylabel('acc',fontsize = 15, fontweight ='bold')
        # Plot one point per epoch.
        x = np.arange(0, self.config.num_epochs)
        ax[0].plot(x, self.logger['train_loss_list'], color = 'blue', label = 'train_loss')
        ax[0].plot(x, self.logger['valid_loss_list'], color = 'green', label = 'valid_loss')
        ax[1].plot(x, self.logger['train_acc_list'], color = 'red', label = 'train_acc')
        ax[1].plot(x, self.logger['valid_acc_list'], color = 'yellow', label = 'valid_acc')
        ax[0].legend(loc='upper right')  # legend in the top-right corner
        ax[1].legend(loc='upper right')  # legend in the top-right corner
        plt.savefig(os.path.join(self.config.output_dir, 'loss_acc_curve.png'), bbox_inches='tight', dpi=300)
        plt.show()
    def save_model(self):
        """Save the current model (fused when supported) in fp16.

        Bug fix: the original called ``self.model.fuse(...)`` in place, which
        permanently fused conv+bn layers of the *live* training model the
        first time a best checkpoint was saved; save a deep copy instead.
        """
        model_to_save = deepcopy(self.model)
        if isinstance(model_to_save, ClsModel):
            model_name = model_to_save.model_name
            torch.save(model_to_save.fuse(verbose=False).half(), os.path.join(self.config.output_dir, f'{model_name}.pt'))
        else:
            model_name = self.config.model_args['model_config']['model_name']
            torch.save(model_to_save.half(), os.path.join(self.config.output_dir, f'{model_name}.pt'))
    def save_config(self):
        """Pickle the config (minus the model, which save_model() persists
        separately) next to the checkpoints."""
        if isinstance(self.model, ClsModel):
            config_name = self.model.model_name
        else:
            config_name = self.config.model_args['model_config']['model_name']
        # Drop the (large, separately saved) model before pickling the config.
        self.config.model = None
        with open(os.path.join(self.config.output_dir, f'{config_name}.pickle'), "wb") as f:
            pickle.dump(self.config, f)
    def load(self, checkpoint_path, config_pickle_path):
        """Rebuild the instance from a saved checkpoint + pickled config.

        SECURITY: ``torch.load`` and ``pickle.load`` execute arbitrary code
        while deserializing — only load files from trusted sources.
        """
        ckpt = torch.load(checkpoint_path)
        with open(config_pickle_path, 'rb') as f:
            config = pickle.load(f)
        # NOTE(review): save_model() stores checkpoints in fp16; if resuming
        # training, ckpt.float() may be needed first — confirm with callers.
        config.model = ckpt
        self.init_base(config)
        self.init_dataloader(config)
        self.init_model(config)