import sys
import os
import math
import numpy as np
from io import StringIO
import time

from tqdm import tqdm
from torch.utils.tensorboard.writer import SummaryWriter
from torchinfo import summary

import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data.dataloader import DataLoader
from torch.cuda.amp import autocast

from torch.optim.adamw import AdamW
from lion_pytorch import Lion

from pytorch_metric_learning.utils.inference import MatchFinder, InferenceModel
from loss_func.TripletLoss import styleKnn, compute_gram

from datetime import datetime
from pathlib import Path
from dataset import ZhCharDataset, BalancedSampler
from utils import init_log, cycle_triangular, dict2json, config2Log

from IPython import embed


class TrainerConfig:
    """Default hyper-parameters for Trainer; any field may be overridden via kwargs."""

    # --- optimisation ------------------------------------------------------
    max_epochs = 10
    batch_size = 64
    learning_rate = 3e-4
    patience = 20
    max_num_trial = 10

    optim = 'AdamW'  # alternative: 'Lion'
    betas = (0.9, 0.95)
    grad_norm_clip = 1.0
    # Weight decay is only applied to matmul weights; the LR schedule is a
    # linear warmup followed by a decay towards ~10% of the initial LR.
    weight_decay = 0.1
    lr_decay = False
    lr_decay_type = 'exp'  # one of: 'cos', 'exp', 'cos+exp', 'cycle_exp'
    lr_decay_rate = 0.5
    warmup_num = 375e6  # warmup/final sample counts follow the GPT-3 paper;
    final_num = 260e9   # point at which the LR reaches 10% of its initial value
    trained_num = 0

    # --- checkpointing / data loading --------------------------------------
    ckpt_path = None
    num_workers = 1  # DataLoader worker count

    def __init__(self, **kwargs):
        # Every keyword shadows the class-level default of the same name.
        self.__dict__.update(kwargs)

class Trainer:
    def __init__(self, model, train_dataset, val_dataset, config, pretrained_ckpt_path=None):
        """Wire up logging, tensorboard, device placement and optional warm-start.

        Args:
            model: network to train; expected to expose `.name`, `.model_conf`,
                `.save()`/`.load()`, and — for metric learning — `.trained_embeds`,
                `.trained_labels` and `.lossfunc` (`.style_loss_fn` for
                'metric_feature').
            train_dataset: training set (train() also reads `.root` and `.label2i`).
            val_dataset: validation set, or None to skip validation.
            config: TrainerConfig-like object; additionally needs `.seed`,
                `.log_dir`, `.run_dir` and `.train_type`.
            pretrained_ckpt_path: optional checkpoint to load before training.
        """
        self.best_acc = 0.0
        self.config = config
        self.model = model
        self.best_model_path = None
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset

        # Create unique identifier for this run
        run_id = f"{model.name}_seed{config.seed}"

        # Setup logging paths
        log_dir = Path(f"{config.log_dir}")
        log_dir.mkdir(parents=True, exist_ok=True)
        self.logger = init_log(f'{run_id}_train_log', log_dir/f"{run_id}_train.log")

        # Setup tensorboard writer
        run_dir = Path(f"{config.run_dir}/{run_id}_train")
        run_dir.mkdir(parents=True, exist_ok=True)
        self.writer = SummaryWriter(run_dir)

        # Log the training configuration parameters
        t_conf = {key:getattr(config, key) for key in dir(config) if not key.startswith('__')}
        json_data = dict2json(t_conf)
        self.logger.info("trainer_configure:\n%s", json_data)

        model_logInf = config2Log(model.model_conf)
        self.logger.info(f"{model_logInf}")

        # per-run learning rate and bookkeeping counters
        self.lr = config.learning_rate
        self.steps = 0
        self.num_correct_in_epoch = 0
        self.num_checked_in_epoch = 0
        self.losses_per_epoch = []
        self.losses_per_steps = []
        self.acc_per_steps = []
        self.lr_per_steps = []

        self.train_type = config.train_type

        if pretrained_ckpt_path is not None:
            self.model.load(pretrained_ckpt_path)

        # take over whatever gpus are on the system
        self.device = 'cpu'
        if torch.cuda.is_available():
            self.device = torch.cuda.current_device()
            # BUGFIX: the log message previously lacked its closing quote
            self.logger.info(f"Set trainer.device to 'cuda:{self.device}'")
        self.model.to(self.device)

        if self.train_type == 'metric':
            # Keep the reference embeddings/labels on the training device so the
            # k-NN lookups compare tensors on the same device as the model output
            # (torch may otherwise silently leave them elsewhere, causing
            # device-mismatch errors).
            self.model.trained_embeds = self.model.trained_embeds.to(self.device)
            self.model.trained_labels = self.model.trained_labels.to(self.device)
            # mining method used by the triplet loss
            self.logger.info(f"mining method set to {self.model.lossfunc.mining}")

        if self.train_type == 'metric_feature':
            self.logger.info(f"mining method set to {self.model.style_loss_fn.mining}")

    def save_checkpoint(self, ckpt_save_path:str):
        ckpt_model = self.model.module if hasattr(self.model, "module") else self.model
        # Generate filename with model name, seed, timestamp and accuracy
        base_name = f"{self.model.name}_seed{self.config.seed}_acc{self.best_acc:.4f}"
        model_path = os.path.join(ckpt_save_path, f"{base_name}.ckpt")
        optim_path = os.path.join(ckpt_save_path, f"{base_name}.ckpt.optim")
        
        # Use model's custom save method
        ckpt_model.save(model_path)
        # self.logger.info("Saved model weights to: %s", model_path)
        
        # Save optimizer state
        torch.save(self.optimizer.state_dict(), optim_path)
        self.logger.info("Saved optimizer state to: %s", optim_path)
        
        return model_path, optim_path

    def load_checkpoint(self, ckpt_save_path:str):
        ckpt_model = self.model.module if hasattr(self.model, "module") else self.model
        ckpt_model.load(ckpt_save_path)
        self.logger.info("load the model state from %s", ckpt_save_path)
        # also load the optimizers' state
        self.logger.info("restore the state of optimizer")
        self.optimizer.load_state_dict(torch.load(ckpt_save_path + '.optim'))

    def record_Info_per_step(self, writer, lr, loss,
                             num_checked, num_correct=None, acc=None,
                             record_step=10, is_train=True, train_type='metric'):
        """Accumulate per-step statistics and, every `record_step` training
        steps, flush their averages to tensorboard.

        Accuracy is only tracked for the 'feature' and 'metric_feature'
        training modes; metric-only training has no per-step accuracy (its
        accuracy comes from k-NN validation instead).
        """
        tracks_acc = train_type in ('feature', 'metric_feature')

        self.losses_per_steps.append(loss)
        self.losses_per_epoch.append(loss)
        self.lr_per_steps.append(lr)
        self.num_checked_in_epoch += num_checked

        if tracks_acc:
            assert acc is not None
            assert num_correct is not None
            self.acc_per_steps.append(acc)
            self.num_correct_in_epoch += num_correct

        if not is_train:
            # validation steps only accumulate; nothing is written per step
            return

        self.steps += 1
        if self.steps % record_step != 0:
            return

        writer.add_scalar(f'Train_per_{record_step}_steps/Loss',
                          np.mean(self.losses_per_steps), self.steps)
        self.losses_per_steps = []

        # metric learning runs no per-step validation, hence no accuracy curve
        if tracks_acc:
            writer.add_scalar(f'Train_per_{record_step}_steps/Acc',
                              np.mean(self.acc_per_steps), self.steps)
            self.acc_per_steps = []

        assert lr is not None
        writer.add_scalar(f'Train_per_{record_step}_steps/lr',
                          np.mean(self.lr_per_steps), self.steps)
        self.lr_per_steps = []

    def report_Info_per_epoch(self, epoch, train=True, train_type='metric', time_elapsed=0.0):
        """Flush the accumulated epoch statistics to tensorboard/log and reset them.

        Returns (avg_loss, avg_acc). avg_acc is None for metric-only training
        epochs, where no accuracy is tracked during training.
        """
        avg_loss = np.mean(self.losses_per_epoch)
        avg_acc = None

        if train:
            self.writer.add_scalar('Train_per_epoch/loss', avg_loss, epoch)
            # feature learning also reports the accuracy of the training epoch
            if train_type in ('feature', 'metric_feature'):
                avg_acc = self.num_correct_in_epoch / self.num_checked_in_epoch
                self.writer.add_scalar('Train_per_epoch/acc', avg_acc, epoch)
                self.logger.info("Epoch: %d, Avg train loss: %f, correct in epoch: %d, accuracy: %f, time_elapsed: %fs", 
                                 epoch, avg_loss, self.num_correct_in_epoch, avg_acc, time_elapsed)
            if train_type == 'metric':
                self.logger.info("Epoch: %d, Avg train loss: %f, time_elapsed: %fs", 
                                 epoch, avg_loss, time_elapsed)
        else:
            self.writer.add_scalar('Val_per_epoch/loss', avg_loss, epoch)
            avg_acc = self.num_correct_in_epoch / self.num_checked_in_epoch
            self.writer.add_scalar('Val_per_epoch/acc', avg_acc, epoch)
            self.logger.info("Epoch: %d, Avg val loss: %f, correct in epoch: %d, accuracy: %f, time_elapsed: %fs",
                              epoch, avg_loss, self.num_correct_in_epoch, avg_acc, time_elapsed)

        # start the next epoch's bookkeeping from a clean slate
        self.losses_per_epoch = []
        self.acc_per_steps = []
        self.num_correct_in_epoch = 0
        self.num_checked_in_epoch = 0

        return avg_loss, avg_acc

                


    def train(self):
        """Main optimisation loop.

        Builds the optimizer (with selective weight decay), then alternates
        training and validation epochs until `config.max_epochs` is reached or
        early stopping triggers (`patience` epochs without improvement, retried
        `max_num_trial` times from the best checkpoint).

        Returns:
            The best validation accuracy observed.
        """
        config = self.config
        model = self.model.module if hasattr(self.model, "module") else self.model
        self.logger.info(f"training model:'{model.name}' on dataset '{self.train_dataset.root}'")
        self.logger.info(f"num_classes: {len(self.train_dataset.label2i)}")

        sample_x, sample_y, _ = self.train_dataset[0]
        C,H,W = sample_x.shape

        # Capture torchinfo's stdout summary so it lands in the log file, then
        # restore the real stdout even if summary() raises.
        output_temp = StringIO()
        sys.stdout = output_temp
        try:
            print(f"For single sample:")
            # BUGFIX: use the actual sample shape instead of the hard-coded
            # 96x96 (C,H,W were computed above but never used)
            summary_data = torch.randn([1, C, H, W]).to(self.device)
            summary_label = torch.ones([1]).long().to(self.device)
            summary(model, input_data=[summary_data, summary_label])
            print(f"{'-'*80}\nFor current batch sample:")
            summary_data = torch.randn([self.config.batch_size, C, H, W]).to(self.device)
            summary_label = torch.ones([self.config.batch_size]).long().to(self.device)
            summary(model, input_data=[summary_data, summary_label])
        finally:
            sys.stdout = sys.__stdout__
        self.logger.info(output_temp.getvalue())

        # create the optimizer; biases and normalisation weights get no decay
        if config.weight_decay is not None:
            no_decay = ["bias", "LayerNorm.weight", "norm", "shortcut.1.0.weight"]
            params_decay = [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)]
            params_nodecay = [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]
            optim_groups = [
                {"params": params_decay, "weight_decay": config.weight_decay},
                {"params": params_nodecay, "weight_decay": 0.0},
            ]
        else:
            optim_groups = model.parameters()

        if config.optim == 'Lion':
            self.optimizer = Lion(optim_groups, lr=config.learning_rate, betas=config.betas)
            self.logger.info(f"Using Lion Optimizer, betas:{self.config.betas}")
        else:
            self.optimizer = optim.AdamW(optim_groups, lr=config.learning_rate, betas=config.betas)
            self.logger.info(f"Using AdamW Optimizer, betas:{self.config.betas}")

        self.train_sampler = None
        self.val_sampler = None

        def run_epoch(split):
            """Run one pass over the train or val set; returns (avg_loss, avg_acc)."""
            is_train = split == 'train'
            model.train(is_train)

            # set the dataLoader
            data = self.train_dataset if is_train else self.val_dataset
            if self.train_type == 'feature':
                loader = DataLoader(data, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)
            # BUGFIX: the original condition was `== 'metric' or 'metric_feature'`,
            # which is always truthy and silently clobbered the 'feature' loader.
            if self.train_type in ('metric', 'metric_feature'):
                self.sampler = BalancedSampler(data, batch_size=config.batch_size, shuffle=True)
                loader = DataLoader(data, num_workers=config.num_workers, batch_sampler=self.sampler)

            avgAcc_epoch = None
            avgLoss_epoch = None
            pbar = tqdm(enumerate(loader), total=len(loader)) if is_train else enumerate(loader)
            start_time = time.time()
            for it, (x, y, idx) in pbar:
                # place data on the correct device
                x = x.to(self.device)
                y = y.to(self.device)
                idx = idx.to(self.device)
                acc = 0
                correct = 0

                # forward the model
                with torch.set_grad_enabled(is_train):
                    if self.train_type == 'feature' or self.train_type == 'metric_feature':
                        logits, loss = model(x, y)
                        loss = loss.mean() # collapse all losses if they are scattered on multiple gpus
                        correct = (logits.argmax(-1) == y).sum().item()
                        acc = correct / y.size(0)

                    if self.train_type == 'metric':
                        # in tripletLoss_v3, batch_embeds is the flattened gram matrix [B, C*C]
                        batch_embeds, loss = model(x, y)
                        loss = loss.mean() # collapse all losses if they are scattered on multiple gpus

                    # write training info into tensorboard
                    self.record_Info_per_step(self.writer, self.lr, loss.item(),
                                              num_checked=y.size(0), num_correct=correct, acc=acc,
                                              record_step=10, is_train=is_train, train_type=self.train_type)

                if is_train:
                    # backprop and update the parameters
                    model.zero_grad()
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_norm_clip)
                    self.optimizer.step()

                    # metric learning: after the update, refresh the stored
                    # embeddings (or Gram matrices) of the samples in this batch
                    if self.train_type == 'metric':
                        with torch.no_grad():
                            # idx: [B,]
                            assert torch.all(idx < model.trained_embeds.size(0)), "索引越界"
                            if batch_embeds.device != idx.device:
                                print(f"batch_embeds.device: {batch_embeds.device}, idx.device: {idx.device}")
                            model.trained_embeds[idx] = batch_embeds

                    # decay the learning rate based on our progress
                    if config.lr_decay:
                        self.trained_num += y.size(0) # number of samples processed so far
                        if self.trained_num < config.warmup_num:
                            # linear warmup
                            lr_mult = float(self.trained_num) / float(max(1, config.warmup_num))
                        else:
                            # fraction of the post-warmup schedule completed
                            progress = float(self.trained_num - config.warmup_num) / float(max(1, config.final_num - config.warmup_num))
                            if config.lr_decay_type == 'cos':
                                lr_mult = max(0.05, 0.5 * (1.0 + math.cos(math.pi * progress)))
                            elif config.lr_decay_type == 'cos+exp':
                                lr_mult = max(0.05, np.exp(-0.50*progress) + 0.05*(1.0 + math.sin(0.5*math.pi*progress)))
                            elif config.lr_decay_type == 'exp':
                                lr_mult = max(0.05, np.exp(-0.80 * progress))
                            elif config.lr_decay_type == 'cycle_exp':
                                T = 3.0
                                a = T * float(config.final_num - config.warmup_num) / float(config.final_num)
                                exp_term = cycle_triangular(progress, T, a)
                                lr_mult = max(0.05, np.exp(-1.0 * exp_term))
                            else:
                                # previously an unknown type crashed with NameError
                                raise ValueError(f"unknown lr_decay_type: {config.lr_decay_type}")
                        self.lr = config.learning_rate * lr_mult
                    else:
                        self.lr = config.learning_rate
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] = self.lr

                    # report progress
                    pbar.set_description(f"epoch {epoch+1} iter {it}: train loss {loss.item():.5f}. lr {self.lr:e}")

                else:
                    # metric validation differs from feature validation: classify
                    # each sample via k-NN over the stored training embeddings
                    if self.train_type == 'metric':
                        assert self.model.trained_embeds is not None
                        distances, nearest_neighbors = self.model.knn_func(batch_embeds,
                                                                           reference=self.model.trained_embeds,
                                                                           K=config.k)
                        neighbor_labels = self.model.trained_labels[nearest_neighbors]
                        y_pred, _ = torch.mode(neighbor_labels, dim=1)  # y_pred: (num_val, )

                        # BUGFIX: accumulate over ALL val batches. The original
                        # overwrote both counters every batch, so the reported
                        # epoch accuracy was that of the last batch only.
                        # num_checked_in_epoch is already accumulated inside
                        # record_Info_per_step, so only the correct-count is added.
                        self.num_correct_in_epoch += (y_pred == y).sum().item()

            # report result in this epoch
            time_elapsed = time.time() - start_time
            avgLoss_epoch, avgAcc_epoch = self.report_Info_per_epoch(epoch+1, is_train, self.train_type, time_elapsed=time_elapsed)

            return avgLoss_epoch, avgAcc_epoch


        self.trained_num = 0 # sample counter used for learning rate decay
        self.final_lr_rate = None
        epoch = 0
        num_trial = 0
        patience = 0

        while True:
            current_loss, acc = run_epoch('train')
            if self.val_dataset is not None:
                print("Begin Validation ...")
                current_loss, acc = run_epoch('val')
            # Metric-only training without a val set reports no accuracy; treat
            # it as 0 so the early-stopping bookkeeping below stays well-defined
            # (previously `acc > self.best_acc` raised TypeError on None).
            if acc is None:
                acc = 0.0

            hard_threshold = 0.98
            if 'TripletLoss' in model.name:
                # switch to hard-negative mining once the model is nearly converged
                if acc > hard_threshold:
                    self.model.lossfunc.mining = 'hard'
                    print(f"acc > {hard_threshold}, mining method is set to {self.model.lossfunc.mining}!")
                else:
                    self.model.lossfunc.mining = 'semi-hard'
                    print(f"acc < {hard_threshold}, mining method is set to {self.model.lossfunc.mining}!")

            is_better = acc > self.best_acc
            if is_better:
                patience = 0
                if self.best_model_path is not None:
                    # best-effort cleanup of the previous best checkpoint;
                    # each file is removed independently so one missing file
                    # no longer skips the others (was a single bare except)
                    for stale in (self.best_model_path,
                                  self.best_model_path + '.optim',
                                  self.best_model_path + '.data'):  # .data: tripletLoss net
                        try:
                            os.remove(stale)
                        except OSError:
                            pass
                self.best_acc = max(acc, self.best_acc)  # Update best accuracy
                self.best_model_path, _ = self.save_checkpoint(self.config.ckpt_path)  # Use the configured checkpoint path
            # early stop
            elif patience < config.patience:
                assert self.best_model_path is not None
                patience += 1
                print('hit patience %d' % patience, file=sys.stderr)

                if patience == config.patience:
                    num_trial += 1
                    print('hit #%d trial' % num_trial, file=sys.stderr)
                    if num_trial == config.max_num_trial:
                        print('early stop!', file=sys.stderr)
                        return self.best_acc

                    # recording the current lr
                    self.lr = self.optimizer.param_groups[0]['lr']
                    # BUGFIX: the original passed self.lr as a second positional
                    # argument to print(), so the %-format was never applied
                    print('recording the learning rate : %.6f' % self.lr, file=sys.stderr)

                    # restart from the best checkpoint with the recorded lr
                    self.load_checkpoint(self.best_model_path)
                    self.model.to(self.device)
                    self.trained_num = 0
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] = self.lr

                    # reset patience
                    patience = 0

            epoch += 1
            if epoch == config.max_epochs:
                print('reached maximum number of epochs!', file=sys.stderr)
                self.writer.close()
                self.logger.info(f"best val_acc: {self.best_acc}")
                self.logger.info(f"="*90)
                return self.best_acc