import os
import json
import subprocess
import math
import time
import datetime
import torch
import lightning as pl
from lightning_utilities.core.rank_zero import rank_zero_info, rank_zero_only


def cos_decay(initial_lr, final_lr, current_step, total_steps):
    """Cosine-decay the learning rate from initial_lr down to final_lr.

    The rate follows half a cosine curve over total_steps, starting at
    initial_lr and ending at final_lr; once the schedule is exhausted the
    rate is held at final_lr.

    Args:
        initial_lr (float): Learning rate at step 0.
        final_lr (float): Learning rate reached at (and held after) total_steps.
        current_step (int): Current training step.
        total_steps (int): Number of steps the decay runs over.

    Returns:
        float: The learning rate for current_step.
    """
    # Past the horizon the schedule is pinned at its floor.
    if current_step >= total_steps:
        return final_lr
    # 0.5*(1+cos) sweeps from 1 down to 0 as the step approaches total_steps.
    cosine_weight = 0.5 * (1 + math.cos(math.pi * current_step / total_steps))
    return final_lr + (initial_lr - final_lr) * cosine_weight


def wsd(initial_lr, final_lr, current_step, total_steps, warmup_steps=100):
    """Learning-rate schedule: linear warmup followed by cosine decay.

    During the first warmup_steps the rate grows linearly from 0 up to
    initial_lr; from there it follows a cosine curve down to final_lr at
    total_steps, and stays at final_lr afterwards.

    Args:
        initial_lr (float): Peak learning rate, reached at the end of warmup.
        final_lr (float): Floor learning rate, reached at total_steps.
        current_step (int): Current training step.
        total_steps (int): Total number of training steps.
        warmup_steps (int): Length of the warmup phase; non-positive values
            fall back to the default of 100.

    Returns:
        float: The learning rate for current_step.
    """
    # Guard against a non-positive warmup length by restoring the default.
    if warmup_steps <= 0:
        warmup_steps = 100

    # Phase 1 — warmup: linear ramp from 0 to initial_lr.
    if current_step < warmup_steps:
        return initial_lr * current_step / max(1, warmup_steps)

    # Phase 2 — cosine decay, measured from the end of warmup.
    steps_into_decay = current_step - warmup_steps
    decay_span = total_steps - warmup_steps
    if steps_into_decay >= decay_span:
        # Schedule exhausted (also covers total_steps <= warmup_steps).
        return final_lr

    cosine_weight = 0.5 * (1 + math.cos(math.pi * steps_into_decay / decay_span))
    return final_lr + (initial_lr - final_lr) * cosine_weight
    

class train_callback(pl.Callback):
    """Lightning callback that drives the per-step LR / weight-decay schedule,
    logs throughput and loss (progress bar, train_log.txt, loss_data.json,
    optional swanlab), and saves periodic checkpoints at epoch end."""

    def __init__(self, args):
        # args: the run's argument namespace; read throughout the hooks below.
        super().__init__()
        self.args = args
        self.loss_file = os.path.join(args.proj_dir, "loss_data.json")
        # Start each run with a fresh loss log.
        if os.path.exists(self.loss_file):
            os.remove(self.loss_file)
            
    def write_data(self, loss_data, t_cost, kt_s):
        # Append one JSON line of loss data to the file, for streamlit plotting.
        with open(self.loss_file, 'a') as f:
            json.dump({"loss": float(loss_data), "t_cost": t_cost, "kt_s": kt_s}, f)
            f.write('\n')

    def on_train_batch_start(self, trainer, pl_module, batch, batch_idx):
        """Compute and apply this step's learning rate and weight decay to the
        optimizer param groups; on the very first global step of rank zero,
        open the run log and (optionally) initialise swanlab."""
        args = self.args
        # if args.cuda_cleanup > 0:
        #     torch.cuda.empty_cache()
        # Absolute step across resumed runs: offset by epochs already trained.
        real_step = trainer.global_step + args.epoch_begin * args.epoch_steps

        # LR schedule
        w_step = args.warmup_steps
        if args.lr_final == args.lr_init or args.epoch_count == 0:
            # Constant-LR runs skip the schedule entirely.
            lr = args.lr_init
        else:
            # total_steps fed to the schedule is epoch_steps scaled down by
            # device count and grad accumulation — presumably per-device
            # optimizer steps; TODO(review): confirm against epoch_steps' definition.
            if 'wsd' == args.lr_schedule:
                # NOTE: wsd decays to 0, ignoring args.lr_final.
                lr = wsd(args.lr_init, 0, real_step, args.epoch_steps//int(args.devices)//args.accumulate_grad_batches,warmup_steps=w_step)
            else:
                lr = cos_decay(args.lr_init, args.lr_final, real_step, args.epoch_steps//int(args.devices)//args.accumulate_grad_batches)



        if args.weight_decay_final > 0:
            # Geometric midpoint between weight_decay and weight_decay_final.
            wd_now = args.weight_decay * math.exp(math.log(args.weight_decay_final / args.weight_decay) * 0.5)   # TODO: the 0.5 is temporarily hard-coded here; later it can be adjusted dynamically from training progress
        else:
            wd_now = args.weight_decay

        for param_group in trainer.optimizers[0].param_groups:
            # Only update groups that use weight decay at all.
            if param_group["weight_decay"] > 0:
                param_group["weight_decay"] = wd_now
            if args.layerwise_lr > 0:
                # "my_lr_scale" is a per-group scale attached elsewhere in the project.
                param_group["lr"] = lr * param_group["my_lr_scale"]
                # print(param_group["lr"], param_group["my_lr_scale"])
            else:
                param_group["lr"] = lr

        # Stash the applied values on the trainer so on_train_batch_end can log them.
        trainer.my_lr = lr
        trainer.my_wd = wd_now
        # rank_zero_info(f"{real_step} {lr}")

        if trainer.global_step == 0:
            if trainer.is_global_zero:  # logging
                trainer.my_loss_sum = 0
                trainer.my_loss_count = 0
                trainer.my_log = open(args.proj_dir + "/train_log.txt", "a")
                trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n")
                try:
                    # strategy.config only exists for some strategies (e.g. deepspeed);
                    # best-effort, so failures are ignored.
                    print(f"\n{trainer.strategy.config}\n")
                    trainer.my_log.write(f"{trainer.strategy.config}\n")
                except:
                    pass
                trainer.my_log.flush()
                if len(args.swanlab) > 0:
                    print("Login to swanlab...")
                    import swanlab
                    swanlab.init(
                        project=args.swanlab,
                        name=args.run_name + " " + args.my_timestamp,
                        config=args,
                        save_code=False,
                    )
                    trainer.my_swanlab = swanlab

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
        """Aggregate the step loss across devices, log throughput / LR / loss,
        and append per-optimizer-step records to loss_data.json and swanlab."""
        args = self.args
        # Tokens processed per (effective) step across all devices.
        token_per_step = args.ctx_len * args.real_bsz
        real_step = trainer.global_step + args.epoch_begin * args.epoch_steps

        # Lightning 2.x path: loss arrives in `outputs`; sum it across ranks.
        if pl.__version__[0]=='2' :
            loss = outputs['loss']
            if int(args.devices)>1:
                torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM)

        if trainer.is_global_zero:  # logging
            t_now = time.time_ns()
            kt_s = 0
            t_cost = 0
            try:
                # trainer.my_time_ns does not exist on the very first step;
                # the except swallows that (and any zero-division) on purpose.
                t_cost = (t_now - trainer.my_time_ns) / 1e9
                kt_s = token_per_step / t_cost / 1000
                t_cost = 1.0 / t_cost  # variable reused as iterations/second
                self.log("REAL it/s", t_cost, prog_bar=True, on_step=True)
                self.log("Kt/s", kt_s, prog_bar=True, on_step=True)
            except:
                pass
            trainer.my_time_ns = t_now
            if pl.__version__[0]=='2':
                # Undo the SUM reduce (divide by device count) and scale by the
                # accumulation factor so accumulated micro-batches sum to one step.
                trainer.my_loss = loss*trainer.accumulate_grad_batches/int(args.devices)
            else:
                # Pre-2.x path: my_loss_all is presumably gathered elsewhere — verify.
                trainer.my_loss = trainer.my_loss_all.float().mean().item()
            trainer.my_loss_sum += trainer.my_loss
            trainer.my_loss_count += 1
            trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count
            self.log("lr", trainer.my_lr, prog_bar=True, on_step=True)
            self.log("sum_loss", trainer.my_epoch_loss, prog_bar=True, on_step=True)
            self.log("loss", trainer.my_loss, prog_bar=True, on_step=True)

            # Write loss, t_cost and kt_s to data.json
            if trainer.accumulate_grad_batches!=None:
                # Average the loss over the accumulation window and emit one
                # record only on the optimizer-step boundary.
                args.avg_loss += trainer.my_loss / trainer.accumulate_grad_batches
                if (batch_idx+1) % trainer.accumulate_grad_batches == 0:
                    if len(args.swanlab) > 0:
                        lll = {"loss": args.avg_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9}
                        if kt_s > 0:
                            lll["kt/s"] = kt_s
                        trainer.my_swanlab.log(lll, step=int(real_step))
                    self.write_data(args.avg_loss, t_cost, kt_s)
                    args.avg_loss = 0
            else:
                # No accumulation configured: log the raw per-batch loss.
                if len(args.swanlab) > 0:
                    lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9}
                    if kt_s > 0:
                        lll["kt/s"] = kt_s
                    trainer.my_swanlab.log(lll, step=int(real_step))
                self.write_data(trainer.my_loss, t_cost, kt_s)
                

    def on_train_epoch_start(self, trainer, pl_module):
        """Push rank / epoch / world-size info into the dataset so it can
        shard and shuffle per rank and per (resume-adjusted) epoch."""
        args = self.args
        # Lightning 2.x exposes the dataset directly; older versions wrap it.
        if pl.__version__[0]=='2':
            dataset = trainer.train_dataloader.dataset
        else:
            dataset = trainer.train_dataloader.dataset.datasets
        assert "MyDataset" in str(dataset)
        dataset.global_rank = trainer.global_rank
        dataset.real_epoch = int(args.epoch_begin + trainer.current_epoch)
        dataset.world_size = trainer.world_size
        # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########')

    def on_train_epoch_end(self, trainer, pl_module):
        """Save a checkpoint on the epoch_save schedule (and on the final
        epoch), append the epoch summary to train_log.txt, and exit the
        process once the my_exit epoch is reached."""
        args = self.args
        to_save_dict = {}
        # deepspeed stage 3 shards parameters across ranks, so every rank must
        # take part in saving — not just rank zero.
        if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy):  # save pth
            if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or (trainer.current_epoch == args.epoch_count - 1):
                if args.data_type == 'wds_img':
                    # Image run: keep only encoder/decoder weights.
                    raw_dict = pl_module.state_dict()
                    for k in raw_dict:
                        if k.startswith('encoder.') or k.startswith('decoder.'):
                            to_save_dict[k] = raw_dict[k]
                else:
                    # to_save_dict = pl_module.state_dict()
                    # Drop the "model." wrapper from parameter names so the
                    # checkpoint loads standalone (note: replace() hits every
                    # occurrence, not just the prefix).
                    to_save_dict = {k.replace("model.", ""): v for k, v in pl_module.state_dict().items()}

                try:
                    torch.save(to_save_dict, f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth")
                except Exception as e:
                    print('Error\n\n', e, '\n\n')

        if trainer.is_global_zero:  # logging
            # Columns: real epoch, mean loss, perplexity (exp of mean loss),
            # lr, wall-clock time, local epoch index.
            trainer.my_log.write(f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n")
            trainer.my_log.flush()

            # Reset the running-mean accumulators for the next epoch.
            trainer.my_loss_sum = 0
            trainer.my_loss_count = 0
            if (args.epoch_begin + trainer.current_epoch) >= args.my_exit:
                exit(0)

@rank_zero_only
def generate_init_weight(model, init_weight_name):
    """Build the model's initial weights and persist them to init_weight_name.

    Delegates to the model's own generate_init_weight() and writes the result
    with torch.save. Runs on the rank-zero process only (decorator).
    """
    initial_state = model.generate_init_weight()

    print(f"Save to {init_weight_name}...")
    torch.save(initial_state, init_weight_name)