from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
import os
import torch
# Ascend NPU support is optional: torch_npu transparently redirects CUDA
# calls to the NPU when present. Catch only ImportError so that genuine
# failures (broken installs, interrupts) are not silently swallowed.
try:
    import torch_npu
    from torch_npu.contrib import transfer_to_npu
    npu_available = True
except ImportError:
    npu_available = False
import functools
from safetensors.torch import save_model, load_model
from tqdm import tqdm
from accelerate import dispatch_model
from flat_quant.utils import args_utils, data_utils
from flat_quant.utils.flat_linear import FakeQuantizedLinearConfig
from flat_quant.models.llama import LlamaStructureBridge
from flat_quant.processors.flat_quant import quantize_model
from contextlib import nullcontext
from flat_quant.utils.model_utils import LayerRuner
from flat_quant.utils.utils import seed_everything
from flat_quant.utils.args_utils import get_logger
from flat_quant.utils.eval_utils import eval_model
from flat_quant.processors.flat_quant import get_trainable_parameters

def empty_cache():
    """Release cached allocator memory on the active accelerator backend."""
    backend = torch.npu if npu_available else torch.cuda
    backend.empty_cache()


def mse_loss(x, y):
    """Return the mean squared error between tensors ``x`` and ``y``."""
    diff = x - y
    return diff.pow(2).mean()


def get_device_str(device):
    """Normalize ``device`` (int or str) into a fully-qualified device string.

    Uses the ``npu`` prefix when torch_npu is available, ``cuda`` otherwise.
    Inputs that already carry the right prefix are returned unchanged.
    """
    if isinstance(device, int):
        device = str(device)
    prefix = "npu" if npu_available else "cuda"
    return device if device.startswith(prefix) else f"{prefix}:{device}"

def print_param_state(params):
    """Debug-log a summary (sum, abs-min, shape, dtype) of each parameter.

    ``params`` maps a name to a list of tensors; one line is logged per tensor.
    """
    logger = get_logger()
    for name, param_list in params.items():
        for idx, p in enumerate(param_list):
            fields = [
                f"{name} {idx} sum:{p.float().sum().item():.5f}",
                f" min:{p.abs().float().min().item():.5f}",
                f" shape:{p.shape}",
                f" dtype:{p.dtype}",
            ]
            logger.debug("".join(fields))

def get_input_dict(attention_mask, position_ids, position_embeddings):
    """Build the keyword-argument dict for a decoder layer call.

    Only the inputs that are not ``None`` are included, so the layer's own
    defaults apply for the missing ones.
    """
    candidates = {
        'attention_mask': attention_mask,
        'position_ids': position_ids,
        'position_embeddings': position_embeddings,
    }
    return {key: value for key, value in candidates.items() if value is not None}

def main():
    """Layer-wise FlatQuant calibration for a (Llama-family) causal LM.

    Pipeline: load model/tokenizer, capture the inputs to the first decoder
    layer from the calibration set, wrap linears with fake quantization, then
    for each layer train the quantization parameters to match the
    full-precision layer outputs (MSE). Optionally saves the calibrated
    state dict (safetensors) and runs lm-eval at the end.
    """
    args, logger = args_utils.parser_gen()
    seed_everything(args.seed)
    config = AutoConfig.from_pretrained(pretrained_model_name_or_path=args.model,
                                     local_files_only=True,
                                     trust_remote_code=True)
    # Optionally truncate to the first `args.layers` decoder layers (debug runs).
    config.num_hidden_layers = args.layers if args.layers > 0 else config.num_hidden_layers
    config._attn_implementation = "eager"
    # Initialize the Llama tokenizer and model.
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=args.model,
                                              local_files_only=True,
                                              trust_remote_code=True,
                                              use_fast=True)

    model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=args.model,
                                                 local_files_only=True,
                                                 config=config,
                                                 trust_remote_code=True,
                                                 torch_dtype="auto",device_map="auto")
    model.eval()
    tokenizer.pad_token = tokenizer.eos_token
    default_device = get_device_str(0)
    hf_device_map = getattr(model, "hf_device_map", None)
    # get calibration data
    trainloader = data_utils.get_loaders(tokenizer,
        args, args.cali_dataset, nsamples=args.nsamples,
        seed=args.seed, model=args.model,
        seqlen=2048, eval_mode=False
    )
    logger.info("Finished loading training data.")
    # Freeze all pretrained weights; only quantization parameters are trained.
    for param in model.parameters():
        param.requires_grad = False

    model_bridge = LlamaStructureBridge(model)
    model_bridge.analyze_structure()
    runner = LayerRuner(model_bridge)

    # Run the pre-layer stack once to record the hidden states and auxiliary
    # inputs (mask, position ids/embeddings) that feed decoder layer 0.
    calib_data = [[data[0]] for data in trainloader]
    with torch.no_grad():
        calib_data = data_utils.to_device(calib_data, default_device)
        first_layer_data = runner.prepare_first_layer_input(calib_data)
    model.cpu()

    fp_inps = first_layer_data['data']
    fp_inps = [inp[0].cpu() for inp in fp_inps]
    fp_inps = torch.cat(fp_inps, dim=0)
    attention_mask = first_layer_data['attention_mask']
    position_ids = first_layer_data['position_ids']
    position_embeddings = first_layer_data['position_embeddings']
    del first_layer_data
    if attention_mask is not None:
        # Replicate the (single-sample) mask across the calibration batch.
        attention_mask_batch = attention_mask.repeat(args.cali_bsz, 1, 1, 1).float()
    else:
        attention_mask_batch = attention_mask

    # Quantization: wrap the model's linear layers with fake quantization.
    # NOTE(review): `config` now shadows the HF model config loaded above.
    config = FakeQuantizedLinearConfig(w_bits=args.w_bits, 
                                       a_bits=args.a_bits, 
                                       w_asym=args.w_asym, 
                                       a_asym=args.a_asym, 
                                       lwc=args.lwc, 
                                       lac=args.lac, 
                                       a_groupsize=args.a_groupsize)
    quantizer = quantize_model(model_bridge, 
                               config, 
                               diag_alpha=args.diag_alpha,
                               diag_relu=args.diag_relu)
    model.tie_weights()
    if args.load_path:
        load_model(model, args.load_path)
    # Train the quantized model: choose full fp32 or AMP autocast (bf16/fp16).
    if args.deactive_amp:
        dtype = torch.float32
        traincast = nullcontext
    else:
        if args.amp_dtype == "bfloat16":
            dtype = torch.bfloat16
        elif args.amp_dtype == "float16":
            dtype = torch.float16
        else:
            raise ValueError(f"Invalid AMP dtype: {args.amp_dtype}")
        if npu_available:
            traincast = functools.partial(torch.amp.autocast, device_type="npu", dtype=dtype)
        else:
            traincast = functools.partial(torch.amp.autocast, device_type="cuda", dtype=dtype)
    loss_fn = torch.nn.MSELoss()
    # loss_fn = mse_loss
    empty_cache()
    float_layer_input = fp_inps.to(dtype)
    quant_layer_input = fp_inps.to(dtype)
    logger.info(model)
    for i in tqdm(range(runner.num_layers)):
        # Honor the accelerate device map when the model is sharded.
        if hf_device_map and f"model.layers.{i}" in hf_device_map:
            device = get_device_str(hf_device_map[f"model.layers.{i}"])
        else:
            device = default_device
        layer = runner.layers[i].to(device)
        # Remember original parameter dtypes so they can be restored after
        # the layer has been trained in float32.
        dtype_dict = {}
        for name, param in layer.named_parameters():
            dtype_dict[name] = param.dtype
            
        # Capture the full-precision (quantization disabled) layer outputs.
        fp_outs = []
        with torch.no_grad():
            layer.float()
            quantizer.to_org_mode()
            for j in range(args.nsamples):
                batch_input = float_layer_input[j:j + 1].to(device)
                input_dict = get_input_dict(attention_mask, position_ids, position_embeddings)
                fp_out = layer(batch_input, **input_dict)[0].cpu()
                fp_outs.append(fp_out)
            fp_outs = torch.cat(fp_outs, dim=0)

        empty_cache()
        # Train this layer's quantization parameters.
        quantizer.to_calib_mode(prefix=f"model.layers.{i}")
        params, trainable_params = get_trainable_parameters(layer, args.flat_lr)

        optimizer = torch.optim.AdamW(trainable_params)
        scheduler_main = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * (args.nsamples // args.cali_bsz), eta_min=args.flat_lr * 1e-3)
        if args.warmup:
            scheduler_warmup = torch.optim.lr_scheduler.LinearLR(optimizer, start_factor=0.01, total_iters=16)
            scheduler = torch.optim.lr_scheduler.ChainedScheduler([scheduler_warmup, scheduler_main])
        else:
            scheduler = scheduler_main
        for epoch in range(args.epochs):
            mse = 0
            quant_outputs = []
            print_param_state(params)
            for j in range(args.nsamples // args.cali_bsz):
                index = j * args.cali_bsz
                batch_input = quant_layer_input[index:index+args.cali_bsz].to(device)
                with traincast():
                    input_dict = get_input_dict(attention_mask_batch, position_ids, position_embeddings)
                    quant_output = layer(batch_input, **input_dict)[0]
                    loss = loss_fn(quant_output, fp_outs[index:index+args.cali_bsz].to(quant_output[0]))
                    mse += loss.detach().cpu().item()
                    logger.debug(f"layer {i} iter {epoch}, mse: {mse:.8f}, loss: {loss:.8f}" )
                    # Dividing by the detached loss makes the value 1 while
                    # scaling gradients by 1/loss (relative-error training).
                    loss = loss / loss.clone().detach()
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    scheduler.step()
                    quant_outputs.append(quant_output.detach().cpu())
            cur_lr = optimizer.state_dict()['param_groups'][0]['lr']
            logger.info(f"layer {i} iter {epoch}, lr {cur_lr:.8f}, mse: {mse:.8f}" )
        # NOTE(review): `quant_outputs` holds only the final epoch's outputs and
        # would be undefined here if args.epochs == 0 — confirm that is intended.
        if args.quant_by_quant:
            quant_layer_input = torch.cat(quant_outputs, dim=0)
            float_layer_input = quant_layer_input
        else:
            quant_layer_input = fp_outs
            float_layer_input = fp_outs
        layer.cpu()
        # Re-freeze the layer and restore the original parameter dtypes.
        for name, param in layer.named_parameters():
            param.requires_grad = False
            if name in dtype_dict:
                param.data = param.to(dtype_dict[name])
        empty_cache()
    if args.save_path:
        # Make sure the directory of the save path exists.
        save_dir = os.path.dirname(args.save_path)
        if save_dir and not os.path.exists(save_dir):
            os.makedirs(save_dir, exist_ok=True)
        save_model(model, args.save_path)
    quantizer.to_eval_mode()
    # Re-shard the model across devices for evaluation if a device map exists.
    if hasattr(model, "hf_device_map"):
        dispatch_model(model, model.hf_device_map)

    
    empty_cache()
    if args.lm_eval:
        eval_model(model, tokenizer, args.lm_eval_batch_size, args.tasks, logger)


# Script entry point.
if __name__ == '__main__':
    main()