# Optional Ascend NPU support: torch_npu only exists on NPU machines, so probe
# for it and record availability. Catch ImportError specifically — a bare
# `except:` would also swallow KeyboardInterrupt/SystemExit and hide real bugs.
try:
    import torch_npu  # noqa: F401  (imported for its device-registration side effects)
    npu_available = True
except ImportError:
    npu_available = False

from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
from safetensors.torch import load_model
from accelerate import dispatch_model

from flat_quant.utils import args_utils
from flat_quant.processors.flat_quant import quantize_model
from flat_quant.utils.flat_linear import FakeQuantizedLinearConfig
from flat_quant.models.llama import LlamaStructureBridge
from flat_quant.utils.eval_utils import eval_model
from flat_quant.utils.utils import seed_everything

if __name__ == "__main__":
    # Entry point: load a (possibly FlatQuant-quantized) Llama causal-LM and
    # run lm-eval tasks on it.
    args, logger = args_utils.parser_gen()
    seed_everything(args.seed)

    # Load the HF model config locally; optionally truncate the layer count
    # (args.layers <= 0 keeps the full depth — useful for quick debug runs).
    config = AutoConfig.from_pretrained(pretrained_model_name_or_path=args.model,
                                        local_files_only=True,
                                        trust_remote_code=True)
    config.num_hidden_layers = args.layers if args.layers > 0 else config.num_hidden_layers
    config._attn_implementation = "eager"

    # Initialize the Llama tokenizer and model.
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path=args.model,
                                              local_files_only=True,
                                              trust_remote_code=True,
                                              use_fast=True)
    model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=args.model,
                                                 local_files_only=True,
                                                 config=config,
                                                 trust_remote_code=True,
                                                 torch_dtype="auto",
                                                 device_map="auto")
    model.eval()
    tokenizer.pad_token = tokenizer.eos_token

    if args.load_path:
        # Rebuild the fake-quantized module structure BEFORE loading the
        # checkpoint, so the saved quantization parameters have matching
        # modules to land in.
        model_bridge = LlamaStructureBridge(model)
        model_bridge.analyze_structure()
        # Distinct name so the HF model `config` above is not shadowed.
        quant_config = FakeQuantizedLinearConfig(w_bits=args.w_bits,
                                                 a_bits=args.a_bits,
                                                 w_asym=args.w_asym,
                                                 a_asym=args.a_asym,
                                                 lwc=args.lwc,
                                                 lac=args.lac,
                                                 a_groupsize=args.a_groupsize)
        flat_quant_visitor = quantize_model(model_bridge,
                                            quant_config,
                                            diag_alpha=args.diag_alpha,
                                            diag_relu=args.diag_relu)
        model.tie_weights()
        load_model(model, args.load_path)
        model.cpu()
        flat_quant_visitor.to_eval_mode()
        if hasattr(model, "hf_device_map"):
            # dispatch_model returns the dispatched model; keep the reference
            # instead of discarding the return value.
            model = dispatch_model(model, model.hf_device_map)
        logger.info("Model loaded successfully.")
        logger.info(model)

    eval_model(model, tokenizer, args.lm_eval_batch_size, args.tasks, logger)
 