import argparse
import copy
import numpy as np
import torch
from transformers import AutoModelForCausalLM
from transformers.models.auto.configuration_auto import AutoConfig
from transformers.models.auto.tokenization_auto import AutoTokenizer
import time
import sacrebleu
from pathlib import Path

import torch.profiler

from llm_sparsity.utils.modify_attention import modify_attention


def set_seed(args):
    """Seed numpy and torch RNGs from ``args.seed`` for reproducible runs.

    Also seeds all CUDA devices when ``args.n_gpu`` indicates GPUs are in use.
    """
    seed = args.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)

def run(argslist=None, inputText=None):
    """Benchmark generation with and without the sparse-attention KV-cache optimization.

    Generates from ``inputText`` twice — first with the unmodified model
    ("full" cache), then after ``modify_attention`` rewires the attention
    layers — and prints latency, GPU memory usage, and an estimated KV-cache
    footprint for each run.

    Args:
        argslist: optional list of CLI-style tokens forwarded to
            ``argparse.parse_args`` (``None`` means parse ``sys.argv``).
        inputText: the prompt string fed to the tokenizer/model.

    Returns:
        ``(res_ori, res_opt)`` where each element is a tuple of
        ``(decoded text, latency in seconds, cache memory in MB,
        BLEU score vs. the baseline output)``.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_name", type=str, default='huggyllama/llama-13b')
    parser.add_argument("--cache_dir", type=str, default='/NVME1/cxl/hf_cache/')
    parser.add_argument("--heavy_ratio", type=float, default=0.1)
    parser.add_argument("--recent_ratio", type=float, default=0.1)

    parser.add_argument("--stride_step", type=int, default=1)
    parser.add_argument("--mode", type=str, default="full")
    parser.add_argument("--length", type=int, default=64)

    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    args = parser.parse_args(argslist)

    args.device = torch.device("cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()

    set_seed(args)

    prompt = inputText
    tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_fast=True, cache_dir=args.cache_dir)

    # ---- baseline run: unmodified model, full KV cache ----
    # Memory before inference (model not loaded yet, so this is the pre-load baseline).
    start_mem = torch.cuda.memory_allocated(args.device)

    model = AutoModelForCausalLM.from_pretrained(args.model_name, cache_dir=args.cache_dir, attn_implementation="eager")
    print(model.config)
    model.half().eval().to(args.device)
    input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids.to(model.device)
    print(input_ids.shape)
    start_time = time.time()
    generate_ids = model.generate(input_ids, max_new_tokens=args.length)
    print(generate_ids.shape)
    result = tokenizer.decode(generate_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    end_time = time.time()
    latency = end_time - start_time

    # Memory after inference; reset peak stats so the optimized run is measured cleanly.
    end_mem = torch.cuda.memory_allocated(args.device)
    end_max_mem = torch.cuda.max_memory_allocated(args.device)
    torch.cuda.reset_peak_memory_stats(args.device)
    torch.cuda.empty_cache()

    print(f"result: {result}")
    print(f"latency: {latency}s")
    print(f"推理前显存：{start_mem / 1024**2:.2f} MB")
    print(f"推理后显存：{end_mem / 1024**2:.2f} MB")
    print(f"推理过程峰值显存：{end_max_mem / 1024**2:.2f} MB")

    # Estimate the full KV-cache footprint:
    # batch * heads * seq_len * head_dim * layers * 2 bytes (fp16) * 2 tensors (K and V).
    bsz = input_ids.shape[0]
    num_head = model.config.num_attention_heads
    length = generate_ids.shape[1]
    head_dim = model.config.head_dim
    full_cache_memory = bsz * num_head * length * head_dim * len(model.model.layers) * 2 * 2
    print(f"full cache memory footprint: {full_cache_memory / 1024**2:.2f}")

    # Baseline BLEU vs. itself is 100 by definition.
    res_ori = (result, latency, full_cache_memory / 1024**2, 100)

    # ---- optimized run: attention modified for sparse KV cache ----
    # Memory before the optimized inference (peak stats were reset above).
    start_mem = torch.cuda.memory_allocated(args.device)
    # modify_attention swaps the attention modules, so reload the weights into
    # the rebuilt structure from a checkpoint of the baseline state dict.
    checkpoint = copy.deepcopy(model.state_dict())
    model = modify_attention(model, args.heavy_ratio, args.recent_ratio, args.stride_step, args.mode)
    model.load_state_dict(checkpoint)
    model.half().eval().to(args.device)

    input_ids = tokenizer(prompt, add_special_tokens=False, return_tensors='pt').input_ids.to(model.device)
    start_time = time.time()
    generate_ids = model.generate(input_ids, max_new_tokens=args.length)
    opt_result = tokenizer.decode(generate_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
    end_time = time.time()
    latency = end_time - start_time

    # Memory after the optimized inference; peak is reported relative to start_mem.
    end_mem = torch.cuda.memory_allocated(args.device)
    opt_end_max_mem = torch.cuda.max_memory_allocated(args.device) - start_mem
    torch.cuda.reset_peak_memory_stats(args.device)
    torch.cuda.empty_cache()

    print(f"result: {opt_result}")
    print(f"latency: {latency}s")
    print(f"推理前显存：{start_mem / 1024**2:.2f} MB")
    print(f"推理后显存：{end_mem / 1024**2:.2f} MB")
    print(f"推理过程峰值显存：{opt_end_max_mem / 1024**2:.2f} MB")

    # Estimate the optimized KV-cache budget (number of cached positions) per mode.
    cache_budget = 0
    if args.mode == "heavy":
        # Bug fix: the original computed `input * heavy_ratio + cache_budget *
        # recent_ratio` with cache_budget still 0, so the recent portion never
        # contributed. The heavy-hitter cache keeps both the heavy and recent
        # fractions of the prompt, so both ratios apply to the input length.
        cache_budget = input_ids.shape[1] * (args.heavy_ratio + args.recent_ratio)
    if args.mode == "recent":
        cache_budget = input_ids.shape[1] * args.recent_ratio
    if args.mode == "stride":
        cache_budget = generate_ids.shape[1] // args.stride_step

    # Same footprint formula as above, with the reduced sequence budget.
    opt_cache_memory = bsz * num_head * cache_budget * head_dim * len(model.model.layers) * 2 * 2
    print(f"optimized cache memory footprint: {opt_cache_memory / 1024**2:.2f}")
    # Bug fix: the original packed the baseline `result` here, dropping the
    # optimized model's output text from the return value.
    res_opt = (opt_result, latency, opt_cache_memory / 1024**2, sacrebleu.corpus_bleu([opt_result], [[result]]).score)

    return res_ori, res_opt

if __name__ == '__main__':
    # NOTE(review): invoked with no prompt here, so inputText defaults to None
    # inside run(); the tokenizer call on a None prompt looks like it would
    # fail — confirm whether CLI use is expected to pass a prompt some other way.
    run()
