import transformers
import torch
import json
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    LlamaForCausalLM,
    pipeline,
    TextStreamer,
)
import torch_xla
import torch_xla.core.xla_model as xm
import time
import argparse


def main(prompt: str, model_path: str, max_tokens: int, max_length: int, layer: int) -> None:
    """Greedy token-by-token generation on an XLA device over a truncated
    LLaMA model, using a manually managed, fixed-shape KV cache.

    The model is cut down to its first ``layer`` decoder layers. Each loop
    iteration calls ``model.generate(..., max_new_tokens=1)`` with an explicit
    ``past_key_values`` tuple of constant shape so the compiled XLA graph
    keeps static shapes; instead of growing the sequence, the input ids and
    attention mask are shifted ("rolled") left by one slot per step.

    Args:
        prompt: raw user text; wrapped in "### Human:" / "### Assistant:" markers.
        model_path: HuggingFace checkpoint directory for a LLaMA causal LM.
        max_tokens: number of single-token generation steps to run.
        max_length: fixed left-padded sequence length for the static graph.
        layer: number of leading decoder layers to keep (also the number of
            KV-cache entries built below).
    """
    device = xm.xla_device()
    # model = AutoModelForCausalLM.from_pretrained(model_path,
    #                                              device_map='auto',
    #                                              offload_folder='./ModelFiles/off_load')
    model = LlamaForCausalLM.from_pretrained(model_path, low_cpu_mem_usage=True)
    # Keep only the first `layer` decoder layers (truncated-model experiment).
    model.model.layers = model.model.layers[0:layer]
    model.eval()
    print("### model ###", model)
    # fp16 weights to match the fp16 KV buffers created below.
    model = model.half()
    # print(model.config.input_shape)
    print("### model to device ###")
    model.to(device)

    print("### model compile ###")
    #    model = torch.compile(model, backend='openxla')
    # Left padding keeps the real prompt tokens at the right edge of the window.
    tokenizer = AutoTokenizer.from_pretrained(model_path, padding_side="left")

    # system_prompt = 'You are a helpful aasistant that provides accurate and concise response'

    #    B_INST, E_INST = '### i:', '### can:'
    B_INST, E_INST = "### Human:", "### Assistant:"
    prompt = f"{B_INST} {prompt.strip()} {E_INST} \n\n"
    # inputs = tokenizer([prompt], return_tensors='pt').to(device)
    tokenizer.pad_token = tokenizer.eos_token  # '\0'
    tokenizer.pad_to_max_length = True
    # Tokenize on CPU, padded to the static max_length.
    inputs = tokenizer(
        [prompt], padding="max_length", max_length=max_length, return_tensors="pt"
    ).to("cpu")
    # Number of real (non-pad) tokens in the prompt.
    ids_valid_length = inputs["attention_mask"].sum().item()
    print("ids_valid_length:", ids_valid_length)
    # inputs = tokenizer([prompt],padding="max_length",truncation=True,max_length=30,return_tensors='pt').to(device)
    print("inputs is : ", inputs)
    # return
    if "token_type_ids" in inputs:
        del inputs["token_type_ids"]
    streamer = TextStreamer(tokenizer)
    start_time = time.time()
    # result = model.generate(**inputs,  max_new_tokens=1)
    # Pre-build one zeroed KV-cache entry per kept layer. The (1, 32, 128, 128)
    # shape is hard-coded — presumably (batch, n_heads, seq_len, head_dim) for
    # llama2-7b with max_length == 128; TODO confirm and derive from config if
    # max_length or the model ever change.
    past_key_values_ori = ()
    for i in range(layer):
        # past_key_values = (torch.randn((1,32,128,128),dtype=torch.float16).to(device),torch.randn((1,32,128,128),dtype=torch.float16).to(device))
        past_key_values = (
            torch.zeros((1, 32, 128, 128), dtype=torch.float16).to(device),
            torch.zeros((1, 32, 128, 128), dtype=torch.float16).to(device),
        )
        past_key_values_ori += (past_key_values,)
        # print("past_key_values:",past_key_values)
    # Start from an all-zero mask (in place); one position per step is
    # re-enabled inside the loop below, so the mask accumulates across steps.
    ori_attention_mask = inputs["attention_mask"]
    ori_attention_mask = ori_attention_mask.fill_(0)
    print("ori_attention_mask:", ori_attention_mask)
    # Left padding put the prompt tokens at the right edge; rolling right by
    # ids_valid_length wraps them around to the front, so each subsequent
    # left-roll feeds the next prompt token into the last (current) slot.
    ori_input_ids = inputs["input_ids"]
    ori_input_ids = torch.roll(ori_input_ids, shifts=ids_valid_length)
    # ori_input_ids = torch.roll(ori_input_ids,shifts=-1)
    print("ori_input_ids:", ori_input_ids)
    # Seed `result` with the same (ids, past_key_values) tuple shape that the
    # loop expects model.generate to return.
    result = (ori_input_ids, past_key_values_ori)
    xm.mark_step()
    # Count of generation steps completed so far (not a positions tensor).
    position_ids = 0
    with torch.no_grad():
        for i in range(max_tokens):
            # Re-enable one more slot from the right; `mask` aliases
            # ori_attention_mask, so earlier steps' ones are kept.
            mask = ori_attention_mask
            mask[:, -1 - position_ids] = 1
            mask = mask.to(device)
            # Shift the window left by one; the wrapped-around element in the
            # last slot is either the next prompt token or gets overwritten.
            ori_input_ids = torch.roll(ori_input_ids, shifts=-1)
            # NOTE(review): result[0] is indexed as the generated id sequence
            # and result[1] as past_key_values — this relies on generate()
            # returning a tuple here (non-standard HF behavior); confirm
            # against the transformers version / patch actually in use.
            next_token = result[0].to("cpu")
            next_token = next_token[:, -1]

            if position_ids > (ids_valid_length - 1):
                # Prompt exhausted: place the freshly generated token in the
                # last slot instead of the rolled-in pad token.
                ori_input_ids[:, -1] = next_token
                inputs_ids = ori_input_ids
            else:
                # Still consuming the prompt; the roll above already moved the
                # next real token into the last slot.
                inputs_ids = ori_input_ids
            inputs_ids = inputs_ids.to(device)
            # Drop the oldest timestep from every layer's K and V (via CPU) so
            # the cache stays one short; presumably generate() appends one step
            # back, keeping the cache length constant — TODO confirm.
            out_past_key_values = result[1]
            past_key_values = ()
            for kv_values in out_past_key_values:
                kv_tmp = ()
                for value in kv_values:
                    value = value.to("cpu")
                    # print("value:",value.shape,value)
                    value = value[:, :, 1:, :]
                    value = value.to(device)
                    # print("past_key_values:",value.shape)
                    kv_tmp += (value,)
                past_key_values += (kv_tmp,)

            inputs = {
                "input_ids": inputs_ids,
                "attention_mask": mask,
                "past_key_values": past_key_values,
                "use_cache": True,
            }
            # print("inputs is 1: ", inputs)
            # xm.mark_step()
            # One token per call keeps every tensor shape static across steps.
            result = model.generate(**inputs, max_new_tokens=1)
            # Cut the lazy XLA graph here and execute the accumulated ops.
            xm.mark_step()
            # print("result is :",result)
            # print("result1 is:",result[0])
            position_ids = position_ids + 1

    end_time = time.time()
    print("result is ", inputs_ids)
    print("result is ", result[0])
    # past_key_values = result[1]
    # for kv_pasts in past_key_values:
    #     for kv_past in kv_pasts:
    #         print("kv_past is ",kv_past.shape,kv_past)
    outputs = tokenizer.batch_decode(result[0], skip_special_tokens=True)
    # end_time1 = time.time()
    print("result is ", outputs)
    # elapsed_time = end_time - start_time  # compute elapsed time
    # elapsed_time1 = end_time1 - end_time
    # print(f"The elapsed time is {elapsed_time,elapsed_time1} seconds.")


def get_args(argv=None):
    """Parse command-line options for the generation script.

    Args:
        argv: optional list of argument strings to parse instead of
            ``sys.argv[1:]`` (forwarded to ``parse_args``). The default of
            ``None`` preserves the original no-argument call-site behavior
            while making the parser unit-testable.

    Returns:
        argparse.Namespace with fields:
            t (int): max number of tokens to generate (default 15).
            i (str): input prompt string (default "hello").
            p (int): padding max length (default 128).
            m (str): model checkpoint path (default "../ModelFiles/llama2-7b").
            l (int): number of decoder layers to keep (default 1).
    """
    parser = argparse.ArgumentParser()
    # Plain string literals: the original used f-strings with no placeholders.
    parser.add_argument("-t", type=int, help="max token nums.", default=15)
    parser.add_argument("-i", help="input string.", default="hello")
    parser.add_argument("-p", type=int, help="padding max length.", default=128)
    parser.add_argument("-m", help="model path.", default="../ModelFiles/llama2-7b")
    parser.add_argument("-l", type=int, help="layer.", default=1)
    args = parser.parse_args(argv)
    # Echo the parsed options so each run's configuration is recorded in logs.
    print(args)
    return args


if __name__ == "__main__":
    # Parse CLI options once and forward them to the generation entry point.
    cli = get_args()
    main(cli.i, cli.m, cli.t, cli.p, cli.l)
