import transformers
import torch
import json
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    LlamaForCausalLM,
    pipeline,
    TextStreamer,
)
import torch_xla
import torch_xla.core.xla_model as xm
import time

# Default local directory holding the Llama-2 7B checkpoint (weights + tokenizer files).
MODELPATH = "./ModelFiles/llama2-7b"


def main(prompt, model_path):
    """Run a manual, fixed-shape token-by-token generation loop on an XLA device.

    The input is padded to a constant length (128) so the compiled XLA graph
    keeps the same shape on every decode step; each step generates one token,
    shifts the window left, and writes the new token into the last slot.

    Args:
        prompt: User text to wrap in the "### Human: ... ### Assistant:" template.
        model_path: Directory containing the model weights and tokenizer files.

    Side effects: prints debug information and the decoded token ids; returns None.
    """
    device = xm.xla_device()

    # NOTE(review): the original hard-coded "./ModelFiles/llama2-7b_1" here while
    # the tokenizer loaded from `model_path`, so weights and tokenizer could come
    # from different checkpoints. Load both from the same `model_path`.
    model = LlamaForCausalLM.from_pretrained(model_path)
    model.eval()
    print("### model ###", model)

    # fp16 halves memory and is the usual inference dtype on XLA hardware.
    model = model.half()
    print("### model to device ###")
    model.to(device)

    tokenizer = AutoTokenizer.from_pretrained(model_path)

    # Simple chat template expected by this checkpoint.
    B_INST, E_INST = "### Human:", "### Assistant:"
    prompt = f"{B_INST} {prompt.strip()} {E_INST} \n\n"

    # Pad to a fixed length so tensor shapes are identical across steps
    # (avoids XLA recompilation).
    # NOTE(review): "\0" is an unusual pad token — confirm it maps to a real
    # vocab entry; tokenizer.eos_token is the common choice for Llama.
    tokenizer.pad_token = "\0"
    inputs = tokenizer(
        [prompt], padding="max_length", max_length=128, return_tensors="pt"
    ).to(device)
    print("inputs is : ", inputs)

    # Llama has no token-type embeddings; drop the key only if the tokenizer
    # produced it (a bare `del` raises KeyError when it is absent).
    inputs.pop("token_type_ids", None)

    start_time = time.time()
    for step in range(5):
        print("llama debug start token:", step)

        # Generate exactly one new token for the current fixed-size window.
        result = model.generate(**inputs, max_new_tokens=1)

        # Shift the attention mask left by one and mark the new slot as valid.
        mask = inputs["attention_mask"].to("cpu")
        mask = torch.roll(mask, shifts=-1)
        mask[:, -1] = 1

        # Shift the ids left and write the newly generated token into the last
        # position. `result` is the full (batch, seq+1) sequence from generate;
        # only its final column is the new token. The original assigned the
        # whole tensor, which fails (or silently misbehaves) for seq > 0.
        inputs_ids = inputs["input_ids"].to("cpu")
        inputs_ids = torch.roll(inputs_ids, shifts=-1)
        inputs_ids[:, -1] = result[:, -1]

        inputs_ids = inputs_ids.to(device)
        mask = mask.to(device)
        inputs = model.prepare_inputs_for_generation(
            inputs_ids, attention_mask=mask, use_cache=True
        )
        print("llama debug token:", step)

    end_time = time.time()
    print("result is ", result)
    print("llama debug get output")
    print("result is ", inputs_ids)

    # NOTE(review): the original also timed the print statements above as a
    # second interval, which measures console I/O rather than model work;
    # only the generation-loop time is reported now.
    elapsed_time = end_time - start_time
    print(f"The elapsed time is {elapsed_time} seconds.")


if __name__ == "__main__":
    # prompt = 'Life is a movie beacuse'
    prompt = "hello"
    main(prompt, MODELPATH)
