import torch
import torch_npu
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import random
import sys
                                                                                                                 
def new_isin(elements, test_elements, *, assume_unique=False, invert=False, out=None):
    """NPU-friendly drop-in replacement for ``torch.isin``.

    Tests whether each element of ``elements`` occurs in ``test_elements``
    using a broadcasted equality check, mirroring torch.isin's interface.

    Args:
        elements: tensor whose entries are tested for membership.
        test_elements: tensor of candidate values.
        assume_unique: if True, the caller guarantees both inputs already
            contain only unique values, allowing deduplication to be skipped
            (matches torch.isin semantics).
        invert: if True, mark elements that are NOT in test_elements.
        out: optional pre-allocated boolean tensor to receive the result.

    Returns:
        Boolean tensor with the same shape as ``elements`` (``out`` if given).

    Raises:
        TypeError: if either input is not a torch.Tensor.
        ValueError: if ``out`` is provided with a mismatching shape.
    """
    if not isinstance(elements, torch.Tensor) or not isinstance(test_elements, torch.Tensor):
        raise TypeError("Both elements and test_elements must be torch tensors.")

    # Move inputs to the NPU when one is present; otherwise stay on the
    # current device so the function also works in CPU-only environments.
    if hasattr(torch, "npu") and torch.npu.is_available():
        elements = elements.to('npu')
        test_elements = test_elements.to('npu')

    # torch.isin semantics: assume_unique=True is a *promise* from the caller
    # that the inputs are already unique, so dedup work may be skipped. Only
    # deduplicate candidates when that promise is NOT given (dedup never
    # changes the membership result, it just shrinks the comparison).
    if not assume_unique:
        test_elements = test_elements.unique()

    # Broadcasted membership test: (..., 1) == (n,) -> (..., n), then reduce
    # over the candidate axis with any().
    result = (elements.unsqueeze(-1) == test_elements.flatten()).any(dim=-1)

    # Invert the mask when requested.
    if invert:
        result = ~result

    # Honor the optional out= tensor, matching torch's out-parameter style.
    if out is not None:
        if out.shape != result.shape:
            raise ValueError("Output tensor must have the same shape as the result.")
        out.copy_(result)
        return out

    return result
                                                                                                                 
def verify_isin():
    """Sanity-check new_isin against torch.isin on a random square tensor.

    Returns True when both implementations agree, False otherwise.
    """
    side = random.randint(1, 5)
    lhs = torch.randint(low=0, high=10, size=(side, side))
    rhs = torch.randint(low=0, high=10, size=(side, side))
    # Reference result moved to the NPU so it is comparable with new_isin's
    # output, which always lives on the NPU.
    reference = torch.isin(lhs, rhs).to("npu")
    candidate = new_isin(lhs, rhs)
    print("torch.isin output:", reference)
    print("new_isin output:", candidate)
    are_equal = torch.equal(reference, candidate)
    print(f"outputs is equal? {are_equal}")
    return are_equal
                                                                                                                 
                                                                                                                 
# Helper to truncate generated text at the last complete sentence.
def clean_output(output):
    """Trim a trailing incomplete sentence from generated text.

    Cuts ``output`` just after the last sentence-ending punctuation mark;
    returns the text unchanged when no terminator is present.
    """
    # '?' ends a sentence just like '.' and '!' — without it, text whose
    # final complete sentence is a question would lose that sentence.
    last_punctuation = max(output.rfind(p) for p in ('.', '!', '?'))
    if last_punctuation != -1:
        return output[:last_punctuation + 1]
    return output
                                                                                                                 
                                                                                                                 
if __name__ == "__main__":
    
    # Require an Ascend NPU; torch_npu (imported at the top) registers the
    # torch.npu namespace. Bail out early when no NPU is present.
    if torch.npu.is_available():
        print("NPU is available.")
        torch.npu.set_device(0)
        device = torch.device('npu')
    else:
        print("NPU is not available.")
        sys.exit()
    
    # Cross-check new_isin against torch.isin before monkey-patching it in;
    # only proceed if the two implementations agree.
    isin_are_equal = verify_isin()
    if isin_are_equal:
        print("torch.isin replaced")
        print(f"old isin:{torch.isin = }")
        # Monkey-patch: every torch.isin call made from here on (including
        # calls inside the model's remote code) goes through new_isin.
        torch.isin = new_isin
        print(f"new isin:{torch.isin = }")
    else:
        print("new_isin cannot replace torch.isin")
        sys.exit()


    # Hugging Face hub identifier of the model to run.
    model_name = "deepseek-ai/DeepSeek-V2-Lite"
    
    # Load the tokenizer (remote code required for DeepSeek models).
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code = True)
    
    # Load the model in bfloat16, sharded sequentially across devices.
    # NOTE(review): max_memory assumes 8 devices with 75GB each — confirm
    # this matches the actual hardware.
    model = AutoModelForCausalLM.from_pretrained(
        model_name, 
        trust_remote_code = True,
        device_map="sequential",
        torch_dtype=torch.bfloat16,
        max_memory={i: "75GB" for i in range(8)},
        attn_implementation="eager"
        )
    print(f"Model is on device: {next(model.parameters()).device}")

    # Set up the generation config; EOS is reused as the pad token
    # (presumably the model config defines no pad token — verify).
    model.generation_config = GenerationConfig.from_pretrained(model_name)
    model.generation_config.pad_token_id = model.generation_config.eos_token_id

    # Prompt text.
    text = "An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is"
    
    # Tokenize and move the input tensors to the NPU.
    inputs = tokenizer(text, return_tensors="pt").to(device)
    print(f"Inputs are on device: {inputs['input_ids'].device}")
        
    # Generate the continuation on the NPU.
    with torch.npu.device(device):
        outputs = model.generate(**inputs, max_new_tokens = 100, use_cache=True, )
    print(f"Outputs are on device: {outputs.device}")
    
    # Decode the generated token ids and trim to the last complete sentence.
    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
    cleaned_result = clean_output(result)
    
    # Print the final text.
    print("result:\n", cleaned_result)