"""
Author: LYh
Description: OK
"""
import argparse
import time

def get_model(arch='pytorch', device=None):
    """
    Select the model architecture and the device to run inference on.

    CLI flags ``--arch`` / ``--device`` (when present on the command line)
    override the function arguments.

    Args:
        arch: architecture alias; "th"/"torch"/"pytorch" map to "pytorch",
            "pd"/"paddle"/"paddlepaddle" map to "paddle". Defaults to torch.
        device: one of 'cpu', 'npu', 'cuda'. If None, auto-detect with
            torch: prefer 'cuda', then 'npu' (via torch_npu), else 'cpu'.

    Returns:
        Tuple ``(arch, device)`` of canonical strings.

    Raises:
        ValueError: if the resolved arch or device is not supported.
    """
    parser = argparse.ArgumentParser(description='选择模型架构和运行设备')
    parser.add_argument('--arch', type=str, help='模型的架构"th"即"torch"或"pd"即"paddle"')
    parser.add_argument('--device', type=str, help='模型在cpu，npu还是cuda设备运行')

    # parse_known_args instead of parse_args: the host process (tests,
    # notebooks, launcher scripts) may carry unrelated CLI arguments that
    # would otherwise make argparse exit the whole program via SystemExit.
    args, _ = parser.parse_known_args()

    arch_dict = {
        "th": "pytorch",
        "torch": "pytorch",
        "pytorch": "pytorch",
        "pd": "paddle",
        "paddle": "paddle",
        "paddlepaddle": "paddle",
    }
    arch = args.arch or arch
    if arch in arch_dict:
        arch = arch_dict[arch]
    else:
        raise ValueError(f"{arch} 不属于支持的模型架构（pytorch 或 paddle）")

    device_list = ('cpu', 'npu', 'cuda')
    device = args.device or device
    if device is None:  # auto-detect the best available device
        import torch
        if torch.cuda.is_available():
            device = 'cuda'
        else:
            # torch_npu is an optional Ascend plugin; absence (or a partial
            # install missing the npu attribute) falls back to CPU.
            try:
                import torch_npu
                device = 'npu' if torch_npu.npu.is_available() else 'cpu'
            except (AttributeError, ModuleNotFoundError):
                device = 'cpu'
    elif device not in device_list:
        raise ValueError(f"{device} 不在支持的设备中（cpu, npu, cuda）")
    print(f'当前模型架构为：{arch}，在{device}上进行推理')

    return arch, device

def preprocess(text):
    """Escape literal newlines and tabs so the prompt stays on one line."""
    escaped = text.replace("\n", "\\n")
    escaped = escaped.replace("\t", "\\t")
    return escaped

def postprocess(text):
    """Undo the escaping done by preprocess and expand '%20' markers.

    Args:
        text: raw decoded model output.

    Returns:
        Human-readable text with real newlines/tabs and '%20' replaced
        by two spaces.
    """
    # Replacement order matters: unescape first, then expand '%20'.
    for marker, replacement in (("\\n", "\n"), ("\\t", "\t"), ('%20', '  ')):
        text = text.replace(marker, replacement)
    return text

def answer(tokenizer, model, text, sample=True, top_p=0.9, temperature=0.9, context = "", device='cpu'):
    """
    Generate a chat reply for ``text`` and print the decoding throughput.

    Args:
        tokenizer: tokenizer exposing HF-style ``__call__``, ``batch_decode``
            and ``tokenize`` — assumed transformers-compatible, confirm
            against the caller.
        model: generation model exposing HF-style ``generate``.
        text: the user's utterance.
        sample: if True use nucleus sampling; otherwise deterministic
            single-beam search.
        top_p: nucleus-sampling cumulative-probability cutoff in (0, 1];
            only used when ``sample`` is True.
        temperature: softmax temperature controlling sampling diversity.
        context: prior dialogue prepended to the prompt.
        device: target device for the encoded tensors; 'cpu' skips the move.

    Returns:
        The post-processed reply string (full decoded sequence).
    """
    # Build the prompt in the model's expected dialogue format.
    text = f"{context}\n用户：{text}\n小元："
    text = text.strip()
    text = preprocess(text)

    # Tokenize once; only move the tensors when a non-CPU device is
    # requested (was previously duplicated across an if/else).
    encoding = tokenizer(text=[text], truncation=True, padding=True, max_length=1024, return_tensors="pt")
    if device != 'cpu':
        encoding = encoding.to(device)

    start_time = time.time()
    if not sample:
        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=256, num_beams=1, length_penalty=0.6)
    else:
        out = model.generate(**encoding, return_dict_in_generate=True, output_scores=False, max_new_tokens=256, do_sample=True, top_p=top_p, temperature=temperature, no_repeat_ngram_size=12)
    end_time = time.time()

    out_text = tokenizer.batch_decode(out["sequences"], skip_special_tokens=True)

    # Throughput report. Guard the division: a sub-timer-resolution (or
    # mocked) generate call would otherwise raise ZeroDivisionError.
    num_tokens = len(tokenizer.tokenize(out_text[0]))
    elapsed_time = end_time - start_time
    tokens_per_second = num_tokens / elapsed_time if elapsed_time > 0 else float('inf')
    print(f"Generated {num_tokens} tokens in {elapsed_time:.2f} seconds. {tokens_per_second:.2f}token/s")

    return postprocess(out_text[0])