from transformers import TextStreamer
from transformers import TextIteratorStreamer
from threading import Thread
from unsloth import FastLanguageModel
# Load the fine-tuned LoRA adapter from the local directory and switch the
# model into Unsloth's optimized inference mode (enables fast generation paths).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "./lora_model", # load the LoRA model produced by training
    max_seq_length = 2048,       # context window used at training time
    dtype = None,                # let Unsloth auto-select (fp16/bf16 per GPU)
    load_in_4bit = True,         # 4-bit quantization to reduce VRAM usage
)
FastLanguageModel.for_inference(model)
alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{}
### Input:
{}
### Response:
{}"""


def response(request):
    """Generate an answer for *request* with the Alpaca prompt template.

    Returns the full streamed text — prompt echo included — with a trailing
    newline appended (same contract as before).

    The original called ``model.generate`` synchronously and only then
    iterated the ``TextIteratorStreamer``; that works solely because the
    streamer's internal queue is unbounded, and it defeats the point of
    streaming. Generation now runs in a background thread (the pattern
    ``TextIteratorStreamer`` is designed for), consumed concurrently.
    """
    _inputs = tokenizer(
        [
            alpaca_prompt.format(
                "请用中文回答",  # instruction: "please answer in Chinese"
                request,
                "",
            )
        ], return_tensors="pt").to("cuda")
    _streamer = TextIteratorStreamer(tokenizer=tokenizer)
    _worker = Thread(
        target=model.generate,
        kwargs=dict(
            **_inputs,
            streamer=_streamer,
            max_new_tokens=128,
            pad_token_id=tokenizer.eos_token_id,
        ),
    )
    _worker.start()
    # str.join avoids the quadratic += string build.
    _pieces = [_token for _token in _streamer]
    _worker.join()
    return "".join(_pieces) + "\n"

alpaca_prompt_2 = """
### Request:
{}
{}
### Response:
{}"""

def response_2(request):
    """Generate an answer for *request* using the minimal request template.

    Returns the full streamed text — prompt echo included — with a trailing
    newline appended (same contract as before).

    Fix: run ``model.generate`` in a background thread so the
    ``TextIteratorStreamer`` is consumed concurrently; the original relied on
    the streamer's unbounded queue to buffer everything before iteration.
    """
    _inputs = tokenizer(
        [
            alpaca_prompt_2.format(
                "请用中文回答",  # instruction: "please answer in Chinese"
                request,
                "",
            )
        ], return_tensors="pt").to("cuda")
    _streamer = TextIteratorStreamer(tokenizer=tokenizer)
    _worker = Thread(
        target=model.generate,
        kwargs=dict(
            **_inputs,
            streamer=_streamer,
            max_new_tokens=128,
            pad_token_id=tokenizer.eos_token_id,
        ),
    )
    _worker.start()
    # str.join avoids the quadratic += string build.
    _pieces = [_token for _token in _streamer]
    _worker.join()
    return "".join(_pieces) + "\n"

alpaca_prompt_3 = """
### Request:
{}
{}
### Response:
{}"""

def response_3(request):
    """Generate an answer for *request* and return the streamed chunks.

    Unlike the other helpers this returns a ``list`` of decoded text chunks
    (prompt echo included) rather than a joined string — unchanged contract.

    Fix: run ``model.generate`` in a background thread so the
    ``TextIteratorStreamer`` is consumed concurrently instead of relying on
    its unbounded internal queue after a synchronous generate.
    """
    _inputs = tokenizer(
        [
            alpaca_prompt_3.format(
                "请用中文回答",  # instruction: "please answer in Chinese"
                request,
                "",
            )
        ], return_tensors="pt").to("cuda")
    _streamer = TextIteratorStreamer(tokenizer=tokenizer)
    _worker = Thread(
        target=model.generate,
        kwargs=dict(
            **_inputs,
            streamer=_streamer,
            max_new_tokens=128,
            pad_token_id=tokenizer.eos_token_id,
        ),
    )
    _worker.start()
    _chunks = list(_streamer)
    _worker.join()
    return _chunks


alpaca_prompt_4 = """
### Request:
{}
### Response:
{}"""

def response_4(request: str):
    """Generate an answer for *request* and return only the answer text.

    Bug fix: the generated stream begins with the ENTIRE formatted prompt,
    but the original sliced off only ``len(request)`` characters, leaking the
    template text ("### Request:" etc.) into the return value; its ``[:-1]``
    also removed just one character of the multi-character end-of-sequence
    marker. We now slice off the exact formatted prompt and strip the EOS
    token explicitly (presumably "<|end_of_text|>" for this tokenizer, as
    response_6 assumes — confirm against the model's tokenizer config).
    """
    _text = alpaca_prompt_4.format(request, "")
    _inputs = tokenizer([_text], return_tensors="pt").to("cuda")
    _streamer = TextIteratorStreamer(tokenizer=tokenizer)
    # Generate in a background thread; consume the streamer concurrently.
    _worker = Thread(
        target=model.generate,
        kwargs=dict(
            **_inputs,
            streamer=_streamer,
            max_new_tokens=128,
            pad_token_id=tokenizer.eos_token_id,
        ),
    )
    _worker.start()
    _response = "".join(_streamer)
    _worker.join()
    # Drop the echoed prompt prefix, then any trailing EOS marker.
    return _response[len(_text):].removesuffix(tokenizer.eos_token)

def response_5(request):
    """Generate an answer for *request* and return only the answer text.

    Bug fix: the original computed the prompt-echo offset as
    ``len(alpaca_prompt_3) + len("请用中文回答") + len(request)``, which is wrong
    twice over — the prompt was actually built from ``alpaca_prompt`` (a
    different, longer template), and template lengths count the unreplaced
    "{}" placeholders. We now slice by the length of the exact formatted
    prompt, and strip the trailing EOS marker the old code left attached.
    """
    _text = alpaca_prompt.format(
        "请用中文回答",  # instruction: "please answer in Chinese"
        request,
        "",
    )
    _inputs = tokenizer([_text], return_tensors="pt").to("cuda")
    _streamer = TextIteratorStreamer(tokenizer=tokenizer)
    # Generate in a background thread; consume the streamer concurrently.
    _worker = Thread(
        target=model.generate,
        kwargs=dict(
            **_inputs,
            streamer=_streamer,
            max_new_tokens=128,
            pad_token_id=tokenizer.eos_token_id,
        ),
    )
    _worker.start()
    _response = "".join(_streamer)
    _worker.join()
    # Drop the echoed prompt prefix, then any trailing EOS marker
    # (presumably "<|end_of_text|>" — confirm against tokenizer config).
    return _response[len(_text):].removesuffix(tokenizer.eos_token)


def response_6(request):
    """Return only the model's answer for *request*.

    Builds the Alpaca-style prompt, streams the generation, then slices
    away the prompt echo at the front and the trailing "\\n" plus the
    "<|end_of_text|>" marker at the back.
    """
    _prompt = alpaca_prompt.format(
        "请用中文回答",
        request,
        "",
    )
    _batch = tokenizer([_prompt], return_tensors="pt").to("cuda")
    _stream = TextIteratorStreamer(tokenizer=tokenizer)
    model.generate(
        **_batch,
        streamer=_stream,
        max_new_tokens=128,
        pad_token_id=tokenizer.eos_token_id,
    )
    _chunks = []
    for _piece in _stream:
        _chunks.append(_piece)
    _full = "".join(_chunks) + "\n"
    # Strip prompt echo (front) and newline + end-of-text marker (back).
    return _full[len(_prompt):-1 - len("<|end_of_text|>")]


# def response_7(request):
#     _alpaca_prompt = """
#         ### Instruction:
#         {}
#         ### Input:
#         {}
#         ### Response:
#         {}"""
#     _text = _alpaca_prompt.format(
#             "请用中文回答",
#             request,
#             "",
#         )
#     _inputs = tokenizer([_text, ], return_tensors="pt").to("cuda")
#     _streamer = TextIteratorStreamer(tokenizer=tokenizer)
#     model.generate(**_inputs, streamer=_streamer, max_new_tokens=128, pad_token_id=tokenizer.eos_token_id)
#     _response = ""
#     for _token in _streamer:
#         _response = _response+_token
#     _response = _response + "\n"
#     return _response[len(_text):-1-len("<|end_of_text|>")]

if __name__ == '__main__':
    # Ad-hoc smoke tests for the helpers above; only response_6 (the working
    # variant) is currently enabled. The commented calls exercise the earlier,
    # buggier iterations.
    # print(response('太阳为什么是红的'))
    # print(response_2('太阳为什么是黑的'))
    # print(response_3('我为什么那么帅'))
    # print(response_4('60多的老头骑电动三轮，喝了酒吹出个22，就罚款吗？'))
    # print(response_5('60多的老头骑电动三轮，喝了酒吹出个22，就罚款吗？'))
    # print(response_6('99多的老头骑电动三轮，喝了酒吹出个22，就罚款吗？'))
    # print("\n666666\n")
    print(response_6('60多的老头骑电动三轮，喝了酒吹出个22，就罚款吗？'))
    print("\n666666\n")  # visual separator between runs
    # print(response_7('60多的老头骑电动三轮，喝了酒吹出个22，就罚款吗？'))
    # print("\n666666\n")
    # print(response_7('8u们怎么评价外卖空包这个事？'))