from typing import Optional, Any

import torch
from transformers import AutoTokenizer, AutoModel, AutoModelForCausalLM, BitsAndBytesConfig, TextIteratorStreamer
from langchain.llms.base import LLM
from langchain_core.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from threading import Thread
import langchain_core.messages


def get_messages(messages):
    """Convert LangChain message objects into OpenAI-style role/content dicts.

    Parameters
    ----------
    messages : iterable
        An iterable of langchain_core message objects. Only
        ``SystemMessage`` and ``HumanMessage`` instances are converted;
        any other item type is silently skipped (original behavior).

    Returns
    -------
    list[dict]
        Dicts of the form ``{"role": "system"|"user", "content": str}``.
    """
    result = []
    for item in messages:
        # Map LangChain message classes onto chat-completion role names.
        if isinstance(item, langchain_core.messages.system.SystemMessage):
            result.append({"role": "system", "content": item.content})
        elif isinstance(item, langchain_core.messages.human.HumanMessage):
            result.append({"role": "user", "content": item.content})
    return result


# Release cached GPU allocator memory at import time so the model loading
# below starts from a clean state. Guarded so CPU-only machines never
# touch the CUDA runtime.
if torch.cuda.is_available():
    torch.cuda.empty_cache()

if __name__ == '__main__':
    # Prompt template: a fixed system role plus a user slot filled at runtime.
    prompt = ChatPromptTemplate.from_messages([
        ("system", "你是一个面馆点餐机器人"),
        ("user", "{input}")
    ])

    # BUG FIX: the template object itself is not a sequence of message
    # instances — iterating it yields pydantic field internals, so
    # get_messages() skipped everything and always returned []. Format the
    # template first so get_messages() receives real SystemMessage /
    # HumanMessage objects.
    messages = prompt.format_messages(input="你好")
    res = get_messages(messages)
    print(res)
