# from dotenv import load_dotenv
from fastapi import requests
from langchain_community.chat_models.tongyi import ChatTongyi

# NOTE(review): the file-level `from fastapi import requests` (L2) binds the
# `fastapi.requests` module (Request class helpers), NOT the HTTP client
# library — calling `requests.get` on it raises AttributeError. Import the
# real `requests` package here so this block works regardless of that line.
import requests

# Stream the answer for a QA query from the local service and print it
# chunk by chunk as it arrives.
response = requests.get("http://127.0.0.1:8000/qa?text=拒保的关键有哪些", stream=True)


if response.status_code == 200:
    # decode_unicode=True makes requests decode incrementally, so a
    # multi-byte UTF-8 character split across a 1024-byte chunk boundary is
    # not corrupted (the original chunk.decode('utf-8') would raise or
    # garble in that case). flush=True so streamed tokens appear immediately.
    response.encoding = "utf-8"
    for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
        print(chunk, end='', flush=True)
else:
    print('请求失败:', response.status_code)
from langchain.prompts import ChatPromptTemplate
from langchain.llms import Tongyi
# load_dotenv()

import os

# SECURITY(review): real-looking API keys are hard-coded and committed to
# source — they should be revoked and loaded from a .env file instead (the
# commented-out load_dotenv() calls suggest that was the original intent).
# setdefault() at least lets a properly configured environment take
# precedence instead of being silently overwritten by the embedded values.
os.environ.setdefault("OPENAI_API_KEY", "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")
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-3a2246e7cb7540878599202ad0cfc324")

# llm = ChatOpenAI(streaming=True,max_tokens=2048)

# Tongyi chat model in streaming mode: tokens are yielded incrementally
# rather than returned as one complete message.
llm = ChatTongyi(streaming=True)

# Prompt template: a fixed system persona plus the user's query slot.
prompt = ChatPromptTemplate.from_messages(
    [("system", "你是一个专业的AI助手。"), ("human", "{query}")]
)

# llm_chain = prompt | llm.bind(model="chatglm3")  # example of .bind() usage
# LCEL pipe: render the prompt, then feed it to the model.
llm_chain = prompt | llm

# Consume the token stream directly, printing each piece as it arrives.
for token in llm_chain.stream({"query": "你是谁？"}):
    print(token.content, end="", flush=True)
print()

