from langchain_community.llms import Tongyi
from langchain.prompts import PromptTemplate
from dotenv import load_dotenv
import os
import dashscope
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chains import ConversationChain
from config import Tongyi_API_KEY
from ini_prompt import user_template_text
from get_dy_hot import main as get_hot_data

# Configure the dashscope SDK's global API key (module-level side effect
# that happens at import time; Tongyi_API_KEY comes from local config.py).
dashscope.api_key = Tongyi_API_KEY




def init_model():
    """Create and return a configured Tongyi (Qwen) LLM client.

    Returns:
        Tongyi: LangChain LLM wrapper using the ``qwen-turbo`` model with
        moderate sampling randomness (temperature 0.7).
    """
    return Tongyi(
        model_name="qwen-turbo",           # Tongyi Qianwen turbo model
        dashscope_api_key=Tongyi_API_KEY,
        temperature=0.7,                   # controls output randomness
    )



def generate_topics(model):
    """Fetch Douyin hot-list data and ask the LLM for topic suggestions.

    Args:
        model: An initialized Tongyi LLM instance (see ``init_model``).

    Returns:
        The LLM's response produced from the hot-list prompt template.
    """
    today_hot, upping_hot = get_hot_data()
    topic_prompt = PromptTemplate(
        template=user_template_text,
        input_variables=["today_hot", "upping_hot"],
    )
    # LCEL pipeline: render the prompt, then feed it to the model.
    pipeline = topic_prompt | model
    return pipeline.invoke({"today_hot": today_hot, "upping_hot": upping_hot})

def main():
    """Initialize the LLM and return generated topic suggestions."""
    return generate_topics(init_model())

if __name__ == '__main__':
    # Reuse main() instead of duplicating the init/generate sequence here,
    # so the entry point and the importable API stay in sync.
    print(main())


