import os
import json
from dotenv import load_dotenv
from openai import OpenAI
from config import my_model, my_messages, my_data_weather,my_system_prompt_weather_tips, my_travel_input_info, my_system_prompt_travel_tips

load_dotenv()

# OpenAI-compatible client pointed at Alibaba Cloud DashScope.
client = OpenAI(
    # If the environment variable is not configured, replace the line below
    # with your Bailian (DashScope) API key: api_key="sk-xxx",
    api_key=os.getenv("DASHSCOPE_API_KEY", default="sk-xxx"),
    base_url=os.getenv("DASHSCOPE_BASE_URL", default="https://dashscope.aliyuncs.com/compatible-mode/v1"),
)

def llm_gen(messages: list[dict] = my_messages, model: str = my_model) -> str:
    """Send a chat request and return the raw completion as a JSON string.

    Args:
        messages: Chat messages in OpenAI format (role/content dicts).
        model: Identifier of the model to query.

    Returns:
        The entire completion response serialized to a JSON string.
    """
    # Qwen3 models control the thinking process via the enable_thinking flag
    # (open-source builds default to True, commercial builds to False).
    # Non-streaming calls against open-source Qwen3 must disable it, or the
    # request fails with an error.
    request_kwargs = {
        "model": model,
        "messages": messages,
        "extra_body": {"enable_thinking": False},
    }
    completion = client.chat.completions.create(**request_kwargs)
    return completion.model_dump_json()

def llm_gen_weather_tips(data_weather: dict = my_data_weather, system_prompt_weather_tips: str = my_system_prompt_weather_tips) -> dict:
    """Ask the LLM for weather tips and return the parsed result.

    Args:
        data_weather: Weather data passed verbatim (stringified) as the user turn.
        system_prompt_weather_tips: System prompt steering the model's output.

    Returns:
        The assistant's reply parsed from JSON into a dict.
    """
    prompt_messages = [
        {"role": "system", "content": system_prompt_weather_tips},
        {"role": "user", "content": str(data_weather)},
    ]
    raw_response = llm_gen(messages=prompt_messages)
    # The assistant's message content is itself expected to be a JSON document,
    # hence the second json.loads pass.
    content = json.loads(raw_response)['choices'][0]['message']['content']
    tips = json.loads(content)
    print(tips)
    return tips

def llm_gen_travel_tips(travel_input_info: dict = my_travel_input_info, system_prompt_travel_tips: str = my_system_prompt_travel_tips) -> dict:
    """Ask the LLM for travel tips and return the parsed result.

    Args:
        travel_input_info: Trip details passed verbatim (stringified) as the user turn.
        system_prompt_travel_tips: System prompt steering the model's output.

    Returns:
        The assistant's reply parsed from JSON into a dict.
    """
    prompt_messages = [
        {"role": "system", "content": system_prompt_travel_tips},
        {"role": "user", "content": str(travel_input_info)},
    ]
    raw_response = llm_gen(messages=prompt_messages)
    # The assistant's message content is itself expected to be a JSON document,
    # hence the second json.loads pass.
    content = json.loads(raw_response)['choices'][0]['message']['content']
    tips = json.loads(content)
    print(tips)
    return tips


if __name__ == "__main__":
    # Run the travel-tips flow by default; swap in the calls below to try
    # the other entry points.
    llm_gen_travel_tips()
    # llm_gen_weather_tips()
    # llm_gen()

