import os

from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.llms.tongyi import Tongyi

# Route all HTTP(S) traffic through the local proxy.
# BUG FIX: the variable names honored by urllib/requests/httpx are
# "http_proxy"/"https_proxy" (underscores). The original hyphenated
# names ("http-proxy") are ignored by every HTTP client, so the proxy
# was silently never used.
os.environ["http_proxy"] = "http://127.0.0.1:7890"
os.environ["https_proxy"] = "http://127.0.0.1:7890"

# Enable LangSmith tracing for LangChain runs.
os.environ["LANGCHAIN_TRACING_V2"] = "true"
# SECURITY: hard-coded API keys committed to source control. These keys
# should be rotated and loaded from the environment / a .env file
# (e.g. python-dotenv) instead of being embedded here.
os.environ["LANGCHAIN_API_KEY"] = "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"

# SECURITY: same concern — Tavily search API key embedded in source.
os.environ["TAVILY_API_KEY"] = "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"

# Chat model: Alibaba Cloud Tongyi (Qwen) via DashScope.
# Other options: qwen-plus, qwen-max, qwen-omni-turbo, qwen2.5-omni-7b.
model = ChatTongyi(
    model_name="qwen-max",
    temperature=0.7,  # controls generation diversity
    # Read from the environment rather than hard-coding; None here makes
    # ChatTongyi fall back to its own DASHSCOPE_API_KEY lookup.
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    streaming=True,  # enable streamed token output
)

# Embedding model served by Alibaba Cloud DashScope.
_dashscope_key = os.getenv("DASHSCOPE_API_KEY")
embedding = DashScopeEmbeddings(
    model="text-embedding-v1",  # DashScope-hosted embedding model
    dashscope_api_key=_dashscope_key,
)
