import os
from sshtunnel import SSHTunnelForwarder
from langchain_ollama import ChatOllama
from dotenv import load_dotenv

# Load environment variables from a local .env file, then require the
# SSH password to be present before attempting any connection.
load_dotenv()

ssh_password = os.environ.get("SSH_PASSWORD")
if not ssh_password:
    raise EnvironmentError("请设置环境变量 SSH_PASSWORD")

# Forward the remote host's Ollama service (port 11434) through an SSH
# tunnel to a local port, then stream a chat completion through it.
with SSHTunnelForwarder(
    ssh_address_or_host=("120.55.54.59", 2222),
    ssh_username="wch",
    ssh_password=ssh_password,
    remote_bind_address=("localhost", 11434),  # Ollama's default port on the remote host
    local_bind_address=("localhost", 11435),   # forwarded locally to port 11435
) as tunnel:
    print("SSH 隧道已建立，正在通过本地端口访问远程 Ollama 服务...")

    # BUG FIX: the original hard-coded base_url to port 11434, but the tunnel
    # binds locally to 11435 — requests would bypass the tunnel (or fail).
    # Use the forwarder's actual local bind port so client and tunnel always agree.
    llm = ChatOllama(
        model="qwen3:1.7b",
        base_url=f"http://localhost:{tunnel.local_bind_port}",
    )

    # Stream the reply token-by-token to stdout without trailing newlines.
    stream_result = llm.stream(" ollama 有支持minimax语音模型么？")
    for chunk in stream_result:
        print(chunk.content, end="", flush=True)
