#!/usr/bin/env python3
import argparse
import requests
import json
import sys
import readline  # enables line-editing, input history, and completion for input()

# Default configuration
OLLAMA_API_URL = "http://localhost:11434/api/generate"
DEFAULT_MODEL = "deepseek-coder-v2"
DEFAULT_TEMPERATURE = 0.3
DEFAULT_MAX_TOKENS = 512

def query_ollama(prompt, model=DEFAULT_MODEL, temperature=DEFAULT_TEMPERATURE, max_tokens=DEFAULT_MAX_TOKENS):
    """Send a prompt to the Ollama API and return the model's response text.

    Args:
        prompt: Text prompt forwarded to the model.
        model: Ollama model name (default: DEFAULT_MODEL).
        temperature: Sampling temperature (default: DEFAULT_TEMPERATURE).
        max_tokens: Maximum number of tokens to generate (default: DEFAULT_MAX_TOKENS).

    Returns:
        The "response" field of the API reply, or a fallback message if absent.

    Exits the process with status 1 on any network/HTTP error.
    """
    payload = {
        "model": model,
        "prompt": prompt,
        # Ollama's /api/generate reads sampling parameters from the "options"
        # object ("num_predict" limits generated tokens); top-level
        # "temperature"/"max_tokens" keys are silently ignored by the server.
        "options": {
            "temperature": temperature,
            "num_predict": max_tokens,
        },
        "stream": False
    }
    try:
        # Explicit timeout so a hung or unreachable server cannot block the CLI forever.
        response = requests.post(OLLAMA_API_URL, json=payload, timeout=300)
        response.raise_for_status()
        return response.json().get("response", "No response from model")
    except requests.RequestException as e:
        print(f"Error communicating with Ollama: {e}", file=sys.stderr)
        sys.exit(1)

def interactive_mode(model, temperature, max_tokens):
    """Run a REPL: repeatedly read prompts from stdin and print model responses.

    Commands (leading/trailing whitespace is ignored):
        /quit  - leave the REPL
        /clear - clear the readline input history
    Ctrl-C or Ctrl-D (EOF) also exits the loop.

    Args:
        model: Ollama model name passed through to query_ollama.
        temperature: Sampling temperature passed through to query_ollama.
        max_tokens: Max token count passed through to query_ollama.
    """
    print("进入交互模式 (输入 '/quit' 退出, '/clear' 清空历史):")
    while True:
        try:
            prompt = input(">>> ")
            # Compare the stripped form so commands with stray whitespace
            # (e.g. "/quit ") are recognized instead of being sent to the model.
            command = prompt.strip().lower()
            if command == "/quit":
                print("退出交互模式")
                break
            elif command == "/clear":
                readline.clear_history()
                print("输入历史已清空")
                continue
            elif not prompt.strip():
                # Blank line: prompt again without querying the model.
                continue
            response = query_ollama(prompt, model, temperature, max_tokens)
            print(response)
        except KeyboardInterrupt:
            print("\n退出交互模式")
            break
        except EOFError:
            print("\n退出交互模式")
            break

def main():
    """Parse command-line arguments and dispatch to the requested mode.

    With -i, start the interactive REPL; with -p, run a single query and
    print the result; with neither, show usage and exit with status 1.
    """
    parser = argparse.ArgumentParser(
        description="DeepSeek-Coder-V2 CLI: 与本地 DeepSeek-Coder-V2 模型交互",
        epilog="示例: deepseek_cli.py -p '写一个 Python 快速排序' 或 deepseek_cli.py -i",
    )
    parser.add_argument("-p", "--prompt", help="单次查询的提示文本")
    parser.add_argument("-i", "--interactive", action="store_true", help="进入交互模式")
    parser.add_argument("-m", "--model", default=DEFAULT_MODEL, help=f"模型名称 (默认: {DEFAULT_MODEL})")
    parser.add_argument("-t", "--temperature", type=float, default=DEFAULT_TEMPERATURE, help=f"生成温度 (默认: {DEFAULT_TEMPERATURE})")
    parser.add_argument("-n", "--max-tokens", type=int, default=DEFAULT_MAX_TOKENS, help=f"最大生成令牌数 (默认: {DEFAULT_MAX_TOKENS})")

    args = parser.parse_args()

    # Guard-style dispatch: each mode returns immediately once handled.
    if args.interactive:
        interactive_mode(args.model, args.temperature, args.max_tokens)
        return
    if args.prompt:
        print(query_ollama(args.prompt, args.model, args.temperature, args.max_tokens))
        return
    # No mode selected: show usage and signal failure to the shell.
    parser.print_help()
    sys.exit(1)

if __name__ == "__main__":
    main()
