# stdlib
import json
import logging
import time
from dataclasses import dataclass
from multiprocessing import Process, Queue, cpu_count, Event
from queue import Empty
from typing import Optional, Dict

# third-party
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# --- 配置类 ---
@dataclass
class AppConfig:
    OLLAMA_ENDPOINT: str = "http://localhost:11434/api/generate"
    CHAT_MODEL: str = "gemma3:4b"  # 仅保留对话模型
    MAX_RETRIES: int = 3
    BACKOFF_FACTOR: float = 0.5
    TIMEOUT: int = 300
    CHUNK_DELAY: float = 0.03  # 控制输出流速度

# --- 日志配置 ---
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

# --- HTTP客户端配置 ---
class APIClient:
    def __init__(self, config: AppConfig):
        self.config = config
        self.session = requests.Session()

        # 配置重试策略
        retry_strategy = Retry(
            total=self.config.MAX_RETRIES,
            backoff_factor=self.config.BACKOFF_FACTOR,
            status_forcelist=[408, 429, 500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

    def post_request(self, json_data: Dict, stream: bool = False) -> Optional[requests.Response]:
        try:
            response = self.session.post(
                self.config.OLLAMA_ENDPOINT,
                json=json_data,
                stream=stream,
                timeout=self.config.TIMEOUT
            )
            response.raise_for_status()
            return response
        except requests.exceptions.RequestException as e:
            logger.error(f"API请求失败: {str(e)}")
            return None

# --- AI生成进程函数 ---
def ai_generation_worker(
        input_queue: Queue,
        output_queue: Queue,
        config: AppConfig,
        stop_event: Event
):
    client = APIClient(config)

    while not stop_event.is_set():
        try:
            if not input_queue.empty():
                prompt = input_queue.get_nowait()
                if prompt == "EXIT":
                    break

                response = client.post_request({
                    "model": config.CHAT_MODEL,
                    "prompt": prompt,
                    "stream": True
                }, stream=True)

                if response is None:
                    output_queue.put(("error", "AI服务不可用"))
                    output_queue.put(("end", ""))
                    continue

                full_response = []
                for chunk in response.iter_lines():
                    if chunk:
                        try:
                            data = json.loads(chunk.decode())
                            if data.get("done", False):
                                break
                            if "response" in data:
                                output_queue.put(("chunk", data["response"]))
                                full_response.append(data["response"])
                        except json.JSONDecodeError as e:
                            logger.warning(f"解析数据块失败: {str(e)}")
                            continue

                output_queue.put(("end", ""))

                if not full_response:
                    output_queue.put(("error", "AI无响应"))
            else:
                time.sleep(0.1)
        except Exception as e:
            logger.error(f"工作进程错误: {str(e)}")
            output_queue.put(("error", f"内部错误: {str(e)}"))
            output_queue.put(("end", ""))

# --- 主控进程 ---
def main():
    config = AppConfig()
    input_queue = Queue(maxsize=10)
    output_queue = Queue(maxsize=20)
    stop_event = Event()

    # 启动AI生成进程
    ai_process = Process(
        target=ai_generation_worker,
        args=(input_queue, output_queue, config, stop_event),
        daemon=True
    )
    ai_process.start()

    print(f"AI: 你好！(双进程模式运行中，CPU核心数: {cpu_count()})")
    print("输入'退出'结束对话")

    try:
        while True:
            user_input = input("\n你: ").strip()
            if user_input.lower() in ("退出", "exit"):
                input_queue.put("EXIT")
                print("AI: 再见！")
                break

            # 发送到对话AI
            input_queue.put(user_input)
            print("AI: ", end="", flush=True)

            # 处理AI响应流
            receiving = True
            while receiving:
                try:
                    if not output_queue.empty():
                        msg_type, content = output_queue.get_nowait()
                        if msg_type == "chunk":
                            print(content, end="", flush=True)
                            time.sleep(config.CHUNK_DELAY)
                        elif msg_type == "error":
                            print(f"\n[错误] {content}")
                            receiving = False
                        elif msg_type == "end":
                            receiving = False
                except Exception as e:
                    logger.warning(f"输出处理错误: {str(e)}")
                    receiving = False
            print()

    except KeyboardInterrupt:
        print("\nAI: 对话被中断")
    finally:
        stop_event.set()
        ai_process.join(timeout=2)
        if ai_process.is_alive():
            ai_process.terminate()
        logger.info("系统已关闭")

if __name__ == "__main__":
    main()