import requests
from typing import Dict, Any, Optional, List, Union, Iterator
import json
import time
from config import MEDAI_CONFIG
from llm.model_interface import ModelInterface
from utils.model_logger import ModelLogger
from .system_prompt_store import system_prompt_store

class MedAIClient(ModelInterface):
    """HTTP client for the MedAI model service.

    The service replies in an SSE-like framing: each line is ``data:<json>``
    and the stream ends with ``data:[DONE]``. Judging by the parsing below,
    this framing is used even for non-streaming calls. Only chunks whose
    ``thinkType`` equals 2 carry answer text; all other chunks are ignored.
    """

    # SSE payload prefix; the payload begins after it (an optional space
    # after the colon is allowed by the SSE format and is tolerated here).
    _SSE_PREFIX = "data:"

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """Initialize from *config*, falling back to the global MEDAI_CONFIG.

        Required keys: ``base_url``, ``api_key``, ``app_id``.
        Optional keys: ``default_model``, ``timeout`` (seconds, default 120),
        ``temperature`` (default 0.2).
        """
        super().__init__()
        self.config = config or MEDAI_CONFIG
        self.base_url = self.config['base_url']
        self.api_key = self.config['api_key']
        self.app_id = self.config['app_id']
        self.default_model = self.config.get('default_model', None)
        self.timeout = self.config.get('timeout', 120)
        self.temperature = self.config.get('temperature', 0.2)
        self.logger = ModelLogger()

    def _get_headers(self, system_prompt_id: Optional[str] = None) -> Dict[str, str]:
        """Build request headers.

        The system prompt is not sent in the message list; it is referenced
        by id through the ``Extras`` header (see :meth:`chat`).
        """
        headers = {
            "Api-Key": self.api_key,
            "App-Id": self.app_id,
            "Content-Type": "application/json"
        }
        if system_prompt_id:
            headers["Extras"] = f"system_prompt_id={system_prompt_id}"
        return headers

    def _prepare_payload(self, messages: List[Dict[str, str]], temperature: Optional[float] = None, is_stream: bool = False, **kwargs) -> Dict[str, Any]:
        """Assemble the JSON request body; extra kwargs are passed through verbatim."""
        payload = {
            "messages": messages,
            "isStream": is_stream,
            "temperature": temperature if temperature is not None else self.temperature
        }
        payload.update(kwargs)
        return payload

    @staticmethod
    def _parse_sse_line(line: str):
        """Parse one SSE line into ``(done, text)``.

        *done* is True for the ``[DONE]`` terminator. *text* is the
        ``result`` of a ``thinkType == 2`` chunk, or None for anything
        else (non-data lines, other chunk types, malformed JSON).
        Stripping the payload fixes the previous miss of ``data: [DONE]``
        when the service emits the standard space after the colon.
        """
        if not line or not line.startswith(MedAIClient._SSE_PREFIX):
            return False, None
        data = line[len(MedAIClient._SSE_PREFIX):].strip()
        if data == "[DONE]":
            return True, None
        try:
            obj = json.loads(data)
        except (ValueError, TypeError):
            # Malformed chunk: skip rather than abort the whole response.
            return False, None
        if obj.get("thinkType") == 2:
            return False, obj.get("result", "")
        return False, None

    def generate(self, prompt: str, model: Optional[str] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, stream: bool = False, **kwargs) -> Union[str, Iterator[str]]:
        """Compatibility shim: wrap *prompt* as a single user message and delegate to chat()."""
        messages = [{"role": "user", "content": prompt}]
        return self.chat(messages, model=model, max_tokens=max_tokens, temperature=temperature, top_p=top_p, stream=stream, **kwargs)

    def chat(self, messages: List[Dict[str, str]], model: Optional[str] = None, max_tokens: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, stream: bool = False, **kwargs) -> Union[str, Iterator[str]]:
        """Run a chat completion.

        Returns the concatenated answer text, or an iterator of text chunks
        when ``stream`` is True. Raises ``Exception`` with the service error
        message on failure.

        NOTE(review): ``model``, ``max_tokens`` and ``top_p`` are accepted
        for interface compatibility but are not forwarded to the service —
        confirm whether the backend supports them.
        """
        # Extract the first system message and ship it out-of-band: it is
        # stored in the global prompt store and referenced via the Extras
        # header instead of travelling in the message list.
        system_prompt = None
        chat_messages = []
        for msg in messages:
            if msg.get("role") == "system" and system_prompt is None:
                system_prompt = msg.get("content", "")
            else:
                chat_messages.append(msg)
        system_prompt_id = None
        if system_prompt:
            system_prompt_id = system_prompt_store.save(system_prompt)

        url = self.base_url
        payload = self._prepare_payload(chat_messages, temperature=temperature, is_stream=stream, **kwargs)
        headers = self._get_headers(system_prompt_id)

        if stream:
            # _stream_chat is a generator: its body (and its error handling /
            # logging) only runs once the caller starts iterating, so a
            # try/except here would never fire — it handles errors itself.
            return self._stream_chat(url, payload, headers)

        start_time = time.time()
        try:
            response = requests.post(url, json=payload, headers=headers, timeout=self.timeout)
            response.raise_for_status()
            # Even non-streaming responses arrive in SSE framing; collect
            # only the thinkType==2 answer chunks.
            results = []
            for line in response.text.splitlines():
                done, text = self._parse_sse_line(line)
                if done:
                    break
                if text is not None:
                    results.append(text)
            response_text = "".join(results)
            duration = time.time() - start_time
            self.logger.log_model_call(
                call_type='chat',
                request=payload,
                response=response_text,
                duration=duration
            )
            return response_text
        except Exception as e:
            duration = time.time() - start_time
            self.logger.log_model_call(
                call_type='chat',
                request=payload,
                error=str(e),
                duration=duration
            )
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"ModelService API 调用失败: {str(e)}") from e

    def _stream_chat(self, url: str, payload: Dict[str, Any], headers: Dict[str, str]) -> Iterator[str]:
        """Yield answer chunks (``thinkType == 2`` results) from a streaming call.

        Exactly one log record is written per call: a success record with
        the full concatenated response, or an error record. (Previously the
        success log lived in ``finally`` and was ALSO emitted after the
        error log on failure.)
        """
        start_time = time.time()
        full_response = ""
        try:
            response = requests.post(url, json=payload, headers=headers, stream=True, timeout=self.timeout)
            response.raise_for_status()
            for raw in response.iter_lines():
                if not raw:
                    continue
                try:
                    line = raw.decode('utf-8')
                except UnicodeDecodeError:
                    # Undecodable line: skip it, as the original blanket
                    # except did, rather than killing the stream.
                    continue
                done, text = self._parse_sse_line(line)
                if done:
                    break
                if text is not None:
                    full_response += text
                    yield text
        except Exception as e:
            duration = time.time() - start_time
            self.logger.log_model_call(
                call_type='chat',
                request=payload,
                error=str(e),
                duration=duration
            )
            raise Exception(f"ModelService 流式对话失败: {str(e)}") from e
        else:
            duration = time.time() - start_time
            self.logger.log_model_call(
                call_type='chat',
                request=payload,
                response=full_response,
                duration=duration
            )

    def _fetch_remote_models(self) -> List[str]:
        """List model ids from the service's ``/models`` endpoint.

        Assumes the platform exposes ``GET {base_url}/models`` returning
        ``{"data": [{"id": ...}, ...]}``. On any failure (network, bad
        status, unexpected schema) falls back to ``[default_model]`` — or
        an empty list when no default is configured — so callers always
        get a usable list.
        """
        try:
            url = f"{self.base_url}/models"
            headers = self._get_headers()
            response = requests.get(url, headers=headers, timeout=self.timeout)
            response.raise_for_status()
            all_models = response.json().get('data', [])
            return [model['id'] for model in all_models]
        except Exception as e:
            print(f"Error fetching ModelService models: {str(e)}")
            return [self.default_model] if self.default_model else []