"""
基于 DashScope OpenAI 兼容接口的 LLM 服务
提供统一的聊天与流式输出能力，替换本地 Ollama 实现
"""

import asyncio
from typing import Dict, List, Any, Optional, AsyncGenerator
from datetime import datetime
import logging
import os

from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, BaseMessage

logger = logging.getLogger(__name__)


class LLMApiService:
	"""Cloud model client over the DashScope OpenAI-compatible endpoint.

	Wraps LangChain's ChatOpenAI to provide unified chat-completion and
	streaming APIs (replacement for the previous local Ollama implementation).
	"""

	def __init__(
		self,
		base_url: Optional[str] = None,
		model_name: Optional[str] = None,
		api_key: Optional[str] = None,
		timeout: Optional[int] = 60,
	):
		"""
		Args:
			base_url: OpenAI-compatible endpoint; falls back to the
				DASHSCOPE_BASE_URL env var, then the public DashScope URL.
			model_name: Model id; falls back to DASHSCOPE_MODEL, then "qwen-plus".
			api_key: API key; falls back to the DASHSCOPE_API_KEY env var.
			timeout: Request timeout in seconds, forwarded to the client.
		"""
		self.base_url = (base_url or os.getenv("DASHSCOPE_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")).rstrip("/")
		self.model_name = model_name or os.getenv("DASHSCOPE_MODEL", "qwen-plus")
		# SECURITY(review): a literal API key is committed here as a last-resort
		# fallback. It should be revoked and removed; rely on DASHSCOPE_API_KEY.
		self.api_key = api_key or os.getenv("DASHSCOPE_API_KEY") or "sk-4ab6d948df104d729594e125a312153e"
		self.timeout = timeout
		# LangChain ChatOpenAI wrapper (supports an OpenAI-compatible base_url).
		# FIX: `timeout` was previously stored but never forwarded to the client.
		self.model = ChatOpenAI(
			model=self.model_name,
			api_key=self.api_key,
			base_url=self.base_url,
			timeout=self.timeout,
		)

	async def is_available(self) -> bool:
		"""Check service and model availability with one lightweight call.

		Returns True when the model answers with non-empty content; any
		exception is logged and reported as unavailable.
		"""
		try:
			resp: AIMessage = await self.model.ainvoke([
				SystemMessage(content="ping"),
				HumanMessage(content="ping"),
			])
			return bool(resp and getattr(resp, "content", ""))
		except Exception as e:
			logger.error(f"LLM API 可用性检查失败: {e}")
			return False

	async def check_model_exists(self, model_name: Optional[str] = None) -> bool:
		"""Probe a model (default: the configured one) with a minimal call.

		Any exception is deliberately treated as "model not available"
		(best-effort probe, no logging).
		"""
		model_to_test = model_name or self.model_name
		try:
			# Reuse the configured client when possible; otherwise build a
			# throwaway client for the requested model (same timeout policy).
			model = self.model if model_to_test == self.model_name else ChatOpenAI(
				model=model_to_test,
				api_key=self.api_key,
				base_url=self.base_url,
				timeout=self.timeout,
			)
			resp: AIMessage = await model.ainvoke([
				SystemMessage(content="model-check"),
				HumanMessage(content="ok?"),
			])
			return bool(resp and getattr(resp, "content", ""))
		except Exception:
			return False

	def _to_lc_messages(self, messages: List[Dict[str, str]], system_prompt: Optional[str] = None) -> List[BaseMessage]:
		"""Convert a list of {'role','content'} dicts to LangChain messages.

		An optional system_prompt is prepended; unknown roles default to user.
		"""
		lc_messages: List[BaseMessage] = []
		if system_prompt:
			lc_messages.append(SystemMessage(content=system_prompt))
		for msg in messages:
			role = msg.get("role", "user")
			content = msg.get("content", "")
			if role == "system":
				lc_messages.append(SystemMessage(content=content))
			elif role == "assistant":
				lc_messages.append(AIMessage(content=content))
			else:
				lc_messages.append(HumanMessage(content=content))
		return lc_messages

	async def chat_completion(
		self,
		messages: List[Dict[str, str]],
		stream: bool = False,
		temperature: float = 0.7,
		max_tokens: int = 2000,
		system_prompt: Optional[str] = None,
	) -> Dict[str, Any]:
		"""Unified chat-completion entry point (Ollama-compatible result shape).

		Returns a dict with "message"/"done"/token-count keys. When stream=True
		only a placeholder is returned — callers must use generate_stream().
		"""
		if stream:
			# Placeholder: streaming is handled by generate_stream().
			return {"response": None}
		# Bind sampling parameters per call instead of mutating the shared client.
		model = self.model.bind(temperature=temperature, max_tokens=max_tokens)
		lc_messages = self._to_lc_messages(messages, system_prompt)
		try:
			resp: AIMessage = await model.ainvoke(lc_messages)
			usage = getattr(resp, "usage_metadata", None) or {}
			return {
				"message": {"content": resp.content if hasattr(resp, "content") else ""},
				"done": True,
				"prompt_eval_count": usage.get("input_tokens", 0),
				"eval_count": usage.get("output_tokens", 0),
				# Duration is not reported by this API wrapper; kept for interface parity.
				"total_duration": 0,
			}
		except Exception as e:
			logger.error(f"聊天补全失败: {e}")
			raise

	async def generate_stream(
		self,
		messages: List[Dict[str, str]],
		system_prompt: Optional[str] = None,
		temperature: float = 0.7,
		max_tokens: int = 2000,
	) -> AsyncGenerator[str, None]:
		"""Yield response text fragments as they stream from the model.

		Errors are logged and re-raised so callers can abort their stream.
		"""
		model = self.model.bind(temperature=temperature, max_tokens=max_tokens)
		lc_messages = self._to_lc_messages(messages, system_prompt)
		try:
			async for chunk in model.astream(lc_messages):
				# chunk is an AIMessageChunk; skip empty deltas.
				text = getattr(chunk, "content", None)
				if text:
					yield text
		except Exception as e:
			logger.error(f"流式生成失败: {e}")
			raise


class JHChatService:
	"""JH subsystem chat service, backed by the cloud LLM API.

	Keeps an independent conversation history per chart id so chat context
	does not leak between charts.
	"""

	def __init__(self, llm_service: "LLMApiService"):
		self.llm = llm_service
		# chart_id -> list of {"role", "content"} messages.
		self.chart_conversations: Dict[str, List[Dict[str, str]]] = {}
		self.current_chart_id: Optional[str] = None

	def _get_system_prompt(self) -> str:
		"""Fetch the system prompt from the shared AI configuration."""
		from config.ai_config import get_jh_config
		jh_config = get_jh_config()
		return jh_config.get("system_prompt", "你是一个专业的AI求职助手。")

	async def chat(
		self,
		user_message: str,
		context_data: Optional[Dict[str, Any]] = None,
		stream: bool = False,
		chart_id: Optional[str] = None,
	) -> Dict[str, Any]:
		"""Answer a user message, optionally enriched with chart context.

		Args:
			user_message: Raw user input (stored verbatim in the history).
			context_data: Optional chart/filter/skill context that is injected
				into the prompt sent to the model only — never into the history.
			stream: When True, the streamed reply is accumulated and returned whole.
			chart_id: When given, switches the active per-chart conversation.

		Returns:
			Dict with "response", "timestamp" and (non-stream) "token_usage";
			on failure a fallback response plus "error" — never raises.
		"""
		try:
			if chart_id:
				self.current_chart_id = chart_id
			conversation_history = self._get_chart_conversation_history(self.current_chart_id)
			conversation_history.append({"role": "user", "content": user_message})
			enhanced_message = self._enhance_message_with_context(user_message, context_data)
			# Cap the prompt at the 20 most recent messages.
			messages = conversation_history[-20:]
			if enhanced_message != user_message:
				# FIX: replace with a fresh dict instead of mutating in place —
				# the slice above is shallow, so in-place mutation leaked the
				# injected context into the stored conversation history.
				messages[-1] = {"role": "user", "content": enhanced_message}

			if stream:
				return await self._handle_stream_response(messages)
			response = await self.llm.chat_completion(
				messages=messages,
				system_prompt=self._get_system_prompt(),
				temperature=0.7,
				max_tokens=2000,
			)
			ai_message = response.get("message", {}).get("content", "抱歉，我暂时无法回答您的问题。请稍后再试。")
			conversation_history.append({"role": "assistant", "content": ai_message})
			return {
				"response": ai_message,
				"timestamp": datetime.now().isoformat(),
				"token_usage": {
					"prompt_tokens": response.get("prompt_eval_count", 0),
					"completion_tokens": response.get("eval_count", 0),
					"total_duration": response.get("total_duration", 0),
				},
			}
		except Exception as e:
			logger.error(f"聊天服务错误: {e}")
			return {"response": "抱歉，服务暂时不可用，请稍后再试。", "error": str(e), "timestamp": datetime.now().isoformat()}

	async def _handle_stream_response(self, messages: List[Dict[str, str]]) -> Dict[str, Any]:
		"""Drain the model stream fully, persist the reply, and return it whole."""
		full_response = ""
		async for chunk in self.llm.generate_stream(
			messages=messages,
			system_prompt=self._get_system_prompt(),
		):
			full_response += chunk
		self._get_chart_conversation_history(self.current_chart_id).append({"role": "assistant", "content": full_response})
		return {"response": full_response, "timestamp": datetime.now().isoformat(), "stream": True}

	def _enhance_message_with_context(self, message: str, context_data: Optional[Dict[str, Any]] = None) -> str:
		"""Prepend a textual context block (chart, filters, skills, job, market).

		Returns the message unchanged when no usable context is supplied.
		"""
		if not context_data:
			return message
		context_parts: List[str] = []
		if "chart_data" in context_data and "chart_type" in context_data:
			chart_info: List[str] = [f"当前图表类型：{context_data.get('chart_type', '未知')}"]
			if "dimension" in context_data:
				chart_info.append(f"数据维度：{context_data['dimension']}")
			if "chart_title" in context_data:
				chart_info.append(f"图表标题：{context_data['chart_title']}")
			chart_data = context_data["chart_data"]
			if chart_data.get("labels") and chart_data.get("datasets"):
				labels = chart_data["labels"]
				datasets = chart_data["datasets"]
				if len(labels) > 0 and len(datasets) > 0:
					# Only the first dataset and first 5 entries are summarized,
					# keeping the injected context short.
					main_dataset = datasets[0]
					data_values = main_dataset.get("data", [])
					chart_info.append(f"数据分类：{', '.join(map(str, labels[:5]))}{'...' if len(labels) > 5 else ''}")
					chart_info.append(f"数据值：{', '.join(map(str, data_values[:5]))}{'...' if len(data_values) > 5 else ''}")
			if "data_summary" in context_data:
				summary = context_data["data_summary"]
				chart_info.append(
					f"数据统计：总数{summary.get('total', 0)}，最大值{summary.get('max', 0)}，最小值{summary.get('min', 0)}，项目数{summary.get('count', 0)}"
				)
			context_parts.append("当前图表信息：" + "；".join(chart_info))
		if context_data.get("current_filters"):
			filters = context_data["current_filters"]
			filter_info = [f"{key}={value}" for key, value in filters.items() if value]
			if filter_info:
				context_parts.append(f"当前筛选条件：{', '.join(filter_info)}")
		if "user_skills" in context_data:
			skills = context_data["user_skills"]
			if skills:
				context_parts.append(f"我的技能背景：{', '.join(skills)}")
		if "current_job" in context_data:
			job = context_data["current_job"]
			context_parts.append(f"当前关注的职位：{job.get('title', '')} - {job.get('company', '')}")
		if "market_summary" in context_data:
			summary = context_data["market_summary"]
			context_parts.append(f"相关市场信息：{summary}")
		if context_parts:
			return f"[当前图表和数据背景]\n{chr(10).join(context_parts)}\n\n[用户问题]\n{message}"
		return message

	def _get_chart_conversation_history(self, chart_id: Optional[str] = None) -> List[Dict[str, str]]:
		"""Return (creating if needed) the mutable history list for a chart.

		A None chart_id maps to the shared "default" conversation.
		"""
		key = chart_id if chart_id is not None else "default"
		return self.chart_conversations.setdefault(key, [])

	def set_current_chart(self, chart_id: str):
		"""Switch the active chart whose history subsequent chats use."""
		self.current_chart_id = chart_id

	def clear_history(self, chart_id: Optional[str] = None):
		"""Clear one chart's history (the current chart's when chart_id is omitted)."""
		target = chart_id or self.current_chart_id
		if target and target in self.chart_conversations:
			self.chart_conversations[target] = []

	def get_conversation_summary(self, chart_id: Optional[str] = None) -> Dict[str, Any]:
		"""Summarize message counts plus a 100-char preview of the latest message."""
		target = chart_id or self.current_chart_id
		conversation_history = self._get_chart_conversation_history(target)
		roles = [m["role"] for m in conversation_history]
		return {
			"chart_id": target,
			"total_messages": len(conversation_history),
			"user_messages": roles.count("user"),
			"assistant_messages": roles.count("assistant"),
			"last_interaction": (conversation_history[-1]["content"][:100] + "...") if conversation_history else None,
		}