import asyncio
import time
from typing import Dict, List, Any, Optional, Tuple, Union
from datetime import datetime, timedelta
from sqlmodel import Session, select
from app.database import engine
from app.models.wikidata_models import WikidataEntity, WikidataSparqlModel
from .base import AsyncDataService, ConcurrentAsyncDataService
import httpx
import hashlib
import os

class WikidataQueryEntityByQidAsyncService(AsyncDataService[str, Dict[str, Any]]):
    """
    Async service that resolves Wikidata entities by QID.

    Results are cached in the local database (``WikidataEntity``); cache
    misses are fetched in batches from the Wikidata ``wbgetentities`` API.
    Built on asyncio so callers can submit tasks and await results.
    """

    def _get_query_key(self, query_param: str) -> str:
        """The QID itself serves as the cache key."""
        return query_param

    async def _check_cache(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """
        Look up a cached entity.

        Args:
            query_param: the QID to look up
            cache_ttl: maximum age of a cache entry, in seconds

        Returns:
            The cached entity dict if present and fresh; otherwise None.
        """
        # Database access is blocking, so run it in a worker thread.
        return await asyncio.to_thread(self._check_cache_sync, query_param, cache_ttl)

    def _check_cache_sync(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """Synchronous cache lookup, executed in a thread pool."""
        with Session(engine) as session:
            statement = select(WikidataEntity).where(WikidataEntity.qid == query_param)
            result = session.exec(statement).first()

            if result:
                # Entries older than cache_ttl seconds are treated as expired.
                now = datetime.utcnow()
                if (now - result.updated_at).total_seconds() > cache_ttl:
                    return None

                # Convert the SQLModel row to a plain dict, dropping
                # bookkeeping columns.
                return result.dict(exclude={"created_at", "updated_at", "id"})

        return None

    async def _fetch_data(self, query_params: List[str]) -> List[Dict[str, Any]]:
        """
        Fetch entity data from the Wikidata API.

        Args:
            query_params: list of QIDs to fetch

        Returns:
            One result dict per input QID, in the original order. QIDs the
            API did not return are mapped to an error result (data=None).
        """
        # Use an empty dict (not a list) as the fallback so .get() below is
        # always valid regardless of input.
        results_by_qid = await self._bulk_fetch_wikidata_entity_by_qid(query_params) if query_params else {}

        # Re-order results to match the original query order.
        results = []
        for param in query_params:
            obj = results_by_qid.get(param)
            if obj:
                results.append(obj)
            else:
                # No match from the API: record an explicit error result.
                results.append(self._create_error_result(param))

        return results

    async def _bulk_fetch_wikidata_entity_by_qid(self, qid_list: List[str],
    ) -> Dict[str, Dict[str, Any]]:
        """
        Fetch Wikidata entities in batches via the ``wbgetentities`` API.

        Args:
            qid_list: QIDs to fetch (duplicates are removed before querying)

        Returns:
            Mapping of QID -> {"qid": ..., "data": raw entity payload}.

        Raises:
            httpx.RequestError: when a batch fails after all retries.
        """
        BATCH_SIZE = self.batch_size
        base_url = "https://www.wikidata.org/w/api.php"
        results: Dict[str, Dict[str, Any]] = {}

        qid_list = list(set(qid_list))
        for i in range(0, len(qid_list), BATCH_SIZE):
            batch_ids = qid_list[i : i + BATCH_SIZE]

            params = {
                "action": "wbgetentities",
                "format": "json",
                "ids": "|".join(batch_ids),
            }
            # Retry transient transport errors up to 10 times, recreating the
            # HTTP client between attempts. HTTP status errors (from
            # raise_for_status) are deliberately not retried.
            for _ in range(10):
                try:
                    r1 = await self.httpx_client.get(base_url, params=params)
                    r1.raise_for_status()
                    break
                except httpx.RequestError as e:
                    self.logger.warning(f"Request warning: {e}, titles={batch_ids }")
                    self.init_http_client()
                    continue
            else:
                raise httpx.RequestError("Max retries exceeded")

            # Use a distinct name: the original rebound `data` while iterating
            # `data.items()`, which only worked by accident of iterator binding.
            entities = r1.json().get("entities", {})

            for qid, entity_data in entities.items():
                results[qid] = {
                    "qid": qid,
                    "data": entity_data,
                }
        return results

    def _create_error_result(self, query_param: str) -> Dict[str, Any]:
        """Build the placeholder result for a QID the API did not return."""
        return {
            "qid": query_param,
            "data": None,
        }

    async def _save_to_cache(self, query_param: str, data: Dict[str, Any]) -> None:
        """
        Persist a result to the cache without blocking the caller.

        Args:
            query_param: the QID being cached
            data: the result dict to store
        """
        # Fire-and-forget: the DB write happens in a background task.
        asyncio.create_task(self._save_to_cache_background(query_param, data))

    async def _save_to_cache_background(self, query_param: str, data: Dict[str, Any]):
        """Save to cache in the background, logging (not raising) failures."""
        try:
            await asyncio.to_thread(self._save_to_cache_sync, query_param, data)
        except Exception as e:
            self.logger.warning(f"后台缓存保存失败: {e}")

    def _save_to_cache_sync(self, query_param: str, data: Dict[str, Any]):
        """Synchronous cache write (upsert), executed in a thread pool."""
        # Skip error results. _create_error_result marks errors with
        # data=None; the previous check (qid == -1) could never match a
        # string QID, so error results were being cached by mistake.
        if data.get("data") is None:
            return

        with Session(engine) as session:
            # Upsert: update the existing row or insert a new one.
            statement = select(WikidataEntity).where(WikidataEntity.qid == query_param)
            existing = session.exec(statement).first()

            if existing:
                for key, value in data.items():
                    setattr(existing, key, value)
                existing.updated_at = datetime.utcnow()
            else:
                page = WikidataEntity(**data)
                page.created_at = datetime.utcnow()
                page.updated_at = datetime.utcnow()
                session.add(page)

            try:
                session.commit()
            except Exception as e:
                session.rollback()
                self.logger.warning(f"缓存保存失败: {e}")


class WikidataSparqlAsyncService(ConcurrentAsyncDataService[str, Dict[str, Any]]):
    """
    Async service that runs SPARQL queries against the Wikidata endpoint,
    caching results in the local database (``WikidataSparqlModel``) keyed
    by the MD5 digest of the query text.
    """

    def init_http_client(self):
        """Create the HTTP client (long timeout; honours HTTP_PROXY)."""
        self.httpx_client = httpx.AsyncClient(timeout=360.0, proxy=os.environ.get("HTTP_PROXY"))

    def _get_query_key(self, query_param: str) -> str:
        """Cache key: MD5 hex digest of the SPARQL query text."""
        return hashlib.md5(query_param.encode("utf-8")).hexdigest()

    async def _check_cache(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """
        Look up a cached query result.

        Args:
            query_param: the SPARQL query text
            cache_ttl: maximum age of a cache entry, in seconds

        Returns:
            The cached result dict if present and fresh; otherwise None.
        """
        # Database access is blocking, so run it in a worker thread.
        return await asyncio.to_thread(self._check_cache_sync, query_param, cache_ttl)

    def _check_cache_sync(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """Synchronous cache lookup, executed in a thread pool."""
        with Session(engine) as session:
            query_key = self._get_query_key(query_param)
            statement = select(WikidataSparqlModel).where(WikidataSparqlModel.query_key == query_key)
            result = session.exec(statement).first()

            if result:
                # Entries older than cache_ttl seconds are treated as expired.
                now = datetime.utcnow()
                if (now - result.updated_at).total_seconds() > cache_ttl:
                    return None

                # Convert the SQLModel row to a plain dict, dropping
                # bookkeeping columns.
                return result.dict(exclude={"created_at", "updated_at", "id"})

        return None

    async def _fetch_single_data(self, query_param: str) -> Dict[str, Any]:
        """
        Execute one SPARQL query against the Wikidata endpoint.

        Args:
            query_param: the SPARQL query text

        Returns:
            {"query_key": ..., "query": ..., "data": parsed JSON response}.

        Raises:
            httpx.RequestError: when all 10 retry attempts fail.
        """
        endpoint = "https://query.wikidata.org/sparql"

        results = {
            "query_key": self._get_query_key(query_param),
            "query": query_param,
        }

        headers = {
            "Accept": "application/sparql-results+json",
            "User-Agent": "knogen/1.1 (admin@knogen.cn)"
        }

        params = {
            "query": query_param
        }

        # Retry transient transport errors up to 10 times, recreating the
        # HTTP client between attempts.
        for _ in range(10):
            try:
                response = await self.httpx_client.get(endpoint, params=params, headers=headers)
                response.raise_for_status()
                results['data'] = response.json()
                return results
            except httpx.RequestError as e:
                self.logger.warning(f"Request warning: {e}, query_param={query_param }")
                self.init_http_client()
                continue
        # All retries exhausted. (The original's trailing `return results`
        # after the for/else raise was unreachable dead code; removed.)
        raise httpx.RequestError("Max retries exceeded")

    def _create_error_result(self, query_param: str) -> Dict[str, Any]:
        """Build the placeholder result for a query that produced no data."""
        query_key = self._get_query_key(query_param)
        return {
            "query_key": query_key,
            "query": query_param,
            "data": None
        }

    async def _save_to_cache(self, query_param: str, data: Dict[str, Any]) -> None:
        """
        Persist a result to the cache without blocking the caller.

        Args:
            query_param: the SPARQL query text
            data: the result dict to store
        """
        # Fire-and-forget: the DB write happens in a background task.
        asyncio.create_task(self._save_to_cache_background(query_param, data))

    async def _save_to_cache_background(self, query_param: str, data: Dict[str, Any]):
        """Save to cache in the background, logging (not raising) failures."""
        try:
            await asyncio.to_thread(self._save_to_cache_sync, query_param, data)
        except Exception as e:
            self.logger.warning(f"后台缓存保存失败: {e}")

    def _save_to_cache_sync(self, query_param: str, data: Dict[str, Any]):
        """Synchronous cache write (upsert), executed in a thread pool."""
        # NOTE(review): unlike the QID service above, error results
        # (data=None) ARE cached here — confirm this is intentional.
        with Session(engine) as session:
            # Upsert: update the existing row or insert a new one.
            query_key = self._get_query_key(query_param)
            statement = select(WikidataSparqlModel).where(WikidataSparqlModel.query_key == query_key)
            existing = session.exec(statement).first()

            if existing:
                for key, value in data.items():
                    setattr(existing, key, value)
                existing.updated_at = datetime.utcnow()
            else:
                page = WikidataSparqlModel(**data)
                page.created_at = datetime.utcnow()
                page.updated_at = datetime.utcnow()
                session.add(page)

            try:
                session.commit()
            except Exception as e:
                session.rollback()
                self.logger.warning(f"缓存保存失败: {e}")
