import asyncio
import time
from typing import Dict, List, Any, Optional, Tuple, Union
from datetime import datetime, timedelta
from sqlmodel import Session, select
from app.database import engine
from app.models.wikipedia_models import WikipediaPage, WikipediaWikiText, WikipediaCategoryMember
from .base import AsyncDataService, ConcurrentAsyncDataService
import httpx

class WikipediaQueryPageByTitleAsyncService(AsyncDataService[str, Dict[str, Any]]):
    """
    Async service for fetching Wikipedia page data by title.

    Uses asyncio so callers can submit query tasks and await their results.
    Fetched pages are cached in the database (WikipediaPage) with a TTL;
    cache reads/writes run in worker threads so the event loop is not blocked.
    """

    def _get_query_key(self, query_param: str) -> str:
        """
        Build the unique key identifying a task.

        Args:
            query_param: The queried title.

        Returns:
            The key used to deduplicate/identify tasks.
        """
        return f"{query_param}"

    async def _check_cache(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """
        Look up valid cached data for a title.

        Args:
            query_param: The queried title.
            cache_ttl: Maximum acceptable cache age in seconds.

        Returns:
            The cached data dict if present and fresh, otherwise None.
        """
        # Database access is blocking, so run it in a worker thread.
        return await asyncio.to_thread(self._check_cache_sync, query_param, cache_ttl)

    def _check_cache_sync(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """Synchronous cache lookup, executed in a thread pool."""
        with Session(engine) as session:
            # Cache rows are keyed by (query_title, lang).
            statement = select(WikipediaPage).where(
                WikipediaPage.query_title == query_param,
                WikipediaPage.lang == self.lang,
            )
            result = session.exec(statement).first()

            if result:
                # NOTE(review): stored timestamps appear to be naive UTC
                # (written with utcnow() below), so compare against utcnow().
                now = datetime.utcnow()
                if (now - result.updated_at).total_seconds() > cache_ttl:
                    return None  # entry expired

                # Convert the SQLModel row to a plain dict, dropping bookkeeping fields.
                return result.dict(exclude={"created_at", "updated_at", "id"})

        return None

    async def _fetch_data(self, query_params: List[str]) -> List[Dict[str, Any]]:
        """
        Fetch page data for a list of titles from the Wikipedia API.

        Args:
            query_params: Titles to query.

        Returns:
            Result dicts in the same order as query_params. Titles that could
            not be resolved yield an error placeholder (page_id == -1).
        """
        # Fixed: the empty-input fallback was [], but the code below calls
        # .get() on it — it must be an (empty) dict.
        title_results_dict = (
            await self._bulk_fetch_wikipedia_pages_by_titles(query_params)
            if query_params
            else {}
        )

        # Re-order results to match the original query order.
        results = []
        for param in query_params:
            obj = title_results_dict.get(param)
            if obj:
                results.append(obj)
            else:
                # No match returned by the API: emit an error placeholder.
                results.append(self._create_error_result(param))

        return results

    async def _bulk_fetch_wikipedia_pages_by_titles(self, page_id_list: List[str]) -> Dict[str, Any]:
        """
        Fetch Wikipedia page data for many titles, 50 per API request.

        Args:
            page_id_list: Titles to query.

        Returns:
            Mapping from each originally supplied title to its page data dict.
        """
        BATCH_SIZE = 50  # MediaWiki caps the "titles" parameter at 50 for normal clients
        base_url = f"https://{self.lang}.wikipedia.org/w/api.php"
        results: Dict[str, Any] = {}

        for i in range(0, len(page_id_list), BATCH_SIZE):
            batch_titles = page_id_list[i : i + BATCH_SIZE]

            query_params = {
                "action": "query",
                "prop": "extracts|pageprops|description|categories|images|imageinfo",
                "iiprop": "url",  # include image URLs
                "titles": "|".join(batch_titles),
                "explaintext": 1,
                "format": "json",
                "formatversion": 2,
                "redirects": 1,
                "clshow": "!hidden",  # skip hidden (maintenance) categories
                "cllimit": "max",
            }
            # Retry transient network errors up to 10 times, recreating the
            # HTTP client on each failure. HTTP status errors are not retried.
            for _ in range(10):
                try:
                    r1 = await self.httpx_client.get(base_url, params=query_params)
                    r1.raise_for_status()
                    r1_json = r1.json()
                    break
                except httpx.RequestError as e:
                    self.logger.warning(f"Request warning: {e}, titles={batch_titles}")
                    self.init_http_client()
                    continue
            else:
                raise httpx.RequestError("Max retries exceeded")

            pages = r1_json["query"]["pages"]

            # Map API-normalized titles back to the form the caller supplied.
            normalized = r1_json["query"].get("normalized", [])
            normalized_map = {n["to"]: n["from"] for n in normalized}

            # Map redirect targets back to the redirecting source title.
            redirects = r1_json["query"].get("redirects", [])
            redirect_map = {r["to"]: r["from"] for r in redirects}

            def resolve_source_title(api_title: str) -> str:
                """Reverse redirects then normalization to recover the user-supplied title."""
                if api_title in redirect_map:
                    api_title = redirect_map[api_title]
                if api_title in normalized_map:
                    api_title = normalized_map[api_title]
                return api_title  # falls through unchanged if no mapping applies

            for page in pages:
                truly_title = page.get("title")
                source_title = resolve_source_title(truly_title)
                page_id = page.get("pageid")
                pageprops = page.get("pageprops", {})
                images_list = page.get("images", [])
                image_info = page.get("imageinfo", [])

                wikibase_item = pageprops.get("wikibase_item")
                wikibase_shortdesc = pageprops.get("wikibase-shortdesc")
                image = pageprops.get("page_image") or pageprops.get("page_image_free")

                description = page.get("description")
                extract = page.get("extract", "")
                categories_data = page.get("categories", [])
                categories = [
                    cat["title"].replace("Category:", "") for cat in categories_data
                ]

                data = {
                    "query_title": source_title,
                    "title": truly_title,
                    "lang": self.lang,
                    "page_id": page_id,
                    "wikibase_item": wikibase_item,
                    "wikibase_shortdesc": wikibase_shortdesc,
                    "image": image,
                    "description": description,
                    "extract": extract,
                    "categories": categories,
                    "images_list": images_list,
                    "image_info": image_info,
                }
                results[source_title] = data

        return results

    def _create_error_result(self, query_param: str) -> Dict[str, Any]:
        """Build a placeholder result (page_id == -1) for an unresolved title."""
        return {
            "page_id": -1,
            "title": f"未找到: {query_param}",
            "wikibase_item": None,
            "description": "",
            "lang": "",
            "extract": "",
            "categories": [],
        }

    async def _save_to_cache(self, query_param: str, data: Dict[str, Any]) -> None:
        """
        Persist fetched data to the cache without blocking the caller.

        Args:
            query_param: The queried title.
            data: The data dict to cache.
        """
        # Fire-and-forget: the DB write happens in a background task.
        asyncio.create_task(self._save_to_cache_background(query_param, data))

    async def _save_to_cache_background(self, query_param: str, data: Dict[str, Any]):
        """Run the blocking cache write in a worker thread; log (not raise) failures."""
        try:
            await asyncio.to_thread(self._save_to_cache_sync, query_param, data)
        except Exception as e:
            self.logger.warning(f"后台缓存保存失败: {e}")

    def _save_to_cache_sync(self, query_param: str, data: Dict[str, Any]):
        """Synchronous cache upsert, executed in a thread pool."""
        # Error placeholders are never cached.
        if data["page_id"] == -1:
            return

        with Session(engine) as session:
            # Fixed: also filter by lang so an entry for another language is
            # not silently overwritten (the cache lookup filters by lang too).
            statement = select(WikipediaPage).where(
                WikipediaPage.query_title == query_param,
                WikipediaPage.lang == self.lang,
            )
            existing = session.exec(statement).first()

            if existing:
                # Update the existing row in place.
                for key, value in data.items():
                    setattr(existing, key, value)
                existing.updated_at = datetime.utcnow()
            else:
                # Insert a fresh row.
                page = WikipediaPage(**data)
                page.created_at = datetime.utcnow()
                page.updated_at = datetime.utcnow()
                session.add(page)

            try:
                session.commit()
            except Exception as e:
                session.rollback()
                self.logger.warning(f"缓存保存失败: {e}")

class WikipediaQueryPageByPageIDAsyncService(AsyncDataService[int, Dict[int, Any]]):
    """
    Async service for fetching Wikipedia page data by numeric page ID.

    Uses asyncio so callers can submit query tasks and await their results.
    Fetched pages are cached in the database (WikipediaPage) with a TTL;
    cache reads/writes run in worker threads so the event loop is not blocked.
    """

    def _get_query_key(self, query_param: int) -> int:
        """Return the unique task key (the page ID itself)."""
        return query_param

    async def _check_cache(self, query_param: int, cache_ttl: int) -> Optional[Dict[int, Any]]:
        """
        Look up valid cached data for a page ID.

        Args:
            query_param: Wikipedia page ID.
            cache_ttl: Maximum acceptable cache age in seconds.

        Returns:
            The cached data dict if present and fresh, otherwise None.
        """
        # Database access is blocking, so run it in a worker thread.
        return await asyncio.to_thread(self._check_cache_sync, query_param, cache_ttl)

    def _check_cache_sync(self, query_param: int, cache_ttl: int) -> Optional[Dict[int, Any]]:
        """Synchronous cache lookup, executed in a thread pool."""
        with Session(engine) as session:
            # Cache rows for this service are keyed by (page_id, lang).
            statement = select(WikipediaPage).where(
                WikipediaPage.page_id == query_param,
                WikipediaPage.lang == self.lang,
            )
            result = session.exec(statement).first()

            if result:
                # NOTE(review): stored timestamps appear to be naive UTC
                # (written with utcnow() below), so compare against utcnow().
                now = datetime.utcnow()
                if (now - result.updated_at).total_seconds() > cache_ttl:
                    return None  # entry expired

                # Convert the SQLModel row to a plain dict, dropping bookkeeping fields.
                return result.dict(exclude={"created_at", "updated_at", "id"})

        return None

    async def _fetch_data(self, query_params: List[int]) -> List[Dict[int, Any]]:
        """
        Fetch page data for a list of page IDs from the Wikipedia API.

        Args:
            query_params: Page IDs to query.

        Returns:
            Result dicts in the same order as query_params. IDs that could not
            be resolved yield an error placeholder (page_id == -1).
        """
        # Fixed: the empty-input fallback was [], but the code below calls
        # .get() on it — it must be an (empty) dict.
        id_results_dict = (
            await self._bulk_fetch_wikipedia_pages_by_pageids(query_params)
            if query_params
            else {}
        )

        # Re-order results to match the original query order.
        results = []
        for param in query_params:
            obj = id_results_dict.get(param)
            if obj:
                results.append(obj)
            else:
                # No match returned by the API: emit an error placeholder.
                results.append(self._create_error_result(param))

        return results

    async def _bulk_fetch_wikipedia_pages_by_pageids(self, page_id_list: List[int]) -> Dict[int, Any]:
        """
        Fetch Wikipedia page data for many page IDs, 50 per API request.

        Args:
            page_id_list: Page IDs to query.

        Returns:
            Mapping from page ID to its page data dict.
        """
        BATCH_SIZE = 50  # MediaWiki caps the "pageids" parameter at 50 for normal clients
        base_url = f"https://{self.lang}.wikipedia.org/w/api.php"
        results: Dict[int, Any] = {}

        # (Removed a leftover debug statement: self.logger.info(12).)
        for i in range(0, len(page_id_list), BATCH_SIZE):
            batch_ids = page_id_list[i : i + BATCH_SIZE]

            query_params = {
                "action": "query",
                "prop": "extracts|pageprops|description|categories|images|imageinfo",
                "iiprop": "url",  # include image URLs
                "pageids": "|".join(str(ID) for ID in batch_ids),
                "explaintext": 1,
                "format": "json",
                "formatversion": 2,
                "redirects": 1,
                "clshow": "!hidden",  # skip hidden (maintenance) categories
                "cllimit": "max",
            }
            # Retry transient network errors up to 10 times, recreating the
            # HTTP client on each failure. HTTP status errors are not retried.
            for _ in range(10):
                try:
                    r1 = await self.httpx_client.get(base_url, params=query_params)
                    r1.raise_for_status()
                    break
                except httpx.RequestError as e:
                    # Fixed: log label said "titles" although these are page IDs.
                    self.logger.warning(f"Request warning: {e}, pageids={batch_ids}")
                    self.init_http_client()
                    continue
            else:
                raise httpx.RequestError("Max retries exceeded")

            r1_json = r1.json()
            pages = r1_json["query"]["pages"]

            for page in pages:
                truly_title = page.get("title")
                page_id = page.get("pageid")
                pageprops = page.get("pageprops", {})

                wikibase_item = pageprops.get("wikibase_item")
                wikibase_shortdesc = pageprops.get("wikibase-shortdesc")
                image = pageprops.get("page_image") or pageprops.get("page_image_free")
                images_list = page.get("images", [])
                image_info = page.get("imageinfo", [])

                description = page.get("description")
                extract = page.get("extract", "")
                categories_data = page.get("categories", [])
                categories = [
                    cat["title"].replace("Category:", "") for cat in categories_data
                ]

                data = {
                    # With an ID query there is no user-supplied title, so the
                    # resolved title doubles as the query title.
                    "query_title": truly_title,
                    "title": truly_title,
                    "lang": self.lang,
                    "page_id": page_id,
                    "wikibase_item": wikibase_item,
                    "wikibase_shortdesc": wikibase_shortdesc,
                    "image": image,
                    "description": description,
                    "extract": extract,
                    "categories": categories,
                    "images_list": images_list,
                    "image_info": image_info,
                }
                results[page_id] = data

        return results

    def _create_error_result(self, query_param: int) -> Dict[str, Any]:
        """Build a placeholder result (page_id == -1) for an unresolved page ID."""
        return {
            "page_id": -1,
            "title": f"未找到: {query_param}",
            "wikibase_item": None,
            "description": "",
            "lang": "",
            "extract": "",
            "categories": [],
            "images_list": [],
        }

    async def _save_to_cache(self, query_param: int, data: Dict[str, Any]) -> None:
        """
        Persist fetched data to the cache without blocking the caller.

        Args:
            query_param: The queried page ID.
            data: The data dict to cache.
        """
        # Fire-and-forget: the DB write happens in a background task.
        asyncio.create_task(self._save_to_cache_background(query_param, data))

    async def _save_to_cache_background(self, query_param: int, data: Dict[str, Any]):
        """Run the blocking cache write in a worker thread; log (not raise) failures."""
        try:
            await asyncio.to_thread(self._save_to_cache_sync, query_param, data)
        except Exception as e:
            self.logger.warning(f"后台缓存保存失败: {e}")

    def _save_to_cache_sync(self, query_param: int, data: Dict[str, Any]):
        """Synchronous cache upsert, executed in a thread pool."""
        # Error placeholders are never cached.
        if data["page_id"] == -1:
            return

        with Session(engine) as session:
            # Fixed: this service is keyed by page ID, but the lookup compared
            # query_title against the numeric ID, so it could never match an
            # existing row (duplicates on every refresh). Match page_id and
            # lang instead, mirroring _check_cache_sync.
            statement = select(WikipediaPage).where(
                WikipediaPage.page_id == query_param,
                WikipediaPage.lang == self.lang,
            )
            existing = session.exec(statement).first()

            if existing:
                # Update the existing row in place.
                for key, value in data.items():
                    setattr(existing, key, value)
                existing.updated_at = datetime.utcnow()
            else:
                # Insert a fresh row.
                page = WikipediaPage(**data)
                page.created_at = datetime.utcnow()
                page.updated_at = datetime.utcnow()
                session.add(page)

            try:
                session.commit()
            except Exception as e:
                session.rollback()
                self.logger.warning(f"缓存保存失败: {e}")

class WikipediaQueryWikiTextByTitleAsyncService(ConcurrentAsyncDataService[str, Dict[str, Any]]):
    """
    Async service for fetching the raw wikitext, section list and language
    links of a Wikipedia page by title, with DB caching (WikipediaWikiText).
    """

    def _get_query_key(self, query_param: str) -> str:
        """
        Build the unique key identifying a task.

        Args:
            query_param: The queried title.

        Returns:
            The key used to deduplicate/identify tasks.
        """
        return f"{query_param}"

    async def _check_cache(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """
        Look up valid cached wikitext for a title.

        Args:
            query_param: The queried title.
            cache_ttl: Maximum acceptable cache age in seconds.

        Returns:
            The cached data dict if present and fresh, otherwise None.
        """
        # Database access is blocking, so run it in a worker thread.
        return await asyncio.to_thread(self._check_cache_sync, query_param, cache_ttl)

    def _check_cache_sync(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """Synchronous cache lookup, executed in a thread pool."""
        with Session(engine) as session:
            # Cache rows are keyed by (query_title, lang).
            statement = select(WikipediaWikiText).where(
                WikipediaWikiText.query_title == query_param,
                WikipediaWikiText.lang == self.lang,
            )
            result = session.exec(statement).first()

            if result:
                # NOTE(review): stored timestamps appear to be naive UTC
                # (written with utcnow() below), so compare against utcnow().
                now = datetime.utcnow()
                if (now - result.updated_at).total_seconds() > cache_ttl:
                    return None  # entry expired

                # Convert the SQLModel row to a plain dict, dropping bookkeeping fields.
                return result.dict(exclude={"created_at", "updated_at", "id"})

        return None

    async def _fetch_single_data(self, query_param: str) -> Dict[str, Any]:
        """
        Fetch wikitext, sections and langlinks for one title via action=parse.

        Args:
            query_param: The title to parse.

        Returns:
            A dict with page_id, resolved title, wikitext, language links and
            section metadata. Missing pieces default to empty values.
        """
        base_url = f"https://{self.lang}.wikipedia.org/w/api.php"
        parse_params = {
            "action": "parse",
            "page": query_param,
            "prop": "wikitext|sections|langlinks",
            "format": "json",
            "redirects": 1,
        }
        # Retry transient network errors up to 10 times, recreating the
        # HTTP client on each failure. HTTP status errors are not retried.
        for _ in range(10):
            try:
                r1 = await self.httpx_client.get(base_url, params=parse_params)
                r1.raise_for_status()
                break
            except httpx.RequestError as e:
                # Fixed: previously referenced the undefined name `batch_titles`,
                # raising NameError whenever this retry path was taken.
                self.logger.warning(f"Request warning: {e}, titles={query_param}")
                self.init_http_client()
                continue
        else:
            raise httpx.RequestError("Max retries exceeded")

        parse_data = r1.json().get("parse", {})

        # Map language code -> page title in that language.
        lang_dict = {entity["lang"]: entity["*"] for entity in parse_data.get("langlinks", [])}

        result = {
            "page_id": parse_data.get("pageid"),
            "query_title": query_param,
            "title": parse_data.get("title"),
            "lang": self.lang,
            "wikitext": parse_data.get("wikitext", {}).get("*", ""),
            "langlinks": lang_dict,
            "sections": parse_data.get("sections"),
        }

        return result

    def _create_error_result(self, query_param: str) -> Dict[str, Any]:
        """Build a placeholder result (page_id == -1) for an unresolved title."""
        return {
            "page_id": -1,
            "title": f"未找到: {query_param}",
            "wikibase_item": None,
            "wikitext": "",
            "lang": "",
        }

    async def _save_to_cache(self, query_param: str, data: Dict[str, Any]) -> None:
        """
        Persist fetched data to the cache without blocking the caller.

        Args:
            query_param: The queried title.
            data: The data dict to cache.
        """
        # Fire-and-forget: the DB write happens in a background task.
        asyncio.create_task(self._save_to_cache_background(query_param, data))

    async def _save_to_cache_background(self, query_param: str, data: Dict[str, Any]):
        """Run the blocking cache write in a worker thread; log (not raise) failures."""
        try:
            await asyncio.to_thread(self._save_to_cache_sync, query_param, data)
        except Exception as e:
            self.logger.warning(f"后台缓存保存失败: {e}")

    def _save_to_cache_sync(self, query_param: str, data: Dict[str, Any]):
        """Synchronous cache upsert, executed in a thread pool."""
        # Error placeholders are never cached.
        if data["page_id"] == -1:
            return

        with Session(engine) as session:
            # Fixed: also filter by lang so an entry for another language is
            # not silently overwritten (the cache lookup filters by lang too).
            statement = select(WikipediaWikiText).where(
                WikipediaWikiText.query_title == query_param,
                WikipediaWikiText.lang == self.lang,
            )
            existing = session.exec(statement).first()

            if existing:
                # Update the existing row in place.
                for key, value in data.items():
                    setattr(existing, key, value)
                existing.updated_at = datetime.utcnow()
            else:
                # Insert a fresh row.
                page = WikipediaWikiText(**data)
                page.created_at = datetime.utcnow()
                page.updated_at = datetime.utcnow()
                session.add(page)

            try:
                session.commit()
            except Exception as e:
                session.rollback()
                self.logger.warning(f"缓存保存失败: {e}")

class WikipediaQueryCategoryMemberByTitleAsyncService(ConcurrentAsyncDataService[str, Dict[str, Any]]):
    """
    Async service for listing members (sub-categories and articles) of a
    Wikipedia category by name, with DB caching (WikipediaCategoryMember).
    """

    def _get_query_key(self, query_param: str) -> str:
        """Return the unique task key (the category name itself)."""
        return query_param

    async def _check_cache(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """
        Look up valid cached category members for a category name.

        Args:
            query_param: Category name without the "Category:" prefix.
            cache_ttl: Maximum acceptable cache age in seconds.

        Returns:
            The cached data dict if present and fresh, otherwise None.
        """
        # Database access is blocking, so run it in a worker thread.
        return await asyncio.to_thread(self._check_cache_sync, query_param, cache_ttl)

    def _check_cache_sync(self, query_param: str, cache_ttl: int) -> Optional[Dict[str, Any]]:
        """Synchronous cache lookup, executed in a thread pool."""
        with Session(engine) as session:
            # Cache rows are keyed by (query_title, lang).
            statement = select(WikipediaCategoryMember).where(
                WikipediaCategoryMember.query_title == query_param,
                WikipediaCategoryMember.lang == self.lang,
            )
            result = session.exec(statement).first()

            if result:
                # NOTE(review): stored timestamps appear to be naive UTC
                # (written with utcnow() below), so compare against utcnow().
                now = datetime.utcnow()
                if (now - result.updated_at).total_seconds() > cache_ttl:
                    return None  # entry expired

                # Convert the SQLModel row to a plain dict, dropping bookkeeping fields.
                return result.dict(exclude={"created_at", "updated_at", "id"})

        return None

    async def _fetch_single_data(self, query_param: str) -> Dict[str, Any]:
        """
        Fetch members of "Category:<query_param>" via list=categorymembers.

        Follows API continuation tokens for up to 16 extra requests of 500
        members each, collecting sub-categories (ns 14) and articles (ns 0).

        Args:
            query_param: Category name without the "Category:" prefix.

        Returns:
            A dict with the query title, language, sub-category list and page list.
        """
        base_url = f"https://{self.lang}.wikipedia.org/w/api.php"

        cmtitle = f"Category:{query_param}"
        results = {
            "query_title": query_param,
            "lang": self.lang,
            "subcategories": [],
            "pages": [],
        }

        params = {
            "action": "query",
            "list": "categorymembers",
            "cmtitle": cmtitle,
            "cmtype": "subcat|page",
            "cmlimit": 500,
            "format": "json",
        }

        page_count = 0
        max_pages = 16  # hard cap on continuation requests
        while page_count < max_pages:
            # Retry transient network errors up to 5 times per request.
            for _ in range(5):
                try:
                    response = await self.httpx_client.get(base_url, params=params)
                    response.raise_for_status()
                    break
                except httpx.RequestError as e:
                    # Fixed: use the service logger instead of print(), matching
                    # the other services in this module.
                    self.logger.warning(f"Request error: {e}")
                    self.init_http_client()
                    continue
            else:
                self.logger.warning("Failed to fetch data after 5 attempts.")
                raise httpx.RequestError("Max retries exceeded")

            data = response.json()
            for member in data.get("query", {}).get("categorymembers", []):
                if member["ns"] == 14:  # namespace 14 = sub-category
                    results["subcategories"].append({
                        "title": member["title"].replace("Category:", ""),
                        "pageid": member["pageid"],
                    })
                elif member["ns"] == 0:  # namespace 0 = article
                    results["pages"].append({"title": member["title"], "pageid": member["pageid"]})

            # Follow the continuation token until exhausted or capped.
            if "continue" in data:
                params.update(data["continue"])
                page_count += 1
            else:
                break

        return results

    def _create_error_result(self, query_param: str) -> Dict[str, Any]:
        """Build an empty placeholder result for a failed category query."""
        return {
            "query_title": query_param,
            "lang": self.lang,
            "subcategories": [],
            "pages": [],
        }

    async def _save_to_cache(self, query_param: str, data: Dict[str, Any]) -> None:
        """
        Persist fetched data to the cache without blocking the caller.

        Args:
            query_param: The queried category name.
            data: The data dict to cache.
        """
        # Fire-and-forget: the DB write happens in a background task.
        asyncio.create_task(self._save_to_cache_background(query_param, data))

    async def _save_to_cache_background(self, query_param: str, data: Dict[str, Any]):
        """Run the blocking cache write in a worker thread; log (not raise) failures."""
        try:
            await asyncio.to_thread(self._save_to_cache_sync, query_param, data)
        except Exception as e:
            self.logger.warning(f"后台缓存保存失败: {e}")

    def _save_to_cache_sync(self, query_param: str, data: Dict[str, Any]):
        """Synchronous cache upsert, executed in a thread pool."""
        with Session(engine) as session:
            # Cache rows are keyed by (query_title, lang), matching the lookup.
            statement = select(WikipediaCategoryMember).where(
                WikipediaCategoryMember.query_title == query_param,
                WikipediaCategoryMember.lang == self.lang,
            )
            existing = session.exec(statement).first()

            if existing:
                # Update the existing row in place.
                for key, value in data.items():
                    setattr(existing, key, value)
                existing.updated_at = datetime.utcnow()
            else:
                # Insert a fresh row.
                page = WikipediaCategoryMember(**data)
                page.created_at = datetime.utcnow()
                page.updated_at = datetime.utcnow()
                session.add(page)

            try:
                session.commit()
            except Exception as e:
                session.rollback()
                self.logger.warning(f"缓存保存失败: {e}")
