# src/data_collector/platforms/facebook_collector.py
import os
import time
import logging
import requests
import urllib.parse
from datetime import datetime
from typing import List, Dict, Any, Optional
from src.utils.logger import get_logger
from src.utils.config_loader import load_config
from src.utils.api_key_rotator import ApiKeyRotator

logger = get_logger(__name__)


class FacebookCommentCollector:
    """Collector for Facebook posts and comments via the Graph API.

    Tokens come from an ``ApiKeyRotator``; all HTTP traffic goes through a
    shared ``requests.Session``. Pagination follows the Graph API
    ``paging.next`` links.
    """

    def __init__(self, access_token: Optional[str] = None):
        """
        Comment collector backed by the Facebook Graph API.

        :param access_token: explicit Facebook API access token; when omitted,
            one is fetched from the ApiKeyRotator.
        """
        # The rotator must be created first: _get_access_token() needs it.
        self.rotator = ApiKeyRotator(service_name="facebook")

        # Prefer an explicitly supplied token, else fall back to the rotator.
        self.access_token = access_token or self._get_access_token()

        self.api_version = "v18.0"  # Graph API version is pinned here
        self.base_url = f"https://graph.facebook.com/{self.api_version}"
        self.session = requests.Session()

        # Tunables loaded from the project configuration (with defaults).
        config = load_config()
        self.timeout = config.get("facebook", {}).get("timeout", 30)
        self.page_size = config.get("facebook", {}).get("page_size", 100)
        self.max_retries = config.get("facebook", {}).get("max_retries", 3)
        # Delay between requests to stay under the API rate limits.
        self.request_delay = config.get("facebook", {}).get("request_delay", 1)

    def _get_access_token(self) -> str:
        """Return a Facebook access token from the ApiKeyRotator."""
        return self.rotator.get_key()

    def get_video_id(self, video_url: str) -> Optional[str]:
        """
        Extract the video ID from a Facebook video URL.

        First tries a purely textual extraction from ``/videos/<id>`` style
        URLs; otherwise falls back to resolving the URL through the API.

        :param video_url: Facebook video URL
        :return: the video ID, or None when it cannot be determined
        """
        if "/videos/" in video_url:
            parts = video_url.split("/videos/")
            if len(parts) > 1:
                # Strip any trailing path segment and query string.
                return parts[1].split("/")[0].split("?")[0]

        # Fall back to letting the Graph API resolve the URL to an ID.
        try:
            response = self._make_request(
                f"{self.base_url}/",
                params={"id": video_url}
            )
            return response.get("id")
        except Exception as e:
            logger.error(f"⚠️ 通过API解析视频ID失败: {str(e)}")
            return None

    def search_group_posts(self, group_id: str, query: str, max_posts: int = 100) -> List[Dict[str, Any]]:
        """
        Search for posts inside a Facebook group.

        The Graph API has no in-group search endpoint, so the group feed is
        paged through and posts are filtered locally by the query string
        (case-insensitive substring match on the message).

        :param group_id: Facebook group ID
        :param query: search keyword
        :param max_posts: maximum number of posts to return
        :return: list of parsed post dicts
        """
        if not group_id:
            logger.error("❌ 无效的群组ID")
            return []

        posts = []
        endpoint = f"{self.base_url}/{group_id}/feed"
        params = {
            "fields": "id,message,created_time,from{name},permalink_url,comments.limit(0).summary(true)",
            "limit": min(self.page_size, max_posts),
            "access_token": self.rotator.get_key()
        }

        try:
            while len(posts) < max_posts and endpoint:
                try:
                    response = self._make_request(endpoint, params)
                except Exception as e:
                    # _make_request has already exhausted its own retries;
                    # retrying the same page here would loop forever, so stop
                    # paginating and return whatever was collected so far.
                    logger.warning(f"⚠️ API请求失败: {str(e)}，停止分页")
                    break

                # Keep only posts whose message contains the query.
                for post in response.get("data", []):
                    message = post.get("message", "")
                    if query.lower() in message.lower():
                        posts.append(self._parse_post(post))

                # Follow the pagination link if more data is needed.
                if "paging" in response and "next" in response["paging"] and len(posts) < max_posts:
                    endpoint = response["paging"]["next"]
                    params = {}  # the "next" URL already carries all params
                else:
                    endpoint = None

            logger.info(f"✅ 成功获取 {len(posts)} 条Facebook帖子")
            return posts[:max_posts]

        except Exception as e:
            logger.error(f"❌ 获取帖子异常: {str(e)}")
            return []

    def get_post_comments(self, post_id: str, max_comments: int = 100) -> List[Dict[str, Any]]:
        """
        Fetch the comments of a single Facebook post.

        :param post_id: Facebook post ID
        :param max_comments: maximum number of comments to return
        :return: list of parsed comment dicts (see ``_parse_comment``)
        """
        logger.info(f"💬 开始获取帖子 {post_id} 的评论...")
        comments = []
        endpoint = f"{self.base_url}/{post_id}/comments"

        params = {
            "fields": "id,message,created_time,from{name},likes.summary(true),comments.summary(true)",
            "limit": min(self.page_size, max_comments),
            "access_token": self.rotator.get_key()
        }

        try:
            while len(comments) < max_comments and endpoint:
                try:
                    response = self._make_request(endpoint, params)
                except Exception as e:
                    # Retries are already done inside _make_request; stop
                    # paginating instead of spinning on a failing page.
                    logger.warning(f"⚠️ API请求失败: {str(e)}，停止分页")
                    break

                for comment in response.get("data", []):
                    comments.append(self._parse_comment(comment))

                # Follow the pagination link if more data is needed.
                if "paging" in response and "next" in response["paging"] and len(comments) < max_comments:
                    endpoint = response["paging"]["next"]
                    params = {}  # the "next" URL already carries all params
                else:
                    endpoint = None

            logger.info(f"✅ 成功获取 {len(comments)} 条Facebook评论")
            return comments[:max_comments]

        except Exception as e:
            logger.error(f"❌ 获取评论异常: {str(e)}")
            return []

    def _format_comments(self, comments: List[dict], video_info: dict) -> "List[RawComment]":
        """
        Convert parsed Facebook comment dicts into RawComment objects.

        NOTE(review): ``RawComment`` is not imported in this module's visible
        import block — confirm the import exists (annotation is a lazy string
        so the class still defines without it).

        :param comments: parsed Facebook comment dicts
        :param video_info: video info containing ``id`` and ``video_id``
        :return: list of RawComment objects
        """
        raw_comments = []
        video_source_id = video_info.get("id", 0)  # source row ID of the video
        video_id = video_info.get("video_id", "")

        for comment in comments:
            try:
                # Parse the Unix timestamp; fall back to "now" when missing.
                publish_date = datetime.fromtimestamp(comment["timestamp"]) if comment["timestamp"] else datetime.now()

                raw_comment = RawComment(
                    top_comment_id=comment["id"],
                    video_id=video_id,
                    video_source_id=video_source_id,  # link back to the source
                    comment_text=comment["text"],
                    author=comment["author"],
                    like_count=comment["likes"],
                    published_at=publish_date,
                    platform="facebook",
                    raw_data=comment
                )
                raw_comments.append(raw_comment)

            except Exception as e:
                # Skip malformed comments rather than aborting the batch.
                logger.warning(f"⚠️ 转换Facebook评论时出错: {e}")
                continue

        return raw_comments

    def get_comments(self, video_id: str, max_comments: int = 100) -> List[Dict[str, Any]]:
        """
        Fetch the comments of a given video.

        :param video_id: Facebook video ID
        :param max_comments: maximum number of comments to return
        :return: list of parsed comment dicts
        """
        if not video_id:
            logger.error("❌ 无效的视频ID")
            return []

        comments = []
        endpoint = f"{self.base_url}/{video_id}/comments"
        params = {
            "fields": "id,message,created_time,from{name},likes.summary(true)",
            "limit": min(self.page_size, max_comments),
            "access_token": self.rotator.get_key()
        }

        try:
            while len(comments) < max_comments and endpoint:
                try:
                    response = self._make_request(endpoint, params)
                except Exception as e:
                    # Retries are already done inside _make_request; stop
                    # paginating instead of spinning on a failing page.
                    logger.warning(f"⚠️ API请求失败: {str(e)}，停止分页")
                    break

                for comment in response.get("data", []):
                    comments.append(self._parse_comment(comment))

                # Follow the pagination link if more data is needed.
                if "paging" in response and "next" in response["paging"] and len(comments) < max_comments:
                    endpoint = response["paging"]["next"]
                    params = {}  # the "next" URL already carries all params
                else:
                    endpoint = None

            logger.info(f"✅ 成功获取 {len(comments)} 条Facebook评论")
            return comments[:max_comments]

        except Exception as e:
            logger.error(f"❌ 获取评论异常: {str(e)}")
            return []

    def _handle_retry(self, error: Exception, retry_count: int) -> int:
        """Back off exponentially and return the incremented retry counter,
        or re-raise *error* once ``self.max_retries`` is exhausted."""
        logger.warning(f"⚠️ 请求失败: {str(error)}")
        if retry_count >= self.max_retries:
            logger.error("❌ 达到最大重试次数，停止请求")
            raise error
        wait_time = 2 ** retry_count
        logger.info(f"🔄 第{retry_count+1}次重试（等待{wait_time}秒）...")
        time.sleep(wait_time)
        return retry_count + 1

    def _make_request(self, url: str, params: dict) -> dict:
        """Perform a GET against the Graph API and return the decoded JSON.

        Transient failures (transport errors, 5xx) are retried with
        exponential backoff up to ``self.max_retries``. Permanent client
        errors (400/403/404) are raised immediately — retrying them cannot
        succeed.
        """
        retry_count = 0

        while True:
            # Throttle every attempt to avoid hammering the API.
            time.sleep(self.request_delay)

            try:
                response = self.session.get(url, params=params, timeout=self.timeout)
            except requests.exceptions.RequestException as e:
                # Network/transport failure: transient, back off and retry.
                retry_count = self._handle_retry(e, retry_count)
                continue

            # Permanent client errors: raise without consuming retries.
            if response.status_code == 400:
                logger.error(f"❌ 请求错误 400: {response.text}")
                raise requests.exceptions.RequestException(f"Bad Request: {response.text}")
            elif response.status_code == 403:
                logger.error(f"❌ 权限不足 403: {response.text}")
                raise requests.exceptions.RequestException(f"Forbidden: {response.text}")
            elif response.status_code == 404:
                logger.error(f"❌ 资源未找到 404: {response.text}")
                raise requests.exceptions.RequestException(f"Not Found: {response.text}")

            try:
                response.raise_for_status()
                return response.json()
            except requests.exceptions.RequestException as e:
                # 5xx or body-decoding problems: treat as transient.
                retry_count = self._handle_retry(e, retry_count)

    def _parse_post(self, post_data: dict) -> dict:
        """Flatten a Graph API post payload into a simple dict."""
        comments_summary = post_data.get("comments", {}).get("summary", {})
        return {
            "id": post_data.get("id"),
            "author": post_data.get("from", {}).get("name", "Unknown"),
            "text": post_data.get("message", ""),
            "timestamp": self._convert_to_timestamp(post_data.get("created_time")),
            "comments_count": comments_summary.get("total_count", 0),
            "url": post_data.get("permalink_url", ""),
        }

    def _parse_comment(self, comment_data: dict) -> dict:
        """Flatten a Graph API comment payload into a simple dict."""
        likes_summary = comment_data.get("likes", {}).get("summary", {})
        replies_summary = comment_data.get("comments", {}).get("summary", {})
        return {
            "id": comment_data.get("id"),
            "author": comment_data.get("from", {}).get("name", "Unknown"),
            "text": comment_data.get("message", ""),
            "timestamp": self._convert_to_timestamp(comment_data.get("created_time")),
            "likes": likes_summary.get("total_count", 0),
            "replies_count": replies_summary.get("total_count", 0),
            "attachments": []
        }

    def _convert_to_timestamp(self, time_str: str) -> int:
        """
        Convert an ISO-8601 time string to a Unix timestamp.

        Falls back to the current time when the string is empty or
        unparseable.

        :param time_str: ISO-8601 time string
        :return: Unix timestamp (seconds)
        """
        if not time_str:
            return int(time.time())

        try:
            # The Graph API emits "+0000" offsets and may use a "Z" suffix;
            # normalize both so datetime.fromisoformat accepts them.
            normalized = time_str.replace("Z", "+00:00").replace("+0000", "+00:00")
            dt = datetime.fromisoformat(normalized)
            return int(dt.timestamp())
        except ValueError:
            logger.warning(f"⚠️ 时间解析失败: {time_str}")
            return int(time.time())

    def get_video_details(self, video_id: str) -> dict:
        """Fetch the details (title, description, URL, length…) of a video."""
        try:
            response = self._make_request(
                f"{self.base_url}/{video_id}",
                params={
                    "fields": "id,description,created_time,length,title,permalink_url",
                    "access_token": self.rotator.get_key()
                }
            )

            return {
                "id": response.get("id"),
                "title": response.get("title", ""),
                "description": response.get("description", ""),
                "url": response.get("permalink_url", ""),
                "duration": response.get("length", 0),
                "created_at": self._convert_to_timestamp(response.get("created_time"))
            }
        except Exception as e:
            logger.error(f"❌ 获取视频详情失败: {str(e)}")
            return {}

    def extract_group_id_and_query(self, url: str) -> tuple:
        """
        Extract the group ID and search query from a group-search URL.

        Expected shape: ``.../groups/{group_id}/search?q={query}``.

        :param url: Facebook group search URL
        :return: ``(group_id, query)`` tuple; either element may be None
        """
        try:
            parsed = urllib.parse.urlparse(url)
            path_parts = parsed.path.strip('/').split('/')

            # The group ID is the path segment right after "groups".
            group_id = None
            if 'groups' in path_parts:
                group_index = path_parts.index('groups')
                if group_index + 1 < len(path_parts):
                    group_id = path_parts[group_index + 1]

            # The search term is carried in the "q" query parameter.
            query_params = urllib.parse.parse_qs(parsed.query)
            query = query_params.get('q', [None])[0]

            return group_id, query
        except Exception as e:
            logger.error(f"❌ 解析URL失败: {str(e)}")
            return None, None

    def get_group_search_comments(self, url: str, max_posts: int = 50, max_comments_per_post: int = 100) -> List[Dict[str, Any]]:
        """
        Collect all comments relevant to a Facebook group-search URL.

        :param url: Facebook group search URL
        :param max_posts: maximum number of posts to scan
        :param max_comments_per_post: maximum comments fetched per post
        :return: flat list of all collected comments
        """
        # Derive the group ID and search term from the URL.
        group_id, query = self.extract_group_id_and_query(url)

        if not group_id or not query:
            logger.error("❌ 无法从URL中提取群组ID或查询词")
            return []

        logger.info(f"🔍 在群组 {group_id} 中搜索 '{query}'")

        # Find matching posts in the group.
        posts = self.search_group_posts(group_id, query, max_posts)

        if not posts:
            logger.warning("⚠️ 未找到相关帖子")
            return []

        logger.info(f"📝 找到 {len(posts)} 个帖子，开始获取评论...")

        # Gather the comments of every matching post.
        all_comments = []
        for i, post in enumerate(posts):
            post_id = post["id"]
            logger.info(f"💬 获取帖子 {post_id} 的评论 ({i+1}/{len(posts)})...")
            try:
                comments = self.get_post_comments(post_id, max_comments_per_post)
                all_comments.extend(comments)
            except Exception as e:
                # One failing post should not abort the whole crawl.
                logger.error(f"❌ 获取帖子 {post_id} 的评论失败: {str(e)}")
                continue

        logger.info(f"✅ 总共获取 {len(all_comments)} 条评论")
        return all_comments