# 新增：素材榜数据抓取模块（磁力金牛）
# 说明：
# - 该模块用于从 https://niu.e.kuaishou.com/material/list 等接口抓取素材榜数据
# - 参照 data_crawler.DataCrawlerAPI 的结构，只是目标站点与字段不同
# - 暂时仅定义数据表及预留抓取入口，具体字段与筛选条件可根据后续接口联调补充

import ssl_fix  # noqa: F401

import json
import time
from typing import List, Dict, Any, Optional

import requests
from PyQt5.QtCore import QThread, pyqtSignal

from db_connection import DatabaseConnection


class MaterialCrawler(QThread):
    """Worker thread that crawls the material-ranking list from 磁力金牛.

    Design (mirrors the existing ``DataCrawlerAPI``):
    - Page through the material-list API behind
      https://niu.e.kuaishou.com/material/list (the exact endpoint is a
      placeholder until confirmed by packet capture in the browser).
    - Upsert each material into the ``ks_material_rank`` table, creating or
      migrating the table on start-up.
    - Report progress / completion / errors to the UI through Qt signals.
    """

    progress_updated = pyqtSignal(str)   # log line for the UI
    crawl_finished = pyqtSignal()        # emitted once on normal completion
    error_occurred = pyqtSignal(str)     # emitted on fatal (crawl-aborting) errors

    # Default items requested per page; also used to detect the final
    # (short) page when the caller does not override ``pageSize``.
    PAGE_SIZE = 20

    def __init__(self, db_config, cookies: Optional[List[str]] = None):
        """
        :param db_config: connection settings forwarded to ``DatabaseConnection``
        :param cookies: optional pool of Cookie header values, rotated
                        round-robin across requests
        """
        super().__init__()
        self.db_config = db_config
        self.running = False
        self.cookies: List[str] = cookies or []
        self.current_cookie_index = 0
        # Filter parameters mirroring the web page's filter widgets
        # (category, sort order, date range, ...); set via start_crawl().
        self.selected_filters: Dict[str, Any] = {}

    # ------------------------- cookie rotation -------------------------
    def set_cookies(self, cookies: List[str]):
        """Replace the cookie pool (blank entries dropped) and restart rotation."""
        self.cookies = [c.strip() for c in cookies if c and c.strip()]
        self.current_cookie_index = 0

    def _next_cookie(self) -> str:
        """Return the next cookie in round-robin order, or "" when the pool is empty."""
        if not self.cookies:
            return ""
        cookie = self.cookies[self.current_cookie_index]
        self.current_cookie_index = (self.current_cookie_index + 1) % len(self.cookies)
        return cookie

    # ------------------------- public entry points -------------------------
    def start_crawl(self, filters: Optional[Dict[str, Any]] = None):
        """Start the material-ranking crawl thread.

        :param filters: filter parameters matching the web page's filter
                        controls (category, sort order, date range, ...);
                        merged into the request query string as-is.
        """
        self.selected_filters = filters or {}
        self.start()

    def stop(self):
        """Request a cooperative stop; the worker exits at its next check."""
        self.running = False

    # ------------------------- thread body -------------------------
    def run(self):
        """Thread entry point: prepare the table, then crawl page by page."""
        try:
            self.running = True
            self.progress_updated.emit("开始连接数据库(素材榜)...")
            conn = DatabaseConnection.create_connection(self.db_config)
            try:
                self._prepare_table(conn)

                # Effective page size: external filters may override the
                # default, and the last-page check must use the real value.
                try:
                    page_size = int((self.selected_filters or {}).get("pageSize", self.PAGE_SIZE))
                except (TypeError, ValueError):
                    page_size = self.PAGE_SIZE

                total_saved = 0
                page = 1
                while self.running:
                    self.progress_updated.emit(f"开始抓取素材榜第 {page} 页数据...")
                    items = self._fetch_material_page(page)
                    if not items:
                        self.progress_updated.emit("本页未获取到素材数据，停止翻页")
                        break

                    saved_count = self._save_material_items(conn, items)
                    total_saved += saved_count
                    self.progress_updated.emit(f"第 {page} 页保存 {saved_count} 条素材，累计保存 {total_saved} 条")

                    # A short page means the list is exhausted. Conservative
                    # stand-in for a hasMore/total flag -- TODO confirm
                    # against the real API response once captured.
                    if len(items) < page_size:
                        break

                    page += 1
                    # Light rate limiting between pages; skipped when a stop
                    # was requested so shutdown stays responsive.
                    if self.running:
                        time.sleep(2)

                self.progress_updated.emit(f"素材榜抓取完成，累计保存 {total_saved} 条记录")
                self.crawl_finished.emit()
            finally:
                conn.close()
        except Exception as e:
            self.error_occurred.emit(f"素材榜抓取过程中出错: {e}")
        finally:
            self.running = False

    def _prepare_table(self, conn) -> None:
        """Migrate the legacy table if present, then ensure ks_material_rank exists."""
        with conn.cursor() as cursor:
            # Seamless upgrade: if only the legacy ks_materialdata table
            # exists, rename it. Best effort -- missing SHOW/RENAME
            # privileges must not prevent the crawl from starting.
            try:
                cursor.execute("SHOW TABLES LIKE 'ks_material_rank'")
                has_new = cursor.fetchone() is not None
                cursor.execute("SHOW TABLES LIKE 'ks_materialdata'")
                has_old = cursor.fetchone() is not None
                if (not has_new) and has_old:
                    cursor.execute("RENAME TABLE ks_materialdata TO ks_material_rank")
                    conn.commit()
                    self.progress_updated.emit("检测到旧表 ks_materialdata，已自动重命名为 ks_material_rank")
            except Exception:
                pass
            # Create the material-ranking table when absent.
            create_table_sql = """
            CREATE TABLE IF NOT EXISTS ks_material_rank (
                id int NOT NULL AUTO_INCREMENT COMMENT '主键',
                materialId varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '素材ID',
                title varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '标题',
                coverUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '封面图',
                videoUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '视频地址',
                playCount bigint NULL DEFAULT NULL COMMENT '播放量',
                likeCount bigint NULL DEFAULT NULL COMMENT '点赞量',
                commentCount bigint NULL DEFAULT NULL COMMENT '评论量',
                shareCount bigint NULL DEFAULT NULL COMMENT '分享量',
                duration int NULL DEFAULT NULL COMMENT '时长(秒)',
                authorName varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '作者昵称',
                authorId varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '作者ID',
                materialJson text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL COMMENT '原始JSON',
                deleted int NULL DEFAULT 0 COMMENT '状态标记',
                createDate datetime NULL DEFAULT NULL COMMENT '创建时间',
                strTime varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '字符串时间',
                PRIMARY KEY (id),
                UNIQUE KEY unique_material (materialId)
            ) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_bin ROW_FORMAT = Dynamic;
            """
            cursor.execute(create_table_sql)
            conn.commit()

    # ------------------------- HTTP requests & persistence -------------------------
    def _build_headers(self) -> Dict[str, str]:
        """Build request headers, attaching the next cookie from the pool."""
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36",
            "Accept": "application/json, text/plain, */*",
            "Referer": "https://niu.e.kuaishou.com/material/list",
        }
        cookie = self._next_cookie()
        if cookie:
            headers["Cookie"] = cookie
        return headers

    def _fetch_material_page(self, page: int) -> List[Dict[str, Any]]:
        """Fetch one page of the material ranking.

        Returns an empty list on any HTTP or parse failure so the caller can
        simply stop paging. Endpoint and response shape are placeholders --
        TODO confirm both by packet capture.
        """
        try:
            url = "https://niu.e.kuaishou.com/rest/outer/material/list"  # placeholder; adjust per packet capture
            params: Dict[str, Any] = {
                "page": page,
                "pageSize": self.PAGE_SIZE,
            }
            # Externally supplied filters win over the defaults above.
            params.update(self.selected_filters or {})

            resp = requests.get(url, headers=self._build_headers(), params=params, timeout=20)
            if resp.status_code != 200:
                self.progress_updated.emit(f"素材榜接口返回状态码 {resp.status_code}")
                return []
            data = resp.json()

            # Assumed shape: {"data": {"list": [...]}} -- adjust once the
            # real response is known. Guard against data["data"] being null.
            items = None
            if isinstance(data, dict):
                items = (data.get("data") or {}).get("list")
            return items if isinstance(items, list) else []
        except Exception as e:
            self.progress_updated.emit(f"获取素材榜第 {page} 页数据失败: {e}")
            return []

    def _save_material_items(self, conn, items: List[Dict[str, Any]]) -> int:
        """Upsert material rows into ks_material_rank.

        Field mapping is best-effort until the real payload is confirmed.
        :return: number of rows committed successfully (inserts and updates).
        """
        from datetime import datetime

        now = datetime.now()
        str_time = now.strftime("%Y%m%d")
        # Statement hoisted out of the loop. Metric columns are refreshed on
        # duplicates too, so they cannot drift out of sync with the stored
        # materialJson snapshot.
        insert_sql = """
        INSERT INTO ks_material_rank
        (materialId, title, coverUrl, videoUrl, playCount, likeCount, commentCount,
         shareCount, duration, authorName, authorId, materialJson, deleted, createDate, strTime)
        VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,0,%s,%s)
        ON DUPLICATE KEY UPDATE
            title = VALUES(title),
            coverUrl = VALUES(coverUrl),
            videoUrl = VALUES(videoUrl),
            playCount = VALUES(playCount),
            likeCount = VALUES(likeCount),
            commentCount = VALUES(commentCount),
            shareCount = VALUES(shareCount),
            duration = VALUES(duration),
            materialJson = VALUES(materialJson)
        """
        saved = 0
        with conn.cursor() as cursor:
            for item in items:
                if not self.running:
                    break
                try:
                    material_id = str(item.get("id") or item.get("materialId") or "").strip()
                    if not material_id:
                        # Rows without a stable id cannot satisfy the unique key.
                        continue
                    cursor.execute(
                        insert_sql,
                        (
                            material_id,
                            item.get("title") or "",
                            item.get("coverUrl") or item.get("cover") or "",
                            item.get("videoUrl") or "",
                            item.get("playCount"),
                            item.get("likeCount"),
                            item.get("commentCount"),
                            item.get("shareCount"),
                            item.get("duration"),
                            item.get("authorName"),
                            item.get("authorId"),
                            json.dumps(item, ensure_ascii=False),
                            now,
                            str_time,
                        ),
                    )
                except Exception as e:
                    # Keep going: one bad row must not abort the whole page.
                    conn.rollback()
                    self.progress_updated.emit(f"保存素材失败: {e}")
                else:
                    conn.commit()
                    saved += 1
        return saved
