# 新增：素材榜数据抓取模块（磁力金牛）
# 用于从 https://niu.e.kuaishou.com/material/list 等接口抓取素材榜数据

import ssl_fix  # noqa: F401

import json
import time
from typing import List, Dict, Any, Optional

import requests
from PyQt5.QtCore import QThread, pyqtSignal

from db_connection import DatabaseConnection


class MaterialCrawler(QThread):
    """Crawler thread for the material ranking board (磁力金牛 / Magnetic Taurus).

    Approach:
    - Hits the material-ranking API under ``niu.e.kuaishou.com`` using a
      Cookie pool for the logged-in session (round-robin rotation).
    - The filter parameters (``filters``) are built by the owning panel and
      mirror the filters on the web page as closely as possible.
    - Rows are persisted into the ``ks_material_rank`` table (the legacy
      ``ks_materialdata`` table is renamed automatically on first run).
    """

    progress_updated = pyqtSignal(str)   # human-readable progress messages
    crawl_finished = pyqtSignal()        # emitted once after a successful crawl
    error_occurred = pyqtSignal(str)     # emitted on a fatal, crawl-aborting error

    def __init__(self, db_config, cookies: Optional[List[str]] = None):
        super().__init__()
        self.db_config = db_config
        self.running = False
        self.cookies = cookies or []
        self.current_cookie_index = 0
        # Crawl filters supplied by the panel; kept close to the web UI filters.
        self.selected_filters: Dict[str, Any] = {}

    # ------------------------- cookie rotation -------------------------
    def set_cookies(self, cookies: List[str]):
        """Replace the cookie pool, dropping empty/blank entries, and rewind."""
        self.cookies = [c.strip() for c in cookies if c and c.strip()]
        self.current_cookie_index = 0

    def _next_cookie(self) -> str:
        """Return the next cookie in round-robin order ("" when the pool is empty)."""
        if not self.cookies:
            return ""
        c = self.cookies[self.current_cookie_index]
        self.current_cookie_index = (self.current_cookie_index + 1) % len(self.cookies)
        return c

    # ------------------------- public start API -------------------------
    def start_crawl(self, filters: Optional[Dict[str, Any]] = None):
        """Start the material-ranking crawl on this thread.

        :param filters: filter dict mirroring the web page's filter controls
                        (time range, sort order, industry, ...)
        """
        self.selected_filters = filters or {}
        self.start()

    def stop(self):
        """Ask the crawl loop to stop at its next checkpoint (cooperative)."""
        self.running = False

    # ------------------------- thread main loop -------------------------
    def run(self):
        """Thread body: connect to DB, ensure the schema, then crawl page by page.

        Two modes, selected via ``selected_filters['auto_traverse_industries']``:
        - auto-traverse: iterate over a fixed list of industries;
        - single industry: crawl only the industry chosen in the filters.
        Emits progress via ``progress_updated`` and finishes with
        ``crawl_finished`` (or ``error_occurred`` on a fatal error).
        """
        try:
            self.running = True
            self.progress_updated.emit("开始连接数据库(素材榜)...")
            conn = DatabaseConnection.create_connection(self.db_config)
            try:
                # Decide whether to iterate over every industry automatically.
                auto_traverse = self.selected_filters.get("auto_traverse_industries", False)
                industries_to_crawl = []

                if auto_traverse:
                    # Auto-traverse mode: crawl all industries. The labels must
                    # match the exact industry names the API expects.
                    industries_to_crawl = [
                        "彩妆/香水/美妆工具",
                        "身体护理",
                        "农资",
                        "冲调/水饮",
                        "3C配件",
                        "居家百货",
                        "男装",
                        "女装",
                        "其他"
                    ]
                    self.progress_updated.emit(f"⚡ 自动遍历模式：将依次抓取 {len(industries_to_crawl)} 个行业的数据")
                else:
                    # Single-industry mode: only the user-selected industry (or all).
                    # Default kept identical to _fetch_material_page's default
                    # ("冲调/水饮") so the displayed prefix matches the request.
                    industry = self.selected_filters.get("industry", "冲调/水饮")
                    industries_to_crawl = [industry] if industry else [""]

                with conn.cursor() as cursor:
                    # Table migration: if the legacy ks_materialdata table exists
                    # and ks_material_rank does not, rename it for a smooth upgrade.
                    try:
                        cursor.execute("SHOW TABLES LIKE 'ks_material_rank'")
                        has_new = cursor.fetchone() is not None
                        cursor.execute("SHOW TABLES LIKE 'ks_materialdata'")
                        has_old = cursor.fetchone() is not None
                        if (not has_new) and has_old:
                            cursor.execute("RENAME TABLE ks_materialdata TO ks_material_rank")
                            conn.commit()
                            self.progress_updated.emit("检测到旧表 ks_materialdata，已自动重命名为 ks_material_rank")
                    except Exception:
                        # Migration is best-effort; CREATE TABLE below still runs.
                        pass
                    # Create the ranking table when it does not exist yet.
                    create_table_sql = """
                    CREATE TABLE IF NOT EXISTS ks_material_rank (
                        id int NOT NULL AUTO_INCREMENT COMMENT '主键',
                        materialId varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '素材ID',
                        title varchar(512) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '标题',
                        coverUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '封面图',
                        videoUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '视频地址',
                        playCount bigint NULL DEFAULT NULL COMMENT '播放量',
                        likeCount bigint NULL DEFAULT NULL COMMENT '点赞量',
                        commentCount bigint NULL DEFAULT NULL COMMENT '评论量',
                        shareCount bigint NULL DEFAULT NULL COMMENT '分享量',
                        duration int NULL DEFAULT NULL COMMENT '时长(秒)',
                        authorName varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '作者昵称',
                        authorId varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '作者ID',
                        mainIndustry varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '所属行业',
                        categoryLevel1Name varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '一级分类',
                        categoryLevel2Name varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '二级分类',
                        categoryLevel3Name varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '三级分类',
                        costTotal bigint NULL DEFAULT NULL COMMENT '消耗总额',
                        adItemImpression bigint NULL DEFAULT NULL COMMENT '曝光量',
                        ctr decimal(10, 6) NULL DEFAULT NULL COMMENT '点击率',
                        cvr decimal(10, 6) NULL DEFAULT NULL COMMENT '转化率',
                        ueScore decimal(10, 6) NULL DEFAULT NULL COMMENT '体验分',
                        itemId bigint NULL DEFAULT NULL COMMENT '商品ID',
                        itemCoverUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '商品封面',
                        photoCaption text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL COMMENT '素材描述',
                        itemLinkUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '商品链接',
                        materialJson text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL COMMENT '原始JSON',
                        deleted int NULL DEFAULT 0 COMMENT '状态标记',
                        createDate datetime NULL DEFAULT NULL COMMENT '创建时间',
                        strTime varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NULL DEFAULT NULL COMMENT '字符串时间',
                        PRIMARY KEY (id),
                        UNIQUE KEY unique_material (materialId)
                    ) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_bin ROW_FORMAT = Dynamic;
                    """
                    try:
                        cursor.execute(create_table_sql)
                        conn.commit()
                    except Exception:
                        # The table already exists with an older schema:
                        # add the newer columns one by one (best-effort).
                        try:
                            new_columns = [
                                "mainIndustry varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                                "categoryLevel1Name varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                                "categoryLevel2Name varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                                "categoryLevel3Name varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                                "costTotal bigint",
                                "adItemImpression bigint",
                                "ctr decimal(10, 6)",
                                "cvr decimal(10, 6)",
                                "ueScore decimal(10, 6)",
                                "itemId bigint",
                                "itemCoverUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                                "photoCaption text CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                                "itemLinkUrl varchar(1024) CHARACTER SET utf8mb4 COLLATE utf8mb4_bin",
                            ]
                            for col_def in new_columns:
                                try:
                                    cursor.execute(f"ALTER TABLE ks_material_rank ADD COLUMN {col_def}")
                                except Exception:
                                    pass  # column already exists
                            conn.commit()
                        except Exception:
                            pass

                total_saved = 0

                # Crawl each industry in turn.
                for industry_index, industry in enumerate(industries_to_crawl, 1):
                    if not self.running:
                        break

                    if auto_traverse:
                        self.progress_updated.emit(f"\n▶️ 开始抓取第 {industry_index}/{len(industries_to_crawl)} 个行业：{industry or '全部'}")
                        # Point the shared filters at the current industry so
                        # _fetch_material_page requests the right one.
                        self.selected_filters["industry"] = industry

                    page = 1
                    industry_saved = 0
                    self._last_page_info = {}  # updated by _fetch_material_page

                    while self.running:
                        prefix = f"[{industry or '全部'}] " if auto_traverse else ""
                        self.progress_updated.emit(f"{prefix}开始抓取第 {page} 页数据...")
                        items = self._fetch_material_page(page)
                        if not items:
                            self.progress_updated.emit(f"{prefix}本页未获取到素材数据，停止翻页")
                            break

                        saved_count = self._save_material_items(conn, items)
                        industry_saved += saved_count
                        total_saved += saved_count
                        self.progress_updated.emit(f"{prefix}第 {page} 页保存 {saved_count} 条，本行业累计 {industry_saved} 条，总计 {total_saved} 条")

                        # Stop paging once the API reports no more data.
                        page_info = getattr(self, "_last_page_info", {})
                        has_more = page_info.get("hasMore", False)
                        if not has_more:
                            self.progress_updated.emit(f"{prefix}该行业数据抓取完毕（本行业: {industry_saved} 条）")
                            break

                        page += 1
                        time.sleep(2)  # throttle between pages

                    if auto_traverse and industry_index < len(industries_to_crawl):
                        self.progress_updated.emit(f"✅ 行业 '{industry}' 抓取完成，等待 3 秒后继续...")
                        time.sleep(3)  # throttle between industries

                self.progress_updated.emit(f"\n✨ 素材榜抓取全部完成，累计保存 {total_saved} 条记录")
                self.crawl_finished.emit()
            finally:
                conn.close()
        except Exception as e:
            self.error_occurred.emit(f"素材榜抓取过程中出错: {e}")
        finally:
            self.running = False

    # ------------------------- HTTP request & storage -------------------------
    def _build_headers(self, cookie: str) -> Dict[str, str]:
        """Build the HTTP request headers (replicated from a browser DevTools cURL)."""
        headers = {
            "accept": "application/json,*/*",
            "accept-language": "zh-CN,zh;q=0.9",
            "account-id": "83764854",
            "app-key": "ad.adUkmConfig.adEsp",
            "content-type": "application/json",
            "origin": "https://niu.e.kuaishou.com",
            "referer": "https://niu.e.kuaishou.com/material/list?__accountId__=83764854",
            "requestsource": "PC",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
        }
        if cookie:
            headers["cookie"] = cookie
        return headers

    def _extract_esp_ph(self, cookie: str) -> Optional[str]:
        """Extract the ``kuaishou.ad.esp_ph`` value from a raw Cookie string.

        Returns None when the cookie string is empty or the key is absent.
        """
        if not cookie:
            return None
        for part in cookie.split(';'):
            part = part.strip()
            if part.startswith('kuaishou.ad.esp_ph='):
                return part.split('=', 1)[1]
        return None

    def _fetch_material_page(self, page: int) -> List[Dict[str, Any]]:
        """Fetch one page of ranking data via POST (replicates the browser request).

        Side effect: stores paging metadata in ``self._last_page_info`` so
        ``run()`` can decide whether to keep paging.

        :param page: 1-based page number (used for progress messages only;
                     the API derives paging from its own state/params).
        :return: list of raw item dicts; empty list on any failure.
        """
        try:
            url = "https://niu.e.kuaishou.com/rest/esp/promotion/materialCenter/ranking/list"

            # Pick the next cookie from the pool.
            ck = self._next_cookie()
            if not ck:
                self.progress_updated.emit("❌ 未找到可用的 Cookie，请先登录")
                return []

            # The esp_ph cookie value doubles as a required query parameter.
            esp_ph = self._extract_esp_ph(ck)
            if not esp_ph:
                self.progress_updated.emit("❌ Cookie 中未找到 kuaishou.ad.esp_ph，可能已过期")
                return []

            # Query parameters
            params = {"kuaishou.ad.esp_ph": esp_ph}

            # Request headers
            headers = self._build_headers(ck)

            # Map the panel's filters onto the API parameters.
            filters = self.selected_filters or {}

            # Industry selected by the user (defaults to "冲调/水饮").
            industry = filters.get("industry", "冲调/水饮")

            # timeRange: "3d"/"7d"/"30d" -> selectDay: 3/7/30
            tr = filters.get("timeRange", "3d")
            select_day = 3
            if tr == "7d":
                select_day = 7
            elif tr == "30d":
                select_day = 30

            # selectTime: midnight two days ago, as a Unix timestamp
            # (matches what the browser sends).
            from datetime import datetime, timedelta
            two_days_ago = (datetime.now() - timedelta(days=2)).replace(hour=0, minute=0, second=0, microsecond=0)
            select_time = int(two_days_ago.timestamp())

            # Request body (kept in sync with the actual browser request).
            payload = {
                "industry": industry,
                "selectDay": select_day,
                "keyword": "",
                "searchType": 1,
                "ordered": "cost_total",  # sort by total spend
                "selectTime": select_time,
            }

            # Log the full request parameters for debugging.
            self.progress_updated.emit(f"🔍 请求行业: {industry}, 第 {page} 页, 时间范围: {select_day}天")
            self.progress_updated.emit(f"📦 请求payload: {json.dumps(payload, ensure_ascii=False)}")

            # Send the request.
            resp = requests.post(url, params=params, headers=headers, json=payload, timeout=20)

            if resp.status_code != 200:
                self.progress_updated.emit(f"❌ API 返回状态码 {resp.status_code}")
                self.progress_updated.emit(f"响应内容: {resp.text[:500]}")
                return []

            data = resp.json()
            result = data.get("result")
            if result != 1:
                msg = data.get("msg") or data.get("message") or f"未知错误(result={result})"
                self.progress_updated.emit(f"❌ API 返回错误: {msg}")
                self.progress_updated.emit(f"📊 完整响应: {json.dumps(data, ensure_ascii=False, indent=2)[:800]}")
                return []

            resp_data = data.get("data", {})
            items = resp_data.get("list", [])

            # Log a summary of the response for debugging.
            self.progress_updated.emit(f"📊 API响应: result={result}, data.keys={list(resp_data.keys())}, list长度={len(items) if isinstance(items, list) else 'N/A'}")

            if not isinstance(items, list):
                self.progress_updated.emit(f"⚠️ 返回数据格式异常: list 字段类型为 {type(items)}")
                self.progress_updated.emit(f"📊 完整resp_data: {json.dumps(resp_data, ensure_ascii=False)[:500]}")
                return []

            if not items:
                # Empty list: dump extra detail to help diagnose why.
                self.progress_updated.emit(f"⚠️ 返回的list为空列表")
                self.progress_updated.emit(f"📊 resp_data完整内容: {json.dumps(resp_data, ensure_ascii=False, indent=2)[:1000]}")

            # Publish paging info for run() to consume.
            page_info = {
                "hasMore": resp_data.get("hasMore", False),
                "total": resp_data.get("total", 0),
                "pageNo": resp_data.get("pageNo"),
                "pageSize": resp_data.get("pageSize"),
            }
            self._last_page_info = page_info

            if items:
                msg = f"✓ 第 {page} 页获取 {len(items)} 条数据"
                if page_info.get("total"):
                    msg += f" (总计: {page_info['total']} 条)"
                self.progress_updated.emit(msg)
            return items
        except Exception as e:
            self.progress_updated.emit(f"获取第 {page} 页数据失败: {e}")
            import traceback
            traceback.print_exc()
            return []

    def _save_material_items(self, conn, items: List[Dict[str, Any]]) -> int:
        """Persist material items into ``ks_material_rank``, keeping all API fields.

        Items with a missing/zero ``itemId`` or an ``itemId`` already present in
        the table are skipped. Each row is committed individually so one bad row
        (e.g. a duplicate ``materialId`` hitting the UNIQUE key) rolls back alone.

        :param conn: open DB connection (cursor created per call).
        :param items: raw item dicts as returned by the ranking API.
        :return: number of rows actually inserted.
        """
        saved = 0
        from datetime import datetime

        now = datetime.now()
        str_time = now.strftime("%Y%m%d")
        with conn.cursor() as cursor:
            for it in items:
                if not self.running:
                    break
                # Require a non-empty, non-zero itemId.
                item_id = it.get("itemId")
                if not item_id:
                    continue

                try:
                    # Field mapping from the API payload.
                    material_id = str(it.get("photoId") or it.get("id") or "").strip()
                    if not material_id:
                        continue

                    title = it.get("itemTitle") or it.get("title") or ""
                    cover = it.get("photoUrl") or it.get("coverUrl") or ""
                    video_url = it.get("photoUrl") or it.get("videoUrl") or ""

                    # Metric fields.
                    play_count = it.get("adItemImpression", 0)  # impressions stand in for play count
                    duration = it.get("photoDuration", 0)
                    main_industry = it.get("mainIndustry") or ""
                    category_level1 = it.get("categoryLevel1Name") or ""
                    category_level2 = it.get("categoryLevel2Name") or ""
                    category_level3 = it.get("categoryLevel3Name") or ""
                    cost_total = it.get("costTotal") or 0
                    ad_item_impression = it.get("adItemImpression") or 0
                    ctr = it.get("ctr") or 0
                    cvr = it.get("cvr") or 0
                    ue_score = it.get("ueScore") or 0
                    item_cover_url = it.get("itemCoverUrl") or it.get("coverUrl") or ""
                    photo_caption = it.get("photoCaption") or ""
                    item_link_url = it.get("itemLinkUrl") or ""
                    # item_id was already extracted above.

                    # Skip rows whose itemId is already stored.
                    check_sql = "SELECT 1 FROM ks_material_rank WHERE itemId = %s"
                    cursor.execute(check_sql, (item_id,))
                    if cursor.fetchone():
                        continue

                    insert_sql = """
                    INSERT INTO ks_material_rank
                    (materialId, title, coverUrl, videoUrl, playCount, likeCount, commentCount,
                     shareCount, duration, authorName, authorId, mainIndustry, categoryLevel1Name,
                     categoryLevel2Name, categoryLevel3Name, costTotal, adItemImpression, ctr, cvr,
                     ueScore, itemId, itemCoverUrl, photoCaption, itemLinkUrl, materialJson, deleted, createDate, strTime)
                    VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,0,%s,%s)
                    """
                    cursor.execute(
                        insert_sql,
                        (
                            material_id,
                            title,
                            cover,
                            video_url,
                            play_count,
                            0,  # likeCount (not provided by this API)
                            0,  # commentCount (not provided by this API)
                            0,  # shareCount (not provided by this API)
                            duration,
                            "",  # authorName (not provided by this API)
                            "",  # authorId (not provided by this API)
                            main_industry,
                            category_level1,
                            category_level2,
                            category_level3,
                            cost_total,
                            ad_item_impression,
                            ctr,
                            cvr,
                            ue_score,
                            item_id,
                            item_cover_url,
                            photo_caption,
                            item_link_url,
                            json.dumps(it, ensure_ascii=False),
                            now,
                            str_time,
                        ),
                    )
                    saved += 1
                    conn.commit()
                except Exception as e:
                    conn.rollback()
                    self.progress_updated.emit(f"保存素材 itemId={item_id} 失败: {e}")
        return saved
