#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import argparse
import json
import re
from typing import Optional, Any, List, Dict
import pandas as pd
from sqlalchemy import create_engine, text
from datetime import datetime

import requests
from io import BytesIO
from PIL import Image as PILImage
from openpyxl.utils import get_column_letter
from openpyxl.drawing.image import Image as XLImage


def normalize_tags(val: Optional[Any]) -> Optional[str]:
    """
    Normalize product_tags into a human-readable comma-separated string.

    Handles:
    - JSON strings encoding a list or dict
    - Python list/tuple values
    - plain strings separated by commas or '|'
    - anything else falls back to str()

    Returns None for None input or an empty/whitespace-only string.
    """
    if val is None:
        return None

    try:
        # Already a sequence of tags.
        if isinstance(val, (list, tuple)):
            return ", ".join(str(item) for item in val)

        if isinstance(val, str):
            stripped = val.strip()
            if not stripped:
                return None

            # Looks like a JSON document — decode and flatten.
            if stripped[0] in "{[":
                parsed = json.loads(stripped)
                if isinstance(parsed, dict):
                    # Join the dict values as strings.
                    return ", ".join(str(v) for v in parsed.values())
                if isinstance(parsed, list):
                    return ", ".join(str(item) for item in parsed)
                return str(parsed)

            # Plain delimited string; accept '|' as well as ','.
            pieces = [p.strip() for p in stripped.replace("|", ",").split(",")]
            return ", ".join(p for p in pieces if p)

        # Any other type: best-effort stringification.
        return str(val)
    except Exception:
        # Parsing failed — fall back to the raw value as a string.
        try:
            return str(val)
        except Exception:
            return None


def best_image_url(record: dict) -> Optional[str]:
    """
    Pick the best image URL from a record: cos_url first, then image_url.
    As a last resort, use detail_content when it is itself an http(s) URL.
    Returns None when no usable URL is found.
    """
    for key in ("cos_url", "image_url"):
        candidate = record.get(key)
        if candidate:
            return candidate

    detail = record.get("detail_content")
    if isinstance(detail, str) and detail.lower().startswith(("http://", "https://")):
        return detail
    return None


def fetch_image_pil(url: str, timeout: int = 10, max_width: int = 160) -> Optional[PILImage.Image]:
    """
    Download an image and proportionally scale it to at most ``max_width``
    pixels wide. Returns a PIL Image, or None on any download/decode error.
    """
    try:
        response = requests.get(url, timeout=timeout, stream=True)
        response.raise_for_status()
        image = PILImage.open(BytesIO(response.content))

        # Some modes (palette, CMYK, RGBA, ...) need conversion first.
        if image.mode not in ("RGB", "L"):
            image = image.convert("RGB")

        # Scale down, preserving aspect ratio, when wider than the target.
        if max_width and image.width > max_width:
            scale = max_width / float(image.width)
            new_size = (max_width, int(image.height * scale))
            image = image.resize(new_size, PILImage.LANCZOS)

        return image
    except Exception as exc:
        print(f"[WARN] 下载或处理图片失败: {url} -> {exc}")
        return None


def insert_thumbnails(writer: pd.ExcelWriter, sheet_name: str, df: pd.DataFrame,
                      image_col: str, thumb_width: int = 160, timeout: int = 10) -> None:
    """
    Insert image thumbnails into the ``image_col`` column of the named
    worksheet, widening the column and raising row heights to fit.

    ``image_col`` is a Chinese column name such as '裂变图' or '详情图'.
    Cell values that are not http(s) URL strings are skipped; download or
    insertion failures are logged with a warning and skipped, never raised.
    """
    if sheet_name not in writer.sheets:
        return
    ws = writer.sheets[sheet_name]

    if image_col not in df.columns:
        return

    col_idx = df.columns.get_loc(image_col) + 1  # 1-based
    col_letter = get_column_letter(col_idx)
    ws.column_dimensions[col_letter].width = max(20, int(thumb_width / 6))

    # Use iterrows to avoid itertuples' attribute-access problems with
    # Chinese (non-identifier) column names.
    for i, (_, row) in enumerate(df.iterrows(), start=2):  # data rows start at Excel row 2
        url = row.get(image_col)
        if not url or not isinstance(url, str) or not url.lower().startswith(("http://", "https://")):
            continue

        pil_img = fetch_image_pil(url, timeout=timeout, max_width=thumb_width)
        if not pil_img:
            continue

        try:
            # Re-encode to PNG in an in-memory buffer for openpyxl; handing
            # it the PIL image directly can break on save (missing img.fp).
            img_buf = BytesIO()
            pil_img.save(img_buf, format="PNG")
            img_buf.seek(0)

            xl_img = XLImage(img_buf)

            # Keep a reference on the worksheet so the buffer isn't
            # garbage-collected before the workbook is saved.
            if not hasattr(ws, "_image_buffers"):
                ws._image_buffers = []
            ws._image_buffers.append(img_buf)

            # Row height is in points; pixels * 0.75 approximates points.
            target_height_pt = int(pil_img.height * 0.75) + 2
            ws.row_dimensions[i].height = target_height_pt

            anchor_cell = f"{col_letter}{i}"
            ws.add_image(xl_img, anchor_cell)
        except Exception as e:
            print(f"[WARN] 插入图片失败: {url} -> {e}")


def extract_url_from_text(text):
    """
    Extract an image URL from free-form text. Supported shapes, in order:
    1) Markdown image syntax: ![alt]( `https://...` ) or ![alt](https://...)
    2) A backtick-wrapped URL: `https://...`
    3) A bare URL anywhere in the text: https://...

    Returns the URL string, or None when nothing matches.
    """
    if not text or not isinstance(text, str):
        return None
    candidate = text.strip().strip('"').strip("'")

    # Fast path: the value is already a bare http/https URL.
    if candidate.startswith(("http://", "https://")):
        return candidate

    # 1) Markdown image syntax; optional backticks/spaces inside the parens.
    md = re.search(r'!\[[^\]]*\]\(\s*`?(https?://[^)\s`]+)`?\s*\)', candidate)
    if md:
        return md.group(1)

    # 2) Backtick-wrapped URL.
    ticked = re.search(r'`(https?://[^\s`]+)`', candidate)
    if ticked:
        return ticked.group(1)

    # 3) Bare URL; trim trailing punctuation such as ')', '.', ',' or ';'.
    bare = re.search(r'(https?://[^\s)]+)', candidate)
    if bare:
        return bare.group(1).rstrip(').,;')

    return None

# Generic extraction of an image URL from an arbitrary field value
# (accepts plain str / JSON string / list / dict).
def get_image_url_from_field(value):
    def _maybe_json(raw):
        # Decode strings that look like a JSON document or quoted scalar;
        # on failure keep the original string unchanged.
        if isinstance(raw, str):
            trimmed = raw.strip()
            looks_json = (
                (trimmed.startswith("{") and trimmed.endswith("}"))
                or (trimmed.startswith("[") and trimmed.endswith("]"))
                or (trimmed.startswith('"') and trimmed.endswith('"'))
                or (trimmed.startswith("'") and trimmed.endswith("'"))
            )
            if looks_json:
                try:
                    return json.loads(trimmed)
                except Exception:
                    return raw
        return raw

    parsed = _maybe_json(value)
    if parsed is None:
        return None

    # String: either already a URL, or extract one via the text helpers.
    if isinstance(parsed, str):
        cleaned = parsed.strip().strip('"').strip("'")
        if cleaned.startswith("http://") or cleaned.startswith("https://"):
            return cleaned
        return extract_url_from_text(cleaned)

    # Dict: try well-known keys first, then scan every value.
    if isinstance(parsed, dict):
        for key in ["cos_url", "image_url", "url", "src", "href", "detail_content"]:
            if key in parsed and parsed[key]:
                found = get_image_url_from_field(parsed[key])
                if found:
                    return found
        for nested in parsed.values():
            found = get_image_url_from_field(nested)
            if found:
                return found
        return None

    # List: first item that yields a URL wins.
    if isinstance(parsed, list):
        for item in parsed:
            found = get_image_url_from_field(item)
            if found:
                return found
        return None

    return None


def convert_result_data_to_dataframe(result_data: Dict, workflow_id: int, user_id: int) -> pd.DataFrame:
    """
    Convert ``ai_workflows.result_data`` into a DataFrame whose columns
    mirror the ``products`` table structure.

    Rows are built by pairing ``flux_variations[i]`` + ``product_info[i]`` +
    ``detail_pages[i]`` into one complete record per index; lists shorter
    than the longest contribute empty dicts at missing indices.
    """
    records = []
    
    # Pull the three parallel lists out of result_data.
    flux_variations = result_data.get('flux_variations', [])
    product_info = result_data.get('product_info', [])
    detail_pages = result_data.get('detail_pages', [])
    
    print(f"🔍 从 result_data 解析到:")
    print(f"  - flux_variations: {len(flux_variations)}")
    print(f"  - product_info: {len(product_info)}")
    print(f"  - detail_pages: {len(detail_pages)}")
    
    # Pair by index: flux_variations[i] + product_info[i] + detail_pages[i]
    # → one complete record.
    max_len = max(len(flux_variations), len(product_info), len(detail_pages))
    for i in range(max_len):
        # Entry at this index from each list (empty dict when out of range).
        flux_item = flux_variations[i] if i < len(flux_variations) else {}
        product_item = product_info[i] if i < len(product_info) else {}
        detail_item = detail_pages[i] if i < len(detail_pages) else {}
        
        # Extract the detail-image URL from detail_pages[i]; several field
        # shapes are tolerated (str / JSON string / list / dict).
        dp = detail_item.get('detail_pages')
        dc = detail_item.get('detail_content')
        detail_url = (
            get_image_url_from_field(dp) or
            get_image_url_from_field(dc) or
            get_image_url_from_field(detail_item.get('image_url')) or
            ''
        )
        
        # Debug: where each detail image came from and what was parsed.
        try:
            print(f"[DETAIL] index={i} extracted_url={detail_url} | dp_type={type(dp).__name__} | dc_type={type(dc).__name__}")
        except Exception:
            pass
        
        # One complete record: fission image + product info + detail image.
        record = {
            'id': f"product_{i}",  # synthetic ID (no backing DB row)
            'workflow_id': workflow_id,
            'user_id': user_id,
            'source_type': 'product',  # uniform marker: product (fission + detail)
            'source_index': i,
            'image_url': flux_item.get('url', ''),  # fission image
            'cos_url': flux_item.get('url', ''),   # fission image
            'cos_key': '',
            'image_width': None,
            'image_height': None,
            'detail_content': detail_url,  # detail image URL is stored here
            'created_at': datetime.now(),
            'updated_at': datetime.now()
        }
        
        # Title/description from product_info[i], falling back to
        # detail_pages[i], then to a generated placeholder title.
        record.update({
            'product_title': product_item.get('title', detail_item.get('product_title', f"产品 #{i+1}")),
            'product_description': product_item.get('description', detail_item.get('product_description', '')),
        })
        
        # Tags: prefer product_info, fall back to detail_pages; lists are
        # serialized as JSON, anything else stringified.
        tags = product_item.get('tags') or detail_item.get('product_tags', [])
        if tags:
            record['product_tags'] = json.dumps(tags, ensure_ascii=False) if isinstance(tags, list) else str(tags)
        else:
            record['product_tags'] = None

        records.append(record)
    
    return pd.DataFrame(records)


def main():
    """
    CLI entry point: export a workflow's flux-variation / product data to an
    Excel file with embedded image thumbnails.

    Data source priority:
    1) rows in the ``products`` table for the given workflow_id;
    2) fallback: parse the ``ai_workflows.result_data`` JSON blob.

    Raises:
        SystemExit: when no database URL is supplied, or when the given
            workflow_id does not exist in ``ai_workflows``.
    """
    parser = argparse.ArgumentParser(description="导出指定 AI 工作流（workflow_id）的 flux 裂变及产品数据为 Excel，并嵌入图片缩略图")
    parser.add_argument("--workflow-id", type=int, required=True, help="AI 工作流 ID（必填）")
    parser.add_argument("--database-url", type=str, default=os.getenv("DATABASE_URL"),
                        help="数据库连接串（可选），例如：mysql+pymysql://user:pass@host:3306/fastapi_auth?charset=utf8mb4"
                             "，默认读取环境变量 DATABASE_URL")
    parser.add_argument("--output", type=str, default=None, help="输出 Excel 文件路径，默认 workflow_{id}_flux_export.xlsx")
    parser.add_argument("--thumb-width", type=int, default=160, help="图片缩略图最大宽度（像素），默认160")
    parser.add_argument("--timeout", type=int, default=10, help="单张图片下载超时（秒），默认10")
    args = parser.parse_args()

    if not args.database_url:
        raise SystemExit("未提供数据库连接串。请通过 --database-url 或设置环境变量 DATABASE_URL 提供。")

    # Computed once here; a second, redundant assignment before the write
    # step has been removed.
    output_path = args.output or f"workflow_{args.workflow_id}_flux_export.xlsx"

    # pool_pre_ping guards against reusing stale idle connections.
    engine = create_engine(args.database_url, pool_pre_ping=True)

    # Primary source: the products table.
    print(f"🔍 尝试从 products 表读取数据（workflow_id={args.workflow_id}）...")
    products_sql = text("""
        SELECT
            id,
            workflow_id,
            user_id,
            product_title,
            product_tags,
            product_description,
            image_url,
            image_width,
            image_height,
            cos_url,
            cos_key,
            source_type,
            source_index,
            detail_content,
            created_at,
            updated_at
        FROM products
        WHERE workflow_id = :workflow_id
        ORDER BY source_type, COALESCE(source_index, 0), id
    """)

    df = pd.read_sql_query(products_sql, con=engine, params={"workflow_id": args.workflow_id})

    # Fallback source: ai_workflows.result_data JSON.
    if df.empty:
        print("📦 products 表无数据，尝试从 ai_workflows.result_data 读取...")

        workflow_sql = text("""
            SELECT id, user_id, result_data
            FROM ai_workflows
            WHERE id = :workflow_id
        """)

        workflow_result = pd.read_sql_query(workflow_sql, con=engine, params={"workflow_id": args.workflow_id})

        if workflow_result.empty:
            print(f"❌ workflow_id={args.workflow_id} 不存在")
            raise SystemExit("指定的 workflow_id 不存在")

        workflow_row = workflow_result.iloc[0]
        result_data_str = workflow_row['result_data']
        user_id = workflow_row['user_id']

        if not result_data_str:
            print("⚠️  ai_workflows.result_data 为空，将导出空 Excel")
            df = pd.DataFrame()
        else:
            print("🔄 正在解析 result_data...")
            try:
                # Decode the JSON blob and normalize it to a products-shaped frame.
                result_data = json.loads(result_data_str)
                df = convert_result_data_to_dataframe(result_data, args.workflow_id, user_id)
                print(f"✅ 从 result_data 解析到 {len(df)} 条记录")
            except json.JSONDecodeError as e:
                print(f"❌ result_data JSON 解析失败: {e}")
                print("⚠️  将导出空 Excel")
                df = pd.DataFrame()
            except Exception as e:
                print(f"❌ 处理 result_data 时出错: {e}")
                print("⚠️  将导出空 Excel")
                df = pd.DataFrame()

    if df.empty:
        print("没有查询到任何数据，将导出一个空的 Excel。")
    else:
        print(f"📊 找到 {len(df)} 条记录")

    # Map a products-style record onto the 6 Chinese export columns.
    # NOTE: the original function contained a second, unreachable
    # implementation after this return statement; that dead code was removed.
    def to_cn_row(rec: dict) -> dict:
        # Fission image: prefer cos_url, fall back to image_url.
        flux_url = rec.get("cos_url") or rec.get("image_url") or ""
        # Detail image: parsed from detail_content (str/JSON/list/dict tolerated).
        detail_url = get_image_url_from_field(rec.get("detail_content")) or ""

        try:
            print(f"[PRODUCT->CN] index={rec.get('source_index')} flux_url={flux_url} detail_url={detail_url}")
        except Exception:
            pass

        return {
            "裂变图": flux_url,
            "产品标签": normalize_tags(rec.get("product_tags")) or "",
            "产品标题": rec.get("product_title") or "",
            "产品简介": rec.get("product_description") or "",
            "详情图": detail_url,
            "详情图的url": detail_url,
        }

    cn_columns = ["裂变图", "产品标签", "产品标题", "产品简介", "详情图", "详情图的url"]

    records_all = [to_cn_row(r) for r in df.to_dict(orient="records")]
    df_all_cn = pd.DataFrame(records_all, columns=cn_columns)

    df_flux_src = df[df.get("source_type") == "flux_variation"] if "source_type" in df.columns else pd.DataFrame()
    df_flux_cn = pd.DataFrame([to_cn_row(r) for r in df_flux_src.to_dict(orient="records")],
                              columns=cn_columns)

    df_detail_src = df[df.get("source_type") == "detail_page"] if "source_type" in df.columns else pd.DataFrame()
    df_detail_cn = pd.DataFrame([to_cn_row(r) for r in df_detail_src.to_dict(orient="records")],
                                columns=cn_columns)

    print(f"📝 写入 Excel -> {output_path}")
    with pd.ExcelWriter(output_path, engine="openpyxl") as writer:
        # One sheet per category.
        df_all_cn.to_excel(writer, index=False, sheet_name="全部")
        df_flux_cn.to_excel(writer, index=False, sheet_name="裂变")
        df_detail_cn.to_excel(writer, index=False, sheet_name="详情")

        # Embed thumbnails only in '裂变图' and '详情图';
        # '详情图的url' stays a plain text URL column.
        if len(df_all_cn) > 0:
            print("🖼️ 插入图片缩略图（全部/裂变图）...")
            insert_thumbnails(writer, "全部", df_all_cn, image_col="裂变图", thumb_width=args.thumb_width, timeout=args.timeout)
            print("🖼️ 插入图片缩略图（全部/详情图）...")
            insert_thumbnails(writer, "全部", df_all_cn, image_col="详情图", thumb_width=args.thumb_width, timeout=args.timeout)

        if len(df_flux_cn) > 0:
            print("🖼️ 插入图片缩略图（裂变）...")
            insert_thumbnails(writer, "裂变", df_flux_cn, image_col="裂变图", thumb_width=args.thumb_width, timeout=args.timeout)

        if len(df_detail_cn) > 0:
            print("🖼️ 插入图片缩略图（详情）...")
            insert_thumbnails(writer, "详情", df_detail_cn, image_col="详情图", thumb_width=args.thumb_width, timeout=args.timeout)

    print("🎉 导出完成！")
    print(f"- 总记录数: {len(df_all_cn)}")
    print(f"- 裂变: {len(df_flux_cn)}")
    print(f"- 详情: {len(df_detail_cn)}")
    print(f"文件已生成: {output_path}")


# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()