import json
import os
import traceback
from pathlib import Path

# IMPORTANT: apply the StarRocks connection patch BEFORE any other
# database-related imports, so the patched connection behavior is already
# in place when those modules load.
try:
    from airflow_connection_patch import apply_all_patches
    patch_success = apply_all_patches()
    print(f"🎉 StarRocks连接补丁应用结果: {patch_success}")
except Exception as e:
    print(f"❌ 导入StarRocks连接补丁失败: {str(e)}")
    # Best-effort: continue with the unpatched connection, which may run
    # into timeouts — log a warning instead of aborting.
    print("⚠️ 将继续使用原始连接方式，可能会遇到超时问题")

from conn_manager import get_ts_ec_database_cursor, FAQ_DATA_DIR, EC_DATA_DIR
import logging

# Logging setup: one module-level logger, per the standard
# logging.getLogger(__name__) convention.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def fetch_child_titles(cursor, pid):
    """Return the (id, title) rows of all direct children of node *pid*.

    Args:
        cursor: Open DB-API cursor on the wiki database.
        pid: Parent node id in the ``bluetti_wiki`` table.

    Returns:
        List of (id, title) tuples, one per child node.
    """
    # Parameterized query instead of f-string interpolation: lets the
    # driver handle quoting and rules out SQL injection (MySQL-protocol
    # drivers such as pymysql / mysql-connector use the %s placeholder).
    query = "SELECT id, title FROM bluetti_wiki WHERE pid = %s"
    logger.debug(f"🔍 执行查询子节点: {query} (pid={pid})")
    cursor.execute(query, (pid,))
    results = cursor.fetchall()
    logger.debug(f"📊 查询到 {len(results)} 个子节点")
    return results

def fetch_article_info(cursor, wikiid):
    """Return (title, content) rows for the published articles of a node.

    ``staticContent`` is preferred over ``content`` when present, and
    ``status = 1`` restricts the result to published articles.

    Args:
        cursor: Open DB-API cursor on the wiki database.
        wikiid: Node id whose articles are fetched.

    Returns:
        List of (title, content) tuples.
    """
    # Parameterized query instead of f-string interpolation: lets the
    # driver handle quoting and rules out SQL injection.
    query = ("SELECT title, COALESCE(staticContent, content) AS content "
             "FROM bluetti_wiki_article WHERE wikiId = %s AND status = 1")
    logger.debug(f"🔍 执行查询文章信息: wikiId={wikiid}")
    cursor.execute(query, (wikiid,))
    results = cursor.fetchall()
    logger.debug(f"📝 查询到 {len(results)} 篇文章")
    return results

def build_tree(cursor, pid, level=0, filter_error_codes=False):
    """Recursively build the wiki tree rooted at node *pid*.

    A node with no children in ``bluetti_wiki`` is a leaf and is expanded
    into its article list; an interior node becomes a dict with a
    ``title`` and (when non-empty) a ``children`` subtree.

    Args:
        cursor: Open DB-API cursor on the wiki database.
        pid: Id of the node whose subtree is built.
        level: Current recursion depth (callers start at their base level).
        filter_error_codes: When True, at depth 3 only the "Error Codes"
            category is kept; all its siblings are skipped.

    Returns:
        Either a list of ``{"title", "content"}`` article dicts (leaf) or
        a list of category node dicts.
    """
    child_rows = fetch_child_titles(cursor, pid)

    # Leaf node: return its article list. Rows whose title or content is
    # the literal string '1' look like placeholder records and are dropped.
    if not child_rows:
        return [{"title": t, "content": c}
                for t, c in fetch_article_info(cursor, pid)
                if t != '1' and c != '1']

    nodes = []
    for child_id, title in child_rows:
        # Error-code mode: at depth 3, keep only the "Error Codes" branch.
        if filter_error_codes and level == 3 and title != "Error Codes":
            logger.debug(f"⏭️ 错误代码模式：跳过非Error Codes类别: {title}")
            continue

        subtree = build_tree(cursor, child_id, level + 1, filter_error_codes)
        node = {"title": title}
        if subtree:  # omit empty 'children' so leaves stay compact
            node["children"] = subtree
        nodes.append(node)

    logger.debug(f"✅ 处理完成 {len(nodes)} 个类别节点 (level={level})")
    return nodes

def add_model_to_articles(article_data, model_name):
    """Flatten a category->articles mapping into one list, tagging each
    article dict with a leading ``model`` field.

    Args:
        article_data: Mapping of category title to a list of article dicts.
        model_name: Value stored under the ``model`` key of every article.

    Returns:
        Flat list of article dicts, each starting with ``model``.
    """
    logger.debug(f"🔄 为型号 {model_name} 添加标识并扁平化数据...")
    result = []

    for category, items in article_data.items():
        logger.debug(f"  📂 处理分类: {category} ({len(items)} 个项目)")
        # Dict unpacking puts 'model' first while letting the article's own
        # fields (including any existing 'model') win, exactly as before.
        result.extend({'model': model_name, **item} for item in items)

    logger.debug(f"✅ 扁平化完成，共 {len(result)} 篇文章")
    return result

# Export the nested tree as JSON files, one file per model.
def export_to_json(data, output_dir, data_type="general"):
    """Write the nested category tree in *data* as one JSON file per model.

    Expected shape of *data* (as produced by build_tree):
        {top_category: [series, ...]} — each series node has a 'title' and
        optional 'children' (models); each model's 'children' are category
        nodes whose 'children' are article dicts.

    One file named "<TopCategoryWithoutSpaces>-<model title>.txt" is
    written per model, containing the flattened article list as JSON
    (note: .txt extension, JSON content).

    Args:
        data: Nested category/series/model tree (see above).
        output_dir: Directory to write files into; created if missing.
        data_type: Human-readable label used only in log messages.
    """
    logger.info(f"💾 开始导出{data_type}JSON文件...")
    
    output_dir = Path(output_dir)
    abs_output_dir = output_dir.resolve()
    logger.info(f"📁 {data_type}目标输出目录: {abs_output_dir}")
    
    # Create the output directory (with parents) and fail loudly if it is
    # still not accessible afterwards.
    try:
        output_dir.mkdir(exist_ok=True, parents=True)
        logger.info(f"✅ {data_type}输出目录已准备: {abs_output_dir}")
        
        if not output_dir.exists():
            raise Exception(f"无法创建或访问{data_type}目录: {abs_output_dir}")
            
    except Exception as e:
        logger.error(f"❌ 创建{data_type}输出目录失败: {str(e)}")
        logger.error(f"🔍 详细错误信息:\n{traceback.format_exc()}")
        raise

    exported_files = 0
    total_articles = 0

    # Walk top-level categories -> series -> models.
    for top_category, series_list in data.items():
        # Strip spaces so the category is safe to embed in a filename.
        safe_top_category = top_category.replace(" ", "")
        logger.info(f"📋 处理{data_type}顶级类别: {top_category} -> {safe_top_category}")
        
        for series in series_list:
            second_category = series['title']
            
            for model in series.get('children', []):
                third_category = model['title']

                try:
                    # Build a {category title: article list} dict for this
                    # model from its child category nodes.
                    content_dict = {}
                    for category in model.get('children', []):
                        category_title = category['title']
                        articles = category.get('children', [])
                        content_dict[category_title] = articles

                    # Flatten the articles and tag each with the model name.
                    flat_articles = add_model_to_articles(content_dict, third_category)

                    # Output filename: "<TopCategory>-<model>.txt".
                    filename = f"{safe_top_category}-{third_category}.txt"
                    filepath = output_dir / filename
                    abs_filepath = filepath.resolve()

                    # Write the flattened article list as pretty-printed JSON.
                    with open(filepath, 'w', encoding='utf-8') as f:
                        json.dump(flat_articles, f, ensure_ascii=False, indent=4)

                    # Sanity check: verify the file actually exists on disk.
                    if filepath.exists():
                        file_size = filepath.stat().st_size
                        logger.info(f"✅ 已导出{data_type}文件: {filepath.name} ({len(flat_articles)} 篇文章, {file_size} 字节)")
                    else:
                        logger.error(f"❌ {data_type}文件创建失败: {abs_filepath}")
                        
                    exported_files += 1
                    total_articles += len(flat_articles)
                    
                except Exception as e:
                    # Per-model failures are logged and skipped so one bad
                    # model does not abort the whole export.
                    logger.error(f"❌ 导出{data_type}文件失败 {top_category}-{third_category}: {str(e)}")
                    logger.error(f"🔍 详细错误信息:\n{traceback.format_exc()}")

    logger.info(f"🎉 {data_type}JSON导出完成！成功导出 {exported_files} 个文件，共 {total_articles} 篇文章")
    
    # Best-effort listing of what ended up in the directory, for the logs.
    try:
        files_in_dir = list(output_dir.glob("*.txt"))
        logger.info(f"📋 {data_type}目录中的文件列表:")
        for file in files_in_dir:
            file_size = file.stat().st_size
            logger.info(f"  📄 {file.name} ({file_size} 字节)")
    except Exception as e:
        logger.warning(f"⚠️ 无法列出{data_type}目录文件: {str(e)}")

def export_all_data():
    """Export all data in one pass: FAQ, troubleshooting and error codes.

    Queries the wiki root nodes, builds trees for the "FAQ" and
    "Trouble Shooting" top-level categories, then writes:
      - FAQ + troubleshooting files to FAQ_DATA_DIR, and
      - an Error-Codes-only variant of "Trouble Shooting" to EC_DATA_DIR.

    Returns:
        The number of top-level categories processed.

    Raises:
        Re-raises any exception from the DB connection or the export step
        after logging it.
    """
    logger.info("🚀 开始统一数据导出 (FAQ + 故障排除 + 错误代码)...")
    
    try:
        with get_ts_ec_database_cursor() as cursor:
            logger.info("🔍 查询顶层节点...")
            # Root nodes of the wiki tree have pid = '0'.
            query = "SELECT id, title FROM bluetti_wiki WHERE pid = '0' AND status = 1"
            cursor.execute(query)
            top_level = cursor.fetchall()
            logger.info(f"📊 找到 {len(top_level)} 个顶层节点")

            # Three views of the same data: everything processed, the
            # FAQ/troubleshooting subset, and the error-code-only subset.
            all_data = {}
            faq_ts_data = {}  # FAQ and troubleshooting trees
            errorcode_data = {}  # Error-code-only trees
            
            for row in top_level:
                pid = row[0]
                title = row[1]
                logger.info(f"🔍 检查顶层类别: {title}")
                
                if title in ["FAQ", "Trouble Shooting"]:
                    logger.info(f"✅ 开始处理类别: {title}")
                    print(f"📋 顶级类别: {title}")
                    
                    try:
                        # Full tree (no Error Codes filtering) for FAQ and
                        # general troubleshooting output.
                        tree_data = build_tree(cursor, pid, level=1, filter_error_codes=False)
                        all_data[title] = tree_data
                        
                        # For troubleshooting, additionally build a filtered
                        # tree containing only the "Error Codes" branch.
                        if title == "Trouble Shooting":
                            logger.info("🔧 生成专门的错误代码数据...")
                            errorcode_tree = build_tree(cursor, pid, level=1, filter_error_codes=True)
                            errorcode_data[title] = errorcode_tree
                            faq_ts_data[title] = tree_data
                        else:
                            faq_ts_data[title] = tree_data
                            
                        logger.info(f"✅ 完成处理类别: {title}")
                    except Exception as e:
                        # One failing category is logged but does not abort
                        # the others.
                        logger.error(f"❌ 处理类别 {title} 时出错: {str(e)}")
                        logger.error(f"🔍 详细错误信息:\n{traceback.format_exc()}")
                else:
                    logger.debug(f"⏭️ 跳过类别: {title}")

            # FAQ + troubleshooting files go to the FAQ directory.
            if faq_ts_data:
                logger.info("💾 开始导出FAQ和故障排除数据...")
                export_to_json(faq_ts_data, FAQ_DATA_DIR, "FAQ和故障排除")
            
            # Error-code files go to their dedicated directory.
            if errorcode_data:
                logger.info("💾 开始导出错误代码数据...")
                export_to_json(errorcode_data, EC_DATA_DIR, "错误代码")
            
            logger.info(f"🎉 统一数据导出完成！")
            return len(all_data)

    except Exception as e:
        logger.error(f"❌ 统一数据导出时出错: {str(e)}")
        logger.error(f"🔍 详细错误信息:\n{traceback.format_exc()}")
        raise

# Compatibility wrappers - kept for backward compatibility with old callers.
def export_faq_troubleshooting_data(output_dir=None):
    """Export FAQ and troubleshooting data (backward-compatible wrapper).

    Args:
        output_dir: Accepted for backward compatibility but IGNORED —
            export_all_data() always writes to FAQ_DATA_DIR / EC_DATA_DIR.
            (The old ``output_dir = FAQ_DATA_DIR`` default was dead code:
            the value was computed and never used.)

    Returns:
        The number of top-level categories processed by export_all_data().
    """
    return export_all_data()

def export_errorcode_data(output_dir=None):
    """Export error-code data (backward-compatible wrapper).

    Args:
        output_dir: Accepted for backward compatibility but IGNORED —
            export_all_data() always writes to FAQ_DATA_DIR / EC_DATA_DIR.
            (The old ``output_dir = EC_DATA_DIR`` default was dead code:
            the value was computed and never used.)

    Returns:
        The number of top-level categories processed by export_all_data().
    """
    return export_all_data()

# Script entry point: run the unified export (FAQ + troubleshooting +
# error codes) directly and log the outcome.
if __name__ == "__main__":
    logger.info("🚀 直接运行统一爬虫...")
    try:
        result = export_all_data()
        logger.info(f"✅ 统一爬虫运行完成，处理了 {result} 个类别")
    except Exception as e:
        logger.error(f"❌ 统一爬虫运行失败: {str(e)}")
        logger.error(f"🔍 详细错误信息:\n{traceback.format_exc()}")