import asyncio
import logging
import os
import shutil
import time
from pathlib import Path
from typing import List, Dict, Optional, Any

from jinja2 import Template
from pydantic import BaseModel

from app.config import settings
from app.converter.html_converter import html_convert_to_md
from app.converter.pdf_converter import pdf_convert_to_md
from app.llm import optimize_with_llm
from app.utils import md_utils
from app.utils.archive import extract_archive
from app.utils.email_utils import send_email
from app.utils.zip_utils import create_zip

logger = logging.getLogger(__name__)


class WorkflowState(BaseModel):
    """Mutable state passed through every node of the conversion workflow."""
    # Accumulated per-node failures; each entry is {"file": <name>, "error": <message>}.
    # NOTE: pydantic copies mutable defaults per instance, so `= []` is safe here.
    errors: List[Dict[str, str]] = []
    # Async MySQL connection pool exposing acquire()/cursor() — presumably
    # aiomysql-style; verify against the caller that creates it.
    mysql_pool: Any = None
    # Working directory holding the uploaded files for this run.
    temp_path: Path
    # Output of the most recently executed node; each node rebinds this.
    processed_files: List[Path] = []
    origin_url: str
    # Public download URL of the result ZIP, set by create_result_zip.
    zip_url: Optional[str] = None
    # Target language forwarded to PDF conversion and LLM optimization.
    language: str
    owner_name: str
    owner_email: str
    knowledge_base_name: str
    # Person who requested the conversion; receives success/failure mail.
    converter_email: str
    token: str


async def extract_archives(state: WorkflowState):
    """Unpack archives found directly under ``state.temp_path``.

    Archive files (.zip/.rar/.tar/.gz/.bz2) are extracted into the same
    directory and the original archive is deleted; every other file passes
    through unchanged.  Extraction failures are appended to ``state.errors``.

    Returns the dumped state dict with ``processed_files`` rebound to the
    resulting entries.
    """
    start_time = time.time()
    archive_suffixes = {".zip", ".rar", ".tar", ".gz", ".bz2"}
    extracted_files = []
    # Snapshot the listing first: extraction adds entries and unlink() removes
    # one, so mutating the directory while iterating iterdir() is unsafe.
    for file_path in list(state.temp_path.iterdir()):
        if file_path.suffix in archive_suffixes:
            try:
                extract_dir = file_path.parent
                # Record what already exists so we collect only entries this
                # archive produced (the previous code re-listed the whole
                # parent dir, duplicating files collected earlier).
                before = set(extract_dir.iterdir())
                extract_archive(file_path, extract_dir)
                file_path.unlink()  # remove the original archive
                before.discard(file_path)
                extracted_files.extend(
                    p for p in extract_dir.iterdir() if p not in before
                )
            except Exception as e:
                state.errors.append(
                    {"file": file_path.name, "error": f"Extract failed: {str(e)}"}
                )
        else:
            extracted_files.append(file_path)
    elapsed = time.time() - start_time
    logger.info(f"Exiting node: extract_archives, elapsed: {elapsed:.2f}s")
    if state.errors:
        logger.error(f"extract_archives errors: {state.errors}")
    return {**state.model_dump(), "processed_files": extracted_files}


async def convert_to_markdown(state: WorkflowState):
    """Convert each processed entry (file or directory) to Markdown.

    Directories are classified by what they contain (HTML > MD > PDF, in that
    priority); single files are dispatched on their suffix.  Hidden and
    underscore-prefixed entries (e.g. ``.DS_Store``, ``__MACOSX``) are skipped.

    Returns the dumped state dict with ``processed_files`` rebound to the
    converted Markdown paths.
    """
    start_time = time.time()
    converted_files = []

    html_suffixes = (".html", ".htm", ".mhtml")

    def contains_suffix(file_dir: Path, suffixes) -> bool:
        # True if any file anywhere under file_dir has one of the suffixes.
        # Suffix comparison is case-insensitive throughout (the previous code
        # lower-cased ".html" but not ".htm"/".mhtml").
        return any(
            f.is_file() and f.suffix.lower() in suffixes
            for f in file_dir.rglob("*")
        )

    for file_path in state.processed_files:
        # Skip hidden/system entries such as .DS_Store or __MACOSX.
        if file_path.name.startswith(".") or file_path.name.startswith("_"):
            continue
        if file_path.is_dir():
            if contains_suffix(file_path, html_suffixes):
                md_path = html_convert_to_md(file_path)
                if md_path:
                    converted_files.append(md_path)
            elif contains_suffix(file_path, (".md",)):
                # Already markdown: pass direct children through unchanged.
                converted_files.extend(file_path.iterdir())
            elif contains_suffix(file_path, (".pdf",)):
                # NOTE(review): this feeds every direct child to the PDF
                # converter, not only the PDFs — preserved as-is; confirm
                # pdf_convert_to_md tolerates non-PDF input.
                for sub in file_path.iterdir():
                    md_path = await pdf_convert_to_md(sub, state.language)
                    if md_path:
                        converted_files.append(md_path)
        elif file_path.is_file():
            suffix = file_path.suffix.lower()
            if suffix == ".pdf":
                md_path = await pdf_convert_to_md(file_path, state.language)
                if md_path:
                    converted_files.append(md_path)
            elif suffix in html_suffixes:
                md_path = html_convert_to_md(file_path)
                if md_path:
                    converted_files.append(md_path)
            elif suffix == ".md":
                converted_files.append(file_path)
    elapsed = time.time() - start_time
    logger.info(f"Exiting node: convert_to_markdown, elapsed: {elapsed:.2f}s")
    if state.errors:
        logger.error(f"convert_to_markdown errors: {state.errors}")
    return {**state.model_dump(), "processed_files": converted_files}


async def optimize_documents(state: WorkflowState):
    """Optimize every Markdown file via the LLM, bounded by a semaphore.

    Each ``.md`` file (directories are searched recursively) is rewritten in
    place with the LLM output.  Per-file failures are logged and appended to
    ``state.errors`` without aborting the other files.

    Returns the dumped state dict; ``processed_files`` is unchanged.
    """
    start_time = time.time()
    processed_files = state.processed_files

    # Bound concurrent LLM calls to the configured limit.
    semaphore = asyncio.Semaphore(settings.llm_max_concurrent)

    async def optimize_single_file(md_file: Path):
        async with semaphore:
            try:
                content = await optimize_with_llm(md_file, state.language)
                # Overwrite the original file with the optimized content.
                md_file.write_text(content, encoding="utf-8")
            except Exception as e:
                logger.error(f"LLM optimization failed for {md_file}: {str(e)}")
                state.errors.append(
                    {"file": md_file.name, "error": f"LLM optimize failed: {str(e)}"}
                )

    # Collect every markdown file to optimize, recursing into directories.
    all_md_files = []
    for file_path in processed_files:
        if file_path.is_dir():
            all_md_files.extend(file_path.rglob("*.md"))
        elif file_path.suffix.lower() == ".md":
            # .lower() for consistency with suffix checks elsewhere in the module.
            all_md_files.append(file_path)

    if all_md_files:
        logger.info(
            f"Creating {len(all_md_files)} concurrent tasks with max concurrency: {settings.llm_max_concurrent}"
        )
        # gather() schedules coroutines itself; explicit create_task is redundant.
        await asyncio.gather(*(optimize_single_file(md) for md in all_md_files))

    elapsed = time.time() - start_time
    logger.info(f"Exiting node: optimize_documents, elapsed: {elapsed:.2f}s")
    if state.errors:
        logger.error(f"optimize_documents errors: {state.errors}")
    return {**state.model_dump(), "processed_files": processed_files}


async def custom_optimize_documents(state: WorkflowState):
    """Apply project-specific Markdown clean-ups in place.

    For every ``.md`` file (directories searched recursively): strip anchor
    links and rewrite bare directory links to point at ``index.md``.

    Returns ``state`` so this node matches the contract of the other nodes
    (the previous version returned nothing).
    """
    start_time = time.time()
    for entry in state.processed_files:
        # Check is_dir() first: a directory named "*.md" must not be handed
        # to the file-level md_utils helpers.
        if entry.is_dir():
            for md_file in entry.rglob("*"):
                if md_file.is_file() and md_file.suffix.lower() == ".md":
                    path_str = str(md_file.absolute())
                    md_utils.remove_anchor_link(path_str)
                    md_utils.replace_path_with_default_file(path_str, "index.md")
        elif entry.suffix.lower() == ".md":
            path_str = str(entry.absolute())
            md_utils.remove_anchor_link(path_str)
            md_utils.replace_path_with_default_file(path_str, "index.md")
    elapsed = time.time() - start_time
    logger.info(f"Exiting node: custom_optimize_documents, elapsed: {elapsed:.2f}s")
    return state


async def create_result_zip(state: WorkflowState):
    """Bundle the processed files into a ZIP and publish it under nginx.

    The archive is built inside the temp directory, then moved to
    ``<nginx_static_dir>/<temp-dir-name>/`` and exposed via ``zip_url``.
    On failure the error is recorded in ``state.errors`` and the state is
    returned unchanged.
    """
    start_time = time.time()
    zip_name = state.temp_path.name + ".zip"
    zip_path = state.temp_path / zip_name
    try:
        create_zip(state.processed_files, zip_path)
        parent_name = zip_path.parent.name
        # Publish the archive below the nginx static root.
        target_dir = Path(settings.nginx_static_dir) / parent_name
        target_dir.mkdir(parents=True, exist_ok=True)
        shutil.move(str(zip_path), str(target_dir / zip_name))
        zip_url = f"{settings.nginx_url_prefix}/{parent_name}/{zip_name}"
        elapsed = time.time() - start_time
        logger.info(f"Exiting node: create_result_zip, elapsed: {elapsed:.2f}s")
        if state.errors:
            logger.error(f"create_result_zip errors: {state.errors}")
        return {**state.model_dump(), "zip_url": zip_url}
    except Exception as e:
        state.errors.append(
            {"file": "Packaging", "error": f"Create ZIP failed: {str(e)}"}
        )
        elapsed = time.time() - start_time
        logger.info(
            f"Exiting node: create_result_zip, elapsed: {elapsed:.2f}s (with error)"
        )
        logger.error(f"create_result_zip errors: {state.errors}")
        return state


async def send_result_email(state: WorkflowState):
    """Email the run outcome: failures go to the converter only; successes
    send a detailed mail to the owner followed by a short converter notice.

    Email failures are recorded in ``state.errors`` rather than raised.
    """
    start_time = time.time()
    try:
        if state.errors:
            # Failure path: only the converter is notified, with the error list.
            failure_body = Template(settings.converter_fail_email_template).render(
                errors=state.errors
            )
            send_email(state.converter_email, failure_body)
        else:
            # Success path: detailed mail to the owner first ...
            owner_body = Template(settings.owner_email_template).render(
                origin_url=state.origin_url,
                zip_url=state.zip_url,
                owner_name=state.owner_name,
                converter_email=state.converter_email,
                knowledge_base_name=state.knowledge_base_name,
                language=state.language,
                token=state.token,
            )
            send_email(state.owner_email, owner_body)
            # ... then a brief notification to the converter.
            notice_body = Template(settings.converter_success_email_template).render(
                ownerName=state.owner_name
            )
            send_email(state.converter_email, notice_body)
    except Exception as e:
        state.errors.append(
            {"file": "Email send", "error": f"Send email failed: {str(e)}"}
        )
        logger.error("==============Send Email Fail===========")
        logger.error(f"send_result_email error: {e}", exc_info=True)
        logger.error("==============Send Email Fail===========")
    elapsed = time.time() - start_time
    logger.info(f"Exiting node: send_result_email, elapsed: {elapsed:.2f}s")
    if state.errors:
        logger.error(f"send_result_email errors: {state.errors}")
    return state


# 新增 insertFlow 节点
async def insert_flow(state: WorkflowState):
    start_time = time.time()
    try:
        pool = state.mysql_pool
        if pool is None:
            logger.error("MySQL pool is not available in insert_flow.")
            return state
        # Determine status and comment based on errors
        if state.errors and len(state.errors) > 0:
            status = "2"  # refused
            comment = f"Conversion failed: {', '.join([e.get('error', 'Unknown error') for e in state.errors])}"
        else:
            status = "0"  # created
            comment = "Conversion completed successfully"
        async with pool.acquire() as conn:
            async with conn.cursor() as cur:
                await cur.execute(
                    """
                    INSERT INTO t_approve_flow (
                        token, creator_email, file_url, origin_url, rag_owner, rag_owner_email, knowledge_base_id, status, comment, create_by, create_at
                    ) VALUES (
                        %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NOW()
                    )
                    """,
                    (
                        state.token,
                        state.converter_email,
                        state.zip_url,
                        state.origin_url,
                        state.owner_name,
                        state.owner_email,
                        state.knowledge_base_name,
                        status,
                        comment,
                        state.owner_name,
                    ),
                )
    except Exception as e:
        state.errors.append({"error": f"insert flow failed: {str(e)}"})
    elapsed = time.time() - start_time
    logger.info(f"Exiting node: insert_flow, elapsed: {elapsed:.2f}s")
    if state.errors:
        logger.error(f"insert_flow errors: {state.errors}")
    return state
