# sync_manager.py
import os
import json
from datetime import datetime

import requests
import logging

from ai_platform.models.wiki_document import WikiDocument
from ai_platform.sync.html_cleaner_until import html_to_markdown
from ..pipeline.data_pipeline import FileDataPipeline

# Configuration
# NOTE(review): credentials are hard-coded in source — move USERNAME/PASSWORD
# (and ideally BASE_URL) to environment variables or a secrets store before
# this ships anywhere public.
BASE_URL = "https://wiki.fantuan.ca"
USERNAME = "xubo"
PASSWORD = "xubo1996"
ROOT_PAGE_ID = "88860159"  # only the page tree rooted here is synced
DATA_DIR = "wiki_page_info"  # local cache directory for per-page JSON files

# Shared HTTP session (keeps the login cookie across requests) and module logger.
session = requests.Session()
logger = logging.getLogger(__name__)

async def sync_wiki_data(knowledge_base_id: str, root_page_id: str):
    """Synchronize the wiki page tree rooted at *root_page_id*.

    Logs in with the module-level credentials, crawls the tree (writing
    changed pages into DATA_DIR), then awaits the pipeline's processing of
    the synced data.

    Args:
        knowledge_base_id: target knowledge base; must be non-empty.
        root_page_id: wiki page id whose subtree is synced; must be non-empty.

    Returns:
        The number of pages that were created or updated during the crawl.

    Raises:
        ValueError: if either argument is empty.
    """
    # Validate both required arguments up front with identical error wording.
    for value, name in ((knowledge_base_id, "knowledge_base_id"),
                        (root_page_id, "root_page_id")):
        if not value:
            raise ValueError(f"{name} 参数不能为空")

    login(USERNAME, PASSWORD)
    file_pipeline = FileDataPipeline()
    changed_pages = crawl_page_tree(root_page_id, 0, knowledge_base_id, file_pipeline)
    # 等待异步处理wiki数据
    await file_pipeline.process_wiki_data(knowledge_base_id, DATA_DIR)
    return changed_pages

def login(username: str, password: str) -> None:
    """Authenticate against the Confluence instance on the shared session.

    Posts form credentials to ``dologin.action``; the endpoint returns 200
    on both success and failure, so success is detected by the presence of
    a logout link in the returned HTML.

    Args:
        username: Confluence account name.
        password: Confluence account password.

    Raises:
        ValueError: if the response does not indicate a logged-in session.
        requests.exceptions.RequestException: on network/HTTP errors.
    """
    login_url = f"{BASE_URL}/dologin.action"
    payload = {
        'os_username': username,
        'os_password': password,
        'os_cookie': 'true',
        'login': 'Log In'
    }
    # timeout so a hung server cannot block the sync forever
    resp = session.post(login_url, data=payload, timeout=30)
    resp.raise_for_status()
    if "logout.action" in resp.text:
        logger.info("✅ 登录成功")
    else:
        # Previously this print()-ed and called exit(1), which would kill the
        # entire host process (e.g. a web server). Raise instead so callers
        # can handle the failure like every other error in this module.
        logger.error("❌ 登录失败，请检查用户名密码")
        raise ValueError("Wiki 登录失败，请检查用户名密码")

def get_child_pages(page_id: str):
    """Return ALL direct child pages of *page_id*.

    The previous version issued a single request with ``limit=100`` and
    silently dropped any children beyond the first 100. This version pages
    through the Confluence REST API with the ``start`` offset until a short
    batch signals the end of the results.

    Args:
        page_id: the parent page id.

    Returns:
        list: the combined ``results`` entries (child page dicts).

    Raises:
        ValueError: on network failure or a non-JSON API response.
    """
    children = []
    start = 0
    limit = 100
    while True:
        url = (f"{BASE_URL}/rest/api/content/{page_id}/child/page"
               f"?limit={limit}&start={start}")
        try:
            response = session.get(url, timeout=30)
            response.raise_for_status()
            resp = response.json()
        except requests.exceptions.RequestException as e:
            logger.error(f"获取子页面时网络请求失败: {e}")
            raise ValueError(f"无法获取页面 {page_id} 的子页面: {e}")
        except json.JSONDecodeError as e:
            logger.error(f"解析子页面响应JSON失败: {e}")
            logger.error(f"响应内容: {response.text}")
            raise ValueError(f"Wiki API返回非JSON格式响应: {e}")
        batch = resp.get("results", [])
        children.extend(batch)
        # A batch smaller than the limit means we've reached the last page.
        if len(batch) < limit:
            return children
        start += limit

def get_page_detail(page_id: str, knowledge_base_id: str = "") -> WikiDocument:
    """Fetch one wiki page and convert it to a WikiDocument.

    Requests the page with its storage-format body, version and history
    expanded, converts the HTML body to markdown, and wraps the result.

    Args:
        page_id: the Confluence page id to fetch.
        knowledge_base_id: knowledge base the document belongs to.

    Returns:
        WikiDocument: the page with markdown content and sync metadata.

    Raises:
        ValueError: on network failure, non-JSON response, or a response
            missing the expected fields.
    """
    url = f"{BASE_URL}/rest/api/content/{page_id}?expand=body.storage,version,history"
    try:
        # timeout so a stalled server cannot hang the crawl indefinitely
        response = session.get(url, timeout=30)
        response.raise_for_status()  # check HTTP status code
        resp = response.json()
    except requests.exceptions.RequestException as e:
        logger.error(f"请求Wiki API失败: {e}")
        raise ValueError(f"无法获取页面 {page_id} 的详细信息: {e}")
    except json.JSONDecodeError as e:
        logger.error(f"解析Wiki API响应失败: {e}")
        logger.error(f"响应内容: {response.text[:500]}")
        raise ValueError(f"Wiki API返回了无效的JSON格式: {e}")

    try:
        html = resp["body"]["storage"]["value"]
        markdown = html_to_markdown(html)
        return WikiDocument(
            doc_id=page_id,
            title=resp["title"],
            doc_type="page",
            url=f"{BASE_URL}/pages/viewpage.action?pageId={page_id}",
            author=resp["history"]["createdBy"]["displayName"],
            updated_at=resp["version"]["when"],
            knowledge_base_id=knowledge_base_id,
            content=markdown,
            metadata={
                "source": "wiki",
                "sync_time": datetime.now().isoformat(),
            }
        )
    except KeyError as e:
        logger.error(f"Wiki API响应缺少必要字段: {e}")
        logger.error(f"响应内容: {resp}")
        raise ValueError(f"Wiki API响应格式不正确，缺少字段: {e}")

def has_page_updated(page_id: str, new_updated_time) -> bool:
    """Return True when the local cache of *page_id* is missing or stale.

    Compares *new_updated_time* against the ``updated_at`` field of the
    cached JSON file in DATA_DIR. Any unreadable or malformed cache file is
    treated as stale so the page gets re-synced.

    Args:
        page_id: the wiki page id.
        new_updated_time: the page's current ``version.when`` timestamp.

    Returns:
        bool: True if the page should be (re)fetched and saved.
    """
    filepath = os.path.join(DATA_DIR, f"{page_id}.json")
    logger.info(f"正在检查页面filepath：{filepath}")
    if not os.path.exists(filepath):
        return True
    try:
        with open(filepath, "r", encoding="utf-8") as f:
            old_data = json.load(f)
    except (json.JSONDecodeError, OSError) as e:  # OSError covers FileNotFoundError
        logger.warning(f"读取页面文件失败: {e}")
        return True
    # Cache files written by the old save_page_json were double-encoded
    # (json.load yields a str, and .get() would raise AttributeError).
    # Treat any non-dict payload as stale so it gets rewritten correctly.
    if not isinstance(old_data, dict):
        return True
    return old_data.get("updated_at") != new_updated_time

def save_page_json(detail: WikiDocument):
    """Persist *detail* as ``DATA_DIR/<doc_id>.json``.

    Bug fix: the previous version passed ``detail.json()`` — which is
    already a JSON *string* — to ``json.dump``, double-encoding the payload.
    ``json.load`` on such a file returns a str, and ``has_page_updated``
    then crashed calling ``.get()`` on it. We decode the model's JSON back
    to a dict before writing so the file contains a proper JSON object.

    Args:
        detail: the document to write; ``doc_id`` names the file.
    """
    os.makedirs(DATA_DIR, exist_ok=True)
    filename = os.path.join(DATA_DIR, f"{detail.doc_id}.json")
    payload = json.loads(detail.json())  # model JSON string -> dict
    with open(filename, "w", encoding="utf-8") as f:
        json.dump(payload, f, ensure_ascii=False, indent=2)


def crawl_page_tree(page_id: str, level: int = 0, knowledge_base_id: str = "", pipeline: FileDataPipeline = None) -> int:
    """Depth-first crawl of the page tree rooted at *page_id*.

    Fetches each page, saves it locally when its ``updated_at`` differs
    from the cached copy, and recurses into its children.

    Args:
        page_id: root of the subtree to crawl.
        level: recursion depth, used only for console indentation.
        knowledge_base_id: knowledge base id attached to each document.
        pipeline: data pipeline handle, propagated through the recursion.

    Returns:
        int: number of pages created or updated in this subtree.
    """
    detail = get_page_detail(page_id, knowledge_base_id)
    logger.info(f"正在处理页面：{detail.title}")
    updated_count = 0

    if has_page_updated(page_id, detail.updated_at):
        print("  " * level + f"🔄 更新页面：{detail.title}")
        save_page_json(detail)
        updated_count += 1
    else:
        print("  " * level + f"✅ 跳过未变更页面：{detail.title}")

    for child in get_child_pages(page_id):
        # Bug fix: the recursive call previously dropped `pipeline`, so every
        # level below the root saw pipeline=None. Propagate it through.
        updated_count += crawl_page_tree(child["id"], level + 1, knowledge_base_id, pipeline)

    return updated_count

