import pickle
import tempfile
import logging
import gzip
import shutil
from pathlib import Path
from datetime import datetime, timedelta

import requests
from lxml import etree

from .exceptions import FileSystemError, RepoDataError, NetworkError
from .utils import download_file_with_progress

# --- Constants ---
# OpenCloudOS 9 repository base URLs to index (BaseOS, AppStream,
# EPOL/Everything, extras). Each must point at the directory that
# contains "repodata/repomd.xml".
OC9_REPO_URLS = [
    "https://mirrors.opencloudos.org/opencloudos/9/BaseOS/x86_64/os/",
    "https://mirrors.opencloudos.org/opencloudos/9/AppStream/x86_64/os/",
    "https://mirrors.opencloudos.tech/epol/9/Everything/x86_64/os/",
    "https://mirrors.opencloudos.org/opencloudos/9/extras/x86_64/os/"
]

# On-disk cache layout under the user's home directory.
CACHE_DIR = Path.home() / ".cache" / "go-compat-checker"
# Pickled (package_db, library_db, location_db) tuple.
METADATA_CACHE_FILE = CACHE_DIR / "oc9_repo_db.pkl"
# Downloaded .rpm files.
RPM_CACHE_DIR = CACHE_DIR / "rpms"
# Metadata cache time-to-live; older caches are rebuilt.
CACHE_EXPIRATION = timedelta(days=1)


def clear_cache():
    """Remove the whole cache directory (metadata and RPM caches).

    Returns:
        bool: False when removal was attempted and failed, True otherwise
        (including when there was nothing to remove).
    """
    if not CACHE_DIR.exists():
        logging.info("缓存目录不存在，无需清理")
        return True

    total_bytes = sum(
        entry.stat().st_size for entry in CACHE_DIR.rglob('*') if entry.is_file())
    logging.info(f"正在清理缓存目录: {CACHE_DIR}")
    logging.info(f"缓存大小: {total_bytes / (1024 * 1024):.1f} MB")

    try:
        shutil.rmtree(CACHE_DIR)
    except Exception as e:
        # Best-effort cleanup: report the failure and signal it to the caller.
        logging.error(f"❌ 缓存清理失败: {e}")
        return False
    logging.info("✅ 缓存清理完成")
    return True


def show_cache_info():
    """Log a summary of the cache: metadata file, RPM files, and total size."""
    if not CACHE_DIR.exists():
        logging.info("缓存目录不存在")
        return

    mb = 1024 * 1024
    logging.info(f"缓存目录: {CACHE_DIR}")

    # Metadata (pickled repo database) cache.
    if METADATA_CACHE_FILE.exists():
        stat = METADATA_CACHE_FILE.stat()
        size = stat.st_size / mb
        mtime_str = datetime.fromtimestamp(stat.st_mtime).strftime('%Y-%m-%d %H:%M:%S')
        logging.info(f"  元数据缓存: {size:.1f} MB (更新时间: {mtime_str})")
    else:
        logging.info("  元数据缓存: 不存在")

    # Downloaded-RPM cache.
    if not RPM_CACHE_DIR.exists():
        logging.info("  RPM 缓存: 不存在")
    else:
        rpm_files = list(RPM_CACHE_DIR.glob('*.rpm'))
        if rpm_files:
            total_size = sum(p.stat().st_size for p in rpm_files) / mb
            logging.info(f"  RPM 缓存: {len(rpm_files)} 个文件, {total_size:.1f} MB")
        else:
            logging.info("  RPM 缓存: 空")

    # Grand total over every file anywhere under the cache tree.
    total_size = sum(
        p.stat().st_size for p in CACHE_DIR.rglob('*') if p.is_file()) / mb
    logging.info(f"  总缓存大小: {total_size:.1f} MB")


def download_repodata_from_url(base_url: str, temp_dir: Path):
    """Fetch repomd.xml and the primary metadata file for one repository.

    Args:
        base_url: Repository base URL; a trailing slash is appended if absent.
        temp_dir: Directory under which a per-repo subdirectory is created.

    Returns:
        Path to the per-repo directory holding the downloaded files, or
        None when the download or metadata lookup failed non-fatally.

    Raises:
        FileSystemError: the per-repo directory could not be created.
        NetworkError, RepoDataError: propagated from helpers unchanged.
    """
    if not base_url.endswith('/'):
        base_url += '/'

    # Name the subdirectory after the repo component of the URL
    # (e.g. "BaseOS" from .../9/BaseOS/x86_64/os/).
    repodata_dir = temp_dir / Path(base_url).parts[-3]
    try:
        repodata_dir.mkdir(exist_ok=True)
    except (OSError, IOError) as exc:
        raise FileSystemError(
            f"Failed to create directory {repodata_dir}: {exc}") from exc

    repomd_url = f"{base_url}repodata/repomd.xml"
    logging.debug("Downloading metadata from: %s", base_url)

    try:
        with requests.Session() as session:
            repomd_path = repodata_dir / 'repomd.xml'
            download_file_with_progress(session, repomd_url, repomd_path)

            # repomd.xml lists the repo's data files; find the "primary" one.
            tree = etree.parse(str(repomd_path))
            ns = {'repo': 'http://linux.duke.edu/metadata/repo'}
            location = tree.find(
                "repo:data[@type='primary']/repo:location", namespaces=ns)
            if location is None:
                raise RepoDataError(
                    f"Cannot find primary data location in {repomd_url}")

            href = location.get('href')
            download_file_with_progress(
                session, f"{base_url}{href}", repodata_dir / Path(href).name)
    except (NetworkError, FileSystemError, RepoDataError):
        # Project-level errors propagate to the caller untouched.
        raise
    except Exception as exc:
        # Anything else (connection problems, malformed XML) is treated as
        # a non-fatal, per-repo failure; the caller skips this repo.
        logging.warning("Failed to download from %s: %s", base_url, exc)
        return None
    return repodata_dir


def parse_repodata(repo_path: Path, repo_url: str):
    """Parse previously downloaded repository metadata.

    Reads repomd.xml in *repo_path* to locate the primary XML file, then
    delegates to _parse_primary_xml.

    Args:
        repo_path: Directory containing repomd.xml and the primary XML file.
        repo_url: Repository base URL, used for building package URLs.

    Returns:
        (packages, file_map, location_map) as produced by _parse_primary_xml.

    Raises:
        RepoDataError: repomd.xml is missing/unreadable or lacks a primary
            data location entry.
    """
    logging.debug("Parsing repository data from: %s", repo_path)

    # Find the primary XML file referenced by repomd.xml.
    try:
        repomd_path = repo_path / 'repomd.xml'
        root = etree.parse(str(repomd_path))
        namespaces = {'repo': 'http://linux.duke.edu/metadata/repo'}
        primary_location_elem = root.find(
            "repo:data[@type='primary']/repo:location", namespaces=namespaces)
        # Fix: a missing <location> element previously surfaced as a
        # confusing AttributeError on None ("'NoneType' object has no
        # attribute 'get'"); report the real problem explicitly instead.
        # Callers still see RepoDataError either way.
        if primary_location_elem is None:
            raise RepoDataError(
                f"Cannot find primary data location in {repomd_path}")
        primary_href = primary_location_elem.get('href')
        primary_xml_path = repo_path / Path(primary_href).name
    except RepoDataError:
        raise
    except Exception as exc:
        raise RepoDataError(f"Failed to find primary XML: {exc}") from exc

    return _parse_primary_xml(primary_xml_path, repo_url)


def _parse_primary_xml(primary_xml_path, repo_url):
    """Stream-parse a gzipped primary XML metadata file.

    Uses iterparse over <package> elements so the (potentially large) file
    is never fully materialized in memory.

    Returns:
        (packages, file_map, location_map) dicts accumulated per package.

    Raises:
        RepoDataError: the file could not be opened or parsed.
    """
    ns = {'common': 'http://linux.duke.edu/metadata/common',
          'rpm': 'http://linux.duke.edu/metadata/rpm'}
    packages = {}
    file_map = {}
    location_map = {}
    package_tag = f'{{{ns["common"]}}}package'

    try:
        with gzip.open(primary_xml_path, 'rb') as handle:
            for _event, node in etree.iterparse(
                    handle, events=('end',), tag=package_tag):
                _process_package_element(
                    node, ns, packages, file_map, location_map, repo_url)
                node.clear()  # release the element's memory as we stream
    except Exception as exc:
        raise RepoDataError(f"Failed to parse XML: {exc}") from exc

    logging.debug("Parsed %d packages and %d library files.",
                  len(packages), len(file_map))
    return packages, file_map, location_map


def _process_package_element(elem, namespaces, packages, file_map,
                             location_map, repo_url):
    """Record one <package> element into the accumulator dicts.

    Silently skips elements missing a name or version. Mutates packages,
    file_map and location_map in place.
    """
    name_node = elem.find('common:name', namespaces=namespaces)
    if name_node is None:
        return
    pkg_name = name_node.text

    version_node = elem.find('common:version', namespaces=namespaces)
    if version_node is None:
        return
    ver_str = f"{version_node.get('ver')}-{version_node.get('rel')}"

    loc_node = elem.find('common:location', namespaces=namespaces)
    if loc_node is not None:
        # Absolute download URL for the package RPM.
        location_map[pkg_name] = f"{repo_url}{loc_node.get('href')}"

    # First occurrence wins for the version/files record.
    packages.setdefault(pkg_name, {'version': ver_str, 'files': set()})

    _parse_package_provides(elem, namespaces, file_map, pkg_name)


def _parse_package_provides(elem, namespaces, file_map, pkg_name):
    """Map each shared-library name provided by this package to pkg_name.

    Walks <format>/<rpm:provides>/<rpm:entry> under *elem* and records
    every entry whose name looks like a shared object (contains ".so"),
    with any "(...)" version qualifier stripped. Mutates file_map in place.
    """
    for fmt in elem.iterfind('common:format', namespaces=namespaces):
        for provides in fmt.iterfind('rpm:provides', namespaces=namespaces):
            for entry in provides.iterfind('rpm:entry', namespaces=namespaces):
                raw_name = entry.get('name')
                if not raw_name or '.so' not in raw_name:
                    continue
                # e.g. "libc.so.6(GLIBC_2.0)(64bit)" -> "libc.so.6"
                lib_name = raw_name.partition('(')[0]
                if lib_name:
                    file_map[lib_name] = pkg_name


def build_oc9_database(temp_dir):
    """Download and merge metadata from every configured OC9 repository.

    Repositories that fail to download are skipped; on key collisions
    during merging, later repositories in OC9_REPO_URLS win.

    Args:
        temp_dir: Working directory for the per-repo metadata downloads.

    Returns:
        Tuple of merged (packages, file_map, location_map) dicts.
    """
    merged_packages = {}
    merged_files = {}
    merged_locations = {}
    for url in OC9_REPO_URLS:
        repo_dir = download_repodata_from_url(url, temp_dir)
        if not repo_dir:
            continue  # non-fatal download failure; skip this repo
        pkgs, files, locations = parse_repodata(repo_dir, url)
        merged_packages.update(pkgs)
        merged_files.update(files)
        merged_locations.update(locations)
    return merged_packages, merged_files, merged_locations


def get_or_build_package_database():
    """Return (package_db, library_db, location_db), using a pickle cache.

    A cache file younger than CACHE_EXPIRATION is loaded directly;
    otherwise (or when loading fails) the database is rebuilt from the
    online repositories and the cache is rewritten.

    NOTE(review): the cache is deserialized with pickle from the user's own
    cache directory — fine for trusted local data, unsafe for anything else.
    """
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    RPM_CACHE_DIR.mkdir(exist_ok=True)

    if METADATA_CACHE_FILE.exists():
        try:
            cache_age = datetime.now() - datetime.fromtimestamp(
                METADATA_CACHE_FILE.stat().st_mtime)
            if cache_age < CACHE_EXPIRATION:
                logging.debug("Loading OC9 package database from cache.")
                with METADATA_CACHE_FILE.open('rb') as handle:
                    package_db, library_db, location_db = pickle.load(handle)
                logging.debug("Loaded data for %d packages and %d libraries from cache.",
                              len(package_db), len(library_db))
                return package_db, library_db, location_db
            logging.debug("Metadata cache is expired.")
        except Exception as exc:
            # A corrupt or unreadable cache is not fatal; rebuild below.
            logging.warning(
                "Could not load cache file: %s. Rebuilding database.", exc)

    with tempfile.TemporaryDirectory(prefix="oc-repo-") as workdir:
        package_db, library_db, location_db = build_oc9_database(Path(workdir))

        try:
            logging.debug(
                "Saving OC9 package database to cache: %s", METADATA_CACHE_FILE)
            with METADATA_CACHE_FILE.open('wb') as handle:
                pickle.dump((package_db, library_db, location_db), handle)
            logging.debug("Cache saved successfully.")
        except (IOError, pickle.PickleError) as exc:
            # Failure to persist the cache is logged but not fatal.
            logging.error("Failed to save cache file: %s", exc)

    return package_db, library_db, location_db
