from typing import Dict, List, Tuple, Optional
import rasterio
import numpy as np
import os
import re
from calc_v1.utils import load_parameters_from_json, extract_codes
from concurrent.futures import ProcessPoolExecutor, as_completed
import logging


def save_extracted_data(
    base_filename: str,
    output_dir: str,
    eco_name: str,
    extracted_data: np.ndarray,
    eco_profile: dict,
    air_nodata: float,
    eco_crs: rasterio.crs.CRS,
    eco_transform: rasterio.transform.Affine,
    compress: str = "lzw",
    bigtiff: str = "YES",
    skip_if_exists: bool = True
) -> bool:
    """
    Save extracted ecosystem accounting data as a new GeoTIFF file.

    :param base_filename: original file name (used to build the new file name)
    :param output_dir: output directory path
    :param eco_name: ecosystem name (replaces the 3rd "_"-separated field of the file name)
    :param extracted_data: extracted data array (masked pixels are expected to be NaN)
    :param eco_profile: rasterio profile to base the output on (mutated in place)
    :param air_nodata: NoData value of the accounting raster (may be None)
    :param eco_crs: coordinate reference system
    :param eco_transform: affine geotransform
    :param compress: TIFF compression method
    :param bigtiff: whether to enable BIGTIFF support
    :param skip_if_exists: skip saving when the target file already exists

    :return: True if the file was written, False if it was skipped
    :raises ValueError: if the file name does not contain at least three "_" fields
    """
    # Split the file name and substitute the third field with the ecosystem name.
    parts = base_filename.split("_")
    if len(parts) <= 2:
        raise ValueError("文件名格式不符合预期，无法找到第3个字段")

    new_parts = parts[:2] + [eco_name] + parts[3:]
    new_filename = "_".join(new_parts)
    output_path = os.path.join(output_dir, new_filename)

    # Skip when the target already exists and skip mode is on.
    if skip_if_exists and os.path.exists(output_path):
        logging.info(f"文件已存在，跳过保存：{output_path}")
        return False

    # Update the profile for the single-band float32 output.
    eco_profile.update(
        dtype=rasterio.float32,
        nodata=air_nodata,
        count=1,
        crs=eco_crs,
        transform=eco_transform,
        driver='GTiff',
        compress=compress,
        bigtiff=bigtiff
    )

    # BUG FIX: callers fill masked pixels with NaN, but the profile declares
    # `air_nodata` as the nodata value. Unless air_nodata is itself NaN, the
    # written NaNs would not be recognized as nodata by downstream readers,
    # so replace NaN with air_nodata before writing.
    out_data = extracted_data.astype(np.float32)
    if air_nodata is not None:
        out_data = np.where(np.isnan(out_data), np.float32(air_nodata), out_data)

    # Write the band.
    with rasterio.open(output_path, 'w', **eco_profile) as dst:
        dst.write(out_data, 1)

    logging.info(f"已保存 {eco_name} 的核算结果到 {output_path}")
    return True


def split_tif_by_ecosystem(
        eco_tif_path: str,
        gep_res_tif_path: str,
        output_dir: str = None,
        eco_type_json_path: str = None,
        compress: str = "lzw",
        bigtiff: str = "YES",
        skip_if_exists: bool = True
) -> Tuple[bool, Optional[str]]:
    """
    Split an accounting-result TIFF into per-ecosystem TIFFs using an
    ecosystem classification map.

    :param eco_tif_path: path to the ecosystem classification TIFF
    :param gep_res_tif_path: path to the accounting-result TIFF (air quality, soil retention, ...)
    :param output_dir: output directory (default: "spiltByEcoOutput" next to the result TIFF)
    :param eco_type_json_path: path to the JSON file with the ecosystem mapping
    :param compress: TIFF compression method, default 'lzw'
    :param bigtiff: whether to enable BIGTIFF support, default 'YES'
    :param skip_if_exists: skip writing when the target file already exists, default True

    :return: (success flag, error message or None)
    """
    if output_dir is None:
        output_dir = os.path.join(os.path.dirname(gep_res_tif_path), "spiltByEcoOutput")
    if eco_type_json_path is None:
        eco_type_json_path = r"F:\code\dev\calc-gep-regulate-cqc\calc_v1\data\ecosystems_json\ecosystems_type_GB.json"

    try:
        # Load the ecosystem name -> code-list mapping.
        ecosystem_json = load_parameters_from_json(eco_type_json_path)
        eco_mapping: Dict[str, List[int]] = {
            ecosystem["name"]: extract_codes(ecosystem)
            for ecosystem in ecosystem_json["applicable_ecosystems"]
        }

        # Ensure the output directory exists.
        os.makedirs(output_dir, exist_ok=True)

        # Read the ecosystem classification raster.
        with rasterio.open(eco_tif_path) as eco_src:
            eco_data: np.ndarray = eco_src.read(1)
            eco_nodata: float = eco_src.nodata
            eco_profile = eco_src.profile
            eco_transform = eco_src.transform
            eco_crs = eco_src.crs

        # Read the accounting-result raster.
        with rasterio.open(gep_res_tif_path) as air_src:
            air_data: np.ndarray = air_src.read(1)
            air_nodata: float = air_src.nodata

        # BUG FIX: this check used `assert`, which is silently stripped under
        # `python -O`; raise explicitly so misaligned inputs always fail.
        if eco_data.shape != air_data.shape:
            raise ValueError("生态图与空气质量图尺寸不一致！请确保它们的空间对齐。")

        # Valid-pixel masks (a missing nodata value means "all pixels valid").
        if eco_nodata is not None:
            eco_valid_mask = eco_data != eco_nodata
        else:
            eco_valid_mask = np.ones_like(eco_data, dtype=bool)

        if air_nodata is not None:
            air_valid_mask = air_data != air_nodata
        else:
            air_valid_mask = np.ones_like(air_data, dtype=bool)

        valid_mask = eco_valid_mask & air_valid_mask

        # Base file name used to build each per-ecosystem output name.
        base_filename = os.path.basename(gep_res_tif_path)

        # Extract and save each ecosystem type.
        for eco_name, eco_codes in eco_mapping.items():
            eco_mask = np.isin(eco_data, eco_codes)
            final_mask = eco_mask & valid_mask
            extracted_data = np.where(final_mask, air_data, np.nan)

            # Nothing to save when this ecosystem has no pixels in the region.
            if np.isnan(extracted_data).all():
                logging.info(f"{eco_name} 在生态图中无对应区域，跳过保存。")
                continue

            save_extracted_data(
                base_filename=base_filename,
                output_dir=output_dir,
                eco_name=eco_name,
                extracted_data=extracted_data,
                eco_profile=eco_profile.copy(),  # copy so each iteration starts from the pristine profile
                air_nodata=air_nodata,
                eco_crs=eco_crs,
                eco_transform=eco_transform,
                compress=compress,
                bigtiff=bigtiff,
                skip_if_exists=skip_if_exists
            )
        return True, None

    except Exception as e:
        # Best-effort contract: report the failure instead of raising, so one
        # bad file does not abort a whole batch run.
        logging.error(f"发生错误：{e}")
        return False, str(e)


def process_single_file(
        tif_file: str,
        input_folder: str,
        eco_folder: str,
        output_root_folder: str,
        eco_type_json_path: str,
        compress: str,
        bigtiff: str,
        skip_if_exists: bool,
        output_sub:bool  = False
) -> Tuple[str, bool, Optional[str]]:
    """
    Process a single accounting TIFF; worker function for the process pool.

    Extracts the six-digit region code from the file name, locates the
    matching ecosystem map, and delegates to ``split_tif_by_ecosystem``.

    :return: (file name, success flag, error message or None)
    """
    # Guard: the file name must carry a six-digit region code followed by "_".
    code_match = re.search(r"(\d{6})_", tif_file)
    if code_match is None:
        return tif_file, False, "未找到 6 位行政区划码"
    region_code = code_match.group(1)

    # Guard: the corresponding ecosystem map must exist.
    eco_tif_path = os.path.join(eco_folder, f"china_{region_code}.tif")
    if not os.path.exists(eco_tif_path):
        return tif_file, False, f"生态图文件不存在: {eco_tif_path}"

    # Per-region sub-folder only when requested.
    target_dir = (
        os.path.join(output_root_folder, region_code)
        if output_sub
        else output_root_folder
    )

    ok, err = split_tif_by_ecosystem(
        eco_tif_path=eco_tif_path,
        gep_res_tif_path=os.path.join(input_folder, tif_file),
        output_dir=target_dir,
        eco_type_json_path=eco_type_json_path,
        compress=compress,
        bigtiff=bigtiff,
        skip_if_exists=skip_if_exists
    )
    return tif_file, ok, err


def batch_split_tif_by_ecosystem_parallel(
        input_folder: str,
        eco_folder: str,
        output_root_folder: str = None,
        eco_type_json_path: str = None,
        compress: str = "lzw",
        bigtiff: str = "YES",
        skip_if_exists: bool = True,
        max_workers: int = 4,
        output_sub:bool = False
) -> None:
    """
    Batch-split accounting TIFFs in parallel: every result TIFF found in
    *input_folder* has its six-digit region code extracted and is matched
    against an ecosystem map in *eco_folder*, then split per ecosystem type.

    :param input_folder: folder containing the accounting-result TIFFs
    :param eco_folder: folder containing the ecosystem maps (china_xxxxxx.tif)
    :param output_root_folder: output root directory
    :param eco_type_json_path: path to the ecosystem-type JSON
    :param compress: TIFF compression method, default 'lzw'
    :param bigtiff: whether to enable BIGTIFF support, default 'YES'
    :param skip_if_exists: skip targets that already exist, default True
    :param max_workers: maximum number of worker processes
    :param output_sub: create a per-region-code sub-folder when True
    """
    # Collect every .tif accounting result (case-insensitive extension).
    tif_files = [
        name for name in os.listdir(input_folder)
        if name.lower().endswith(".tif")
    ]
    total = len(tif_files)
    logging.info(f"共发现 {total} 个 TIFF 文件，开始并行处理...")

    with ProcessPoolExecutor(max_workers=max_workers) as pool:
        # Fan the work out to the pool, one task per file.
        pending = []
        for name in tif_files:
            pending.append(
                pool.submit(
                    process_single_file,
                    name,
                    input_folder,
                    eco_folder,
                    output_root_folder,
                    eco_type_json_path,
                    compress,
                    bigtiff,
                    skip_if_exists,
                    output_sub
                )
            )

        # Report results as each worker finishes.
        for done in as_completed(pending):
            name, ok, error = done.result()
            if ok:
                logging.info(f"[{name}] 处理完成")
            else:
                logging.error(f"[{name}] 处理失败: {error}")


if __name__ == "__main__":
    # Configure logging only when run as a script: calling basicConfig at
    # import time would silently reconfigure the root logger of any
    # application that imports this module.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s'
    )
    batch_split_tif_by_ecosystem_parallel(
        input_folder=r"I:\GEP_result\2023test\Qsr_main",  # folder holding the accounting-result TIFFs
        eco_folder=r"H:\land30m2010\制作成新国标标准\maskByShp",  # folder holding the ecosystem maps china_xxxxxx.tif
        # output_root_folder=r"H:\test\eco_classify\output",  # output root; sub-folders created automatically
        # eco_type_json_path=r"F:\code\dev\calc-gep-regulate-cqc\calc_v1\data\ecosystems_json\ecosystems_type_GB.json",
        # compress="lzw",
        # bigtiff="YES",
        # skip_if_exists=True,
        max_workers=8,  # tune to the number of CPU cores
        output_sub=False
    )
