# -*- coding: utf-8 -*-
# scripts/processor.py
import os
import geopandas as gpd
import rasterio
from tqdm import tqdm
import numpy as np
import config
import utils
import re
from collections import defaultdict


def process_et_data():
    """Compute the total evapotranspiration (ET) water volume for the target
    city from MOD16A2 HDF files and print a summary.

    Scans ``config.DOWNLOAD_DIR`` for ``.hdf`` files, keeps only the most
    recently produced file per acquisition date, clips each ET layer to the
    city boundary, applies quality control, converts pixel values to water
    volume (m^3) and accumulates the total.

    Raises:
        FileNotFoundError: if no .hdf files exist in config.DOWNLOAD_DIR.
        ValueError: if the shapefile lacks the '市' (city) column or does not
            contain config.TARGET_CITY.
    """
    # --- De-duplicate: keep only the latest production per acquisition date ---
    all_hdf_files = [f for f in os.listdir(
        config.DOWNLOAD_DIR) if f.endswith(".hdf")]
    if not all_hdf_files:
        raise FileNotFoundError("❌ 未找到 HDF 文件")

    # MODIS naming: <prod>.A<YYYYDDD>.hXXvYY.<coll>.<production-timestamp>.hdf
    pattern = re.compile(
        r"A(\d{7})\.h\d{2}v\d{2}\.\d+\.(\d+)\.hdf")
    file_by_date = defaultdict(list)

    for fname in all_hdf_files:
        match = pattern.search(fname)
        if match:
            date = match.group(1)       # acquisition date, e.g. 2023185
            timestamp = match.group(2)  # production timestamp, e.g. 2023202121523
            file_by_date[date].append((timestamp, fname))
        else:
            # Keep files that do not match the naming convention (optional).
            file_by_date["unknown"].append(("0", fname))

    # For each date keep the file with the largest production timestamp.
    # Compare numerically so differing string lengths cannot skew the result.
    hdf_files = [max(files, key=lambda x: int(x[0]))[1]
                 for files in file_by_date.values()]

    # --- Main processing ---
    gdf = gpd.read_file(config.SHAPEFILE_PATH)
    if "市" not in gdf.columns:
        raise ValueError("❌ shapefile 缺少 '市' 字段")

    zibo = gdf[gdf["市"] == config.TARGET_CITY]
    if zibo.empty:
        raise ValueError(f"❌ shapefile 中未找到 '{config.TARGET_CITY}'")

    # Open one file only to discover the MODIS (sinusoidal) CRS.
    sample_file = os.path.join(config.DOWNLOAD_DIR, hdf_files[0])
    with rasterio.open(f'HDF4_EOS:EOS_GRID:"{sample_file}":MOD_Grid_MOD16A2:ET_500m') as src:
        modis_crs = src.crs

    zibo_proj = zibo.to_crs(modis_crs)

    total_volume = 0.0
    count_success = 0
    count_fail = 0

    for file in tqdm(hdf_files, desc="📦 处理 HDF 文件"):
        path = os.path.join(config.DOWNLOAD_DIR, file)
        et_path = f'HDF4_EOS:EOS_GRID:"{path}":MOD_Grid_MOD16A2:ET_500m'
        qc_path = f'HDF4_EOS:EOS_GRID:"{path}":MOD_Grid_MOD16A2:ET_QC_500m'

        et_data = utils.read_and_mask_layer(et_path, zibo_proj.geometry)
        qc_data = utils.read_and_mask_layer(qc_path, zibo_proj.geometry)

        if et_data is None or qc_data is None:
            utils.check_subdatasets(path)
            count_fail += 1
            continue

        # Work on a float copy so the NaN assignments below are valid even if
        # the reader returned the raw int16 MODIS array.
        et_data = np.asarray(et_data, dtype=np.float64)

        # MOD16A2 stores ET as int16 with valid range 0..32700. Values above
        # 32700 (32761..32767) are fill / non-vegetated land-class codes
        # (water, barren, urban, ...), not measurements. Masking only 32767
        # (as before) let the other fill codes inflate the totals, so the
        # whole invalid range is masked here.
        et_data[et_data > 32700] = np.nan

        # Keep only good (0) or acceptable (1) quality pixels.
        # NOTE(review): ET_QC_500m is a bitfield; comparing the whole byte to
        # 0/1 is stricter than testing only the MODLAND bits — confirm this
        # matches the intended QC policy.
        qc_mask = (qc_data == 0) | (qc_data == 1)
        et_data[~qc_mask] = np.nan

        # Convert pixel values to water volume (m^3):
        #   * 0.1            raw value (stored in 0.1 mm units) -> mm
        #   / 1000           mm -> m
        #   * PIXEL_AREA_M2  depth (m) * pixel area (m^2) -> volume (m^3)
        et_volume = et_data * 0.1 / 1000 * config.PIXEL_AREA_M2

        # Sum over all valid pixels of this image (m^3); NaNs are ignored.
        valid_volume_sum = np.nansum(et_volume)

        if valid_volume_sum > 0:
            total_volume += valid_volume_sum
            count_success += 1
        else:
            print(f"⚠️ 无有效数据: {file}")
            count_fail += 1

    print(f"\n✅ 成功: {count_success} 张, ❌ 失败: {count_fail} 张")
    print(f"💧 蒸散发水量总量: {total_volume / 1e8:.2f} 亿立方米")


def analyze_et_timeseries():
    """Build and plot a time series of ET water volume for the target city.

    For each acquisition date (keeping only the latest production per date),
    the MOD16A2 ET layer is clipped to the city boundary, quality-filtered,
    converted to a water volume (m^3) and plotted against the acquisition
    date.

    Raises:
        FileNotFoundError: if no .hdf files exist in config.DOWNLOAD_DIR.
    """
    import pandas as pd
    import matplotlib.pyplot as plt
    from datetime import datetime

    all_hdf_files = [f for f in os.listdir(
        config.DOWNLOAD_DIR) if f.endswith(".hdf")]
    if not all_hdf_files:
        raise FileNotFoundError("❌ 未找到 HDF 文件")

    # MODIS naming: <prod>.A<YYYYDDD>.hXXvYY.<coll>.<production-timestamp>.hdf
    pattern = re.compile(r"A(\d{7})\.h\d{2}v\d{2}\.\d+\.(\d+)\.hdf")
    file_by_date = defaultdict(list)

    for fname in all_hdf_files:
        match = pattern.search(fname)
        if match:
            file_by_date[match.group(1)].append((match.group(2), fname))

    # For each date keep the file with the largest production timestamp,
    # compared numerically rather than lexicographically.
    hdf_files = [max(files, key=lambda x: int(x[0]))[1]
                 for files in file_by_date.values()]

    gdf = gpd.read_file(config.SHAPEFILE_PATH)
    zibo = gdf[gdf["市"] == config.TARGET_CITY]
    # Open one file only to discover the MODIS (sinusoidal) CRS.
    sample_file = os.path.join(config.DOWNLOAD_DIR, hdf_files[0])
    with rasterio.open(f'HDF4_EOS:EOS_GRID:"{sample_file}":MOD_Grid_MOD16A2:ET_500m') as src:
        modis_crs = src.crs
    zibo_proj = zibo.to_crs(modis_crs)

    results = []

    for file in tqdm(hdf_files, desc="📈 构建 ET 时序数据"):
        match = pattern.search(file)
        if not match:
            continue
        # Acquisition date is encoded as YYYYDDD (year + day of year).
        date = datetime.strptime(match.group(1), "%Y%j")

        path = os.path.join(config.DOWNLOAD_DIR, file)
        et_path = f'HDF4_EOS:EOS_GRID:"{path}":MOD_Grid_MOD16A2:ET_500m'
        qc_path = f'HDF4_EOS:EOS_GRID:"{path}":MOD_Grid_MOD16A2:ET_QC_500m'

        et_data = utils.read_and_mask_layer(et_path, zibo_proj.geometry)
        qc_data = utils.read_and_mask_layer(qc_path, zibo_proj.geometry)

        if et_data is None or qc_data is None:
            continue

        # Float copy so NaN assignment works even on a raw int16 array.
        et_data = np.asarray(et_data, dtype=np.float64)
        # Mask all MOD16A2 fill / land-class codes (valid range 0..32700),
        # not just 32767 — 32761..32766 encode water/barren/urban etc.
        et_data[et_data > 32700] = np.nan
        # Keep only good (0) / acceptable (1) quality pixels.
        # NOTE(review): ET_QC_500m is a bitfield; whole-byte comparison is
        # stricter than testing only the MODLAND bits — confirm intent.
        qc_mask = (qc_data == 0) | (qc_data == 1)
        et_data[~qc_mask] = np.nan

        # 0.1 mm units -> mm -> m, times pixel area -> volume in m^3.
        et_volume = et_data * 0.1 / 1000 * config.PIXEL_AREA_M2
        results.append((date, np.nansum(et_volume)))

    df = pd.DataFrame(results, columns=["date", "et_volume"])
    if df.empty:
        # Nothing readable: avoid plotting an empty figure (and an
        # IndexError when deriving the year label below).
        print("⚠️ 无有效数据，无法绘制时序图")
        return
    df.sort_values("date", inplace=True)

    # Line chart. SimHei enables CJK glyphs; disabling unicode_minus avoids
    # broken minus signs while a CJK font is active.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.figure(figsize=(10, 5))
    plt.plot(df["date"], df["et_volume"] / 1e6, marker='o')
    # Derive the year label from the data instead of hard-coding 2024,
    # which contradicted the actual acquisition dates.
    years = sorted(df["date"].dt.year.unique())
    year_label = f"{years[0]}年" if len(years) == 1 else f"{years[0]}-{years[-1]}年"
    plt.title(f"{config.TARGET_CITY} {year_label}蒸散发时序图")
    plt.xlabel("日期")
    plt.ylabel("ET 蒸散发量（百万立方米）")
    plt.grid(True)
    plt.tight_layout()
    plt.show()


def evaluate_irrigation_gap(crop_map_path, crop_water_demand_dict):
    """Estimate the per-crop irrigation water deficit (mm) for the target city.

    Accumulates annual per-pixel ET (mm) from MOD16A2 files, then compares the
    average actual ET of each crop class against its annual water demand.

    Args:
        crop_map_path: path to a crop-distribution raster (pixel value = crop ID).
            NOTE(review): assumed to be on the same grid/shape as the clipped
            MODIS ET array — no reprojection/resampling is done here; confirm.
        crop_water_demand_dict: mapping {crop ID: annual water demand in mm}.

    Returns:
        dict mapping crop ID to {"需水量", "实际ET", "缺水量"} (all in mm).

    Raises:
        FileNotFoundError: if no .hdf files exist in config.DOWNLOAD_DIR.
        RuntimeError: if no ET layer could be read from any file.
        ValueError: if the crop map shape does not match the ET grid.
    """
    from rasterio import open as rio_open

    all_hdf_files = [f for f in os.listdir(
        config.DOWNLOAD_DIR) if f.endswith(".hdf")]
    if not all_hdf_files:
        raise FileNotFoundError("❌ 未找到 HDF 文件")

    # De-duplicate: keep the latest production per acquisition date.
    pattern = re.compile(r"A(\d{7})\.h\d{2}v\d{2}\.\d+\.(\d+)\.hdf")
    file_by_date = defaultdict(list)
    for fname in all_hdf_files:
        match = pattern.search(fname)
        if match:
            file_by_date[match.group(1)].append((match.group(2), fname))

    hdf_files = [max(files, key=lambda x: int(x[0]))[1]
                 for files in file_by_date.values()]

    gdf = gpd.read_file(config.SHAPEFILE_PATH)
    zibo = gdf[gdf["市"] == config.TARGET_CITY]
    # Open one file only to discover the MODIS (sinusoidal) CRS.
    sample_file = os.path.join(config.DOWNLOAD_DIR, hdf_files[0])
    with rasterio.open(f'HDF4_EOS:EOS_GRID:"{sample_file}":MOD_Grid_MOD16A2:ET_500m') as src:
        modis_crs = src.crs

    zibo_proj = zibo.to_crs(modis_crs)

    # Annual cumulative ET in mm. Only finite (valid) pixel values are
    # accumulated; pixels that were never observed stay NaN so they do not
    # drag the per-crop mean toward zero (the previous nan_to_num approach
    # silently counted masked pixels as 0 mm).
    annual_et_mm = None
    valid_any = None
    for file in tqdm(hdf_files, desc="📊 累计 ET 数据"):
        path = os.path.join(config.DOWNLOAD_DIR, file)
        et_path = f'HDF4_EOS:EOS_GRID:"{path}":MOD_Grid_MOD16A2:ET_500m'
        qc_path = f'HDF4_EOS:EOS_GRID:"{path}":MOD_Grid_MOD16A2:ET_QC_500m'

        et_data = utils.read_and_mask_layer(et_path, zibo_proj.geometry)
        qc_data = utils.read_and_mask_layer(qc_path, zibo_proj.geometry)

        if et_data is None or qc_data is None:
            continue

        # Float copy so NaN assignment works even on a raw int16 array.
        et_data = np.asarray(et_data, dtype=np.float64)
        # Mask all MOD16A2 fill / land-class codes (valid range 0..32700),
        # not just 32767 — 32761..32766 encode water/barren/urban etc.
        et_data[et_data > 32700] = np.nan
        # Keep only good (0) / acceptable (1) quality pixels.
        # NOTE(review): ET_QC_500m is a bitfield; whole-byte comparison is
        # stricter than testing only the MODLAND bits — confirm intent.
        qc_mask = (qc_data == 0) | (qc_data == 1)
        et_data[~qc_mask] = np.nan

        et_mm = et_data * 0.1  # raw 0.1 mm units -> mm
        if annual_et_mm is None:
            annual_et_mm = np.zeros_like(et_mm)
            valid_any = np.zeros(et_mm.shape, dtype=bool)
        finite = np.isfinite(et_mm)
        annual_et_mm[finite] += et_mm[finite]
        valid_any |= finite

    if annual_et_mm is None:
        # Previously this fell through to an opaque TypeError in np.where.
        raise RuntimeError("❌ 所有 HDF 文件均无法读取，无法累计 ET 数据")
    annual_et_mm[~valid_any] = np.nan

    # Read the crop-distribution raster.
    with rio_open(crop_map_path) as crop_src:
        crop_map = crop_src.read(1)

    if crop_map.shape != annual_et_mm.shape:
        raise ValueError(
            f"❌ 作物分布图与 ET 栅格尺寸不一致: {crop_map.shape} vs {annual_et_mm.shape}")

    # Evaluate the average water deficit per crop class.
    result = {}
    for crop_id, demand_mm in crop_water_demand_dict.items():
        crop_mask = (crop_map == crop_id)
        crop_et = np.where(crop_mask, annual_et_mm, np.nan)
        # A crop with no valid pixels yields NaN (and a RuntimeWarning from
        # nanmean); the reported deficit is then NaN as well.
        avg_et = np.nanmean(crop_et)
        deficit = demand_mm - avg_et
        result[crop_id] = {
            "需水量": demand_mm,
            "实际ET": round(avg_et, 2),
            "缺水量": round(deficit, 2),
        }

    print(f"🌾 作物灌溉评估结果（单位：mm）")
    for cid, data in result.items():
        print(
            f"作物 {cid}：实际ET={data['实际ET']}, 需水={data['需水量']}, 缺水={data['缺水量']}")
    return result
