"""并行生成日度 / 月度 时间序列动图（不保存中间静态图）

用法（示例）：
  ./.venv/bin/python scripts/generate_time_series_gif.py \
      --parquet data/云南乡镇月度灯光亮度.parquet \
      --out figures/月度灯光_test.gif --limit 6

特性：
- 自动尝试检测时间列和数值列（若未指定）。
- 并行渲染每帧（ProcessPoolExecutor），只在内存中生成 PNG 并合成 GIF，不保存中间静态图。
- 支持 --limit 限制帧数，便于 smoke test。
"""
from pathlib import Path
import sys

# Ensure project root is on sys.path so `import src` works when running from scripts/
project_root = Path(__file__).resolve().parents[1]
if str(project_root) not in sys.path:
    sys.path.insert(0, str(project_root))
import argparse
import json
import yaml
import io
from concurrent.futures import ProcessPoolExecutor, as_completed
from typing import List, Optional, Tuple
import multiprocessing
import math

import pandas as pd
import geopandas as gpd
from PIL import Image as PILImage

from src.aux_utils import merge_geo_population
from utils.plot_style import set_plot_style, set_chinese_font
import matplotlib.font_manager as fm
import matplotlib as mpl

# Import colorbar formatters from existing module (they are module-level helpers)
from src.gif_generator import _format_cb_ticks_identity, _format_cb_ticks_percent, _format_cb_ticks_percent_identity

# Detect an installed Chinese-capable font (used for the colorbar label and ticks).
preferred_cn = ['Noto Sans CJK SC', 'SimHei', 'PingFang', 'Heiti SC', 'WenQuanYi Zen Hei', 'Microsoft YaHei', 'STHeiti', 'SimSun']
available_font_name = None
try:
    # Names of every font matplotlib knows about on this machine.
    installed = {font.name for font in fm.fontManager.ttflist}
    available_font_name = next((name for name in preferred_cn if name in installed), None)
except Exception:
    # Best effort only: any failure just means "no Chinese font available".
    available_font_name = None


def _render_frame_worker_ts(task):
    """Render one animation frame in a subprocess and return it as PNG bytes.

    Parameters
    ----------
    task : dict
        ``label`` (str)     -- frame label (e.g. ``"2020-01"``) drawn on the map.
        ``features`` (list) -- GeoJSON-like feature dicts (geometry + properties).
        ``params`` (dict)   -- rendering options: figsize, dpi, column, cmap,
                              vmin/vmax, colorbar settings, fonts, etc.

    Returns
    -------
    tuple
        ``(label, png_bytes)`` -- the frame label and the rendered PNG data.
    """
    frame_label = task['label']
    features = task['features']
    params = task['params']
    # Force a non-interactive backend before pyplot is imported (subprocess-safe).
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from shapely.geometry import shape
    from matplotlib.ticker import FuncFormatter
    # Local imports so the worker is self-contained under the 'spawn' start method.
    from utils.plot_style import set_plot_style, set_chinese_font
    import pandas as pd
    import geopandas as gpd

    # Rebuild a GeoDataFrame from the serialized features.
    props = [feat['properties'] for feat in features]
    geoms = [shape(feat['geometry']) for feat in features]
    df = pd.DataFrame(props)
    gdf = gpd.GeoDataFrame(df, geometry=geoms, crs=params.get('crs'))

    set_plot_style(params.get('plot_cfg', {}))
    font_family = params.get('font_family')
    if font_family:
        set_chinese_font(font_family, quiet=True)

    figsize = params.get('figsize', (10, 12))
    dpi = params.get('dpi', 150)
    column = params.get('column')
    cmap = params.get('cmap', 'Reds')
    vmin = params.get('vmin')
    vmax = params.get('vmax')
    # Default bbox places the horizontal colorbar close under the map when the
    # main process did not pass one in.
    colorbar_bbox = params.get('colorbar_bbox', [0.15, 0.16, 0.7, 0.03])

    fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
    # Fall back to common value columns when the requested one is missing.
    if column in gdf.columns:
        col = column
    else:
        col = 'mean' if 'mean' in gdf.columns else '总人口'

    # Shared normalization so every frame uses the same color scale.
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    gdf.plot(column=col, cmap=cmap, linewidth=params.get('linewidth', 0.2), edgecolor=params.get('edgecolor', '#444444'), ax=ax, norm=norm)
    ax.set_axis_off()

    mappable = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    # Public API instead of poking the private `_A` attribute.
    mappable.set_array([])
    # `colorbar_bbox` was already resolved above; no need to re-read params here.
    cax = fig.add_axes(colorbar_bbox)
    cb = fig.colorbar(mappable, cax=cax, orientation='horizontal')

    tick_percent = params.get('colorbar_tick_percent', False)
    tick_percent_mult = params.get('colorbar_tick_percent_multiply', True)
    if tick_percent:
        if tick_percent_mult:
            cb.ax.xaxis.set_major_formatter(FuncFormatter(_format_cb_ticks_percent))
        else:
            cb.ax.xaxis.set_major_formatter(FuncFormatter(_format_cb_ticks_percent_identity))
    else:
        cb.ax.xaxis.set_major_formatter(FuncFormatter(_format_cb_ticks_identity))
    cb.ax.tick_params(labelsize=9)

    # Colorbar label; kept in its own variable so it no longer shadows the
    # frame label (the original code reused `label` here).
    cb_label = params.get('colorbar_label', '')
    if tick_percent and ('%' not in cb_label and '（' not in cb_label and '(' not in cb_label):
        cb_label = f"{cb_label}（%）"
    cb.set_label(cb_label, fontsize=params.get('colorbar_label_size', 11))

    year_label_kwargs = dict(color=params.get('frame_label_color', 'white'), fontsize=params.get('frame_label_size', 18), weight='bold', va='bottom', ha='right')
    if font_family:
        year_label_kwargs['fontfamily'] = font_family
    # Optionally show the colorbar label text in place of the time label
    # (preserves the original `use_label_as_cb` behavior).
    frame_text = cb_label if params.get('use_label_as_cb', False) else frame_label
    ax.text(0.97, 0.05, str(frame_text), transform=ax.transAxes, bbox=dict(facecolor='black', alpha=0.35, pad=4), **year_label_kwargs)

    # Render to an in-memory PNG; no intermediate file is written.
    buf = io.BytesIO()
    fig.savefig(buf, format='png', dpi=dpi, bbox_inches='tight')
    plt.close(fig)
    buf.seek(0)
    data = buf.getvalue()
    buf.close()
    return frame_label, data


def detect_columns(df: pd.DataFrame) -> Tuple[Optional[str], Optional[str]]:
    """Guess the time column and the value column of *df*.

    Known candidate names (Chinese and English) are checked first, in the
    DataFrame's column order.  When no time column is matched by name, the
    first datetime-dtype or object-dtype column whose leading rows parse as
    dates is used instead.  Returns ``(time_col, val_col)``; either element
    may be ``None`` when nothing suitable is found.
    """
    time_candidates = ('日期', '日', 'time', '时间', 'timestamp', '月份', 'month', '年月', '年月日')
    value_candidates = ('mean', 'value', '亮度', 'avg', 'radiance', '值', '亮度均值')

    time_col = next((c for c in df.columns if c in time_candidates), None)
    val_col = next((c for c in df.columns if c in value_candidates), None)

    if time_col is None:
        # Fallback: first column that looks like (or parses as) a datetime.
        for candidate in df.columns:
            series = df[candidate]
            looks_temporal = (
                pd.api.types.is_datetime64_any_dtype(series)
                or pd.api.types.is_object_dtype(series)
            )
            if not looks_temporal:
                continue
            try:
                # Probe only the first few rows to keep this cheap.
                pd.to_datetime(series.iloc[:10])
            except Exception:
                continue
            time_col = candidate
            break
    return time_col, val_col


def main(argv: Optional[List[str]] = None) -> None:
    """CLI entry point: generate a monthly/daily time-series GIF.

    Reads the time-series parquet plus a GeoJSON layer, optionally filters
    observations down to border cities/counties, renders one choropleth
    frame per time step in parallel subprocesses, and assembles the PNG
    frames directly into a GIF (no intermediate static images on disk).

    Raises RuntimeError when required columns cannot be detected, when the
    border-county filter leaves no rows, or when no frames were rendered.
    """
    p = argparse.ArgumentParser(description='生成月度/日度时间序列 GIF（并行，无中间静态图）')
    p.add_argument('--parquet', required=True)
    p.add_argument('--geojson', default='data/云南乡镇.json')
    p.add_argument('--time-col', default=None)
    p.add_argument('--value-col', default=None)
    p.add_argument('--merge-on', default=None, help='用于合并的 code 字段（如 town_code），默认自动检测')
    p.add_argument('--out', required=True)
    p.add_argument('--limit', type=int, default=0, help='仅用于测试：限制渲染的帧数（取最前面的 n 个时间点）')
    p.add_argument('--n-workers', type=int, default=0)
    p.add_argument('--verbose', action='store_true', help='显示渲染进度')
    p.add_argument('--only-border-cities', action='store_true', help='仅渲染 sites.yaml 中列为边境城市的观测')
    p.add_argument('--border-city-field', default='border_city', help='（向后兼容）用于标记边境城市的字段名（默认 border_city）')
    p.add_argument('--city-field', default=None, help='用于匹配市名的列（例如 市/地名/地级市），优先用于与 sites.yaml 中的边境城市匹配')
    p.add_argument('--only-border-counties', action='store_true', help='仅渲染 border_county 字段为 1 的观测')
    p.add_argument('--border-county-field', default='border_county', help='用于标记边境县的字段名（默认 border_county）')
    p.add_argument('--duration', type=float, default=0.8)
    p.add_argument('--dpi', type=int, default=150)
    p.add_argument('--figsize', type=float, nargs=2, default=(10, 12))
    p.add_argument('--cmap', default='viridis')
    p.add_argument('--colorbar-label', default='')
    p.add_argument('--colorbar-percent', action='store_true')
    args = p.parse_args(argv)

    # Read the geojson up front (it may be needed for geo-based filtering below).
    geo = gpd.read_file(args.geojson)

    df = pd.read_parquet(args.parquet)
    initial_count = len(df)
    if args.verbose:
        print(f"[debug] parquet rows before filtering: {initial_count}")

    # When border-city filtering is requested, prefer the table-level, name-normalised
    # matching (consistent with prepare_border_counties_population_data); if that fails,
    # fall back to geo-based matching, and finally to the numeric flag field.
    if args.only_border_cities:
        # Prefer src.plot.prepare_border_counties_population_data to prepare the
        # border-city geo so behaviour matches try_plot.py exactly (it performs name
        # normalisation, sites.yaml reading and the geo intersection internally).
        try:
            from src.plot import prepare_border_counties_population_data
            # Call the prepare function with minimal params to obtain the merged geo.
            # prepare_border_counties_population_data expects a population parquet (not the time-series light parquet).
            # Use the project's population summary file as the input so the prepare function can find border cities.
            population_parquet_path = str(project_root / 'data' / '边境城市七普乡镇.parquet')
            prep = prepare_border_counties_population_data(year=None, sites_cfg_path=str(project_root / 'sites.yaml'), population_parquet=population_parquet_path, geojson_path=args.geojson, scope='city')
            if prep is None or 'geo' not in prep:
                raise RuntimeError('prepare_border_counties_population_data 未返回有效的 geo')
            merged_geo = prep['geo']
            if merged_geo is None or getattr(merged_geo, 'empty', False):
                # Raise when the prepared geo is empty so the fallback logic below kicks in.
                raise RuntimeError('prepare_border_counties_population_data 返回的 geo 为空')
            # Collect town identifiers from merged_geo (codes preferred, names as fallback).
            geo_town_codes = set()
            geo_town_names = set()
            if '乡镇代码' in merged_geo.columns:
                geo_town_codes.update(merged_geo['乡镇代码'].astype(str).str.strip().dropna().unique().tolist())
            elif 'tid' in merged_geo.columns or 'town_code' in merged_geo.columns:
                col = 'tid' if 'tid' in merged_geo.columns else 'town_code'
                geo_town_codes.update(merged_geo[col].astype(str).str.strip().dropna().unique().tolist())
            else:
                name_cols = [c for c in ['乡', '乡镇', '名称', 'name', 'town_name'] if c in merged_geo.columns]
                if name_cols:
                    geo_town_names.update([str(x).strip().lower().replace(' ', '') for x in merged_geo[name_cols[0]].astype(str).dropna().unique().tolist()])

            pre_len = len(df)
            # Filter the table by whichever identifier set the geo provided.
            if geo_town_codes:
                for cand in ['乡镇代码', 'town_code', 'tid', 'code', 'townid']:
                    if cand in df.columns:
                        df = df[df[cand].astype(str).str.strip().isin(geo_town_codes)]
                        break
            elif geo_town_names:
                for cand in ['town_name', '乡', '乡镇', '名称', 'name']:
                    if cand in df.columns:
                        df = df[df[cand].astype(str).apply(lambda x: str(x).strip().lower().replace(' ', '')).isin(geo_town_names)]
                        break
            if args.verbose:
                print(f"[debug] prepare_border_counties_population_data selected towns: rows before {pre_len} -> after {len(df)}")
        except Exception as e:
            # If the prepare_* call failed or came back empty, fall back to the local
            # matching logic (table-level -> geo -> numeric) for robustness.
            if args.verbose:
                print(f"[debug] prepare_border_counties_population_data 失败或返回空结果，回退到本地匹配逻辑: {e}")
            # Local fallback implementation (same as the earlier version).
            try:
                sites_cfg = yaml.safe_load((project_root / 'sites.yaml').read_text())
            except Exception:
                sites_cfg = {}
            border_city_list = sites_cfg.get('BORDER_CITIES', []) if isinstance(sites_cfg, dict) else []
            border_city_set = {str(x).strip() for x in border_city_list}
            name_suffixes = sites_cfg.get('geo', {}).get('name_suffixes_to_strip', []) if isinstance(sites_cfg, dict) else []

            def normalize_name_local(s: str) -> str:
                # Strip one configured suffix (e.g. 市/县), drop spaces, lowercase.
                if s is None:
                    return ''
                t = str(s).strip()
                for suf in (name_suffixes or []):
                    if suf and isinstance(suf, str) and t.endswith(suf):
                        t = t[: -len(suf)]
                        break
                return t.replace(' ', '').lower()

            border_city_set_norm = {normalize_name_local(x) for x in border_city_set}

            # Table-level matching.
            def try_table_city_filter_local(df_table):
                # Returns the filtered frame, or None when no usable city column exists.
                df_local = df_table.copy()
                # county_norm
                if any(c in df_local.columns for c in ['县', 'county', 'COUNTY', '县级']):
                    for cand in ['县', 'county', 'COUNTY', '县级']:
                        if cand in df_local.columns:
                            df_local['county_norm'] = df_local[cand].astype(str).apply(lambda x: normalize_name_local(x))
                            break
                # city_norm via direct city column
                city_col = None
                for cand in ['地名', '地级市', '市', 'city', 'city_name']:
                    if cand in df_local.columns:
                        city_col = cand
                        break
                if city_col:
                    df_local['city_norm'] = df_local[city_col].astype(str).apply(lambda x: normalize_name_local(x))
                else:
                    # Fall back to the county->city mapping from sites.yaml's
                    # border_city_non_border_counties section.
                    try:
                        bmap = sites_cfg.get('border_city_non_border_counties') if isinstance(sites_cfg, dict) else {}
                        county_to_city = {normalize_name_local(c): normalize_name_local(city) for city, lst in (bmap or {}).items() for c in lst}
                    except Exception:
                        county_to_city = {}
                    if 'county_norm' in df_local.columns:
                        df_local['city_norm'] = df_local['county_norm'].map(lambda x: county_to_city.get(x))

                if border_city_set_norm and 'city_norm' in df_local.columns:
                    return df_local[df_local['city_norm'].isin(border_city_set_norm)]
                elif 'border_city' in df_local.columns:
                    return df_local[df_local['border_city'] == 1]
                return None

            df_filtered = try_table_city_filter_local(df)
            if df_filtered is not None and not df_filtered.empty:
                df = df_filtered
                # Intersect with geo: collect the geo's town identifiers and filter df by them.
                try:
                    geo_local = geo.copy()
                    for cand in ['县', 'county', 'COUNTY', '县级']:
                        if cand in geo_local.columns:
                            geo_local['county_norm'] = geo_local[cand].astype(str).apply(lambda x: normalize_name_local(x))
                            break
                    for cand in ['地名', '地级市', '市', 'city', 'city_name']:
                        if cand in geo_local.columns:
                            geo_local['city_norm'] = geo_local[cand].astype(str).apply(lambda x: normalize_name_local(x))
                            break
                    geo_sel = geo_local[geo_local['city_norm'].isin(border_city_set_norm)] if 'city_norm' in geo_local.columns and border_city_set_norm else geo_local
                    geo_town_codes = set()
                    geo_town_names = set()
                    if '乡镇代码' in geo_sel.columns:
                        geo_town_codes.update(geo_sel['乡镇代码'].astype(str).str.strip().dropna().unique().tolist())
                    elif 'tid' in geo_sel.columns or 'town_code' in geo_sel.columns:
                        col = 'tid' if 'tid' in geo_sel.columns else 'town_code'
                        geo_town_codes.update(geo_sel[col].astype(str).str.strip().dropna().unique().tolist())
                    else:
                        for nc in ['乡', '乡镇', '名称', 'name', 'town_name']:
                            if nc in geo_sel.columns:
                                geo_town_names.update([normalize_name_local(x) for x in geo_sel[nc].astype(str).dropna().unique().tolist()])
                                break
                    pre_len = len(df)
                    if geo_town_codes:
                        for cand in ['乡镇代码', 'town_code', 'tid', 'code', 'townid']:
                            if cand in df.columns:
                                df = df[df[cand].astype(str).str.strip().isin(geo_town_codes)]
                                break
                    elif geo_town_names:
                        for cand in ['town_name', '乡', '乡镇', '名称', 'name']:
                            if cand in df.columns:
                                df = df[df[cand].astype(str).apply(lambda x: normalize_name_local(x)).isin(geo_town_names)]
                                break
                    if args.verbose:
                        print(f"[debug] fallback table-level+geo matched towns: rows before {pre_len} -> after {len(df)}")
                except Exception as e2:
                    # Best-effort: keep the table-level filter result even if the
                    # geo intersection step fails.
                    if args.verbose:
                        print(f"[debug] fallback geo intersection failed: {e2}")
            else:
                # Last resort: use the numeric border_city flag column.
                field = args.border_city_field
                if field in df.columns:
                    df = df[(df[field] == 1) | (df[field] == '1')]
                    if args.verbose:
                        print(f"[debug] fallback numeric border_city field '{field}'; rows after: {len(df)}")
                else:
                    if args.verbose:
                        print("[debug] 未能进行任何边境城市过滤（表格/geo/numeric 均失败）")


    # Optional border-county filter (independent switch).
    if args.only_border_counties:
        bc_field = args.border_county_field
        if bc_field not in df.columns:
            raise RuntimeError(f"指定的 border-county 字段 '{bc_field}' 在数据中不存在")
        df = df[(df[bc_field] == 1) | (df[bc_field] == '1')]
        if df.empty:
            raise RuntimeError(f"过滤后没有符合 '{bc_field} == 1' 的观测，请检查数据或字段名")
    time_col = args.time_col
    val_col = args.value_col
    if time_col is None or val_col is None:
        det_time, det_val = detect_columns(df)
        time_col = time_col or det_time
        val_col = val_col or det_val
    if time_col is None:
        raise RuntimeError('无法检测到时间列，请使用 --time-col 指定')
    if val_col is None:
        raise RuntimeError('无法检测到数值列，请使用 --value-col 指定')

    # parse time column to datetime for sorting
    try:
        df[time_col] = pd.to_datetime(df[time_col])
    except Exception:
        # leave as is
        pass

    # load geo
    # NOTE(review): the geojson was already read at the top of main(); this re-read
    # duplicates I/O but resets any earlier state — confirm before removing.
    geo = gpd.read_file(args.geojson)

    # determine merge key
    merge_on = args.merge_on
    if merge_on is None:
        if '乡镇代码' in df.columns or 'town_code' in df.columns or 'tid' in df.columns:
            merge_on = 'town_code'
        else:
            merge_on = None

    # build times list
    times = sorted(df[time_col].dropna().unique())
    if args.limit and args.limit > 0:
        times = times[: args.limit]

    # compute global vmin/vmax across selected times (use val_col)
    # A shared scale keeps frame-to-frame colors comparable; vmax is clipped to
    # the 95th percentile to keep outliers from washing out the map.
    all_vals = []
    for t in times:
        sub = df[df[time_col] == t]
        if val_col in sub.columns:
            vals = pd.to_numeric(sub[val_col], errors='coerce').dropna().values
            if vals.size > 0:
                all_vals.append(vals)
    if all_vals:
        import numpy as np
        stacked = np.concatenate(all_vals)
        vmin = float(stacked.min())
        vmax = float(np.percentile(stacked, 95))
        if vmin == vmax:
            vmax = vmin + 1e-6
    else:
        vmin, vmax = 0.0, 1.0

    # prepare tasks: for each time, merge with geo and serialize features
    # (features are plain dicts so they can be pickled to the worker processes).
    tasks = []
    for t in times:
        sub = df[df[time_col] == t]
        # ensure merge key exists in sub
        # NOTE(review): '乡镇代码' is not considered as a merge key here even though
        # it is checked above — confirm merge_geo_population handles that case.
        merged = merge_geo_population(geo, sub, merge_on_code='town_code' if 'town_code' in sub.columns else merge_on, name_suffixes=None, normalize_name_fn=None)
        try:
            features = json.loads(merged.to_json())['features']
        except Exception:
            features = None
            # fallback: use geometry __geo_interface__
            try:
                features = merged.__geo_interface__['features']
            except Exception:
                raise
        # Decide colorbar label: prefer user-specified, else auto for light/brightness columns
        if args.colorbar_label:
            cb_label_val = args.colorbar_label
        else:
            if isinstance(val_col, str) and ('灯' in val_col or '亮' in val_col):
                cb_label_val = '平均灯光亮度'
            else:
                cb_label_val = '值'

        params = {
            'figsize': tuple(args.figsize),
            'dpi': args.dpi,
            'column': val_col,
            'cmap': args.cmap,
            'vmin': vmin,
            'vmax': vmax,
                'colorbar_bbox': (0.15, 0.16, 0.7, 0.03),
            'plot_cfg': {},
            'font_family': available_font_name,
            'colorbar_label': cb_label_val,
            'colorbar_tick_percent': args.colorbar_percent,
            'colorbar_tick_percent_multiply': True,
            'colorbar_label_size': 11,
            'frame_label_color': 'white',
            'frame_label_size': 18,
            'linewidth': 0.2,
            'edgecolor': '#444444',
        }
        # Format the frame label as YYYY-MM.
        try:
            import pandas as _pd
            label_dt = _pd.to_datetime(t)
            label_str = label_dt.strftime('%Y-%m')
        except Exception:
            label_str = str(t)
        tasks.append({'label': label_str, 'features': features, 'params': params})

    # Run the frame workers in parallel (optional rich progress bar when --verbose).
    n_workers = args.n_workers if args.n_workers and args.n_workers > 0 else (multiprocessing.cpu_count() or 1)
    results = []
    progress = None
    progress_task = None
    if args.verbose:
        from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeElapsedColumn
        progress = Progress(SpinnerColumn(), TextColumn('[bold blue]渲染帧'), BarColumn(), TextColumn('{task.completed}/{task.total}'), TimeElapsedColumn())
        progress.start()
        progress_task = progress.add_task('frames', total=len(tasks))

    with ProcessPoolExecutor(max_workers=n_workers) as ex:
        futures = {ex.submit(_render_frame_worker_ts, task): task['label'] for task in tasks}
        for fut in as_completed(futures):
            try:
                label, data = fut.result()
                results.append((label, data))
                if progress:
                    progress.advance(progress_task)
            except Exception:
                # A failed frame is reported but does not abort the whole GIF.
                import traceback
                print('Worker exception:', traceback.format_exc())

    # sort by label order in tasks
    # (as_completed yields frames out of order; restore chronological order here).
    label_order = {task['label']: i for i, task in enumerate(tasks)}
    results_sorted = sorted(results, key=lambda x: label_order.get(x[0], 0))

    frames = []
    for lbl, data in results_sorted:
        buf = io.BytesIO(data)
        pil_img = PILImage.open(buf).convert('RGBA')
        # copy() detaches the image from the BytesIO buffer before it is closed.
        frames.append(pil_img.copy())
        buf.close()

    if not frames:
        raise RuntimeError('没有渲染到任何帧')

    outp = Path(args.out).resolve()
    outp.parent.mkdir(parents=True, exist_ok=True)
    duration_ms = int(args.duration * 1000)
    # disposal=2 clears each frame before drawing the next (avoids ghosting in the GIF).
    frames[0].save(outp, save_all=True, append_images=frames[1:], duration=duration_ms, loop=0, optimize=False, disposal=2)
    print(f'已保存 GIF 到 {outp} (frames={len(frames)})')


if __name__ == '__main__':
    # Script entry point; argparse reads sys.argv inside main().
    main()
