# ga_region_partition.py —— complete post-replacement file (includes the newly added print output)
from __future__ import annotations
import argparse
import random
import json
from dataclasses import dataclass
from typing import Callable, Dict, List, Tuple, Optional

try:
    import pandas as pd
    import numpy as np
except Exception as e:
    raise RuntimeError("需要 pandas 与 numpy 环境") from e

# Must run before GA_TS is imported: provide a no-op njit fallback when
# numba is unavailable so @njit-decorated code still runs as plain Python.
try:
    from numba import njit
except Exception:
    def njit(*args, **kwargs):
        """No-op numba.njit stand-in.

        Supports both decoration styles:
          - bare:          @njit            -> returns the function unchanged
          - parametrized:  @njit(cache=...) -> returns an identity decorator
        (The previous fallback mishandled the bare form: njit(f) returned the
        inner wrapper, so calling the decorated function returned its argument.)
        """
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]
        def wrap(f): return f
        return wrap


# =========================
# 数据结构
# =========================
@dataclass
class TaskData:
    """Column-oriented bundle of one case's tasks; all arrays share length N."""
    ids: np.ndarray            # task ids
    x: np.ndarray              # x coordinate (km)
    y: np.ndarray              # y coordinate (km)
    weight: np.ndarray         # load weight (kg)
    time_limit: np.ndarray     # required delivery deadline (minutes)
    penalty_rate: np.ndarray   # lateness penalty cost (yuan per minute)

    @property
    def N(self) -> int:
        """Number of tasks."""
        return self.ids.shape[0]


def load_tasks_from_excel(path: str, sheet: str = "Case1") -> TaskData:
    """Read one case sheet of 附件一_A_data.xlsx into a TaskData bundle.

    Raises ValueError if any expected (Chinese-named) column is absent.
    """
    frame = pd.read_excel(path, sheet_name=sheet)
    expected = ['任务编号', 'x坐标 (km)', 'y坐标 (km)', '载重 (kg)', '规定配送时间 (分钟)', '延迟处罚成本 (元/分钟)']
    absent = [c for c in expected if c not in frame.columns]
    if absent:
        # Report the first missing column, matching the original error text.
        raise ValueError(f"Excel 缺少列: {absent[0]}")

    return TaskData(
        ids=frame['任务编号'].to_numpy(dtype=int),
        x=frame['x坐标 (km)'].to_numpy(dtype=float),
        y=frame['y坐标 (km)'].to_numpy(dtype=float),
        weight=frame['载重 (kg)'].to_numpy(dtype=float),
        time_limit=frame['规定配送时间 (分钟)'].to_numpy(dtype=float),
        penalty_rate=frame['延迟处罚成本 (元/分钟)'].to_numpy(dtype=float),
    )


# =========================
# 工具函数
# =========================
def compute_region_centers(assignments: np.ndarray, x: np.ndarray, y: np.ndarray, K: int) -> np.ndarray:
    """Return a (K, 2) array of per-region (mean-x, mean-y) centroids.

    A region with no assigned tasks keeps the (0.0, 0.0) placeholder.
    """
    centers = np.zeros((K, 2), dtype=float)
    for region in range(K):
        members = np.where(assignments == region)[0]
        if members.size == 0:
            continue  # leave the all-zero row for an empty region
        centers[region] = (float(x[members].mean()), float(y[members].mean()))
    return centers

def allocate_vehicles_linear(assignments: np.ndarray, K: int, total: int) -> np.ndarray:
    """Allocate `total` vehicles across K regions proportionally to task count
    (Hamilton largest-remainder method), guaranteeing >= 1 per region and an
    exact sum of `total`.

    assignments: shape (N,), values in 0..K-1
    Returns: shape (K,), vehicles per region
    Raises ValueError when total < K.
    """
    if total < K:
        raise ValueError("总车辆数必须 >= 区域数 K")

    n_tasks = assignments.shape[0]
    counts = np.fromiter(((assignments == k).sum() for k in range(K)), dtype=int, count=K)

    # One guaranteed vehicle per region; distribute the remainder by share.
    guaranteed = np.ones(K, dtype=int)
    spare = total - K
    if spare <= 0:
        return guaranteed

    quotas = (counts / max(1, n_tasks)) * spare
    whole = np.floor(quotas).astype(int)
    leftover = spare - int(whole.sum())

    # Largest-remainder step: biggest fractional parts win the leftovers
    # (np.argsort on the negated fractions keeps the original tie-breaking).
    by_fraction = np.argsort(-(quotas - whole))
    result = guaranteed + whole
    result[by_fraction[:leftover]] += 1
    return result


# =========================
# 经验参数（上一版的 surrogate 仍保留，用于速度/服务时间等）
# =========================
@dataclass
class SurrogateParams:
    """Empirical parameters kept from the earlier surrogate-cost version.

    Only `speed_kmh` is read by the current GA_TS-based evaluator
    (evaluate_partition); the remaining fields are legacy surrogate knobs
    that main() still populates from the CLI.
    """
    speed_kmh: float = 40.0     # vehicle speed (km/h); passed to GA_TS instance builder
    service_min: float = 0.0    # fixed service time per task (minutes) — legacy, unused here
    dist_factor: float = 1.0    # empirical route-length coefficient c — legacy, unused here
    multi_vehicle_eta: float = 0.4          # legacy multi-vehicle factor — unused here
    vehicle_cap_kg: Optional[float] = None  # per-vehicle capacity (kg); None disables — legacy
    big_empty_penalty: float = 1e6          # legacy penalty for an empty region
    MIN_TASKS_PER_TRIP: int = 2
    MAX_TASKS_PER_TRIP: int = 10
    SINGLE_TASK_TRIP_PENALTY: float = 200.0  # legacy surcharge for single-task trips

SUR_PARAMS = SurrogateParams()  # module-level instance; overridden from the CLI in main()


# =========================
# 用 GA_TS.py 精确评估单区域最优“违约成本”
# =========================
def evaluate_partition(
    assignments: np.ndarray,
    vehicle_alloc: np.ndarray,
    centers: np.ndarray,
    data: TaskData
) -> float:
    """
    Exactly evaluate a partition with GA_TS.py (GA + Tabu + DP): solve each
    region's single-region scheduling problem and return the sum of the
    regions' total lateness-penalty costs (unit: yuan).

    Details:
      - Each region's centroid is used as its depot coordinate.
      - If a region is allocated v > 1 vehicles, its tasks are split
        round-robin (by ascending deadline) into v groups; each group is
        solved independently and the lateness costs are summed, which
        approximates vehicles running in parallel.
      - Only the lateness penalty is counted (the travel-time term is zeroed
        via lambda_duration=0), matching the "minimize total default cost"
        objective.
    """
    # --- Lazily load GA_TS ONCE and cache it in sys.modules. This evaluator
    # runs for every GA individual in every generation; the previous version
    # re-read and re-executed GA_TS.py on each call, which dominates runtime. ---
    import importlib.util, os, sys
    GA_TS = sys.modules.get("GA_TS")
    if GA_TS is None:
        ga_ts_path = os.path.join(os.path.dirname(__file__), "GA_TS.py")
        if not os.path.exists(ga_ts_path):
            # Fall back to the current working directory.
            alt = "GA_TS.py"
            ga_ts_path = alt if os.path.exists(alt) else ga_ts_path
        spec = importlib.util.spec_from_file_location("GA_TS", ga_ts_path)
        if spec is None or spec.loader is None:
            raise RuntimeError(f"无法加载 GA_TS.py：{ga_ts_path}")
        GA_TS = importlib.util.module_from_spec(spec)
        sys.modules["GA_TS"] = GA_TS
        spec.loader.exec_module(GA_TS)

    # Evaluation weights: only the lateness cost matters.
    ep = GA_TS.EvalParams(
        late_weight=1.0,
        lambda_duration=0.0,
        overtime_penalty=1e9,
        seed=123
    )
    gp = GA_TS.GAParams(
        pop_size=60, generations=600,
        crossover_rate=0.9, mutation_rate=0.30,
        elite_frac=0.30,
        tabu_elite_k=1, tabu_iters=10, tabu_tenure=8,
        neigh_k=10, run_tabu_every=10, neigh_window=6,
        seed=123,
        early_patience=120, early_min_delta=1e-4, early_start_gen=40
    )

    total_late_cost = 0.0
    K = int(vehicle_alloc.size)
    df = pd.DataFrame({
        "id": data.ids,
        "x": data.x,
        "y": data.y,
        "deadline": data.time_limit,
        "penalty": data.penalty_rate,
        "demand": data.weight,
        "region": assignments.astype(int)
    })

    for k in range(K):
        veh = int(vehicle_alloc[k])
        if veh <= 0:
            continue
        sub = df[df["region"] == k].copy()
        if sub.empty:
            continue

        # Region centroid doubles as the depot location.
        depot_xy = tuple(map(float, centers[k].tolist()))

        # Multi-vehicle parallelism: round-robin split into v groups by deadline.
        sub.sort_values("deadline", inplace=True)
        if veh <= 1:
            bins = [sub]
        else:
            splits = [[] for _ in range(veh)]
            rows = sub.values.tolist()
            for i, row in enumerate(rows):
                splits[i % veh].append(row)
            cols = sub.columns.tolist()
            bins = [pd.DataFrame(s, columns=cols) for s in splits if len(s) > 0]

        for part in bins:
            ins = GA_TS.build_instance_from_df(
                part,
                XCOL="x", YCOL="y", LCOL="deadline", PCOL="penalty", WCOL="demand",
                depot_xy=depot_xy,
                speed_kmh=SUR_PARAMS.speed_kmh,
                work_budget_min=3200.0
            )
            best_sol, best_eval = GA_TS.solve(ins, ep, gp)
            total_late_cost += float(best_eval.total_late_cost)

    return float(total_late_cost)


# =========================
# 遗传算法主体（区域划分 GA）
# =========================
@dataclass
class GAConfig:
    """Configuration for the upper-level region-partition GA."""
    K: int = 6                   # number of regions
    total_vehicles: int = 20     # vehicles to distribute across all regions
    pop_size: int = 80
    generations: int = 200
    crossover_rate: float = 0.8
    mutation_rate: float = 0.01
    elite_num: int = 2
    seed: Optional[int] = 42
    patience: int = 50   # stop early after this many generations without improvement

    # ----- Initialization options -----
    init_strategy: str = "kmeans+random"   # one of ["random", "kmeans", "kmeans+random"]
    kmeans_frac: float = 0.5               # fraction of the population seeded by KMeans (rest random)
    kmeans_max_iter: int = 30
    kmeans_n_init: int = 3
    kmeans_perturb: float = 0.08           # per-task relabel probability for KMeans-derived individuals

    # ----- Empirical parameters forwarded to SUR_PARAMS in main() -----
    speed_kmh: float = 40.0
    service_min: float = 0.0
    eta: float = 0.4
    dist_factor: float = 1.0
    cap_kg: float = 400.0  # per-vehicle capacity (kg); may be set to None to disable

class GeneticRegionPartitioner:
    """Upper-level GA searching over task->region label vectors (0..K-1).

    Each individual is a length-N int array. Fitness = 1 / (1 + cost), where
    cost comes from the injected evaluator (default: evaluate_partition,
    which solves every region with GA_TS). Uses roulette-wheel selection,
    single-point crossover, fraction-based relabel mutation, elitism, and an
    early-stop patience counter.
    """

    def __init__(self, config: GAConfig, data: TaskData,
                 evaluator: Callable[[np.ndarray, np.ndarray, np.ndarray, TaskData], float] = evaluate_partition):
        self.cfg = config
        self.data = data
        self.K = config.K
        # Seed both the stdlib and numpy global RNGs for reproducible runs.
        if config.seed is not None:
            random.seed(config.seed)
            np.random.seed(config.seed)
        self.evaluator = evaluator

    @staticmethod
    def _repair_static(ind: np.ndarray, K: int) -> np.ndarray:
        # Guarantee every label 0..K-1 occurs at least once: each missing
        # label takes over one task, preferably from a region that still has
        # more than one task. Mutates `ind` in place and returns it.
        present = set(ind.tolist())
        missing = [k for k in range(K) if k not in present]
        if not missing:
            return ind
        for k in missing:
            counts = np.array([(ind == j).sum() for j in range(K)], dtype=int)
            candidates_regions = np.where(counts > 1)[0].tolist()
            if len(candidates_regions) == 0:
                # Degenerate case: no donor region with >1 task; overwrite a
                # random position anyway.
                i = random.randrange(ind.size)
                ind[i] = k
            else:
                r = random.choice(candidates_regions)
                idxs = np.where(ind == r)[0]
                i = random.choice(idxs.tolist())
                ind[i] = k
        return ind

    def _repair(self, ind: np.ndarray) -> np.ndarray:
        # Instance-bound convenience wrapper over _repair_static.
        return self._repair_static(ind, self.K)

    def _random_individual(self) -> np.ndarray:
        # Uniformly random label per task (may omit labels; children are
        # repaired after crossover/mutation, not here).
        return np.array([random.randrange(self.K) for _ in range(self.data.N)], dtype=int)

    def _fitness(self, ind: np.ndarray) -> float:
        """Turn the smaller-is-better cost into a larger-is-better fitness."""
        vehicle_alloc = allocate_vehicles_linear(ind, self.K, self.cfg.total_vehicles)
        centers = compute_region_centers(ind, self.data.x, self.data.y, self.K)
        cost = self.evaluator(ind, vehicle_alloc, centers, self.data)
        if cost < 0:
            cost = 0.0
        return 1.0 / (1.0 + cost)

    def _roulette(self, population: List[np.ndarray], fits: List[float]) -> np.ndarray:
        # Fitness-proportional (roulette-wheel) selection; always returns a copy.
        s = sum(fits)
        if s <= 0:
            return random.choice(population).copy()
        r = random.random() * s
        acc = 0.0
        for ind, f in zip(population, fits):
            acc += f
            if acc >= r:
                return ind.copy()
        return population[-1].copy()

    def _crossover(self, p1: np.ndarray, p2: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        # Single-point crossover with probability crossover_rate; children are
        # repaired so no region label disappears.
        if random.random() > self.cfg.crossover_rate:
            return p1.copy(), p2.copy()
        n = p1.size
        point = random.randrange(1, n)  # cut point in [1, n-1]
        c1 = np.concatenate([p1[:point], p2[point:]])
        c2 = np.concatenate([p2[:point], p1[point:]])
        return self._repair(c1), self._repair(c2)

    def _mutate(self, ind: np.ndarray) -> np.ndarray:
        # Relabel ~mutation_rate of the tasks (at least one) to a different
        # random region, then repair.
        arr = ind.copy()
        n = arr.size
        m = max(1, int(n * self.cfg.mutation_rate))
        idxs = random.sample(range(n), m)
        for i in idxs:
            old = arr[i]
            choices = list(range(self.K))
            choices.remove(old)
            arr[i] = random.choice(choices)
        return self._repair(arr)

    def run(self) -> Dict:
        """Run the GA and return the best partition with derived artifacts
        (vehicle allocation, region centers, per-region 0/1 masks)."""
        # -------- Initialize population: kmeans / random / hybrid --------
        pop: List[np.ndarray] = []
        if self.cfg.init_strategy in ("kmeans", "kmeans+random"):
            pop.extend(make_kmeans_population(self.data, self.cfg))
        while len(pop) < self.cfg.pop_size:
            pop.append(self._random_individual())

        fits = [self._fitness(ind) for ind in pop]
        best_ind = pop[int(np.argmax(fits))].copy()
        best_fit = max(fits)
        best_cost = (1.0 / best_fit) - 1.0  # invert the fitness transform back to cost
        no_improve = 0

        for _ in range(self.cfg.generations):
            # Elitism: copy the top elite_num individuals through unchanged.
            elite_idxs = list(np.argsort(fits))[-self.cfg.elite_num:]
            elites = [pop[i].copy() for i in elite_idxs]

            new_pop: List[np.ndarray] = elites[:]
            while len(new_pop) < self.cfg.pop_size:
                p1 = self._roulette(pop, fits)
                p2 = self._roulette(pop, fits)
                c1, c2 = self._crossover(p1, p2)
                c1 = self._mutate(c1)
                c2 = self._mutate(c2)
                new_pop.append(c1)
                if len(new_pop) < self.cfg.pop_size:
                    new_pop.append(c2)
            pop = new_pop

            fits = [self._fitness(ind) for ind in pop]
            cur_best_idx = int(np.argmax(fits))
            cur_best_ind = pop[cur_best_idx].copy()
            cur_best_fit = fits[cur_best_idx]
            if cur_best_fit > best_fit + 1e-12:
                best_fit = cur_best_fit
                best_ind = cur_best_ind
                best_cost = (1.0 / best_fit) - 1.0
                no_improve = 0
            else:
                no_improve += 1

            # Early stop after `patience` generations without improvement.
            if no_improve >= self.cfg.patience:
                break

        vehicles = allocate_vehicles_linear(best_ind, self.K, self.cfg.total_vehicles)
        centers = compute_region_centers(best_ind, self.data.x, self.data.y, self.K)
        region_binaries = labels_to_region_binaries(best_ind, self.K)

        return {
            "best_assignments": best_ind,
            "best_cost": best_cost,
            "vehicle_allocation": vehicles,
            "region_centers": centers,
            "region_binaries": region_binaries
        }


# =========================
# 编码转换 & 导出
# =========================
def labels_to_region_binaries(labels: np.ndarray, K: int) -> List[List[int]]:
    """Convert a length-N label vector (values 0..K-1) into K binary masks,
    each a length-N list of 0/1 ints."""
    flat = labels.tolist()
    return [[1 if lab == k else 0 for lab in flat] for k in range(K)]

def export_region_binaries(ids: np.ndarray, binaries: List[List[int]], prefix: str = "region_binaries"):
    """
    Persist the region masks in three formats (in the current directory):
      - {prefix}.json : { "R0":[0/1...], "R1":[...], ... }
      - partition_binaries.csv : N rows with id plus one 0/1 column per region.
      - region_binaries_Rk.txt : one space-separated 0/1 line per region.
    """
    as_mapping = {f"R{k}": mask for k, mask in enumerate(binaries)}
    with open(f"{prefix}.json", "w", encoding="utf-8") as fh:
        json.dump(as_mapping, fh, ensure_ascii=False, indent=2)

    table = pd.DataFrame({"id": ids})
    for k, mask in enumerate(binaries):
        table[f"R{k}"] = mask
    table.to_csv("partition_binaries.csv", index=False, encoding="utf-8-sig")

    for k, mask in enumerate(binaries):
        with open(f"region_binaries_R{k}.txt", "w", encoding="utf-8") as fh:
            fh.write(" ".join(map(str, mask)) + "\n")


# =========================
# KMeans 初始化（可选）
# =========================
def _closest_center(X: np.ndarray, centers: np.ndarray) -> np.ndarray:
    diff = X[:, None, :] - centers[None, :, :]
    dist2 = np.sum(diff*diff, axis=2)
    return np.argmin(dist2, axis=1)

def _compute_centers(X: np.ndarray, labels: np.ndarray, K: int, rng: np.random.RandomState) -> np.ndarray:
    centers = np.zeros((K, 2), dtype=float)
    for k in range(K):
        mask = (labels == k)
        if not np.any(mask):
            idx = rng.randint(0, X.shape[0])
            centers[k] = X[idx]
        else:
            centers[k] = X[mask].mean(axis=0)
    return centers

def run_kmeans(X: np.ndarray, K: int, max_iter: int = 30, n_init: int = 3, seed: Optional[int] = 42) -> np.ndarray:
    """Lightweight Lloyd's k-means over (N, 2) points.

    Runs `n_init` restarts with random point seeding and returns the label
    vector of the restart with the smallest inertia (sum of squared
    point-to-center distances).
    """
    rng = np.random.RandomState(seed)
    best_labels = None
    best_inertia = float("inf")
    for _ in range(max(1, n_init)):
        # Seed centers at K distinct data points.
        picks = rng.choice(X.shape[0], K, replace=False)
        centers = X[picks].copy()
        labels = _closest_center(X, centers)
        for _ in range(max_iter):
            centers = _compute_centers(X, labels, K, rng)
            relabeled = _closest_center(X, centers)
            if np.array_equal(relabeled, labels):
                break  # converged
            labels = relabeled

        residual = X - centers[labels]
        inertia = float((residual * residual).sum())
        if inertia < best_inertia:
            best_inertia = inertia
            best_labels = labels.copy()

    return best_labels

def make_kmeans_population(data: TaskData, cfg: "GAConfig") -> List[np.ndarray]:
    """
    Build a batch of KMeans-seeded individuals; all but the first receive a
    small random relabeling perturbation to add diversity.
    """
    coords = np.stack([data.x, data.y], axis=1)
    seed_labels = run_kmeans(coords, cfg.K, max_iter=cfg.kmeans_max_iter, n_init=cfg.kmeans_n_init, seed=cfg.seed)

    def jitter(labels: np.ndarray, p: float) -> np.ndarray:
        # Flip each position independently with probability p to some other
        # region label.
        out = labels.copy()
        flips = np.where(np.random.rand(out.size) < p)[0]
        for i in flips:
            alternatives = [r for r in range(cfg.K) if r != out[i]]
            out[i] = random.choice(alternatives)
        return out

    population = [seed_labels]
    quota = max(1, int(cfg.pop_size * cfg.kmeans_frac))
    while len(population) < quota:
        population.append(jitter(seed_labels, cfg.kmeans_perturb))
    return population


# =========================
# CLI
# =========================
@dataclass
class CLIArgs:
    """Typed snapshot of the parsed command-line arguments.

    Field names mirror the GAConfig / SurrogateParams destinations; see
    main() for each field's corresponding --flag and default value.
    """
    excel: str             # path to the data Excel file
    sheet: str             # worksheet name (Case1..Case10)
    K: int                 # number of regions
    T: int                 # total vehicle count
    pop: int               # GA population size
    gens: int              # GA generations
    pc: float              # crossover rate
    pm: float              # mutation rate
    elite: int             # elites kept per generation
    seed: int
    patience: int          # early-stop patience (generations)
    init: str              # population init strategy
    kmeans_frac: float
    kmeans_max_iter: int
    kmeans_n_init: int
    kmeans_perturb: float
    speed_kmh: float
    service_min: float
    eta: float
    dist_factor: float
    cap_kg: float
    export_csv: bool       # write partition_result.csv
    export_binaries: bool  # write region binary encodings (JSON/CSV/TXT)


def main():
    """CLI entry point: parse arguments, load the case sheet, run the
    upper-level region GA, then print and optionally export the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--excel", type=str, default="附件一_A_data.xlsx", help="数据 Excel 路径")
    parser.add_argument("--sheet", type=str, default="Case1", help="工作表名（Case1..Case10）")
    parser.add_argument("--K", type=int, default=6)
    parser.add_argument("--T", type=int, default=20)
    parser.add_argument("--pop", type=int, default=80)
    parser.add_argument("--gens", type=int, default=200)
    parser.add_argument("--pc", type=float, default=0.8, help="交叉率")
    parser.add_argument("--pm", type=float, default=0.01, help="变异率")
    parser.add_argument("--elite", type=int, default=2)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--patience", type=int, default=50)

    # Initialization strategy & KMeans parameters
    parser.add_argument("--init", type=str, default="kmeans+random", choices=["random", "kmeans", "kmeans+random"])
    parser.add_argument("--kmeans-frac", type=float, default=0.5)
    parser.add_argument("--kmeans-max-iter", type=int, default=30)
    parser.add_argument("--kmeans-n-init", type=int, default=3)
    parser.add_argument("--kmeans-perturb", type=float, default=0.08)

    # Empirical (surrogate) parameters
    parser.add_argument("--speed", type=float, default=40.0)
    parser.add_argument("--service", type=float, default=0.0)
    parser.add_argument("--eta", type=float, default=0.4)
    parser.add_argument("--c", type=float, default=1.0)
    parser.add_argument("--cap", type=float, default=400.0)

    # Export options
    parser.add_argument("--export-csv", action="store_true")
    parser.add_argument("--export-binaries", action="store_true")

    args = parser.parse_args()
    # Repack the namespace into the typed CLIArgs container.
    args_ns = CLIArgs(
        excel=args.excel, sheet=args.sheet, K=args.K, T=args.T,
        pop=args.pop, gens=args.gens, pc=args.pc, pm=args.pm, elite=args.elite,
        seed=args.seed, patience=args.patience,
        init=args.init, kmeans_frac=args.kmeans_frac, kmeans_max_iter=args.kmeans_max_iter,
        kmeans_n_init=args.kmeans_n_init, kmeans_perturb=args.kmeans_perturb,
        speed_kmh=args.speed, service_min=args.service, eta=args.eta,
        dist_factor=args.c, cap_kg=args.cap,
        export_csv=args.export_csv, export_binaries=args.export_binaries
    )

    data = load_tasks_from_excel(args_ns.excel, args_ns.sheet)

    cfg = GAConfig(
        K=args_ns.K, total_vehicles=args_ns.T, pop_size=args_ns.pop, generations=args_ns.gens,
        crossover_rate=args_ns.pc, mutation_rate=args_ns.pm, elite_num=args_ns.elite,
        seed=args_ns.seed, patience=args_ns.patience,
        init_strategy=args_ns.init, kmeans_frac=args_ns.kmeans_frac,
        kmeans_max_iter=args_ns.kmeans_max_iter, kmeans_n_init=args_ns.kmeans_n_init,
        kmeans_perturb=args_ns.kmeans_perturb,
        speed_kmh=args_ns.speed_kmh, service_min=args_ns.service_min,
        eta=args_ns.eta, dist_factor=args_ns.dist_factor, cap_kg=args_ns.cap_kg
    )

    # Pass empirical parameters through (used when building GA_TS instances).
    SUR_PARAMS.speed_kmh = args_ns.speed_kmh
    SUR_PARAMS.service_min = args_ns.service_min
    SUR_PARAMS.multi_vehicle_eta = args_ns.eta
    SUR_PARAMS.dist_factor = args_ns.dist_factor
    SUR_PARAMS.vehicle_cap_kg = None if (args_ns.cap_kg is None or args_ns.cap_kg <= 0) else args_ns.cap_kg

    ga = GeneticRegionPartitioner(cfg, data, evaluator=evaluate_partition)
    result = ga.run()
    # —— Print total default cost over the six regions (exact GA_TS evaluation).
    # NOTE(review): this re-runs the expensive evaluator once more on the
    # best individual rather than reusing the GA's cached best cost.
    total_penalty = evaluate_partition(
        result["best_assignments"],
        result["vehicle_allocation"],
        result["region_centers"],
        data
    )
    print(f"六个区域总违约成本之和: {total_penalty:.2f} 元")

    # Print the main results
    print("\n=== GA Done (Problem 1 / Upper-Level) ===")
    print("Best surrogate cost (for GA selection):", result["best_cost"])
    print("Vehicle allocation per region:", result["vehicle_allocation"].tolist())
    print("Region centers:\n", result["region_centers"])

    # Optional export: task -> region label table
    if args_ns.export_csv:
        df_out = pd.DataFrame({
            "task_id": data.ids,
            "region_id": result["best_assignments"]
        })
        df_out.to_csv("partition_result.csv", index=False, encoding="utf-8-sig")
        print("Saved: partition_result.csv")

    # Optional export: region binary encodings (JSON/CSV/TXT)
    if args_ns.export_binaries:
        export_region_binaries(data.ids, result["region_binaries"], prefix="region_binaries")
        print("Saved: region_binaries.json, partition_binaries.csv, region_binaries_R{k}.txt")

if __name__ == "__main__":
    main()
