from __future__ import annotations
import argparse
import random
import json
from dataclasses import dataclass
from typing import Callable, Dict, List, Tuple, Optional

try:
    import pandas as pd
    import numpy as np
except Exception as e:
    raise RuntimeError("需要 pandas 与 numpy 环境") from e


# =========================
# 数据结构
# =========================

@dataclass
class TaskData:
    """Bundle of per-task delivery data (column names follow 附件一_A_data.xlsx).

    All arrays are parallel: index i describes task i across every field
    (see load_tasks_from_excel, which builds them from one DataFrame).
    """
    ids: np.ndarray            # shape (N,), task id (1-based)
    x: np.ndarray              # shape (N,), x coordinate (km)
    y: np.ndarray              # shape (N,), y coordinate (km)
    weight: np.ndarray         # shape (N,), load weight (kg)
    time_limit: np.ndarray     # shape (N,), required delivery deadline (minutes)
    penalty_rate: np.ndarray   # shape (N,), lateness penalty (yuan per minute)

    @property
    def N(self) -> int:
        # Number of tasks (length of every parallel array).
        return self.ids.shape[0]


def load_tasks_from_excel(path: str, sheet: str = "Case1") -> TaskData:
    """Read one case worksheet of 附件一_A_data.xlsx into a TaskData bundle.

    Raises ValueError when a required column is absent from the sheet.
    """
    df = pd.read_excel(path, sheet_name=sheet)

    required = ['任务编号', 'x坐标 (km)', 'y坐标 (km)', '载重 (kg)', '规定配送时间 (分钟)', '延迟处罚成本 (元/分钟)']
    missing = [c for c in required if c not in df.columns]
    if missing:
        # Report the first absent column (same behavior as a column-by-column scan).
        raise ValueError(f"Excel 缺少列: {missing[0]}")

    def col(name: str, dtype) -> np.ndarray:
        # Pull one column out of the frame as a typed numpy array.
        return df[name].to_numpy(dtype=dtype)

    return TaskData(
        ids=col('任务编号', int),
        x=col('x坐标 (km)', float),
        y=col('y坐标 (km)', float),
        weight=col('载重 (kg)', float),
        time_limit=col('规定配送时间 (分钟)', float),
        penalty_rate=col('延迟处罚成本 (元/分钟)', float),
    )


# =========================
# 车辆线性分配（每区至少1）
# =========================

def allocate_vehicles_linear(assignments: np.ndarray, K: int, total: int) -> np.ndarray:
    """Distribute vehicles over regions proportionally to task counts.

    Uses Hamilton's largest-remainder method: every region is guaranteed at
    least one vehicle and the result sums to exactly ``total``.

    assignments: shape (N,), region labels in 0..K-1
    Returns: shape (K,), integer vehicle counts per region.
    Raises ValueError when total < K.
    """
    if total < K:
        raise ValueError("总车辆数必须 >= 区域数 K")

    n_tasks = assignments.shape[0]
    # Tasks per region; minlength pads regions that received no task.
    counts = np.bincount(assignments, minlength=K)[:K]

    alloc = np.ones(K, dtype=int)  # guaranteed floor: one vehicle per region
    spare = total - K
    if spare == 0:
        return alloc

    # Proportional quota for the spare vehicles; integer part first.
    quota = (counts / max(n_tasks, 1)) * spare
    whole = np.floor(quota).astype(int)
    alloc += whole

    # Hand out the leftover vehicles to the largest fractional remainders.
    leftover = spare - whole.sum()
    for region in np.argsort(-(quota - whole))[:leftover]:
        alloc[region] += 1

    return alloc


# =========================
# 区域中心（重心）
# =========================

def compute_region_centers(assignments: np.ndarray, x: np.ndarray, y: np.ndarray, K: int) -> np.ndarray:
    """Return the (K, 2) centroid of each region (mean x, mean y per label).

    The surrounding pipeline repairs individuals so no region is empty;
    an empty region would yield NaN coordinates.
    """
    centers = np.empty((K, 2), dtype=float)
    for region in range(K):
        sel = assignments == region
        centers[region] = (x[sel].mean(), y[sel].mean())
    return centers


# =========================
# K-means (no sklearn)
# =========================

def _kmeans_plus_plus_init(X: np.ndarray, K: int, rng: np.random.RandomState) -> np.ndarray:
    """k-means++ 初始化中心，X shape (N,2)"""
    N = X.shape[0]
    centers = np.empty((K, 2), dtype=float)
    idx0 = rng.randint(0, N)
    centers[0] = X[idx0]
    d2 = np.full(N, np.inf)
    for k in range(1, K):
        d2 = np.minimum(d2, np.sum((X - centers[k-1])**2, axis=1))
        probs = d2 / d2.sum()
        idx = rng.choice(N, p=probs)
        centers[k] = X[idx]
    return centers

def _assign_labels(X: np.ndarray, centers: np.ndarray) -> np.ndarray:
    diff = X[:, None, :] - centers[None, :, :]  # (N,K,2)
    dist2 = np.sum(diff*diff, axis=2)           # (N,K)
    return np.argmin(dist2, axis=1)

def _compute_centers(X: np.ndarray, labels: np.ndarray, K: int, rng: np.random.RandomState) -> np.ndarray:
    centers = np.zeros((K, 2), dtype=float)
    for k in range(K):
        mask = (labels == k)
        if not np.any(mask):
            idx = rng.randint(0, X.shape[0])
            centers[k] = X[idx]
        else:
            centers[k] = X[mask].mean(axis=0)
    return centers

def run_kmeans(X: np.ndarray, K: int, max_iter: int = 30, n_init: int = 3, seed: Optional[int] = 42) -> np.ndarray:
    """Lloyd's algorithm with k-means++ seeding, restarted ``n_init`` times.

    Returns the label vector of the restart with the smallest within-cluster
    sum of squares (inertia).
    """
    rng = np.random.RandomState(seed)
    best_sse = np.inf
    best = None

    for _ in range(n_init):
        centers = _kmeans_plus_plus_init(X, K, rng)
        labels = _assign_labels(X, centers)
        for _ in range(max_iter):
            centers = _compute_centers(X, labels, K, rng)
            relabeled = _assign_labels(X, centers)
            if np.array_equal(relabeled, labels):
                break  # converged: assignment stable
            labels = relabeled

        sse = float(((X - centers[labels]) ** 2).sum())
        if sse < best_sse:
            best_sse = sse
            best = labels.copy()

    return best

def make_kmeans_population(data: TaskData, cfg: "GAConfig") -> List[np.ndarray]:
    """Seed a fraction of the GA population from one KMeans labelling.

    The base labelling plus lightly perturbed copies (for diversity) are
    produced, then every individual is repaired so that all K labels occur.
    """
    coords = np.stack([data.x, data.y], axis=1)  # (N, 2)
    seed_labels = run_kmeans(coords, cfg.K, max_iter=cfg.kmeans_max_iter,
                             n_init=cfg.kmeans_n_init, seed=cfg.seed)

    def jitter(labels: np.ndarray, p: float) -> np.ndarray:
        # Flip each gene with probability p to a different region label.
        out = labels.copy()
        flips = np.random.rand(out.size) < p
        if flips.any():
            for i in np.where(flips)[0]:
                alternatives = [k for k in range(cfg.K) if k != out[i]]
                out[i] = random.choice(alternatives)
        return out

    want = max(1, int(cfg.pop_size * cfg.kmeans_frac))
    individuals = [seed_labels]
    while len(individuals) < want:
        individuals.append(jitter(seed_labels, cfg.kmeans_perturb_prob))
    return [GeneticRegionPartitioner._repair_static(ind, cfg.K) for ind in individuals]


# =========================
# 粗糙的二层成本 Surrogate
# =========================

@dataclass
class SurrogateParams:
    """Tunable parameters of the rough lower-level cost surrogate.

    NOTE(review): MIN_TASKS_PER_TRIP, MAX_TASKS_PER_TRIP and
    SINGLE_TASK_TRIP_PENALTY are declared but never read by
    evaluate_partition in this file — confirm whether they are used elsewhere.
    """
    speed_kmh: float = 40.0          # average vehicle speed (km/h)
    service_min: float = 60.0         # fixed service time per task (minutes)
                                      # NOTE(review): CLI --service_min defaults
                                      # to 300.0 — confirm which is intended.
    dist_factor: float = 1.0         # empirical route-length coefficient c
    multi_vehicle_eta: float = 0.4   # multi-vehicle efficiency gain eta
    vehicle_cap_kg: Optional[float] = None  # per-vehicle capacity (kg); None = ignore capacity
    big_empty_penalty: float = 1e6   # huge penalty for an empty region (normally never triggered)
    MIN_TASKS_PER_TRIP: int = 2
    MAX_TASKS_PER_TRIP: int = 10
    SINGLE_TASK_TRIP_PENALTY: float = 200.0  # mild penalty for single-task trips (tunable)

# Module-level parameter instance; main() may replace it from CLI arguments.
SUR_PARAMS = SurrogateParams()  # 全局可被命令行覆盖

def evaluate_partition(
    assignments: np.ndarray,
    vehicle_alloc: np.ndarray,
    centers: np.ndarray,
    data: TaskData
) -> float:
    """
    Rough surrogate of the lower-level delivery cost (smaller is better).

    Per region:
    - route length is approximated by the "star" upper bound
      2 * sum(distance to center), scaled by dist_factor and shortened by a
      multi-vehicle efficiency factor 1 + eta * (veh - 1);
    - (optional) if the total load exceeds the fleet capacity, travel time is
      inflated proportionally to mimic extra trips;
    - tasks are served in ascending deadline order, dealt round-robin to the
      region's vehicles; arrival times are estimated from a per-route "slot"
      (travel share + service time) and lateness is charged at each task's
      penalty rate.

    Fix over the original: the unused locals ``travel_min_per_task`` and
    ``per_task_slot`` are removed, and the weight slice is only taken in the
    capacity branch that reads it — behavior is unchanged.

    Parameters
    ----------
    assignments : shape (N,), region label 0..K-1 per task
    vehicle_alloc : shape (K,), vehicles per region; assumed >= 1 everywhere
        (guaranteed by allocate_vehicles_linear — veh == 0 would divide by zero)
    centers : shape (K, 2), region center coordinates
    data : task coordinates, deadlines, weights and penalty rates

    Returns
    -------
    float : total estimated lateness penalty; each empty region adds
    SUR_PARAMS.big_empty_penalty (normally prevented by the GA repair step).
    """
    v_km_per_min = SUR_PARAMS.speed_kmh / 60.0
    total_penalty = 0.0

    K = vehicle_alloc.size
    for k in range(K):
        veh = int(vehicle_alloc[k])
        idx = np.where(assignments == k)[0]
        n = idx.size
        if n == 0:
            total_penalty += SUR_PARAMS.big_empty_penalty
            continue

        cx, cy = centers[k]
        xk = data.x[idx]
        yk = data.y[idx]
        tk = data.time_limit[idx]      # deadlines (minutes)

        # --- distance and approximate route length ---
        d = np.hypot(xk - cx, yk - cy)              # distance to region center
        star_len = 2.0 * float(d.sum())             # per-task round-trip upper bound
        eff = 1.0 + SUR_PARAMS.multi_vehicle_eta * max(veh - 1, 0)
        L_region = SUR_PARAMS.dist_factor * star_len / eff  # shortened total route length (km)

        # --- capacity-induced extra trips (optional) ---
        if SUR_PARAMS.vehicle_cap_kg is not None:
            cap_total = veh * SUR_PARAMS.vehicle_cap_kg
            total_w = float(data.weight[idx].sum())
            overload_ratio = max(1.0, total_w / max(cap_total, 1e-6))
        else:
            overload_ratio = 1.0

        # Total driving time for the region (minutes).
        travel_min_total = (L_region / max(v_km_per_min, 1e-9)) * overload_ratio

        # --- deadline-first ordering + round-robin over vehicles ---
        order = np.argsort(tk)  # earliest deadline served first
        routes = [[] for _ in range(veh)]
        for rank, j in enumerate(order):
            routes[rank % veh].append(j)

        # Each route carries an equal share travel_min_total / veh.
        for r in routes:
            m = len(r)
            if m == 0:
                continue
            route_travel_min = travel_min_total / veh
            route_travel_per_task = route_travel_min / m
            slot = route_travel_per_task + SUR_PARAMS.service_min
            for pos, j in enumerate(r):
                i_global = idx[j]  # map back to the global task index
                arrival = pos * slot
                late = max(0.0, arrival - data.time_limit[i_global])
                total_penalty += data.penalty_rate[i_global] * late

    return float(total_penalty)


# =========================
# 遗传算法主体
# =========================

@dataclass
class GAConfig:
    """Hyper-parameters of the upper-level GA and its population seeding."""
    K: int = 6                    # number of regions
    total_vehicles: int = 20      # total fleet size (must be >= K)
    pop_size: int = 80
    generations: int = 200
    crossover_rate: float = 0.8
    mutation_rate: float = 0.01   # per-gene mutation probability
    elite_num: int = 2            # individuals copied unchanged each generation
    seed: Optional[int] = 42      # None = do not seed the global RNGs
    patience: int = 50   # stop early after this many generations without improvement

    # ----- initialization -----
    init_strategy: str = "kmeans+random"   # one of ["random", "kmeans", "kmeans+random"]
    kmeans_frac: float = 0.5               # fraction of the population seeded from KMeans (rest random)
    kmeans_max_iter: int = 30              # Lloyd iterations per restart
    kmeans_n_init: int = 3                 # restarts with different seeds, best inertia kept
    kmeans_perturb_prob: float = 0.01      # per-gene flip probability on KMeans labels (diversity)


class GeneticRegionPartitioner:
    """Genetic algorithm over region-label chromosomes.

    An individual is a length-N integer vector with values in 0..K-1 (the
    region of each task). Fitness is 1 / (1 + surrogate_cost), so minimizing
    the surrogate cost maximizes fitness.

    NOTE(review): the constructor seeds the *global* `random` and `np.random`
    states when config.seed is set, which affects other code sharing them.
    """

    def __init__(self, config: GAConfig, data: TaskData,
                 evaluator: Callable[[np.ndarray, np.ndarray, np.ndarray, TaskData], float] = evaluate_partition):
        self.cfg = config
        self.data = data
        self.K = config.K
        # Seed the global RNGs for reproducible runs.
        if config.seed is not None:
            random.seed(config.seed)
            np.random.seed(config.seed)
        self.evaluator = evaluator

    @staticmethod
    def _repair_static(ind: np.ndarray, K: int) -> np.ndarray:
        """Repair `ind` IN PLACE so every label 0..K-1 occurs at least once.

        Each missing label overwrites one position stolen from a donor region
        that still holds more than one task (or a random position if no such
        donor exists). Returns the same array object.
        """
        present = set(ind.tolist())
        missing = [k for k in range(K) if k not in present]
        if not missing:
            return ind
        for k in missing:
            # Recount each pass: earlier repairs change the distribution.
            counts = np.array([(ind == j).sum() for j in range(K)], dtype=int)
            candidates_regions = np.where(counts > 1)[0].tolist()
            if len(candidates_regions) == 0:
                # No donor with >1 task: overwrite an arbitrary position.
                pos = np.random.randint(0, ind.size)
            else:
                donor_k = random.choice(candidates_regions)
                pos_list = np.where(ind == donor_k)[0]
                pos = int(random.choice(pos_list))
            ind[pos] = k
        return ind

    def _repair(self, ind: np.ndarray) -> np.ndarray:
        """Instance shortcut for `_repair_static` with this partitioner's K."""
        return self._repair_static(ind, self.K)

    def _random_individual(self) -> np.ndarray:
        """Uniformly random labels, repaired to cover all K regions."""
        ind = np.random.randint(0, self.K, size=self.data.N, dtype=int)
        return self._repair(ind)

    def _fitness(self, ind: np.ndarray) -> float:
        """Convert the smaller-is-better cost into a larger-is-better fitness."""
        vehicle_alloc = allocate_vehicles_linear(ind, self.K, self.cfg.total_vehicles)
        centers = compute_region_centers(ind, self.data.x, self.data.y, self.K)
        cost = self.evaluator(ind, vehicle_alloc, centers, self.data)
        if cost < 0:
            cost = 0.0
        return 1.0 / (1.0 + cost)

    def _roulette(self, population: List[np.ndarray], fits: List[float]) -> np.ndarray:
        """Fitness-proportionate (roulette-wheel) selection; returns a copy."""
        s = sum(fits)
        if s <= 0:
            # Degenerate wheel (all-zero fitness): uniform fallback.
            return random.choice(population).copy()
        r = random.random() * s
        acc = 0.0
        for ind, f in zip(population, fits):
            acc += f
            if acc >= r:
                return ind.copy()
        return population[-1].copy()

    def _crossover(self, p1: np.ndarray, p2: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Single-point crossover with probability crossover_rate, then repair."""
        if random.random() > self.cfg.crossover_rate:
            return p1.copy(), p2.copy()
        n = p1.size
        point = random.randrange(1, n)  # cut point in [1, n-1]
        c1 = np.concatenate([p1[:point], p2[point:]])
        c2 = np.concatenate([p2[:point], p1[point:]])
        return self._repair(c1), self._repair(c2)

    def _mutate(self, ind: np.ndarray) -> np.ndarray:
        """Reassign each gene to a different region with prob mutation_rate."""
        for i in range(ind.size):
            if random.random() < self.cfg.mutation_rate:
                old = ind[i]
                choices = list(range(self.K))
                choices.remove(old)
                ind[i] = random.choice(choices)
        return self._repair(ind)

    def run(self) -> Dict:
        """Run the GA and return the best partition plus derived artefacts.

        Returns a dict with keys: best_assignments, best_cost,
        vehicle_allocation, region_centers, region_binaries.
        """
        # -------- Initialize population: kmeans / random / mixed --------
        pop: List[np.ndarray] = []
        if self.cfg.init_strategy in ("kmeans", "kmeans+random"):
            pop.extend(make_kmeans_population(self.data, self.cfg))
        while len(pop) < self.cfg.pop_size:
            pop.append(self._random_individual())

        fits = [self._fitness(ind) for ind in pop]
        best_ind = pop[int(np.argmax(fits))].copy()
        best_fit = max(fits)
        # Invert the fitness back to the surrogate cost scale.
        best_cost = (1.0 / best_fit) - 1.0
        no_improve = 0

        for _ in range(self.cfg.generations):
            # Elitism: carry the elite_num fittest individuals over unchanged.
            elite_idxs = list(np.argsort(fits))[-self.cfg.elite_num:]
            elites = [pop[i].copy() for i in elite_idxs]

            new_pop: List[np.ndarray] = []
            while len(new_pop) < self.cfg.pop_size - self.cfg.elite_num:
                p1 = self._roulette(pop, fits)
                p2 = self._roulette(pop, fits)
                c1, c2 = self._crossover(p1, p2)
                c1 = self._mutate(c1)
                c2 = self._mutate(c2)
                new_pop.extend([c1, c2])

            new_pop = new_pop[:self.cfg.pop_size - self.cfg.elite_num] + elites
            pop = new_pop
            fits = [self._fitness(ind) for ind in pop]

            gen_best_idx = int(np.argmax(fits))
            gen_best_fit = fits[gen_best_idx]
            if gen_best_fit > best_fit:
                best_fit = gen_best_fit
                best_ind = pop[gen_best_idx].copy()
                best_cost = (1.0 / best_fit) - 1.0
                no_improve = 0
            else:
                no_improve += 1

            # Early stop after `patience` stagnant generations.
            if no_improve >= self.cfg.patience:
                break

        vehicles = allocate_vehicles_linear(best_ind, self.K, self.cfg.total_vehicles)
        centers = compute_region_centers(best_ind, self.data.x, self.data.y, self.K)
        region_binaries = labels_to_region_binaries(best_ind, self.K)

        return {
            "best_assignments": best_ind,                  # shape (N,)
            "best_cost": best_cost,                        # depends on surrogate params
            "vehicle_allocation": vehicles,                # shape (K,)
            "region_centers": centers,                     # shape (K, 2)
            "region_binaries": region_binaries            # List[List[int]], K region encodings
        }


# =========================
# 编码转换 & 导出
# =========================

def labels_to_region_binaries(labels: np.ndarray, K: int) -> List[List[int]]:
    """Convert a length-N label vector (values 0..K-1) into K binary
    membership vectors, each a length-N list of 0/1 ints."""
    return [(labels == k).astype(int).tolist() for k in range(K)]

def export_region_binaries(
    ids: np.ndarray,
    region_binaries: List[List[int]],
    prefix: str = "region_binaries"
) -> None:
    """
    Write the K region membership vectors in three formats:
    1) JSON: {prefix}.json           -> {"region_0": [...], ...}
    2) CSV : partition_binaries.csv  -> task_id, region_0, ..., region_{K-1}
    3) TXT : {prefix}_R{k}.txt       -> one space-separated 0/1 line per region
    """
    K = len(region_binaries)

    # Single JSON object holding every region.
    with open(f"{prefix}.json", "w", encoding="utf-8") as f:
        json.dump({f"region_{k}": region_binaries[k] for k in range(K)}, f, ensure_ascii=False)

    # One CSV: task_id column followed by one 0/1 column per region.
    table = pd.DataFrame({"task_id": ids})
    for k in range(K):
        table[f"region_{k}"] = np.array(region_binaries[k], dtype=int)
    table.to_csv("partition_binaries.csv", index=False, encoding="utf-8-sig")

    # One plain-text file per region.
    for k, row in enumerate(region_binaries):
        with open(f"{prefix}_R{k}.txt", "w", encoding="utf-8") as f:
            f.write(" ".join(str(v) for v in row))


# =========================
# CLI & 主程序
# =========================

@dataclass
class CLIArgs:
    """Typed mirror of the command-line interface built in main().

    NOTE(review): this dataclass is never instantiated in this file —
    main() reads the argparse Namespace directly. Kept as a typed record of
    the expected CLI surface; confirm whether it is used elsewhere.
    """
    excel: str             # --excel: path to the data Excel file
    sheet: str             # --sheet: worksheet name (Case1..Case10)
    K: int                 # --K: number of regions
    T: int                 # --T: total vehicle count
    pop: int               # --pop: GA population size
    gens: int              # --gens: GA generations
    pc: float              # --pc: crossover rate
    pm: float              # --pm: mutation rate
    elite: int             # --elite: elite count
    seed: int              # --seed: RNG seed
    patience: int          # --patience: early-stop patience
    init: str              # --init: population init strategy
    kmeans_frac: float     # --kmeans_frac: KMeans-seeded population fraction
    kmeans_max_iter: int   # --kmeans_max_iter: Lloyd iterations
    kmeans_n_init: int     # --kmeans_n_init: KMeans restarts
    kmeans_perturb: float  # --kmeans_perturb: label perturbation probability
    speed_kmh: float       # --speed_kmh: surrogate vehicle speed
    service_min: float     # --service_min: surrogate per-task service time
    eta: float             # --eta: multi-vehicle efficiency gain
    dist_factor: float     # --dist_factor: route-length coefficient
    cap_kg: float          # --cap_kg: vehicle capacity; < 0 disables
    export_csv: bool       # --export_csv: write partition_result.csv
    export_binaries: bool  # --export_binaries: write JSON/CSV/TXT encodings

def main():
    """CLI entry point: load data, run the GA, print and optionally export results."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--excel", type=str, default="附件一_A_data.xlsx", help="数据 Excel 路径")
    parser.add_argument("--sheet", type=str, default="Case1", help="工作表名（Case1..Case10）")
    parser.add_argument("--K", type=int, default=6)
    parser.add_argument("--T", type=int, default=20)
    parser.add_argument("--pop", type=int, default=80)
    parser.add_argument("--gens", type=int, default=200)
    parser.add_argument("--pc", type=float, default=0.8, help="交叉率")
    parser.add_argument("--pm", type=float, default=0.01, help="变异率")
    parser.add_argument("--elite", type=int, default=2)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--patience", type=int, default=50)

    # Initialization strategy & KMeans parameters
    parser.add_argument("--init", type=str, default="kmeans+random", choices=["random", "kmeans", "kmeans+random"])
    parser.add_argument("--kmeans_frac", type=float, default=0.5)
    parser.add_argument("--kmeans_max_iter", type=int, default=30)
    parser.add_argument("--kmeans_n_init", type=int, default=3)
    parser.add_argument("--kmeans_perturb", type=float, default=0.01)

    # Surrogate parameters
    # NOTE(review): --service_min defaults to 300.0 here while
    # SurrogateParams.service_min defaults to 60.0 — confirm which is intended.
    parser.add_argument("--speed_kmh", type=float, default=40.0)
    parser.add_argument("--service_min", type=float, default=300.0)
    parser.add_argument("--eta", type=float, default=0.4)
    parser.add_argument("--dist_factor", type=float, default=1.0)
    parser.add_argument("--cap_kg", type=float, default=-1.0, help="单车容量(kg)，<0 表示不启用")

    # Export options
    parser.add_argument("--export_csv", action="store_true", help="导出任务-区域划分到 partition_result.csv")
    parser.add_argument("--export_binaries", action="store_true", help="导出区域二进制编码到 JSON/CSV/TXT")

    args_ns = parser.parse_args()

    # Load the task data
    data = load_tasks_from_excel(args_ns.excel, args_ns.sheet)

    # Override the module-level surrogate parameters from the CLI
    global SUR_PARAMS
    SUR_PARAMS = SurrogateParams(
        speed_kmh=args_ns.speed_kmh,
        service_min=args_ns.service_min,
        dist_factor=args_ns.dist_factor,
        multi_vehicle_eta=args_ns.eta,
        vehicle_cap_kg=None if args_ns.cap_kg is None or args_ns.cap_kg < 0 else args_ns.cap_kg,
    )

    # GA configuration
    cfg = GAConfig(
        K=args_ns.K,
        total_vehicles=args_ns.T,
        pop_size=args_ns.pop,
        generations=args_ns.gens,
        crossover_rate=args_ns.pc,
        mutation_rate=args_ns.pm,
        elite_num=args_ns.elite,
        seed=args_ns.seed,
        patience=args_ns.patience,
        init_strategy=args_ns.init,
        kmeans_frac=args_ns.kmeans_frac,
        kmeans_max_iter=args_ns.kmeans_max_iter,
        kmeans_n_init=args_ns.kmeans_n_init,
        kmeans_perturb_prob=args_ns.kmeans_perturb,
    )

    ga = GeneticRegionPartitioner(cfg, data, evaluator=evaluate_partition)
    result = ga.run()

    # Print the main results
    print("\n=== GA Done (Problem 1 / Upper-Level) ===")
    print(f"Best surrogate cost: {result['best_cost']:.6f}")
    print(f"Vehicle allocation (K={cfg.K}, T={cfg.total_vehicles}): {result['vehicle_allocation'].tolist()}")
    print("Region centers (x,y):")
    for k, (cx, cy) in enumerate(result["region_centers"]):
        print(f"  Region {k}: ({cx:.3f}, {cy:.3f})")

    # Print the full binary encoding of each of the K regions
    print("\n=== FULL Region Binaries (each length = N) ===")
    for k, arr in enumerate(result["region_binaries"]):
        print(f"Region {k}: {arr}")

    # Optional export: task -> region labels
    if args_ns.export_csv:
        df_out = pd.DataFrame({
            "task_id": data.ids,
            "region_id": result["best_assignments"]
        })
        df_out.to_csv("partition_result.csv", index=False, encoding="utf-8-sig")
        print("Saved: partition_result.csv")

    # Optional export: region binary encodings (JSON/CSV/TXT)
    if args_ns.export_binaries:
        export_region_binaries(data.ids, result["region_binaries"], prefix="region_binaries")
        print("Saved: region_binaries.json, partition_binaries.csv, region_binaries_R{k}.txt")

# Script entry point; importing this file as a module does not run the GA.
if __name__ == "__main__":
    main()
