import os, re, math, numpy as np, pandas as pd, matplotlib.pyplot as plt
from scipy.spatial import cKDTree  
from collections import Counter  
from matplotlib import rcParams  

# Configure matplotlib for Chinese text: use CJK-capable fonts and keep the
# minus sign renderable (it otherwise shows as a box with these fonts).
rcParams['font.sans-serif'] = ['Microsoft YaHei', 'SimHei']  
rcParams['axes.unicode_minus'] = False  

def _parse_coord_str(s):
    # 若输入不是字符串，直接返回 None
    if not isinstance(s, str): 
        return None
    # 正则匹配提取坐标部分，格式类似 "{{x,y}}"
    m = re.search(r"\{([^}]*)\}", s)  
    if not m: 
        return None
    parts = m.group(1).split(",")  
    if len(parts) < 2:  
        return None
    # 将毫米转换为米，返回坐标元组
    return (float(parts[0])/1000.0, float(parts[1])/1000.0)  


def parse_segments_from_sheet(xls_path, sheet):
    """Read one sheet of the coordinate workbook.

    For the "植物" (plants) sheet, returns a DataFrame with columns
    ["x", "y", "r"] (all in metres, radii converted from millimetres).
    For every other sheet, returns a list of polyline segments, each a
    list of (x, y) tuples in metres; rows matching the "{0; n}" marker
    start a new segment.
    """
    df = pd.read_excel(xls_path, sheet_name=sheet)
    if sheet == "植物":
        rows = []
        for _, row in df.iterrows():
            c = _parse_coord_str(row.iloc[0])
            v = row.iloc[1]
            try:
                r = float(v) / 1000.0
            except (TypeError, ValueError):
                try:
                    # Fall back to extracting the first numeric token.
                    r = float(re.findall(r"[+-]?\d*\.?\d+", str(v))[0]) / 1000.0
                except (IndexError, ValueError):
                    r = np.nan
            # "r == r" filters out NaN radii.
            if c and r == r:
                rows.append((c[0], c[1], r))
        return pd.DataFrame(rows, columns=["x", "y", "r"])

    segs, cur = [], []
    for v in df.iloc[:, 0].astype(str).tolist():
        # A "{0; n}" marker row separates consecutive segments.
        if re.search(r"\{0;\s*\d+\}", v):
            if cur:
                segs.append(cur)
                cur = []
            continue
        pt = _parse_coord_str(v)
        if pt:
            cur.append(pt)
    if cur:
        segs.append(cur)
    return segs


def polygon_area(seg):
    """Area of the polygon whose vertices are the (x, y) tuples in *seg*,
    computed with the shoelace formula (vertex order does not matter
    because the absolute value is taken)."""
    xs = np.asarray([p[0] for p in seg])
    ys = np.asarray([p[1] for p in seg])
    cross = np.dot(xs, np.roll(ys, -1)) - np.dot(ys, np.roll(xs, -1))
    return 0.5 * abs(cross)

def perim(seg):
    """Perimeter of the closed polyline *seg*: the sum of consecutive
    vertex distances plus the closing edge back to the first vertex."""
    total = 0.0
    for (xa, ya), (xb, yb) in zip(seg, seg[1:]):
        # Accumulate the Euclidean length of each edge.
        total += math.hypot(xb - xa, yb - ya)
    # Close the ring: last vertex back to the first.
    total += math.hypot(seg[0][0] - seg[-1][0], seg[0][1] - seg[-1][1])
    return total

def shape_complexity(segments):
    """Mean isoperimetric complexity P^2 / (4*pi*A) over the (approximately)
    closed polygons in *segments*; NaN when no polygon qualifies.

    A segment counts as closed when it has more than two vertices and its
    endpoints are less than 1 m apart.  A circle scores 1; larger values
    mean a more convoluted outline.  Near-degenerate polygons (area below
    1e-6) are skipped.
    """
    scores = []
    for seg in segments:
        if len(seg) <= 2:
            continue
        gap = math.hypot(seg[0][0] - seg[-1][0], seg[0][1] - seg[-1][1])
        if gap >= 1.0:
            continue
        area = polygon_area(seg)
        if area > 1e-6:
            scores.append(perim(seg) ** 2 / (4 * math.pi * area))
    return float(np.mean(scores)) if scores else np.nan

def sample_points(segments, step=2.0):
    """Resample each polyline in *segments* at roughly *step*-metre spacing.

    Returns an (N, 2) array (empty (0, 2) array when there are no points).
    Each segment contributes its first vertex, then evenly spaced points
    along every edge; every edge yields at least its endpoint, so vertices
    are always retained.
    """
    samples = []
    for seg in segments:
        samples.append(seg[0])
        for (xa, ya), (xb, yb) in zip(seg, seg[1:]):
            dx, dy = xb - xa, yb - ya
            length = math.hypot(dx, dy)
            # At least one sample per edge so the far endpoint is kept.
            count = max(1, int(length // step))
            for j in range(1, count + 1):
                samples.append((xa + dx * j / count, ya + dy * j / count))
    return np.array(samples) if samples else np.zeros((0, 2))

def majority_smooth(labels, win=5):
    if len(labels) == 0:  
        return labels
    # 计算窗口半长
    k = max(1, win // 2)  
    out = []  
    for i in range(len(labels)):  
        # 确定窗口范围
        L = max(0, i - k)  
        R = min(len(labels), i + k + 1)  
        # 多数投票平滑
        c = Counter(labels[L:R]).most_common(1)[0][0]  
        out.append(c)  
    return out  

#----------------- "Illusory realm" (幻境感) score --------------------
def compute_realm(xls_path, out_dir, name,
                  R=12.0,          # viewshed radius in metres (tunable)
                  step_road=2.0,   # road sampling step (m)
                  theme_win=5,     # smoothing window for the theme sequence
                  save_plots=False):
    """Compute the "illusory realm" raw score for one garden.

    Reads the element sheets from *xls_path*, derives element-distribution,
    openness and theme-sequence features along the road network, writes a
    one-row CSV ``{name}_realm.csv`` into *out_dir*, and returns that row
    as a dict.  When *save_plots* is true, also saves an openness-curve
    figure.
    """
    os.makedirs(out_dir, exist_ok=True)  

    # Load the segment/point data for each element type from Excel.
    roads = parse_segments_from_sheet(xls_path, "道路")  
    buildings = parse_segments_from_sheet(xls_path, "实体建筑")  
    semi_open = parse_segments_from_sheet(xls_path, "半开放建筑")  
    rockeries = parse_segments_from_sheet(xls_path, "假山")  
    waters = parse_segments_from_sheet(xls_path, "水体")  
    plants = parse_segments_from_sheet(xls_path, "植物")  
    


    # 1) Element distribution (area proportions, diversity, shape complexity)
    # Total area per element class.
    water_area = sum(polygon_area(seg) for seg in waters)  
    rock_area = sum(polygon_area(seg) for seg in rockeries)  
    bld_area = sum(polygon_area(seg) for seg in (buildings + semi_open))  
    # Plants are modelled as circles of radius r (DataFrame input only).
    plant_area = float((np.pi * (plants["r"] ** 2)).sum()) if isinstance(plants, pd.DataFrame) else 0.0  
    # Denominator: total effective area, guarded against zero.
    denom = max(1e-6, water_area + rock_area + bld_area + plant_area)  
    # Area proportion of each of the four element classes.
    props = [water_area/denom, rock_area/denom, bld_area/denom, plant_area/denom]  
    # Shannon entropy H and evenness J over the non-zero proportions.
    p = np.array([v for v in props if v > 1e-12])  
    H = -np.sum(p * np.log(p)) if len(p) > 0 else 0.0  
    J = (H / np.log(4.0)) if len(p) > 0 else 0.0  
    # Shape complexity of water bodies and rockeries.
    sc_water = shape_complexity(waters)  
    sc_rock = shape_complexity(rockeries)  

    # Normalise shape complexity: clamp to [1, 2.5], map linearly to [0, 1].
    def sc_norm(x):
        if not (x == x):  # NaN check
            return 0.0
        x = min(max(x, 1.0), 2.5)  
        return (x - 1.0)/(2.5 - 1.0)  
    sc_feat = np.nanmean([sc_norm(sc_water), sc_norm(sc_rock)]) if (sc_water == sc_water or sc_rock == sc_rock) else 0.0  

    # 2) Sample points along the road network.
    road_pts = sample_points(roads, step=step_road)  
    if len(road_pts) == 0:  
        # Degenerate case (no roads): emit a zero row and bail out.
        row = dict(name=name, raw=0, score=0)  
        pd.DataFrame([row]).to_csv(os.path.join(out_dir, f"{name}_realm.csv"), index=False, encoding="utf-8-sig")  
        return row  

    # 3) Element point clouds & KD-trees (for openness / theme labelling).
    water_pts = np.vstack([seg for seg in waters]) if waters else np.zeros((0, 2))  
    solid_pts = np.vstack([seg for seg in (buildings + rockeries)]) if (buildings or rockeries) else np.zeros((0, 2))  
    plant_pts = np.array(plants[["x", "y"]]) if isinstance(plants, pd.DataFrame) else np.zeros((0, 2))  
    tree_w = cKDTree(water_pts) if len(water_pts) > 0 else None  
    tree_s = cKDTree(solid_pts) if len(solid_pts) > 0 else None  
    tree_p = cKDTree(plant_pts) if len(plant_pts) > 0 else None  

    # 4) Openness curve O(x) and open/enclosed state sequence.
    O = []  
    for pnt in road_pts:  
        # Count water / solid points inside the viewshed radius R.
        wc = len(tree_w.query_ball_point(pnt, r=R)) if tree_w else 0  
        sc = len(tree_s.query_ball_point(pnt, r=R)) if tree_s else 0  
        # Openness: +0.5 if water is visible, +0.5 if no solid obstruction.
        val = 0.5 * (1 if wc > 0 else 0) + 0.5 * (1 - (1 if sc > 0 else 0))  
        O.append(val)  
    O = np.array(O)  
    # Classify each sample point as open / enclosed.
    open_state = (O >= 0.6)  
    encl_state = (O <= 0.4)  
    # Number of state transitions along the path.
    alt_open = int(np.sum(open_state[1:] ^ open_state[:-1]) + np.sum(encl_state[1:] ^ encl_state[:-1]))  
    KAI = alt_open * (O.max() - O.min())  
    open_ratio = float(np.mean(open_state))  
    balance = 1.0 - abs(open_ratio - 0.5)/0.5  
    open_mean = float(np.mean(O))  
    # 5) Theme sequence (viewing water / rockery / building / trees).
    def nearest(tree, pnt):
        # Distance to the nearest point in *tree*; inf when tree is empty.
        if tree is None:
            return np.inf
        d, _ = tree.query(pnt, k=1)
        return float(d)
    # Smaller distance dominates; solids get a slight weight (< 1) because
    # they more readily form a scene interface.
    alpha_solid = 0.9
    labels_raw = []
    for pnt in road_pts:
        dw = nearest(tree_w, pnt)
        dr = nearest(tree_s, pnt)  # solids (buildings + rockeries)
        db = dr  # kept for readability: solids are refined into 建/山 below
        dp = nearest(tree_p, pnt)
        # Pick the nearest class first; "solid" is then refined into
        # building vs rockery by comparing their outline point clouds.
        candidates = {
            "观水": dw,
            "观建/山": alpha_solid * dr,
            "观树": dp
        }
        main = min(candidates, key=candidates.get)
        if main == "观建/山":
            # Refine: building or rockery, whichever outline is closer.
            # NOTE(review): these KD-trees are rebuilt for every sample
            # point — hoisting them out of the loop would be much faster.
            d_build = nearest(cKDTree(np.vstack([seg for seg in (buildings + semi_open)] if (buildings or semi_open) else np.zeros((0, 2)))), pnt)
            d_rock = nearest(cKDTree(np.vstack([seg for seg in rockeries] if rockeries else np.zeros((0, 2)))), pnt)
            main = "观建" if d_build <= d_rock else "观山"
        labels_raw.append(main)

    labels = majority_smooth(labels_raw, win=theme_win)

    # Theme statistics.
    uniq = list(dict.fromkeys(labels))  # themes in first-appearance order (currently unused)
    theme_k = len(set(labels))
    # Theme frequencies and normalised entropy.
    c = Counter(labels)
    freqs = np.array([c.get(t, 0) for t in ["观水", "观山", "观建", "观树"]], float)
    if freqs.sum() > 0:
        q = freqs / freqs.sum()
        H_theme = -np.sum(q[q > 0] * np.log(q[q > 0]))
        H_theme_norm = H_theme / np.log(4.0)
    else:
        H_theme_norm = 0.0
    # Theme alternations along the path.
    alt_theme = sum(1 for i in range(1, len(labels)) if labels[i] != labels[i - 1])
    alt_theme_norm = alt_theme / max(1, len(labels) - 1)
    theme_k_norm = min(theme_k, 4) / 4.0
    T = 0.4 * H_theme_norm + 0.3 * theme_k_norm + 0.3 * alt_theme_norm

    # 6) Raw "realm" score (weighted feature sum) & export.
    raw = 0.26 * (H + J) + 0.20 * T + 0.32 * KAI + 0.12 * balance + 0.10 * open_mean + 0.08 * sc_feat

    row = dict(
        name=name, raw=raw,
        H_elem=H, J_elem=J,
        theme_k=theme_k, theme_entropy=H_theme_norm, theme_alt=alt_theme,
        KAI=KAI, open_ratio=open_ratio, balance=balance, open_mean=open_mean,
        sc_water=sc_water, sc_rock=sc_rock
    )
    pd.DataFrame([row]).to_csv(os.path.join(out_dir, f"{name}_realm.csv"), index=False, encoding="utf-8-sig")

    # Optional: save the openness-curve figure.
    if save_plots:
        x = np.arange(len(O))
        plt.figure()
        plt.axhline(y=0, color='gray', linestyle='--')  # reference baseline
        plt.plot(x, O)  # openness curve
        plt.ylim(-0.05, 1.05)
        plt.title(f"{name} 开阔度曲线 O(x)")
        plt.tight_layout()
        plt.savefig(os.path.join(out_dir, f"{name}_openness.png"), dpi=170)
        plt.close()

    return row

# ———— Main program: batch-score ten gardens, calibrating 寄畅园 = 100 ————
if __name__ == "__main__":
    # Garden name -> path of its coordinate workbook.
    file_map = {
    "拙政园": r"data/拙政园数据坐标.xlsx",
    "留园": r"data/留园数据坐标.xlsx",
    "寄畅园": r"data/寄畅园数据坐标.xlsx",
    "瞻园": r"data/瞻园数据坐标.xlsx",
    "豫园": r"data/豫园数据坐标.xlsx",
    "秋霞园": r"data/秋霞园数据坐标.xlsx",
    "沈园": r"data/沈园数据坐标.xlsx",
    "怡园": r"data/怡园数据坐标.xlsx",
    "耦园": r"data/耦园数据坐标.xlsx",
    "绮园": r"data/绮园数据坐标.xlsx",
    }

    out_dir = "./out_problem2"
    os.makedirs(out_dir, exist_ok=True)

    # Score 寄畅园 (Jichang Garden) first as the calibration reference.
    ref_row = compute_realm(file_map["寄畅园"], out_dir, "寄畅园", R=12.0, step_road=2.0, theme_win=5, save_plots=True)
    # Guard against a zero raw score so the division below stays finite.
    ref_raw = ref_row["raw"] if ref_row["raw"] > 0 else 1e-6
    
    print(f"ref_raw={ref_row}")

    # The remaining nine gardens.
    rows = [ref_row]
    for name, path in file_map.items():
        if name == "寄畅园":
            continue
        rows.append(compute_realm(path, out_dir, name, R=12.0, step_road=2.0, theme_win=5, save_plots=True))

    # Score & rank (寄畅园 pinned to exactly 100 points).
    df = pd.DataFrame(rows)
    df["score"] = 100.0 * df["raw"] / ref_raw
    # Force the reference garden to 100 regardless of rounding.
    df.loc[df["name"] == "寄畅园", "score"] = 100.0
    # Sort by name, then descending by score.
    # NOTE(review): pandas' default sort kind is not guaranteed stable, so
    # the name order may not survive score ties — pass kind="stable" if the
    # name tiebreak is intended.
    df = df.sort_values(by=["name"], ascending=True).sort_values(by=["score"], ascending=False)
    # Write the summary table.
    df_round = df[["name", "score", "raw", "H_elem", "J_elem", "theme_k", "theme_entropy", "theme_alt", "KAI", "open_ratio", "balance", "open_mean", "sc_water", "sc_rock"]]
    df_round.to_csv(os.path.join(out_dir, "realm_summary.csv"), index=False, encoding="utf-8-sig")
    print("完成：已在 out_problem2/ 生成每园 csv、开阔度与主题小图（可选）、以及汇总表 realm_summary.csv。")