#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, re, json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Input files (read from the current working directory).
FACE_FILE = "face1.ly"      # face trajectory table (.ly; converted mm -> m in main)
MARM_FILE = "m-arm3.ly"     # m-arm trajectory table (.ly)
LOGGER_FILE = "logger.csv"  # optional logger CSV; analysis runs without it

def try_read_generic_table(path, default_rate_hz=30.0):
    """Best-effort reader for a numeric trajectory table.

    Tries, in order: a JSON head, several CSV/TSV/whitespace separators,
    and finally a crude line-by-line float-token fallback. All columns are
    coerced to numeric; a mostly-increasing column is detected and used as
    time ``t`` (4+ columns only), otherwise a synthetic time axis at
    ``default_rate_hz`` is inserted. Spatial columns are named x/y/z.

    Args:
        path: file to parse.
        default_rate_hz: sample rate used to synthesize ``t`` when no
            time column is detected.
    Returns:
        DataFrame with a leading ``t`` column plus x[/y/z][/c4...] columns.
    Raises:
        ValueError: when nothing numeric can be parsed from the file.
    """
    # 1) Try a JSON head (cheap sniff over the first few lines).
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            # zip(range(6), f) stops gracefully on files shorter than six
            # lines; the previous next(f) raised StopIteration before
            # json.loads could run, so short valid JSON files were missed.
            head = "".join(line for _, line in zip(range(6), f))
        js = json.loads(head)
        df = pd.json_normalize(js)
    except Exception:
        df = None
    # 2) Try CSV / semicolon / TSV / whitespace separators.
    if df is None or df.empty:
        for sep in [",", ";", "\t", r"\s+"]:
            try:
                cand = pd.read_csv(path, sep=sep, engine="python", comment="#", header=None)
                cand = cand.dropna(axis=1, how="all")
            except Exception:
                continue
            # Accept this separator only if it yields at least one numeric
            # cell; previously the loop broke on the first non-raising read,
            # so a ';'-separated file parsed with ',' collapsed into a
            # single string column and the correct separator was never tried.
            if not cand.empty and cand.apply(pd.to_numeric, errors="coerce").notna().any().any():
                df = cand
                break
    # 3) Fallback: crude token splitting, keeping only float-parsable tokens.
    if df is None or df.empty:
        rows = []
        with open(path, "r", encoding="utf-8", errors="ignore") as f:
            for line in f:
                if not line.strip() or line.strip().startswith("#"):
                    continue
                parts = re.split(r"[,\s;]+", line.strip())
                vals = []
                for p in parts:
                    try:
                        vals.append(float(p))
                    except ValueError:  # non-numeric token: skip (was a bare except)
                        pass
                if vals:
                    rows.append(vals)
        if not rows:
            raise ValueError(f"无法解析 {path}")
        # Pad ragged rows with NaN so the table is rectangular.
        width = max(len(r) for r in rows)
        rows = [r + [np.nan]*(width-len(r)) for r in rows]
        df = pd.DataFrame(rows)

    # Column naming and time detection/synthesis.
    for c in df.columns:
        df[c] = pd.to_numeric(df[c], errors="coerce")
    df = df.dropna(how="all").reset_index(drop=True)
    c = len(df.columns)
    # Detect a (mostly) strictly-increasing column and treat it as time.
    t_col = None
    for i in range(c):
        s = df.iloc[:, i]
        d = s.diff().dropna()
        if s.notna().sum() > 3 and (d > 0).mean() > 0.8 and d.abs().median() > 1e-6:
            t_col = i
            break
    if c >= 4:
        if t_col is not None:
            # Move the detected time column to the front, then label.
            order = [t_col] + [i for i in range(c) if i != t_col]
            df = df.iloc[:, order]
            df.columns = ["t","x","y","z"] + [f"c{k}" for k in range(4, c)]
        else:
            df.columns = ["x","y","z"] + [f"c{k}" for k in range(3, c)]
            t = np.arange(len(df))/float(default_rate_hz)
            df.insert(0, "t", t)
    elif c == 3:
        df.columns = ["x","y","z"]
        t = np.arange(len(df))/float(default_rate_hz)
        df.insert(0, "t", t)
    elif c == 2:
        df.columns = ["x","y"]
        t = np.arange(len(df))/float(default_rate_hz)
        df.insert(0, "t", t)
    else:
        df.columns = ["x"]
        t = np.arange(len(df))/float(default_rate_hz)
        df.insert(0, "t", t)
    return df

def try_read_logger_csv(path):
    """Read a logger CSV and guarantee a numeric ``t`` (seconds) column.

    Recognizes common time-column spellings case-insensitively
    (t / time / timestamp / stamp, sec+nsec pairs, ROS 2
    header.stamp.sec + header.stamp.nanosec). When none is found, a
    synthetic 20 Hz time base is generated (the logger targets 20 Hz).

    Returns the DataFrame with column ``t`` present and numeric.
    """
    df = pd.read_csv(path, engine="python")
    cols = [c.lower() for c in df.columns]
    cmap = {c.lower(): c for c in df.columns}  # lowercase -> original name
    if "t" in cols:
        t = pd.to_numeric(df[cmap["t"]], errors="coerce")
    elif "time" in cols:
        t = pd.to_numeric(df[cmap["time"]], errors="coerce")
    elif "timestamp" in cols:
        t = pd.to_numeric(df[cmap["timestamp"]], errors="coerce")
    elif "stamp" in cols:
        t = pd.to_numeric(df[cmap["stamp"]], errors="coerce")
    elif "sec" in cols and "nsec" in cols:
        t = pd.to_numeric(df[cmap["sec"]], errors="coerce") + pd.to_numeric(df[cmap["nsec"]], errors="coerce")*1e-9
    elif "header.stamp.sec" in cols and "header.stamp.nanosec" in cols:
        t = pd.to_numeric(df[cmap["header.stamp.sec"]], errors="coerce") + pd.to_numeric(df[cmap["header.stamp.nanosec"]], errors="coerce")*1e-9
    else:
        t = np.arange(len(df))/20.0  # logger's target rate is 20 Hz
    t = np.asarray(t, dtype=float)
    # Bug fix: df.insert(0, "t", ...) raises ValueError when a column
    # literally named "t" already exists; overwrite it in place instead.
    if "t" in df.columns:
        df["t"] = t
    else:
        df.insert(0, "t", t)
    return df

def resample_to_common(a, b, dt=0.02, cols=("x","y","z")):
    """Linearly resample two timestamped frames onto one shared time grid.

    Args:
        a, b: DataFrames with a numeric ``t`` column and the columns in `cols`.
        dt: grid step in seconds.
        cols: channel columns to interpolate.
    Returns:
        (t, A, B): the common grid and two (len(t), len(cols)) arrays of
        interpolated values, NaNs treated as 0 before interpolation.
    Raises:
        ValueError: when the two ``t`` ranges do not overlap.
    """
    start = max(a["t"].min(), b["t"].min())
    stop = min(a["t"].max(), b["t"].max())
    if stop <= start:
        raise ValueError("时间区间无重叠")
    grid = np.arange(start, stop, dt)

    def _sample(frame):
        channels = [
            np.interp(grid, frame["t"].values,
                      pd.to_numeric(frame[ch], errors="coerce").fillna(0).values)
            for ch in cols
        ]
        return np.column_stack(channels)

    return grid, _sample(a), _sample(b)

def xcorr_lag(a, b, fs):
    """Estimate the lag between two signals via normalized cross-correlation.

    Both inputs are z-scored (NaN-safe, with a small epsilon in the
    denominator) before correlating in 'full' mode. The lag in samples is
    the argmax of the correlation minus (len(a) - 1).

    Returns:
        (lag_seconds, lag_samples, peak_correlation)
    """
    eps = 1e-9
    za = np.nan_to_num((a - np.nanmean(a)) / (np.nanstd(a) + eps))
    zb = np.nan_to_num((b - np.nanmean(b)) / (np.nanstd(b) + eps))
    full = np.correlate(za, zb, mode="full")
    lag_samples = int(np.argmax(full) - (len(za) - 1))
    return lag_samples / fs, lag_samples, float(full.max())

def main():
    """Align face and m-arm trajectories, estimate lag/error, write plots and a CSV.

    Side effects: reads FACE_FILE and MARM_FILE (and LOGGER_FILE when present);
    writes analysis_summary.csv plus two or three PNG plots to the CWD.
    """
    face = try_read_generic_table(FACE_FILE, default_rate_hz=30.0)
    marm = try_read_generic_table(MARM_FILE, default_rate_hz=30.0)
    # .ly files are assumed to be in millimeters; convert face to meters.
    # NOTE(review): only `face` is converted even though MARM_FILE is also a
    # .ly file — confirm whether m-arm data is already in meters.
    for c in ("x","y","z"):
        if c in face.columns: face[c] = face[c] / 1000.0

    # Estimate per-axis time lag and error between face and m-arm.
    t, f_xyz, m_xyz = resample_to_common(face, marm, dt=0.02)
    fs = 50.0  # sample rate of the common grid (1 / 0.02 s)
    lags = {}
    for i, ax in enumerate(("x","y","z")):
        lags[ax] = xcorr_lag(f_xyz[:, i], m_xyz[:, i], fs)
    lag_med_s = float(np.median([lags[a][0] for a in ("x","y","z")]))

    # Align by the median lag (shift m-arm right when it lags behind face).
    shift = int(round(lag_med_s * fs))
    if shift > 0:
        m_aln = m_xyz[shift:]; f_aln = f_xyz[:-shift]; t_aln = t[:-shift]
    elif shift < 0:
        f_aln = f_xyz[-shift:]; m_aln = m_xyz[:shift]; t_aln = t[-shift:]
    else:
        f_aln, m_aln, t_aln = f_xyz, m_xyz, t

    # Residual error after alignment: per-axis RMSE plus 3D distance stats.
    diff = m_aln - f_aln
    rmse_xyz = np.sqrt(np.nanmean(diff**2, axis=0))
    dist = np.sqrt((diff**2).sum(axis=1))
    rmse_dist = float(np.sqrt(np.nanmean(dist**2)))
    mae_dist  = float(np.nanmean(np.abs(dist)))

    # Plots: aligned Z traces and the distance-error curve.
    plt.figure(); plt.plot(t_aln, f_aln[:,2], label="face.z"); plt.plot(t_aln, m_aln[:,2], label="m-arm.z")
    plt.title("Z over time (aligned)"); plt.xlabel("time (s)"); plt.ylabel("Z (m)"); plt.legend(); plt.tight_layout()
    plt.savefig("plot_face_marm_z.png"); plt.close()

    plt.figure(); plt.plot(t_aln, dist, label="||m-arm - face||")
    plt.title("Distance error over time"); plt.xlabel("time (s)"); plt.ylabel("distance (m)"); plt.legend(); plt.tight_layout()
    plt.savefig("plot_dist_err.png"); plt.close()

    # Summary rows for the output CSV; logger correlation is optional.
    summ = [{
        "pair":"face_vs_marm",
        "lag_x_s": lags["x"][0], "lag_y_s": lags["y"][0], "lag_z_s": lags["z"][0],
        "lag_median_s": lag_med_s,
        "rmse_x_m": float(rmse_xyz[0]), "rmse_y_m": float(rmse_xyz[1]), "rmse_z_m": float(rmse_xyz[2]),
        "rmse_dist_m": rmse_dist, "mae_dist_m": mae_dist
    }]

    if os.path.exists(LOGGER_FILE):
        log = try_read_logger_csv(LOGGER_FILE)
        # Pick common columns (twist linear / ibvs) to cross-correlate with face.z.
        cand = [c for c in log.columns if ("twist" in c.lower() and "linear" in c.lower()) or ("ibvs" in c.lower())]
        if len(cand) > 0:
            # Resample everything onto face's time grid at 50 Hz.
            t0, t1 = face["t"].min(), face["t"].max()
            tb = np.arange(t0, t1, 0.02)
            fz = np.interp(tb, face["t"].values, pd.to_numeric(face["z"], errors="coerce").fillna(0).values)
            for c in cand:
                sig = np.interp(tb, log["t"].values, pd.to_numeric(log[c], errors="coerce").fillna(0).values)
                lag_s, _, corr_max = xcorr_lag(fz, sig, 50.0)
                summ.append({"pair":f"face_z_vs_{c}", "lag_s":lag_s, "corr_max":corr_max})
            # Plot one example signal (prefer linear.x when present).
            ex = next((c for c in cand if "linear.x" in c.lower()), cand[0])
            sig = np.interp(tb, log["t"].values, pd.to_numeric(log[ex], errors="coerce").fillna(0).values)
            plt.figure(); plt.plot(tb, fz, label="face.z"); plt.plot(tb, sig, label=ex)
            plt.title("face.z vs logger signal"); plt.xlabel("time (s)"); plt.ylabel("value"); plt.legend(); plt.tight_layout()
            plt.savefig("plot_facez_vs_logger.png"); plt.close()

    pd.DataFrame(summ).to_csv("analysis_summary.csv", index=False)
    print("完成。已生成：analysis_summary.csv, plot_face_marm_z.png, plot_dist_err.png（以及可能的 plot_facez_vs_logger.png）")

# Script entry point: run the full analysis when executed directly.
if __name__ == "__main__":
    main()
