#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import argparse
import numpy as np
import torch
from numpy.lib.stride_tricks import sliding_window_view
from scipy import stats
from scipy.spatial.distance import pdist, squareform
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import mutual_info_classif
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd


# ==============================================================
# 1. Reference code (kept unchanged!)
# ==============================================================

def comp_cum_sum(signal_arr):
    """Return prefix sums of the signal and of its square.

    Parameters:
        signal_arr: (B, L) array of signals (one row per read).

    Returns:
        (cum_sum_sig, cum_sum_sig_square), each of shape (B, L + 1):
        column j holds the sum over the first j samples, column 0 is all
        zeros.  The leading zero column lets window sums be computed as
        simple differences of two prefix-sum entries.
    """
    batch_size = signal_arr.shape[0]
    # NOTE(review): the zero column is built from Python ints, so the
    # concatenated result is promoted to float64 even for float32 input.
    zero_array = np.expand_dims(np.array([0] * batch_size), axis=1)
    cum_sum_sig = np.cumsum(np.concatenate((zero_array, signal_arr), axis=1), axis=1)
    cum_sum_sig_square = np.cumsum(np.concatenate((zero_array, signal_arr ** 2), axis=1), axis=1)
    return cum_sum_sig, cum_sum_sig_square


def comp_t_stat(cum_sum_sig, cum_sum_sig_square, s_len, w_len):
    """Sliding two-window t-like statistic along each signal.

    For each position p in [w_len, s_len - w_len], compares the window of
    `w_len` samples ending at p against the window of `w_len` samples
    starting at p, using the prefix sums from `comp_cum_sum` so each
    position costs O(1).

    Parameters:
        cum_sum_sig:        (B, s_len + 1) prefix sums of the signal.
        cum_sum_sig_square: (B, s_len + 1) prefix sums of the squared signal.
        s_len:              signal length.
        w_len:              window width on each side of a position.

    Returns:
        torch.Tensor of shape (B, s_len); positions that cannot fit two
        full windows are left at 0.
    """
    eta = np.finfo(float).eps
    t_stat = np.zeros((cum_sum_sig.shape[0], cum_sum_sig.shape[1] - 1))

    if s_len < 2 * w_len or w_len < 2:
        # BUG FIX: this early exit used to return the raw numpy array while
        # the normal path returns a torch tensor; keep one return type so
        # callers (torch.stack in add_features) always get a tensor.
        return torch.tensor(t_stat)

    # Window sums via prefix-sum differences: window 1 ends at p, window 2 starts at p.
    sum1 = cum_sum_sig[:, w_len:s_len - w_len + 1] - cum_sum_sig[:, :s_len - 2 * w_len + 1]
    sumsq1 = cum_sum_sig_square[:, w_len:s_len - w_len + 1] - cum_sum_sig_square[:, :s_len - 2 * w_len + 1]

    sum2 = cum_sum_sig[:, 2 * w_len:s_len + 1] - cum_sum_sig[:, w_len:s_len - w_len + 1]
    sumsq2 = cum_sum_sig_square[:, 2 * w_len:s_len + 1] - cum_sum_sig_square[:, w_len:s_len - w_len + 1]

    mean1 = sum1 / w_len
    mean2 = sum2 / w_len

    # Sum of the two windows' biased variances, floored at machine eps to
    # avoid division by zero on constant segments.
    combined_var = (sumsq1 / w_len - mean1 ** 2) + (sumsq2 / w_len - mean2 ** 2)
    combined_var = np.maximum(combined_var, eta)

    delta_mean = mean2 - mean1
    t_stat_res = np.abs(delta_mean) / np.sqrt(combined_var / w_len)

    t_stat[:, w_len:s_len - w_len + 1] = t_stat_res

    return torch.tensor(t_stat)


def diff1(sig):
    """First-order difference along the last axis, as a torch tensor.

    A zero is prepended before differencing so the output has the same
    length as the input (the first element equals the first sample).
    """
    return torch.tensor(np.diff(sig, prepend=0))


def window_mean_std(sig, w_len):
    """Per-window mean and (population) std over sliding windows of width `w_len`.

    The (B, L) input yields L - w_len + 1 windows per row; one zero column is
    added on each side, giving outputs of shape (B, L - w_len + 3) — equal to
    L when w_len == 3.

    Returns:
        (means, stds) as a pair of torch tensors.
    """
    windows = sliding_window_view(sig, window_shape=w_len, axis=1)
    win_means = np.mean(windows, axis=2)
    win_stds = np.std(windows, axis=2)

    pad = np.expand_dims(np.array([0] * windows.shape[0]), axis=1)
    win_means = np.concatenate((pad, win_means, pad), axis=1)
    win_stds = np.concatenate((pad, win_stds, pad), axis=1)

    return torch.tensor(win_means), torch.tensor(win_stds)


# ==============================================================
# 2. add_features (only adds NaN-filling + input compatibility)
# ==============================================================

def add_features(signals, s_len=315, w_len=3):
    """Build a (B, 5, L) feature tensor from raw signals.

    Channels: raw signal, first difference, sliding-window mean,
    sliding-window std, and the two-window t-statistic.

    Parameters:
        signals: torch.Tensor or array-like of shape (B, s_len).
        s_len:   required signal length; a mismatch raises ValueError.
        w_len:   window width used by the window/t-stat channels.

    Returns:
        torch.float32 tensor of shape (B, 5, s_len) with NaNs replaced by 0.
    """
    if isinstance(signals, torch.Tensor):
        sig_np = signals.cpu().numpy().astype(np.float32)
    else:
        sig_np = np.asarray(signals).astype(np.float32)

    B, L = sig_np.shape
    if L != s_len:
        raise ValueError(f"信号长度必须为 {s_len}，当前为 {L}")

    # Replace NaNs before any statistics are computed.
    sig_np = np.nan_to_num(sig_np, nan=0.0)

    # Reference helpers, used unchanged.
    cum_sum_sig, cum_sum_sig_square = comp_cum_sum(sig_np)
    t_stat = comp_t_stat(cum_sum_sig, cum_sum_sig_square, s_len, w_len)
    diff = diff1(sig_np)
    w_means, w_stds = window_mean_std(sig_np, w_len=w_len)

    raw = torch.from_numpy(sig_np)
    # BUG FIX: the helper outputs are float64 (numpy promotes when the int
    # zero columns / prepend value are concatenated with float32 data) while
    # `raw` is float32; torch.stack requires a single dtype, so cast every
    # channel to float32 first. as_tensor also tolerates a numpy return.
    channels = [torch.as_tensor(c, dtype=torch.float32)
                for c in (raw, diff, w_means, w_stds, t_stat)]
    features = torch.stack(channels, dim=1)  # (B, 5, L)

    assert not torch.isnan(features).any(), "特征中仍有 nan！"
    return features


# ==============================================================
# 3. Association analysis (read-level + vector-level + Top 15)
# ==============================================================

def analyze_association(features, labels, sample_n=100_000):
    """Run the full feature-vs-label association study and emit plots/CSVs.

    Parameters:
        features: (N, 5, L) torch tensor of stacked channels
                  (raw, diff, w_mean, w_std, t_stat).
        labels:   (N,) array-like of class labels
                  (presumably binary methylation labels — confirm upstream).
        sample_n: cap on reads sampled for the per-position correlation pass.

    Returns:
        (auc_mean, summary_df, vector_results, top15_df);
        auc_mean is None when cross-validation fails.
    """
    N, C, L = features.shape
    X_flat = features.reshape(N, -1).cpu().numpy()  # (N, 5*L)
    labels = np.array(labels)

    # ------------------- 1. Cross-validated AUC -------------------
    # Fall back to 2-fold CV on tiny datasets so every fold has samples.
    cv = 2 if N < 10 else 5
    model = LogisticRegression(random_state=42, max_iter=2000, n_jobs=8)
    try:
        scores = cross_val_score(model, X_flat, labels, cv=cv, scoring='roc_auc', n_jobs=8)
        auc_mean = scores.mean()
        print(f"5特征联合 AUC: {auc_mean:.4f} ± {scores.std():.4f}")
    except Exception as e:
        # Best effort: AUC can fail (e.g. a fold containing one class);
        # report the error and continue with the rest of the analysis.
        print("AUC 计算失败:", e)
        auc_mean = None

    # ------------------- 2. Feature importance (|coef| per channel) -------------------
    # Fit on a capped subset to bound runtime, then reshape the coefficients
    # back to (channel, position) to report each channel's mean importance.
    sub_n = min(20000, N)
    model.fit(X_flat[:sub_n], labels[:sub_n])
    coef = np.abs(model.coef_[0]).reshape(5, L)
    mean_imp = coef.mean(axis=1)
    print("\n各特征通道平均重要性（|coef| 均值）:")
    feat_names = ['raw', 'diff', 'w_mean', 'w_std', 't_stat']
    for name, val in zip(feat_names, mean_imp):
        print(f"  {name:8}: {val:.4f}")

    # ------------------- 3. Per-position correlation with the label (sampled) -------------------
    sample_n = min(sample_n, N)
    idx = np.random.choice(N, sample_n, replace=False)
    X_s = X_flat[idx]
    y_s = labels[idx]

    corr_results = {name: {'spearman': [], 'pearson': [], 'mi': []} for name in feat_names}

    print(f"\n各特征通道与标签的相关性（采样 {sample_n} 条）:")
    for i, name in enumerate(feat_names):
        for pos in range(L):
            col = i * L + pos
            x = X_s[:, col]

            # Spearman rank correlation; NaN results are skipped.
            r, _ = stats.spearmanr(x, y_s, nan_policy='omit')
            if not np.isnan(r):
                corr_results[name]['spearman'].append(abs(r))

            # Pearson linear correlation; NaN results are skipped.
            r, _ = stats.pearsonr(x, y_s)
            if not np.isnan(r):
                corr_results[name]['pearson'].append(abs(r))

            # Mutual information — always recorded (no NaN filter needed).
            mi = mutual_info_classif(x.reshape(-1, 1), y_s, random_state=42)[0]
            corr_results[name]['mi'].append(mi)

        # Print per-channel means of each metric.
        s_mean = np.mean(corr_results[name]['spearman']) if corr_results[name]['spearman'] else np.nan
        p_mean = np.mean(corr_results[name]['pearson']) if corr_results[name]['pearson'] else np.nan
        m_mean = np.mean(corr_results[name]['mi'])
        print(f"  {name:8}: Spearman: {s_mean:.4f} | Pearson: {p_mean:.4f} | MI: {m_mean:.4f}")

    # ------------------- 4. Per-position visualization + CSV -------------------
    plot_correlation_summary(corr_results)
    summary_df = save_correlation_summary(corr_results)

    # ------------------- 5. Vector-level analysis (distance correlation + RV) -------------------
    print(f"\n{'='*70}")
    print("向量级关联性分析（315维信号模式 vs 甲基化标签）")
    print(f"{'='*70}")

    # Smaller sample here: the pairwise-distance matrix is O(S^2) in memory.
    vec_sample_n = min(10000, N)
    idx_vec = np.random.choice(N, vec_sample_n, replace=False)
    X_vec = features[idx_vec].cpu().numpy()  # (S, 5, L)
    y_vec = labels[idx_vec]

    vector_results = {}
    for i, name in enumerate(feat_names):
        X_channel = X_vec[:, i, :]  # (S, L)

        # Distance correlation between the whole channel pattern and the label.
        dcor = distance_correlation(X_channel, y_vec)

        # RV coefficient (matrix correlation).
        rv = rv_coefficient(X_channel, y_vec)

        vector_results[name] = {'dcor': dcor, 'rv': rv}
        print(f"  {name:8}: Dist-Corr: {dcor:.4f} | RV: {rv:.4f}")

    # Vector-level visualization + CSV.
    plot_vector_correlation(vector_results)
    save_vector_summary(vector_results)

    # ------------------- 6. Top 15 most label-correlated positions -------------------
    top15_df = save_top15_positions(X_s, y_s, L, top_k=15)

    return auc_mean, summary_df, vector_results, top15_df


# ==============================================================
# 4. Per-position visualization + CSV
# ==============================================================

def plot_correlation_summary(corr_results):
    """Heatmap of mean |correlation| per (metric, feature channel).

    Channels with no valid values for a metric are plotted as 0.
    """
    feat_names = ['raw', 'diff', 'w_mean', 'w_std', 't_stat']
    # (display label, key used inside corr_results)
    metric_rows = [('Spearman', 'spearman'), ('Pearson', 'pearson'), ('MI', 'mi')]

    heat = np.zeros((len(metric_rows), len(feat_names)))
    for row, (_, key) in enumerate(metric_rows):
        for col, feat in enumerate(feat_names):
            vals = corr_results[feat][key]
            if len(vals) > 0:
                heat[row, col] = np.mean(vals)

    plt.figure(figsize=(8, 5))
    sns.heatmap(heat, annot=True, fmt='.3f', cmap='RdYlBu_r',
                xticklabels=feat_names,
                yticklabels=[label for label, _ in metric_rows],
                cbar_kws={'label': 'Mean Absolute Correlation'})
    plt.title('Position-Level: Feature vs Methylation')
    plt.tight_layout()
    plt.show()


def save_correlation_summary(corr_results):
    """Flatten position-level correlation stats into a DataFrame and save as CSV.

    One row per (feature, metric) with mean/std/max of the absolute
    correlations; empty metric lists produce all-zero rows.
    Returns the DataFrame.
    """
    metric_label = {'spearman': 'spearman', 'pearson': 'pearson', 'mi': 'mutual_info'}
    rows = []
    for feat in corr_results:
        for key in ('spearman', 'pearson', 'mi'):
            vals = corr_results[feat][key]
            nonempty = len(vals) > 0
            rows.append({
                'feature': feat,
                'metric': metric_label[key],
                'mean_abs': np.mean(vals) if nonempty else 0.0,
                'std': np.std(vals) if nonempty else 0.0,
                'max': np.max(vals) if nonempty else 0.0,
            })
    df = pd.DataFrame(rows)
    out_path = "position_level_correlation_summary.csv"
    df.to_csv(out_path, index=False)
    print(f"\nPosition-level summary saved: {out_path}")
    return df


# ==============================================================
# 5. Vector-level correlation functions
# ==============================================================

def distance_correlation(X, y):
    """Distance correlation between row-samples of X (N, L) and labels y (N,).

    Uses the biased double-centered estimator. Returns 0.0 when fewer than
    two samples are given or when either variable has no distance spread.
    """
    n = X.shape[0]
    if n < 2:
        return 0.0

    dist_x = squareform(pdist(X, 'euclidean'))
    dist_y = squareform(pdist(y.reshape(-1, 1), 'euclidean'))

    def _double_center(m):
        # Subtract row and column means, add back the grand mean.
        return m - m.mean(axis=0) - m.mean(axis=1)[:, None] + m.mean()

    A = _double_center(dist_x)
    B = _double_center(dist_y)

    dcov2_xy = (A * B).sum() / (n * n)
    dcov2_xx = (A * A).sum() / (n * n)
    dcov2_yy = (B * B).sum() / (n * n)

    if dcov2_xx <= 0 or dcov2_yy <= 0:
        return 0.0
    return np.sqrt(dcov2_xy) / np.sqrt(np.sqrt(dcov2_xx) * np.sqrt(dcov2_yy))


def rv_coefficient(X, y):
    """RV coefficient (matrix correlation) between X (N, L) and y (N,).

    Both inputs are column-centered first; a small epsilon in the
    denominator guards against division by zero for constant inputs.
    """
    Xc = X - X.mean(axis=0)
    yc = y.reshape(-1, 1) - y.mean()

    cov_xx = Xc.T @ Xc
    cov_yy = yc.T @ yc
    cov_xy = Xc.T @ yc

    numerator = np.trace(cov_xy @ cov_xy.T)
    denominator = np.sqrt(np.trace(cov_xx @ cov_xx) * np.trace(cov_yy @ cov_yy) + 1e-12)
    return numerator / denominator


def plot_vector_correlation(vector_results):
    """Heatmap of the vector-level metrics (distance correlation, RV) per channel."""
    names = list(vector_results.keys())
    values = np.array([[vector_results[n]['dcor'], vector_results[n]['rv']]
                       for n in names])

    plt.figure(figsize=(7, 5))
    sns.heatmap(values, annot=True, fmt='.4f', cmap='viridis',
                xticklabels=['Dist-Corr', 'RV'],
                yticklabels=names,
                cbar_kws={'label': 'Vector Correlation'})
    plt.title('Vector-Level: 315-dim Pattern vs Methylation')
    plt.tight_layout()
    plt.show()


def save_vector_summary(vector_results):
    """Write the vector-level correlation metrics to CSV.

    Parameters:
        vector_results: mapping feature name -> {'dcor': float, 'rv': float}.

    Returns:
        The DataFrame that was written (CONSISTENCY FIX: the sibling
        save_correlation_summary returns its DataFrame; this now does too —
        backward compatible, existing callers ignore the return value).
    """
    records = [
        {'feature': f, 'dist_corr': vals['dcor'], 'rv_coeff': vals['rv']}
        for f, vals in vector_results.items()
    ]
    df = pd.DataFrame(records)
    out_path = "vector_level_correlation.csv"
    df.to_csv(out_path, index=False)
    print(f"Vector-level summary saved: {out_path}")
    return df


# ==============================================================
# 6. Top 15 positions with the highest correlation
# ==============================================================

def save_top15_positions(features_sampled, y_sampled, L, top_k=15):
    """Rank signal positions by |correlation| with the label, per channel.

    For each of the five feature channels, scores every position under
    three metrics (Spearman, Pearson, mutual information), prints the
    top `top_k` positions per metric, and writes all rankings to CSV.

    Parameters:
        features_sampled: (S, 5*L) flattened feature matrix (channel-major).
        y_sampled:        (S,) labels.
        L:                positions per channel.
        top_k:            number of top positions to keep per metric.

    Returns:
        DataFrame with columns feature/metric/rank/position/correlation.
    """
    feat_names = ['raw', 'diff', 'w_mean', 'w_std', 't_stat']
    records = []

    print(f"\n{'='*80}")
    print(f"Top {top_k} positions with highest correlation to methylation label")
    print(f"{'='*80}")

    for chan, name in enumerate(feat_names):
        print(f"\n>>> {name.upper()} 通道 Top {top_k} 位置:")
        print(f"{'Rank':<4} {'Pos':<6} {'Spearman':<12} {'Pearson':<12} {'MI':<10}")

        scores = {'spearman': [], 'pearson': [], 'mutual_info': []}

        for pos in range(L):
            x = features_sampled[:, chan * L + pos]
            # (Near-)constant columns have undefined correlations — skip them.
            if np.std(x) < 1e-8:
                continue

            r_s, _ = stats.spearmanr(x, y_sampled, nan_policy='omit')
            if not np.isnan(r_s):
                scores['spearman'].append((pos, abs(r_s)))

            r_p, _ = stats.pearsonr(x, y_sampled)
            if not np.isnan(r_p):
                scores['pearson'].append((pos, abs(r_p)))

            mi_val = mutual_info_classif(x.reshape(-1, 1), y_sampled, random_state=42)[0]
            scores['mutual_info'].append((pos, mi_val))

        for metric_name in ('spearman', 'pearson', 'mutual_info'):
            ranked = sorted(scores[metric_name], key=lambda item: item[1], reverse=True)[:top_k]
            for rank, (pos, val) in enumerate(ranked, 1):
                records.append({
                    'feature': name,
                    'metric': metric_name,
                    'rank': rank,
                    'position': pos,
                    'correlation': val
                })
                # Only the column belonging to the current metric is filled.
                cell = f"{val:.4f}"
                s_val = cell if metric_name == 'spearman' else ""
                p_val = cell if metric_name == 'pearson' else ""
                m_val = cell if metric_name == 'mutual_info' else ""
                print(f"{rank:<4} {pos:<6} {s_val:<12} {p_val:<12} {m_val:<10}".rstrip())

    df = pd.DataFrame(records)
    out_path = "top15_correlated_positions.csv"
    df.to_csv(out_path, index=False)
    print(f"\nTop {top_k} positions saved: {out_path}")
    return df


# ==============================================================
# 7. Data loading + main pipeline
# ==============================================================

def parse_a_line(line, L=315):
    """Parse one TSV line into a fixed-length signal vector and its label.

    Column 10 holds the signal as ';'-separated groups of ','-separated
    floats; column 11 holds the integer label (layout inferred from this
    parser — confirm against the data producer). The signal is truncated
    or zero-padded to exactly L values.

    Returns:
        (np.ndarray of float32 with shape (L,), int label)
    """
    words = line.strip().split('\t')
    # IDIOM FIX: flatten "a,b;c,d" -> [a, b, c, d] with distinct loop
    # variables — the original comprehension reused `x` for both levels.
    vals = [float(v) for group in words[10].split(';') for v in group.split(',')][:L]
    if len(vals) < L:
        vals += [0.0] * (L - len(vals))
    label = int(words[11])
    return np.array(vals, dtype=np.float32), label


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tsv", "-t", type=str, required=True)
    parser.add_argument("--reads", "-r", type=int, help="读取条数")
    parser.add_argument("--len", type=int, default=315, help="信号长度")
    parser.add_argument("--sample", type=int, default=100_000, help="逐位置采样数")
    args = parser.parse_args()

    L_fixed = args.len
    max_reads = args.reads

    raw_signals = []
    labels = []

    # Read up to max_reads non-empty lines from the TSV (all lines when unset).
    print(f"读取前 {max_reads or '全部'} 条数据（长度 {L_fixed}）...")
    with open(args.tsv) as f:
        for i, line in enumerate(f):
            if max_reads is not None and i >= max_reads:
                break
            if not line.strip():
                continue
            sig, lab = parse_a_line(line, L=L_fixed)
            raw_signals.append(sig)
            labels.append(lab)

    if not raw_signals:
        raise ValueError("没有读取到数据！")

    N = len(raw_signals)
    print(f"成功读取 {N} 条信号")

    # Feature extraction: (N, 5, L) tensor of raw/diff/window-stats/t-stat channels.
    sig_tensor = torch.stack([torch.from_numpy(s) for s in raw_signals])
    features = add_features(sig_tensor, s_len=L_fixed, w_len=3)

    # Association analysis against the labels.
    auc, pos_df, vec_results, top15_df = analyze_association(features, np.array(labels), sample_n=args.sample)

    # BUG FIX: use an explicit None check — the previous `if auc` truthiness
    # test reported a legitimate AUC of exactly 0.0 as "not computed".
    if auc is not None:
        print(f"\n最终 AUC: {auc:.4f}")
    else:
        print("\nAUC 未计算")