# omnianomaly_with_timestamp.py
import torch
import torch.nn as nn
import numpy as np
from torch.distributions import Normal
import os
import pandas as pd
from sklearn.preprocessing import StandardScaler
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt

# ----------------------------
# OmniAnomaly Model
# ----------------------------
class OmniAnomaly(nn.Module):
    """Simplified OmniAnomaly-style VAE for multivariate time series.

    Three GRU branches share one hidden size:
      * encoder: x -> q(z|x)  (diagonal Gaussian per timestep)
      * decoder: z -> p(x|z)  (diagonal Gaussian per timestep)
      * prior:   x -> p(z)    (learned, input-conditioned prior)

    All ``*_sigma`` linear layers emit log-variance, not sigma.
    NOTE(review): unlike the original OmniAnomaly paper there is no
    normalizing flow / linear SSM here — confirm the simplification is
    intentional.
    """

    def __init__(self, n_features, hidden_size=100, latent_size=16):
        super().__init__()
        self.n_features = n_features
        # q(z|x)
        self.encoder_gru = nn.GRU(n_features, hidden_size, batch_first=True)
        self.encoder_mu = nn.Linear(hidden_size, latent_size)
        self.encoder_sigma = nn.Linear(hidden_size, latent_size)

        # p(x|z)
        self.decoder_gru = nn.GRU(latent_size, hidden_size, batch_first=True)
        self.decoder_mu = nn.Linear(hidden_size, n_features)
        self.decoder_sigma = nn.Linear(hidden_size, n_features)

        # input-conditioned prior p(z)
        self.prior_gru = nn.GRU(n_features, hidden_size, batch_first=True)
        self.prior_mu = nn.Linear(hidden_size, latent_size)
        self.prior_sigma = nn.Linear(hidden_size, latent_size)

    def _reparameterize(self, mu, logvar):
        """Draw z ~ N(mu, exp(logvar)) via the reparameterization trick."""
        std = torch.exp(0.5 * logvar)
        return mu + torch.randn_like(std) * std

    def _encode_decode(self, x):
        """Encode x, sample one z, decode.

        Returns (recon_mu, recon_logvar, encoder_mu, encoder_logvar).
        Shared by forward() and both reconstruction-probability methods so
        the sampling path exists in exactly one place.
        """
        enc_h, _ = self.encoder_gru(x)
        enc_mu = self.encoder_mu(enc_h)
        enc_logvar = self.encoder_sigma(enc_h)
        z = self._reparameterize(enc_mu, enc_logvar)
        dec_h, _ = self.decoder_gru(z)
        return self.decoder_mu(dec_h), self.decoder_sigma(dec_h), enc_mu, enc_logvar

    def forward(self, x):
        """Return (recon_mu, recon_logvar, kl_loss) for a batch of shape [B, T, D]."""
        prior_h, _ = self.prior_gru(x)
        prior_mu = self.prior_mu(prior_h)
        prior_logvar = self.prior_sigma(prior_h)

        recon_mu, recon_logvar, enc_mu, enc_logvar = self._encode_decode(x)

        q_dist = Normal(enc_mu, torch.exp(0.5 * enc_logvar))
        p_dist = Normal(prior_mu, torch.exp(0.5 * prior_logvar))
        kl_loss = torch.distributions.kl_divergence(q_dist, p_dist).mean()
        return recon_mu, recon_logvar, kl_loss

    def reconstruction_probability(self, x, n_samples=10):
        """Monte-Carlo estimate of log p(x|z) summed over features: shape [B, T]."""
        # Summing the per-dimension estimate is equivalent to the previous
        # inline loop (mean over samples and sum over dims commute) and
        # avoids duplicating the sampling code.
        return self.reconstruction_probability_by_dimension(x, n_samples).sum(dim=-1)

    def reconstruction_probability_by_dimension(self, x, n_samples=10):
        """Monte-Carlo estimate of per-dimension log p(x|z): shape [B, T, D]."""
        total_log_prob = torch.zeros_like(x)  # zeros_like keeps device and dtype
        for _ in range(n_samples):
            recon_mu, recon_logvar, _, _ = self._encode_decode(x)
            dist = Normal(recon_mu, torch.exp(0.5 * recon_logvar))
            total_log_prob += dist.log_prob(x)
        return total_log_prob / n_samples



# ----------------------------
# Data loading (timestamp-aware)
# ----------------------------
def load_and_merge_csvs_with_timestamp(csv_dir, freq='1min'):
    """Load every CSV in csv_dir, index each by its timestamp column, and merge.

    Each file contributes its numeric columns, prefixed with the file stem
    (a bare 'value' column is renamed to the stem itself). When freq is
    truthy the merged frame is resampled to that frequency and gap-filled.
    """
    frames = []
    for fname in sorted(f for f in os.listdir(csv_dir) if f.endswith('.csv')):
        frame = pd.read_csv(os.path.join(csv_dir, fname))
        # Prefer the first column whose name hints at a timestamp; otherwise
        # fall back to the very first column.
        time_col = frame.columns[0]
        for col in frame.columns:
            lowered = col.lower()
            if 'time' in lowered or 'date' in lowered or 'stamp' in lowered:
                time_col = col
                break
        frame[time_col] = pd.to_datetime(frame[time_col], errors='coerce')
        frame = frame.set_index(time_col).select_dtypes(include=[np.number])
        stem = os.path.splitext(fname)[0]
        frame.columns = [stem if col == 'value' else f"{stem}_{col}"
                         for col in frame.columns]
        frames.append(frame)

    if not frames:
        raise ValueError(f"No CSV files in {csv_dir}")
    merged = pd.concat(frames, axis=1)
    merged = merged.loc[merged.index.notna()]  # drop rows whose timestamp failed to parse
    if merged.empty:
        raise ValueError("All timestamps are invalid!")
    if freq:
        merged = merged.resample(freq).mean().ffill().bfill()
    return merged.dropna()

def create_sequences_with_time(df, seq_len):
    """Slide a window of length seq_len over df.

    Returns (windows, timestamp_windows): a float32 array of shape
    [n_windows, seq_len, n_features] and the matching timestamp slices.
    """
    values = df.values.astype(np.float32)
    index = df.index
    n_windows = len(values) - seq_len + 1
    windows = [values[start:start + seq_len] for start in range(n_windows)]
    stamps = [index[start:start + seq_len] for start in range(n_windows)]
    return np.array(windows), np.array(stamps)

# ----------------------------
# Training & detection
# ----------------------------
def train_omni(model, dataloader, num_epochs=30, lr=1e-3, device='cpu'):
    """Train the model with Adam on Gaussian NLL + KL.

    The model's forward must return (recon_mu, recon_logvar, kl_loss).
    Prints the average loss every 10 epochs.
    """
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0
        for x, in dataloader:
            x = x.to(device)
            optimizer.zero_grad()
            recon_mu, recon_logvar, kl_loss = model(x)
            # Gaussian negative log-likelihood written directly in terms of
            # the log-variance: 0.5*(log(2*pi) + logvar) + 0.5*(x-mu)^2/var.
            # Mathematically identical to log(2*pi*sigma^2) via sigma, but
            # avoids the exp -> square -> log round trip, which can overflow
            # for large logvar.
            recon_loss = torch.mean(
                0.5 * (np.log(2 * np.pi) + recon_logvar)
                + 0.5 * (x - recon_mu) ** 2 * torch.exp(-recon_logvar)
            )
            loss = recon_loss + kl_loss
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        if (epoch + 1) % 10 == 0:
            print(f'Epoch {epoch+1}/{num_epochs}, Loss: {total_loss/len(dataloader):.4f}')

def detect_anomalies_omni_aligned(model, sequences, original_length, seq_len, device='cpu',
                                  threshold_percentile=1, batch_size=64, n_samples=10):
    """Score overlapping windows and align per-timestep log-probabilities.

    Each timestep's score is the average reconstruction log-probability over
    all windows covering it; points below the `threshold_percentile`
    percentile are flagged as anomalies.

    Assumes sequences were built with window length == seq_len, so each
    window b covers timesteps [b, b + seq_len).

    Returns (anomaly_mask, aligned_log_prob, threshold).
    """
    model.eval()
    n_windows = sequences.shape[0]
    total_log_prob = np.zeros(original_length)
    count = np.zeros(original_length)
    with torch.no_grad():
        for start in range(0, n_windows, batch_size):
            end = min(start + batch_size, n_windows)
            x_batch = torch.tensor(sequences[start:end], dtype=torch.float32).to(device)
            # Delegate Monte-Carlo estimation to the model instead of
            # re-implementing the sampling loop inline (keeps one source
            # of truth for the reconstruction probability).
            log_prob_np = model.reconstruction_probability(
                x_batch, n_samples=n_samples).cpu().numpy()
            for b in range(end - start):
                t0 = start + b
                total_log_prob[t0:t0 + seq_len] += log_prob_np[b]
                count[t0:t0 + seq_len] += 1
    count = np.clip(count, 1, None)  # guard against division by zero for uncovered tails
    aligned_log_prob = total_log_prob / count
    threshold = np.percentile(aligned_log_prob, threshold_percentile)
    return aligned_log_prob < threshold, aligned_log_prob, threshold

def detect_anomalies_with_dimension_analysis(model, sequences, original_length, seq_len,
                                             feature_names, device='cpu', threshold_percentile=1,
                                             batch_size=64, n_samples=10):
    """Like detect_anomalies_omni_aligned, but also returns per-dimension scores.

    `feature_names` is accepted for interface compatibility; the mapping from
    columns of the returned per-dimension array to names is left to the caller.

    Returns (anomaly_mask, aligned_log_prob, threshold, aligned_log_prob_by_dim)
    where aligned_log_prob_by_dim has shape [original_length, n_features].
    """
    model.eval()
    n_windows, _, n_dims = sequences.shape
    total_log_prob_by_dim = np.zeros((original_length, n_dims))
    count = np.zeros(original_length)

    with torch.no_grad():
        for start in range(0, n_windows, batch_size):
            end = min(start + batch_size, n_windows)
            x_batch = torch.tensor(sequences[start:end], dtype=torch.float32).to(device)
            # Delegate Monte-Carlo estimation to the model; the aggregate
            # per-timestep score is just the per-dimension sum, so only the
            # per-dimension totals need to be accumulated.
            by_dim_np = model.reconstruction_probability_by_dimension(
                x_batch, n_samples=n_samples).cpu().numpy()
            for b in range(end - start):
                t0 = start + b
                total_log_prob_by_dim[t0:t0 + seq_len] += by_dim_np[b]
                count[t0:t0 + seq_len] += 1

    count = np.clip(count, 1, None)  # guard against division by zero
    aligned_log_prob_by_dim = total_log_prob_by_dim / count[:, None]
    aligned_log_prob = aligned_log_prob_by_dim.sum(axis=1)

    threshold = np.percentile(aligned_log_prob, threshold_percentile)
    anomalies = aligned_log_prob < threshold

    return anomalies, aligned_log_prob, threshold, aligned_log_prob_by_dim

def analyze_anomaly_dimensions(anomaly_indices, log_prob_by_dim, feature_names, top_k=3):
    """For each anomalous time index, report the top-k most anomalous dimensions.

    A dimension's anomaly score is the negated log-probability, so lower
    reconstruction probability means a higher score. Returns a list of dicts
    with keys 'index', 'top_anomalous_dims', 'anomaly_scores'.
    """
    reports = []
    for time_idx in anomaly_indices:
        # Negate so that less-likely reconstructions rank higher.
        severity = -log_prob_by_dim[time_idx]

        # Indices of the top-k scores, most severe first.
        ranked = np.argsort(severity)[-top_k:][::-1]

        reports.append({
            'index': time_idx,
            'top_anomalous_dims': [feature_names[d] for d in ranked],
            'anomaly_scores': [severity[d] for d in ranked],
        })

    return reports


# Visualization helper
def plot_anomaly_dimensions(test_df, anomalies, scores_by_dim, feature_names, n_top_dims=3):
    """Plot anomaly-score traces for the n_top_dims most anomalous features.

    Ranks dimensions by mean severity (negated log-prob) over the anomalous
    timestamps, then plots one subplot per top dimension, highlighting the
    more severe anomalies. Saves the figure as a PNG and shows it.
    """
    anomaly_indices = np.where(anomalies)[0]
    if len(anomaly_indices) == 0:
        print("No anomalies to plot")
        return

    # Rank dimensions by their average severity across the anomalous points.
    mean_severity = -scores_by_dim[anomalies].mean(axis=0)
    ranked_dims = np.argsort(mean_severity)[-n_top_dims:][::-1]

    fig, axes = plt.subplots(n_top_dims, 1, figsize=(14, 4 * n_top_dims))
    if n_top_dims == 1:
        axes = [axes]  # keep iteration uniform for a single subplot

    for ax, dim_idx in zip(axes, ranked_dims):
        name = feature_names[dim_idx]
        severity = -scores_by_dim[:, dim_idx]  # negate log-prob -> anomaly score

        ax.plot(test_df.index, severity, label=f'{name} Anomaly Score', alpha=0.7)
        ax.fill_between(test_df.index, severity, alpha=0.3)

        # Highlight anomalous timestamps above the median severity among anomalies.
        highlight = anomalies & (severity > np.percentile(severity[anomalies], 50))
        ax.scatter(test_df.index[highlight], severity[highlight],
                   color='red', s=20, label='Major Anomalies')

        ax.set_title(f'Anomaly Scores for {name}')
        ax.set_ylabel('Anomaly Score')
        ax.legend()
        ax.grid(True, alpha=0.3)

    plt.xlabel('Timestamp')
    plt.tight_layout()
    plt.savefig("anomaly_dimensions_analysis.png", dpi=150)
    plt.show()



# ----------------------------
# Main pipeline
# ----------------------------
if __name__ == "__main__":
    TRAIN_DIR = "train_clean"
    TEST_DIR = "test_point"
    SEQ_LEN = 100
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

    # Load (clean) training data; fit the scaler on training data only.
    print("Loading training data...")
    train_df = load_and_merge_csvs_with_timestamp(TRAIN_DIR, freq='1min')
    scaler = StandardScaler()
    train_scaled = scaler.fit_transform(train_df.values)
    train_seq, _ = create_sequences_with_time(
        pd.DataFrame(train_scaled, index=train_df.index, columns=train_df.columns), SEQ_LEN
    )
    train_loader = DataLoader(TensorDataset(torch.tensor(train_seq, dtype=torch.float32)),
                              batch_size=64, shuffle=True)

    # Train the model.
    model = OmniAnomaly(n_features=train_seq.shape[2])
    train_omni(model, train_loader, num_epochs=30, device=DEVICE)

    # Load test data, reusing the training scaler to avoid leakage.
    print("Loading test data...")
    test_df = load_and_merge_csvs_with_timestamp(TEST_DIR, freq='1min')
    test_scaled = scaler.transform(test_df.values)
    test_seq, _ = create_sequences_with_time(
        pd.DataFrame(test_scaled, index=test_df.index, columns=test_df.columns), SEQ_LEN
    )

    # Detect anomalies with per-dimension score analysis.
    print("Detecting anomalies with dimension analysis...")
    anomalies, scores, thresh, scores_by_dim = detect_anomalies_with_dimension_analysis(
        model, test_seq, len(test_df), SEQ_LEN,
        feature_names=test_df.columns.tolist(),
        device=DEVICE, threshold_percentile=1
    )

    anomaly_indices = np.where(anomalies)[0]

    print(f"\n✅ Detection completed!")
    print(f"📊 Total anomalies detected: {len(anomaly_indices)}")
    print(f"📈 Anomaly percentage: {len(anomaly_indices) / len(test_df) * 100:.2f}%")
    print(f"🎯 Threshold (1% quantile): {thresh:.6f}")

    # Print details for the first 20 anomaly points.
    print(f"\n🕒 First 20 anomaly points:")
    print("Index | Timestamp            | Log Prob    | Diff from Threshold")
    print("-" * 75)
    for idx in anomaly_indices[:20]:
        timestamp = test_df.index[idx]
        log_prob = scores[idx]
        diff = log_prob - thresh  # negative: how far below the threshold
        print(f"{idx:5d} | {timestamp} | {log_prob:10.6f} | {diff:10.6f}")

    if len(anomaly_indices) > 20:
        print(f"... and {len(anomaly_indices) - 20} more anomalies")

    # Persist all anomalies and plot the most anomalous dimensions.
    anomaly_summary = pd.DataFrame({
        'index': anomaly_indices,
        'timestamp': test_df.index[anomaly_indices],
        'log_probability': scores[anomaly_indices]
    })
    anomaly_summary.to_csv("detected_anomalies_summary.csv", index=False)
    plot_anomaly_dimensions(test_df, anomalies, scores_by_dim, test_df.columns.tolist())
    print(f"\n💾 Anomaly summary saved to 'detected_anomalies_summary.csv'")