# omnianomaly_with_timestamp.py
import torch
import torch.nn as nn
import numpy as np
from torch.distributions import Normal
import os
import pandas as pd
from sklearn.preprocessing import StandardScaler
from torch.utils.data import TensorDataset, DataLoader
import matplotlib.pyplot as plt
import matplotlib.dates as mdates


# ----------------------------
# OmniAnomaly Model (保持不变)
# ----------------------------
class OmniAnomaly(nn.Module):
    """GRU-based stochastic VAE for multivariate time series (OmniAnomaly).

    Three GRU branches share one hidden size: an encoder producing the
    variational posterior q(z|x), a prior network conditioned on the input,
    and a decoder mapping sampled latents back to per-feature Gaussian
    reconstruction parameters (mean and log-variance).
    """

    def __init__(self, n_features, hidden_size=100, latent_size=16):
        super().__init__()
        self.n_features = n_features

        # Posterior branch: x -> hidden -> (mu, logvar) of q(z|x).
        self.encoder_gru = nn.GRU(n_features, hidden_size, batch_first=True)
        self.encoder_mu = nn.Linear(hidden_size, latent_size)
        self.encoder_sigma = nn.Linear(hidden_size, latent_size)

        # Decoder branch: z -> hidden -> (mu, logvar) of p(x|z).
        self.decoder_gru = nn.GRU(latent_size, hidden_size, batch_first=True)
        self.decoder_mu = nn.Linear(hidden_size, n_features)
        self.decoder_sigma = nn.Linear(hidden_size, n_features)

        # Prior branch: x -> hidden -> (mu, logvar) of the latent prior.
        self.prior_gru = nn.GRU(n_features, hidden_size, batch_first=True)
        self.prior_mu = nn.Linear(hidden_size, latent_size)
        self.prior_sigma = nn.Linear(hidden_size, latent_size)

    def _reparameterize(self, mu, logvar):
        """Draw z = mu + eps * sigma with eps ~ N(0, I) (reparameterization trick)."""
        noise = torch.randn_like(mu)
        return noise * torch.exp(0.5 * logvar) + mu

    def forward(self, x):
        """Return (recon_mu, recon_logvar, mean KL[q(z|x) || prior])."""
        # Prior parameters from the raw input sequence.
        prior_h, _ = self.prior_gru(x)
        p_mu, p_logvar = self.prior_mu(prior_h), self.prior_sigma(prior_h)

        # Posterior parameters, then one reparameterized latent sample.
        enc_h, _ = self.encoder_gru(x)
        q_mu, q_logvar = self.encoder_mu(enc_h), self.encoder_sigma(enc_h)
        latent = self._reparameterize(q_mu, q_logvar)

        # Per-timestep Gaussian reconstruction parameters.
        dec_h, _ = self.decoder_gru(latent)
        recon_mu, recon_logvar = self.decoder_mu(dec_h), self.decoder_sigma(dec_h)

        posterior = Normal(q_mu, torch.exp(0.5 * q_logvar))
        prior = Normal(p_mu, torch.exp(0.5 * p_logvar))
        kl_loss = torch.distributions.kl_divergence(posterior, prior).mean()
        return recon_mu, recon_logvar, kl_loss


# ----------------------------
# 修复的数据加载函数
# ----------------------------
def load_and_merge_csvs_with_timestamp(csv_dir, freq='1min'):
    """Load every CSV under `csv_dir`, index each by its timestamp column,
    outer-join them on the time index, and resample at `freq`.

    Duplicate timestamps within a file are dropped (first kept). Columns are
    prefixed with the file stem to avoid collisions; a column literally named
    'value' becomes just the file stem.

    Raises:
        ValueError: if no CSV files are found or the merged frame is empty.
    """

    def _read_sensor(file):
        # Parse one CSV: detect the timestamp column by name (fall back to
        # the first column), index by it, keep numeric columns only.
        df = pd.read_csv(os.path.join(csv_dir, file))
        time_col = next((col for col in df.columns
                         if 'time' in col.lower() or 'date' in col.lower() or 'stamp' in col.lower()),
                        df.columns[0])
        df[time_col] = pd.to_datetime(df[time_col], errors='coerce')
        df = df.set_index(time_col).select_dtypes(include=[np.number])

        sensor_name = os.path.splitext(file)[0]
        df.columns = [sensor_name if col == 'value' else f"{sensor_name}_{col}" for col in df.columns]

        if not df.index.is_unique:
            print(f"警告: {file} 中存在重复时间戳，进行去重处理")
            df = df[~df.index.duplicated(keep='first')]
        return df

    dfs = [_read_sensor(file)
           for file in sorted(f for f in os.listdir(csv_dir) if f.endswith('.csv'))]

    if not dfs:
        raise ValueError(f"No CSV files in {csv_dir}")

    # Outer-join so sensors covering different time ranges are all retained;
    # suffixes guard against any remaining duplicate column names.
    merged = dfs[0]
    for i, extra in enumerate(dfs[1:], start=1):
        merged = merged.merge(extra, left_index=True, right_index=True, how='outer', suffixes=('', f'_{i}'))

    if freq:
        print(f"按频率 {freq} 重采样数据...")
        # Resample to a regular grid, then forward/backward fill the gaps.
        merged = merged.resample(freq).mean().ffill().bfill()

    # Drop any rows that still contain NaN after filling.
    merged = merged.dropna()

    if merged.empty:
        raise ValueError("合并后的数据为空！")

    print(f"成功加载数据: {merged.shape[0]} 行, {merged.shape[1]} 列")
    print(f"数据时间范围: {merged.index.min()} 到 {merged.index.max()}")

    return merged


def create_sequences_with_time(df, seq_len):
    """Slide a window of length `seq_len` over `df` (stride 1).

    Returns a float32 array of shape (n_windows, seq_len, n_features) and a
    parallel array of the corresponding timestamp windows.
    """
    values = df.values.astype(np.float32)
    index = df.index
    n_windows = len(values) - seq_len + 1
    windows = [values[start:start + seq_len] for start in range(n_windows)]
    stamps = [index[start:start + seq_len] for start in range(n_windows)]
    return np.array(windows), np.array(stamps)


# ----------------------------
# 训练函数
# ----------------------------
def train_omni(model, dataloader, num_epochs=30, lr=1e-3, device='cpu'):
    """Train `model` with Adam on Gaussian NLL reconstruction loss + KL.

    `model(x)` must return (recon_mu, recon_logvar, kl_loss); the dataloader
    yields 1-tuples of input batches. Prints the average loss every 10 epochs.
    """
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.train()

    for epoch in range(num_epochs):
        running = 0.0
        for (batch,) in dataloader:
            batch = batch.to(device)
            optimizer.zero_grad()
            mu, logvar, kl = model(batch)

            # Negative log-likelihood of the input under the predicted
            # per-feature Gaussian (logvar -> sigma via exp(logvar / 2)).
            nll = -Normal(mu, torch.exp(0.5 * logvar)).log_prob(batch).mean()

            total = nll + kl
            total.backward()
            optimizer.step()
            running += total.item()

        if (epoch + 1) % 10 == 0:
            avg_loss = running / len(dataloader)
            print(f'Epoch {epoch + 1}/{num_epochs}, Loss: {avg_loss:.4f}')


# ----------------------------
# 检测函数
# ----------------------------
def detect_anomalies_with_dimension_analysis(model, sequences, original_length, seq_len,
                                             feature_names, device='cpu', threshold_percentile=1,
                                             n_samples=10, batch_size=64):
    """Score each timestep by its averaged reconstruction log-probability and
    flag the lowest-scoring timesteps as anomalies.

    Runs the encoder/decoder directly (the prior branch is not needed for
    scoring), draws `n_samples` latent samples per window, and averages the
    per-dimension log-probabilities. Overlapping windows are mapped back onto
    the original timeline by averaging their contributions.

    Args:
        model: trained module exposing encoder_gru, encoder_mu, encoder_sigma,
            _reparameterize, decoder_gru, decoder_mu and decoder_sigma
            (e.g. OmniAnomaly).
        sequences: float array of shape (N, seq_len, D) of sliding windows.
        original_length: number of timesteps in the un-windowed series.
        seq_len: window length used to build `sequences`.
        feature_names: per-dimension names; unused internally, kept for
            interface compatibility with existing callers.
        device: torch device string for inference.
        threshold_percentile: percentile of the per-timestep scores used as
            the threshold; scores strictly below it are flagged.
        n_samples: number of Monte Carlo latent samples averaged per window
            (was hard-coded to 10).
        batch_size: windows scored per forward pass (was hard-coded to 64).

    Returns:
        Tuple (anomalies, aligned_log_prob, threshold, aligned_log_prob_by_dim)
        where anomalies is a bool array of length `original_length` and
        aligned_log_prob_by_dim has shape (original_length, D).
    """
    model.eval()
    N, L, D = sequences.shape
    total_log_prob = np.zeros(original_length)
    total_log_prob_by_dim = np.zeros((original_length, D))
    count = np.zeros(original_length)

    with torch.no_grad():
        for i in range(0, N, batch_size):
            end = min(i + batch_size, N)
            x_batch = torch.tensor(sequences[i:end], dtype=torch.float32).to(device)

            # Accumulate per-dimension log-probabilities over latent samples.
            log_prob_by_dim_batch = torch.zeros(x_batch.shape[0], L, D, device=device)
            for _ in range(n_samples):
                encoder_hiddens, _ = model.encoder_gru(x_batch)
                encoder_mu = model.encoder_mu(encoder_hiddens)
                encoder_logvar = model.encoder_sigma(encoder_hiddens)
                z = model._reparameterize(encoder_mu, encoder_logvar)

                decoder_hiddens, _ = model.decoder_gru(z)
                recon_mu = model.decoder_mu(decoder_hiddens)
                recon_logvar = model.decoder_sigma(decoder_hiddens)
                recon_sigma = torch.exp(0.5 * recon_logvar)

                # Keep the per-dimension breakdown for later analysis.
                log_prob_by_dim_batch += Normal(recon_mu, recon_sigma).log_prob(x_batch)

            log_prob_by_dim_np = (log_prob_by_dim_batch / n_samples).cpu().numpy()
            log_prob_batch = log_prob_by_dim_np.sum(axis=-1)  # total log-prob per timestep

            # Spread each window's scores back onto the original timeline.
            for b in range(end - i):
                start_t = i + b
                end_t = start_t + seq_len
                total_log_prob[start_t:end_t] += log_prob_batch[b]
                total_log_prob_by_dim[start_t:end_t] += log_prob_by_dim_np[b]
                count[start_t:end_t] += 1

    # Average over however many windows covered each timestep; clip to 1 so
    # uncovered positions don't divide by zero.
    count = np.clip(count, 1, None)
    aligned_log_prob = total_log_prob / count
    aligned_log_prob_by_dim = total_log_prob_by_dim / count[:, None]

    threshold = np.percentile(aligned_log_prob, threshold_percentile)
    anomalies = aligned_log_prob < threshold

    return anomalies, aligned_log_prob, threshold, aligned_log_prob_by_dim


# ----------------------------
# 可视化函数
# ----------------------------
def plot_anomaly_dimensions(test_df, anomalies, scores_by_dim, feature_names, n_top_dims=3):
    """Plot the `n_top_dims` feature dimensions that contribute most to the
    detected anomalies, marking anomalous timesteps in red.

    Saves the figure to "anomaly_dimensions_analysis.png" and shows it.

    Args:
        test_df: DataFrame whose index supplies the time axis.
        anomalies: boolean array, one flag per timestep.
        scores_by_dim: (T, D) per-dimension log-probabilities; lower = more anomalous.
        feature_names: names for the D feature dimensions.
        n_top_dims: how many of the most anomalous dimensions to plot.
    """
    anomaly_indices = np.where(anomalies)[0]

    if len(anomaly_indices) == 0:
        print("没有检测到异常点，跳过维度分析图")
        return

    # Rank dimensions by mean anomaly score over the flagged timesteps.
    try:
        overall_anomaly_scores = -scores_by_dim[anomalies].mean(axis=0)
        top_dim_indices = np.argsort(overall_anomaly_scores)[-n_top_dims:][::-1]
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed while keeping the best-effort behavior.
        print("计算异常维度分数时出错")
        return

    fig, axes = plt.subplots(n_top_dims, 1, figsize=(14, 4 * n_top_dims))
    if n_top_dims == 1:
        axes = [axes]

    for i, dim_idx in enumerate(top_dim_indices):
        if dim_idx >= len(feature_names):
            continue

        dim_name = feature_names[dim_idx]
        dim_scores = -scores_by_dim[:, dim_idx]  # negate log-prob -> anomaly score

        axes[i].plot(test_df.index, dim_scores, label=f'{dim_name} Abnormal score', alpha=0.7, linewidth=1)
        axes[i].fill_between(test_df.index, dim_scores, alpha=0.3)

        # Mark the anomalous timesteps on this dimension's curve.
        if len(anomaly_indices) > 0:
            anomaly_times = test_df.index[anomaly_indices]
            anomaly_values = dim_scores[anomaly_indices]
            axes[i].scatter(anomaly_times, anomaly_values, color='red', s=30,
                            label=f'Abnormal point ({len(anomaly_indices)} nums)', alpha=0.6)

        axes[i].set_title(f'{dim_name} abnormal score')
        axes[i].set_ylabel('Abnormal score')
        axes[i].legend()
        axes[i].grid(True, alpha=0.3)

        # Compact, rotated date labels on the x axis.
        axes[i].xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
        plt.setp(axes[i].xaxis.get_majorticklabels(), rotation=45)

    plt.xlabel('时间')
    plt.tight_layout()
    plt.savefig("anomaly_dimensions_analysis.png", dpi=150, bbox_inches='tight')
    plt.show()


def plot_main_results(test_df, scores, anomalies, threshold):
    """Two-panel summary figure: per-timestep log-probability with the
    anomaly threshold on top, raw sensor curves (up to three) below with
    red vertical lines at anomalous timesteps.

    Saves the figure to "omnianomaly_main_results.png" and shows it.
    """
    anomaly_indices = np.where(anomalies)[0]
    plt.figure(figsize=(14, 8))

    # Top panel: reconstruction log-probability vs. the threshold line.
    plt.subplot(2, 1, 1)
    plt.plot(test_df.index, scores, label='Reconstruct log probability', color='blue', alpha=0.7, linewidth=1)
    plt.axhline(y=threshold, color='red', linestyle='--', linewidth=2,
                label=f'Threshold ({threshold:.4f})')
    if anomaly_indices.size:
        plt.scatter(test_df.index[anomaly_indices], scores[anomaly_indices],
                    color='red', s=40, label=f'Anomalous point({len(anomaly_indices)} nums)', zorder=5)
    plt.title('OmniAnomaly Anomaly detection results')
    plt.ylabel('Logarithmic probability')
    plt.legend()
    plt.grid(True, alpha=0.3)

    # Bottom panel: raw sensor curves (first three columns at most).
    plt.subplot(2, 1, 2)
    for col in test_df.columns[:3]:
        plt.plot(test_df.index, test_df[col], label=col, alpha=0.7, linewidth=1)
    # Thin red vertical lines at every anomalous timestep.
    for idx in anomaly_indices:
        plt.axvline(x=test_df.index[idx], color='red', alpha=0.3, linestyle='-', linewidth=0.5)
    plt.title('Sensor data (abnormal points marked with red vertical lines)')
    plt.xlabel('Time')
    plt.ylabel('Value')
    plt.legend()
    plt.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.savefig("omnianomaly_main_results.png", dpi=150, bbox_inches='tight')
    plt.show()


# ----------------------------
# 主流程
# ----------------------------
def main():
    """End-to-end pipeline: train OmniAnomaly on the clean training CSVs,
    score the test period, then report, save, and plot the anomalies."""
    TRAIN_DIR = "train_csv_data"
    TEST_DIR = "test_period_phase"
    SEQ_LEN = 50  # moderate window length keeps memory usage in check
    DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

    print(f"使用设备: {DEVICE}")
    print(f"序列长度: {SEQ_LEN}")

    # --- Training data (assumed anomaly-free) ---------------------------
    print("加载训练数据...")
    train_df = load_and_merge_csvs_with_timestamp(TRAIN_DIR, freq='1min')
    print(f"训练数据形状: {train_df.shape}")

    # Fit the scaler on training data only; reuse it for the test set.
    scaler = StandardScaler()
    train_scaled = scaler.fit_transform(train_df.values)

    train_seq, _ = create_sequences_with_time(
        pd.DataFrame(train_scaled, index=train_df.index, columns=train_df.columns), SEQ_LEN
    )
    print(f"训练序列形状: {train_seq.shape}")

    train_loader = DataLoader(
        TensorDataset(torch.tensor(train_seq, dtype=torch.float32)),
        batch_size=32, shuffle=True
    )

    # --- Model training -------------------------------------------------
    print("开始训练模型...")
    model = OmniAnomaly(n_features=train_seq.shape[2])
    train_omni(model, train_loader, num_epochs=50, device=DEVICE)

    # --- Test data ------------------------------------------------------
    print("加载测试数据...")
    test_df = load_and_merge_csvs_with_timestamp(TEST_DIR, freq='1min')
    print(f"测试数据形状: {test_df.shape}")

    test_scaled = scaler.transform(test_df.values)
    test_seq, _ = create_sequences_with_time(
        pd.DataFrame(test_scaled, index=test_df.index, columns=test_df.columns), SEQ_LEN
    )
    print(f"测试序列形状: {test_seq.shape}")

    # --- Anomaly detection ----------------------------------------------
    print("开始异常检测...")
    anomalies, scores, thresh, scores_by_dim = detect_anomalies_with_dimension_analysis(
        model, test_seq, len(test_df), SEQ_LEN,
        feature_names=test_df.columns.tolist(),
        device=DEVICE, threshold_percentile=1
    )

    anomaly_indices = np.where(anomalies)[0]
    _report_anomalies(test_df, scores, thresh, anomaly_indices)
    _save_results(test_df, scores, anomalies, anomaly_indices)

    # --- Visualization ---------------------------------------------------
    print("生成可视化图表...")
    plot_main_results(test_df, scores, anomalies, thresh)
    plot_anomaly_dimensions(test_df, anomalies, scores_by_dim, test_df.columns.tolist())

    print(f"\n💾 结果已保存:")
    print(f"   - omnianomaly_results.csv (完整结果)")
    if len(anomaly_indices) > 0:
        print(f"   - detected_anomalies_summary.csv (异常点摘要)")
    print(f"   - omnianomaly_main_results.png (主要结果图)")
    print(f"   - anomaly_dimensions_analysis.png (维度分析图)")


def _report_anomalies(test_df, scores, thresh, anomaly_indices):
    """Print the detection summary and the first 20 anomalous timesteps."""
    print(f"\n✅ 检测完成!")
    print(f"📊 检测到异常点: {len(anomaly_indices)} 个")
    print(f"📈 异常比例: {len(anomaly_indices) / len(test_df) * 100:.2f}%")
    print(f"🎯 阈值 (1% 分位数): {thresh:.6f}")

    if len(anomaly_indices) > 0:
        print(f"\n🕒 前20个异常点:")
        print("索引 | 时间戳 | 对数概率 | 与阈值差值")
        print("-" * 70)
        for i, idx in enumerate(anomaly_indices[:20]):
            timestamp = test_df.index[idx]
            log_prob = scores[idx]
            diff = log_prob - thresh
            print(f"{idx:5d} | {timestamp} | {log_prob:10.6f} | {diff:10.6f}")
        if len(anomaly_indices) > 20:
            print(f"... 还有 {len(anomaly_indices) - 20} 个异常点")


def _save_results(test_df, scores, anomalies, anomaly_indices):
    """Write the full per-timestep results and, if any anomalies were found,
    an anomaly-only summary CSV."""
    pd.DataFrame({
        'timestamp': test_df.index,
        'log_prob': scores,
        'is_anomaly': anomalies
    }).to_csv("omnianomaly_results.csv", index=False)

    if len(anomaly_indices) > 0:
        pd.DataFrame({
            'index': anomaly_indices,
            'timestamp': test_df.index[anomaly_indices],
            'log_probability': scores[anomaly_indices]
        }).to_csv("detected_anomalies_summary.csv", index=False)


if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        print(f"❌ 发生错误: {e}")
        import traceback

        traceback.print_exc()