import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import os

import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv1D, MaxPooling1D
from scipy.linalg import sqrtm

# Real-data file paths (BCI IV-2a, subject A01 training session).
# range(1, 2) yields subject index 1 only; widen the range for more subjects.
_real_template = r'E:\EEG\EEG-TransNet-main\data\dataset\bci_iv_2a\A0{}T_data.npy'
real_data_paths = [_real_template.format(i) for i in range(1, 2)]

# Generated-data file paths: ten fake EEG dumps for subject A01.
_fake_template = r'E:\EEG\EEG-TransNet-main\data\dataset\bci_iv_2a\A01Efake_eeg_{}.npy'
generated_data_paths = [_fake_template.format(i) for i in range(10)]

# Load and stack a list of .npy files.
def load_data(paths):
    """Load every file in *paths* with np.load and stack them into one array.

    Parameters:
        paths: iterable of .npy file paths. All stored arrays must share the
            same shape; otherwise np.array would produce a ragged/object
            array instead of a clean numeric stack.

    Returns:
        np.ndarray of shape (len(paths), *per_file_shape).
    """
    # List comprehension replaces the original append loop (same result).
    return np.array([np.load(path) for path in paths])

# Load both datasets; load_data returns one stacked array per set.
real_data = load_data(real_data_paths)
generated_data = load_data(generated_data_paths)

# Print the raw shapes as loaded from disk.
print("Real data shape:", real_data.shape)
print("Generated data shape:", generated_data.shape)

# Collapse any leading file/trial axes into (n_trials, channels, time_steps).
# Assumes every trial is exactly 22 x 1125 (BCI IV-2a layout) -- TODO confirm
# against the saved .npy files; reshape will raise if sizes don't divide.
real_data = real_data.reshape(-1, 22, 1125)
generated_data = generated_data.reshape(-1, 22, 1125)

# Print the shapes after reshaping.
print("Adjusted Real data shape:", real_data.shape)
print("Adjusted Generated data shape:", generated_data.shape)

# Optional preprocessing: global z-score normalisation, using a single
# mean/std over the whole set (not per-channel or per-trial).
real_data = (real_data - np.mean(real_data)) / np.std(real_data)
generated_data = (generated_data - np.mean(generated_data)) / np.std(generated_data)

# Per-sample layout is (channels, time_steps) after the reshape above.
channels = real_data.shape[1]  # 22
time_steps = real_data.shape[2]  # 1125
input_shape = (channels, time_steps)

# Build the CNN used for feature extraction.
def create_feature_extractor(input_shape):
    """Build a small 1-D CNN mapping one EEG trial to a 64-dim feature vector.

    NOTE(review): with channels-first input (22, 1125), Conv1D convolves
    along the 22-channel axis -- confirm this orientation is intended.
    """
    model = Sequential()
    model.add(Conv1D(64, kernel_size=3, activation='relu', input_shape=input_shape))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    return model

# Instantiate the CNN; it is never trained here, so features come from
# randomly initialised weights -- NOTE(review): confirm this is intentional.
feature_extractor = create_feature_extractor(input_shape)

# Feature-extraction helper.
def calculate_features(data, model):
    """Run *model* on *data* and return the resulting feature matrix."""
    return model.predict(data)

# Embed both datasets into the 64-dim feature space of the extractor.
real_features = calculate_features(real_data, feature_extractor)
generated_features = calculate_features(generated_data, feature_extractor)

# Frechet Inception Distance between two feature sets.
def calculate_fid(features1, features2):
    """Return the Frechet distance between the Gaussian fits of two feature sets.

    Computes ||mu1 - mu2||^2 + Tr(S1 + S2 - 2*sqrt(S1 @ S2)), where mu/S are
    the per-set feature mean and covariance. Features are (n_samples, dim).
    """
    mu1 = features1.mean(axis=0)
    mu2 = features2.mean(axis=0)
    sigma1 = np.cov(features1, rowvar=False)
    sigma2 = np.cov(features2, rowvar=False)

    covmean = sqrtm(sigma1 @ sigma2)
    # sqrtm can pick up tiny imaginary components from numerical error;
    # keep only the real part in that case.
    covmean = covmean.real if np.iscomplexobj(covmean) else covmean

    delta = mu1 - mu2
    return delta @ delta + np.trace(sigma1 + sigma2 - 2.0 * covmean)

# Report the FID between real and generated EEG features (lower is better).
fid_value = calculate_fid(real_features, generated_features)
print('FID: ', fid_value)

# Reload the real data from disk. NOTE: the arrays loaded earlier were
# reshaped and normalised in place, so fresh raw copies are loaded here.
real_data = [np.load(path) for path in real_data_paths]
real_data = np.vstack(real_data)  # each file may hold several samples; stack along axis 0

# Reload the generated data the same way.
generated_data = [np.load(path) for path in generated_data_paths]
generated_data = np.vstack(generated_data)  # each file may hold several samples; stack along axis 0

# Sanity-check shapes before combining.
print("Real data shape:", real_data.shape)
print("Generated data shape:", generated_data.shape)

# Both sets must share per-sample dimensions for a joint PCA to make sense.
if real_data.shape[1:] != generated_data.shape[1:]:
    raise ValueError("Real and generated data shapes do not match.")

# Flatten each multi-dimensional sample into a single feature row.
real_data_flat = real_data.reshape(real_data.shape[0], -1)
generated_data_flat = generated_data.reshape(generated_data.shape[0], -1)

# Fit PCA on real + generated jointly so both share the same projection.
combined_data = np.vstack((real_data_flat, generated_data_flat))

# Project onto the first two principal components.
pca = PCA(n_components=2)

# Fit and transform in one step.
pca_result = pca.fit_transform(combined_data)

# Split the projected points back into the two groups (real rows came first).
real_pca = pca_result[:len(real_data_flat)]
generated_pca = pca_result[len(real_data_flat):]

# Scatter-plot the 2-D embedding of both groups for visual comparison.
plt.figure(figsize=(10, 8))
plt.scatter(real_pca[:, 0], real_pca[:, 1], c='red', label='Real Data', alpha=0.5)
plt.scatter(generated_pca[:, 0], generated_pca[:, 1], c='blue', label='Generated Data', alpha=0.5)
plt.title('PCA Visualization of Real vs Generated Data')
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend()
plt.show()