import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.cross_decomposition import PLSRegression
from ctgan import CTGAN
from skimage.metrics import structural_similarity as ssim
from scipy.spatial.distance import chebyshev
from tqdm import tqdm
import warnings
import os

warnings.filterwarnings('ignore')


# Data resampling
def resample_data(file_path, target_samples_per_class=200, output_file=None, label_column='0'):
    """Resample a labeled CSV so every class ends up with the same row count.

    Sampling uses replacement (fixed random_state=42), so classes smaller than
    the target are oversampled and larger classes are downsampled.

    Args:
        file_path: path to the input CSV; one column holds the class label.
        target_samples_per_class: rows to draw for each class.
        output_file: optional path; when given, the resampled table is also
            written there as CSV (no index column).
        label_column: name of the label column (default '0', this project's
            convention of numeric string column headers).

    Returns:
        pd.DataFrame with n_classes * target_samples_per_class rows.
    """
    df = pd.read_csv(file_path)
    categories = df[label_column].unique()
    resampled_data = []

    for category in categories:
        class_data = df[df[label_column] == category]
        # replace=True lets under-represented classes be oversampled to the target.
        resampled_class_data = class_data.sample(n=target_samples_per_class, replace=True, random_state=42)
        resampled_data.append(resampled_class_data)

    final_data = pd.concat(resampled_data, ignore_index=True)
    if output_file:
        final_data.to_csv(output_file, index=False)
    return final_data


# Data quality evaluation
def evaluate_data_quality(original_file, augmented_file, file=None):
    """Compare original vs. augmented data via their mean feature vectors.

    Reads both CSVs (first column is assumed to be the label), averages the
    feature columns over all rows, and reports three similarity metrics
    between the two mean vectors: Pearson correlation, SSIM, and the
    Chebyshev (max absolute difference) distance.

    Args:
        original_file: path to the original CSV.
        augmented_file: path to the augmented CSV (same feature columns).
        file: optional open, writable file handle; when given, the report is
            written to it, otherwise it is printed to stdout.
    """
    df_original = pd.read_csv(original_file)
    df_augmented = pd.read_csv(augmented_file)

    # Drop the label column; only the feature profiles are compared.
    features_original = df_original.iloc[:, 1:].values
    features_augmented = df_augmented.iloc[:, 1:].values

    avg_original = np.mean(features_original, axis=0)
    avg_augmented = np.mean(features_augmented, axis=0)

    correlation = np.corrcoef(avg_original, avg_augmented)[0, 1]
    ssim_value, _ = ssim(avg_original, avg_augmented, full=True,
                         data_range=np.max(avg_original) - np.min(avg_original))
    # Use scipy's chebyshev (already imported at the top of the file) instead
    # of re-deriving max|a - b| by hand; the value is identical.
    chebyshev_distance = chebyshev(avg_original, avg_augmented)

    output = (f"Correlation Coefficient: {correlation}\n"
              f"SSIM Value: {ssim_value}\n"
              f"Chebyshev Distance: {chebyshev_distance}\n")

    if file:
        file.write(output)
    else:
        print(output)


# GAN data augmentation
class Generator(nn.Module):
    """Fully connected GAN generator mapping latent noise to a feature vector.

    Architecture: input_dim -> hidden_dim -> 2*hidden_dim -> output_dim with
    ReLU activations between layers and Tanh on the output.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=128):
        super(Generator, self).__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim * 2),
            nn.ReLU(),
            nn.Linear(hidden_dim * 2, output_dim),
            nn.Tanh(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, z):
        """Generate a batch of synthetic feature vectors from noise ``z``."""
        return self.net(z)


class Discriminator(nn.Module):
    """Fully connected GAN discriminator scoring samples as real (1) or fake (0).

    Architecture: input_dim -> 2*hidden_dim -> hidden_dim -> 1 with LeakyReLU
    activations and a final Sigmoid, so outputs lie in (0, 1).
    """

    def __init__(self, input_dim, hidden_dim=128):
        super(Discriminator, self).__init__()
        layers = [
            nn.Linear(input_dim, hidden_dim * 2),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim * 2, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, 1),
            nn.Sigmoid(),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return the probability-like realness score for each row of ``x``."""
        return self.net(x)


def train_gan(file_path, output_file, epochs=500, batch_size=6, z_dim=100):
    """Train one vanilla GAN per class and write 200 synthetic rows per class.

    For each label in the CSV's label column ('0'), the class's features are
    standardized, a Generator/Discriminator pair is trained with BCE loss and
    Adam, and 200 samples are drawn from the generator and inverse-transformed
    back to the original feature scale.

    Args:
        file_path: input CSV; column '0' holds the class label.
        output_file: path where the concatenated synthetic CSV is written.
        epochs: training epochs per class.
        batch_size: DataLoader batch size (unchanged across classes).
        z_dim: dimensionality of the generator's latent input.
    """
    df = pd.read_csv(file_path)
    label_column = '0'
    labels = df[label_column].unique()
    synthetic_data_list = []

    for label in labels:
        group_data = df[df[label_column] == label].drop(columns=[label_column]).values
        scaler = StandardScaler()
        scaled_data = scaler.fit_transform(group_data)
        data = torch.tensor(scaled_data, dtype=torch.float32)
        dataset = TensorDataset(data)
        dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

        input_dim = data.shape[1]
        generator = Generator(z_dim, input_dim)
        discriminator = Discriminator(input_dim)

        criterion = nn.BCELoss()
        optimizer_g = optim.Adam(generator.parameters(), lr=0.0001, betas=(0.5, 0.999))
        optimizer_d = optim.Adam(discriminator.parameters(), lr=0.0001, betas=(0.5, 0.999))

        for epoch in range(epochs):
            for real_data, in dataloader:
                # Track the actual batch size locally (the last batch may be
                # smaller). The original rebound the `batch_size` parameter
                # here, which silently shrank the DataLoader batch size for
                # every subsequent class.
                n = real_data.size(0)

                # --- Discriminator step: real batch, then detached fake batch ---
                optimizer_d.zero_grad()
                real_labels = torch.ones(n, 1)
                output = discriminator(real_data)
                loss_d_real = criterion(output, real_labels)
                loss_d_real.backward()

                z = torch.randn(n, z_dim)
                fake_data = generator(z)
                fake_labels = torch.zeros(n, 1)
                output = discriminator(fake_data.detach())
                loss_d_fake = criterion(output, fake_labels)
                loss_d_fake.backward()
                optimizer_d.step()

                # --- Generator step: try to make the same fakes score as real ---
                optimizer_g.zero_grad()
                output = discriminator(fake_data)
                loss_g = criterion(output, real_labels)
                loss_g.backward()
                optimizer_g.step()

            if (epoch + 1) % 100 == 0:
                print(
                    f"Epoch [{epoch + 1}/{epochs}], Loss D: {loss_d_real.item() + loss_d_fake.item():.4f}, Loss G: {loss_g.item():.4f}")

        # Sample 200 synthetic rows and map them back to the original scale.
        with torch.no_grad():
            z = torch.randn(200, z_dim)
            generated_data = generator(z).numpy()
        generated_data = scaler.inverse_transform(generated_data)
        generated_data_df = pd.DataFrame(generated_data, columns=df.drop(columns=[label_column]).columns)
        generated_data_df.insert(0, label_column, label)
        synthetic_data_list.append(generated_data_df)

    synthetic_data = pd.concat(synthetic_data_list, ignore_index=True)
    synthetic_data.to_csv(output_file, index=False)


# CTGAN data augmentation
def train_ctgan(file_path, output_file, epochs=200, batch_size=6, ctgan_pac=2):
    """Generate synthetic samples with CTGAN, one class at a time.

    A single CTGAN instance is refitted on each class subset in turn; 200
    synthetic rows are sampled per class and the concatenated result is
    written to ``output_file`` as CSV.

    Args:
        file_path: input CSV; column '0' holds the class label.
        output_file: path for the synthetic CSV.
        epochs: CTGAN training epochs per fit.
        batch_size: CTGAN batch size.
        ctgan_pac: CTGAN ``pac`` grouping size (must divide batch_size).
    """
    frame = pd.read_csv(file_path)
    label_column = '0'
    class_labels = frame[label_column].unique()

    model = CTGAN(
        epochs=epochs,
        batch_size=batch_size,
        generator_dim=(128, 256),
        discriminator_dim=(256, 128),
        pac=ctgan_pac,
        verbose=True,
        cuda=True
    )

    pieces = []
    for label in class_labels:
        subset = frame[frame[label_column] == label]
        model.fit(subset, discrete_columns=[label_column])
        pieces.append(model.sample(200))

    pd.concat(pieces, ignore_index=True).to_csv(output_file, index=False)


# Model training and evaluation
def load_data(file_path):
    """Read a CSV and split it into a (features, labels) pair.

    The first column is treated as the class label; every remaining column
    is a feature.

    Returns:
        (features ndarray of shape (n, d), labels ndarray of shape (n,))
    """
    frame = pd.read_csv(file_path)
    return frame.iloc[:, 1:].values, frame.iloc[:, 0].values


def create_combined_data(X_Y, X_B, X_G, X_C, y_Y, y_B, y_G, y_C):
    """Build the five combined training sets used in the experiments.

    Y = original data, B = resampled, G = GAN-synthetic, C = CTGAN-synthetic
    (naming follows the _B/_G/_C file suffixes produced upstream). Each
    combination stacks feature matrices vertically and label vectors
    horizontally, original data first.

    Returns:
        ((X_YB, y_YB), (X_YG, y_YG), (X_YC, y_YC),
         (X_YBG, y_YBG), (X_YBC, y_YBC))
    """
    def _merge(*pairs):
        # Stack features row-wise and concatenate the matching label vectors.
        xs, ys = zip(*pairs)
        return np.vstack(xs), np.hstack(ys)

    combos = [
        [(X_Y, y_Y), (X_B, y_B)],
        [(X_Y, y_Y), (X_G, y_G)],
        [(X_Y, y_Y), (X_C, y_C)],
        [(X_Y, y_Y), (X_B, y_B), (X_G, y_G)],
        [(X_Y, y_Y), (X_B, y_B), (X_C, y_C)],
    ]
    return tuple(_merge(*combo) for combo in combos)


def train_svm(X_train, y_train, X_test, y_test, kernel='linear', C=1.0):
    """Fit a support-vector classifier and score it on both splits.

    Args:
        X_train, y_train: training features and labels.
        X_test, y_test: held-out features and labels.
        kernel: SVC kernel name.
        C: SVC regularization strength.

    Returns:
        (train_accuracy, test_accuracy)
    """
    classifier = SVC(kernel=kernel, C=C)
    classifier.fit(X_train, y_train)
    train_accuracy = accuracy_score(y_train, classifier.predict(X_train))
    test_accuracy = accuracy_score(y_test, classifier.predict(X_test))
    return train_accuracy, test_accuracy


def train_bp_nn(X_train, y_train, X_test, y_test, hidden_layer_sizes=(100,), max_iter=500):
    """Fit a BP neural network (sklearn MLPClassifier) and score it.

    Args:
        X_train, y_train: training features and labels.
        X_test, y_test: held-out features and labels.
        hidden_layer_sizes: MLP hidden-layer widths.
        max_iter: maximum training iterations.

    Returns:
        (train_accuracy, test_accuracy)
    """
    network = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, max_iter=max_iter)
    network.fit(X_train, y_train)
    train_accuracy = accuracy_score(y_train, network.predict(X_train))
    test_accuracy = accuracy_score(y_test, network.predict(X_test))
    return train_accuracy, test_accuracy


def train_pls_da(X_train, y_train, X_test, y_test, n_components=10):
    """PLS-DA: PLSRegression on integer-coded labels, rounded to a class.

    The regression output is rounded to the nearest integer and compared to
    the true labels. NOTE(review): this treats labels as ordinal values, so
    rounded predictions can fall outside the label set (they simply count as
    wrong) — confirm labels are numeric codes.

    Args:
        X_train, y_train: training features and (numeric) labels.
        X_test, y_test: held-out features and labels.
        n_components: number of PLS components.

    Returns:
        (train_accuracy, test_accuracy)
    """
    pls_da_model = PLSRegression(n_components=n_components)
    pls_da_model.fit(X_train, y_train)
    # PLSRegression.predict returns an (n, 1) column vector; flatten it so
    # accuracy_score receives a proper 1-D label array (same metric value,
    # avoids sklearn's column-vector coercion warning).
    y_train_pred = np.round(pls_da_model.predict(X_train)).ravel()
    y_test_pred = np.round(pls_da_model.predict(X_test)).ravel()
    train_accuracy = accuracy_score(y_train, y_train_pred)
    test_accuracy = accuracy_score(y_test, y_test_pred)
    return train_accuracy, test_accuracy


class CNN(nn.Module):
    """1-D CNN classifier: one conv + max-pool stage feeding two dense layers.

    Input is expected as (batch, 1, input_dim). With the default
    kernel_size=3 and the fixed padding=1 the conv preserves the sequence
    length and the pool halves it, so fc1 sees
    conv_channels * (input_dim // 2) features.
    NOTE(review): padding=1 preserves length only for kernel_size=3; other
    kernel sizes would break the fc1 sizing — confirm before changing it.
    """

    def __init__(self, input_dim, num_classes, conv_channels=32, kernel_size=3, hidden_units=128):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=conv_channels, kernel_size=kernel_size, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(conv_channels * (input_dim // 2), hidden_units)
        self.fc2 = nn.Linear(hidden_units, num_classes)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        features = self.pool(torch.relu(self.conv1(x)))
        flat = features.view(-1, self.conv1.out_channels * features.size(2))
        return self.fc2(torch.relu(self.fc1(flat)))


def train_cnn(X_train, y_train, X_test, y_test, input_dim, num_classes, epochs=10, learning_rate=0.001):
    """Train the CNN with full-batch updates and report both accuracies.

    Each "epoch" is a single full-batch gradient step (no mini-batching).
    Runs on CUDA when available, otherwise CPU.

    Args:
        X_train, y_train: training features and integer labels.
        X_test, y_test: held-out features and integer labels.
        input_dim: number of features per sample.
        num_classes: number of target classes.
        epochs: number of full-batch updates.
        learning_rate: Adam learning rate.

    Returns:
        (train_accuracy, test_accuracy)
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = CNN(input_dim=input_dim, num_classes=num_classes).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Insert the channel axis Conv1d expects: (batch, 1, input_dim).
    train_x = torch.tensor(X_train, dtype=torch.float32).unsqueeze(1).to(device)
    train_y = torch.tensor(y_train, dtype=torch.long).to(device)
    test_x = torch.tensor(X_test, dtype=torch.float32).unsqueeze(1).to(device)
    test_y = torch.tensor(y_test, dtype=torch.long).to(device)

    model.train()
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = criterion(model(train_x), train_y)
        loss.backward()
        optimizer.step()

    model.eval()
    with torch.no_grad():
        def _accuracy(inputs, targets):
            # Argmax over class logits, then fraction of exact matches.
            predictions = model(inputs).argmax(dim=1)
            return (predictions == targets).sum().item() / targets.size(0)

        train_accuracy = _accuracy(train_x, train_y)
        test_accuracy = _accuracy(test_x, test_y)
    return train_accuracy, test_accuracy


def train_and_evaluate(X, y, model_params, n_runs=5):
    """Average train/test accuracy of four models over random splits.

    For each run the data is split 80/20 (no fixed seed, so runs differ),
    standardized on the training split, and SVM, BP-NN, PLS-DA, and CNN are
    each trained and scored. Hyper-parameters are taken from
    model_params['svm' | 'bp_nn' | 'pls_da' | 'cnn'] (missing keys fall back
    to each trainer's defaults).

    Returns:
        ((svm_train, svm_test), (bp_nn_train, bp_nn_test),
         (pls_da_train, pls_da_test), (cnn_train, cnn_test)),
        each entry averaged over n_runs.
    """
    model_names = ('svm', 'bp_nn', 'pls_da', 'cnn')
    scores = {name: {'train': [], 'test': []} for name in model_names}

    for _ in tqdm(range(n_runs)):
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

        # Keep the original training order (svm, bp_nn, pls_da, cnn) so any
        # shared RNG state is consumed in the same sequence.
        run_results = [
            ('svm', train_svm(X_train, y_train, X_test, y_test,
                              **model_params.get('svm', {}))),
            ('bp_nn', train_bp_nn(X_train, y_train, X_test, y_test,
                                  **model_params.get('bp_nn', {}))),
            ('pls_da', train_pls_da(X_train, y_train, X_test, y_test,
                                    **model_params.get('pls_da', {}))),
            ('cnn', train_cnn(X_train, y_train, X_test, y_test, X_train.shape[1],
                              len(np.unique(y)), **model_params.get('cnn', {}))),
        ]
        for name, (train_acc, test_acc) in run_results:
            scores[name]['train'].append(train_acc)
            scores[name]['test'].append(test_acc)

    averages = {name: (np.mean(entry['train']), np.mean(entry['test']))
                for name, entry in scores.items()}
    return averages['svm'], averages['bp_nn'], averages['pls_da'], averages['cnn']


def main(dataset_path):
    """End-to-end pipeline: augment the dataset three ways, check quality,
    then train and evaluate all models on every data combination.

    Intermediate augmented CSVs are written to ./data/<name>_{B,G,C}.csv and
    the full report to ./results/<name>_results.txt.

    Args:
        dataset_path: path to the source CSV (label in column '0').
    """
    # Manually tunable parameters
    target_samples_per_class = 200
    gan_epochs = 500
    gan_batch_size = 6
    gan_z_dim = 100
    ctgan_epochs = 200
    ctgan_batch_size = 16
    ctgan_pac = 8
    n_runs = 1

    # Derive the dataset name portably; the original split on '/' only,
    # which breaks on Windows-style paths.
    dataset_name = os.path.splitext(os.path.basename(dataset_path))[0]
    output_file = f'./results/{dataset_name}_results.txt'  # report destination

    # Make sure both output directories exist before writing into them
    # (the original assumed ./data already existed).
    os.makedirs('./results', exist_ok=True)
    os.makedirs('./data', exist_ok=True)

    # Open the report file for the whole run
    with open(output_file, 'w') as f:
        # Data resampling
        resample_data(dataset_path, target_samples_per_class, f'./data/{dataset_name}_B.csv')
        f.write("Data Resampling Completed.\n")
        evaluate_data_quality(dataset_path, f'./data/{dataset_name}_B.csv', file=f)

        # GAN data augmentation
        train_gan(dataset_path, f'./data/{dataset_name}_G.csv', gan_epochs, gan_batch_size, gan_z_dim)
        f.write("\nGAN Data Augmentation Completed.\n")
        evaluate_data_quality(dataset_path, f'./data/{dataset_name}_G.csv', file=f)

        # CTGAN data augmentation
        train_ctgan(dataset_path, f'./data/{dataset_name}_C.csv', ctgan_epochs, ctgan_batch_size, ctgan_pac)
        f.write("\nCTGAN Data Augmentation Completed.\n")
        evaluate_data_quality(dataset_path, f'./data/{dataset_name}_C.csv', file=f)

        # Model training and evaluation
        model_params = {
            'svm': {'kernel': 'rbf', 'C': 2.0},
            'bp_nn': {'hidden_layer_sizes': (32, 64), 'max_iter': 500},
            'pls_da': {'n_components': 10},
            'cnn': {'epochs': 100, 'learning_rate': 0.001}
        }

        X_Y, y_Y = load_data(dataset_path)
        X_B, y_B = load_data(f'./data/{dataset_name}_B.csv')
        X_G, y_G = load_data(f'./data/{dataset_name}_G.csv')
        X_C, y_C = load_data(f'./data/{dataset_name}_C.csv')

        (X_YB, y_YB), (X_YG, y_YG), (X_YC, y_YC), (X_YBG, y_YBG), (X_YBC, y_YBC) = create_combined_data(
            X_Y, X_B, X_G, X_C, y_Y, y_B, y_G, y_C)

        # Train and evaluate each combination. The loop variable is named
        # `combo_name` so it no longer shadows `dataset_name` (the original
        # reused `dataset_name` here, which only worked because all file
        # paths had been resolved before the loop ran).
        results = {}
        combos = zip(['Y', 'Y-B', 'Y-G', 'Y-C', 'Y-B-G', 'Y-B-C'],
                     [(X_Y, y_Y), (X_YB, y_YB), (X_YG, y_YG), (X_YC, y_YC),
                      (X_YBG, y_YBG), (X_YBC, y_YBC)])
        for combo_name, (X, y) in combos:
            (svm_train, svm_test), (bp_nn_train, bp_nn_test), (pls_da_train, pls_da_test), (
                cnn_train, cnn_test) = train_and_evaluate(X, y, model_params, n_runs=n_runs)
            results[combo_name] = {
                'SVM Train Accuracy': svm_train,
                'SVM Test Accuracy': svm_test,
                'PLS-DA Train Accuracy': pls_da_train,
                'PLS-DA Test Accuracy': pls_da_test,
                'BP Neural Network Train Accuracy': bp_nn_train,
                'BP Neural Network Test Accuracy': bp_nn_test,
                'CNN Train Accuracy': cnn_train,
                'CNN Test Accuracy': cnn_test
            }

        # Save the results to the report file
        f.write("\nModel Evaluation Results:\n")
        for combo_name, result in results.items():
            f.write(f"\nDataset: {combo_name}\n")
            for metric, value in result.items():
                f.write(f"{metric}: {value:.4f}\n")

        # Echo the report location to the console
        print(f"Results saved to {output_file}")


if __name__ == "__main__":
    # Specify the dataset path directly in code
    dataset_name = 'tablet.csv'  # can be changed to 'grapewine.csv' or 'tablet.csv'
    dataset_path = f'./data/{dataset_name}'  # dataset is expected under the ./data/ directory
    main(dataset_path)