import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.cross_decomposition import PLSRegression
from tqdm import tqdm
import warnings
import os

# Suppress all Python warnings globally for cleaner console output.
# NOTE(review): this also hides potentially useful warnings (e.g. model
# convergence problems) -- consider narrowing the filter.
warnings.filterwarnings("ignore")

# Load data from a CSV file
def load_data(file_path):
    """Read a CSV file and split it into features and labels.

    The first column is treated as the label vector, every remaining
    column as a feature. The first CSV row is consumed as the header.

    Returns:
        (features, labels) as numpy arrays.
    """
    frame = pd.read_csv(file_path)
    labels = frame.iloc[:, 0].values
    features = frame.iloc[:, 1:].values
    return features, labels

# Build combined (stacked) datasets
def create_combined_data(X_Y, X_B, X_G, X_C, y_Y, y_B, y_G, y_C):
    """Stack the base dataset Y with the B/G/C augmentation sets.

    Returns five (X, y) pairs, in order: Y+B, Y+G, Y+C, Y+B+G, Y+B+C.
    Feature matrices are stacked row-wise; label vectors are concatenated
    in the same order.
    """
    def _stack(feature_parts, label_parts):
        # Row-stack features and concatenate labels in matching order.
        return np.vstack(feature_parts), np.hstack(label_parts)

    pair_yb = _stack((X_Y, X_B), (y_Y, y_B))
    pair_yg = _stack((X_Y, X_G), (y_Y, y_G))
    pair_yc = _stack((X_Y, X_C), (y_Y, y_C))
    triple_ybg = _stack((X_Y, X_B, X_G), (y_Y, y_B, y_G))
    triple_ybc = _stack((X_Y, X_B, X_C), (y_Y, y_B, y_C))
    return pair_yb, pair_yg, pair_yc, triple_ybg, triple_ybc

# Kennard-Stone (KS) train/test split
def ks_split(X, y, test_size=0.2):
    """Kennard-Stone train/test split (deterministic for a given X).

    The test set is built greedily: seed with the sample whose total
    distance to all others is largest, then repeatedly add the sample
    whose distance to its nearest already-selected sample is maximal.

    Args:
        X: feature matrix of shape (n_samples, n_features).
        y: label vector of shape (n_samples,).
        test_size: fraction of samples to place in the test set.

    Returns:
        X_train, X_test, y_train, y_test
    """
    n_samples = X.shape[0]
    n_test = int(n_samples * test_size)

    # Guard: with very small data, int(n * test_size) can be 0; return an
    # empty test set instead of silently selecting one sample anyway
    # (the original seeded one test sample even when n_test == 0).
    if n_test <= 0:
        return X, X[:0], y, y[:0]

    # Full pairwise Euclidean distance matrix via broadcasting
    # (replaces an O(n^2) pure-Python double loop).
    distances = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=2)

    # Seed: the sample with the largest total distance to all others.
    first = int(np.argmax(distances.sum(axis=1)))
    selected = [first]

    # min_dist[i] = distance from i to its nearest selected sample;
    # -inf marks already-selected samples so they are never re-picked.
    min_dist = distances[:, first].copy()
    min_dist[first] = -np.inf
    for _ in range(1, n_test):
        nxt = int(np.argmax(min_dist))
        selected.append(nxt)
        # Fold the newly selected sample into the running minimum.
        np.minimum(min_dist, distances[:, nxt], out=min_dist)
        min_dist[nxt] = -np.inf

    # Everything not selected for testing becomes training data.
    test_mask = np.zeros(n_samples, dtype=bool)
    test_mask[selected] = True
    X_train, X_test = X[~test_mask], X[test_mask]
    y_train, y_test = y[~test_mask], y[test_mask]

    return X_train, X_test, y_train, y_test

# Train an SVM model
def train_svm(X_train, y_train, X_test, y_test, kernel="linear", C=1.0):
    """Fit an SVC on the training data and score both splits.

    Returns:
        (train_accuracy, test_accuracy)
    """
    model = SVC(kernel=kernel, C=C)
    model.fit(X_train, y_train)
    train_accuracy = accuracy_score(y_train, model.predict(X_train))
    test_accuracy = accuracy_score(y_test, model.predict(X_test))
    return train_accuracy, test_accuracy

# Train a BP (backpropagation) neural network
def train_bp_nn(X_train, y_train, X_test, y_test, hidden_layer_sizes=(100,), max_iter=500):
    """Fit an MLP classifier (BP neural network) and score both splits.

    Returns:
        (train_accuracy, test_accuracy)
    """
    model = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes, max_iter=max_iter)
    model.fit(X_train, y_train)
    train_accuracy = accuracy_score(y_train, model.predict(X_train))
    test_accuracy = accuracy_score(y_test, model.predict(X_test))
    return train_accuracy, test_accuracy

# Train a PLS-DA model
def train_pls_da(X_train, y_train, X_test, y_test, n_components=10):
    """PLS-DA via PLSRegression on integer-coded labels.

    Continuous regression outputs are rounded to the nearest integer and
    compared to the true labels. Predictions are flattened with ravel()
    because PLSRegression.predict can return a (n_samples, 1) column
    vector, which accuracy_score does not accept as a 1-D label array.

    Returns:
        (train_accuracy, test_accuracy)
    """
    pls_da_model = PLSRegression(n_components=n_components)
    pls_da_model.fit(X_train, y_train)
    # NOTE(review): rounding can land outside the actual label set for
    # extreme predictions; such samples simply count as misclassified.
    y_train_pred = np.round(pls_da_model.predict(X_train)).ravel()
    y_test_pred = np.round(pls_da_model.predict(X_test)).ravel()
    train_accuracy = accuracy_score(y_train, y_train_pred)
    test_accuracy = accuracy_score(y_test, y_test_pred)
    return train_accuracy, test_accuracy

# Define the CNN model
class CNN(nn.Module):
    """1-D CNN classifier: conv -> relu -> maxpool -> fc -> relu -> fc.

    Input is expected as (batch, 1, input_dim); output is raw class
    logits of shape (batch, num_classes).
    """

    def __init__(self, input_dim, num_classes, conv_channels=32, kernel_size=3, hidden_units=128):
        super().__init__()
        self.conv1 = nn.Conv1d(1, conv_channels, kernel_size, padding=1)
        self.pool = nn.MaxPool1d(2, 2)
        # NOTE(review): the flattened size assumes the conv preserves the
        # sequence length, which holds for kernel_size=3 with padding=1.
        self.fc1 = nn.Linear(conv_channels * (input_dim // 2), hidden_units)
        self.fc2 = nn.Linear(hidden_units, num_classes)

    def forward(self, x):
        features = self.pool(torch.relu(self.conv1(x)))
        flat = features.reshape(features.size(0), -1)
        hidden = torch.relu(self.fc1(flat))
        return self.fc2(hidden)

# Train the CNN model
def train_cnn(X_train, y_train, X_test, y_test, input_dim, num_classes, epochs=10, learning_rate=0.001):
    """Train the CNN with full-batch Adam updates and score both splits.

    The whole training set is used as a single batch per epoch (no
    DataLoader). Runs on GPU when available, otherwise CPU.

    Returns:
        (train_accuracy, test_accuracy)
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def to_tensors(features, labels):
        # Add the channel dimension expected by Conv1d: (batch, 1, input_dim).
        xs = torch.tensor(features, dtype=torch.float32).unsqueeze(1).to(device)
        ys = torch.tensor(labels, dtype=torch.long).to(device)
        return xs, ys

    model = CNN(input_dim=input_dim, num_classes=num_classes).to(device)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    train_x, train_y = to_tensors(X_train, y_train)
    test_x, test_y = to_tensors(X_test, y_test)

    model.train()
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = loss_fn(model(train_x), train_y)
        loss.backward()
        optimizer.step()

    def accuracy(xs, ys):
        preds = model(xs).argmax(dim=1)
        return (preds == ys).sum().item() / ys.size(0)

    model.eval()
    with torch.no_grad():
        train_accuracy = accuracy(train_x, train_y)
        test_accuracy = accuracy(test_x, test_y)
    return train_accuracy, test_accuracy

# Train and evaluate all models
def train_and_evaluate(X, y, model_params, n_runs=5):
    """Evaluate SVM, BP-NN, PLS-DA and CNN over repeated KS splits.

    Each run: split with ks_split (80/20), standardize, then train and
    score every model family.

    NOTE(review): ks_split is deterministic, so every run uses the same
    split; only model randomness varies between runs -- confirm whether
    n_runs > 1 is intentional.

    Returns:
        ((svm_train, svm_test), (bp_nn_train, bp_nn_test),
         (pls_da_train, pls_da_test), (cnn_train, cnn_test))
        where each entry is the accuracy averaged over n_runs.
    """
    model_order = ("svm", "bp_nn", "pls_da", "cnn")
    scores = {name: {"train": [], "test": []} for name in model_order}

    for _ in tqdm(range(n_runs)):
        X_train, X_test, y_train, y_test = ks_split(X, y, test_size=0.2)

        # Fit the scaler on the training split only (no leakage into test).
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

        run_results = {
            "svm": train_svm(X_train, y_train, X_test, y_test, **model_params.get("svm", {})),
            "bp_nn": train_bp_nn(X_train, y_train, X_test, y_test, **model_params.get("bp_nn", {})),
            "pls_da": train_pls_da(X_train, y_train, X_test, y_test, **model_params.get("pls_da", {})),
            "cnn": train_cnn(
                X_train, y_train, X_test, y_test,
                X_train.shape[1], len(np.unique(y)),
                **model_params.get("cnn", {}),
            ),
        }
        for name, (train_acc, test_acc) in run_results.items():
            scores[name]["train"].append(train_acc)
            scores[name]["test"].append(test_acc)

    return tuple(
        (np.mean(scores[name]["train"]), np.mean(scores[name]["test"]))
        for name in model_order
    )

def main(dataset_path, n_runs=5):
    """Run the full benchmark for one base dataset plus its augmentations.

    Expects companion files ``<name>_B.csv``, ``<name>_G.csv`` and
    ``<name>_C.csv`` under ./data/ next to the base dataset. Results are
    written to ./results/<name>_results.txt.

    Args:
        dataset_path: path to the base (Y) CSV file.
        n_runs: number of evaluation repetitions per data combination.
    """
    # Hyper-parameters for each model family.
    model_params = {
        "svm": {"kernel": "rbf", "C": 2.0},
        "bp_nn": {"hidden_layer_sizes": (32, 64), "max_iter": 500},
        "pls_da": {"n_components": 10},
        "cnn": {"epochs": 100, "learning_rate": 0.001},
    }

    # Derive the dataset stem robustly: os.path handles Windows separators
    # and dots inside directory names, unlike str.split("/")/split(".").
    dataset_name = os.path.splitext(os.path.basename(dataset_path))[0]
    output_file = f"./results/{dataset_name}_results.txt"

    # Load the base dataset and its three augmentation sets.
    X_Y, y_Y = load_data(dataset_path)
    X_B, y_B = load_data(f"./data/{dataset_name}_B.csv")
    X_G, y_G = load_data(f"./data/{dataset_name}_G.csv")
    X_C, y_C = load_data(f"./data/{dataset_name}_C.csv")

    # Build the combined datasets.
    (X_YB, y_YB), (X_YG, y_YG), (X_YC, y_YC), (X_YBG, y_YBG), (X_YBC, y_YBC) = create_combined_data(X_Y, X_B, X_G, X_C, y_Y, y_B, y_G, y_C)

    # Train and evaluate every model family on every data combination.
    # (Loop variable renamed from `dataset_name` so it no longer shadows
    # the dataset stem computed above.)
    results = {}
    for combo_name, (X, y) in zip(
        ["Y", "Y-B", "Y-G", "Y-C", "Y-B-G", "Y-B-C"],
        [(X_Y, y_Y), (X_YB, y_YB), (X_YG, y_YG), (X_YC, y_YC), (X_YBG, y_YBG), (X_YBC, y_YBC)],
    ):
        (
            (svm_train, svm_test),
            (bp_nn_train, bp_nn_test),
            (pls_da_train, pls_da_test),
            (cnn_train, cnn_test),
        ) = train_and_evaluate(X, y, model_params, n_runs=n_runs)
        results[combo_name] = {
            "SVM Train Accuracy": svm_train,
            "SVM Test Accuracy": svm_test,
            "PLS-DA Train Accuracy": pls_da_train,
            "PLS-DA Test Accuracy": pls_da_test,
            "BP Neural Network Train Accuracy": bp_nn_train,
            "BP Neural Network Test Accuracy": bp_nn_test,
            "CNN Train Accuracy": cnn_train,
            "CNN Test Accuracy": cnn_test,
        }

    # Persist the results to a text file.
    os.makedirs("./results", exist_ok=True)
    with open(output_file, "w") as f:
        f.write("\nModel Evaluation Results:\n")
        for combo_name, result in results.items():
            f.write(f"\nDataset: {combo_name}\n")
            for metric, value in result.items():
                f.write(f"{metric}: {value:.4f}\n")

    # Report where the results were written.
    print(f"Results saved to {output_file}")

if __name__ == "__main__":
    # 直接在代码中指定数据集路径和 n_runs 参数
    dataset_name = "gancao.csv"  # 可以改为 'grapewine.csv' 或 'tablet.csv'
    dataset_path = f"./data/{dataset_name}"  # 数据集路径指向 ./data/ 目录
    n_runs = 1  # 设置 n_runs 参数
    main(dataset_path, n_runs)