import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
import pandas as pd
from base import getEnvPath

# ========================================
# 1. 模拟数据生成（鸢尾花数据集格式）
# ========================================
# 生成4个特征、3个类别的模拟数据 (150个样本)
# X, y = make_classification(
#     n_samples=300,
#     n_features=4,
#     n_classes=3,
#     n_clusters_per_class=1,
#     random_state=42
# )
#
# # 分割数据集：训练集60%、验证集20%、测试集20%
# X_train, X_temp, y_train, y_temp = train_test_split(X, y, test_size=0.4, random_state=42)
# X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.5, random_state=42)

# 模拟数据保存到本地
# path = getEnvPath()
# X_train_csv = path + "data/out/X_train.csv"
# y_train_csv = path + "data/out/y_train.csv"
# X_val_csv = path + "data/out/X_val.csv"
# y_val_csv = path + "data/out/Y_val.csv"
# X_test_csv = path + "data/out/X_test.csv"
# y_test_csv = path + "data/out/Y_test.csv"
# Resolve the environment-specific base directory (helper from the local
# `base` module — not shown here).
path = getEnvPath()

# Pre-generated train/val/test split files under <path>/data/.
# NOTE(review): feature files use "X_*" and label files "Y_*" casing —
# keep these names in sync with whatever script wrote the files.
X_train_csv = f"{path}data/X_train.csv"
y_train_csv = f"{path}data/Y_train.csv"
X_val_csv = f"{path}data/X_val.csv"
y_val_csv = f"{path}data/Y_val.csv"
X_test_csv = f"{path}data/X_test.csv"
y_test_csv = f"{path}data/Y_test.csv"

# 保存模拟数据
# pd.DataFrame(X_train).to_csv(X_train_csv,index=None)
# pd.DataFrame(y_train).to_csv(y_train_csv, index=None)
# pd.DataFrame(X_val).to_csv(X_val_csv, index=None)
# pd.DataFrame(y_val).to_csv(y_val_csv, index=None)
# pd.DataFrame(X_test).to_csv(X_test_csv, index=None)
# pd.DataFrame(y_test).to_csv(y_test_csv, index=None)


# Load the three splits from CSV.
#
# BUG FIX: the original passed skiprows=1. These files are written by
# DataFrame.to_csv(..., index=None) (see the commented-out save code above),
# so their first line is a header row. read_csv already consumes that line
# as the header by default; skiprows=1 made pandas skip the header and then
# treat the FIRST DATA ROW as the header, silently dropping one sample from
# every split. Reading with the default header handling keeps all rows.
# (Assumes each CSV carries exactly one header row — confirm against the
# files on disk if they were produced by a different writer.)
#
# Labels are round()-ed before the int cast to tolerate values stored as
# floats (e.g. "1.0"), then flattened from (n, 1) to (n,).
X_train = pd.read_csv(X_train_csv).values
y_train = pd.read_csv(y_train_csv).values.round().astype("int32").flatten()
X_val = pd.read_csv(X_val_csv).values
y_val = pd.read_csv(y_val_csv).values.round().astype("int32").flatten()
X_test = pd.read_csv(X_test_csv).values
y_test = pd.read_csv(y_test_csv).values.round().astype("int32").flatten()

# 数据标准化（使用训练集参数）
# scaler = StandardScaler()
# X_train = scaler.fit_transform(X_train)
# X_val = scaler.transform(X_val)
# X_test = scaler.transform(X_test)

# Wrap each split as (float32 features, int64 labels) tensors — int64 is
# what nn.CrossEntropyLoss expects for class targets — and expose them
# through batched DataLoaders. Only the training loader shuffles.
batch_size = 16

X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)

X_val_tensor = torch.tensor(X_val, dtype=torch.float32)
y_val_tensor = torch.tensor(y_val, dtype=torch.long)
val_dataset = TensorDataset(X_val_tensor, y_val_tensor)
val_loader = DataLoader(val_dataset, batch_size=batch_size)

X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
y_test_tensor = torch.tensor(y_test, dtype=torch.long)
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)
test_loader = DataLoader(test_dataset, batch_size=batch_size)


# ========================================
# 2. 定义MLP模型
# ========================================
class IrisClassifier(nn.Module):
    """Two-layer MLP classifier: Linear -> ReLU -> Dropout(0.2) -> Linear.

    Returns raw logits (no softmax) — pair with ``nn.CrossEntropyLoss``.

    NOTE(review): despite the name, the defaults (input_size=78,
    num_classes=30) do not match the classic 4-feature / 3-class iris
    set described in the comments above — presumably they match the CSV
    data this script actually loads; confirm against the data files.
    """

    def __init__(self, input_size=78, hidden_size=128, num_classes=30):
        super(IrisClassifier, self).__init__()
        # Submodule names and creation order are kept identical to the
        # original so previously saved state_dicts remain loadable.
        self.layer1 = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Dropout(0.2),
        )
        self.layer2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Map a (batch, input_size) tensor to (batch, num_classes) logits."""
        return self.layer2(self.layer1(x))


# ----------------------------------------
# Model initialisation and training configuration
# ----------------------------------------
# Run on GPU when one is available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
model = IrisClassifier().to(device)
print(model)

# Cross-entropy over raw logits; Adam with the default-ish 1e-3 LR.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# Halve-on-plateau style scheduling: shrink the LR when the monitored
# (minimised) validation loss stops improving for 3 epochs.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=3)

# ----------------------------------------
# Training and validation loop
# ----------------------------------------
best_val_loss = float('inf')
num_epochs = 100

for epoch in range(num_epochs):
    # --- optimisation pass over the training set ---
    model.train()
    running_train = 0.0
    for xb, yb in train_loader:
        xb, yb = xb.to(device), yb.to(device)

        optimizer.zero_grad()
        batch_loss = criterion(model(xb), yb)
        batch_loss.backward()
        optimizer.step()

        # Weight by batch size so the epoch figure is a per-sample average.
        running_train += batch_loss.item() * xb.size(0)

    # --- scoring pass over the validation set (no gradients) ---
    model.eval()
    running_val = 0.0
    n_correct = 0
    with torch.no_grad():
        for xb, yb in val_loader:
            xb, yb = xb.to(device), yb.to(device)
            logits = model(xb)
            running_val += criterion(logits, yb).item() * xb.size(0)

            _, predicted = logits.max(1)
            n_correct += (predicted == yb).sum().item()

    # Per-sample averages and accuracy for this epoch.
    train_loss = running_train / len(train_loader.dataset)
    val_loss = running_val / len(val_loader.dataset)
    val_acc = 100 * n_correct / len(val_loader.dataset)

    # Anneal the learning rate when validation loss plateaus.
    scheduler.step(val_loss)

    # Checkpoint the best-so-far weights (by validation loss).
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        torch.save(model.state_dict(), 'best_model.pth')

    # Progress report every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch + 1}/{num_epochs}]')
        print(f'  Train Loss: {train_loss:.4f}')
        print(f'  Val Loss: {val_loss:.4f} | Val Acc: {val_acc:.2f}%')

# ----------------------------------------
# Final evaluation on the held-out test set
# ----------------------------------------
# Restore the checkpoint with the lowest validation loss.
model.load_state_dict(torch.load('best_model.pth', weights_only=True))

model.eval()
n_correct = 0
with torch.no_grad():
    for xb, yb in test_loader:
        xb, yb = xb.to(device), yb.to(device)
        _, predicted = model(xb).max(1)
        n_correct += (predicted == yb).sum().item()

test_acc = 100 * n_correct / len(test_loader.dataset)
print(f'\nFinal Test Accuracy: {test_acc:.2f}%')

# ========================================
# 6. 预测新数据
# ========================================
# def predict_new_data(model, sample):
#     model.eval()
#     with torch.no_grad():
#         sample_tensor = torch.tensor(sample, dtype=torch.float32).to(device)
#         output = model(sample_tensor)
#         probabilities = torch.nn.functional.softmax(output, dim=1)[0]
#         _, predicted_class = torch.max(output, 1)
#     return predicted_class.item(), probabilities.cpu().numpy()
#
#
# # 示例预测（新数据需要先标准化）
# new_sample = np.array([[5.1, 3.5, 1.4, 0.2]])  # 模拟鸢尾花数据
# new_sample_scaled = new_sample  # 使用相同的scaler
#
# pred_class, probs = predict_new_data(model, new_sample_scaled)
# print(f"\nPredicted Class: {pred_class}")
# print("Class Probabilities:")
# for i, prob in enumerate(probs):
#     print(f"  Class {i}: {prob * 100:.2f}%")
