import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.neighbors import KNeighborsClassifier

# ==========================
# 1. 数据集加载与预处理（适配BSM1_WWTP_data）
# ==========================
class BSM1Dataset(Dataset):
    """Sliding-window dataset over a BSM1 WWTP CSV file.

    Each sample is a standardized window of ``seq_len`` consecutive feature
    rows paired with the integer class label of the row at ``index``.
    The last CSV column is the label; all preceding columns are features.
    """

    def __init__(self, csv_path, seq_len=10):
        frame = pd.read_csv(csv_path)
        # Labels come from the final column; features from everything else.
        self.labels = frame.iloc[:, -1].values.astype(int)
        features = frame.iloc[:, :-1].values.astype(np.float32)
        self.seq_len = seq_len
        # Keep the fitted scaler so the same standardization can be reused.
        self.scaler = StandardScaler()
        self.data = self.scaler.fit_transform(features)

    def __getitem__(self, index):
        # NOTE(review): rows earlier than seq_len fall back to a
        # forward-looking window, which peeks at future samples relative to
        # the label at `index` — confirm this look-ahead is intended.
        if index < self.seq_len:
            window = self.data[index:index + self.seq_len]
        else:
            window = self.data[index - self.seq_len:index]
        return torch.from_numpy(window).float(), self.labels[index]

    def __len__(self):
        return len(self.labels)

# ==========================
# 2. 多头自注意力机制
# ==========================
class MultiHeadSelfAttention(nn.Module):
    """Self-attention wrapper around ``nn.MultiheadAttention``.

    Queries, keys, and values are all the same input tensor; only the
    attended output is returned and the attention weights are discarded.
    Expects input of shape [batch, seq_len, embed_dim] (batch_first=True).
    """

    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)

    def forward(self, x):
        attended, _weights = self.attn(x, x, x)
        return attended

# ==========================
# 3. Transformer块
# ==========================
class TransformerBlock(nn.Module):
    """Post-norm Transformer encoder block: self-attention then feed-forward.

    Both sub-layers use a residual connection followed by LayerNorm
    (the original "post-LN" ordering), with dropout on each sub-layer output.
    """

    def __init__(self, embed_dim, num_heads, hidden_dim, dropout=0.1):
        super().__init__()
        self.attn = MultiHeadSelfAttention(embed_dim, num_heads)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, embed_dim),
        )
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # Residual around the attention sub-layer, normalized afterwards.
        attended = self.dropout(self.attn(x))
        x = self.norm1(x + attended)
        # Residual around the position-wise feed-forward sub-layer.
        transformed = self.dropout(self.ff(x))
        return self.norm2(x + transformed)

# ==========================
# 4. Transformer模型
# ==========================
class Transformer(nn.Module):
    """Transformer encoder classifier for fixed-length sensor windows.

    Input:  float tensor of shape [batch, seq_len, input_dim].
    Output: raw class logits of shape [batch, num_classes].

    NOTE(review): no positional encoding is added, so the encoder is
    permutation-equivariant over time steps before the mean pooling —
    confirm this is intended for this dataset.
    """

    def __init__(self, input_dim, seq_len, embed_dim=16, num_heads=2,
                 num_layers=2, hidden_dim=32, dropout=0.2, num_classes=11):
        super().__init__()
        self.input_dim = input_dim
        self.seq_len = seq_len
        # Small embedding dimension keeps the model capacity low.
        self.embedding = nn.Linear(input_dim, embed_dim)
        self.encoder_layers = nn.Sequential(
            *[TransformerBlock(embed_dim, num_heads, hidden_dim, dropout)
              for _ in range(num_layers)]
        )
        # Removed the unused parameterless `self.pool` (AdaptiveAvgPool1d):
        # forward pools with .mean(dim=1) instead, and dropping a module with
        # no parameters does not change existing checkpoints.
        self.classifier = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        """Embed, encode, mean-pool over time, and classify."""
        x_embedded = self.embedding(x)             # [batch, seq_len, embed_dim]
        encoded = self.encoder_layers(x_embedded)  # [batch, seq_len, embed_dim]
        pooled = encoded.mean(dim=1)               # global average over time
        return self.classifier(pooled)

def knn_accuracy(train_embeddings, train_labels, val_embeddings, val_labels, k=5):
    """Fit a k-nearest-neighbor classifier on the training embeddings and
    score it on both splits.

    Returns a ``(train_accuracy, val_accuracy)`` pair of mean accuracies.
    """
    classifier = KNeighborsClassifier(n_neighbors=k).fit(train_embeddings, train_labels)
    return (
        classifier.score(train_embeddings, train_labels),
        classifier.score(val_embeddings, val_labels),
    )

def main():
    """Train the Transformer classifier on the BSM1 WWTP dataset.

    Saves the best-validation-accuracy checkpoint plus the per-epoch loss
    and accuracy curves under ``model/``, then plots the curves.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size = 512
    seq_len = 10
    # Load the training CSV as standardized sliding windows.
    train_data = BSM1Dataset('BSM1_WWTP_data/train_data.csv', seq_len=seq_len)

    # Hold out 20% of the training windows for validation.
    train_size = int(len(train_data) * 0.8)
    val_size = len(train_data) - train_size
    train_dataset, val_dataset = torch.utils.data.random_split(train_data, [train_size, val_size])

    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    input_dim = train_data.data.shape[1]
    num_classes = len(np.unique(train_data.labels))
    model = Transformer(input_dim=input_dim, seq_len=seq_len, num_classes=num_classes).to(device)
    # Learning rate and weight decay tuned to keep accuracy in the 80-90% band.
    optimizer = optim.Adam(model.parameters(), lr=5e-4, weight_decay=5e-5)
    criterion = nn.CrossEntropyLoss()

    # BUGFIX: create the checkpoint directory BEFORE training. Previously it
    # was created only after the loop, so the in-loop torch.save of the best
    # model crashed on a fresh checkout with no 'model/' directory.
    os.makedirs('model', exist_ok=True)

    loss_val = []
    acc_val = []
    best_val_acc = 0.0
    for epoch in range(100):
        model.train()
        for x, y in train_loader:
            x = x.to(device)
            y = y.to(device).long()
            out = model(x)
            loss = criterion(out, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # Validation pass (no gradients).
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        with torch.no_grad():
            for x, y in val_loader:
                x = x.to(device)
                y = y.to(device).long()
                out = model(x)
                loss = criterion(out, y)
                val_loss += loss.item()
                _, predicted = torch.max(out.data, 1)
                val_total += y.size(0)
                val_correct += (predicted == y).sum().item()

        val_acc = val_correct / val_total
        print(f"Epoch {epoch}: val acc={val_acc:.4f}, loss={val_loss/len(val_loader):.4f}")

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save(model.state_dict(), 'model/model_Transformer_BSM1.pth')

        loss_val.append(val_loss / len(val_loader))
        acc_val.append(val_acc)

    # Persist training curves. BUGFIX: do NOT torch.save the last-epoch
    # weights here — the original code overwrote the best-validation
    # checkpoint saved inside the loop, silently discarding the best model.
    np.save('model/acc_val_Transformer_BSM1.npy', np.array(acc_val))
    np.save('model/loss_Transformer_BSM1.npy', np.array(loss_val))
    plt.figure()
    plt.plot(loss_val)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.figure()
    plt.plot(acc_val)
    plt.xlabel('epoch')
    plt.ylabel('acc')
    plt.show()

# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()