import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from transformers import BertTokenizer, BertModel
import numpy as np

# Minimal map-style dataset pairing pre-built features with labels.
class CustomDataset(Dataset):
    """Wrap indexable feature and label collections for use with DataLoader.

    `features` and `labels` must be the same length and support integer
    indexing (lists, numpy arrays, or tensors all work).
    """

    def __init__(self, features, labels):
        self.features = features  # indexable collection of model inputs
        self.labels = labels      # indexable collection of targets

    def __len__(self):
        # One sample per label.
        return len(self.labels)

    def __getitem__(self, idx):
        # Return the (feature, label) pair at position `idx`.
        return self.features[idx], self.labels[idx]

# BERT encoder followed by a linear classification head.
class TransformerModel(nn.Module):
    """BERT-based sequence classifier producing `num_classes` logits."""

    def __init__(self, num_classes):
        super().__init__()
        # Pretrained encoder. NOTE: attribute names (`bert`, `fc`) are part
        # of the state_dict layout used by save/load — do not rename them.
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.fc = nn.Linear(self.bert.config.hidden_size, num_classes)

    def forward(self, x, attention_mask):
        """Encode token ids `x` under `attention_mask`; return class logits."""
        encoded = self.bert(x, attention_mask=attention_mask)
        # pooler_output is BERT's pooled [CLS] representation.
        pooled = encoded.pooler_output
        return self.fc(pooled)

# Initialize the BERT tokenizer (loads the pretrained 'bert-base-uncased' vocab).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# 数据准备
np.random.seed(42)
data = np.random.rand(1000, 6) * 5  # 生成随机数据，范围在 [0, 5) 之间
data = np.round(data, 2)  # 保留两位小数
labels = np.random.randint(0, 3, size=(1000,))  # 生成随机标签

# Convert numeric rows into BERT input ids plus attention masks.
def encode_data(data):
    """Tokenize each row (features joined by spaces) and pad to a common length.

    Returns a pair of parallel lists: padded token-id sequences and their
    attention masks (1 for real tokens, 0 for padding).
    """
    token_ids = []
    for row in data:
        text = " ".join(map(str, row))
        token_ids.append(tokenizer.encode(text, add_special_tokens=True))

    longest = max(len(ids) for ids in token_ids)  # pad everything to this length
    pad_id = tokenizer.pad_token_id

    padded, masks = [], []
    for ids in token_ids:
        n_pad = longest - len(ids)
        padded.append(ids + [pad_id] * n_pad)
        masks.append([1] * len(ids) + [0] * n_pad)
    return padded, masks

encoded_data, attention_masks = encode_data(data)

# BUG FIX: the original sliced the module-level `attention_masks` list inside
# the loops (`attention_masks[:len(features)]` / `[800:800+len(features)]`),
# which paired every (shuffled) batch with the masks of the FIRST rows instead
# of the masks of the samples actually in the batch. Stack ids and masks into
# one per-sample tensor of shape (N, 2, seq_len) so each sample carries its
# own mask through the DataLoader.
ids_tensor = torch.LongTensor(encoded_data)
mask_tensor = torch.LongTensor(attention_masks)
features_tensor = torch.stack([ids_tensor, mask_tensor], dim=1)
labels_tensor = torch.LongTensor(labels)

# 80/20 split: first 800 samples train, remaining 200 test.
train_dataset = CustomDataset(features_tensor[:800], labels_tensor[:800])
test_dataset = CustomDataset(features_tensor[800:], labels_tensor[800:])
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)

# Model, loss, and optimizer; lr=1e-5 is the usual BERT fine-tuning range.
model = TransformerModel(num_classes=3)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-5)

# Train for 10 epochs. Loop variables are named `batch_*` so they no longer
# shadow the module-level `labels` array.
for epoch in range(10):
    model.train()
    epoch_loss = 0.0
    num_batches = 0
    for batch_features, batch_labels in train_loader:
        optimizer.zero_grad()
        input_ids = batch_features[:, 0]       # (batch, seq_len) token ids
        attention_mask = batch_features[:, 1]  # (batch, seq_len) 1=token, 0=pad
        outputs = model(input_ids, attention_mask)
        loss = criterion(outputs, batch_labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        num_batches += 1
    # Report the mean loss over the epoch, not just the last batch's loss.
    print(f'Epoch {epoch + 1}, Loss: {epoch_loss / num_batches}')

# Persist the trained weights.
torch.save(model.state_dict(), 'transformer_model.pth')

# Reload the weights (round-trip check) and switch to evaluation mode.
model.load_state_dict(torch.load('transformer_model.pth'))
model.eval()

# Evaluate accuracy on the held-out test set.
correct = 0
total = 0

with torch.no_grad():
    for batch_features, batch_labels in test_loader:
        input_ids = batch_features[:, 0]
        attention_mask = batch_features[:, 1]  # masks now match this batch's samples
        outputs = model(input_ids, attention_mask)
        _, predicted = torch.max(outputs, 1)
        total += batch_labels.size(0)
        correct += (predicted == batch_labels).sum().item()

accuracy = correct / total
print(f'Accuracy: {accuracy * 100:.2f}%')