import json
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MultiLabelBinarizer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from tqdm import tqdm
import nltk
import numpy as np

nltk.download("wordnet")
nltk.download("stopwords")

# Data file paths (hard-coded Windows absolute paths).
label_path = r"D:\研一\课程\自然语言处理\data\data\label_list.txt"
train_path = r"D:\研一\课程\自然语言处理\data\data\train.json"
valid_path = r"D:\研一\课程\自然语言处理\data\data\valid.json"
test_path = r"D:\研一\课程\自然语言处理\data\data\test.txt"
output_path = r"D:/桌面文件/result/distilbert.txt"

# Load the label vocabulary (one label per line) and build index maps in
# both directions for encoding targets and decoding predictions.
with open(label_path, "r", encoding="utf-8") as f:
    labels = [line.strip() for line in f.readlines()]
label_to_idx = {label: idx for idx, label in enumerate(labels)}
idx_to_label = {idx: label for label, idx in label_to_idx.items()}

# Load a JSON-lines file: one JSON object per line.
def load_json_data(file_path):
    """Read a JSON-lines file and return its records as a list of objects."""
    records = []
    with open(file_path, "r", encoding="utf-8") as fh:
        for raw_line in fh:
            records.append(json.loads(raw_line.strip()))
    return records
train_data = load_json_data(train_path)
valid_data = load_json_data(valid_path)
# Test split is plain text: one document per line, no labels.
with open(test_path, "r", encoding="utf-8") as f:
    test_data = [line.strip() for line in f.readlines()]

# English stop-word set and WordNet lemmatizer used by preprocess_text.
stop_words = set(stopwords.words("english"))
lemmatizer = WordNetLemmatizer()

def preprocess_text(text):
    """Lowercase, drop English stop words, and lemmatize each whitespace token."""
    kept = []
    for token in text.split():
        lowered = token.lower()
        if lowered in stop_words:
            continue
        kept.append(lemmatizer.lemmatize(lowered))
    return " ".join(kept)

# Apply the same text preprocessing pipeline to all three splits.
train_texts = [preprocess_text(item["text"]) for item in train_data]
valid_texts = [preprocess_text(item["text"]) for item in valid_data]
test_texts = [preprocess_text(text) for text in test_data]

# Fit TF-IDF on the training split only; valid/test reuse that vocabulary.
# NOTE(review): .toarray() densifies the sparse matrices — memory-heavy for
# large corpora; consider keeping them sparse.
vectorizer = TfidfVectorizer(max_features=10000)  # cap vocabulary at 10,000 terms
train_tfidf = vectorizer.fit_transform(train_texts).toarray()
valid_tfidf = vectorizer.transform(valid_texts).toarray()
test_tfidf = vectorizer.transform(test_texts).toarray()

# Encode each sample's label list as a fixed-order multi-hot 0/1 vector.
mlb = MultiLabelBinarizer(classes=labels)
train_labels = mlb.fit_transform([item["label"] for item in train_data])
valid_labels = mlb.transform([item["label"] for item in valid_data])

# Dataset wrapping dense TF-IDF feature rows (and, optionally, label rows).
class TfidfDataset(Dataset):
    """Torch Dataset over dense TF-IDF features, optionally paired with labels."""

    def __init__(self, features, labels=None):
        self.features = features  # array-like of dense feature rows
        self.labels = labels      # optional array-like of multi-hot label rows

    def __len__(self):
        return len(self.features)

    def __getitem__(self, idx):
        x = torch.tensor(self.features[idx], dtype=torch.float32)
        if self.labels is None:
            # Unlabeled (test) mode: yield features only.
            return x
        y = torch.tensor(self.labels[idx], dtype=torch.float32)
        return x, y

# Wrap the dense matrices in Datasets/DataLoaders; only training is shuffled.
train_dataset = TfidfDataset(train_tfidf, train_labels)
valid_dataset = TfidfDataset(valid_tfidf, valid_labels)
test_dataset = TfidfDataset(test_tfidf)  # no labels: yields features only

train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=32)
test_loader = DataLoader(test_dataset, batch_size=32)

# Model definition: a three-layer MLP over TF-IDF features.
# NOTE(review): despite the name, no DistilBERT is used anywhere here.
class TFIDFDistilBertClassifier(torch.nn.Module):
    """MLP (input_dim -> 512 -> 256 -> num_labels) with ReLU, dropout, and a
    sigmoid head producing per-label probabilities for multi-label output."""

    def __init__(self, input_dim, num_labels):
        super().__init__()
        self.fc1 = torch.nn.Linear(input_dim, 512)
        self.fc2 = torch.nn.Linear(512, 256)
        self.fc3 = torch.nn.Linear(256, num_labels)
        self.relu = torch.nn.ReLU()
        self.dropout = torch.nn.Dropout(0.3)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        hidden = self.dropout(self.relu(self.fc1(x)))
        hidden = self.dropout(self.relu(self.fc2(hidden)))
        # Independent per-label probabilities in [0, 1].
        return self.sigmoid(self.fc3(hidden))

# Run on GPU when available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = TFIDFDistilBertClassifier(input_dim=10000, num_labels=len(labels)).to(device)

# Optimizer and loss. BCELoss pairs with the sigmoid the model applies itself.
# NOTE(review): lr=2e-5 is a transformer fine-tuning rate; for a from-scratch
# MLP something like 1e-3 is more typical — confirm intent.
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
criterion = torch.nn.BCELoss()

# Run one training epoch over the loader.
def train_model(model, train_loader, optimizer, criterion, device):
    """Train for one epoch and return the mean per-batch loss."""
    model.train()
    running_loss = 0.0
    bar = tqdm(train_loader, desc="Training", leave=False)
    for batch_features, batch_labels in bar:
        batch_features = batch_features.to(device)
        batch_labels = batch_labels.to(device)
        optimizer.zero_grad()
        probs = model(batch_features)
        batch_loss = criterion(probs, batch_labels)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        bar.set_postfix(loss=batch_loss.item())
    return running_loss / len(train_loader)

# Evaluate on the validation loader.
def validate_model(model, valid_loader, criterion, device):
    """Return (mean per-batch loss, accuracy) over the validation set."""
    model.eval()
    running_loss = 0.0
    preds_seen, labels_seen = [], []
    bar = tqdm(valid_loader, desc="Validation", leave=False)
    with torch.no_grad():
        for batch_features, batch_labels in bar:
            batch_features = batch_features.to(device)
            batch_labels = batch_labels.to(device)
            probs = model(batch_features)
            batch_loss = criterion(probs, batch_labels)
            running_loss += batch_loss.item()
            # Threshold at 0.5 to turn probabilities into multi-hot predictions.
            preds_seen.extend((probs > 0.5).cpu().numpy())
            labels_seen.extend(batch_labels.cpu().numpy())
            bar.set_postfix(loss=batch_loss.item())
    # On multi-label arrays, accuracy_score is subset (exact-match) accuracy.
    accuracy = accuracy_score(labels_seen, preds_seen)
    return running_loss / len(valid_loader), accuracy

# Predict on the test set and persist one line of labels per document.
def predict_and_save_results(model, test_loader, output_path, idx_to_label):
    """Run inference over test_loader and write predictions to output_path.

    Writes one line per test sample containing the comma-joined names of all
    labels whose predicted probability exceeds 0.5 (an empty line if none).
    """
    model.eval()
    # Derive the device from the model instead of relying on a module-level
    # global, so the function is self-contained.
    device = next(model.parameters()).device
    predictions = []
    with torch.no_grad():
        for features in test_loader:
            features = features.to(device)
            outputs = model(features)
            # BUG FIX: the original decoded only outputs[0], emitting one line
            # per *batch* instead of one per *sample*, so the output file had
            # far fewer lines than test documents. Decode every row.
            for sample_probs in outputs:
                predicted_labels = [
                    idx_to_label[idx] for idx, prob in enumerate(sample_probs) if prob > 0.5
                ]
                predictions.append(",".join(predicted_labels))
    with open(output_path, "w", encoding="utf-8") as f:
        for line in predictions:
            f.write(line + "\n")

# Train and validate for a fixed number of epochs.
num_epochs = 3
for epoch in range(num_epochs):
    print(f"Epoch {epoch + 1}/{num_epochs}")
    train_loss = train_model(model, train_loader, optimizer, criterion, device)
    valid_loss, valid_accuracy = validate_model(model, valid_loader, criterion, device)
    print(f"Train Loss: {train_loss:.4f}, Valid Loss: {valid_loss:.4f}, Accuracy: {valid_accuracy:.4f}")

# Predict on the test set and write results to disk.
predict_and_save_results(model, test_loader, output_path, idx_to_label)
print(f"预测结果已保存至 {output_path}")
