import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from collections import Counter
import re
import os
import torch.nn.functional as F  # 添加这一行

# Configuration / hyperparameters
MAX_LEN = 500  # maximum sequence length in tokens; longer texts are truncated
BATCH_SIZE = 32
EMBEDDING_DIM = 50  # dimensionality of the word-embedding vectors
NUM_EPOCHS = 10
LR = 0.001  # Adam learning rate
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Data-loading helpers
def read_data(file_path):
    """Load a tab-separated "<label>\\t<text>" file.

    Args:
        file_path: path to a UTF-8 text file, one example per line.

    Returns:
        (texts, labels): list of stripped text bodies and list of int labels,
        guaranteed to be the same length.

    Malformed lines (no tab separator, or a non-integer label) are skipped.
    """
    texts, labels = [], []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            try:
                raw_label, content = line.strip().split("\t", 1)
                # Parse the label BEFORE appending anything, so a bad label
                # cannot leave texts and labels out of sync (the original
                # appended the text first, then failed on int()).
                label = int(raw_label)
            except ValueError:
                # Only ValueError (missing tab / non-integer label) is a
                # "bad line"; anything else should propagate, unlike the
                # original bare except.
                continue
            texts.append(content.strip())
            labels.append(label)
    return texts, labels

def tokenize(text):
    """Lowercase *text* and return its word tokens (runs of \\w characters)."""
    return [match.group(0) for match in re.finditer(r"\b\w+\b", text.lower())]

def build_vocab(texts, max_vocab_size=20000):
    """Build a word -> id mapping from the most frequent tokens.

    Ids 0 and 1 are reserved for the <PAD> and <UNK> sentinels; the remaining
    max_vocab_size - 2 slots go to tokens by descending frequency.
    """
    freq = Counter(tok for text in texts for tok in tokenize(text))
    vocab = {"<PAD>": 0, "<UNK>": 1}
    for word, _count in freq.most_common(max_vocab_size - 2):
        vocab[word] = len(vocab)
    return vocab

def text_to_sequence(text, word2idx):
    """Encode *text* as a list of token ids, truncated to MAX_LEN.

    Tokens missing from the vocabulary map to the <UNK> id.
    """
    unk = word2idx["<UNK>"]
    return [word2idx.get(tok, unk) for tok in tokenize(text)[:MAX_LEN]]

class TextDataset(Dataset):
    """Dataset of pre-encoded texts, zero-padded to MAX_LEN per item."""

    def __init__(self, texts, labels, word2idx):
        self.labels = labels
        # Encode once up front so __getitem__ is cheap.
        self.sequences = [text_to_sequence(text, word2idx) for text in texts]

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        ids = list(self.sequences[idx])
        ids.extend([0] * (MAX_LEN - len(ids)))  # pad with <PAD>=0 up to MAX_LEN
        return torch.tensor(ids), torch.tensor(self.labels[idx])

# CNN model definition
class CNNTextClassifier(nn.Module):
    """TextCNN classifier: parallel convolutions over word embeddings,
    max-over-time pooling, dropout, and a linear head.

    Args:
        vocab_size: size of the embedding table (ids 0..vocab_size-1).
        embed_dim: embedding dimensionality.
        num_classes: number of output classes (default 2).
        kernel_sizes: n-gram window widths, one conv branch per width.
            Default changed from a mutable list to a tuple — a shared
            mutable default could be aliased/mutated across calls.
        num_filters: feature maps per conv branch.
    """

    def __init__(self, vocab_size, embed_dim, num_classes=2, kernel_sizes=(3, 4, 5), num_filters=100):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        # Each Conv2d spans the full embedding width, so it acts as a
        # width-k n-gram detector over the token axis.
        self.convs = nn.ModuleList(
            nn.Conv2d(1, num_filters, (k, embed_dim)) for k in kernel_sizes
        )
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(num_filters * len(kernel_sizes), num_classes)

    def forward(self, x):
        """x: LongTensor [batch, seq_len] of token ids -> logits [batch, num_classes]."""
        emb = self.embedding(x).unsqueeze(1)  # [batch, 1, seq, embed]
        # After each conv the embedding axis collapses to 1 and is squeezed away.
        conved = [F.relu(conv(emb)).squeeze(3) for conv in self.convs]  # [batch, filters, seq-k+1]
        pooled = [F.max_pool1d(c, c.size(2)).squeeze(2) for c in conved]  # max over time
        features = torch.cat(pooled, dim=1)  # [batch, filters * len(kernel_sizes)]
        return self.fc(self.dropout(features))

# Training loop for a single epoch
def train(model, dataloader, optimizer, criterion):
    """Run one optimization epoch.

    Returns:
        (mean per-batch loss, training accuracy) for the epoch.
    """
    model.train()
    running_loss = 0.0
    n_correct = 0
    n_seen = 0
    for batch_x, batch_y in dataloader:
        batch_x = batch_x.to(DEVICE)
        batch_y = batch_y.to(DEVICE)
        optimizer.zero_grad()
        logits = model(batch_x)
        batch_loss = criterion(logits, batch_y)
        batch_loss.backward()
        optimizer.step()
        running_loss += batch_loss.item()
        n_correct += (logits.argmax(dim=1) == batch_y).sum().item()
        n_seen += batch_y.size(0)
    return running_loss / len(dataloader), n_correct / n_seen

# Evaluation helper
def evaluate(model, dataloader):
    """Compute classification accuracy over *dataloader* (no gradients)."""
    model.eval()
    n_correct = 0
    n_seen = 0
    with torch.no_grad():
        for batch_x, batch_y in dataloader:
            batch_x = batch_x.to(DEVICE)
            batch_y = batch_y.to(DEVICE)
            preds = model(batch_x).argmax(dim=1)
            n_correct += (preds == batch_y).sum().item()
            n_seen += batch_y.size(0)
    return n_correct / n_seen

# Single-sentence inference
def predict(model, sentence, word2idx):
    """Classify one sentence; returns "正面" (positive) or "负面" (negative)."""
    model.eval()
    ids = text_to_sequence(sentence, word2idx)
    ids = ids + [0] * (MAX_LEN - len(ids))  # pad to fixed length, <PAD>=0
    batch = torch.tensor([ids]).to(DEVICE)  # shape [1, MAX_LEN]
    with torch.no_grad():
        label = torch.argmax(model(batch), dim=1).item()
    return "正面" if label == 1 else "负面"

def main():
    """Train the TextCNN, report per-epoch metrics, save the weights,
    then enter an interactive prediction loop on stdin."""
    train_texts, train_labels = read_data("data/train.txt")
    test_texts, test_labels = read_data("data/test.txt")
    # Vocabulary is built over both splits (same as the original script).
    word2idx = build_vocab(train_texts + test_texts)

    train_loader = DataLoader(
        TextDataset(train_texts, train_labels, word2idx),
        batch_size=BATCH_SIZE,
        shuffle=True,
    )
    test_loader = DataLoader(
        TextDataset(test_texts, test_labels, word2idx),
        batch_size=BATCH_SIZE,
    )

    model = CNNTextClassifier(vocab_size=len(word2idx), embed_dim=EMBEDDING_DIM).to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=LR)
    criterion = nn.CrossEntropyLoss()

    for epoch in range(1, NUM_EPOCHS + 1):
        train_loss, train_acc = train(model, train_loader, optimizer, criterion)
        test_acc = evaluate(model, test_loader)
        print(f"Epoch {epoch}/{NUM_EPOCHS} - Loss: {train_loss:.4f} - Train Acc: {train_acc:.4f} - Test Acc: {test_acc:.4f}")

    os.makedirs("models", exist_ok=True)
    torch.save(model.state_dict(), "models/cnn_model.pth")
    print("训练完成，模型已保存。")

    # Reload the just-saved checkpoint before serving predictions,
    # mirroring the original script's behaviour.
    model.load_state_dict(torch.load("models/cnn_model.pth"))
    while True:
        text = input("请输入一段文本进行预测 (输入'quit'退出): ")
        if text.strip().lower() == "quit":
            break
        print("预测结果:", predict(model, text, word2idx))


if __name__ == "__main__":
    main()
