import torch
import torch.nn as nn
import torch.optim as optim
import jieba
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import metrics

# 1. Data preparation: a tiny labelled corpus of Chinese sentences.
# Each entry pairs a sentence with its category id: 0 = sports, 1 = entertainment.
_corpus = [
    ("今天的足球比赛非常激烈，球队表现出色，最终赢得了比赛。", 0),
    ("NBA比赛今天开打，球员们的表现非常精彩，球迷们热情高涨。", 0),
    ("张艺谋的新电影上映了，票房成绩非常好，观众反响热烈。", 1),
    ("娱乐圈最近又出了一些新闻，明星们的私生活成了大家讨论的焦点。", 1),
    ("昨晚的篮球赛真是太精彩了，球员们的进攻和防守都非常强硬。", 0),
    ("李宇春在最新的音乐会上演出了她的新歌，现场观众反应热烈。", 1),
    ("今年的世界杯比赛激烈异常，球队之间的竞争越来越激烈。", 0),
    ("最近的综艺节目非常火，明星嘉宾的表现让观众们大笑不已。", 1),
]

# Split the (sentence, label) pairs into the parallel lists the rest of
# the script expects.
texts = [sentence for sentence, _ in _corpus]
labels = [label for _, label in _corpus]


# 2. Preprocessing: Chinese word segmentation for TF-IDF feature extraction.
def jieba_cut(text):
    """Segment Chinese *text* with jieba and return a space-joined token string.

    TfidfVectorizer tokenizes on whitespace by default, so joining jieba's
    tokens with spaces lets it treat each Chinese word as a term.
    """
    tokens = jieba.cut(text)
    return " ".join(tokens)


# Segment every sentence, then turn the corpus into a dense TF-IDF matrix.
texts_cut = [jieba_cut(text) for text in texts]

vectorizer = TfidfVectorizer(max_features=10000)
X_tfidf = vectorizer.fit_transform(texts_cut).toarray()
y = np.array(labels)

# 3. Split into train/test sets.
# With only 8 samples, test_size=0.2 yields a 2-sample test set; without
# stratification both test samples can land in the same class, which makes
# the later classification report degenerate. stratify=y guarantees one
# sample per class in the test set.
X_train, X_test, y_train, y_test = train_test_split(
    X_tfidf, y, test_size=0.2, random_state=42, stratify=y
)


# 4. PyTorch data loading.
class NewsGroupDataset(torch.utils.data.Dataset):
    """Dataset of (TF-IDF feature vector, class label) pairs."""

    def __init__(self, features, labels):
        # float32 for the linear layer; int64 (long) as CrossEntropyLoss requires.
        self.features = torch.tensor(features, dtype=torch.float32)
        self.labels = torch.tensor(labels, dtype=torch.long)

    def __len__(self):
        """Number of samples."""
        return self.features.shape[0]

    def __getitem__(self, idx):
        """Return the (feature_vector, label) pair at *idx*."""
        return self.features[idx], self.labels[idx]


# Wrap the numpy splits in Dataset/DataLoader objects for mini-batch training.
train_dataset = NewsGroupDataset(X_train, y_train)
test_dataset = NewsGroupDataset(X_test, y_test)

# Tiny batch size: the whole corpus is only 8 sentences. Shuffle only the
# training batches; keep test order deterministic.
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=2, shuffle=True
)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=2, shuffle=False
)


# 5. "SVM" model: a single linear layer producing raw class scores.
# NOTE(review): trained with CrossEntropyLoss below, this is multinomial
# logistic regression rather than an SVM; a true linear SVM would pair the
# linear layer with a hinge loss (e.g. nn.MultiMarginLoss).
class SVM(nn.Module):
    """Linear classifier mapping TF-IDF vectors to per-class logits."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.fc = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        """Return raw (unnormalized) class logits for batch *x*."""
        return self.fc(x)


# 6. Build the model from the fitted vocabulary size.
input_dim = X_tfidf.shape[1]  # feature count produced by the TF-IDF vectorizer
model = SVM(input_dim=input_dim, output_dim=2)  # two classes: sports / entertainment
criterion = nn.CrossEntropyLoss()  # softmax + NLL over the two class logits
optimizer = optim.SGD(model.parameters(), lr=0.01)  # plain SGD optimizer

# 7. Train the model.
num_epochs = 50  # number of passes over the training set

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    correct = 0
    total = 0

    # Loop variable renamed from `labels` — the original shadowed the
    # module-level `labels` list defined in the data-preparation section.
    for inputs, batch_labels in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, batch_labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # argmax over the class dimension gives predicted class ids;
        # avoids the deprecated `.data` attribute access.
        predicted = outputs.argmax(dim=1)
        total += batch_labels.size(0)
        correct += (predicted == batch_labels).sum().item()

    print(
        f"Epoch [{epoch + 1}/{num_epochs}], Loss: {running_loss / len(train_loader)}, Accuracy: {100 * correct / total}%")

# 8. Evaluate accuracy on the held-out test set.
model.eval()
correct = 0
total = 0

with torch.no_grad():
    for inputs, targets in test_loader:
        logits = model(inputs)
        predictions = logits.argmax(dim=1)
        total += targets.size(0)
        correct += (predictions == targets).sum().item()

print(f"Test Accuracy: {100 * correct / total}%")

# 9. Print the per-class precision/recall/F1 report for the test set.
y_pred = []
y_true = []

with torch.no_grad():
    for inputs, targets in test_loader:
        logits = model(inputs)
        predictions = logits.argmax(dim=1)
        y_pred.extend(predictions.tolist())
        y_true.extend(targets.tolist())

print(metrics.classification_report(y_true, y_pred))


# 10. Classify a new, unseen sample.
def predict(text, model, vectorizer):
    """Return the predicted class id (0 = sports, 1 = entertainment) for *text*.

    The text is segmented with jieba, transformed by the already-fitted
    TF-IDF vectorizer, and scored by the trained model.
    """
    # Segment, vectorize, and convert to a float32 tensor in one pipeline.
    features = vectorizer.transform([jieba_cut(text)]).toarray()
    features_tensor = torch.tensor(features, dtype=torch.float32)

    # Score in evaluation mode with gradients disabled.
    model.eval()
    with torch.no_grad():
        logits = model(features_tensor)
        predicted = logits.argmax(dim=1)

    return predicted.item()


# Try the classifier on a new Chinese sentence and report the result.
new_text = "今天的篮球比赛真是太精彩了，球员们的表现让大家都为之喝彩。"
predicted_label = predict(new_text, model, vectorizer)

# Class id 0 is sports, 1 is entertainment.
print("预测类别: 体育" if predicted_label == 0 else "预测类别: 娱乐")
