# Goal of this project: news-category classification with a neural model.

"""
Step 1: Prepare the data.
"""
# 1.1 Load the corpus: one "<label>\t<text>" record per line.
X, y = [], []
with open(file="./class/news.txt", mode="r", encoding="utf8") as f:
    for record in f:
        label, text = record.strip().split("\t")
        y.append(label)
        X.append(text)

# 1.2 Hold out 20% of the samples as a test split (fixed seed => reproducible split).
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

"""
    1.3 构建字典
        - word: 按字构建特征
        - phrase：按词构建特征
"""
import jieba
words = {"<UNK>", "<PAD>"}
for sentence in X_train:
    words = words.union(set(jieba.lcut(sentence)))
word2idx = {word:idx for idx, word in enumerate(words)}
idx2word = {idx: word for word, idx in word2idx.items()}

"""
    1.4 处理特征：
        文本截取固定的长度
        文本变索引号
"""
seq_len = 100
# 1.4.1 先处理训练集
X_train1 = []
for sentence in X_train:
    sentence = jieba.lcut(sentence)
    sentence = (sentence + seq_len * ["<PAD>"])[:seq_len]
    sentence = [word2idx.get(word, word2idx.get("<UNK>")) for word in sentence]
    X_train1.append(sentence)

# 1.4.2 处理测试集
X_test1 = []
for sentence in X_test:
    sentence = jieba.lcut(sentence)
    sentence = (sentence + seq_len * ["<PAD>"])[:seq_len]
    sentence = [word2idx.get(word, word2idx.get("<UNK>")) for word in sentence]
    X_test1.append(sentence)

# 1.5 Encode the labels: map each category name to an integer id.
labels = set(y_train)
# Enumerate the sorted labels so the id assignment is reproducible across runs
# (raw set iteration order is not).
label2idx = {label: idx for idx, label in enumerate(sorted(labels))}
idx2label = {idx: label for label, idx in label2idx.items()}
y_train1 = [label2idx[label] for label in y_train]
# NOTE(review): assumes every test label also appears in y_train; a label seen
# only in the test split would raise KeyError here.
y_test1 = [label2idx[label] for label in y_test]

#1.6 打包数据为 dataloader
import torch
from torch import nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader

# 1.6.1 Custom Dataset: wraps the encoded texts and labels as LongTensors.
class NewsDataset(Dataset):
    """Map-style dataset yielding (token_ids, label) LongTensor pairs."""

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.X)

    def __getitem__(self, idx):
        """Return the idx-th sample as (LongTensor token ids, LongTensor label)."""
        features = torch.tensor(data=self.X[idx], dtype=torch.long)
        target = torch.tensor(data=self.y[idx], dtype=torch.long)
        return features, target


# 1.6.2 / 1.6.3 Wrap both splits in DataLoaders.
# Training batches are reshuffled every epoch; evaluation order stays fixed.
train_dataset = NewsDataset(X=X_train1, y=y_train1)
test_dataset = NewsDataset(X=X_test1, y=y_test1)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=16, shuffle=False)


"""
第二步：准备模型
"""
dict_len = len(word2idx)
embedding_dim = 256

#2.1 定义模型
class Model(nn.Module):
    def __init__(self,
                 num_embeddings=dict_len,
                 embedding_dim=embedding_dim):
        super(Model, self).__init__()
        self.embed = nn.Embedding(num_embeddings=num_embeddings,
                                  embedding_dim=embedding_dim,
                                 padding_idx=word2idx.get("<PAD>"))
        # self.rnn = nn.RNN(input_size=embedding_dim,
        #                    hidden_size=512)
        # self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=512)
        self.gru = nn.GRU(input_size=embedding_dim, hidden_size=512, num_layers=2, bidirectional=False)
        self.linear = nn.Linear(in_features=512,
                               out_features=10)
    def forward(self, x):
        x = self.embed(x)
        x = torch.permute(input=x, dims=(1, 0 ,2))
        # x = x.permute(1,0,2)
        # out, hn = self.rnn(x)
        out, hn = self.gru(x)
        # 只使用最后一步的信息
        # x = hn.sum(dim=0)
        x = torch.sum(input=out, dim=0)
        # x = hn[-1,:,:]
        x = self.linear(x)
        return x
model = Model()


"""
第三步 训练模型
"""
# 3.1 定义优化器
optimizer = torch.optim.SGD(params=model.parameters(), lr=1e-3)
# 3.2 定义损失函数
loss_fn = nn.CrossEntropyLoss()
# 训练轮次
epochs = 30
# GPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model.to(device=device)

# 3.3 Progress monitoring (i.e., model evaluation).
def get_acc(data_loader):
    """Compute classification accuracy over *data_loader*.

    Accumulates correct predictions and sample counts instead of averaging
    per-batch accuracies, so a smaller final batch no longer skews the result.

    Returns:
        Accuracy rounded to 4 decimal places; 0.0 for an empty loader
        (the old mean-of-means returned NaN in that case).
    """
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for X, y in data_loader:
            X = X.to(device=device)
            y = y.to(device=device)
            y_pred = model(X).argmax(dim=1)
            correct += (y_pred == y).sum().item()
            total += y.numel()
    if total == 0:
        return 0.0
    return round(number=correct / total, ndigits=4)

# 3.4 Training loop.
def train():
    """Run the full training schedule, reporting accuracy after every epoch."""
    for epoch in range(epochs):
        model.train()
        for batch_x, batch_y in train_dataloader:
            batch_x = batch_x.to(device=device)
            batch_y = batch_y.to(device=device)
            # Clear gradients left over from the previous step.
            optimizer.zero_grad()
            # Forward pass and loss.
            y_pred = model(batch_x)
            loss = loss_fn(y_pred, batch_y)
            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()

        train_acc = get_acc(data_loader=train_dataloader)
        test_acc = get_acc(data_loader=test_dataloader)
        print(f"Epoch: {epoch + 1}, Train_Acc: {train_acc}, Test_Acc: {test_acc}")


train()
