# Load the two corpora: one title per line, surrounding whitespace stripped.
with open("academy_titles.txt", encoding="utf-8", mode="r") as f:
    academy_titles = [line.strip() for line in f]

with open("job_titles.txt", encoding="utf-8", mode="r") as f:
    job_titles = [line.strip() for line in f]


# Notebook-style peek at the first five titles (a no-op when run as a script).
academy_titles[:5]


# Collect every distinct character across both corpora — this becomes the
# model's character vocabulary. set.update(title) adds each character of the
# string in one call, replacing the original char-by-char inner loop.
char_set = set()
for title in academy_titles:
    char_set.update(title)

for title in job_titles:
    char_set.update(title)

print(len(char_set))


import torch

# Freeze the vocabulary into a fixed order; a character's list position is
# its integer id for the embedding layer.
char_list = list(char_set)
# Vocabulary size plus one extra slot reserved for <UNK> (unseen characters).
n_chars = len(char_list) + 1  # <UNK>


# Cache of {char: index}, rebuilt whenever the char_list object is rebound
# (e.g. after reloading the vocabulary from JSON later in this file).
# Replaces the original per-character char_list.index(ch), which was an
# O(vocab) linear scan for every character of every title.
_char_index_cache = {"source": None, "index": {}}


def title_to_tensor(title):
    """Encode a title string as a 1-D LongTensor of character ids.

    Characters not present in char_list map to the <UNK> id (n_chars - 1).
    An empty title yields an empty tensor.
    """
    if _char_index_cache["source"] is not char_list:
        _char_index_cache["index"] = {ch: i for i, ch in enumerate(char_list)}
        _char_index_cache["source"] = char_list
    char_to_index = _char_index_cache["index"]
    unk = n_chars - 1
    tensor = torch.zeros(len(title), dtype=torch.long)
    for pos, ch in enumerate(title):
        tensor[pos] = char_to_index.get(ch, unk)
    return tensor


# Standalone demo: an embedding table mapping each of the n_chars ids to a
# 100-dim vector (the model below builds its own embedding; this one is unused
# by it).
embedding = torch.nn.Embedding(n_chars, 100)


# Show a raw title next to its encoded tensor form.
print(job_titles[1])


print(title_to_tensor(job_titles[1]))


#print(title_to_tensor(job_titles[1])[1].item())


import torch.nn as nn


#word_count: vocabulary size
#embedding_size: word-embedding dimension
#hidden_size: hidden-layer dimension
#output_size: output dimension

class RNN(nn.Module):
    """Character-level Elman-style RNN classifier.

    Each forward() call consumes a single character id together with the
    previous hidden state; the caller loops over a title one character at a
    time.

    Args:
        word_count: vocabulary size (number of embedding rows).
        embedding_size: dimension of each character embedding.
        hidden_size: dimension of the recurrent hidden state.
        output_size: number of output classes.
    """

    def __init__(self, word_count, embedding_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # nn.Embedding (not torch.nn.Embedding) for consistency with the
        # other nn.* layers below.
        self.embedding = nn.Embedding(num_embeddings=word_count, embedding_dim=embedding_size)
        self.i2h = nn.Linear(embedding_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(embedding_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)  # log-probs, paired with NLLLoss

    def forward(self, input_tensor, hidden):
        """One recurrence step.

        input_tensor: LongTensor of shape (1,) holding one character id.
        hidden: FloatTensor of shape (1, hidden_size) from the previous step.
        Returns (log-probabilities of shape (1, output_size), new hidden).
        """
        word_vector = self.embedding(input_tensor)
        combine = torch.cat([word_vector, hidden], 1)
        hidden = self.i2h(combine)   # next hidden state
        output = self.i2o(combine)   # class scores

        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        """Return an all-zero initial hidden state of shape (1, hidden_size)."""
        return torch.zeros(1, self.hidden_size)


# Hyperparameters for a quick smoke test of the model.
embedding_size = 200
n_hidden = 128
n_categorizes = 2
# NOTE(review): device is selected here but neither the model nor any tensor
# is moved to it in this file — confirm whether GPU execution was intended.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
rnn = RNN(n_chars, embedding_size, n_hidden, n_categorizes)


# Peek at the first academy title (a no-op outside a notebook).
academy_titles[0]


input_tensor = title_to_tensor(academy_titles[0])


print("input_tensor:\n", input_tensor)
print(input_tensor[0])  # .unsqueeze(dim=0))
print(input_tensor[0].unsqueeze(dim=0))
print(input_tensor.size())


# Run a single recurrence step: the RNN consumes one character id per call.
hidden = rnn.initHidden()
output, hidden = rnn(input_tensor[0].unsqueeze(dim=0), hidden)

print("output:\n", output)
print("hidden:\n", hidden)
print("size of hidden:\n", hidden.size())


#Merge the two datasets and attach numeric labels

# Label 0 = graduate-exam titles (考研考博), label 1 = job postings (招聘信息).
# Each sample is (encoded title tensor, label tensor).
categories = ["考研考博", "招聘信息"]
all_data = [(title_to_tensor(t), torch.tensor([0], dtype=torch.long))
            for t in academy_titles]
all_data += [(title_to_tensor(t), torch.tensor([1], dtype=torch.long))
             for t in job_titles]


#Split into training and validation sets

import random

# NOTE(review): no random seed is set, so the train/test split differs on
# every run — consider random.seed(...) for reproducibility.
random.shuffle(all_data)
data_len = len(all_data)
split_ratio = 0.7

# 70% train / 30% test split over the shuffled data.
train_data = all_data[:int(data_len * split_ratio)]
test_data = all_data[int(data_len * split_ratio):]

print("size of train_data: ", len(train_data))
print("size of test_data: ", len(test_data))


#Apply the character-level RNN to one title, one character per step

def run_RNN(rnn, input_tensor):
    """Feed a whole encoded title through the RNN, one character id at a time.

    Args:
        rnn: model exposing initHidden() and (input, hidden) -> (output, hidden).
        input_tensor: 1-D LongTensor of character ids for one title.

    Returns:
        The output of the final step (log-probabilities over the classes).

    Raises:
        ValueError: if input_tensor is empty. (The original code left `output`
        unbound in that case and crashed with UnboundLocalError instead.)
    """
    if input_tensor.size(0) == 0:
        raise ValueError("run_RNN requires a non-empty input_tensor")
    hidden = rnn.initHidden()
    for i in range(input_tensor.size(0)):  # .size(0) == number of characters
        output, hidden = rnn(input_tensor[i].unsqueeze(dim=0), hidden)
    return output


#Model training

def train(rnn, criterion, input_tensor, category_tensor):
    """Run one training step on a single title with a manual SGD update.

    Uses the module-level `learning_rate` as the step size.
    Returns (final-step output, scalar loss value).
    """
    rnn.zero_grad()

    output = run_RNN(rnn, input_tensor)
    loss = criterion(output, category_tensor)
    loss.backward()

    # Manual SGD: move each parameter against its gradient.
    for param in rnn.parameters():
        param.data -= learning_rate * param.grad.data

    return output, loss.item()


#Model evaluation

def evaluate(rnn, input_tensor):
    """Classify one encoded title without tracking gradients.

    Returns the log-probability output of the final RNN step.
    """
    # The original also computed `hidden = rnn.initHidden()` here, but it was
    # never used — run_RNN initializes its own hidden state — so it is removed.
    with torch.no_grad():
        return run_RNN(rnn, input_tensor)


from tqdm import tqdm

# Training hyperparameters. NOTE(review): n_hidden here is 10, smaller than
# the 128 used in the earlier smoke-test section — confirm which was intended.
epoch = 1
embedding_size = 200
n_hidden = 10
n_categories = 2
learning_rate = 0.005
rnn = RNN(n_chars, embedding_size, n_hidden, n_categories)
criterion = nn.NLLLoss()  # pairs with the model's LogSoftmax output
loss_sum = 0
all_losses = []
plot_every = 100  # record one averaged loss point every 100 samples
for e in range(epoch):
    for ind, (title_tensor, label) in enumerate(tqdm(train_data)):
        output, loss = train(rnn, criterion, title_tensor, label)
        loss_sum += loss
        # NOTE(review): at ind == 0 this averages a single sample's loss over
        # plot_every, so the first plotted point is artificially small.
        if ind % plot_every == 0:
            all_losses.append(loss_sum / plot_every)
            loss_sum = 0
    # Evaluate accuracy on the held-out set after each epoch.
    c = 0
    for title, category in tqdm(test_data):
        output = evaluate(rnn, title)
        topn, topi = output.topk(1)  # index of the most likely class
        if topi.item() == category[0].item():
            c += 1
    print('accuracy', c / len(test_data))


# Save the model. Saving the state_dict (commented alternative) is the more
# portable option; here the full model object is pickled instead.
# torch.save(rnn.state_dict(), "rnn_parameter.pkl")
torch.save(rnn, "rnn_model.pkl")


# Load the model back. Loading from a state_dict would require re-defining
# the model first, as in the commented lines below.
# embedding_size = 200
# n_hidden = 128
# n_categories = 2
# rnn = RNN(n_chars, embedding_size, n_hidden, n_categories)
# rnn.load_state_dict(torch.load("rnn_parameter.pkl"))
# NOTE(review): torch.load unpickles arbitrary objects — only load files
# from a trusted source.
rnn = torch.load("rnn_model.pkl")


# Save the vocabulary so inference can rebuild the same char -> index mapping.
import json

# encoding="utf-8" is stated explicitly, matching the title-file opens at the
# top of this script (the default locale encoding is platform-dependent).
with open("char_list.json", mode="w", encoding="utf-8") as f:
    json.dump(char_list, f)

# Reload the vocabulary (JSON round-trips the list, preserving order).
with open("char_list.json", mode="r", encoding="utf-8") as f:
    char_list = json.load(f)


def get_category(title):
    """Classify a raw title string and return its human-readable category."""
    encoded = title_to_tensor(title)
    log_probs = evaluate(rnn, encoded)
    predicted = log_probs.argmax(dim=1).item()  # index of the top class
    return categories[predicted]


def print_test(title):
    """Print a (title, predicted category) pair for a quick manual check."""
    prediction = get_category(title)
    print((title, prediction))


# Manual sanity checks: mixed exam-related and job-related titles.
print_test("考研心得")
print_test("北大实验室博士")
print_test("校外博士招考")
print_test("急求自然语言处理工程师")
print_test("校招offer比较")
print_test("工作还是考研")
print_test("工作吧")
print_test("招聘人员")



