


import jieba


def _read_tokenized_titles(path):
    """Read one title per line from *path*, strip it, and segment it with jieba."""
    with open(path, encoding="utf-8", mode="r") as fh:
        return [list(jieba.cut(raw_line.strip())) for raw_line in fh]


# Two corpora: graduate-exam titles (class 0) and job-posting titles (class 1).
academy_titles = _read_tokenized_titles("academy_titles.txt")
job_titles = _read_tokenized_titles("job_titles.txt")

# Bare expressions left over from notebook cells — no effect when run as a script.
academy_titles[:3]
job_titles[:3]





# Build the vocabulary: every distinct word from both title collections.
# Words are added in the same order as the original loops so that the
# iteration order of the set (and hence word_list indices) is unchanged.
word_set = set()
for corpus in (academy_titles, job_titles):
    for title in corpus:
        word_set.update(title)

print(len(word_set))

word_list = list(word_set)
# Reserve one extra index at the end of the vocabulary for <UNK>.
n_words = len(word_list) + 1





import torch


# Lazily built cache mapping word -> index in word_list.  The original code
# called word_list.index(word) (an O(V) scan) plus a try/except for every
# single token; a dict lookup is O(1) per token.
_word_to_index = {}


def title_to_tensor(title):
    """Convert one segmented title (a list of words) to a LongTensor of indices.

    Each word is mapped to its position in the module-level ``word_list``;
    words not in the vocabulary are mapped to the reserved <UNK> index
    (``n_words - 1``).
    """
    # Rebuild the cache if word_list has changed size since the last call.
    if len(_word_to_index) != len(word_list):
        _word_to_index.clear()
        _word_to_index.update({word: i for i, word in enumerate(word_list)})
    unk_index = n_words - 1  # <UNK> fallback for out-of-vocabulary words
    tensor = torch.zeros(len(title), dtype=torch.long)
    for pos, word in enumerate(title):
        tensor[pos] = _word_to_index.get(word, unk_index)
    return tensor


# Sanity check: show one segmented title and its index-tensor encoding.
sample_title = academy_titles[0]
print(sample_title)
print(title_to_tensor(sample_title))





#使用pytorch中自带的RNN模型， torch.nn.RNN定义模型
import torch.nn as nn


class RNN(nn.Module):
    """Single-layer unidirectional RNN classifier over word-index sequences.

    Input: a LongTensor of shape (1, seq_len) — see ``run_rnn``, which adds
    the batch dimension.  Output: log-probabilities of shape (output_size,).
    """

    def __init__(self, word_count, embedding_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(word_count, embedding_size)
        self.rnn = nn.RNN(embedding_size, hidden_size, num_layers=1, bidirectional=False, batch_first=True)
        self.cls = nn.Linear(hidden_size, output_size)
        # Output after indexing below is 1-D, so softmax over dim=0 is correct.
        self.softmax = nn.LogSoftmax(dim=0)

    def forward(self, input_tensor):
        word_vector = self.embedding(input_tensor)  # (1, seq_len, embedding_size)
        rnn_output, _ = self.rnn(word_vector)       # (1, seq_len, hidden_size)
        # BUG FIX: the original indexed with len(input_tensor) - 1, but
        # input_tensor is batched (1, seq_len), so len(input_tensor) == 1 and
        # the FIRST timestep was selected instead of the last.  Classify from
        # the hidden state at the final timestep.
        last_step = rnn_output[0, -1]               # (hidden_size,)
        output = self.cls(last_step)
        output = self.softmax(output)
        return output


def run_rnn(rnn, input_tensor):
    """Run ``rnn`` on a single title tensor, adding the batch dimension."""
    output = rnn(input_tensor.unsqueeze(dim=0))
    return output


def train(rnn, criterion, input_tensor, category_tensor, lr=None):
    """One manual SGD step on a single example.

    Parameters: ``rnn`` the model, ``criterion`` the loss (NLLLoss here),
    ``input_tensor`` a 1-D LongTensor of word indices, ``category_tensor`` a
    (1,)-shaped LongTensor label.  ``lr`` defaults to the module-level
    ``learning_rate`` for backward compatibility.

    Returns (output, loss_value) where output is the model's 1-D
    log-probability vector and loss_value is a Python float.
    """
    if lr is None:
        lr = learning_rate
    rnn.zero_grad()
    output = run_rnn(rnn, input_tensor)
    # output is 1-D; NLLLoss expects (batch, classes), so add a batch dim.
    loss = criterion(output.unsqueeze(dim=0), category_tensor)
    loss.backward()

    # Manual SGD update; no_grad keeps autograd out of the parameter update,
    # and the guard skips any parameter that received no gradient.
    with torch.no_grad():
        for p in rnn.parameters():
            if p.grad is not None:
                p.add_(p.grad, alpha=-lr)
    return output, loss.item()


def evaluate(rnn, input_tensor):
    """Score a single title tensor without tracking gradients."""
    with torch.no_grad():
        return run_rnn(rnn, input_tensor)





# Build (tensor, label) pairs: label 0 = academy titles, label 1 = job titles.
all_data = []
categories = ["考研考博", "招聘信息"]

for label, corpus in enumerate((academy_titles, job_titles)):
    for tokens in corpus:
        all_data.append((title_to_tensor(tokens), torch.tensor([label], dtype=torch.long)))


import random

# Shuffle, then hold out 30% of the data for evaluation.
random.shuffle(all_data)
data_len = len(all_data)
split_ratio = 0.7
cut = int(data_len * split_ratio)
train_data = all_data[:cut]
test_data = all_data[cut:]
print("train size:", len(train_data))
print("test size:", len(test_data))





# Hyperparameters and training state.
from tqdm import tqdm

# Model configuration.
embedding_size = 200
n_hidden = 10
n_categories = 2

# Optimization configuration.
epoch = 1
learning_rate = 0.005

# Loss bookkeeping for plotting.
loss_sum = 0
all_losses = []
plot_every = 100

rnn = RNN(n_words, embedding_size, n_hidden, n_categories)
criterion = nn.NLLLoss()


for e in range(epoch):
    # --- training pass over the shuffled training split ---
    for step, (title_tensor, label) in enumerate(tqdm(train_data)):
        output, loss = train(rnn, criterion, title_tensor, label)
        loss_sum += loss
        if step % plot_every == 0:
            # Record a running average of the loss for later plotting.
            all_losses.append(loss_sum / plot_every)
            loss_sum = 0

    # --- evaluation pass: accuracy on the held-out split ---
    correct = 0
    for title, category in tqdm(test_data):
        output = evaluate(rnn, title)
        topn, topi = output.topk(1)
        if topi.item() == category[0].item():
            correct += 1
    print('accuracy', correct / len(test_data))





# Persist the entire model object (architecture + weights) via pickle.
# NOTE(review): saving the whole module is fragile across code/torch versions;
# the recommended pattern is saving rnn.state_dict() instead.
torch.save(rnn, "rnn_model.pkl")


# SECURITY: torch.load unpickles arbitrary objects — only load files you
# trust.  Here it simply round-trips the model saved above.
rnn = torch.load("rnn_model.pkl")





def get_category(title):
    """Classify a raw title string and return its category name.

    BUG FIX: the original passed the raw string straight to
    ``title_to_tensor``, which iterates it character by character — but the
    training data was jieba word-segmented, so inference used a different
    tokenization than training.  Segment with jieba first so both match.
    """
    tokens = list(jieba.cut(title))
    title_tensor = title_to_tensor(tokens)
    output = evaluate(rnn, title_tensor)
    topn, topi = output.topk(1)
    return categories[topi.item()]


def print_test(title):
    """Print the title together with its predicted category."""
    print((title, get_category(title)))


# Spot-check predictions on a few hand-written titles.
for demo_title in (
    "考研心得",
    "北大实验室博士",
    "校外博士招考",
    "急求自然语言处理工程师",
    "校招offer比较",
    "工作还是考研",
    "工作吧",
    "招聘人员",
):
    print_test(demo_title)



