# 定义两个list分别存放两个板块的帖子数据
import jieba
import os
import torch
import torch.nn as nn

def _read_titles(path):
    """Read one post title per line from `path` and tokenize it with jieba."""
    titles = []
    with open(path, encoding='utf8') as fh:
        for line in fh:  # read the file line by line
            # strip() removes the trailing newline/whitespace before tokenizing
            titles.append(list(jieba.cut(line.strip())))
    return titles

# Tokenized titles for the two boards, one list of words per title.
academy_titles = _read_titles('academy_titles.txt')
job_titles = _read_titles('job_titles.txt')


# Collect the lower-cased vocabulary across both boards' titles.
word_set = {
    word.lower()
    for board in (academy_titles, job_titles)
    for title in board
    for word in title
}
print(len(word_set))



from tqdm import tqdm

# Load the Tencent pre-trained Chinese word embeddings: each line is a word
# followed by its vector components, space-separated.
word2v = {}  # word -> embedding vector, kept only for words seen in the titles
wl = []      # every word in the embedding file, in file order
# Use a context manager so the (very large) embedding file is closed even if
# iteration is interrupted -- the original opened it and never closed it.
with open('F:\\datas\\nlp\\Tencent_AILab_ChineseEmbedding.txt', encoding='utf8') as f:
    for l in tqdm(f):
        l = l.strip().split(' ')
        wl.append(l[0])
        if l[0] in word_set:
            word2v[l[0]] = list(map(float, l[1:]))




class RNN(nn.Module):
    """Minimal Elman-style recurrent cell for sequence classification.

    At each step the input word vector and the previous hidden state are
    concatenated and fed through two linear layers: one producing the next
    hidden state, the other the log-probability output over the categories.
    """

    def __init__(self, embedding_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        joint = embedding_size + hidden_size  # size of [input, hidden] concat
        self.i2h = nn.Linear(joint, hidden_size)
        self.i2o = nn.Linear(joint, output_size)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, input_tensor, hidden):
        """One recurrence step; returns (log-prob output, next hidden)."""
        joined = torch.cat((input_tensor, hidden), 1)
        return self.softmax(self.i2o(joined)), self.i2h(joined)

    def initHidden(self):
        """All-zero initial hidden state of shape (1, hidden_size)."""
        return torch.zeros(1, self.hidden_size)

# Model hyperparameters.
embedding_size = 200  # dimensionality of the pre-trained word vectors
n_hidden = 128        # size of the recurrent hidden state
n_categories = 2      # two classes: academy titles vs. job-posting titles
rnn = RNN(embedding_size, n_hidden, n_categories)

def title_to_tensor(title):
    """Convert a tokenized title into a (num_known_words, embedding_size) tensor.

    Words missing from `word2v` are silently dropped. The original returned
    `torch.tensor([])` (rank-1, shape (0,)) when NO word was known; return an
    explicitly shaped empty tensor instead so downstream code always sees a
    rank-2 tensor.
    """
    words_vectors = [word2v[word] for word in title if word in word2v]
    if not words_vectors:
        # Keep the rank consistent with the non-empty case.
        return torch.zeros(0, embedding_size, dtype=torch.float)
    return torch.tensor(words_vectors, dtype=torch.float)

def run_rnn(rnn, input_tensor):
    """Feed a (seq_len, embedding_size) tensor through `rnn` step by step.

    Returns the output of the final step. Raises ValueError on an empty
    sequence -- the original would hit an UnboundLocalError there because
    `output` is never assigned when the loop body does not run.
    """
    if input_tensor.size(0) == 0:
        raise ValueError("run_rnn received an empty sequence (no known words in title?)")
    hidden = rnn.initHidden()
    for i in range(input_tensor.size(0)):
        output, hidden = rnn(input_tensor[i].unsqueeze(dim=0), hidden)
    return output

# Category index 0 = academy titles, 1 = job-posting titles.
categories = ["考研考博", "招聘信息"]
# Pair every title tensor with its integer class label.
all_data = [
    (title_to_tensor(t), torch.tensor([0], dtype=torch.long)) for t in academy_titles
] + [
    (title_to_tensor(t), torch.tensor([1], dtype=torch.long)) for t in job_titles
]

# Quick sanity check: convert the first academy title and run a single
# recurrence step, printing the tensors and the hidden-state shape.
input_tensor = title_to_tensor(academy_titles[0])
print('input_tensor:\n', input_tensor)

hidden = rnn.initHidden()
# One forward step on the first word only (not the whole sequence).
output, hidden = rnn(input_tensor[0].unsqueeze(dim=0), hidden)
print('output:\n', output)
print('hidden:\n', hidden)
print('size of hidden:\n', hidden.size())

import random

# Shuffle in place, then split 70/30 into train and test sets.
random.shuffle(all_data)
data_len = len(all_data)
split_ratio = 0.7
cut = int(data_len * split_ratio)
train_data = all_data[:cut]
test_data = all_data[cut:]
print("Train data size: ", len(train_data))
print("Test data size: ", len(test_data))

def evaluate(rnn, input_tensor):
    """Run `rnn` over a full title tensor with gradient tracking disabled.

    Returns the final-step output (log-probabilities over the categories).
    The original also created an unused local hidden state here; `run_rnn`
    initializes its own, so that line was dead code and is removed.
    """
    with torch.no_grad():
        return run_rnn(rnn, input_tensor)

