# Load the raw data: one title per line in each input file.
# academy_titles.txt -> graduate-admissions related titles (label 0 later)
# job_titles.txt     -> job-posting titles (label 1 later)
with open("academy_titles.txt", encoding="utf-8", mode="r") as f:
    academy_titles = [line.strip() for line in f]

with open("job_titles.txt", encoding="utf-8", mode="r") as f:
    job_titles = [line.strip() for line in f]
# (the original also had a dangling `job_titles[:3]` notebook-cell expression,
# which is a no-op in a script and was removed)


# Merge both sources into one list of [title, label] pairs:
# label 0 = academy title, label 1 = job-posting title.
data_list = [[title, 0] for title in academy_titles]
data_list += [[title, 1] for title in job_titles]
# (a dangling `data_list[:10]` notebook-cell expression — a no-op in a
# script — was removed)


# Length of the longest title; the +2 presumably reserves room for BERT's
# [CLS]/[SEP] special tokens — TODO confirm against the tokenizer settings.
max_len = max((len(example[0]) + 2 for example in data_list), default=0)
print(max_len)


# Split into 70% train / 30% dev; fixed random_state keeps the split reproducible.
from sklearn.model_selection import train_test_split
train_list, dev_list = train_test_split(data_list, test_size=0.3, random_state=15, shuffle=True)


import os
import time
import random
import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm

from transformers import get_linear_schedule_with_warmup, AdamW
from transformers import BertTokenizer, BertForSequenceClassification


# Run configuration and training hyper-parameters.
device = "cuda" if torch.cuda.is_available() else "cpu"

max_train_epochs = 5
warmup_proportion = 0.05          # fraction of total steps used for LR warmup
gradient_accumulation_steps = 4   # optimizer steps once per 4 batches
train_bacth_size = 8  # NOTE(review): name has a typo ("bacth") but is referenced below; kept for compatibility
valid_batch_size = 8
test_batch_size = 8

learning_rate = 2e-5
weight_decay = 0.01
max_grad_norm = 1.0
# Timestamp identifying this run (NOTE(review): ':' is not a legal character
# in Windows filenames if this ever names a file).
cur_time = time.strftime("%Y-%m-%d_%H:%M:%S")


tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")


class MyDataset(torch.utils.data.Dataset):
    """Dataset over a list of ``[title, label]`` examples.

    Tokenization is deliberately deferred to the DataLoader's
    ``collate_fn``, which tokenizes and pads a whole batch in one call.
    """

    def __init__(self, examples):
        # examples: list of [title(str), label(int)] pairs
        self.examples = examples

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, index):
        """Return (title, label, index) for the example at *index*.

        The original implementation also called ``tokenizer.encode_plus``
        here but discarded its result, re-tokenizing every item on every
        epoch for nothing — that dead work has been removed.
        """
        example = self.examples[index]
        return example[0], example[1], index


def the_collate_fn(batch):
    """Collate (title, label, index) triples into model-ready tensors.

    Tokenizes all titles of the batch in a single call with dynamic
    padding, and returns (input_ids, attention_mask, labels, indexes).
    """
    titles = [item[0] for item in batch]
    encoded = tokenizer(titles, padding=True)
    input_ids = torch.LongTensor(encoded["input_ids"])
    attention_mask = torch.LongTensor(encoded["attention_mask"])
    labels = torch.LongTensor([item[1] for item in batch])
    indexes = [item[2] for item in batch]
    return input_ids, attention_mask, labels, indexes


# DataLoaders: shuffle the training set every epoch; keep dev order fixed
# so evaluation indexes stay stable.
train_dataset = MyDataset(train_list)
train_data_loader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=train_bacth_size,
    shuffle=True,
    collate_fn=the_collate_fn,
)

dev_dataset = MyDataset(dev_list)
dev_data_loader = torch.utils.data.DataLoader(
    dev_dataset,
    batch_size=valid_batch_size,
    shuffle=False,
    collate_fn=the_collate_fn,
)


# Use the pre-trained BertForSequenceClassification model directly
# (2-way classification head on top of bert-base-chinese).
model = BertForSequenceClassification.from_pretrained("bert-base-chinese")
model.to(device)

# Total number of optimizer/scheduler steps over the whole run: the
# optimizer steps once every `gradient_accumulation_steps` batches, for
# `max_train_epochs` epochs.  The original set t_total to
# len(train_data_loader) (batches of a single epoch, mislabeled "training
# set size"), which did not match the actual number of scheduler.step()
# calls and distorted the linear warmup/decay schedule.
steps_per_epoch = len(train_data_loader) // gradient_accumulation_steps
t_total = steps_per_epoch * max_train_epochs
num_warmup_steps = int(warmup_proportion * t_total)
print("warmup steps : %d" % num_warmup_steps)

# Exclude biases and LayerNorm weights from weight decay (standard BERT recipe).
no_decay = ["bias", "LayerNorm.weight"]  #  no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [
    {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
     'weight_decay': weight_decay},
    {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
     'weight_decay': 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, correct_bias=False)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
)


def get_score():
    """Run the model over the dev set and return plain accuracy in [0, 1].

    Uses the module-level ``model``, ``dev_data_loader`` and ``device``.
    Raises ZeroDivisionError if the dev loader is empty (same as original).
    """
    model.eval()  # hoisted out of the batch loop — no need to reset per batch
    y_true = []
    y_pred = []
    # BUG FIX: the original `with torch.no_grad():` wrapped only the tensor
    # .to(device) moves; the forward pass ran WITH autograd enabled, building
    # graphs and wasting memory during evaluation.  The whole loop is now
    # inside no_grad.
    with torch.no_grad():
        for batch in tqdm(dev_data_loader):
            input_ids, attention_mask = (b.to(device) for b in batch[:2])
            y_true += batch[2].numpy().tolist()
            logits = model(input_ids, attention_mask)[0]
            y_pred += torch.argmax(logits, 1).cpu().numpy().tolist()
    correct = sum(1 for t, p in zip(y_true, y_pred) if t == p)
    return correct / len(y_pred)


# Training loop with gradient accumulation (effective batch =
# train_bacth_size * gradient_accumulation_steps).
for epoch in range(max_train_epochs):
    b_time = time.time()  # epoch start time

    model.train()
    optimizer.zero_grad()  # clear gradients left over from eval / previous epoch
    for step, batch in enumerate(tqdm(train_data_loader)):
        input_ids, attention_mask, label = (b.to(device) for b in batch[:-1])
        loss = model(input_ids, attention_mask, labels=label)[0]
        # Scale so the accumulated gradients average (rather than sum) over
        # the virtual batch; the printed loss stays unscaled.
        (loss / gradient_accumulation_steps).backward()

        if (step + 1) % gradient_accumulation_steps == 0:
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
    # BUG FIX: this print was mangled by notebook-to-script conversion (an
    # embedded get_ipython().run_line_magic call); reconstructed.
    # NOTE(review): it reports the LAST batch's loss, not a true epoch mean.
    print('Epoch %d Mean Loss %.4f Time %.2f min' % (epoch + 1, loss.item(), (time.time() - b_time) / 60))
    print(get_score())


# Persist the entire model object (not just its state_dict) via pickle;
# reloading therefore requires the same class definitions to be importable.
torch.save(model, 'bert_model.pkl')


# Reload for the inference helper below.
# NOTE(review): torch.load unpickles arbitrary code — only load trusted files.
bert_model = torch.load("bert_model.pkl")


def print_test(title):
    """Classify a single title with ``bert_model`` and print the verdict.

    Prints the title followed by '考研考博' (label 0) or '招聘信息' (label 1).
    Uses the module-level ``bert_model``, ``tokenizer`` and ``device``.
    """
    bert_model.eval()  # FIX: make sure dropout is disabled for inference
    encoded = tokenizer([title])
    input_ids = torch.LongTensor(encoded["input_ids"]).to(device)
    attention_mask = torch.LongTensor(encoded["attention_mask"]).to(device)
    # FIX: the original forward pass ran with autograd enabled; inference
    # needs no graph.
    with torch.no_grad():
        logist = bert_model(input_ids, attention_mask)[0]
    result = torch.argmax(logist, 1).cpu().numpy().tolist()[0]
    result = ['考研考博', '招聘信息'][result]
    print(title, result)


# Sanity-check predictions on a few hand-written titles.
for sample_title in (
    '考研心得',
    '考北大实验室博士',
    '考外校博士',
    '北大实验室招博士',
    '工作or考研?',
    '急求自然语言处理工程师',
    '校招offer比较',
):
    print_test(sample_title)



