import pandas as pd
import numpy as np
import jieba
import math
import re
from transformers import BertTokenizer, BertModel
import torch
import lightgbm as lgb
import json
import joblib
from sklearn.metrics import accuracy_score

# Pick the compute device: use the GPU when one is available, else fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Load the pretrained Chinese BERT encoder (moved onto the chosen device)
# together with its matching tokenizer.
model = BertModel.from_pretrained('bert-base-chinese').to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

# Load the inference dataset: each row pairs a question with a candidate document.
df4 = pd.read_csv('./predata_q.csv', encoding='utf-8')
q = df4["q"].values
content = df4["content"].values

# Question ids and document ids (document ids coerced to str).
q1 = list(df4['qidx'].values)
q2 = list(df4['didx'].apply(str).values)

# One [question_id, document_id] pair per row, in dataset order.
pred_all = [[qid, did] for qid, did in zip(q1, q2)]


def _cls_vector(text):
    """Return the final-layer [CLS] embedding of *text* as a 1-D numpy array.

    Encodes with the module-level BERT tokenizer/model on `device`.
    """
    # truncation=True is required: max_length alone does NOT cut long texts,
    # and BERT fails on inputs beyond its 512-position limit.
    tokens = tokenizer.encode(text, max_length=512, truncation=True)
    # Inference only — no_grad skips autograd bookkeeping and saves memory.
    with torch.no_grad():
        out = model(torch.tensor([tokens]).to(device))
    # out[0] = last_hidden_state; [0] = first (only) batch item; [0] = [CLS] token.
    return out[0][0][0].cpu().numpy()


# Build one feature row per (question, candidate) pair: the two [CLS]
# vectors concatenated into a single vector for the LightGBM model.
train = []
for question, candidate in zip(q, content):
    vec = np.concatenate((_cls_vector(question), _cls_vector(candidate)), axis=0)
    train.append(vec)
train = np.array(train)

# Restore the trained LightGBM model from disk and score the feature matrix.
gbm = joblib.load('./loan_model4.pkl')

pre = gbm.predict(train)
print(pre)