import re
import pandas as pd
from transformers import BertTokenizer, BertModel
import torch
import numpy as np
import lightgbm as lgb
import json
import joblib


# query=pd.read_json('D:/small_dev.json')
#
# docs=query['docs']
# c=[]
# d=[]
# p=[]
# q=[]
# for i in range(len(query)):
#     for j in range(len(docs[i])):
#         c.append(docs[i][j]['content'])
#         d.append(docs[i][j]['didx'])
#         p.append(query.loc[i,'qidx'])
#         q.append(query.loc[i,'q'])
# df1=pd.DataFrame({'qidx':p,'q':q,'content':c,'didx':d})
#
#
# df1.to_csv('D:/test.csv',encoding='utf-8')


# Pick the compute device: prefer CUDA when a GPU is present, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained Chinese BERT encoder (moved to the chosen device)
# and its matching tokenizer.
model = BertModel.from_pretrained('bert-base-chinese').to(device)
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

# Load the flattened (query, document) pairs produced by the preprocessing
# step: one row per candidate document per query.
df4 = pd.read_csv('./test.csv', encoding='utf-8')
q = df4["q"].values            # query text per row
content = df4["content"].values  # candidate document text per row

q1 = list(df4['qidx'].values)          # query ids
q2 = (df4['didx'].apply(str).values)   # document ids, as strings

# One [qidx, didx] record per row; the prediction score is appended later.
# (Replaces the original index-based range(len(...)) append loop.)
pred_all = [[qidx, didx] for qidx, didx in zip(q1, q2)]

# Encode each (query, candidate) pair with BERT: take the hidden state of the
# first ([CLS]) token for each text and concatenate the two 768-dim vectors
# into one feature row per pair.
test = []
with torch.no_grad():  # inference only — skip autograd bookkeeping
    for query_text, doc_text in zip(q, content):
        # truncation=True is required: max_length alone does not truncate in
        # current transformers, and BERT fails on inputs longer than 512 tokens.
        query_tokens = tokenizer.encode(query_text, max_length=512, truncation=True)
        query_vec = model(torch.tensor([query_tokens]).to(device))[0][0][0].cpu().numpy()
        candidate_tokens = tokenizer.encode(doc_text, max_length=512, truncation=True)
        candidate_vec = model(torch.tensor([candidate_tokens]).to(device))[0][0][0].cpu().numpy()
        test.append(np.concatenate((query_vec, candidate_vec), axis=0))
# NOTE: the original also built `train = np.array(test)` but never used it
# (prediction below consumes `test` directly) — removed as dead code.

# Load the trained LightGBM model and score every (query, document) pair.
gbm = joblib.load('./loan_model.pkl')

pre = gbm.predict(test)

# Attach each predicted score to its [qidx, didx] record.
for record, score in zip(pred_all, pre):
    record.append(score)

# Group candidate documents by query id and rank them by predicted relevance.
# BUG FIX: the original sorted the didx *strings* reverse-lexicographically,
# silently discarding the LightGBM scores computed above — the model output
# never influenced the ranking. Rank by score (descending) instead.
dic = {}
for qidx, didx, score in pred_all:
    dic.setdefault(str(qidx), []).append((score, didx))

for key in dic:
    # Highest-scoring documents first; emit only the doc ids.
    dic[key] = [didx for score, didx in
                sorted(dic[key], key=lambda pair: pair[0], reverse=True)]

# Use a context manager so the output file is closed deterministically
# (the original leaked the handle returned by open()).
with open('./prediction_test.json', "w", encoding="utf8") as fout:
    json.dump(dic, fout, indent=2, ensure_ascii=False)