import pandas as pd
import numpy as np
import jieba
import re
from transformers import BertTokenizer, BertModel
import torch
import lightgbm as lgb



# query=pd.read_json('./small_dev-1.json')
# 
# docs=query['docs']
# c=[]
# d=[]
# p=[]
# q=[]
# for i in range(len(query)):
#     for j in range(len(docs[i])):
#         c.append(docs[i][j]['content'])
#         d.append(docs[i][j]['didx'])
#         p.append(query.loc[i,'qidx'])
#         q.append(query.loc[i,'q'])
# df1=pd.DataFrame({'qidx':p,'q':q,'content':c,'didx':d})
# df1['label']=[0 for i in range(len(df1))]
# 
# 
# label=query['labels']
# list(label[0].items())
# labels=[]
# for i in range(len(label)):
#     for j in range(len(list(label[i].items()))):
#         labels.append(list(label[i].items())[j])
# 
# 
# for i in range(len(df1)):
#     for j in range(len(labels)):
#         if df1.loc[i,'didx'] == labels[j][0]:
#             df1.loc[i,'label'] = labels[j][1]
# 
# df1.to_csv('predata_q.csv',encoding='utf-8')
# 
# 
# 
# # 对文本进行分词
# def tokenize(text):
#     return jieba.lcut(text)
# 
# # 停用词过滤
# def remove_stop_words(tokens):
#     return [token for token in tokens if token not in stop_words]
# 
# # 预处理文本
# def preprocess(text):
#     tokens = tokenize(text)
#     filtered_tokens = remove_stop_words(tokens)
#     return filtered_tokens
# q=[]
# t=[]
# for i in range(len(df4)):
#     q.append(preprocess(str(df4.loc[i,'q'])))
#     t.append(preprocess(str(df4.loc[i,'content'])))
# df4['q']=q
# df4['content']=t


# Pretrained Chinese BERT used as a frozen feature extractor.
# NOTE(review): from_pretrained is presumed to return the model in eval
# mode (transformers default) — confirm, since no model.eval() is called.
model = BertModel.from_pretrained('bert-base-chinese')
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')


# Preprocessed (query, document, label) rows written by the commented-out
# preparation script above; assumed columns include 'q', 'content', 'label'
# — TODO confirm against predata_q.csv.
df4=pd.read_csv('./predata_q.csv',encoding='utf-8')

# Raw query and document texts, aligned row-by-row with df4.
q = df4["q"].values
content = df4["content"].values

# Build one feature vector per (query, document) row: the BERT [CLS]
# embedding of the query concatenated with the [CLS] embedding of the
# candidate document (768 + 768 = 1536 dims).
train = []
for i in range(len(df4)):
    # str() guards against NaN floats pandas produces for empty cells
    # (the commented-out prep code above applied the same cast).
    # truncation=True is required: max_length alone does NOT truncate in
    # current transformers, and sequences longer than 512 tokens would
    # overflow BERT's position embeddings and crash.
    query_tokens = tokenizer.encode(str(q[i]), max_length=512, truncation=True)
    candidate_tokens = tokenizer.encode(str(content[i]), max_length=512, truncation=True)
    # Inference only: no_grad skips building the autograd graph, which
    # otherwise wastes substantial memory on every iteration.
    with torch.no_grad():
        # outputs[0] is last_hidden_state (1, seq, 768); [0][0] selects
        # the [CLS] token embedding of the single batch element.
        query_vec = model(torch.tensor([query_tokens]))[0][0][0].numpy()
        candidate_vec = model(torch.tensor([candidate_tokens]))[0][0][0].numpy()
    train.append(np.concatenate((query_vec, candidate_vec), axis=0))
train = np.array(train)
# Relevance labels aligned with the feature rows.
label = df4["label"].values



# Fit a LightGBM classifier on the concatenated BERT embeddings and report
# accuracy.  NOTE: the score below is computed on the training set itself,
# so it is an optimistic estimate, not a generalization measure.
classifier = lgb.LGBMClassifier()
classifier.fit(train, label)

print("Accuracy:", classifier.score(train, label))
