
import json
import faiss
import time
import sys
import torch
sys.path.append('..')
from text2vec import SentenceModel, cos_sim, semantic_search
from sent2vec import Sent2VecEmbeddings
import json
import numpy as np
import os
# BAAI/bge-large-zh-v1.5


# model=Sent2VecEmbeddings(model_name='BAAI/bge-large-zh-v1.5')
def _load_sft(sft_dir):
    """Load SFT records from *sft_dir*: a .json list or a .jsonl file.

    Exits the process (SystemExit) when the extension is neither.
    """
    if sft_dir.endswith('json'):
        with open(sft_dir, 'r', encoding='utf-8') as f:
            return json.load(f)
    if sft_dir.endswith('jsonl'):
        with open(sft_dir, 'r', encoding='utf-8') as f:
            return [json.loads(line) for line in f]
    print('sft格式不对')
    sys.exit()  # was bare exit(); sys.exit is the non-interactive form


def _build_prompt(question, neighbor_ids, vdb, top_n, rank_counts):
    """Prepend up to *top_n* de-duplicated retrieved answers to *question*.

    Args:
        question: the original SFT question string.
        neighbor_ids: FAISS neighbor row (ordered best-first); only the
            first *top_n* entries are consulted.
        vdb: list of vector-db records; each needs an 'answer' key.
        top_n: how many ranks to consider.
        rank_counts: mutable per-rank counters, incremented in place when
            a rank actually contributes an answer (for the summary print).

    Returns:
        The augmented prompt string: "背景：\\n<answers>\\n\\n问题: <question>".
    """
    seen = set()
    prompt = "背景：\n"
    for rank in range(top_n):
        answer = vdb[int(neighbor_ids[rank])]['answer']
        # De-duplicate: nearby neighbors often share the same answer text.
        # NOTE: the original code forgot to add the rank-3 answer to the
        # seen-set; adding every used answer fixes that latent bug.
        if answer in seen:
            continue
        # Numbering follows the rank (1-based) even when a lower rank was
        # skipped as a duplicate, matching the original output format.
        prompt += f'{rank + 1}.' + answer + '\n'
        seen.add(answer)
        rank_counts[rank] += 1
    return prompt + "\n\n问题: " + question


def make_rat_file(index_dir, sft_dir, vdb_dir, output_dir, model_name='',
                  k=2000, top_n=3, gpu_id=0):
    """Augment every SFT question with retrieved background passages.

    Pipeline: load the FAISS index and the max-norm scaler saved beside it,
    embed all questions from the SFT file, retrieve nearest vector-db
    entries, prepend up to *top_n* de-duplicated answers as a "背景"
    section, and dump the rewritten records as JSON.

    Args:
        index_dir: directory containing 'index.faiss' and 'maxNorm.pt'.
        sft_dir: SFT data path; '.json' (list of dicts) or '.jsonl'.
            Each record must have a 'question' key.
        vdb_dir: JSON list of vector-db records with an 'answer' key.
        output_dir: output JSON file path.
        model_name: embedding model name for Sent2VecEmbeddings.
        k: neighbors requested from FAISS (default keeps the original
            2000, though only *top_n* are consumed).
        top_n: number of de-duplicated answers placed in each prompt.
        gpu_id: GPU the index is moved onto (default 0, as before).

    Raises:
        SystemExit: if *sft_dir* has an unsupported extension.
    """
    model = Sent2VecEmbeddings(model_name=model_name)
    print('模型加载完成，开始读取数据')

    data = _load_sft(sft_dir)
    print('读完了sft数据', len(data))

    with open(vdb_dir, 'r', encoding='utf-8') as f:
        data2 = json.load(f)
    print('读完了vdb数据', len(data2))

    # maxNorm is the scaler used when the index was built; queries must be
    # divided by it so distances are comparable — TODO confirm against the
    # index-building script.
    maxNorm = torch.load(os.path.join(index_dir, 'maxNorm.pt'))
    index = faiss.read_index(os.path.join(index_dir, 'index.faiss'))
    res = faiss.StandardGpuResources()
    index = faiss.index_cpu_to_gpu(res, gpu_id, index)

    print('开始读取instruction')
    query = [d['question'] for d in data]

    embeddings, _ = model.embed_documents(query)
    embeddings /= maxNorm
    print('读取完成了embedding', embeddings.shape)

    D, I = index.search(embeddings, k=k)
    print('搜索完成了')

    from tqdm import trange
    progress = trange(len(I), desc="Processing docs into embeddings",
                      ncols=100, mininterval=1.5)
    print('开始修改json文件了')

    rank_counts = [0] * top_n  # how many prompts each rank contributed to
    for i in progress:
        data[i]['question'] = _build_prompt(
            data[i]['question'], I[i], data2, top_n, rank_counts)

    print('1,2,3', *rank_counts)
    with open(output_dir, 'w', encoding='utf-8') as merged_file:
        json.dump(data, merged_file, indent=4, ensure_ascii=False)
    
# Script entry point. Guarded so that importing this module for its
# functions does not kick off the expensive, GPU-touching pipeline as an
# import side effect (behavior when run directly is unchanged).
if __name__ == '__main__':
    index_dir = '/data/lxy/RAT/10m9d_wxb_bge1.5_hnswivf_20500'
    model_name = 'BAAI/bge-large-zh-v1.5'
    sft_dir = '/home/lxy/wxbdata/output.json'
    vdb_dir = '/home/lxy/wxbdata/merge.json'
    out_dir = '/home/lxy/wxbdata/vdb.json'
    make_rat_file(index_dir=index_dir, model_name=model_name,
                  sft_dir=sft_dir, vdb_dir=vdb_dir, output_dir=out_dir)

