
import json
import faiss
import time
import sys
import torch
sys.path.append('..')
from text2vec import SentenceModel, cos_sim, semantic_search
from sent2vec import Sent2VecEmbeddings
import json
import numpy as np
import os
# BAAI/bge-large-zh-v1.5


# model=Sent2VecEmbeddings(model_name='BAAI/bge-large-zh-v1.5')
def make_rat_file(index_dir, sft_dir, vdb_dir, output_dir, model_name='', k=2000):
    """Augment an SFT dataset with answers retrieved from a FAISS index.

    For every SFT record, embeds the record's ``question``, searches the
    vector database for the top-``k`` nearest neighbours, and fills the
    remaining ``retrieved_result_1..5`` slots with answers sampled at an
    even stride across the ranked hit list. The augmented dataset is
    written to ``output_dir`` as pretty-printed JSON.

    Args:
        index_dir: Directory holding ``index.faiss`` and ``maxNorm.pt``
            (the normalization factor saved when the index was built).
        sft_dir: SFT data path; a ``.json`` list or a ``.jsonl`` file.
            Each record must carry ``question``, ``answer_len`` and the
            already-filled ``retrieved_result_1..answer_len`` fields.
        vdb_dir: Vector-database JSON list; entry ``i`` must hold the
            ``answer`` text for FAISS row ``i``.
        output_dir: Path of the augmented JSON file to write.
        model_name: Sentence-embedding model name for Sent2VecEmbeddings.
        k: Neighbours retrieved per query (default 2000, matching the
            previously hard-coded value).

    Raises:
        ValueError: If ``sft_dir`` is neither ``.json`` nor ``.jsonl``.
    """
    model = Sent2VecEmbeddings(model_name=model_name)
    print('模型加载完成，开始读取数据')

    # --- load SFT records (list of dicts either way) ---------------------
    if sft_dir.endswith('jsonl'):
        with open(sft_dir, 'r', encoding='utf-8') as f:
            data = [json.loads(line) for line in f]
    elif sft_dir.endswith('json'):
        with open(sft_dir, 'r', encoding='utf-8') as f:
            data = json.load(f)
    else:
        # Fail loudly instead of calling exit() from inside a function.
        raise ValueError('sft格式不对')
    print('读完了sft数据', len(data))

    # --- load the vector-database payloads ------------------------------
    with open(vdb_dir, 'r', encoding='utf-8') as f:
        vdb = json.load(f)
    print('读完了vdb数据', len(vdb))

    # maxNorm was saved at index-build time; query embeddings must be
    # scaled by the same factor so scores match the indexed vectors.
    max_norm = torch.load(os.path.join(index_dir, 'maxNorm.pt'))
    faiss_index = faiss.read_index(os.path.join(index_dir, 'index.faiss'))
    # Move the index onto GPU 0 (single-GPU default).
    gpu_resources = faiss.StandardGpuResources()
    faiss_index = faiss.index_cpu_to_gpu(gpu_resources, 0, faiss_index)

    print('开始读取instruction')
    queries = [record['question'] for record in data]

    embeddings, _ = model.embed_documents(queries)
    embeddings /= max_norm
    print('读取完成了embedding', embeddings.shape)

    # neighbors[i] holds the row ids of the k nearest hits for query i.
    _, neighbors = faiss_index.search(embeddings, k=k)
    print('搜索完成了')

    from tqdm import trange
    progress = trange(len(neighbors), desc="Processing docs into embeddings", ncols=100, mininterval=1.5)
    print('开始修改json文件了')

    # NOTE(review): the original code collected answers in a set but never
    # used it, so retrieved answers MAY duplicate the pre-filled ones —
    # de-duplication was presumably intended; confirm before relying on it.
    for i in progress:
        answer_len = data[i]['answer_len']
        if answer_len >= 5:
            continue  # all slots already filled (original loop range was empty)
        # Stride through the top-k hits so sampled answers are spread
        # evenly across the ranked list rather than all taken from the top.
        stride = int((k - 1) / (5 - answer_len))
        pos = 0
        for slot in range(answer_len + 1, 6):
            hit = int(neighbors[i][pos])
            data[i][f'retrieved_result_{slot}'] = vdb[hit]['answer']
            pos += stride

    with open(output_dir, 'w', encoding='utf-8') as merged_file:
        json.dump(data, merged_file, indent=4, ensure_ascii=False)
    
# Driver: fixed paths and model for building the RAT training file.
_rat_config = dict(
    index_dir='/data/lxy/RAT/10m9d_wxb_bge1.5_hnswivf_20500',
    model_name='BAAI/bge-large-zh-v1.5',
    sft_dir='/home/lxy/wxbdata/sft.json',
    vdb_dir='/home/lxy/wxbdata/merge.json',
    output_dir='/home/lxy/wxbdata/rat.json',
)
make_rat_file(**_rat_config)

