

# from transformers import BertTokenizer, BertModel,AutoModel,AutoTokenizer
# import torch

# text="你好，请问一下，东北大学的图书馆报告厅在哪儿？"

# model_name='BAAI/bge-large-zh-v1.5'
# tokenizer = AutoTokenizer.from_pretrained(model_name,model_max_length=512)
# # tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModel.from_pretrained(model_name)
# print(isinstance(model,torch.nn.Module))
# model.cuda()
# model1 = AutoModel.from_pretrained(model_name)
# model2 = AutoModel.from_pretrained(model_name)
# # model.eval()
# model1.cuda()
# model2.cuda()
# encoded_input = tokenizer(text, padding=True, truncation=True, return_tensors='pt').to('cuda')
# print(encoded_input)
# print(tokenizer.decode(101))
# with torch.no_grad():
#     model_output = model(**encoded_input).last_hidden_state


# print(model_output)
# print(model_output.shape)
# norm=torch.norm(model_output,dim=-1)
# values,indices=torch.topk(norm,k=13,sorted=False)
# print(indices)
# text_split=list(text)
# for i in indices[0].tolist():
#     print(text_split[i-1])
# # if torch.max()
# #     embeddings/=maxNorm
# #     print(f'自己编码的向量最大范数原本是{maxNorm},现在变成了{torch.max(torch.norm(embeddings,dim=1))}')

# Chat-history records: each entry carries an integer "id" and the
# question text under "content".
history = [
    {"id": 0, "content": "相似问题0"},
    {"id": 1, "content": "相似问题1"},
    {"id": 2, "content": "相似问题2"},
]

# Collect each entry's text for insertion into the vector DB.
# Bug fix: the original indexed h['queText'] and h['ansText'], keys that do
# not exist in `history` (only 'id' and 'content' are present), so the loop
# raised KeyError on the first iteration.
history_for_vdb = []
for h in history:
    history_for_vdb.append(h["content"])

# Bug fix: the original line was a SyntaxError (an un-parenthesized tuple
# inside a list comprehension) and iterated an undefined name `a`; it now
# prints (id, content) pairs from `history`.
print([(h["id"], h["content"]) for h in history])