from nltk.tokenize import sent_tokenize
from sentence_transformers import CrossEncoder

import OpenApiFunction
from ExtractTextFromPdf import *
from OpenApiFunction import *
from Chromadb import *

# Extract raw paragraph text from the llama2 paper (project helper from
# ExtractTextFromPdf; page-numbering convention is defined there — TODO confirm
# whether [2, 3] is 0- or 1-based).
paragraphs = extract_text_from_pdf("../llama2.pdf", page_numbers=[2, 3])


def split_text(paragraphs, chunk_size=300, overlap_size=100, min_line_length=10):
    """Split paragraphs into overlapping, sentence-aligned text chunks.

    Each chunk is built greedily from whole sentences up to roughly
    ``chunk_size`` characters and is prefixed with up to ``overlap_size``
    characters of the sentences that closed the previous chunk, so adjacent
    chunks share context.

    Args:
        paragraphs: Iterable of paragraph strings to split.
        chunk_size: Soft upper bound (in characters) on a chunk's length.
        overlap_size: Soft upper bound (in characters) on the overlap
            carried over from preceding sentences.
        min_line_length: NOTE(review) currently unused — presumably meant to
            filter out very short sentences; kept for interface
            compatibility. TODO: confirm intent and wire it in or drop it.

    Returns:
        list[str]: The overlapping chunks, in document order.
    """
    # Flatten all paragraphs into one list of stripped sentences.
    sentences = [s.strip() for p in paragraphs for s in sent_tokenize(p)]
    chunks = []
    i = 0
    while i < len(sentences):
        chunk = sentences[i]
        overlap = ''
        prev = i - 1
        # Walk backwards, prepending earlier sentences while they still fit
        # inside the overlap budget.
        while prev >= 0 and len(sentences[prev]) + len(overlap) <= overlap_size:
            overlap = sentences[prev] + ' ' + overlap
            prev -= 1
        chunk = overlap + chunk
        # Walk forwards, appending sentences while the chunk budget allows.
        # Named `end` rather than `next` to avoid shadowing the builtin.
        end = i + 1
        while end < len(sentences) and len(sentences[end]) + len(chunk) <= chunk_size:
            chunk = chunk + ' ' + sentences[end]
            end += 1
        chunks.append(chunk)
        # Resume after the last sentence consumed by this chunk; `end` is at
        # least i + 1, so the loop always makes progress.
        i = end
    return chunks


# Chunk the extracted paragraphs: ~300-char chunks with ~100-char overlap.
chunks = split_text(paragraphs, 300, 100)

# Create a vector-database wrapper (project helper from Chromadb) backed by a
# collection named "demo", with get_embeddings as its embedding function.
vector_db = MyVectorDBConnector("demo", get_embeddings)

# NOTE(review): get_embeddings is called once per chunk here. If it returns a
# *list* of vectors (one per input text), each element of chunk_embeddings is
# a one-element list rather than a flat vector — verify against
# OpenApiFunction; calling get_embeddings(chunks) once may be the intent.
chunk_embeddings = [get_embeddings(chunk) for chunk in chunks]

# Add each chunk with its embedding, source metadata, and a stable id to the
# underlying Chroma collection.
vector_db.collection.add(
    embeddings=chunk_embeddings,
    documents=chunks,
    metadatas=[{"source": "llama2.pdf"} for _ in chunks],
    ids=[f"ck_{i}" for i in range(len(chunks))]
)

user_query = "how safe is llama 2"

# Retrieve the 5 chunks most similar to the query. The role of the extra
# get_embeddings argument is defined by semantic_search in Chromadb — not
# visible here; presumably it embeds the query. TODO confirm.
search_results = vector_db.semantic_search(get_embeddings, user_query, 5)

# Re-rank the retrieved chunks with a cross-encoder, which scores each
# (query, document) pair jointly rather than comparing precomputed vectors.
model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', max_length=512)
scores = model.predict([(user_query, doc) for doc in search_results['documents'][0]])
# Sort documents by cross-encoder score, best match first.
sorted_list = sorted(zip(scores, search_results['documents'][0]), key=lambda x: x[0], reverse=True)
for score, doc in sorted_list:
    print(f"{score}\t{doc}\n")
