# vella-backend/_utils/resumo_completo_cursor.py

import os

from _utils.gerar_relatorio_modelo_usuario.prompts import prompt_auxiliar_SEM_CONTEXT
from _utils.gerar_relatorio_modelo_usuario.EnhancedDocumentSummarizer import (
    EnhancedDocumentSummarizer,
)
from _utils.gerar_relatorio_modelo_usuario.contextual_retriever import (
    ContextualRetriever,
)
from _utils.gerar_relatorio_modelo_usuario.utils import (
    gerar_resposta_compilada,
    get_full_text_and_all_PDFs_chunks,
    get_response_from_auxiliar_contextual_prompt,
)
from _utils.models.gerar_relatorio import RetrievalConfig


def reciprocal_rank_fusion(result_lists, weights=None):
    """Combine multiple ranked lists of (doc_id, score) pairs into a single ranking.

    Despite the name, this implementation sums each document's weighted scores across
    the input lists rather than applying the classic 1 / (k + rank) formula.
    """
    fused_scores = {}
    num_lists = len(result_lists)

    # Weight every list equally by default
    if weights is None:
        weights = [1.0] * num_lists

    # Accumulate each document's weighted score across all lists
    for i in range(num_lists):
        for doc_id, score in result_lists[i]:
            if doc_id not in fused_scores:
                fused_scores[doc_id] = 0
            fused_scores[doc_id] += weights[i] * score

    # Sort by fused score in descending order
    sorted_results = sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    return sorted_results
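

# Illustrative example (not part of the original module): with the default equal weights,
#   reciprocal_rank_fusion([[("a", 3), ("b", 1)], [("b", 2), ("a", 1)]])
# returns [("a", 4.0), ("b", 3.0)], i.e. each document's scores are simply summed.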

# Enable LangSmith tracing for this module; LANGCHAIN_API_KEY is expected to already be
# set in the environment (the value is only read here, not assigned).
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ.get("LANGCHAIN_API_KEY")
os.environ["LANGCHAIN_PROJECT"] = "VELLA"


async def get_llm_summary_answer_by_cursor_complete(
    serializer, listaPDFs, isBubble=False
):
    """The "contexto" parameter should only be passed when running the ragas test,
    i.e. when you do not want to pass PDFs."""

    # Configuration
    config = RetrievalConfig(
        num_chunks=serializer["num_chunks_retrieval"],
        embedding_weight=serializer["embedding_weight"],
        bm25_weight=serializer["bm25_weight"],
        context_window=serializer["context_window"],
        chunk_overlap=serializer["chunk_overlap"],
    )
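
    # The serializer mapping is expected to carry the keys read throughout this function:
    # num_chunks_retrieval, embedding_weight, bm25_weight, context_window, chunk_overlap,
    # chunk_size, claude_context_model, hf_embedding, num_k_rerank, model_cohere_rerank,
    # prompt_auxiliar, model, gpt_temperature, prompt_gerar_documento,
    # should_use_llama_parse, should_have_contextual_chunks and llm_ultimas_requests.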

    contextual_retriever = ContextualRetriever(
        config, serializer["claude_context_model"]
    )

    # Initialize enhanced summarizer
    summarizer = EnhancedDocumentSummarizer(
        config=config,
        embedding_model=serializer["hf_embedding"],
        chunk_overlap=serializer["chunk_overlap"],
        chunk_size=serializer["chunk_size"],
        num_k_rerank=serializer["num_k_rerank"],
        model_cohere_rerank=serializer["model_cohere_rerank"],
        prompt_auxiliar=serializer["prompt_auxiliar"],
        gpt_model=serializer["model"],
        gpt_temperature=serializer["gpt_temperature"],
        prompt_gerar_documento=serializer["prompt_gerar_documento"],
        reciprocal_rank_fusion=reciprocal_rank_fusion,
    )

    # Extract the full text and the chunks of every PDF
    all_PDFs_chunks, full_text_as_array = await get_full_text_and_all_PDFs_chunks(
        listaPDFs, summarizer.splitter, serializer["should_use_llama_parse"], isBubble
    )

    is_contextualized_chunk = serializer["should_have_contextual_chunks"]

    if is_contextualized_chunk:
        # Build an auxiliary summary of the full text and use it to contextualize each chunk
        response_auxiliar_summary = await get_response_from_auxiliar_contextual_prompt(
            full_text_as_array
        )
        contextualized_chunks = await contextual_retriever.contextualize_all_chunks(
            all_PDFs_chunks, response_auxiliar_summary
        )
        chunks_processados = contextualized_chunks
    else:
        chunks_processados = all_PDFs_chunks

    # Create enhanced vector store and BM25 index
    vector_store, bm25, chunk_ids = (
        summarizer.vector_store.create_enhanced_vector_store(
            chunks_processados, is_contextualized_chunk
        )
    )

    llm_ultimas_requests = serializer["llm_ultimas_requests"]

    # Generate enhanced summary
    structured_summaries = await summarizer.generate_enhanced_summary(
        vector_store, bm25, chunk_ids, llm_ultimas_requests, prompt_auxiliar_SEM_CONTEXT
    )

    # Anything other than a list is treated as an error payload from the summarizer
    if not isinstance(structured_summaries, list):
        from rest_framework.response import Response

        return Response({"erro": structured_summaries})

    # Concatenate the pre-generated summary with the content of every structured section
    texto_completo = summarizer.resumo_gerado + "\n\n"
    for x in structured_summaries:
        texto_completo = texto_completo + x["content"] + "\n"

    print("\n\ntexto_completo[0: 1000]: ", texto_completo[0:1000])

    return {
        "resultado": structured_summaries,
        "texto_completo": texto_completo,
        "parametros-utilizados": gerar_resposta_compilada(serializer),
    }
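

# Minimal usage sketch (hypothetical caller, not part of this module), assuming a DRF view
# with validated serializer data and a list of PDF paths/URLs named lista_pdfs:
#   resultado = await get_llm_summary_answer_by_cursor_complete(
#       serializer.validated_data, lista_pdfs, isBubble=True
#   )
#   resultado["resultado"]              # structured summaries
#   resultado["texto_completo"]         # concatenated text
#   resultado["parametros-utilizados"]  # compiled request parameters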