# chagu-demo / rag_sec / semantic_response_generator.py
# (Hugging Face listing metadata preserved from the original paste:
#  author talexm, commit f861dee, "raw / history / blame", 434 bytes)
from transformers import pipeline
class SemanticResponseGenerator:
    """Generate a natural-language response grounded in retrieved documents.

    Wraps a Hugging Face ``text-generation`` pipeline: the top retrieved
    documents are joined into a prompt and the model continues it.
    """

    def __init__(self, model_name="gpt2", max_new_tokens=100):
        """Build the underlying generation pipeline.

        Args:
            model_name: Hugging Face model id for the text-generation
                pipeline (default ``"gpt2"``, matching the original).
            max_new_tokens: cap on *generated* tokens per response.

        Note: ``pipeline(...)`` downloads and caches the model weights on
        first use, so construction can be slow / require network access.
        """
        self.generator = pipeline("text-generation", model=model_name)
        self.max_new_tokens = max_new_tokens

    def generate_response(self, retrieved_docs):
        """Return generated text continuing a prompt built from the docs.

        Args:
            retrieved_docs: list of document strings, ranked best-first;
                only the top two are included in the prompt.

        Returns:
            The model's ``generated_text`` (which includes the prompt, as
            the pipeline returns prompt + continuation), or a fallback
            message when no documents were retrieved.
        """
        # Guard: with no context the original would generate from a bare
        # prompt stub, producing ungrounded text.
        if not retrieved_docs:
            return "No relevant information found."
        combined_docs = " ".join(retrieved_docs[:2])  # use top 2 matches
        prompt = f"Based on the following information: {combined_docs}"
        # Use max_new_tokens rather than the original max_length=100:
        # max_length counts the prompt tokens too, so a long retrieved
        # context left little or no room for the generated continuation.
        response = self.generator(prompt, max_new_tokens=self.max_new_tokens)
        return response[0]["generated_text"]