import numpy as np
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from transformers import pipeline

# Load the source text and break it into small retrieval chunks.
loader = TextLoader("data/Jay_Chou.txt")
text_splitter = CharacterTextSplitter(chunk_size=128, chunk_overlap=0)
# 128-char chunks, no overlap: each chunk is an independent retrieval unit.
documentation = text_splitter.split_documents(loader.load())

# Embed chunks with a small sentence-transformer (CPU, L2-normalized
# vectors so cosine similarity reduces to a dot product).
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-MiniLM-L6-v2",
    model_kwargs={'device': 'cpu'},
    encode_kwargs={'normalize_embeddings': True},
)

# Index every chunk in an in-memory Chroma vector store.
db = Chroma.from_documents(documentation, embeddings)

# Retrieve the chunk most similar to the question, then run extractive QA
# over it with a DistilBERT model fine-tuned on SQuAD.
query = "Who is Jay Chou's wife?"
results = db.similarity_search(query)

# Guard: an empty index (or no hits) would make results[0] raise IndexError
# with no useful message.
if not results:
    raise SystemExit(f"No documents retrieved for query: {query!r}")

# Most relevant chunk becomes the QA context; fetch it once and reuse it.
context = results[0].page_content

# Print retrieved document content
print("Retrieved document content:", context)

# Load Hugging Face pipeline for question answering
qa_pipeline = pipeline(
    "question-answering",
    model="distilbert-base-uncased-distilled-squad",
    tokenizer="distilbert-base-uncased-distilled-squad",
)

# Manually format the context for the pipeline
input_data = {
    "context": context,
    "question": query,
}

# Print input to the model
print("Input to the model:", input_data)

# The QA pipeline returns a dict with 'answer', 'score', 'start', 'end';
# we only surface the extracted answer span.
answer = qa_pipeline(input_data)
print("Answer from the model:", answer['answer'])