import os
from io import BytesIO

import PyPDF2
from pinecone.grpc import PineconeGRPC as Pinecone

# Initialize the Pinecone client.
# SECURITY: never commit API keys to source control — the key is read from the
# environment. Raises KeyError at import time if PINECONE_API_KEY is unset,
# which fails fast instead of silently using a leaked credential.
pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])


# Function to split text into chunks with a specified size and overlap
def split_text(text, chunk_size=50, overlap=10):
    """
    Split text into overlapping word chunks.

    :param text: The text to split.
    :param chunk_size: The size of each chunk (number of words). Must be > 0.
    :param overlap: The number of overlapping words between consecutive
        chunks. Must satisfy 0 <= overlap < chunk_size.
    :return: A list of text chunks (empty list for empty/whitespace text).
    :raises ValueError: If chunk_size or overlap are out of range.
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be a positive integer")
    if not 0 <= overlap < chunk_size:
        # overlap >= chunk_size would make the stride zero or negative,
        # causing range() to raise or produce no chunks at all.
        raise ValueError("overlap must satisfy 0 <= overlap < chunk_size")

    words = text.split()
    chunks = []
    step = chunk_size - overlap
    for i in range(0, len(words), step):
        chunks.append(" ".join(words[i:i + chunk_size]))
        if i + chunk_size >= len(words):
            # Every remaining word is already in this chunk; further
            # iterations would only emit sub-chunks of it.
            break
    return chunks


# Function to process PDF file
def process_pdf(file, chunk_size=50, overlap=10):
    """
    Read a PDF upload and break its text into identified chunks.

    :param file: The file object (e.g., from FastAPI's UploadFile).
    :param chunk_size: The size of each chunk (number of words).
    :param overlap: The number of overlapping words between chunks.
    :return: A list of dicts, each with an "id" ("page_<p>_chunk_<c>",
        1-based) and the chunk "text".
    """
    # Wrap the raw upload bytes so PyPDF2 can seek within them.
    pdf_stream = BytesIO(file.file.read())
    pdf = PyPDF2.PdfReader(pdf_stream)

    records = []
    for page_no, page in enumerate(pdf.pages, start=1):
        extracted = page.extract_text()
        if not extracted:
            # Skip image-only or empty pages.
            continue
        pieces = split_text(extracted, chunk_size, overlap)
        for chunk_no, piece in enumerate(pieces, start=1):
            records.append({
                "id": f"page_{page_no}_chunk_{chunk_no}",
                "text": piece,
            })
    return records


# Function to upload chunks to Pinecone
def upload_to_pinecone(chunks, index_name="chatbot", namespace="example-namespace"):
    """
    Embed text chunks and upsert them into a Pinecone index.

    :param chunks: A list of dicts with "id" and "text" keys.
    :param index_name: The name of the Pinecone index.
    :param namespace: The namespace in the Pinecone index.
    """
    index = pc.Index(index_name)

    # Embed all chunk texts in one batched inference call.
    embeddings = pc.inference.embed(
        model="multilingual-e5-large",
        inputs=[chunk["text"] for chunk in chunks],
        parameters={"input_type": "passage", "truncate": "END"},
    )

    # Pair each chunk with its embedding; the original text is kept in
    # metadata so query results can surface it directly.
    records = [
        {
            "id": chunk["id"],
            "values": embedding["values"],
            "metadata": {"text": chunk["text"]},
        }
        for chunk, embedding in zip(chunks, embeddings)
    ]

    index.upsert(vectors=records, namespace=namespace)


# Main function to handle PDF processing and upload
def handle_pdf_upload(file):
    """
    Run the full pipeline: parse a PDF into chunks and push them to Pinecone.

    :param file: The file object (e.g., from FastAPI's UploadFile).
    :return: The number of chunks processed and uploaded.
    """
    text_chunks = process_pdf(file)
    upload_to_pinecone(text_chunks)
    return len(text_chunks)