import json
from pathlib import Path
import requests
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from langchain.text_splitter import RecursiveCharacterTextSplitter
import PyPDF2

class VitisHLSDocLoader:
    """Load the Vitis HLS user guide PDF and serve TF-IDF-ranked context chunks.

    Workflow: extract text from the PDF, split it into overlapping chunks
    (cached as JSON), fit a TF-IDF vectorizer over the chunks, then answer
    queries by cosine similarity against the chunk matrix.
    """

    def __init__(self):
        # Location of the Vitis HLS user guide (UG1399) PDF — expected to
        # exist on disk; fetch_documentation() reports if it does not.
        self.pdf_path = Path("docs/ug1399-vitis-hls.pdf")
        # Directory used to cache the extracted, pre-split text chunks.
        self.cache_dir = Path("cache")
        self.cache_dir.mkdir(exist_ok=True)
        self.vectorizer = TfidfVectorizer()
        self.chunks: list[str] = []
        # scipy sparse matrix of TF-IDF features (one row per chunk);
        # None until create_vector_store() has run successfully.
        self.tfidf_matrix = None

    def fetch_documentation(self):
        """Extract the full text of the Vitis HLS documentation PDF.

        Returns:
            The concatenated text of all pages (newline-joined), or None if
            the PDF is missing or cannot be parsed.
        """
        try:
            if not self.pdf_path.exists():
                raise FileNotFoundError(f"PDF file not found at {self.pdf_path}")

            # Read PDF file page by page.
            text_content = []
            with open(self.pdf_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                for page in pdf_reader.pages:
                    # extract_text() may return None for image-only pages;
                    # coalesce to "" so the join below cannot raise TypeError.
                    text_content.append(page.extract_text() or "")

            return "\n".join(text_content)

        except Exception as e:
            # Best-effort: callers treat None as "documentation unavailable".
            print(f"Error reading PDF documentation: {e}")
            return None

    def create_vector_store(self):
        """Build (or load from cache) the chunk list and fit the TF-IDF matrix.

        Returns:
            True on success; None if the documentation could not be read or
            yielded no text.
        """
        # Check cache first — reuse previously split chunks if present.
        cache_file = self.cache_dir / "vitis_hls_docs.json"
        if cache_file.exists():
            with open(cache_file, 'r', encoding='utf-8') as f:
                self.chunks = json.load(f)
        else:
            # Fetch and process documentation from the PDF.
            docs = self.fetch_documentation()
            if not docs:
                return None

            # Split text into overlapping chunks sized for retrieval context.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=1000,
                chunk_overlap=200,
                length_function=len,
            )
            self.chunks = text_splitter.split_text(docs)

            # Cache the chunks so later runs skip PDF extraction.
            with open(cache_file, 'w', encoding='utf-8') as f:
                json.dump(self.chunks, f)

        # Guard: fit_transform raises on an empty corpus — treat it as
        # "no documentation available" instead of crashing.
        if not self.chunks:
            return None

        # Create the TF-IDF matrix over all chunks.
        self.tfidf_matrix = self.vectorizer.fit_transform(self.chunks)
        return True

    def get_relevant_context(self, query: str, k: int = 3):
        """Return the k chunks most similar to *query*, joined by blank lines.

        Lazily builds the vector store on first use; returns "" when the
        store cannot be built.
        """
        # BUG FIX: `if not self.tfidf_matrix` raised ValueError once the
        # sparse matrix existed (scipy sparse matrices have no unambiguous
        # truth value) — test for None explicitly instead.
        if self.tfidf_matrix is None:
            if not self.create_vector_store():
                return ""

        # Transform the query into the fitted TF-IDF space.
        query_vector = self.vectorizer.transform([query])

        # Cosine similarity between the query and every chunk.
        similarities = cosine_similarity(query_vector, self.tfidf_matrix).flatten()

        # argsort is ascending: take the last k, then reverse so the most
        # similar chunk comes first.
        top_k_indices = np.argsort(similarities)[-k:][::-1]

        # Return the most relevant chunks.
        return "\n\n".join(self.chunks[i] for i in top_k_indices)