# prepare_knowledge.py - Script to prepare the floral art knowledge base

import os
import re
import json
import logging
import requests
import PyPDF2
from typing import List, Dict, Any, Optional
from tqdm import tqdm
from bs4 import BeautifulSoup
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
import torch
from dotenv import load_dotenv

# Load environment variables from a local .env file, if one exists.
load_dotenv()
# Key for the DeepSeek chat API; API-dependent steps are skipped when unset.
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")

# Constants
KNOWLEDGE_DIR = "./knowledge"  # raw source PDFs live here
PROCESSED_DIR = "./processed_knowledge"  # extracted metadata, categories, guides
DB_PATH = "./vector_db"  # persisted FAISS index directory
EMBEDDING_MODEL = "BAAI/bge-m3"  # HuggingFace model id used for embeddings
CHUNK_SIZE = 1000  # characters per text chunk fed to the splitter
CHUNK_OVERLAP = 150  # characters of overlap between adjacent chunks

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Module-level logger named after this module, per stdlib convention.
logger = logging.getLogger(__name__)

class KnowledgeBasePreparation:
    """End-to-end pipeline that builds the floral-art knowledge base.

    Stages: (optionally) download source books, extract text and metadata
    from PDFs, extract domain categories via the DeepSeek chat API, embed
    the chunks into a FAISS vector store, and generate topic guides from
    retrieved passages.
    """

    # Chat-completions endpoint used for all DeepSeek requests.
    # NOTE(review): hard-coded LAN address — presumably an internal API
    # gateway; confirm before running from another network.
    API_URL = "http://10.2.8.77:3000/v1/chat/completions"
    # Seconds before an API request is abandoned. Fix: the original
    # requests.post calls had no timeout and could hang forever.
    REQUEST_TIMEOUT = 120

    def __init__(self):
        # Create working directories up front so later stages can write freely.
        for directory in [KNOWLEDGE_DIR, PROCESSED_DIR, DB_PATH]:
            os.makedirs(directory, exist_ok=True)

        # Splitter falls back from paragraph -> line -> sentence -> word -> char.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP,
            separators=["\n\n", "\n", ". ", " ", ""]
        )

        # BGE-M3 embeddings; run on GPU when one is available.
        self.embedding_model = HuggingFaceEmbeddings(
            model_name=EMBEDDING_MODEL,
            model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
        )

    def _call_deepseek_chat(self, system_prompt: str, user_prompt: str,
                            temperature: float, max_tokens: int) -> str:
        """Send one chat-completion request to the DeepSeek endpoint.

        Shared by extract_floral_categories() and create_specialized_guides(),
        whose request code was previously duplicated verbatim.

        Returns the assistant message content.
        Raises requests.RequestException on network/HTTP errors and
        KeyError/ValueError on a malformed response body; callers wrap
        this in their own try/except and log with their own context.
        """
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {DEEPSEEK_API_KEY}"
        }
        data = {
            "model": "deepseek-chat",
            "messages": [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ],
            "temperature": temperature,
            "max_tokens": max_tokens
        }
        response = requests.post(
            self.API_URL,
            headers=headers,
            json=data,
            timeout=self.REQUEST_TIMEOUT
        )
        # Fail loudly on HTTP errors instead of trying to parse an error page.
        response.raise_for_status()
        result = response.json()
        return result["choices"][0]["message"]["content"]

    def download_books(self, urls_file: str):
        """Download books from Z-Library using a file of URLs.

        urls_file: path to a text file with one book URL per line.
        Currently a stub: it derives the target filename and logs what it
        would fetch, but performs no actual network download.
        """
        if not os.path.exists(urls_file):
            logger.error(f"URLs file not found: {urls_file}")
            return

        with open(urls_file, 'r') as f:
            urls = [line.strip() for line in f if line.strip()]

        logger.info(f"Found {len(urls)} URLs to process")

        for url in tqdm(urls, desc="Downloading books"):
            # Derive the title from the ".../book/<id>/<hash>/<title>.html" URL shape.
            match = re.search(r'/book/\d+/[^/]+/([^.]+)\.html', url)
            if not match:
                logger.warning(f"Could not extract book title from URL: {url}")
                continue

            book_title = match.group(1)
            output_path = os.path.join(KNOWLEDGE_DIR, f"{book_title}.pdf")

            # Skip if already downloaded
            if os.path.exists(output_path):
                logger.info(f"Book already downloaded: {book_title}")
                continue

            # NOTE: In a real implementation, you would need to handle Z-Library's
            # authentication and download process. This is simplified here.
            logger.info(f"Would download: {book_title} from {url}")

            # Placeholder for actual download code
            # with open(output_path, 'wb') as f:
            #     f.write(response.content)

    def process_pdfs(self):
        """Process all PDFs in the knowledge directory.

        Extracts one Document per page (tagged with source/type metadata)
        and a per-file metadata record saved to PROCESSED_DIR/metadata.json.
        Returns the list of extracted langchain Document objects.
        """
        # Case-insensitive extension match (fix: the original missed ".PDF").
        pdf_files = [f for f in os.listdir(KNOWLEDGE_DIR)
                     if f.lower().endswith('.pdf')]
        logger.info(f"Found {len(pdf_files)} PDF files to process")

        all_texts = []
        all_metadata = []

        for pdf_file in tqdm(pdf_files, desc="Processing PDFs"):
            file_path = os.path.join(KNOWLEDGE_DIR, pdf_file)

            try:
                # Extract text
                loader = PyPDFLoader(file_path)
                documents = loader.load()

                # Tag every page with its originating file.
                for doc in documents:
                    doc.metadata['source'] = pdf_file
                    doc.metadata['type'] = 'book'

                # Extract metadata from PDF if possible
                try:
                    with open(file_path, 'rb') as f:
                        pdf_reader = PyPDF2.PdfReader(f)
                        info = pdf_reader.metadata

                        # Fix: author can legitimately be None in PDF metadata;
                        # fall back to "Unknown" in that case too (the old
                        # hasattr check let None through into metadata.json).
                        author = info.author if info is not None else None
                        all_metadata.append({
                            # splitext strips only the trailing extension
                            # (fix: replace('.pdf', '') removed mid-name hits).
                            'title': os.path.splitext(pdf_file)[0],
                            'author': author or "Unknown",
                            'pages': len(pdf_reader.pages),
                            'source': pdf_file
                        })
                except Exception as e:
                    # Metadata is optional; keep the extracted text regardless.
                    logger.warning(f"Error extracting metadata from {pdf_file}: {str(e)}")

                # Add to collection
                all_texts.extend(documents)
                logger.info(f"Processed {pdf_file}: {len(documents)} pages")

            except Exception as e:
                logger.error(f"Error processing {pdf_file}: {str(e)}")

        # Persist the collected per-book metadata.
        with open(os.path.join(PROCESSED_DIR, 'metadata.json'), 'w', encoding='utf-8') as f:
            json.dump(all_metadata, f, ensure_ascii=False, indent=2)

        return all_texts

    def extract_floral_categories(self, texts):
        """Extract key floral categories and concepts.

        Sends a sample of the extracted text to the DeepSeek API and saves
        the result to categories.json (or categories.txt when the model's
        answer is not valid JSON). A logged no-op when no API key is set.
        """
        # A sample of the corpus is enough for terminology extraction.
        sample_text = "\n".join([doc.page_content for doc in texts[:50]])

        # Use DeepSeek API to extract categories
        if DEEPSEEK_API_KEY:
            try:
                system_prompt = """You are a floral art expert. Extract the main categories, concepts, and terminology from the provided text about floral arrangements. 
                Format the output as JSON with these categories:
                1. floral_styles (list of different floral arrangement styles)
                2. flower_types (list of flower names)
                3. arrangement_techniques (list of techniques)
                4. color_schemes (list of color combinations)
                5. occasions (list of events/occasions for arrangements)
                6. containers (list of vases and containers)
                7. tools (list of tools used)"""

                prompt = f"Extract floral categories from this text:\n\n{sample_text[:4000]}"

                categories_text = self._call_deepseek_chat(
                    system_prompt, prompt, temperature=0.2, max_tokens=1024
                )

                # Try to parse as JSON
                try:
                    # Find JSON content (might be wrapped in markdown code blocks)
                    json_match = re.search(r'```json\n(.*?)\n```', categories_text, re.DOTALL)
                    if json_match:
                        categories_text = json_match.group(1)

                    categories = json.loads(categories_text)

                    # Save categories
                    with open(os.path.join(PROCESSED_DIR, 'categories.json'), 'w', encoding='utf-8') as f:
                        json.dump(categories, f, ensure_ascii=False, indent=2)

                    logger.info(f"Extracted {sum(len(v) for v in categories.values())} items across {len(categories)} categories")

                except json.JSONDecodeError:
                    # Keep the raw answer so nothing is lost.
                    logger.warning("Could not parse categories as JSON. Saving raw text.")
                    with open(os.path.join(PROCESSED_DIR, 'categories.txt'), 'w', encoding='utf-8') as f:
                        f.write(categories_text)

            except Exception as e:
                logger.error(f"Error extracting categories: {str(e)}")
        else:
            logger.warning("No API key available for category extraction")

    def create_vector_store(self, texts):
        """Create vector store from processed texts.

        Splits the documents into overlapping chunks, embeds them, builds a
        FAISS index, saves it to DB_PATH, and returns the vector store.
        """
        logger.info(f"Creating chunks from {len(texts)} documents")
        chunks = self.text_splitter.split_documents(texts)
        logger.info(f"Created {len(chunks)} chunks")

        logger.info("Creating vector store")
        vector_store = FAISS.from_documents(chunks, self.embedding_model)

        logger.info(f"Saving vector store to {DB_PATH}")
        vector_store.save_local(DB_PATH)

        return vector_store

    def create_specialized_guides(self, vector_store):
        """Create specialized guides for common floral topics.

        For each predefined topic, retrieves the most similar chunks from
        the vector store and asks the DeepSeek API to write a structured
        guide, saved as one markdown file per topic in PROCESSED_DIR.
        """
        # Topics to create guides for
        topics = [
            "中式插花风格与技巧",
            "欧式插花风格与技巧",
            "日式插花风格与技巧",
            "韩式插花风格与技巧",
            "现代插花风格与技巧",
            "田园风插花风格与技巧",
            "插花色彩搭配原理",
            "商务场合插花指南",
            "婚礼插花设计",
            "节日插花创意"
        ]

        # Create guide for each topic
        for topic in tqdm(topics, desc="Creating guides"):
            # Ground the guide in the most relevant retrieved passages.
            results = vector_store.similarity_search(topic, k=15)
            content = "\n\n".join([doc.page_content for doc in results])

            # Generate guide using DeepSeek API if available
            if DEEPSEEK_API_KEY:
                try:
                    system_prompt = """You are a floral art expert creating comprehensive guides. Based on the provided content, 
                    create a detailed, well-structured guide on the specified topic. Include sections like:
                    1. Introduction
                    2. Key characteristics
                    3. Recommended flowers and materials
                    4. Step-by-step techniques
                    5. Color combinations
                    6. Common arrangements
                    7. Tips for beginners"""

                    prompt = f"Create a detailed guide about '{topic}' based on this content:\n\n{content[:4000]}"

                    guide_text = self._call_deepseek_chat(
                        system_prompt, prompt, temperature=0.3, max_tokens=2048
                    )

                    # Save guide
                    file_name = topic.replace(" ", "_").replace("，", "_").replace("、", "_") + ".md"
                    with open(os.path.join(PROCESSED_DIR, file_name), 'w', encoding='utf-8') as f:
                        f.write(guide_text)

                    logger.info(f"Created guide: {file_name}")

                except Exception as e:
                    logger.error(f"Error creating guide for {topic}: {str(e)}")
            else:
                logger.warning("No API key available for guide creation")

    def run_full_preparation(self, urls_file: Optional[str] = None):
        """Run the full knowledge base preparation pipeline.

        urls_file: optional path to a file of book URLs to download first.
        Stops early (with a warning) when no text could be extracted.
        """
        # 1. Download books if URLs file provided
        if urls_file:
            self.download_books(urls_file)

        # 2. Process PDFs
        texts = self.process_pdfs()

        if not texts:
            logger.warning("No texts extracted from PDFs")
            return

        # 3. Extract categories
        self.extract_floral_categories(texts)

        # 4. Create vector store
        vector_store = self.create_vector_store(texts)

        # 5. Create specialized guides
        self.create_specialized_guides(vector_store)

        logger.info("Knowledge base preparation complete!")

if __name__ == "__main__":
    import argparse

    # CLI entry point: an optional --urls file triggers the download stage
    # before the rest of the pipeline runs.
    arg_parser = argparse.ArgumentParser(description="Prepare floral art knowledge base")
    arg_parser.add_argument("--urls", help="File containing Z-Library URLs")
    cli_args = arg_parser.parse_args()

    KnowledgeBasePreparation().run_full_preparation(cli_args.urls)
