# 1. Setup and Dependencies
import json
import logging
import os
import re
import shutil
from typing import List, Dict, Tuple, Optional, Union, Any

import faiss
import numpy as np
import requests
import torch
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader, TextLoader, DirectoryLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Load environment variables
load_dotenv()
DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY")
DEEPSEEK_API_URL = "http://10.2.8.77:3000"

# Constants
KNOWLEDGE_DIR = "./knowledge"
DB_PATH = "./vector_db"
EMBEDDING_MODEL = "BAAI/bge-m3"
LOCAL_MODEL_PATH = ""
CHUNK_SIZE = 1000
CHUNK_OVERLAP = 150
TOP_K_RESULTS = 5

# 2. Knowledge Base Processing
class FloralKnowledgeProcessor:
    def __init__(self, knowledge_dir: str = KNOWLEDGE_DIR):
        self.knowledge_dir = knowledge_dir
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP,
            separators=["\n\n", "\n", ". ", " ", ""]
        )
        self.embedding_model = HuggingFaceEmbeddings(
            model_name=EMBEDDING_MODEL,
            model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'}
        )
        
        # Ensure directories exist
        os.makedirs(knowledge_dir, exist_ok=True)
        os.makedirs(DB_PATH, exist_ok=True)
        
    def load_documents(self) -> List[Any]:
        """Load documents from knowledge directory"""
        loader = DirectoryLoader(
            self.knowledge_dir,
            glob="**/*.{pdf,txt}",
            loader_cls=lambda file_path: PyPDFLoader(file_path) if file_path.endswith('.pdf') else TextLoader(file_path)
        )
        documents = loader.load()
        logger.info(f"Loaded {len(documents)} documents from {self.knowledge_dir}")
        return documents
        
    def process_documents(self, documents: List[Any]) -> List[Any]:
        """Split documents into chunks"""
        chunks = self.text_splitter.split_documents(documents)
        logger.info(f"Split documents into {len(chunks)} chunks")
        return chunks
        
    def create_vector_db(self, chunks: List[Any]) -> FAISS:
        """Create vector store from document chunks"""
        vector_db = FAISS.from_documents(chunks, self.embedding_model)
        vector_db.save_local(DB_PATH)
        logger.info(f"Vector database created and saved to {DB_PATH}")
        return vector_db
        
    def load_vector_db(self) -> Optional[FAISS]:
        """Load existing vector database if it exists"""
        if os.path.exists(DB_PATH):
            vector_db = FAISS.load_local(DB_PATH, self.embedding_model)
            logger.info(f"Loaded existing vector database from {DB_PATH}")
            return vector_db
        return None
        
    def update_knowledge_base(self) -> FAISS:
        """Process documents and update vector database"""
        documents = self.load_documents()
        chunks = self.process_documents(documents)
        
        # Try to load existing DB first
        vector_db = self.load_vector_db()
        
        if vector_db:
            # Add new documents to existing DB
            vector_db.add_documents(chunks)
            vector_db.save_local(DB_PATH)
            logger.info("Updated existing vector database")
        else:
            # Create new DB
            vector_db = self.create_vector_db(chunks)
            
        return vector_db

# 3. Query Classification
class QueryClassifier:
    def __init__(self, threshold: float = 0.6):
        self.threshold = threshold
        self.high_freq_templates = [
            r"推荐(.*?)插花花材",
            r"(.*?)风格的插花特点",
            r"如何搭配(.*?)颜色的花",
            r"插花中(.*?)花材的寓意",
            r"适合(.*?)场合的插花",
            r"(.*?)插花风格的基本要素",
            r"如何保养(.*?)花材",
            r"插花中的(.*?)技巧",
        ]
        
        # Initialize TF-IDF vectorizer for semantic matching
        self.tfidf = TfidfVectorizer()
        
        # Common floral terms to identify domain-specific queries
        self.floral_terms = [
            "插花", "花材", "花艺", "花束", "花器", "花瓶", "枝条", "叶材", 
            "花形", "色彩", "质感", "风格", "搭配", "技巧", "修剪", "保养",
            "日式", "中式", "欧式", "韩式", "现代", "古典", "田园"
        ]
        
        # Train vectorizer on floral terms
        self.tfidf.fit_transform([" ".join(self.floral_terms)])
        
    def is_high_frequency(self, query: str) -> bool:
        """Determine if query is high-frequency based on templates and semantic content"""
        # Check pattern matching first (exact templates)
        for pattern in self.high_freq_templates:
            if re.search(pattern, query):
                logger.info(f"Query matched high-frequency template: {pattern}")
                return True
                
        # Check semantic similarity to floral domain
        query_vec = self.tfidf.transform([query])
        domain_vec = self.tfidf.transform([" ".join(self.floral_terms)])
        
        # Calculate cosine similarity
        similarity = (query_vec * domain_vec.T).toarray()[0][0]
        
        if similarity > self.threshold:
            logger.info(f"Query semantically matched floral domain with score: {similarity}")
            return True
            
        return False

# 4. Local Model Handler (for high-frequency queries)
class LocalModelHandler:
    """Serves high-frequency queries with a locally hosted causal LM.

    When no model exists at *model_path*, the handler stays in a disabled
    state and reports itself as unavailable.
    """

    def __init__(self, model_path: str = LOCAL_MODEL_PATH):
        self.model_path = model_path

        # Guard clause: without a model on disk there is nothing to load.
        if not os.path.exists(model_path):
            logger.warning(f"Local model not found at {model_path}. Local processing unavailable.")
            self.pipeline = None
            return

        logger.info(f"Loading local model from {model_path}")
        use_cuda = torch.cuda.is_available()
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.float16 if use_cuda else torch.float32,
            device_map="auto",
        )
        self.pipeline = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
            max_new_tokens=512,
            temperature=0.7,
            top_p=0.9,
        )

    def is_available(self) -> bool:
        """Whether a local generation pipeline was successfully loaded."""
        return self.pipeline is not None

    def generate_response(self, query: str) -> str:
        """Answer a floral-art question with the local model.

        Returns a fixed notice string when the model is not loaded.
        """
        if not self.is_available():
            return "Local model not available"

        prompt = f"""作为一个专业的插花艺术顾问，请回答下面关于插花艺术的问题：

问题：{query}

回答："""

        generated = self.pipeline(prompt)[0]["generated_text"]
        # Keep only the text after the final answer marker.
        return generated.split("回答：")[-1].strip()

# 5. DeepSeek API Handler
from openai import OpenAI


class DeepSeekAPIHandler:
    """Client for a DeepSeek chat-completions endpoint (OpenAI-compatible)."""

    def __init__(self, api_key: str = DEEPSEEK_API_KEY, api_url: str = DEEPSEEK_API_URL):
        # SECURITY FIX: a secret API key was previously hard-coded here and
        # both constructor arguments were silently ignored. Credentials now
        # come from the caller / the DEEPSEEK_API_KEY environment variable.
        self.api_key = api_key
        # The OpenAI client expects the "/v1" suffix on the base URL; append
        # it only when the caller has not already included it.
        base = api_url.rstrip("/")
        self.base_url = base if base.endswith("/v1") else base + "/v1"

        # Initialize OpenAI client
        self.client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url
        )

    def generate_response(self, prompt: str, system_prompt: str = None) -> str:
        """Generate a chat completion for *prompt*.

        Returns the model's reply with any R1-style <think>...</think>
        reasoning blocks stripped; on any API error, returns an error
        string instead of raising (callers depend on always getting text).
        """
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})

        try:
            completion = self.client.chat.completions.create(
                model="DeepSeek-R1",
                messages=messages,
            )
            response = completion.choices[0].message.content

            # Remove reasoning markers emitted by R1-style models.
            cleaned_response = re.sub(r'<think>.*?</think>', '', response, flags=re.DOTALL)
            return cleaned_response.strip()

        except Exception as e:
            logger.error(f"API request failed: {str(e)}")
            return f"API request failed: {str(e)}"

    def generate_qa_pairs(self, topic: str, count: int = 10) -> List[Dict[str, str]]:
        """Generate question/answer pairs about *topic* for fine-tuning.

        Returns a list of {"question": ..., "answer": ...} dicts; empty when
        the request fails or the reply does not follow the expected
        "问题N: ... 回答: ..." layout.
        """
        system_prompt = """You are a professional floral artist. Create realistic question-answer pairs about floral arrangement in Chinese.
Each pair should include a detailed question and a comprehensive expert answer. Focus on professional floral knowledge."""

        prompt = f"请生成{count}对关于'{topic}'的问答对，每对问答应包含一个详细的问题和一个专业的回答。"

        try:
            response = self.generate_response(prompt, system_prompt)

            # Split on the numbered question markers, then split each piece
            # at its answer marker; malformed pieces are skipped.
            qa_pairs = []
            pairs = re.split(r'问题\s*\d+:', response)[1:]

            for pair in pairs:
                qa = pair.split('回答:')
                if len(qa) == 2:
                    qa_pairs.append({
                        "question": f"问题: {qa[0].strip()}",
                        "answer": f"回答: {qa[1].strip()}"
                    })

            return qa_pairs
        except Exception as e:
            logger.error(f"Failed to generate QA pairs: {str(e)}")
            return []

# 6. RAG System
class FloralRAGSystem:
    """Routes floral-art queries to the local model or to RAG + DeepSeek API."""

    def __init__(self):
        self.knowledge_processor = FloralKnowledgeProcessor()
        # Reuse a persisted index when present; otherwise build a fresh one.
        self.vector_db = self.knowledge_processor.load_vector_db()
        if not self.vector_db:
            self.vector_db = self.knowledge_processor.update_knowledge_base()

        self.query_classifier = QueryClassifier()
        self.local_model = LocalModelHandler()
        self.api_handler = DeepSeekAPIHandler()

    def retrieve_context(self, query: str, top_k: int = TOP_K_RESULTS) -> str:
        """Return the top-k matching chunks joined into a single context string."""
        matches = self.vector_db.similarity_search(query, k=top_k)
        return "\n\n".join(doc.page_content for doc in matches)

    def format_rag_prompt(self, query: str, context: str) -> str:
        """Build the grounded prompt sent to the DeepSeek API."""
        return f"""请基于以下参考信息回答关于插花艺术的问题。只使用提供的参考信息和你的专业知识回答，不要编造信息。

参考信息:
{context}

问题: {query}

详细回答:"""

    def process_query(self, query: str) -> Tuple[str, str]:
        """Answer *query*; returns (response, source) where source is "local" or "rag+api"."""
        # Fast path: domain queries served by the local model when loaded.
        if self.query_classifier.is_high_frequency(query) and self.local_model.is_available():
            return self.local_model.generate_response(query), "local"

        # Long-tail path (or local model absent): retrieval-augmented API call.
        context = self.retrieve_context(query)
        rag_prompt = self.format_rag_prompt(query, context)
        system_prompt = "你是一位专业的插花艺术顾问，精通各种插花风格、技巧和花材知识。请基于提供的参考信息回答问题。"
        answer = self.api_handler.generate_response(rag_prompt, system_prompt)
        return answer, "rag+api"

    def update_knowledge_base(self):
        """Rebuild/extend the vector database from the knowledge directory."""
        self.vector_db = self.knowledge_processor.update_knowledge_base()

    def generate_training_data(self, topics: List[str], count_per_topic: int = 10) -> List[Dict[str, str]]:
        """Collect QA pairs for every topic and persist them as JSON."""
        collected: List[Dict[str, str]] = []
        for topic in topics:
            logger.info(f"Generating QA pairs for topic: {topic}")
            collected.extend(self.api_handler.generate_qa_pairs(topic, count_per_topic))

        # Persist the full set for the fine-tuning step.
        os.makedirs("training_data", exist_ok=True)
        with open("training_data/qa_pairs.json", "w", encoding="utf-8") as f:
            json.dump(collected, f, ensure_ascii=False, indent=2)

        logger.info(f"Generated {len(collected)} QA pairs for training")
        return collected

# 7. Main Application
class FloralArtApp:
    """Top-level application facade for the floral-art Q&A system."""

    def __init__(self):
        logger.info("Initializing Floral Art Application")
        self.rag_system = FloralRAGSystem()

    def process_user_query(self, query: str) -> str:
        """Answer a single user query, logging which backend produced it."""
        logger.info(f"Processing query: {query}")
        response, source = self.rag_system.process_query(query)
        logger.info(f"Response generated using {source} approach")
        return response

    def setup_knowledge_base(self, file_paths: List[str]):
        """Copy the given documents into the knowledge directory and reindex.

        Paths that do not exist are skipped with a warning.
        """
        os.makedirs(KNOWLEDGE_DIR, exist_ok=True)

        for file_path in file_paths:
            if os.path.exists(file_path):
                file_name = os.path.basename(file_path)
                destination = os.path.join(KNOWLEDGE_DIR, file_name)

                # Use shutil instead of the previous manual read/write copy:
                # it streams in buffered chunks (no whole-file read into
                # memory) and preserves file metadata.
                shutil.copy2(file_path, destination)

                logger.info(f"Added {file_name} to knowledge base")
            else:
                logger.warning(f"File not found: {file_path}")

        # Update vector database
        self.rag_system.update_knowledge_base()

    def prepare_training_data(self):
        """Generate QA training data across the standard floral topics."""
        topics = [
            "中式插花", "欧式插花", "日式插花", "韩式插花", "现代插花", "田园风插花",
            "常见花材特性", "插花色彩搭配", "插花基本技巧", "花材保养",
            "插花适用场景", "花材寓意", "花器选择", "花艺历史"
        ]
        return self.rag_system.generate_training_data(topics, count_per_topic=100)

    def train_local_model(self, model_name: str = "THUDM/chatglm3-6b"):
        """Outline of the local-model fine-tuning pipeline (not implemented).

        Only the data-preparation step runs; fine-tuning and saving are
        logged as placeholders. Returns the number of generated QA pairs.
        """
        logger.info(f"Training local model based on {model_name}")
        logger.info("Step 1: Prepare training data")
        qa_pairs = self.prepare_training_data()

        logger.info(f"Step 2: Fine-tuning model (would fine-tune {model_name})")
        # Placeholder: a real implementation would run a LoRA/QLoRA training
        # loop here and save the resulting model to LOCAL_MODEL_PATH.
        logger.info(f"Local model would be saved to {LOCAL_MODEL_PATH}")

        return len(qa_pairs)

# Example usage
if __name__ == "__main__":
    # Build the application (loads or creates the vector database).
    app = FloralArtApp()

    # Files that would seed the knowledge base in a real deployment.
    example_files = [
        "./knowledge/11.pdf"
    ]
    # Uncomment to ingest the files above:
    # app.setup_knowledge_base(example_files)

    # Uncomment to generate training data and fine-tune the local model
    # (requires a working API key):
    # app.train_local_model()

    # A mix of template-style and long-tail demo queries.
    example_queries = [
        "推荐适合中式插花的花材",
        "欧式插花与日式插花的区别",
        "如何搭配红色和白色的花",
        "设计符合2024巴黎时装周主题的插花",
        "适合商务办公室的插花风格"
    ]

    for query in example_queries:
        print(f"\nQuery: {query}")
        response = app.process_user_query(query)
        # NOTE: the "..." is a literal suffix — the full response is printed,
        # not a 100-character preview as the old comment claimed.
        print(f"Response: {response}...")