
import os
import sqlite3
import hashlib
import json
import sys
import faiss
import numpy as np
from openai import OpenAI
from dotenv import load_dotenv

# --- CONFIGURATION ---
# Load environment variables (e.g. DASHSCOPE_API_KEY) from a local .env file.
load_dotenv()

# Root directory containing one sub-directory per knowledge base.
KNOWLEDGE_BASE_DIR = "knowledge_base"
# Output directory for per-KB SQLite metadata DBs and FAISS index files.
DATA_DIR = "data"

# --- Embedding client ---
# OpenAI-compatible client pointed at Alibaba Cloud DashScope; used below
# for the text-embedding calls.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
)

# --- UTILITY FUNCTIONS ---
def get_db_connection(knowledge_id):
    """Open the per-knowledge-base SQLite metadata database.

    Rows are returned as sqlite3.Row so columns can be accessed by name.
    """
    connection = sqlite3.connect(
        os.path.join(DATA_DIR, f"{knowledge_id}_meta.db")
    )
    connection.row_factory = sqlite3.Row
    return connection

def setup_database(knowledge_id):
    """Create the metadata tables for *knowledge_id* if they are missing."""
    with get_db_connection(knowledge_id) as conn:
        # Tracks each indexed file's content hash for change detection.
        conn.execute('''
            CREATE TABLE IF NOT EXISTS file_index (
                file_path TEXT PRIMARY KEY,
                content_hash TEXT NOT NULL
            )
        ''')
        # Stores the text chunks (plus JSON metadata) derived from each file.
        conn.execute('''
            CREATE TABLE IF NOT EXISTS chunks (
                chunk_id INTEGER PRIMARY KEY AUTOINCREMENT,
                file_path TEXT NOT NULL,
                chunk_text TEXT NOT NULL,
                metadata_json TEXT,
                FOREIGN KEY (file_path) REFERENCES file_index(file_path)
            )
        ''')
        conn.commit()

def get_file_hash(file_path):
    """Return the SHA-256 hex digest of the file's content.

    Reads in fixed-size chunks so arbitrarily large files do not have to
    be loaded into memory at once (the original read the whole file in
    one call).
    """
    hasher = hashlib.sha256()
    with open(file_path, 'rb') as f:
        # 64 KiB chunks: large enough to be fast, small enough to cap memory.
        while chunk := f.read(65536):
            hasher.update(chunk)
    return hasher.hexdigest()

def extract_metadata_from_path(file_path, rule, base_path):
    """Return one directory component of file_path, relative to base_path.

    The rule's "index" (default 0) selects which path segment to return.
    The final segment is the filename itself and is never returned; if
    the index points at or beyond it, None is returned.
    """
    segments = os.path.relpath(file_path, base_path).split(os.sep)
    segment_index = rule.get("index", 0)
    # Last segment is the filename — only directory parts are valid answers.
    return segments[segment_index] if segment_index < len(segments) - 1 else None

def extract_metadata(file_path, config, base_path):
    """Extract metadata for a file based on the provided configuration.

    Supports two rule sources from config["metadata_rules"]:
      - "path_part": take a directory component of the file's path.
      - "frontmatter": read a "key: value" line from a YAML-style
        frontmatter block delimited by "---" lines.
    With no rules configured, the filename (without extension) is used as
    the 'topic'.
    """
    metadata = {}
    rules = config.get("metadata_rules", {})

    # Default rule: use filename as topic
    if not rules:
        metadata['topic'] = os.path.splitext(os.path.basename(file_path))[0]
        return metadata

    for key, rule in rules.items():
        if rule["source"] == "path_part":
            metadata[key] = extract_metadata_from_path(file_path, rule, base_path)
        elif rule["source"] == "frontmatter":
            # Minimal frontmatter parsing: scan "key: value" lines between
            # the opening and closing "---" markers. A dedicated library
            # (e.g. python-frontmatter) would be more robust.
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
            if not content.startswith("---"):
                continue
            end_fm = content.find("---", 3)
            if end_fm == -1:
                # Unterminated frontmatter. The original fell through to
                # content[3:-1], which scanned the entire file body and
                # could pick up false "key:" matches — treat an unclosed
                # block as "no frontmatter" instead.
                continue
            prefix = rule["key"] + ":"
            for line in content[3:end_fm].splitlines():
                if line.startswith(prefix):
                    metadata[key] = line.split(":", 1)[1].strip()
                    break
    return metadata

def build_index_for_kb(knowledge_id):
    """Build or update the FAISS vector index for a single knowledge base.

    Walks KNOWLEDGE_BASE_DIR/<knowledge_id> for .md/.txt files, skips
    files whose content hash is unchanged since the last run, chunks the
    rest (paragraph-based splitting for long files), embeds every chunk
    via the DashScope embedding API, writes a FAISS index to DATA_DIR,
    and records chunks + file hashes in the metadata database.

    Fixes vs. the previous version: removed the unused `metadata_str`
    and `chunk_ids` variables, and each file is now hashed exactly once
    (the hash computed for change detection is reused when updating
    file_index, instead of re-reading the file).
    """
    print(f"--- Processing knowledge base: {knowledge_id} ---")
    kb_path = os.path.join(KNOWLEDGE_BASE_DIR, knowledge_id)
    if not os.path.isdir(kb_path):
        print(f"Error: Directory not found for knowledge_id '{knowledge_id}'")
        return

    # Ensure data directory for this KB exists
    os.makedirs(os.path.join(DATA_DIR), exist_ok=True)

    # Setup database
    setup_database(knowledge_id)

    # Load knowledge base-specific config, if it exists
    config = {}
    config_path = os.path.join(kb_path, "kb_config.json")
    if os.path.exists(config_path):
        with open(config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
        print("Loaded custom configuration.")
    else:
        print("No custom configuration found. Using default rules.")

    conn = get_db_connection(knowledge_id)
    cursor = conn.cursor()

    # --- Main Indexing Logic ---
    all_chunks_to_process = []
    # Maps file_path -> content hash so the hash computed during change
    # detection is reused when file_index is updated below (previously
    # every file was hashed a second time, which also raced with
    # concurrent edits to the file).
    files_to_update = {}

    for root, _, files in os.walk(kb_path):
        for file in files:
            if not file.endswith((".md", ".txt")):
                continue

            file_path = os.path.join(root, file)
            file_hash = get_file_hash(file_path)

            cursor.execute("SELECT content_hash FROM file_index WHERE file_path = ?", (file_path,))
            result = cursor.fetchone()

            if result and result['content_hash'] == file_hash:
                continue  # unchanged since the last indexing run

            print(f"Found updated file: {file_path}")
            files_to_update[file_path] = file_hash

            # 1. Extract Metadata
            metadata = extract_metadata(file_path, config, kb_path)

            # 2. Read and Chunk Content (Hybrid Strategy)
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            chunks = []
            # Character count as a proxy for the embedding token limit:
            # average token length is ~4 chars, so 6000 chars stays well
            # under the 8192-token cap.
            TOKEN_LIMIT_PROXY_CHARS = 6000

            if len(content) > TOKEN_LIMIT_PROXY_CHARS:
                print(f"  -> File is long, splitting by paragraphs.")
                paragraphs = content.split('\n\n')
                current_chunk = ""
                for p in paragraphs:
                    # +2 accounts for the "\n\n" separator re-added below.
                    if len(current_chunk) + len(p) + 2 > TOKEN_LIMIT_PROXY_CHARS:
                        if current_chunk:
                            chunks.append(current_chunk)
                        # NOTE(review): a single paragraph longer than the
                        # limit still becomes one oversized chunk here.
                        current_chunk = p
                    else:
                        if current_chunk:  # add separator unless first paragraph
                            current_chunk += "\n\n" + p
                        else:
                            current_chunk = p
                if current_chunk:  # flush the final chunk
                    chunks.append(current_chunk)
            else:
                # File is short enough, treat as a single chunk
                if content.strip():
                    chunks.append(content)

            # 3. Queue chunk(s) for batch embedding
            for chunk in chunks:
                if not chunk.strip():
                    continue

                # Boost topic keywords by repeating them at the front of
                # the embedded text so topical queries match better.
                topic_str = metadata.get('topic', file)
                # Clean up topic from 'category / topic' format
                if '/' in topic_str:
                    topic_str = topic_str.split('/')[-1].strip()

                boosted_text = f"{topic_str} {topic_str} {topic_str}. "

                text_to_embed = f"{boosted_text}分类: {metadata.get('category', 'general')}, 主题: {metadata.get('topic', file)}. 内容: {chunk}"

                all_chunks_to_process.append({
                    "text": text_to_embed,
                    "original_content": chunk,
                    "file_path": file_path,
                    "metadata_json": json.dumps(metadata)
                })

    if not all_chunks_to_process:
        print("No new or updated files to process. Index is up to date.")
        conn.close()
        return

    print(f"Found {len(all_chunks_to_process)} total chunks to process for vectorization.")

    # --- Vectorization and FAISS Indexing ---
    try:
        all_vectors = []
        batch_size = 10  # Set batch size according to API documentation
        num_batches = (len(all_chunks_to_process) + batch_size - 1) // batch_size

        for i in range(num_batches):
            batch_chunks = all_chunks_to_process[i * batch_size:(i + 1) * batch_size]
            batch_texts = [chunk['text'] for chunk in batch_chunks]

            print(f"Processing batch {i + 1}/{num_batches}...")
            # Batch embed all chunks in this slice
            response = client.embeddings.create(
                model="text-embedding-v4",
                input=batch_texts
            )
            all_vectors.extend(item.embedding for item in response.data)

        dimension = len(all_vectors[0])
        print(f"Successfully generated {len(all_vectors)} vectors of dimension {dimension}.")

        # Create and populate FAISS index.
        # NOTE(review): this writes an index containing ONLY the chunks
        # embedded in this run, while the chunks table keeps rows for
        # unchanged files — after an incremental run the FAISS row ids no
        # longer line up with the chunks table. A robust system would map
        # FAISS ids to chunk_ids explicitly (e.g. IndexIDMap) or always
        # re-embed everything. Preserved as-is; confirm intended behavior.
        index = faiss.IndexFlatL2(dimension)
        index.add(np.array(all_vectors, dtype='float32'))
        index_path = os.path.join(DATA_DIR, f"{knowledge_id}_index.faiss")
        faiss.write_index(index, index_path)
        print(f"FAISS index saved to {index_path}")

        # Remove stale chunks for every file being re-indexed.
        if files_to_update:
            placeholders = ', '.join('?' for _ in files_to_update)
            cursor.execute(f"DELETE FROM chunks WHERE file_path IN ({placeholders})", tuple(files_to_update))

        # Insert the new chunks; insertion order matches the order the
        # vectors were added to the FAISS index above.
        for chunk_info in all_chunks_to_process:
            cursor.execute(
                "INSERT INTO chunks (file_path, chunk_text, metadata_json) VALUES (?, ?, ?)",
                (chunk_info['file_path'], chunk_info['original_content'], chunk_info['metadata_json'])
            )

        # Record the content hashes computed during change detection.
        for file_path, content_hash in files_to_update.items():
            cursor.execute(
                "INSERT OR REPLACE INTO file_index (file_path, content_hash) VALUES (?, ?)",
                (file_path, content_hash)
            )

        conn.commit()
        print("Database updated successfully.")

    except Exception as e:
        # Best-effort reporting; the connection is always closed below.
        print(f"An error occurred during vectorization or DB update: {e}")
    finally:
        conn.close()

if __name__ == "__main__":
    # CLI entry point: index one knowledge base by id, or every KB with --all.
    args = sys.argv[1:]
    if not args:
        print("Usage: python generate_index.py <knowledge_id> | --all")
    elif args[0] == "--all":
        print("Processing all knowledge bases...")
        for entry in os.listdir(KNOWLEDGE_BASE_DIR):
            if os.path.isdir(os.path.join(KNOWLEDGE_BASE_DIR, entry)):
                build_index_for_kb(entry)
    else:
        build_index_for_kb(args[0])
