import base64
import enum
import hashlib
import hmac
import json
import logging
import os
import pickle
import re
import time
from json import JSONDecodeError
from typing import Any, cast

from sqlalchemy import func, ForeignKey # Added ForeignKey
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped, relationship # Added relationship

from flask import current_app

from core.rag.index_processor.constant.built_in_field import BuiltInField, MetadataDataSource
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from services.entities.knowledge_entities.knowledge_entities import ParentMode, Rule

from .account import Account 
from .base import Base
from .engine import db # Use .engine.db as per previous structure
from .model import App, Tag, TagBinding, UploadFile 
from .types import StringUUID


class DatasetPermissionEnum(enum.StrEnum):
    ONLY_ME = "only_me"
    ALL_TEAM = "all_team_members"
    PARTIAL_TEAM = "partial_members"


class Dataset(Base):
    """A knowledge-base dataset: the root aggregate of this module.

    A dataset owns its documents, process rules, keyword tables, app joins
    and metadata entries; all child rows cascade-delete with the dataset
    (see the relationship() declarations below).

    NOTE(review): the statistics accessors (document_count, word_count, ...)
    are query-issuing properties — each access runs its own SELECT, and
    to_dict() triggers several of them per call.
    """

    __tablename__ = "datasets"
    __table_args__ = (
        db.PrimaryKeyConstraint("id", name="dataset_pkey"),
        # db.Index("dataset_tenant_idx", "tenant_id"), # Removed tenant_id specific index
        # GIN index so JSONB queries against retrieval_model can use an index.
        db.Index("retrieval_model_idx", "retrieval_model", postgresql_using="gin"),
    )

    # Allowed values for the corresponding columns; None means "unset".
    INDEXING_TECHNIQUE_LIST = ["high_quality", "economy", None]
    PROVIDER_LIST = ["vendor", "external", None]

    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    # tenant_id = db.Column(StringUUID, nullable=False) # Removed tenant_id
    name = db.Column(db.String(255), nullable=False)
    description = db.Column(db.Text, nullable=True)
    # Expected to be one of PROVIDER_LIST; DB-level default is "vendor".
    provider = db.Column(db.String(255), nullable=False, server_default=db.text("\"vendor\"::character varying"))
    # permission = db.Column(db.String(255), nullable=False, server_default=db.text("\"only_me\"::character varying")) # Removed permission
    data_source_type = db.Column(db.String(255))
    indexing_technique = db.Column(db.String(255), nullable=True)
    # Serialized JSON describing the index; parsed by index_struct_dict.
    index_struct = db.Column(db.Text, nullable=True)
    # created_by = db.Column(StringUUID, nullable=False) # Removed created_by, can be added if user management is implemented
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())
    # updated_by = db.Column(StringUUID, nullable=True) # Removed updated_by
    updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp())
    embedding_model = db.Column(db.String(255), nullable=True)
    embedding_model_provider = db.Column(db.String(255), nullable=True)
    collection_binding_id = db.Column(StringUUID, nullable=True)
    # Retrieval settings as JSONB; merged over defaults by retrieval_model_dict.
    retrieval_model = db.Column(JSONB, nullable=True)
    # When true, doc_metadata also exposes the fixed built-in fields.
    built_in_field_enabled = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))

    # Relationships (if needed, define corresponding backrefs in other models)
    documents = relationship("Document", back_populates="dataset", cascade="all, delete-orphan")
    process_rules = relationship("DatasetProcessRule", back_populates="dataset", cascade="all, delete-orphan")
    keyword_tables = relationship("DatasetKeywordTable", back_populates="dataset", cascade="all, delete-orphan")
    app_dataset_joins = relationship("AppDatasetJoin", back_populates="dataset", cascade="all, delete-orphan")
    metadata_entries = relationship("DatasetMetadata", back_populates="dataset", cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize the dataset for API responses.

        NOTE: document_count, available_document_count,
        available_segment_count and word_count are properties that each run
        their own aggregate query.
        """
        return {
            "id": self.id,
            "name": self.name,
            "description": self.description,
            "provider": self.provider,
            "data_source_type": self.data_source_type,
            "indexing_technique": self.indexing_technique,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
            "embedding_model": self.embedding_model,
            "embedding_model_provider": self.embedding_model_provider,
            "retrieval_model": self.retrieval_model,
            "document_count": self.document_count,
            "available_document_count": self.available_document_count,
            "available_segment_count": self.available_segment_count,
            "word_count": self.word_count
        }

    @property
    def dataset_keyword_table(self):
        """Return this dataset's DatasetKeywordTable row, or None."""
        return db.session.query(DatasetKeywordTable).filter(DatasetKeywordTable.dataset_id == self.id).first()

    @property
    def index_struct_dict(self):
        """Parse index_struct JSON text; None when unset."""
        return json.loads(self.index_struct) if self.index_struct else None

    # @property
    # def created_by_account(self): # Removed as created_by is removed
    #     return db.session.get(Account, self.created_by)

    @property
    def latest_process_rule(self):
        """Most recently created DatasetProcessRule for this dataset, or None."""
        return (
            db.session.query(DatasetProcessRule)
            .filter(DatasetProcessRule.dataset_id == self.id)
            .order_by(DatasetProcessRule.created_at.desc())
            .first()
        )

    @property
    def document_count(self):
        """Total number of documents in this dataset (COUNT query)."""
        return db.session.query(func.count(Document.id)).filter(Document.dataset_id == self.id).scalar()

    @property
    def available_document_count(self):
        """Count of documents that are completed, enabled and not archived."""
        return (
            db.session.query(func.count(Document.id))
            .filter(
                Document.dataset_id == self.id,
                Document.indexing_status == "completed",
                Document.enabled == True,
                Document.archived == False,
            )
            .scalar()
        )

    @property
    def available_segment_count(self):
        """Count of segments that are completed and enabled."""
        return (
            db.session.query(func.count(DocumentSegment.id))
            .filter(
                DocumentSegment.dataset_id == self.id,
                DocumentSegment.status == "completed",
                DocumentSegment.enabled == True,
            )
            .scalar()
        )

    @property
    def word_count(self):
        """Sum of word_count over all documents; 0 when there are none."""
        # Ensure Document.word_count is a numeric type in the Document model
        # Querying sum directly on Document.word_count
        return db.session.query(func.sum(Document.word_count)).filter(Document.dataset_id == self.id).scalar() or 0

    @property
    def doc_form(self):
        """doc_form of the first document found, or None when the dataset is empty.

        NOTE(review): presumably all documents of a dataset share one
        doc_form — confirm against the ingestion code.
        """
        document = db.session.query(Document.doc_form).filter(Document.dataset_id == self.id).first()
        return document.doc_form if document else None

    @property
    def retrieval_model_dict(self):
        """Retrieval settings merged over built-in defaults.

        Accepts retrieval_model as a dict (JSONB) or a legacy JSON string;
        falls back to the plain defaults on missing or invalid data.
        """
        default_retrieval_model = {
            "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
            "reranking_enable": False,
            "reranking_model": {"reranking_provider_name": "", "reranking_model_name": ""},
            "top_k": 2,
            "score_threshold_enabled": False,
        }
        # Ensure self.retrieval_model is a dict or valid JSON string
        if isinstance(self.retrieval_model, dict):
            return {**default_retrieval_model, **self.retrieval_model}
        try:
            return {**default_retrieval_model, **(json.loads(self.retrieval_model) if self.retrieval_model else {})}
        except (json.JSONDecodeError, TypeError):
            return default_retrieval_model

    @property
    def tags(self):
        """Tags of type "knowledge" bound to this dataset (possibly empty list)."""
        return (
            db.session.query(Tag)
            .join(TagBinding, Tag.id == TagBinding.tag_id)
            .filter(
                TagBinding.target_id == self.id,
                Tag.type == "knowledge",
            )
            .all()
        ) or []

    @property
    def doc_metadata(self):
        """Metadata field descriptors for this dataset.

        Returns the custom DatasetMetadata rows as {id, name, type} dicts,
        plus the fixed built-in fields when built_in_field_enabled is set.
        """
        dataset_metadatas = db.session.query(DatasetMetadata).filter(DatasetMetadata.dataset_id == self.id).all()
        doc_metadata_list = [
            {"id": dm.id, "name": dm.name, "type": dm.type}
            for dm in dataset_metadatas
        ]
        if self.built_in_field_enabled:
            # Built-in fields use fixed string ids (not UUIDs like custom rows).
            built_in_fields_to_add = [
                {"id": "built-in-doc-name", "name": BuiltInField.document_name.value, "type": "string"},
                {"id": "built-in-uploader", "name": BuiltInField.uploader.value, "type": "string"},
                {"id": "built-in-upload-date", "name": BuiltInField.upload_date.value, "type": "time"},
                {"id": "built-in-last-update", "name": BuiltInField.last_update_date.value, "type": "time"},
                {"id": "built-in-source", "name": BuiltInField.source.value, "type": "string"},
            ]
            doc_metadata_list.extend(built_in_fields_to_add)
        return doc_metadata_list

    @staticmethod
    def gen_collection_name_by_id(dataset_id: str) -> str:
        """Derive the vector-store collection name for a dataset id.

        Hyphens are replaced with underscores so the id is safe for backends
        that reject '-' in collection names.
        """
        normalized_dataset_id = dataset_id.replace("-", "_")
        return f"Vector_index_{normalized_dataset_id}_Node"


class DatasetProcessRule(Base):
    """Processing (cleaning/segmentation) rules applied when indexing a dataset.

    A dataset can accumulate several rule rows over time;
    Dataset.latest_process_rule selects the newest by created_at.
    """

    __tablename__ = "dataset_process_rules"
    __table_args__ = (
        db.PrimaryKeyConstraint("id", name="dataset_process_rule_pkey"),
        db.Index("dataset_process_rule_dataset_id_idx", "dataset_id"),
    )
    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    dataset_id = db.Column(StringUUID, ForeignKey("datasets.id"), nullable=False)
    # Expected to be one of MODES; DB-level default is "automatic".
    mode = db.Column(db.String(255), nullable=False, server_default=db.text("\"automatic\"::character varying"))
    # JSON text; parsed leniently by rules_dict.
    rules = db.Column(db.Text, nullable=True)
    # created_by = db.Column(StringUUID, nullable=False) # Removed
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())

    dataset = relationship("Dataset", back_populates="process_rules")

    MODES = ["automatic", "custom", "hierarchical"]
    # Default rule set presumably used when mode == "automatic" — the code
    # applying it lives outside this module; confirm in the indexing service.
    AUTOMATIC_RULES: dict[str, Any] = {
        "pre_processing_rules": [
            {"id": "remove_extra_spaces", "enabled": True},
            {"id": "remove_urls_emails", "enabled": False},
        ],
        "segmentation": {"delimiter": "\n", "max_tokens": 500, "chunk_overlap": 50},
    }
    @property
    def rules_dict(self):
        """Parsed rules JSON; None when unset or not valid JSON."""
        try:
            return json.loads(self.rules) if self.rules else None
        except JSONDecodeError:
            return None

    def to_dict(self):
        """Serialize for API responses; rules are returned already parsed."""
        return {
            "id": self.id,
            "dataset_id": self.dataset_id,
            "mode": self.mode,
            "rules": self.rules_dict,
            "created_at": self.created_at.isoformat() if self.created_at else None
        }

class Document(Base):
    """A source document ingested into a dataset.

    Tracks the indexing lifecycle through the *_at timestamp columns
    (processing -> parsing -> cleaning -> splitting -> completed) and the
    indexing_status column; display_status maps those internals to one
    user-facing string.
    """

    __tablename__ = "documents"
    __table_args__ = (
        db.PrimaryKeyConstraint("id", name="document_pkey"),
        db.Index("document_dataset_id_idx", "dataset_id"),
        db.Index("document_is_paused_idx", "is_paused"),
        # GIN index so JSONB queries against doc_metadata can use an index.
        db.Index("document_metadata_idx", "doc_metadata", postgresql_using="gin"),
    )

    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    dataset_id = db.Column(StringUUID, ForeignKey("datasets.id"), nullable=False)
    # Ordering position of the document within its dataset.
    position = db.Column(db.Integer, nullable=False)
    # Expected to be one of DATA_SOURCES.
    data_source_type = db.Column(db.String(255), nullable=False)
    # JSON text; parsed by data_source_info_dict (e.g. {"upload_file_id": ...}).
    data_source_info = db.Column(db.Text, nullable=True)
    dataset_process_rule_id = db.Column(StringUUID, ForeignKey("dataset_process_rules.id"), nullable=True)
    batch = db.Column(db.String(255), nullable=False)
    name = db.Column(db.String(255), nullable=False)
    created_from = db.Column(db.String(255), nullable=False)
    # created_by = db.Column(StringUUID, nullable=False) # Removed
    # created_api_request_id = db.Column(StringUUID, nullable=True) # Removed
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())

    # Indexing-pipeline progress timestamps and metrics.
    processing_started_at = db.Column(db.DateTime, nullable=True)
    file_id = db.Column(db.Text, nullable=True)
    word_count = db.Column(db.Integer, nullable=True)
    parsing_completed_at = db.Column(db.DateTime, nullable=True)
    cleaning_completed_at = db.Column(db.DateTime, nullable=True)
    splitting_completed_at = db.Column(db.DateTime, nullable=True)
    tokens = db.Column(db.Integer, nullable=True)
    indexing_latency = db.Column(db.Float, nullable=True)
    completed_at = db.Column(db.DateTime, nullable=True)
    is_paused = db.Column(db.Boolean, nullable=True, server_default=db.text("false"))
    # paused_by = db.Column(StringUUID, nullable=True) # Removed
    paused_at = db.Column(db.DateTime, nullable=True)
    error = db.Column(db.Text, nullable=True)
    stopped_at = db.Column(db.DateTime, nullable=True)
    # One of: waiting / parsing / cleaning / splitting / indexing /
    # completed / error (see display_status for the user-facing mapping).
    indexing_status = db.Column(db.String(255), nullable=False, server_default=db.text("\"waiting\"::character varying"))
    enabled = db.Column(db.Boolean, nullable=False, server_default=db.text("true"))
    disabled_at = db.Column(db.DateTime, nullable=True)
    # disabled_by = db.Column(StringUUID, nullable=True) # Removed
    archived = db.Column(db.Boolean, nullable=False, server_default=db.text("false"))
    archived_reason = db.Column(db.String(255), nullable=True)
    # archived_by = db.Column(StringUUID, nullable=True) # Removed
    archived_at = db.Column(db.DateTime, nullable=True)
    updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp())
    doc_type = db.Column(db.String(40), nullable=True)
    doc_metadata = db.Column(JSONB, nullable=True)
    doc_form = db.Column(db.String(255), nullable=False, server_default=db.text("\"text_model\"::character varying"))
    doc_language = db.Column(db.String(255), nullable=True)

    dataset = relationship("Dataset", back_populates="documents")
    segments = relationship("DocumentSegment", back_populates="document", cascade="all, delete-orphan")

    DATA_SOURCES = ["upload_file", "notion_import", "website_crawl"]

    def to_dict(self):
        """Serialize a summary of the document for API responses."""
        return {
            "id": self.id,
            "dataset_id": self.dataset_id,
            "name": self.name,
            "data_source_type": self.data_source_type,
            "indexing_status": self.indexing_status,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "word_count": self.word_count,
            "tokens": self.tokens,
            "enabled": self.enabled,
            "archived": self.archived,
            "display_status": self.display_status
        }

    @property
    def display_status(self):
        """Map internal flags to a single user-facing status string.

        Evaluation order matters: a pause overrides in-progress states, and
        archived/disabled only apply once indexing has completed.
        """
        if self.indexing_status == "waiting": return "queuing"
        if self.is_paused and self.indexing_status not in ["completed", "error"] : return "paused"
        if self.indexing_status in ["parsing", "cleaning", "splitting", "indexing"]: return "indexing"
        if self.indexing_status == "error": return "error"
        if self.indexing_status == "completed":
            if self.archived: return "archived"
            if not self.enabled: return "disabled"
            return "available"
        return "unknown"

    @property
    def data_source_info_dict(self):
        """Parse data_source_info JSON text; None when unset."""
        return json.loads(self.data_source_info) if self.data_source_info else None

    @property
    def data_source_detail_dict(self):
        """Resolve source details; only "upload_file" sources are expanded.

        Returns {"upload_file": {...}} when the referenced UploadFile row
        exists, otherwise an empty dict.
        """
        if self.data_source_info and self.data_source_type == "upload_file":
            info_dict = self.data_source_info_dict
            if info_dict and "upload_file_id" in info_dict:
                file_detail = db.session.get(UploadFile, info_dict["upload_file_id"])
                if file_detail:
                    return {
                        "upload_file": {
                            "id": file_detail.id,
                            "name": file_detail.name,
                            "size": file_detail.size,
                            "extension": file_detail.extension,
                            "mime_type": file_detail.mime_type,
                            "created_at": file_detail.created_at.isoformat() if file_detail.created_at else None,
                        }
                    }
        return {}

class DocumentSegment(Base):
    """An indexed chunk (segment) of a Document.

    content is the retrievable text; answer is an optional paired text
    (NOTE(review): presumably for Q&A-form documents — confirm).
    index_node_id/index_node_hash link the row to its node in the vector
    index, and hit_count accumulates retrieval hits.
    """

    __tablename__ = "document_segments"
    __table_args__ = (
        db.PrimaryKeyConstraint("id", name="document_segment_pkey"),
        db.Index("document_segment_dataset_id_idx", "dataset_id"),
        db.Index("document_segment_document_id_idx", "document_id"),
        db.Index("document_segment_status_idx", "status"),
        db.Index("document_segment_index_node_id_idx", "index_node_id"),
        # GIN index so JSONB queries against keywords can use an index.
        db.Index("document_segment_keywords_idx", "keywords", postgresql_using="gin"),
        db.Index("document_segment_hit_count_idx", "hit_count"),
    )

    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    # Denormalized copy of the owning document's dataset_id.
    dataset_id = db.Column(StringUUID, ForeignKey("datasets.id"), nullable=False)
    document_id = db.Column(StringUUID, ForeignKey("documents.id"), nullable=False)
    doc_id = db.Column(StringUUID, nullable=True)
    # Ordering position of the segment within its document.
    position = db.Column(db.Integer, nullable=False)
    content = db.Column(db.Text, nullable=False)
    answer = db.Column(db.Text, nullable=True)
    word_count = db.Column(db.Integer, nullable=False)
    tokens = db.Column(db.Integer, nullable=False)
    keywords = db.Column(JSONB, nullable=True)
    index_node_id = db.Column(db.String(255), nullable=True)
    index_node_hash = db.Column(db.String(255), nullable=True)
    # Retrieval hit counter; starts at 0.
    hit_count = db.Column(db.Integer, nullable=False, server_default=db.text("0"))
    enabled = db.Column(db.Boolean, nullable=False, server_default=db.text("true"))
    disabled_at = db.Column(db.DateTime, nullable=True)
    # disabled_by = db.Column(StringUUID, nullable=True) # Removed
    # Indexing state of this segment; DB-level default is "waiting".
    status = db.Column(db.String(255), nullable=False, server_default=db.text("\"waiting\"::character varying"))
    # created_by = db.Column(StringUUID, nullable=False) # Removed
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())
    # updated_by = db.Column(StringUUID, nullable=True) # Removed
    updated_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp(), onupdate=func.current_timestamp())
    indexed_at = db.Column(db.DateTime, nullable=True)

    document = relationship("Document", back_populates="segments")
    child_chunks = relationship("ChildChunk", back_populates="parent_segment", cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize the segment for API responses."""
        return {
            "id": self.id,
            "document_id": self.document_id,
            "dataset_id": self.dataset_id,
            "position": self.position,
            "content": self.content,
            "answer": self.answer,
            "word_count": self.word_count,
            "tokens": self.tokens,
            "keywords": self.keywords,
            "index_node_id": self.index_node_id,
            "enabled": self.enabled,
            "status": self.status,
            "hit_count": self.hit_count,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }

class Embedding(Base):
    """A stored text-embedding vector, serialized as JSON text.

    NOTE(review): hash/model_name/provider_name each have a plain (non-unique)
    index — confirm whether de-duplication by (hash, model, provider) is
    enforced elsewhere.
    """

    __tablename__ = "embeddings"
    __table_args__ = (
        db.PrimaryKeyConstraint("id", name="embedding_pkey"),
        db.Index("embedding_hash_idx", "hash"),
        db.Index("embedding_model_idx", "model_name"),
        db.Index("embedding_provider_idx", "provider_name"),
    )
    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    model_name = db.Column(db.String(255), nullable=False)
    # Lookup key for the embedded text (hash of the input).
    hash = db.Column(db.String(255), nullable=False)
    # JSON-encoded list[float]; use set_embedding/get_embedding to access.
    embedding = db.Column(db.Text, nullable=False)
    provider_name = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())

    def set_embedding(self, embedding_list: list[float]):
        """Store the vector, serialized as JSON text."""
        self.embedding = json.dumps(embedding_list)

    def get_embedding(self) -> list[float]:
        """Deserialize the stored JSON text back into a list of floats."""
        return json.loads(self.embedding)

class DatasetKeywordTable(Base):
    """Per-dataset keyword table stored as one JSONB blob.

    NOTE(review): presumably maps keywords to segment/node ids for keyword
    retrieval — confirm against the keyword index implementation.
    """

    __tablename__ = "dataset_keyword_tables"
    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    dataset_id = db.Column(StringUUID, ForeignKey("datasets.id"), nullable=False, index=True)
    keyword_table = db.Column(JSONB, nullable=False)

    dataset = relationship("Dataset", back_populates="keyword_tables")

class AppDatasetJoin(Base):
    """Association row linking an App to a Dataset (many-to-many)."""

    __tablename__ = "app_dataset_joins"
    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    app_id = db.Column(StringUUID, ForeignKey("apps.id"), nullable=False, index=True)
    dataset_id = db.Column(StringUUID, ForeignKey("datasets.id"), nullable=False, index=True)
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())

    # One-way relationship to App; no back_populates on the App side here.
    app = relationship("App") # Add backref in App model if needed
    dataset = relationship("Dataset", back_populates="app_dataset_joins")

class DatasetMetadata(Base):
    """A custom metadata field definition (name + type) for a dataset.

    Surfaced to clients by Dataset.doc_metadata alongside the built-in fields.
    """

    __tablename__ = "dataset_metadata"
    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    dataset_id = db.Column(StringUUID, ForeignKey("datasets.id"), nullable=False, index=True)
    name = db.Column(db.String(255), nullable=False)
    # Field type label (e.g. "string"/"time" as used for built-in fields).
    type = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())

    dataset = relationship("Dataset", back_populates="metadata_entries")

class ChildChunk(Base):
    """A sub-chunk belonging to a DocumentSegment.

    NOTE(review): presumably used by hierarchical/parent-child chunking
    (cf. the "hierarchical" process-rule mode and the ParentMode import) —
    confirm against the index processor.
    """

    __tablename__ = "child_chunks"
    id = db.Column(StringUUID, server_default=db.text("uuid_generate_v4()"), primary_key=True)
    segment_id = db.Column(StringUUID, ForeignKey("document_segments.id"), nullable=False, index=True)
    # Id of this chunk's node in the vector index.
    index_node_id = db.Column(db.String(255), nullable=False, index=True)
    content = db.Column(db.Text, nullable=False)
    # Ordering position of the chunk within its parent segment.
    position = db.Column(db.Integer, nullable=False)
    created_at = db.Column(db.DateTime, nullable=False, server_default=func.current_timestamp())

    parent_segment = relationship("DocumentSegment", back_populates="child_chunks")

