import copy
import datetime
import json
import logging
import random
import time
import uuid
from collections import Counter
from typing import Any, Optional

from sqlalchemy import func, select
from sqlalchemy.orm import Session
from werkzeug.exceptions import NotFound

from core.errors.error import LLMBadRequestError, ProviderTokenNotInitError
from core.model_manager import ModelManager 
from core.model_runtime.entities.model_entities import ModelType
# from core.plugin.entities.plugin import ModelProviderID # Not directly used in this simplified version
# from core.rag.index_processor.constant.built_in_field import BuiltInField # Not directly used
# from core.rag.index_processor.constant.index_type import IndexType # Not directly used
from core.rag.retrieval.retrieval_methods import RetrievalMethod
from extensions import db
from libs import helper 
from models.dataset import (
    Dataset,
    DatasetProcessRule,
    Document,
    DocumentSegment,
)
# from services.entities.knowledge_entities.knowledge_entities import RetrievalModel # This was commented out, assuming retrieval_model is a dict
from services.errors.dataset import DatasetNameDuplicateError

# Imports for perform_rag_query
from core.rag.datasource.retrieval_service import RetrievalService
from core.rag.models.document import Document as RAGDocument # Alias to avoid name collision


class DocumentService:
    """Document-service constants.

    Currently only exposes the fallback processing rules applied when a
    dataset has no persisted ``DatasetProcessRule`` row.
    """

    # Fallback processing configuration: automatic mode with the
    # model-defined automatic rule set.
    DEFAULT_RULES = dict(mode="automatic", rules=DatasetProcessRule.AUTOMATIC_RULES)

class DatasetService:
    """Dataset CRUD, process-rule lookup, and RAG retrieval.

    Runs in a standalone (single-tenant) deployment, so model-manager
    lookups use a fixed placeholder tenant id.
    """

    @staticmethod
    def get_datasets(page, per_page, search=None, tag_ids=None):
        """Return one page of datasets, newest first.

        Args:
            page: 1-based page number.
            per_page: requested page size (server-capped at 100).
            search: optional case-insensitive substring match on the name.
            tag_ids: accepted for interface compatibility; not applied here.

        Returns:
            Tuple ``(items, total)``.
        """
        query = select(Dataset).order_by(Dataset.created_at.desc())

        if search:
            query = query.filter(Dataset.name.ilike(f"%{search}%"))

        datasets = db.paginate(select=query, page=page, per_page=per_page, max_per_page=100, error_out=False)

        return datasets.items, datasets.total

    @staticmethod
    def get_process_rules(dataset_id):
        """Return the most recent process rule for a dataset.

        Falls back to ``DocumentService.DEFAULT_RULES`` when the dataset has
        no persisted rule.

        Returns:
            Dict with ``mode`` and ``rules`` keys.
        """
        dataset_process_rule = (
            db.session.query(DatasetProcessRule)
            .filter(DatasetProcessRule.dataset_id == dataset_id)
            .order_by(DatasetProcessRule.created_at.desc())
            .limit(1)
            .one_or_none()
        )
        if dataset_process_rule:
            mode = dataset_process_rule.mode
            rules = dataset_process_rule.rules_dict
        else:
            mode = DocumentService.DEFAULT_RULES["mode"]
            rules = DocumentService.DEFAULT_RULES["rules"]
        return {"mode": mode, "rules": rules}

    @staticmethod
    def get_datasets_by_ids(ids):
        """Fetch every dataset whose id is in ``ids``.

        Returns:
            Tuple ``(items, total)``. An empty ``ids`` short-circuits to
            ``([], 0)`` — previously this produced ``per_page=0`` and a
            pagination error.
        """
        if not ids:
            return [], 0
        stmt = select(Dataset).filter(Dataset.id.in_(ids))
        datasets = db.paginate(select=stmt, page=1, per_page=len(ids), max_per_page=len(ids), error_out=False)
        return datasets.items, datasets.total

    @staticmethod
    def create_empty_dataset(
        name: str,
        description: Optional[str],
        indexing_technique: Optional[str],
        provider: str = "vendor",
        embedding_model_provider: Optional[str] = None,
        embedding_model_name: Optional[str] = None,
        retrieval_model: Optional[dict] = None,  # plain dict; persisted as JSON
    ):
        """Create a dataset with no documents.

        For ``indexing_technique == "high_quality"`` an embedding model is
        resolved: the explicit provider/name pair when given (validated
        first), otherwise the tenant default. If no default is available,
        indexing is downgraded to ``None`` (embedding disabled) with a
        warning instead of failing.

        Returns:
            The committed ``Dataset`` row.

        Raises:
            DatasetNameDuplicateError: a dataset named ``name`` already exists.
            ValueError: the explicit embedding model is misconfigured.
        """
        if db.session.query(Dataset).filter_by(name=name).first():
            raise DatasetNameDuplicateError(f"Dataset with name {name} already exists.")

        embedding_model_instance = None
        effective_tenant_id = "standalone_tenant"  # fixed tenant in standalone mode

        if indexing_technique == "high_quality":
            model_manager = ModelManager()
            if embedding_model_provider and embedding_model_name:
                # Validate the explicit choice before instantiating it.
                DatasetService.check_embedding_model_setting(
                    effective_tenant_id, embedding_model_provider, embedding_model_name
                )
                embedding_model_instance = model_manager.get_model_instance(
                    tenant_id=effective_tenant_id,
                    provider=embedding_model_provider,
                    model_type=ModelType.TEXT_EMBEDDING,
                    model=embedding_model_name,
                )
            else:
                try:
                    embedding_model_instance = model_manager.get_default_model_instance(
                        tenant_id=effective_tenant_id, model_type=ModelType.TEXT_EMBEDDING
                    )
                except Exception as e:
                    # Best-effort: fall back to a non-embedding dataset rather than fail.
                    logging.warning(
                        "Could not get default embedding model: %s. Embedding will be disabled for this dataset.", e
                    )
                    indexing_technique = None

            # NOTE(review): reranking-model validation intentionally omitted
            # here (simplified) — re-add when reranking config is supported.

        dataset = Dataset(
            name=name,
            description=description,
            indexing_technique=indexing_technique,
            provider=provider,
            embedding_model_provider=embedding_model_instance.provider if embedding_model_instance else None,
            embedding_model=embedding_model_instance.model if embedding_model_instance else None,
            retrieval_model=retrieval_model,  # stored as JSON/dict
        )

        db.session.add(dataset)
        db.session.commit()
        return dataset

    @staticmethod
    def get_dataset(dataset_id) -> Optional[Dataset]:
        """Return the dataset with ``dataset_id``, or ``None`` if absent."""
        dataset: Optional[Dataset] = db.session.query(Dataset).filter_by(id=dataset_id).first()
        return dataset

    @staticmethod
    def check_embedding_model_setting(tenant_id: str, embedding_model_provider: str, embedding_model: str):
        """Validate that the given embedding model can be instantiated.

        Raises:
            ValueError: the model is unavailable, misconfigured, or an
                unexpected error occurred during validation.
        """
        try:
            model_manager = ModelManager()
            model_manager.get_model_instance(
                tenant_id=tenant_id,
                provider=embedding_model_provider,
                model_type=ModelType.TEXT_EMBEDDING,
                model=embedding_model,
            )
        except LLMBadRequestError as e:
            raise ValueError(
                "Embedding Model not available or misconfigured. Please check settings."
            ) from e
        except ProviderTokenNotInitError as ex:
            raise ValueError(f"Embedding model provider error: {ex.description}") from ex
        except Exception as e:
            logging.error("Unknown error checking embedding model: %s", e)
            raise ValueError("Failed to validate embedding model due to an unexpected error.") from e

    @staticmethod
    def delete_dataset(dataset_id: str):
        """Delete a dataset and its documents, segments, and process rules.

        Returns:
            ``True`` on success.

        Raises:
            NotFound: no dataset with ``dataset_id``.
            Exception: the delete transaction failed (session rolled back).
        """
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found")
        try:
            # Bulk-delete dependents first, then the dataset row, in one
            # transaction. (db.session.query used for consistency with the
            # rest of this module, instead of the legacy Model.query API.)
            db.session.query(Document).filter_by(dataset_id=dataset_id).delete()
            db.session.query(DocumentSegment).filter_by(dataset_id=dataset_id).delete()
            db.session.query(DatasetProcessRule).filter_by(dataset_id=dataset_id).delete()
            db.session.delete(dataset)
            db.session.commit()
            # TODO: Placeholder for actual vector index deletion via a VectorService or similar.
            # e.g., VectorService.delete_dataset_collection(dataset)
            logging.info("Dataset %s and its related data deleted successfully.", dataset_id)
            return True
        except Exception as e:
            db.session.rollback()
            logging.error("Error deleting dataset %s: %s", dataset_id, e)
            raise Exception(f"Failed to delete dataset: {e}") from e

    @staticmethod
    def perform_rag_query(dataset_id: str, query_text: str, top_k: int = 2):
        """Run a retrieval query against an indexed dataset.

        Only high-quality (embedding-based) datasets are supported. The
        search method and score threshold come from the dataset's stored
        retrieval-model config; ``top_k`` overrides the stored value for
        this query. Reranking is disabled in this simplified version.

        Returns:
            List of dicts with ``content``, ``score``, and ``source`` keys.

        Raises:
            NotFound: no dataset with ``dataset_id``.
            ValueError: dataset not configured for embedding search, RAG
                not fully implemented, or a model provider/config problem.
            Exception: any other retrieval failure.
        """
        dataset = DatasetService.get_dataset(dataset_id)
        if not dataset:
            raise NotFound("Dataset not found")

        # NOTE(review): this path assumes documents have been indexed and that
        # Dataset.get_vector_index() (plus ModelManager / vector-store config)
        # is functional — confirm before relying on it.
        if dataset.indexing_technique != "high_quality" or not dataset.embedding_model:
            # Economy datasets would need a full-text search path, not this one.
            raise ValueError("Dataset is not configured for high-quality (embedding-based) search or embedding model is missing.")

        # retrieval_model may be stored as a dict or a JSON string; normalize.
        dataset_retrieval_config = (
            dataset.retrieval_model
            if isinstance(dataset.retrieval_model, dict)
            else json.loads(dataset.retrieval_model or "{}")
        )

        search_method = dataset_retrieval_config.get("search_method", RetrievalMethod.SEMANTIC_SEARCH.value)
        score_threshold_enabled = dataset_retrieval_config.get("score_threshold_enabled", False)
        score_threshold = dataset_retrieval_config.get("score_threshold") if score_threshold_enabled else None

        retrieval_service = RetrievalService()

        try:
            rag_documents = retrieval_service.search_by_dataset_model(
                dataset=dataset,  # SQLAlchemy model instance
                query=query_text,
                search_method=search_method,
                top_k=top_k,  # API-provided top_k overrides dataset default
                score_threshold=score_threshold,
                reranking_model=None,  # simplified: no reranking for now
            )
        except NotImplementedError as nie:
            # Raised when get_vector_index or similar is not implemented.
            logging.error("RAG query failed for dataset %s due to missing implementation: %s", dataset_id, nie)
            raise ValueError(f"RAG feature not fully configured or implemented for this dataset: {nie}") from nie
        except Exception as e:
            logging.error("Error during RAG query for dataset %s: %s", dataset_id, e)
            # Heuristic: surface provider/config failures as ValueError so the
            # API layer can map them to a client-facing error.
            if "ProviderTokenNotInitError" in str(e) or "LLMBadRequestError" in str(e) or "AuthenticationError" in str(e):
                raise ValueError(f"Failed to perform RAG query due to model provider/configuration issue: {e}") from e
            raise Exception(f"RAG query failed: {e}") from e

        # Flatten each RAGDocument into the API response shape.
        return [
            {
                "content": doc.page_content,
                "score": doc.metadata.get("score"),
                "source": {
                    "dataset_id": doc.metadata.get("dataset_id"),
                    "document_id": doc.metadata.get("document_id"),
                    "segment_id": doc.metadata.get("segment_id"),
                    "document_name": doc.metadata.get("document_name"),
                    "data_source_type": doc.metadata.get("data_source_type"),
                },
            }
            for doc in rag_documents
        ]


