# -*- coding: utf-8 -*-
# @Author: Cursor
# @Date: 2025-01-14
# @Last Modified by: Tim Liu
# @Last Modified time: 2025-01-14
# @Description: Schema Manager for managing predefined domain schemas and ontology related schemas

import json
from typing import Optional, Dict

from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import PydanticOutputParser

from langchain_core.language_models import BaseLanguageModel
from langchain_core.vectorstores.base import VectorStore
from langchain_core.embeddings import Embeddings
from langchain_core.documents import Document

#from crewplus.apps.rag.models.oschema import OSchemaDB
from crewplus.apps.rag.cruds.ontology import OntologyDal

from sqlalchemy.ext.asyncio import AsyncSession
from crewplus.services.vdb_service import VDBService

from .schema_classifier import SchemaClassifier
from .schema_hub import HAZARD_SUBSTANCE, CREWPLUS_ITR

import logging

class SchemaManager:
    """Schema Manager for managing predefined domain schemas and ontology related schemas.

    Maintains a registry of built-in schemas (hazard substance, training material,
    CrewPlus ITR) and, when an ``ontology_name`` is given, lazily merges in schemas
    stored in the database. Content can then be matched to a schema either with an
    LLM-based classifier (``classify_w_llm``) or with embedding similarity against
    a vector store (``classify_content``).
    """

    # Minimum LLM-reported confidence required to accept a classification result.
    CONFIDENCE_THRESHOLD = 0.6

    # Returned implicitly when no schema matches; kept for API compatibility.
    DEFAULT_SCHEMA = None

    # Re-exported from schema_hub so callers can reach it as a class attribute.
    HAZARD_SUBSTANCE = HAZARD_SUBSTANCE

    TRAINING_MATERIAL_SCHEMA = """
    {
        "node_types": {
            "Training Material": {
                "properties": {
                    "title": "STRING",
                    "type": "STRING",
                    "author": "STRING",
                    "date_created": "STRING",
                    "training_objective": "STRING",
                    "content": "STRING",
                    "source": "STRING",
                    "page": "STRING"                             
                },
                "indexes": {
                    "training_material_vidx": {
                        "type": "VECTOR",
                        "vprop": "node_embedding"
                    }
                }
            }
        },
        "relationships": {}
    }
    """    

    def __init__(self, db: AsyncSession, ontology_name: Optional[str] = None):
        """Initialize the manager with the built-in schema registry.

        Args:
            db: Async SQLAlchemy session used to query ontology schemas.
            ontology_name: Optional ontology whose stored schemas should be
                merged into ``available_schemas``. Because ``__init__`` cannot
                await, these are loaded lazily on the first classification call.
        """
        self.db = db
        self.ontology_name = ontology_name
        self.available_schemas: Dict[str, str] = {
            "hazard": self.HAZARD_SUBSTANCE,
            "training_material": self.TRAINING_MATERIAL_SCHEMA,
            "crewplus_itr": CREWPLUS_ITR
        }

        # BUG FIX: the original code called the async _load_ontology_schemas()
        # directly here, which only created a never-awaited coroutine — the
        # ontology schemas were never actually loaded. We instead record that
        # loading is pending and perform it lazily in the classify methods.
        self._ontology_loaded = ontology_name is None

    async def _ensure_ontology_schemas(self):
        """Load ontology schemas from the DB exactly once, on first use."""
        if not self._ontology_loaded:
            await self._load_ontology_schemas()
            self._ontology_loaded = True

    async def _load_ontology_schemas(self):
        """Load ontology and related schemas from the database.

        Merges each schema attached to the named ontology into
        ``available_schemas``, keyed by schema name.
        """
        ontology = await OntologyDal(self.db).get_data(name=self.ontology_name)
        if ontology:
            for schema in ontology.schemas:
                self.available_schemas[schema.name] = schema.schema_content

    async def classify_w_llm(self, content: str, llm: BaseLanguageModel) -> Optional[str]:
        """Classify content to a schema in ``available_schemas`` using an LLM.

        Args:
            content: The text to classify.
            llm: Any langchain-compatible language model.

        Returns:
            The matched schema's content string, or ``None`` when the LLM's
            confidence does not exceed ``CONFIDENCE_THRESHOLD`` or when
            classification fails for any reason.
        """
        await self._ensure_ontology_schemas()

        parser = PydanticOutputParser(pydantic_object=SchemaClassifier)

        try:
            prompt = ChatPromptTemplate.from_messages(
                [
                    (
                        "system",
                        """Classify the following content to one of the available schemas. Output a classification, which contains the 'schema_name' and optionally a 'confidence' score (from 0.0 to 1.0). Do NOT wrap the output in `json` tags.
                        Available Schemas:
                        ```json
                        {schemas_json}
                        ```
                        {format_instructions}""",
                    ),
                    ("human", "{content}"),
                ]
            ).partial(schemas_json=json.dumps(self.available_schemas), format_instructions=parser.get_format_instructions()) # Moved schemas_json to partial

            # NOTE(review): chain.invoke is a blocking call inside an async
            # method; consider `await chain.ainvoke(...)` once all supplied
            # LLMs are confirmed to support the async interface.
            chain = prompt | llm | parser
            parsed_output = chain.invoke({"content": content})

            # Return schema_content only if confidence is greater than the threshold
            if parsed_output.confidence and parsed_output.confidence > self.CONFIDENCE_THRESHOLD:
                return self.available_schemas.get(parsed_output.schema_name)
            return None
        except Exception as e:
            # A failed classification is a degraded-but-expected outcome, so we
            # log (at warning, with lazy args) and fall back to None.
            logging.warning("Failed to classify schema: %s", e)
            return None

    async def classify_content(self, content: str, vector_store: Optional[VectorStore], embedder: Optional[Embeddings]) -> Optional[str]:
        """Classify content to a schema using embedding similarity.

        Ensures every known schema is indexed in the vector store (adding any
        that are missing), then performs a relevance-scored similarity search
        for ``content``.

        Args:
            content: The text to classify.
            vector_store: Vector store to search; when ``None``, one is created
                via ``VDBService`` for the ontology's collection.
            embedder: Embeddings model; when ``None``, the ``VDBService``
                default is used.

        Returns:
            The best-matching schema's content string when its relevance score
            exceeds 0.7, otherwise ``None``.
        """
        await self._ensure_ontology_schemas()

        vdbsrv = VDBService()
        embeddings = embedder or vdbsrv.get_embeddings()
        vector_store = vector_store or vdbsrv.get_vector_store(collection_name=self.ontology_name or 'ontology_schemas', embeddings=embeddings)

        # Index any schema that is not yet present in the vector store.
        schema_docs = []
        for schema_name, schema_content in self.available_schemas.items():
            if not vector_store.similarity_search(schema_content, k=1, filter={"schema_name": schema_name}):
                schema_doc = Document(
                    page_content=schema_content,
                    metadata={"schema_name": schema_name},
                )
                schema_docs.append(schema_doc)

        if schema_docs:
            vector_store.add_documents(documents=schema_docs)

        # 0.7 is the minimum relevance score to accept a match.
        results = vector_store.similarity_search_with_relevance_scores(query=content, k=1)
        if results and results[0][1] > 0.7:
            most_similar_schema = results[0][0].metadata["schema_name"]
            return self.available_schemas.get(most_similar_schema)
        return None