from typing import (
    List, 
    Optional, 
    Union, 
    Dict, 
    Any,
    Literal
)
import os
import asyncio
from tqdm.asyncio import tqdm as tqdm_async

from neo4j import (
    AsyncGraphDatabase,
    GraphDatabase,
    exceptions as neo4jExceptions,
    AsyncDriver,
)

from consts import(
    NODE_LABEL_NEWS,
    NODE_LABEL_DESCRIPTION,
    NODE_LABEL_ENTITY,
    NODE_LABEL_REPORTER,
    NODE_LABEL_VOICEOVER,
    NODE_LABEL_CAMERAMAN,
)

from utils import divide_chunks

from loguru import logger

# Module-level singleton Neo4j driver: created lazily by init_storage()
# and torn down by close_storage().
_driver = None

def init_storage():
    """Return the shared Neo4j driver, creating it on first use.

    Reads NEO4J_URL / NEO4J_USER / NEO4J_PASS from the environment only when
    the driver actually has to be created, so an already-initialised driver
    keeps working even if the variables are later unset.

    Returns:
        neo4j.Driver: the shared synchronous driver singleton.

    Raises:
        KeyError: if a required environment variable is missing.
        Exception: any driver-construction error (logged, then re-raised so
            callers never receive ``None`` in place of a driver).
    """
    global _driver

    if _driver is None:
        url = os.environ['NEO4J_URL']
        user = os.environ['NEO4J_USER']
        password = os.environ['NEO4J_PASS']
        try:
            _driver = GraphDatabase.driver(
                url,
                auth=(user, password),
                max_connection_pool_size=50,
                connection_timeout=60,
                connection_acquisition_timeout=120,
                fetch_size=300,
            )
        except Exception as e:
            # Fix: re-raise instead of swallowing — the original returned
            # None here, making every caller fail later with a confusing
            # AttributeError on the missing driver.
            logger.error(f"Error in initializing Storage {e}")
            raise

    return _driver

def close_storage():
    """Close the shared driver (if any) and clear the singleton.

    Fix: ``GraphDatabase.driver()`` returns a *synchronous* driver whose
    ``close()`` returns ``None``, so the original
    ``asyncio.run(_driver.close())`` raised
    ``ValueError: a coroutine was expected``. Close it directly and reset
    the module-level singleton so init_storage() can recreate it.
    """
    global _driver
    if _driver is not None:
        _driver.close()
        _driver = None

class Neo4jStorage:
    """Synchronous storage layer over a Neo4j graph of News / Description /
    Entity nodes.

    All queries go through the module-level singleton driver returned by
    ``init_storage()``; instances hold no connection state of their own.
    """

    def __init__(self):
        # Kept for backward compatibility only: connections come from the
        # module-level init_storage() singleton, not from this attribute.
        self._driver = None

    def _query(
            self,
            query: str,
            params: Optional[dict] = None
        ) -> List[Dict[str, Any]]:
        """Execute a Cypher query and return the rows as dictionaries.

        Args:
            query (str): The Cypher query to execute.
            params (dict, optional): Query parameters. Defaults to {}.

        Returns:
            List[Dict[str, Any]]: One dict per result row. An empty list is
            returned when the query fails with a syntax error or an expired
            session (both cases are logged, not raised).
        """
        from neo4j.exceptions import CypherSyntaxError, SessionExpired

        params = params or {}

        with init_storage().session() as session:
            try:
                result = session.run(query, params)
                return [record.data() for record in result]
            except CypherSyntaxError as e:
                logger.error(f"Failed in {query} with params:{params}, {e}")
            except SessionExpired as e:
                # Transient error; callers may retry the whole query.
                logger.error(f"SessionExpired in {query} with params:{params}, {e}")
        # Fix: explicit empty list instead of an implicit None so callers
        # iterating over the result do not crash on the error paths.
        return []

    def _get_similar_entities(self, entity: str) -> List[dict]:
        """Full-text lookup of Entity nodes whose id resembles *entity*.

        Returns rows shaped like ``{"node.id": ...}`` (fixed annotation:
        the original claimed ``List[str]`` but ``_query`` yields dicts).
        """
        cql = "CALL db.index.fulltext.queryNodes('idx_keyword_Entity_id', $entity) YIELD node, score WHERE score>5 RETURN node.id"
        return self._query(cql, {"entity": entity})

    def _description_contains(self, keywords: List[List[str]] = None, start: str = None, end: str = None) -> str:
        """Build a Cypher WHERE clause over Description nodes (alias ``d``).

        Words inside each inner list are OR-ed; the groups are AND-ed.
        For example ``[['A', 'B'], ['C', 'D']]`` yields::

            WHERE TRUE AND ( d.content CONTAINS 'A' OR  d.content CONTAINS 'B') AND
                          ( d.content CONTAINS 'C' OR  d.content CONTAINS 'D')

        *start*/*end* append range filters on ``d.occurred_on``.

        NOTE(review): keyword and date values are interpolated directly into
        the query text, so callers must not pass untrusted input (Cypher
        injection risk).
        """
        def _merge_cond_contains(words: List[str]) -> str:
            # OR together the CONTAINS tests for one synonym group.
            clause = ""
            for word in words[:-1]:
                clause += f" d.content CONTAINS '{word}' OR "
            clause += f" d.content CONTAINS '{words[-1]}'"
            return f" ({clause}) "

        cond = " WHERE TRUE "

        if keywords:
            # Fix: drop empty groups, which would raise IndexError on
            # words[-1] in _merge_cond_contains.
            groups = [words for words in keywords if words]
            if groups:
                cond = cond + "AND" + "AND".join(
                    _merge_cond_contains(words) for words in groups
                )

        if start:
            cond = cond + " AND " + f"d.occurred_on >= '{start}'"
        if end:
            cond = cond + " AND " + f"d.occurred_on <= '{end}'"

        return cond

    def keyword_search(self,
                       keywords: List[List[str]] = None,
                       reported_start: str = None,
                       reported_end: str = None,
                       occurred_start: str = None,
                       occurred_end: str = None,
                       limit: int = 3) -> List[dict]:
        """Find News whose linked Descriptions contain the given keywords.

        Args:
            keywords: Groups of synonyms; see _description_contains.
            reported_start/reported_end: Inclusive range on News.reported_on.
            occurred_start/occurred_end: Inclusive range on
                Description.occurred_on.
            limit: Maximum number of rows.

        Returns:
            Rows shaped like ``{"news": <content>}``.
        """
        cond_desc = self._description_contains(keywords, occurred_start, occurred_end)
        cond_news = "WHERE TRUE "
        if reported_start:
            # Fix: reported_on lives on the News node (n), not on the
            # Description (d), and the placeholder must match the parameter
            # actually bound below ($reported_start, not $occurred_start).
            cond_news = cond_news + " AND n.reported_on >= $reported_start"

        if reported_end:
            cond_news = cond_news + " AND n.reported_on <= $reported_end"

        # Fix: the original ordered by an undefined variable `score`, which
        # makes the query fail; order by report date instead.
        cql = f"""
MATCH (n:News) -[:MENTIONS]-(d:Description)
{cond_desc}
WITH n, d
{cond_news}
RETURN n.content as news
ORDER BY n.reported_on DESC LIMIT {limit};
"""
        # Fix: the original never passed the bound parameters to _query.
        return self._query(
            cql,
            {
                "reported_start": reported_start,
                "reported_end": reported_end,
            },
        )

    def vector_search(self,
                      task: str,
                      keywords: List[List[str]] = None,
                      reported_start: str = None,
                      reported_end: str = None,
                      occurred_start: str = None,
                      occurred_end: str = None,
                      limit: int = 3) -> List[dict]:
        """Semantic search: embed *task* via apoc.ml.openai.embedding and rank
        News by cosine similarity against Description embeddings (> 0.9).

        Args:
            task: Natural-language query to embed.
            keywords: Optional synonym groups; see _description_contains.
            reported_start/reported_end: Inclusive range on News.reported_on.
            occurred_start/occurred_end: Inclusive range on
                Description.occurred_on.
            limit: Maximum number of distinct News rows.

        Returns:
            Rows with ``content``, ``title`` and ``reported_on`` keys.
        """
        cond_desc = self._description_contains(keywords, occurred_start, occurred_end)
        cond_news = "WHERE score > 0.9 "
        if reported_start:
            cond_news = cond_news + " AND n.reported_on >= $reported_start"

        if reported_end:
            cond_news = cond_news + " AND n.reported_on <= $reported_end"

        cql = f"""
CALL apoc.ml.openai.embedding([$task], $api_key, {{endpoint: $endpoint, model: $model}}) YIELD text, embedding
WITH text, embedding
MATCH (n:News) -[:MENTIONS]-(d:Description)
{cond_desc}
WITH n, d, vector.similarity.cosine(d.embedding, embedding) AS score
{cond_news}
ORDER BY score DESC
WITH n
RETURN DISTINCT n.content AS content, n.title as title, n.reported_on AS reported_on
LIMIT {limit}
"""
        openai_api_base = os.environ["OPENAI_EMBEDDING_ENDPOINT"]
        model = os.environ["OPENAI_EMBEDDING_MODEL"]
        key = os.environ["OPENAI_KEY"]

        return self._query(
            cql,
            {
                "task": task,
                "api_key": key,
                "endpoint": openai_api_base,
                "model": model,
                "reported_start": reported_start,
                "reported_end": reported_end,
                "occurred_start": occurred_start,
                "occurred_end": occurred_end,
            },
        )

    def _create_vector_index(self, node_label: str, embedding_node_property: str, embedding_dimension: int) -> None:
        """Create (idempotently) a cosine vector index named
        ``idx_vector_<label>_<property>`` over the given node property."""
        index_name = f"idx_vector_{node_label}_{embedding_node_property}"
        index_query = (
            f"CREATE VECTOR INDEX {index_name} "
            " IF NOT EXISTS "
            f"FOR (n:{node_label}) ON "
            f"{'n.' + embedding_node_property} "
            "OPTIONS { indexConfig: {"
            f"`vector.dimensions`: {embedding_dimension},"
            f"`vector.similarity_function`: 'cosine'"
            "}}"
        )

        self._query(index_query)

    def _create_keyword_index(self, node_label: str, text_node_property: Union[str, List[str]]) -> None:
        """Create (idempotently) a full-text index named
        ``idx_keyword_<label>_<props>`` using the CJK analyzer.

        Args:
            node_label: Label of the nodes to index.
            text_node_property: One property name or a list of them.
        """
        node_props = [text_node_property] if isinstance(text_node_property, str) else text_node_property
        index_name = f"idx_keyword_{node_label}_{'and'.join(node_props)}"
        fts_index_query = (
            f"CREATE FULLTEXT INDEX {index_name} "
            " IF NOT EXISTS "
            f"FOR (n:`{node_label}`) ON EACH "
            f"[{', '.join(['n.`' + el + '`' for el in node_props])}]"
            "OPTIONS {"
            "indexConfig: {"
            # See CALL db.index.fulltext.listAvailableAnalyzers()
            "`fulltext.analyzer`: 'cjk',"
            "`fulltext.eventually_consistent`: true"
            "}}"
        )
        self._query(fts_index_query)

    def _create_search_index(self, node_label: str, text_node_property: Union[str, List[str]], range_index: bool = True) -> None:
        """Create (idempotently) a RANGE (default) or TEXT index named
        ``idx_search_<label>_<props>`` over the given properties."""
        node_props = [text_node_property] if isinstance(text_node_property, str) else text_node_property
        index_name = f"idx_search_{node_label}_{'and'.join(node_props)}"
        fts_index_query = (
            f"CREATE {'RANGE' if range_index else 'TEXT'} INDEX {index_name} "
            " IF NOT EXISTS "
            f"FOR (n:`{node_label}`) ON "
            f"({', '.join(['n.`' + el + '`' for el in node_props])})"
        )
        self._query(fts_index_query)

    def get_similiar_entity(self, id: str, limit=3) -> List[dict]:
        """Full-text lookup of Entity nodes matching *id* (score > 4).

        NOTE(review): the name keeps the historical "similiar" spelling for
        backward compatibility with existing callers.
        """
        cql = f"CALL db.index.fulltext.queryNodes('idx_keyword_Entity_id', $id) YIELD node, score WHERE score>4 RETURN node.id as id, node.summary as summary LIMIT {limit}"
        return self._query(cql, {"id": id})

    def get_associated_descriptions(self, id: str) -> List[dict]:
        """Return all Descriptions linked to the Entity with the given id,
        newest occurred_on first."""
        query = f"MATCH (d:{NODE_LABEL_DESCRIPTION}) -- (n:{NODE_LABEL_ENTITY} {{id:$id}}) ORDER BY d.occurred_on DESC RETURN d.id AS id, d.content as content, d.occurred_on as occurred_on"
        return self._query(query, {"id": id})

    def get_nodes_by_degree(self, min_degree: int, in_or_out: Union[Literal["in"], Literal["out"]] = "in", label=NODE_LABEL_ENTITY) -> List[dict]:
        """Return nodes of *label* whose in- or out-degree is at least
        *min_degree*, ordered by degree descending."""
        query = f"MATCH (p:{label}) WITH p, apoc.node.degree.{in_or_out}(p) AS degree WHERE degree >= {min_degree} RETURN p.id As id, degree ORDER BY degree DESC"
        return self._query(query)

    def update_properties(self, id: str, label: str = NODE_LABEL_ENTITY, **kwargs):
        """SET arbitrary properties on the node with the given id.

        Property *values* are bound as query parameters; the property *names*
        (kwargs keys) are interpolated into the query, so they must come from
        trusted code, never from user input.
        """
        params = ",".join([f"p.{k}=${k} " for k in kwargs])
        query = f"MATCH (p: {label} {{id: $id}}) SET {params}"
        self._query(query, {"id": id, **kwargs})

    def _batch_load(self, records: List[dict]) -> None:
        """Bulk-import nodes and relationships from extraction *records*.

        Each record may carry ``nodes`` and ``relationships`` lists; they are
        merged, chunked by 100 and upserted via apoc.merge.

        Fix: the original wrapped the *synchronous* ``_query`` calls in
        ``asyncio.gather`` / ``asyncio.as_completed``, which raises TypeError
        because plain lists/None are not awaitables, and it reused
        ``len(node_chunks)`` as the total for the relations progress bar.
        """
        # Ensure id-uniqueness constraints exist before merging.
        for label in [NODE_LABEL_VOICEOVER, NODE_LABEL_REPORTER, NODE_LABEL_CAMERAMAN,
                      NODE_LABEL_NEWS, NODE_LABEL_ENTITY, NODE_LABEL_DESCRIPTION]:
            self._query(
                f"CREATE CONSTRAINT IF NOT EXISTS FOR (b:{label}) "
                "REQUIRE b.id IS UNIQUE"
            )

        def _node_import(rows: List[dict]) -> None:
            # Upsert each node by (type, id), merging its properties.
            cql = (
                "UNWIND $data AS row "
                "CALL apoc.merge.node([row.type], {id: row.id}, row.properties, {}) YIELD node "
                "RETURN distinct 'done' AS result"
            )
            self._query(cql, {"data": rows})

        def _rel_import(rows: List[dict]) -> None:
            # Upsert both endpoints, then the relationship between them.
            # NOTE(review): 'stpe'/'ttpe' look like typos for source/target
            # type, but they are the keys the record builder emits — confirm
            # before renaming.
            cql = (
                "UNWIND $data AS row "
                "CALL apoc.merge.node([row.stpe], {id: row.sid}, {}, {}) YIELD node as source "
                "CALL apoc.merge.node([row.ttpe], {id: row.tid}, {}, {}) YIELD node as target "
                "CALL apoc.merge.relationship(source, row.type, {}, {}, target) YIELD rel "
                "RETURN distinct 'done'"
            )
            self._query(cql, {"data": rows})

        from functools import reduce
        # Fix: seed reduce with [] and default missing lists to [] so records
        # without "nodes"/"relationships" (or an empty records list) do not
        # raise TypeError.
        all_nodes = reduce(list.__add__, [r.get("nodes") or [] for r in records], [])
        node_chunks = list(divide_chunks(all_nodes, 100))
        for chunk in tqdm_async(node_chunks, desc="Importing Nodes", unit="Node Chunks"):
            _node_import(chunk)

        all_rels = reduce(list.__add__, [r.get("relationships") or [] for r in records], [])
        rel_chunks = list(divide_chunks(all_rels, 100))
        for chunk in tqdm_async(rel_chunks, desc="Importing Relations", unit="Relation Chunks"):
            _rel_import(chunk)

    def clean(self):
        """Delete every node and relationship in the database."""
        self._query("MATCH (n) DETACH DELETE n")

    def upsert(self, embedding_dimension: int, records: List[dict]):
        """Load *records* into the graph and (re)create all indexes.

        Args:
            embedding_dimension: Dimensionality of Description embeddings.
            records: Extraction records; see _batch_load.
        """
        if records:
            self._batch_load(records)

        # Fix: the original lines ended with stray commas, building and
        # discarding one-element tuples on every call.
        # for News
        self._create_keyword_index(NODE_LABEL_NEWS, "content")
        self._create_keyword_index(NODE_LABEL_NEWS, "title")
        self._create_search_index(NODE_LABEL_NEWS, "reported_on")

        # for Description
        self._create_keyword_index(NODE_LABEL_DESCRIPTION, "content")
        self._create_vector_index(NODE_LABEL_DESCRIPTION, "content", embedding_dimension)
        self._create_search_index(NODE_LABEL_DESCRIPTION, "occurred_on")

        # for Entity
        self._create_keyword_index(NODE_LABEL_ENTITY, "id")
