from uuid import uuid4
import os
import orjson
from tqdm import tqdm
from tqdm.asyncio import tqdm as tqdm_async
from functools import reduce
from concurrent.futures import ThreadPoolExecutor

from typing import List, Optional, Dict, Tuple
from langchain_core.runnables import Runnable
from langchain_core.documents import Document

from consts import(
    NODE_LABEL_NEWS,
    NODE_LABEL_DESCRIPTION,
    NODE_LABEL_ENTITY,
    NODE_LABEL_REPORTER,
    NODE_LABEL_VOICEOVER,
    NODE_LABEL_CAMERAMAN,
    RELATION_TYPE_MENTIONS,
    RELATION_TYPE_COMPOSES
)
from consts import (
    _FILE_META,
    _FILE_NER,
    _FILE_GRAPH,
    _FILE_GRAPH_COMPRESSED,
    _FILE_GRAPH_VECTORIZED,
)

from load.entity import (
    NER,NERS,
)


from load.prompt import (
    prompt_extract_ner,
    prompt_extract_ner_event,
    prompt_extract_ner_lead,
    prompt_extract_ner_extra,
    prompt_summarize_news
)

from storage import Neo4jStorage

from llm import LLM, LLMFunction
from load.embedding import EmbeddingService, Embedder
from utils import divide_chunks, _save, _load
from exceptions import NoDocumentFoundError
from loguru import logger

# Neo4j database schema. Keep the schema as simple as possible so that the
# Cypher generated at query time is more likely to be accurate.
# 1. Relationship descriptions are omitted; a Description node is used instead.
# Reporter          Voiceover          Cameraman
#     |-----------------|-----------------|
#       \               |              /
#          \            |            /
#            \---------------------/
#                       |-- COMPOSES
#                       |
#                     News (reported_on, title, content)
#                       |
#                       |-- MENTIONS
#                       |
#                 Description (occurred_on, content, embedding)
#                       |
#                       |-- MENTIONS
#                       |
#                    Entity (id, content, embedding, summary)
class Checkpoint:
    """Checkpointed pipeline that builds a news knowledge graph in Neo4j.

    Each stage (NER extraction, graph construction, compression,
    vectorization, import) persists its output to a file under
    ``working_dir``; on re-run a stage is skipped when its output file
    already exists, making the pipeline resumable.
    """

    def __init__(self, working_dir: str = "/tmp/randmin123j"):
        """Initialize backing services and stage-file paths.

        Args:
            working_dir: Directory holding the per-stage checkpoint files.
        """
        self.storage = Neo4jStorage()
        self.llm = LLM()
        self.embedder = Embedder()
        self.working_dir = working_dir
        # One file per pipeline stage; existence of a file marks the stage done.
        self.file_meta  = os.path.join(self.working_dir, _FILE_META)
        self.file_ner = os.path.join(self.working_dir, _FILE_NER)
        self.file_graph = os.path.join(self.working_dir, _FILE_GRAPH)
        self.file_graph_compressed = os.path.join(self.working_dir, _FILE_GRAPH_COMPRESSED)
        self.file_vectorized = os.path.join(self.working_dir, _FILE_GRAPH_VECTORIZED)
        self.file_vectorized_nodes = os.path.join(self.working_dir, f"{_FILE_GRAPH_VECTORIZED}_nodes.json")
        self.file_vectorized_relations = os.path.join(self.working_dir, f"{_FILE_GRAPH_VECTORIZED}_relations.json")

    def reset(self) -> None:
        """Wipe the Neo4j database (delegates to storage.clean)."""
        self.storage.clean()

    async def batch_import(self) -> Tuple[int, int]:
        """Import the vectorized graph file into Neo4j.

        Returns:
            Tuple of (node count, relationship count) of the first record.

        Raises:
            Exception: If the vectorized graph file does not exist yet.
        """
        if not os.path.exists(self.file_vectorized):
            raise Exception(f"Cannot importing the nodes and relations due to {self.file_vectorized} not found")
        records = _load(self.file_vectorized)

        # Upsert everything into the database; the embedding dimension is
        # needed so storage can (re)create the vector index correctly.
        self.storage.upsert(self.embedder.get_embedding_dimension(), records)

        logger.debug(
            "Finished the importing Nodes:"
            f" {len(records[0].get('nodes'))} "
            f" Relationship: {len(records[0].get('relationships'))}"
        )
        return len(records[0].get("nodes")), len(records[0].get("relationships"))
        
    async def process(self):
        """Run every pipeline stage that has not produced its output file yet.

        Stages, in order: NER extraction -> graph build -> graph compression
        -> embedding -> split into node/relation JSON files for apoc import.

        Raises:
            NoDocumentFoundError: If the metadata input file is missing.
        """
        # Ensure the working directory exists (mode 511 == 0o777).
        os.makedirs(self.working_dir, mode=511, exist_ok=True)

        if not os.path.exists(self.file_meta):
            raise NoDocumentFoundError(self.file_meta)

        if not os.path.exists(self.file_ner):
            # Entity/relation (NER) information has not been extracted yet.
            documents = _load(self.file_meta)
            # Extract entity/relation information via the LLM.
            records = await self._extract_ner(documents)
            # Persist the NER results.
            _save(records, self.file_ner)
            logger.debug(f"Finished the NER exracting {self.file_meta} - Input: {len(documents)} / Output: {len(records)}")

        if not os.path.exists(self.file_graph):
            # The NER results have not been post-processed yet.
            records1 = _load(self.file_ner)
            # Merge into a knowledge graph (nodes + relationships).
            records2 = await self._post_process(records1)
            # Save the post-processed graph.
            _save(records2, self.file_graph)
            logger.debug(f"Finished the Post Processing {self.file_ner} - Input: {len(records1)} / Output: {len(records2)}")

        if not os.path.exists(self.file_graph_compressed):
            # The graph has not been hierarchically compressed yet.
            records1 = _load(self.file_graph)
            # Compress the graph (currently a no-op, see _compress_graph).
            records2 = await self._compress_graph(records1)
            # Save the compressed graph.
            _save(records2, self.file_graph_compressed)
            logger.debug(f"Finished the Compress graph {self.file_graph} - Input: {len(records1)} / Output: {len(records2)}")

        if not os.path.exists(self.file_vectorized):
            # The graph has not been vectorized yet.
            records1 = _load(self.file_graph_compressed)
            # Compute embeddings for the graph's Description nodes.
            records2 = await self._vectorize_graph(records1)
            # Save the vectorized result.
            _save(records2, self.file_vectorized)
            logger.debug(f"Finished the embedding {self.file_vectorized}")

        if not os.path.exists(self.file_vectorized_nodes) or not os.path.exists(self.file_vectorized_relations):
            # embedding for Description
            records = _load(self.file_vectorized)
            # split the final data into nodes & relationships for batch load from json in neo4j server
            _save(records[0]["nodes"], self.file_vectorized_nodes)
            _save(records[0]["relationships"], self.file_vectorized_relations)
        
            # NOTE: `row.stpe` / `row.ttpe` in the Cypher below match the
            # (misspelled but consistent) keys produced by
            # create_relationship() in _post_process — do not "fix" one
            # without the other.
            logger.debug(
                f"Please use the following command to load the json files into Neo4j in neo4j server cli.\n"
                """
CALL apoc.load.json("file:///data/vectorized_nodes.json")
YIELD value
UNWIND value AS row
CALL apoc.merge.node([row.type], {id: row.id}, row.properties, {}) YIELD node 
RETURN distinct 'done' AS result

CALL apoc.load.json("file:///data/vectorized_relations.json")
YIELD value
UNWIND value AS row
CALL apoc.merge.node([row.stpe], {id: row.sid}, {}, {}) YIELD node as source
CALL apoc.merge.node([row.ttpe], {id: row.tid}, {}, {}) YIELD node as target
CALL apoc.merge.relationship(source, row.type, {}, {}, target) YIELD rel
RETURN distinct 'done'
""")
    
    def _ner_to_graph(self, document: dict, ners: List[NER]) -> dict:
        """Flatten one document plus its extracted NER triples into a record.

        Args:
            document: Chunk dict with ``metadata`` (title, reported_on,
                chunk_id, optional crew lists) and ``page_content``.
            ners: Extracted (head, tail, description, occurred_on) items.

        Returns:
            A dict keyed by the news id (date - title - chunk_id) carrying
            the news fields and a list of description dicts with fresh UUIDs.
        """
        descriptions = [
            {
                "id": str(uuid4()),
                "head": n.head, 
                "tail": n.tail, 
                "description": n.description, 
                "occurred_on": n.occurred_on
             } for n in ners
        ]

        return {
            "id": f"{document['metadata']['reported_on']} - {document['metadata']['title']} - {document['metadata']['chunk_id']}",
            "title": document["metadata"]['title'], 
            "reported_on": document["metadata"]['reported_on'], 
            "cameraman": document["metadata"].get('cameraman', []),
            "reporter": document["metadata"].get('reporter', []),
            "voiceover": document["metadata"].get('voiceover', []),
            "content": document["page_content"],
            "descriptions": descriptions
        }

    async def _extract_ner(self, documents: List[Document]) -> List[dict]:
        """Run the NER prompts against every document chunk in parallel.

        Each document is sent through every enabled prompt; the per-prompt
        NERS results are merged with ``NERS.from_ners`` before being turned
        into a graph record.

        Args:
            documents: Document chunks to extract entities from.

        Returns:
            One graph record (see _ner_to_graph) per input document.
        """
        prompts = [
            prompt_extract_ner, 
            # prompt_extract_ner_event, 
            # prompt_extract_ner_extra, 
            prompt_extract_ner_lead
        ]
        
        chains = [self.llm.create("Extract NER", prompt, NERS) for prompt in prompts]
        
        with ThreadPoolExecutor(max_workers=25) as executor:
            def llm(chains: List[LLMFunction], doc: dict) -> List[dict]:
                # Fan out one future per prompt for this document.
                params = {"input": doc.get("page_content"), "reported_on": doc.get("metadata").get("reported_on")}

                futures = [
                    executor.submit(chain.invoke, **params) 
                for chain in chains]

                return doc, futures

            def merge(result) -> dict:
                # Block on all prompt futures for a doc and merge their NERS.
                doc, futures = result
                ners = [f.result() for f in futures]
                entities = reduce(NERS.from_ners, ners).entities
                return self._ner_to_graph(doc, entities)
            
            records = []

            # NOTE(review): the list comprehension submits ALL futures before
            # tqdm starts iterating, so the bar tracks merge/wait progress,
            # not submission — presumably intentional; confirm.
            for result in tqdm(
                [llm(chains, doc) for doc in documents],
                total=len(documents),
                desc="Extracting entities from chunks",
                unit="chunk",
            ):
                records.append(merge(result))

            return records

    async def _post_process(self, records: List[dict]) -> List[dict]:
        """Convert flat NER records into node and relationship dicts.

        Builds News / crew / Description / Entity nodes plus COMPOSES and
        MENTIONS relationships following the schema diagram above.

        Args:
            records: Output of _extract_ner.

        Returns:
            A single-element list: [{"nodes": [...], "relationships": [...]}].
        """
        nodes = []
        relationships = []

        def create_node(id: str, type: str, properties=None):
            return {
                "id": id,
                "type": type,
                "properties": properties if properties else {}
            }

        # NOTE: "stpe"/"ttpe" (source/target type) are misspelled but are
        # relied upon by the apoc import Cypher in process(); keep in sync.
        def create_relationship(type: str, source_id: str, target_id: str, source_type: str, target_type: str):
            return {
                "type": type,
                "sid": source_id,
                "tid": target_id,
                "stpe": source_type,
                "ttpe": target_type
            }
            
        for record in records:
            descriptions = record.get("descriptions")
            news = create_node(id=record.get("id"), type=NODE_LABEL_NEWS, properties={"title": record.get("title"), "content": record.get("content"), "reported_on": record.get("reported_on")})
            nodes.append(news)

            # Crew members each COMPOSE the news item.
            for name in record.get('cameraman', []):
                s = create_node(id=name, type=NODE_LABEL_CAMERAMAN)
                nodes.append(s)
                relationships.append(create_relationship(source_id=name, target_id=news.get("id"), type=RELATION_TYPE_COMPOSES, source_type=NODE_LABEL_CAMERAMAN, target_type=NODE_LABEL_NEWS))
            for name in record.get('reporter', []):
                s = create_node(id=name, type=NODE_LABEL_REPORTER)
                nodes.append(s)
                relationships.append(create_relationship(source_id=name, target_id=news.get("id"), type=RELATION_TYPE_COMPOSES, source_type=NODE_LABEL_REPORTER, target_type=NODE_LABEL_NEWS))
            for name in record.get('voiceover', []):
                s = create_node(id=name, type=NODE_LABEL_VOICEOVER)
                nodes.append(s)
                relationships.append(create_relationship(source_id=name, target_id=news.get("id"), type=RELATION_TYPE_COMPOSES, source_type=NODE_LABEL_VOICEOVER, target_type=NODE_LABEL_NEWS))

            # News MENTIONS Description; Description MENTIONS head/tail Entity.
            for desc in descriptions:
                s = create_node(id=desc.get("head"), type=NODE_LABEL_ENTITY)
                e = create_node(id=desc.get("tail"), type=NODE_LABEL_ENTITY)
                d = create_node(id=desc.get("id"), type=NODE_LABEL_DESCRIPTION, properties={"occurred_on": desc.get("occurred_on"), "content": desc.get("description")})

                nodes.append(s)
                nodes.append(e)
                nodes.append(d)

                relationships.append(create_relationship(source_id=d.get("id"), target_id=s.get("id"), type=RELATION_TYPE_MENTIONS, source_type=NODE_LABEL_DESCRIPTION, target_type=NODE_LABEL_ENTITY))
                relationships.append(create_relationship(source_id=d.get("id"), target_id=e.get("id"), type=RELATION_TYPE_MENTIONS, source_type=NODE_LABEL_DESCRIPTION, target_type=NODE_LABEL_ENTITY))
                relationships.append(create_relationship(source_id=news.get("id"), target_id=d.get("id"), type=RELATION_TYPE_MENTIONS, source_type=NODE_LABEL_NEWS, target_type=NODE_LABEL_DESCRIPTION))

        nodes = [orjson.loads(n) for n in set([orjson.dumps(n) for n in nodes])]  # remove the duplicated nodes, and no need to dedup the relationships since it's should not be duplicated
        return [{"nodes": nodes, "relationships": relationships}]

    async def _compress_graph(self, records: List[dict]) -> List[dict]:
        # Merge Entity/Description nodes with very high similarity (TODO);
        # currently a pass-through.
        return records

    async def _vectorize_graph(self, records: List[dict]) -> List[dict]:
        """Attach an embedding to every Description node, in place.

        Args:
            records: [{"nodes": [...], "relationships": [...]}] graph records.

        Returns:
            The same records, with Description node properties augmented
            with an "embedding" vector.
        """
        from functools import reduce
        all_nodes = reduce(list.__add__, [r.get("nodes") for r in records])

        description_nodes = [n for n in all_nodes if n.get("type") == NODE_LABEL_DESCRIPTION]
        
        # The service is primed with all contents up front, then queried per node.
        es = EmbeddingService([n["properties"]["content"] for n in description_nodes])

        for desc in description_nodes:
            desc["properties"]["embedding"] = await es.get(desc["properties"]["content"])
        
        return records

    def _group_by_content(self, descriptions: List[dict]) -> dict:
            # Group description dicts by their "content" field.
            # NOTE(review): itertools.groupby only groups CONSECUTIVE equal
            # keys; if `descriptions` is not pre-sorted by content, equal
            # keys may land in separate groups (last one wins in the dict) —
            # confirm callers sort first.
            from itertools import groupby

            data = groupby(descriptions, key=lambda x: x["content"])
            return dict([(key, list(group)) for key, group in data])

    def _merge_description_semantically(self, keyword: str, descriptions: List[dict]) -> List[dict]:
        # Currently just orders descriptions chronologically (None dates first,
        # via the "" fallback); semantic merging is not implemented yet.
        return sorted(descriptions, key=lambda x: x["occurred_on"] or "")

    # Generate summaries for small batches of descriptions (map step).
    def _summarize_map(self, data: List[dict], keyword: str) -> List[str]:
        """Summarize `data` in batches of 50 with the LLM.

        Args:
            data: Description strings (or dicts stringified via f-string).
            keyword: The entity the summaries should be centered on.

        Returns:
            One summary string per batch of 50 inputs.
        """
        func_summarize_map = self.llm.create("Summarize News", prompt_summarize_news)

        # Split the data into batches.
        batches = list(divide_chunks(data, 50))
        
        summaries = []

        # Generate a summary for each batch.
        for batch in batches:
            descriptions = "\n".join([f"{entry}" for entry in batch])
            
            # Generate the summary of the current batch.
            summaries.append(func_summarize_map.invoke(keyword=keyword, input=descriptions))

        return [result.content for result in summaries]

    # Produce the final, single summary (reduce step).
    def summarize(self, data: List[dict], keyword: str) -> str:
        """Map-reduce summarization of description records down to one string.

        Repeatedly summarizes in batches of 50 until a single summary remains.
        NOTE(review): assumes `data` is non-empty — an empty list makes the
        final index raise IndexError; confirm callers guarantee this.
        """
        # Step 1: render each record as "date - content" lines, then shrink.
        batch_summaries = [f"{entry.get('occurred_on')} - {entry.get('content')}" for entry in data]

        while(len(batch_summaries) > 1):
            batch_summaries = self._summarize_map(batch_summaries, keyword)
        
        return batch_summaries[0]
    
    def _summarize(self, entity: dict):
        """Summarize one high-degree entity and store the summary on its node.

        Returns:
            (keyword, descriptions, semantical_merged, summary) for logging.
        """
        keyword = entity.get("id")
        degree = entity.get("degree")
        descriptions = self.storage.get_associated_descriptions(keyword)
        semantical_merged = self._merge_description_semantically(keyword, descriptions)
        # NOTE(review): summarizes the UNSORTED `descriptions`, not
        # `semantical_merged` — looks unintentional; confirm which was meant.
        summary = self.summarize(descriptions, keyword)
        self.storage.update_properties(keyword, label=NODE_LABEL_ENTITY, summary=summary)
        return keyword, descriptions, semantical_merged, summary

    def optimize(self, min_degree: int = 10):
        """Summarize every entity whose degree is at least `min_degree`.

        Args:
            min_degree: Minimum node degree an entity needs to be summarized.

        Returns:
            The number of entity nodes processed.
        """
        nodes = self.storage.get_nodes_by_degree(min_degree=min_degree)

        # Track which keywords are still pending, for progress logging.
        bak = set([n.get('id') for n in nodes])

        with ThreadPoolExecutor(max_workers=25) as executor:

            # NOTE(review): the loop variable `keyword` is actually a node
            # dict (what _summarize expects), not a string — works, but the
            # name is misleading. All futures are submitted before iteration.
            for result in tqdm(
                [executor.submit(self._summarize, keyword) for keyword in nodes],
                total=len(nodes),
                desc="Semantically Summarizing",
                unit="keyword",
            ):
                keyword, descriptions, semantical_merged, summary = result.result()
                logger.debug(f"Summarize {keyword} - original:{len(descriptions)} - semantical_merged: {len(semantical_merged)} - summary:{summary}")
                bak.remove(keyword)
                logger.debug(f"Left:{bak}")

        # NOTE(review): upsert with an empty record list — presumably triggers
        # an index/embedding refresh in storage; confirm intent.
        self.storage.upsert(self.embedder.get_embedding_dimension(), [])
        return len(nodes)
