# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License

"""A module containing run_workflow method definition."""

import logging

import pandas as pd

from graphrag.config.models.graph_rag_config import GraphRagConfig
from graphrag.data_model.schemas import DOCUMENTS_FINAL_COLUMNS
from graphrag.index.typing.context import PipelineRunContext
from graphrag.index.typing.workflow import WorkflowFunctionOutput
from graphrag.utils.storage import load_table_from_storage, write_table_to_storage

logger = logging.getLogger(__name__)


async def run_workflow(
    _config: GraphRagConfig,
    context: PipelineRunContext,
) -> WorkflowFunctionOutput:
    """All the steps to transform final documents."""
    logger.info("Workflow started: create_final_documents")

    # Both input tables live in the pipeline's output storage.
    storage = context.output_storage
    documents = await load_table_from_storage("documents", storage)
    text_units = await load_table_from_storage("text_units", storage)

    # Attach text-unit ids to each document, then persist the result
    # back under the same "documents" table name.
    final_documents = create_final_documents(documents, text_units)
    await write_table_to_storage(final_documents, "documents", storage)

    logger.info("Workflow completed: create_final_documents")
    return WorkflowFunctionOutput(result=final_documents)


def create_final_documents(
    documents: pd.DataFrame, text_units: pd.DataFrame
) -> pd.DataFrame:
    """All the steps to transform final documents.

    Attach to each document the list of text-unit (chunk) ids that were
    derived from it, producing the final documents table.

    ## text_units
    List of all text chunks parsed from the input documents.

    | name              | type  | description |
    | ----------------- | ----- | ----------- |
    | id                | str   | Unique identifier of the chunk. |
    | text              | str   | Raw full text of the chunk. |
    | n_tokens          | int   | Number of tokens in the chunk. This should normally match the `chunk_size` config parameter, except for the last chunk which is often shorter. |
    | document_ids      | str[] | List of document IDs the chunk came from. This is normally only 1 due to our default groupby, but for very short text documents (e.g., microblogs) it can be configured so text units span multiple documents. |
    | entity_ids        | str[] | List of entities found in the text unit. |
    | relationship_ids  | str[] | List of relationships found in the text unit. |

    Example input::

        text_units = [
          {
            "id": "chunk_001",
            "text": "The company announced new products...",
            "n_tokens": 150,
            "document_ids": ["doc_a"],
            "entity_ids": ["entity_1", "entity_2"],
            "relationship_ids": ["rel_1"]
          },
          {
            "id": "chunk_002",
            "text": "Market analysts predict growth...",
            "n_tokens": 200,
            "document_ids": ["doc_a"],
            "entity_ids": ["entity_3"],
            "relationship_ids": []
          },
          {
            "id": "chunk_003",
            "text": "In related news from both sources...",
            "n_tokens": 180,
            "document_ids": ["doc_a", "doc_b"],
            "entity_ids": ["entity_1", "entity_4"],
            "relationship_ids": ["rel_2"]
          },
          {
            "id": "chunk_004",
            "text": "The CEO stated in an interview...",
            "n_tokens": 120,
            "document_ids": ["doc_b"],
            "entity_ids": ["entity_5"],
            "relationship_ids": ["rel_3"]
          }
        ]
    """
    # Step 1: one row per (chunk, source document) pair. A chunk that spans
    # multiple documents (e.g. chunk_003 over doc_a and doc_b) yields one row
    # per document, e.g.:
    #   {"chunk_id": "chunk_003", "chunk_doc_id": "doc_a", "chunk_text": "..."}
    #   {"chunk_id": "chunk_003", "chunk_doc_id": "doc_b", "chunk_text": "..."}
    exploded = (
        text_units.explode("document_ids")
        .loc[:, ["id", "document_ids", "text"]]
        .rename(
            columns={
                "document_ids": "chunk_doc_id",
                "id": "chunk_id",
                "text": "chunk_text",
            }
        )
    )

    # Step 2: attach the document attributes (title, text, creation_date, ...)
    # to each chunk row. The inner join drops any chunk rows whose
    # chunk_doc_id does not match a known document id.
    joined = exploded.merge(
        documents,
        left_on="chunk_doc_id",
        right_on="id",
        how="inner",
        copy=False,
    )

    # Step 3: collapse back to one row per document, collecting its chunk ids
    # in first-seen order (sort=False), e.g.:
    #   {"id": "doc_a", "text_unit_ids": ["chunk_001", "chunk_002", "chunk_003"]}
    #   {"id": "doc_b", "text_unit_ids": ["chunk_003", "chunk_004"]}
    # Note: "id" becomes the index of the grouped frame; the merge below joins
    # on that index level by name.
    docs_with_text_units = joined.groupby("id", sort=False).agg(
        text_unit_ids=("chunk_id", list)
    )

    # Step 4: re-join the collected text-unit ids onto the full documents
    # table. The right join keeps documents that produced no text units
    # (their text_unit_ids will be NaN).
    rejoined = docs_with_text_units.merge(
        documents,
        on="id",
        how="right",
        copy=False,
    ).reset_index(drop=True)

    rejoined["id"] = rejoined["id"].astype(str)
    # Sequential human-friendly id derived from final row order.
    rejoined["human_readable_id"] = rejoined.index

    # Guarantee the metadata column exists even when the input had none, so
    # the final column selection below never raises.
    if "metadata" not in rejoined.columns:
        rejoined["metadata"] = pd.Series(dtype="object")

    return rejoined.loc[:, DOCUMENTS_FINAL_COLUMNS]
