import argparse
import sys
from collections.abc import Callable

import toml
from toml import TomlDecodeError

from src.marshalers import (
    HTMLClusterMarshaler,
    HTMLWordCloudMarshaler,
    ImageClusterMarshaler,
    ImageWordCloudMarshaler,
    KMeansClusterExcelMarshaler,
    LDAKeywordsExcelMarshaler,
    LDAStatisticsMarshaler,
    LDAVisualizationMarshaler,
)
from src.pipelines import Dispatch, Sequential
from src.processors import (
    ChineseEmotionSmallClassifier,
    EmotionalClassificationProcessor,
    GensimLDAProcessor,
    JiebaKeywordExtractionProcessor,
    JiebaTokenizationProcessor,
    KMeansClusterizationProcessor,
)
from src.unmarshalers import DocumentUnmarshaler, UserUnmarshaler
from src.utils import OutputManager

# The main parser
#
# Parses the command line arguments shared by every subcommand: the required
# `filename` positional, plus the output and cache directories.
#
# NOTE(review): the subparsers positional is registered before `filename`, so
# argparse relies on backtracking to split them — confirm the intended CLI
# argument order before touching this.
parser = argparse.ArgumentParser(
    description="Postnatal Depression Analysis Tool.",
    epilog="Use -h after a subcommand to see the detailed help for it.",
    exit_on_error=False,
)
subparsers = parser.add_subparsers(dest="subcommand", required=True, help="Subcommands")
parser.add_argument("filename", type=str, help="The path to the file to process")

# The two directory options share the same shape, so declare them as data and
# register them in one loop.
_DIRECTORY_OPTIONS = (
    (
        ("-o", "--output-dir"),
        "./output",
        "The directory to output the results to. Defaults to './output'.",
    ),
    (
        ("--cache-dir",),
        "./cache",
        "The directory to cache the models to. Defaults to './cache'.",
    ),
)
for _flags, _default, _help in _DIRECTORY_OPTIONS:
    parser.add_argument(*_flags, type=str, default=_default, help=_help)

# Wordcloud generation
#
# This subcommand generates wordclouds from the posts.
wordcloud_parser = subparsers.add_parser(
    "wordcloud",
    # Fixed copy-paste: this previously read "Perform semantic analysis", which
    # contradicted the comment above and the sibling subcommands' help texts.
    help="Generate wordclouds from the posts",
    exit_on_error=False,
)
wordcloud_parser.add_argument(
    "-c",
    "--content-column",
    type=str,
    required=True,
    help="The column name of the content to analyze. Usually the column with the posts' content.",
)
wordcloud_parser.add_argument(
    "-s",
    "--stopwords",
    type=str,
    default="./metadata/stopwords.txt",
    help="The path to the stopwords file.",
)

# LDA analysis
#
# This subcommand performs LDA analysis on the posts.
lda_parser = subparsers.add_parser("lda", help="Perform LDA analysis", exit_on_error=False)

# Register the two options table-style: (flags, keyword arguments).
for _flags, _kwargs in (
    (
        ("-c", "--content-column"),
        {
            "type": str,
            "required": True,
            "help": "The column name of the content to analyze. Usually the column with the posts' content.",
        },
    ),
    (
        ("-s", "--stopwords"),
        {
            "type": str,
            "default": "./metadata/stopwords.txt",
            "help": "The path to the stopwords file.",
        },
    ),
):
    lda_parser.add_argument(*_flags, **_kwargs)

# User analysis
#
# This subcommand analyzes the users: it extracts emotions from the posts (through AI Models) and clusters the users.
user_parser = subparsers.add_parser("user", help="Perform user analysis", exit_on_error=False)

# Both required column options have the same shape; only the flag names and the
# word interpolated into the help text differ.
for _short, _long, _what in (
    ("-u", "--username-column", "username"),
    ("-c", "--content-column", "content"),
):
    user_parser.add_argument(
        _short,
        _long,
        type=str,
        required=True,
        help=f"The column name of the {_what} to analyze. Usually the column with the posts' {_what}.",
    )


def main() -> None:
    try:
        args = parser.parse_args()
        global_config = toml.load("deploy-config.toml")
    except argparse.ArgumentError as e:
        print(e.message)
        parser.print_help()
        sys.exit(1)
    except TomlDecodeError as e:
        print("invalid TOML file:", e, file=sys.stderr)
        sys.exit(1)
    except FileNotFoundError as e:
        print("TOML file not found:", e.filename, file=sys.stderr)
        sys.exit(1)

    output_manager = OutputManager(args.output_dir, args.filename)

    pipeline: Callable
    match args.subcommand:
        case "wordcloud":
            pipeline = create_wordcloud_pipeline(output_manager, global_config, args)
        case "lda":
            pipeline = create_lda_pipeline(output_manager, global_config, args)
        case "user":
            pipeline = create_user_pipeline(output_manager, global_config, args)
        case _:
            raise NotImplementedError()  # unreachable

    pipeline()


def create_wordcloud_pipeline(output_manager: OutputManager, global_config: dict, args: argparse.Namespace) -> Callable:
    """Build the wordcloud pipeline: read the posts, tokenize them, extract keywords, then emit image and HTML wordclouds."""
    reader = DocumentUnmarshaler(path=args.filename, column=args.content_column)
    tokenizer = JiebaTokenizationProcessor(args.stopwords)
    keyword_extractor = JiebaKeywordExtractionProcessor()
    outputs = Dispatch(
        ImageWordCloudMarshaler(output_manager, global_config),
        HTMLWordCloudMarshaler(output_manager, global_config),
    )
    return Sequential(reader, tokenizer, keyword_extractor, outputs)


def create_lda_pipeline(output_manager: OutputManager, global_config: dict, args: argparse.Namespace) -> Callable:
    """Build the LDA pipeline: read the posts, tokenize them, run LDA, then emit the visualization, statistics, and keyword outputs."""
    reader = DocumentUnmarshaler(path=args.filename, column=args.content_column)
    tokenizer = JiebaTokenizationProcessor(args.stopwords)
    lda = GensimLDAProcessor(global_config)
    outputs = Dispatch(
        LDAVisualizationMarshaler(output_manager),
        LDAStatisticsMarshaler(output_manager, global_config),
        LDAKeywordsExcelMarshaler(output_manager),
    )
    return Sequential(reader, tokenizer, lda, outputs)


def create_user_pipeline(output_manager: OutputManager, global_config: dict, args: argparse.Namespace) -> Callable:
    """Build the user pipeline: read user posts, classify emotions with the model, cluster users, then emit image/HTML/Excel outputs."""
    reader = UserUnmarshaler(
        path=args.filename,
        username_column=args.username_column,
        content_column=args.content_column,
    )
    # The classifier model is downloaded/loaded via the cache directory.
    emotion_classifier = EmotionalClassificationProcessor(ChineseEmotionSmallClassifier(args.cache_dir))
    clusterizer = KMeansClusterizationProcessor(global_config)
    outputs = Dispatch(
        ImageClusterMarshaler(output_manager, global_config),
        HTMLClusterMarshaler(output_manager, global_config),
        KMeansClusterExcelMarshaler(output_manager),
    )
    return Sequential(reader, emotion_classifier, clusterizer, outputs)


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
