import hashlib
import time
import pandas as pd
from pathlib import Path
from hashlib import md5
import re
from typing import List, Tuple, Dict
import logging
from concurrent.futures import ProcessPoolExecutor, as_completed
import os
from io import BytesIO
from enum import StrEnum, auto
import modin.pandas as mpd
import polars as pl
import numpy as np
import zstd

from backend.api.utils import dataframe_t, dataframe_wide2long, deprecated
from backend.analysis.upstream import UpstreamAnalysis
from backend.db.interface import GetDataBaseInterface as db

logger = logging.getLogger(__name__)


class FileType(StrEnum):
    xlsx = auto()
    csv = auto()
    parquet = auto()


class GeneDataType(StrEnum):
    tpm = auto()
    counts = auto()


class UploadFileProcessor:
    """
    A class to process uploaded files `sample_sheet`, `gene_expression_tpm`,
    `gene_expression_counts`, `rawdata`.
    Raises:
        FileNotFoundError: If the file does not exist.
        ValueError: If the file is not an xlsx file.
        RuntimeError: If there is an error reading or parsing the file.
    """

    @staticmethod
    def read_file(file: BytesIO, file_type: FileType) -> pd.DataFrame:
        """
        Read file and return a DataFrame using pandas.
        Args:
            file (BytesIO): The file to read.
            file_type (FileType): The type of the file.
        Returns:
            pd.DataFrame: The DataFrame containing the file data.
        Raises:
            RuntimeError: If there is an error reading or parsing the file.
        """
        try:
            match file_type:
                case FileType.xlsx:
                    df = pd.read_excel(file, sheet_name="SampleInfo", header=0,
                                       dtype={"CollectionTime": str},
                                       na_values=["", "NA", "null", "None", "999"])
                case FileType.csv:
                    df = pd.read_csv(file, header=0)
                case FileType.parquet:
                    df = pd.read_parquet(file)
            logger.info(
                f"{file_type} file successfully read")
            return df
        except Exception as e:
            logger.error(
                f"Error reading {file_type} file", exc_info=True)
            raise RuntimeError(
                f"Error reading or parsing {file_type} file: {e}"
            )

    @staticmethod
    def read_file_v2(file: BytesIO, file_type: FileType = FileType.parquet) -> pl.DataFrame:
        """
        Read parquet file and return a DataFrame using polars.
        Args:
            file (BytesIO): The file to read.
            file_type (FileType): Only parquet files are supported.
        Returns:
            pl.DataFrame: The DataFrame containing the file data.
        Raises:
            RuntimeError: If there is an error reading or parsing the file.
        """
        try:
            df = pl.read_parquet(file)
            logger.info(
                f"{file_type} file successfully read")
            return df
        except Exception as e:
            logger.error(
                f"Error reading {file_type} file", exc_info=True)
            raise RuntimeError(
                f"Error reading or parsing {file_type} file: {e}"
            )

    @staticmethod
    def sample_data_validation(name: str, dataframe: pd.DataFrame) -> bool:
        """
        Validate the uploaded file. Checks if the xlsx file is created according to the
        standard list in the 'Explain' sheet.

        Returns:
            bool: True if valid, False otherwise.
        Raises:
            RuntimeError: If there is an error transferring
            collection time or sample age to standard format.
            ValueError: If SampleID contains invalid characters or duplicates.
        """
        try:
            df = dataframe
            required_columns = set(['SampleID', 'CollectionTime', 'SampleAge', 'CollectionPart', 'ExperimentCategory',
                                    'Experiment', 'SampleDetail', 'DepositDatabase', 'Accession', 'Origin', 'FileName1',
                                    'MD5checksum1', 'FileName2', 'MD5checksum2'])
            df_columns = set(df.columns.to_list())
            if df_columns != required_columns:
                missing_cols = required_columns - df_columns
                extra_cols = df_columns - required_columns
                error_msg = ""
                if missing_cols:
                    error_msg += f"Missing columns: {', '.join(missing_cols)}. "
                if extra_cols:
                    error_msg += f"Extra columns: {', '.join(extra_cols)}."
                logger.error(f"{name} file columns are incorrect: {error_msg}")
                raise ValueError(
                    f"File columns are incorrect: {error_msg}\n"
                    f"Required columns are: {', '.join(required_columns)}\n"
                )

            # Check if CollectionTime and SampleAge is in the correct format
            df["CollectionTime"] = pd.to_datetime(df["CollectionTime"])
            logger.info(
                "CollectionTime column successfully converted to datetime")
            df["SampleAge"] = pd.to_numeric(df["SampleAge"]).astype("Int64")
            logger.info("SampleAge column successfully converted to numeric")
        except Exception as e:
            logger.error(
                f"Error transfer collection time or sample age to standard format: {name}", exc_info=True)
            raise RuntimeError(
                f"Error parsing file {name}: {e}")

        # Check if SampleID is in the correct format
        invalid_ids_index = df["SampleID"].apply(lambda x: not re.match(
            r"^[A-Za-z][A-Za-z0-9]*-\d{1,2}$", str(x)))
        invalid_ids = df.loc[invalid_ids_index, "SampleID"]
        if not invalid_ids.empty:
            logger.error(
                f"{name} SampleID contains invalid characters: {invalid_ids.to_list()}")
            raise ValueError(
                f"SampleID contains invalid characters: {invalid_ids.tolist()}\n"
                "SampleID must follow the format: start with a letter (A-Z or a-z), "
                "followed by any number of letters or digits, then a hyphen '-', "
                "and end with 1 or 2 digits. Example: 'A-1', 'SBA23-11'.")
        else:
            logger.info(f"{name} SampleID format is valid.")

        duplicated_ids = df.loc[df["SampleID"].duplicated(), "SampleID"]
        if not duplicated_ids.empty:
            logger.error(
                f"{name} SampleID contains duplicate values: {duplicated_ids.tolist()}")
            raise ValueError(
                f"SampleID contains duplicate values: {duplicated_ids.tolist()}\n"
                "Each SampleID must be unique.")
        else:
            logger.info(f"{name} SampleID is unique.")

        not_nullable_columns = ["SampleID", "CollectionTime", "CollectionPart"]
        # 必填列不得为缺失
        null_mask = df[not_nullable_columns].isna().any(axis=1)
        if null_mask.any():
            bad = df.loc[null_mask, not_nullable_columns].head(5)
            logger.error(
                f"Required fields contain NULL in {name} file: \n{bad.to_string(index=False)}")
            raise ValueError(f"Required fields contain NULL: \n{bad.to_string(index=False)}")

        return True

    @staticmethod
    def check_md5(file_path: Path, expected_md5: str) -> Tuple[str, bool]:
        if not file_path.exists():
            logger.error(f"file {file_path.name} does not exist.")
            raise FileNotFoundError(
                f"File {file_path.name} does not exist in raw data path.")
        with open(file_path, "rb") as f:
            logger.info(f"{file_path.name} is checking md5 value")
            file_md5 = md5(f.read()).hexdigest()
        return file_path.name, file_md5 == expected_md5

    @staticmethod
    def gene_ex_validation(sample_sheet: pd.DataFrame, ex: pd.DataFrame) -> bool:
        """
        Validate the gene expression file.
        Args:
            df (pd.DataFrame): The DataFrame containing the gene expression data.
        Returns:
            bool: True if the gene expression file is valid, False otherwise.
        """
        sample_ids = set(sample_sheet["SampleID"].tolist())
        ex_sample_ids = ex.columns.tolist()
        ex_sample_ids.remove("gene_id")

        if sample_ids != set(ex_sample_ids):
            logger.error(
                "Sample IDs in the gene expression file do not match the sample sheet.")
            raise ValueError(
                "Sample IDs in the gene expression file do not match the sample sheet.")
        else:
            logger.info("Gene expression file validation passed.")
            return True

    @staticmethod
    def sample_file_md5(df: pd.DataFrame) -> dict[str, str]:
        """
        Get the md5 of sample files from the sample sheet DataFrame.
        Args:
            df (pd.DataFrame): The DataFrame containing the sample sheet data.
        Returns:
            dict: A dictionary containing the filenames and their corresponding md5 checksums.
        """
        files1 = df[["FileName1", "MD5checksum1"]]
        files2 = df[["FileName2", "MD5checksum2"]]
        files1_dict = dict(zip(files1["FileName1"], files1["MD5checksum1"]))
        files2_dict = dict(zip(files2["FileName2"], files2["MD5checksum2"]))
        files_dict = {**files1_dict, **files2_dict}
        logger.info("Sample file md5 checksums extracted successfully.")
        return files_dict

    @staticmethod
    def rawdata_validation(files_dict: dict, rawdata_path: Path, files_to_check: list[str]) -> Tuple[bool, List[str]]:
        """
        Validate the md5 of raw data.
        Args:
            df (pd.DataFrame): The DataFrame containing the sample sheet data.
            rawdata_path (Path): The path to the raw data directory.
            files_to_check (list[str]): List of filenames to check MD5.
        Returns:
            Tuple[bool, List[str]]: A tuple containing a boolean indicating
            whether the validation passed and a list of failed files.
        Raises:
            FileNotFoundError: If the raw data path does not exist.
        """
        if not rawdata_path.exists():
            logger.error(f"Rawdata path {rawdata_path} does not exist.")
            raise FileNotFoundError(
                f"Raw data path {rawdata_path} does not exist.")

        failed_files = []
        files_check = {file: files_dict[file]
                       for file in files_to_check if file in files_dict}
        max_workers = max(os.cpu_count() or 1, 2)
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            futures = {
                executor.submit(UploadFileProcessor.check_md5, rawdata_path / file_name, md5_checksum): file_name
                for file_name, md5_checksum in files_check.items()
            }
            for future in as_completed(futures):
                try:
                    file_name, is_valid = future.result()
                    if not is_valid:
                        logger.error(f"{file_name} md5 value check failed")
                        failed_files.append(file_name)
                except Exception as e:
                    logger.error(f"Error checking {futures[future]}: {e}")
                    failed_files.append(futures[future])

        if not failed_files:
            return True, []
        else:
            return False, failed_files

    @staticmethod
    def trans_to_smk_samples(dataframe: pd.DataFrame,
                             rawdata_path: Path,
                             to_file: bool = False,
                             output_path: Path | None = None) -> pd.DataFrame:
        """
        Convert the DataFrame to snakemake samples format.
        Args:
            dataframe (pd.DataFrame): The DataFrame to convert. The dataframe
                should be the processed dataframe from the UploadFileProcessor
                class (the dataframe to write into mysql).
            rawdata_path (Path): The path to the raw data directory.
            to_file (bool): Whether to save the converted DataFrame to a file.
            output_path (Path | None): The path to save the converted DataFrame.
                If None, the DataFrame will not be saved to a file.
        Returns:
            pd.DataFrame: The converted DataFrame.
        """

        sample_sheet = pd.DataFrame()
        sample_sheet["sample"] = dataframe["FileName1"].apply(
            lambda x: str(x).split("_")[0])
        sample_sheet["sample_id"] = dataframe["SampleID"]
        sample_sheet["read1"] = dataframe["FileName1"].apply(
            lambda x: str(Path(rawdata_path, x).absolute()))
        sample_sheet["read2"] = dataframe["FileName2"].apply(
            lambda x: str(Path(rawdata_path, x).absolute()))

        logger.info(
            "Sample sheet converted to snakemake samples format successfully.")

        if to_file:
            sample_sheet.to_csv(output_path, index=False)
            logger.info(f"Sample sheet saved to {output_path} successfully.")
        return sample_sheet


class PutDataBaseWrapper:
    """
    A wrapper class for adding data to the database.
    This class provides methods to insert experimental class, experiment,
    sample, gene expression data in TPM and counts format into the database.
    """

    def __init__(self,
                 sample_sheet: pd.DataFrame,
                 gene_expression_tpm: pl.DataFrame,
                 gene_expression_counts: pl.DataFrame):
        # `sample_sheet_wrapped` additionally carries the generated
        # "UniqueID", "UniqueEXID" and "ExpClass" columns.
        self.sample_sheet = sample_sheet
        self.gene_expression_tpm = gene_expression_tpm
        self.gene_expression_counts = gene_expression_counts
        self.sample_sheet_wrapped = self.__database_wrapper()

    def __database_wrapper(self) -> pd.DataFrame:
        """
        Wrap the valid DataFrame with "UniqueID", "UniqueEXID" and "ExpClass" columns
        for database insertion.
        Returns:
            pd.DataFrame: The DataFrame with additional columns.
        Raises:
            RuntimeError: If any of the ID columns cannot be generated.
        """
        try:
            df = PutDataBaseWrapper.add_row_md5(self.sample_sheet)
            timestamp_int = int(time.time())
            # Hex-encoded creation timestamp, shared by all ExpClass IDs
            # generated in this run.
            date_str = format(timestamp_int, "x").zfill(8)

            # Add a unique identifier for each sample:
            # "LRX" + 12-char row hash + 3-hex-digit row counter.
            hex_ids = [format(i+1, "x").zfill(3) for i in range(len(df))]
            hex_series_uid = pd.Series(hex_ids, index=df.index)
            df["UniqueID"] = "LRX" + df["row_md5"] + hex_series_uid
            logger.info("UniqueID column added to sample sheet.")

            # Add a unique experiment ID: all rows of an experiment share the
            # smallest row hash within that Experiment group.
            exp_md5 = df.groupby("Experiment")["row_md5"].transform("min")
            df["UniqueEXID"] = "TRCRIE" + exp_md5
            logger.info("UniqueEXID column added to sample sheet.")
            exp_class_grp = df.groupby("ExperimentCategory").ngroup() + 1
            df["ExpClass"] = [
                f"EXPC{date_str}{str(idx).zfill(3)}" for idx in exp_class_grp]
            logger.info("ExpClass column added to sample sheet.")

        except Exception as e:
            logger.error(
                f"Error occurred while wrapping sample sheet for database: {e}")
            # Fix: re-raise instead of falling through to the return below,
            # where `df` could be unbound and the real error was masked by an
            # UnboundLocalError.
            raise RuntimeError(
                f"Error occurred while wrapping sample sheet for database: {e}") from e

        return df.drop(columns=["row_md5"])

    @staticmethod
    def add_row_md5(df: pd.DataFrame, cols=None) -> pd.DataFrame:
        """
        Append a "row_md5" column: a 12-character MD5 prefix over the
        normalized, separator-joined row content.
        Args:
            df (pd.DataFrame): The input DataFrame (left unmodified).
            cols: Columns included in the hash; defaults to all columns in
                their current (fixed) order so hashes are reproducible.
        Returns:
            pd.DataFrame: A copy of ``df`` with the "row_md5" column added.
        """
        # Columns participating in the hash; default is all columns in a
        # fixed order.
        cols = list(df.columns) if cols is None else list(cols)
        norm = pd.DataFrame(index=df.index)
        for c in cols:
            s = df[c]
            if pd.api.types.is_datetime64_any_dtype(s):
                # Normalize datetimes to a stable textual form; NaT -> "".
                norm[c] = s.dt.strftime("%Y-%m-%d %H:%M:%S").fillna("")
            else:
                norm[c] = s.astype("string").fillna("")
        # Unit separator avoids collisions from values containing commas etc.
        sep = "\x1f"
        joined = norm[cols].agg(sep.join, axis=1)
        out = df.copy()
        out["row_md5"] = joined.map(lambda s: hashlib.md5(s.encode("utf-8")).hexdigest()[:12])
        return out

    def communicate_id_in_db(self) -> list[dict[str, str]]:
        """
        Parsing the experiment class and experiment category from the sample_sheet to communicate
        with the database if the class ID is duplicated.
        Returns:
            list[dict[str, str]]: A list of dictionaries containing the experiment class
            and experiment category.
        """
        sample_sheet_wrapped = self.sample_sheet_wrapped
        exclass = sample_sheet_wrapped[[
            "ExpClass", "ExperimentCategory"]].drop_duplicates().to_dict(orient="records")
        logger.info(
            "Experiment class and category extracted for database communication.")
        return exclass

    @staticmethod
    async def get_existing_exp_from_db() -> pd.DataFrame:
        """
        Get existing experiment table from the database.
        Returns:
            pd.DataFrame: A DataFrame containing existing experiment table.
        """
        data = await db.get_exp_all()
        logger.info("Existing experiment table fetched from database.")
        return data

    async def db_insert(self, exclass: tuple[list[bool], List[Dict[str, str]]]) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """
        Parsing exp_sheet and sample_sheet from the sample_sheet_wrapped based on the communicated exclass.

        Args:
            exclass: Tuple[list[bool], List[Dict[str, str]]]: A tuple containing flags indicating
            whether the exclass need to be replaced. True for not to replace and False for replacing,
            and a list of dictionaries containing the experiment class and experiment category.

        Returns:
            Tuple[pd.DataFrame, pd.DataFrame]: A tuple containing two DataFrames:
                exp_sheet: experiment category DataFrame (new experiments only)
                sample_sheet: sample sheet DataFrame
        """
        if not all(exclass[0]):
            # At least one class ID clashed: adopt the ExpClass values the
            # database negotiation returned.
            new_exclass = pd.DataFrame.from_records(exclass[1])
            self.sample_sheet_wrapped = self.sample_sheet_wrapped.drop(columns=["ExpClass"]).merge(
                new_exclass, on="ExperimentCategory", how="left")
            logger.info("Exclass replaced in sample sheet.")

        exp_db = await PutDataBaseWrapper.get_existing_exp_from_db()
        exp_db = exp_db.set_index("Experiment")
        sheet = self.sample_sheet_wrapped.set_index("Experiment")

        # Update only these two columns from existing DB records.
        matched = sheet.index.intersection(exp_db.index)
        sheet.loc[matched, ["ExpClass", "UniqueEXID"]] = exp_db.loc[matched, ["ExpClass", "UniqueEXID"]]

        # Flag rows whose experiment already exists in the database.
        sheet["updated"] = False
        sheet.loc[matched, "updated"] = True

        self.sample_sheet_wrapped = sheet.reset_index()
        logger.info("Experiment sheet updated with existing database records.")

        # Only experiments NOT already in the database go into exp_sheet.
        exp_sheet_wrapped = self.sample_sheet_wrapped.copy()
        exp_sheet_wrapped = exp_sheet_wrapped[~exp_sheet_wrapped["updated"]]

        exp_sheet = exp_sheet_wrapped[[
            "ExpClass", "UniqueEXID", "Experiment"]].drop_duplicates()
        logger.info("Experiment sheet extracted from original sample sheet.")
        sample_sheet = self.sample_sheet_wrapped.drop(
            columns=["ExpClass", "ExperimentCategory", "Experiment", "FileName1",
                     "FileName2", "MD5checksum1", "MD5checksum2", "updated"])
        sample_sheet["FileName"] = None
        sample_sheet["Sample"] = None

        # Convert pandas NA values to plain None so the DB driver stores NULL.
        nullable_cols = [
            "FileName", "Sample", "SampleAge",
            "SampleDetail", "DepositDatabase", "Accession", "Origin"
        ]
        for col in (c for c in nullable_cols if c in sample_sheet.columns):
            sample_sheet[col] = sample_sheet[col].where(pd.notna(sample_sheet[col]), None)
        logger.info("Sample sheet extracted from original sample sheet.")

        return exp_sheet, sample_sheet

    @deprecated(replace="expression_wrapper_v2",
                remove_in="v3.0",
                since="v2.0")
    def expression_wrapper(self, sample_sheet: pd.DataFrame) -> Tuple[mpd.DataFrame, mpd.DataFrame]:
        """
        Wrap the gene expression DataFrames with a "SampleRealID" column for
        database insertion and convert them to long format.
        Args:
            sample_sheet (pd.DataFrame): Wrapped sample sheet containing
                "SampleID" and "UniqueID" columns.
        Returns:
            Tuple[mpd.DataFrame, mpd.DataFrame]: Long-format (tpm, counts)
            DataFrames.
        """
        tpm_t = dataframe_t(self.gene_expression_tpm)
        counts_t = dataframe_t(self.gene_expression_counts)

        ids = sample_sheet[["SampleID", "UniqueID"]]
        ids = ids.rename(columns={"UniqueID": "SampleRealID"})

        tpm_t_merge = tpm_t.merge(ids, on="SampleID", how="left")
        counts_t_merge = counts_t.merge(ids, on="SampleID", how="left")

        logger.info("UniqueID column added to gene expression data.")

        tpm = dataframe_wide2long(mpd.DataFrame(tpm_t_merge), "Tpm")
        counts = dataframe_wide2long(mpd.DataFrame(counts_t_merge), "Counts")

        logger.info("Gene expression data converted to long format.")

        return tpm, counts

    @staticmethod
    def _sort_gene_id_pl(df: pl.DataFrame) -> pl.DataFrame:
        """
        Sort rows by the numeric part of gene_id (g123 -> 123) so row order is
        consistent across tables.
        Assumes gene_id looks like g<number>; otherwise the extracted key is
        null and those rows sort first — filter beforehand if necessary.
        Args:
            df (pl.DataFrame): DataFrame containing a gene_id column.
        Returns:
            pl.DataFrame: The DataFrame sorted by the numeric part of gene_id.
        """
        df_sorted = df.with_columns(
            pl.col("gene_id").str.extract(
                r"g(\d+)").cast(pl.Int32).alias("gene_num")
        ).sort("gene_num").drop("gene_num")
        return df_sorted

    @staticmethod
    def _compress_expression_task(task: tuple[str, str, bytes, bytes, int]) -> dict:
        """
        Process-pool task: compress one sample's TPM / Counts byte buffers.
        task: (SampleID, UniqueID, tpm_bytes, counts_bytes, compression_level)
        """
        sid, uid, t_bytes, c_bytes, level = task
        t_blob = zstd.compress(t_bytes, level)
        c_blob = zstd.compress(c_bytes, level)
        return {
            "SampleID": sid,
            "UniqueID": uid,
            "TPMBlob": t_blob,
            "CountsBlob": c_blob
        }

    def expression_wrapper_v2(self, sample_sheet: pd.DataFrame | None = None,
                              processes: int | None = None,
                              compression_level: int = 3) -> pl.DataFrame:
        """
        Build a per-sample blob table (zstd-compressed float32 TPM / Counts
        vectors) tagged with each sample's "UniqueID" for database insertion.
        Args:
            sample_sheet (pd.DataFrame | None): Wrapped sample sheet with
                "SampleID" / "UniqueID"; defaults to ``sample_sheet_wrapped``.
            processes (int | None): Worker processes; auto-sized when None.
            compression_level (int): zstd compression level.
        Returns:
            pl.DataFrame: One row per sample with SampleID, UniqueID,
            TPMBlob and CountsBlob columns.
        Raises:
            ValueError: If the TPM and Counts tables disagree on genes.
        """

        logger.info(
            "Start building gene expression blob table for database insertion.")

        tpm_df = PutDataBaseWrapper._sort_gene_id_pl(self.gene_expression_tpm)
        counts_df = PutDataBaseWrapper._sort_gene_id_pl(
            self.gene_expression_counts)

        logger.info("Gene expression data sorted by gene ID.")

        if tpm_df.height != counts_df.height:
            raise ValueError("TPM / Counts gene row mismatch")

        if tpm_df["gene_id"].to_list() != counts_df["gene_id"].to_list():
            raise ValueError("TPM / Counts gene_id mismatch")

        sample_ids = [c for c in tpm_df.columns if c != "gene_id"]
        if sample_sheet is None:
            sample_sheet = self.sample_sheet_wrapped
        id_map = dict(zip(sample_sheet["SampleID"], sample_sheet["UniqueID"]))

        # Serialize each sample column as raw float32 bytes before compressing.
        np_dtype = np.float32
        tpm_bytes_map = {
            sid: tpm_df[sid].to_numpy().astype(np_dtype).tobytes()
            for sid in sample_ids
        }
        counts_bytes_map = {
            sid: counts_df[sid].to_numpy().astype(np_dtype).tobytes()
            for sid in sample_ids
        }

        # Task list: (SampleID, UniqueID, t_bytes, c_bytes, compression_level)
        tasks = [
            (sid, id_map.get(sid, ""),
             tpm_bytes_map[sid], counts_bytes_map[sid], compression_level)
            for sid in sample_ids
        ]

        max_workers = processes or min(len(tasks), os.cpu_count() or 1)
        if max_workers <= 1:
            # Avoid process-pool overhead for trivial workloads.
            rows = [PutDataBaseWrapper._compress_expression_task(
                tk) for tk in tasks]
        else:
            with ProcessPoolExecutor(max_workers=max_workers) as pool:
                rows = list(
                    pool.map(PutDataBaseWrapper._compress_expression_task, tasks))

        unified_df = pl.DataFrame(rows)
        logger.info(
            "Unified blob build done: samples=%d avg_tpm_blob=%dB avg_counts_blob=%dB",
            len(sample_ids),
            int(np.mean([len(r['TPMBlob']) for r in rows])) if rows else 0,
            int(np.mean([len(r['CountsBlob']) for r in rows])) if rows else 0,
        )
        return unified_df


class UpstreamAnalysisWrapper:
    """
    Rawdata analysis wrapper for upstream processing.
    Args:
        user (str): The user ID.
        work_dir (Path): The working directory (created if missing).
        rawdata_dir (Path): The raw data directory.
        sample_sheet (pd.DataFrame): The sample sheet DataFrame.
        genome (Path): The genome file path.
        annotation (Path): The annotation file path.
        smk_file (Path): The Snakemake file path.
    """

    def __init__(self, user: str, work_dir: Path, rawdata_dir: Path, sample_sheet: pd.DataFrame,
                 genome: Path, annotation: Path,
                 smk_file: Path = Path("backend/analysis/workflow/Snakefile")):
        self.user = user
        self.work_dir = work_dir
        # Make sure the working directory exists before writing samples.csv.
        work_dir.mkdir(parents=True, exist_ok=True)
        self.rawdata_dir = rawdata_dir
        self.smk_file = smk_file
        self.genome = genome
        self.annotation = annotation
        self.sample_sheet = sample_sheet
        # Derived state: the snakemake-format sheet and the analysis driver
        # (None when UpstreamAnalysis could not be created).
        self.smk_sample_sheet = self._get_smk_samples()
        self.upstream = self._snakemake_wrapper()

    def _get_smk_samples(self) -> pd.DataFrame:
        """
        Build the snakemake-format sample sheet and persist it as
        ``<work_dir>/samples.csv``.
        Returns:
            pd.DataFrame: The converted sample sheet.
        """
        samples_csv = self.work_dir / "samples.csv"
        converted = UploadFileProcessor.trans_to_smk_samples(
            self.sample_sheet, self.rawdata_dir, True, samples_csv)
        logger.info("Snakemake sample sheet created successfully.")
        return converted

    def _snakemake_wrapper(self) -> UpstreamAnalysis | None:
        """
        Create and return an UpstreamAnalysis instance.
        Returns:
            UpstreamAnalysis | None: Instance if success else None.
        """
        samples_csv = self.work_dir / "samples.csv"
        if not samples_csv.exists():
            logger.error(
                f"Snakemake wrapper aborted: {samples_csv} not found.")
            return None
        try:
            instance = UpstreamAnalysis(
                snakefile_path=self.smk_file,
                work_dir=self.work_dir,
                sample_sheet=samples_csv,
                genome=self.genome,
                annotation=self.annotation
            )
        except Exception as e:
            logger.error(f"Failed to create UpstreamAnalysis: {e}")
            return None
        logger.info("UpstreamAnalysis instance created successfully.")
        return instance

    def smk_dry_run(self) -> bool:
        """
        Run Snakemake in dry run mode.
        Returns:
            bool: if dry run is successful
        """
        if not self.upstream:
            logger.error("Snakemake wrapper not initialized.")
            return False
        try:
            ok = self.upstream.run_analysis(dryrun=True)
        except Exception as e:
            logger.error(f"Failed to execute Snakemake dry run: {e}")
            return False
        logger.info("Snakemake dry run completed successfully.")
        return ok

    def smk_run(self, cores: int) -> bool:
        """
        Run Snakemake in normal mode.
        Args:
            cores (int): Number of cores handed to snakemake.
        Returns:
            bool: if run is successful
        """
        if not self.upstream:
            logger.error("Snakemake wrapper not initialized.")
            return False
        try:
            ok = self.upstream.run_analysis(dryrun=False, ncores=cores)
        except Exception as e:
            logger.error(f"Failed to execute Snakemake run: {e}")
            return False
        logger.info("Snakemake run completed successfully.")
        return ok

    def post_process(self, clean: bool = True) -> Dict:
        """
        Post-process the results of the upstream analysis.
        Args:
            clean (bool): Whether to clean up the working directory after processing.
        Returns:
            Dict: A dictionary containing processed results,
            including quantification data, alignment reports, and fastp reports.
            The dictionary has the following structure:
            - "quantification": List containing TPM and counts DataFrames.
            - "align_report": List containing success status, alignment DataFrame,
              and list of failed HISAT2 logs.
            - "fastp_report": List containing success status, fastp DataFrame,
              and list of failed fastp logs.
        Raises:
            RuntimeError: If there is an error processing the reports.
        """
        if self.upstream:
            res = self.upstream.post_process(clean=clean)
        else:
            # No initialized pipeline -> empty result, reported as a failure.
            res = {}
        if res:
            logger.info("Post-processing completed successfully.")
        else:
            logger.error("Post-processing failed.")
        return res
