import os
import sys
import json
import argparse
import math
from typing import Dict, Any, Optional

import pandas as pd
import numpy as np


def _safe_int(x):
    try:
        return int(x)
    except Exception:
        return None


def summarize_distribution(counts: np.ndarray) -> Dict[str, Any]:
    """Summarize a per-class count vector with standard balance metrics.

    Returns a dict with the sample total, number of non-empty classes,
    majority-class fraction, max/min class-count ratio, Shannon entropy
    normalized by log(k), and the Gini impurity. For an empty distribution
    (no samples or no populated classes) the ratio/entropy/gini fields are
    ``None``/zeroed.
    """
    total = int(counts.sum())
    num_classes = int((counts > 0).sum())

    # Degenerate case: nothing to summarize.
    if total == 0 or num_classes == 0:
        return {
            "total_samples": total,
            "num_classes": num_classes,
            "majority_class_pct": 0.0,
            "imbalance_ratio": None,
            "entropy_norm": None,
            "gini": None,
        }

    probs = counts / total
    populated = counts[counts > 0]
    pos_probs = probs[probs > 0]

    # Shannon entropy, normalized by log(k) so a uniform distribution -> 1.0.
    if num_classes > 1:
        entropy = float(-(pos_probs * np.log(pos_probs)).sum())
        entropy_norm = float(entropy / math.log(num_classes))
    else:
        entropy_norm = 0.0

    return {
        "total_samples": total,
        "num_classes": num_classes,
        "majority_class_pct": float(probs.max()),
        "imbalance_ratio": float(populated.max() / max(1, populated.min())),
        "entropy_norm": entropy_norm,
        # Gini impurity: 1 - sum(p^2).
        "gini": float(1.0 - (probs * probs).sum()),
    }


def audit_dataset(csv_path: str, image_root: str, tokenize: bool = False,
                  text_model: str = "distilbert-base-uncased", max_len: int = 64,
                  check_open: bool = False, output_dir: Optional[str] = None,
                  create_split: bool = False, train_ratio: float = 0.8,
                  val_ratio: float = 0.1, test_ratio: float = 0.1,
                  random_state: int = 42) -> Dict[str, Any]:
    """Audit an image/text/label CSV dataset and write report files.

    Checks performed: required columns, missing values, empty text, missing
    image files, optional image-corruption probing, non-numeric label mapping,
    class distribution metrics, text length statistics, optional tokenizer
    length statistics, and optional stratified train/val/test split creation.

    Parameters:
        csv_path: Path to a CSV expected to contain columns image, text, label.
        image_root: Directory that the CSV's image paths are relative to.
        tokenize: If True, compute token-length stats with a HuggingFace tokenizer.
        text_model: Tokenizer checkpoint name used when ``tokenize`` is True.
        max_len: Model's max token length, compared against measured lengths.
        check_open: If True, open each existing image with PIL to detect corruption.
        output_dir: Where reports are written; defaults to the CSV's directory.
        create_split: If True, write stratified train.csv/val.csv/test.csv.
        train_ratio, val_ratio, test_ratio: Split fractions; must sum to 1.
        random_state: Seed for the stratified splits.

    Returns:
        A JSON-serializable summary dict, also written to ``audit_summary.json``.
        Failures are recorded inside the summary (keys like ``error``,
        ``tokenize_error``, ``split_error``) rather than raised.
    """
    summary: Dict[str, Any] = {
        "inputs_required": ["image", "text", "label"],
        "optional_columns": ["disease_category"],
        "csv_path": csv_path,
        "image_root": image_root,
        "tokenize": tokenize,
        "text_model": text_model,
        "max_len": max_len,
        "check_open": check_open,
        "create_split": create_split,
        "ratios": {"train": train_ratio, "val": val_ratio, "test": test_ratio},
    }

    if output_dir is None:
        output_dir = os.path.dirname(csv_path) or os.getcwd()
    os.makedirs(output_dir, exist_ok=True)

    def _write_summary() -> None:
        # Persist the current summary snapshot next to the other report files.
        out_json = os.path.join(output_dir, "audit_summary.json")
        with open(out_json, "w", encoding="utf-8") as f:
            json.dump(summary, f, ensure_ascii=False, indent=2)

    # Load CSV; a read failure is reported, not raised.
    try:
        df = pd.read_csv(csv_path)
    except Exception as e:
        summary["error"] = f"Failed to read CSV: {e}"
        return summary

    summary["columns"] = list(df.columns)
    required_ok = all(col in df.columns for col in ["image", "text", "label"])
    summary["has_required_columns"] = required_ok
    if not required_ok:
        summary["errors"] = [
            "CSV must contain columns: image, text, label (per train_classifier.py)"
        ]
        _write_summary()  # save early snapshot before bailing out
        return summary

    # Basic stats
    n_total = int(len(df))
    summary["num_samples"] = n_total

    # Missing values per required column.
    missing = {col: int(df[col].isnull().sum()) for col in ["image", "text", "label"]}
    summary["missing_counts"] = missing

    # Rows whose text is empty or whitespace-only.
    empty_text_mask = df["text"].astype(str).str.strip().eq("")
    empty_text_count = int(empty_text_mask.sum())
    summary["empty_text_count"] = empty_text_count
    if empty_text_count > 0:
        df.loc[empty_text_mask].to_csv(os.path.join(output_dir, "empty_text_samples.csv"), index=False)

    # Image path existence (CSV paths are resolved relative to image_root).
    image_paths = df["image"].astype(str).apply(lambda p: os.path.join(image_root, p))
    exists_mask = image_paths.apply(os.path.exists)
    missing_images_count = int((~exists_mask).sum())
    summary["missing_images_count"] = missing_images_count
    if missing_images_count > 0:
        df_missing = df.loc[~exists_mask].copy()
        df_missing["resolved_path"] = image_paths.loc[~exists_mask]
        df_missing.to_csv(os.path.join(output_dir, "missing_images.csv"), index=False)

    # Optional: open existing images to detect corruption.
    # FIX: skip files already reported as missing — previously they were
    # attempted anyway and double-counted as "corrupt".
    if check_open:
        try:
            from PIL import Image
            ok_flags = []
            for p, exists in zip(image_paths, exists_mask):
                if not exists:
                    ok_flags.append(True)  # missing, not corrupt; reported above
                    continue
                try:
                    with Image.open(p) as im:
                        im.verify()
                    ok_flags.append(True)
                except Exception:
                    ok_flags.append(False)
            corrupt_mask = [not f for f in ok_flags]
            corrupt_count = int(sum(corrupt_mask))
            summary["corrupt_images_count"] = corrupt_count
            if corrupt_count > 0:
                df_corrupt = df.loc[corrupt_mask].copy()
                df_corrupt["resolved_path"] = image_paths.loc[corrupt_mask]
                df_corrupt.to_csv(os.path.join(output_dir, "corrupt_images.csv"), index=False)
        except Exception as e:
            summary["corrupt_check_error"] = str(e)

    # Label type and mapping.
    label_numeric = pd.to_numeric(df["label"], errors="coerce")
    non_numeric_mask = label_numeric.isnull()
    summary["non_numeric_label_count"] = int(non_numeric_mask.sum())

    label_mapping: Optional[Dict[str, int]] = None
    if int(non_numeric_mask.sum()) > 0:
        # Map categorical labels to contiguous indices, preferring the
        # disease_category column when present, else the raw label strings.
        if "disease_category" in df.columns:
            cat_source = df["disease_category"].astype(str)
        else:
            cat_source = df["label"].astype(str)
        unique_cats = sorted(set(cat_source))
        label_mapping = {c: i for i, c in enumerate(unique_cats)}
        df["label_raw"] = df["label"].astype(str)
        df["label"] = cat_source.map(label_mapping)
        summary["label_mapping"] = label_mapping
        summary["label_mapped"] = True
        df.to_csv(os.path.join(output_dir, "label_mapped.csv"), index=False)
    else:
        summary["label_mapped"] = False

    # Label distribution (labels are guaranteed integral here: either
    # numeric originals or the 0..k-1 mapping produced above).
    labels = df["label"].astype(int)
    counts_series = labels.value_counts().sort_index()
    summary["label_counts"] = {int(k): int(v) for k, v in counts_series.items()}
    pd.DataFrame({"label": counts_series.index, "count": counts_series.values}).to_csv(
        os.path.join(output_dir, "class_distribution.csv"), index=False
    )

    # Distribution metrics
    summary["distribution_metrics"] = summarize_distribution(
        np.asarray(counts_series.values, dtype=np.float64)
    )

    def _len_stats(series: pd.Series) -> Dict[str, Any]:
        # min / median / p95 / max of a length series; zeros on an empty dataset.
        if not n_total:
            return {"min": 0, "p50": 0.0, "p95": 0.0, "max": 0}
        return {
            "min": int(series.min()),
            "p50": float(series.median()),
            "p95": float(series.quantile(0.95)),
            "max": int(series.max()),
        }

    # Text length stats (characters and whitespace-split words).
    text_series = df["text"].astype(str)
    text_stats = {
        "char_len": _len_stats(text_series.apply(len)),
        "word_len": _len_stats(text_series.apply(lambda s: len(s.split()))),
    }
    summary["text_stats"] = text_stats

    # Optional tokenization for max_len compatibility.
    token_stats = None
    if tokenize:
        try:
            from transformers import AutoTokenizer
            tokenizer = AutoTokenizer.from_pretrained(text_model)
            # BUGFIX: tokenize WITHOUT truncation so true sequence lengths are
            # measured. The original truncated at max_len, which made
            # exceed_max_len_pct always 0 and the p95 > max_len check dead.
            token_lens = pd.Series([
                len(tokenizer(
                    s,
                    truncation=False,
                    padding=False,
                    return_attention_mask=False,
                    return_tensors=None,
                ).get("input_ids", []))
                for s in text_series
            ])
            token_stats = _len_stats(token_lens)
            token_stats["exceed_max_len_pct"] = (
                float((token_lens > max_len).mean()) if n_total else 0.0
            )
        except Exception as e:
            summary["tokenize_error"] = str(e)

    if token_stats is not None:
        summary["token_stats"] = token_stats
        if token_stats["p95"] > max_len:
            # Suggest a max_len (capped at 128) covering ~95% of the texts.
            summary["max_len_recommendation"] = int(min(128, token_stats["p95"]))

    # Split suggestion and optional creation
    if create_split:
        try:
            from sklearn.model_selection import StratifiedShuffleSplit
            # FIX: explicit raise instead of assert (asserts vanish under -O).
            if abs(train_ratio + val_ratio + test_ratio - 1.0) >= 1e-6:
                raise ValueError("Ratios must sum to 1")
            labels_arr = labels.values
            idx = np.arange(n_total)
            sss = StratifiedShuffleSplit(
                n_splits=1, test_size=(1.0 - train_ratio), random_state=random_state
            )
            # n_splits=1, so take the single (train, rest) partition directly.
            train_idx, rest_idx = next(sss.split(idx, labels_arr))
            rest_idx_arr = np.array(rest_idx)
            rest_labels = labels_arr[rest_idx]
            # Carve the held-out remainder into val/test, preserving
            # stratification; val gets val_ratio/(val_ratio+test_ratio) of it.
            val_size = int(round(len(rest_idx) * val_ratio / (val_ratio + test_ratio)))
            sss2 = StratifiedShuffleSplit(
                n_splits=1, test_size=(len(rest_idx) - val_size), random_state=random_state
            )
            val_idx_rel, test_idx_rel = next(
                sss2.split(np.arange(len(rest_idx_arr)), rest_labels)
            )
            val_idx = rest_idx_arr[val_idx_rel]
            test_idx = rest_idx_arr[test_idx_rel]
            df.iloc[train_idx].to_csv(os.path.join(output_dir, "train.csv"), index=False)
            df.iloc[val_idx].to_csv(os.path.join(output_dir, "val.csv"), index=False)
            df.iloc[test_idx].to_csv(os.path.join(output_dir, "test.csv"), index=False)
            summary["splits_created"] = {
                "train": int(len(train_idx)),
                "val": int(len(val_idx)),
                "test": int(len(test_idx)),
            }
        except Exception as e:
            summary["split_error"] = str(e)

    # Save summary JSON
    _write_summary()
    return summary


def main():
    """CLI entry point: parse arguments, run the audit, print the summary JSON."""
    parser = argparse.ArgumentParser(description="Dataset audit for MultiModalMoEClassifier requirements")

    # (flag, add_argument keyword options) — registered in a single pass below.
    arg_specs = [
        ("--csv", dict(required=True, help="Path to dataset CSV with columns image,text,label")),
        ("--image_root", dict(required=True, help="Root directory where images reside")),
        ("--tokenize", dict(action="store_true", help="Compute token length stats with a HuggingFace tokenizer")),
        ("--text_model", dict(default="distilbert-base-uncased", help="Tokenizer model name if --tokenize is set")),
        ("--max_len", dict(type=int, default=64, help="Model's max token length for comparison")),
        ("--check_open", dict(action="store_true", help="Try opening images to detect corruption (slower)")),
        ("--output_dir", dict(default=None, help="Directory to save audit outputs (defaults to CSV directory)")),
        ("--create_split", dict(action="store_true", help="Create stratified train/val/test CSVs")),
        ("--train_ratio", dict(type=float, default=0.8, help="Train ratio when creating splits")),
        ("--val_ratio", dict(type=float, default=0.1, help="Val ratio when creating splits")),
        ("--test_ratio", dict(type=float, default=0.1, help="Test ratio when creating splits")),
        ("--random_state", dict(type=int, default=42, help="Random seed for splitting")),
    ]
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)

    # All destinations except --csv already match audit_dataset's parameter
    # names, so forward them as keyword arguments.
    kwargs = vars(parser.parse_args())
    kwargs["csv_path"] = kwargs.pop("csv")
    result = audit_dataset(**kwargs)

    print(json.dumps(result, ensure_ascii=False, indent=2))


if __name__ == "__main__":
    main()