"""
主执行脚本：
- train_fold: 训练和验证一个折，保存最佳和最后一个模型
- evaluate_saved_model_set: 加载保存的模型（最佳或最后）并在外部集上评估
- main: 协调整个交叉验证和最终评估流程
"""

import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import pandas as pd
import logging
from typing import Dict, Union, Tuple, Optional
import itertools  # NEW: 用于超参数搜索

# NEW: 导入 TensorBoard
try:
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    print(
        "Warning: torch.utils.tensorboard not found. TensorBoard logging will be disabled."
    )
    SummaryWriter = None

# 导入重构的模块
from fpt.config import mil_config as config

from fpt.fptaux.aux import (
    set_seed,
    load_manifest,
    get_model,
    get_dataloaders,
    calculate_metrics_with_ci_pvalue,
    setup_logging,
)
from fpt.fptaux.core_traing_test_mil import train_one_epoch, evaluate_model

# ExternalH5Dataset 需要在 main 中用于创建 ext_loader
try:
    from fpt.m3dataset.m3il_dataset import ExternalH5Dataset
except ImportError:
    print("Warning: Could not perform relative import for ExternalH5Dataset in run.py.")
    from .m3dataset.m3il_dataset import ExternalH5Dataset


class CLAMLoss(nn.Module):
    """
    CLAM composite loss function.

    Combines a BCE loss for bag-level classification with a BCE loss that
    enforces the instance-level clustering constraint on positive bags.
    """

    def __init__(
        self, k: int = 8, instance_loss_weight: float = 0.7, n_classes: int = 1
    ):
        """
        Args:
            k (int): Number of Top-K and Bottom-K attention instances used
                for the clustering loss. Clamped at runtime to the number of
                instances actually present in a bag.
            instance_loss_weight (float): Weight (alpha) of the instance loss.
            n_classes (int): Number of classes.
        """
        super().__init__()
        self.k = k
        self.instance_loss_weight = instance_loss_weight
        self.n_classes = n_classes

        # Standard BCE-with-logits for both bag-level and instance-level
        # losses (raw logits vs. 0/1 targets).
        self.bag_criterion = nn.BCEWithLogitsLoss()
        self.instance_criterion = nn.BCEWithLogitsLoss()

    def forward(self, results_dict: dict, labels: torch.Tensor) -> torch.Tensor:
        """
        Compute the total CLAM loss.

        Args:
            results_dict (dict): Output of CLAMModel, containing:
                - "bag_logit": [B] or [B, n_classes]
                - "instance_logits": [B, N] or [B, N, n_classes]
                - "attention": [B, N] (raw attention scores)
            labels (torch.Tensor): Ground-truth bag-level labels, [B].

        Returns:
            torch.Tensor: scalar loss
                (bag_loss + instance_loss_weight * instance_loss).
        """
        # --- 1. Unpack model outputs ---
        bag_logit = results_dict["bag_logit"]
        instance_logits = results_dict["instance_logits"]
        attention = results_dict["attention"]

        # BCE expects float targets.
        labels = labels.float()

        # --- 2. Bag-level loss (L_bag), computed on all samples ---
        bag_loss = self.bag_criterion(bag_logit, labels)

        # --- 3. Instance-level clustering loss (L_inst) ---
        # Only computed on *positive* bags (label == 1).
        pos_indices = torch.where(labels == 1)[0]

        if len(pos_indices) == 0:
            # No positive bag in this batch -> instance loss is 0.
            instance_loss = torch.tensor(0.0, device=bag_logit.device)
        else:
            # Select only the positive bags' data.
            pos_instance_logits = instance_logits[pos_indices]  # [B_pos, N]
            pos_attention = attention[pos_indices]  # [B_pos, N]

            # FIX: clamp k so torch.topk never requests more elements than a
            # bag contains (topk raises "selected index k out of range"
            # otherwise when N < k).
            k_eff = min(self.k, pos_attention.size(1))

            # Indices of the k_eff highest / lowest attention instances
            # (Bottom-K found via topk on the negated scores).
            _, top_k_indices = torch.topk(pos_attention, k=k_eff, dim=1)  # [B_pos, k_eff]
            _, bottom_k_indices = torch.topk(-pos_attention, k=k_eff, dim=1)  # [B_pos, k_eff]

            # --- Pseudo-labels ---
            # Top-K instances get pseudo-label 1 (positive evidence).
            top_k_logits = torch.gather(pos_instance_logits, dim=1, index=top_k_indices)
            top_loss = self.instance_criterion(
                top_k_logits, torch.ones_like(top_k_logits)
            )

            # Bottom-K instances get pseudo-label 0 (negative evidence).
            bottom_k_logits = torch.gather(
                pos_instance_logits, dim=1, index=bottom_k_indices
            )
            bottom_loss = self.instance_criterion(
                bottom_k_logits, torch.zeros_like(bottom_k_logits)
            )

            # Instance loss is the mean of the Top-K and Bottom-K losses.
            instance_loss = (top_loss + bottom_loss) / 2.0

        # --- 4. Total loss ---
        total_loss = bag_loss + self.instance_loss_weight * instance_loss

        return total_loss


def train_fold(
    model_type: str,
    fold_col: str,
    df: pd.DataFrame,
    ext_loader: Optional[DataLoader],
    device: torch.device,
) -> Tuple[float, float]:
    """
    Train, validate and save one cross-validation fold.

    Per epoch: train, evaluate on the internal validation split, optionally
    evaluate on the external set, and log everything to TensorBoard. The
    checkpoint with the best validation AUC and the last-epoch checkpoint
    are both written to disk.

    Args:
        model_type: Model name understood by ``get_model`` (e.g. "CLAMModel").
        fold_col: Manifest column defining this fold's train/val split.
        df: Main manifest DataFrame.
        ext_loader: DataLoader for the external test set, or None to skip
            per-epoch external evaluation.
        device: Device to train on.

    Returns:
        Tuple[float, float]: (best_val_auc, best_val_acc) — best internal
        validation AUC over all epochs, and the accuracy at that same epoch.
    """
    logger = logging.getLogger()
    logger.info(f"--- 开始处理: {model_type.upper()} | 折: {fold_col} ---")

    # Initialize the TensorBoard writer (stays None if tensorboard is unavailable).
    writer = None
    if SummaryWriter:
        # config.current_log_dir is set per HParam run in main().
        log_dir = os.path.join(config.current_log_dir, model_type, f"fold_{fold_col}")
        writer = SummaryWriter(log_dir=log_dir)
        logger.info(f"TensorBoard 日志保存至: {log_dir}")

    # 1. Initialize model, optimizer and loss function.
    if model_type == "CLAMModel":
        logger.info("使用 CLAM 复合损失函数。CLAMLoss()")
        # CLAM parameters are configurable through config (with defaults).
        criterion = CLAMLoss(
            k=getattr(config, "clam_k", 8),
            instance_loss_weight=getattr(config, "clam_instance_loss_weight", 0.7),
        )
    else:
        # pos_weight = 1.78 # 250 / 140
        # pos_weight=torch.tensor([pos_weight])
        criterion = nn.BCEWithLogitsLoss()
    model = get_model(model_type, config.feature_dim).to(device)
    optimizer = optim.Adam(model.parameters(), lr=config.learning_rate)

    # 2. Create the DataLoaders for this fold.
    train_loader, val_loader = get_dataloaders(
        df,
        config.feats_path,
        fold_col,
        config.task_label_col,
        config.batch_size,
        config.SEED,
        config.train_patches_sampled,
    )
    logger.info(
        f"Fold {fold_col}: 训练样本 = {len(train_loader.dataset)}, "
        f"验证样本 = {len(val_loader.dataset)}"
    )

    # 3. Training loop with per-epoch validation.
    best_val_auc = -1.0
    best_val_acc = 0.0  # accuracy at the epoch with the best AUC
    best_epoch = -1

    # Paths are based on config.save_dir (already includes the run name).
    model_save_path = os.path.join(
        config.save_dir, model_type, f"model_{fold_col}_best.pth"
    )
    last_model_save_path = os.path.join(
        config.save_dir, model_type, f"model_{fold_col}_last.pth"
    )
    os.makedirs(os.path.dirname(model_save_path), exist_ok=True)

    for epoch in range(config.num_epochs):
        # Train for one epoch.
        train_loss = train_one_epoch(model, train_loader, optimizer, criterion, device)

        # Internal validation.
        val_auc, val_acc, val_loss = evaluate_model(
            model, val_loader, device, criterion
        )

        # External validation (every epoch; monitoring only — it does not
        # influence checkpoint selection).
        ext_auc_epoch, ext_acc_epoch = np.nan, np.nan
        if ext_loader:
            ext_auc_epoch, ext_acc_epoch, _ = evaluate_model(
                model, ext_loader, device, criterion=None
            )

        logger.info(
            f"Epoch {epoch+1}/{config.num_epochs} | "
            f"TrainLoss: {train_loss:.4f} | "
            f"ValLoss: {val_loss:.4f} | "
            f"ValAUC: {val_auc:.4f} | "
            f"ValAcc: {val_acc:.4f} | "
            f"ExtAUC: {ext_auc_epoch:.4f} | "
            f"ExtAcc: {ext_acc_epoch:.4f}"
        )

        # Log scalars to TensorBoard.
        if writer:
            writer.add_scalar("Loss/train", train_loss, epoch)
            writer.add_scalar("Loss/val", val_loss, epoch)
            writer.add_scalar("AUC/val", val_auc, epoch)
            writer.add_scalar("Accuracy/val", val_acc, epoch)
            if ext_loader:
                writer.add_scalar("AUC/ext_epoch", ext_auc_epoch, epoch)
                writer.add_scalar("Accuracy/ext_epoch", ext_acc_epoch, epoch)

        # Save the best model (selected by internal validation AUC).
        if val_auc > best_val_auc:
            best_val_auc = val_auc
            best_val_acc = val_acc
            best_epoch = epoch + 1
            torch.save(model.state_dict(), model_save_path)
            logger.info(
                f"  -> 新的最佳验证 AUC: {best_val_auc:.4f}。模型已保存到: {model_save_path}"
            )

        # Save the last-epoch model.
        if epoch == config.num_epochs - 1:
            torch.save(model.state_dict(), last_model_save_path)
            logger.info(f"  -> 最后一个 epoch 的模型已保存到: {last_model_save_path}")

    logger.info(f"--- Fold {fold_col} 结束 ---")
    logger.info(f"最佳内部验证 AUC: {best_val_auc:.4f} (在 Epoch {best_epoch})")

    # Log hyperparameters and final fold metrics to TensorBoard.
    if writer:
        hparam_dict = {
            "lr": config.learning_rate,
            "batch_size": config.batch_size,
            "model_type": model_type,
            "fold": fold_col,
        }
        # Add any other hyperparameters of interest here.
        if model_type == "CLAMModel":
            hparam_dict["clam_k"] = getattr(config, "clam_k", 8)
            hparam_dict["clam_inst_weight"] = getattr(
                config, "clam_instance_loss_weight", 0.7
            )

        metric_dict = {
            "best_val_auc": best_val_auc,
            "best_val_acc": best_val_acc,
            "best_epoch": best_epoch,
        }

        # add_hparams expects all values to be scalars.
        writer.add_hparams(hparam_dict, metric_dict, run_name="fold_summary")
        writer.close()

    # Return the best AUC and its matching accuracy.
    return best_val_auc, best_val_acc


def evaluate_saved_model_set(
    model_type: str,
    model_suffix: str,  # "_best" or "_last"
    ext_loader: DataLoader,
    device: torch.device,
) -> Dict[str, Union[float, Tuple[float, float]]]:
    """
    Load the 5-fold checkpoints and evaluate them on the external set.

    Args:
        model_type: Model name understood by ``get_model``.
        model_suffix: Which checkpoint set to load, "_best" or "_last".
        ext_loader: DataLoader over the external test set.
        device: Device to evaluate on.

    Returns:
        Dict with aggregated metrics (mean/std/CI for AUC and accuracy, plus
        a p-value for AUC vs. a 0.5 baseline); empty dict if no checkpoint
        could be loaded and evaluated.
    """
    logger = logging.getLogger()
    logger.info(
        f"\n{'-'*50}\n"
        f"外部测试 ({model_suffix.strip('_').title()} Model): {model_type.upper()}\n"
        f"{'-'*50}"
    )

    # TensorBoard writer for the aggregated external-test results.
    writer = None
    if SummaryWriter:
        # config.current_log_dir is set per HParam run in main().
        log_dir = os.path.join(
            config.current_log_dir,
            model_type,
            f"External_Test_{model_suffix.strip('_')}",
        )
        writer = SummaryWriter(log_dir=log_dir)
        logger.info(f"TensorBoard 聚合日志保存至: {log_dir}")

    fold_aucs, fold_accs = [], []

    for fold_idx, fold_col in enumerate(config.fold_columns):
        # Checkpoint path is based on config.save_dir (includes the run name).
        model_path = os.path.join(
            config.save_dir, model_type, f"model_{fold_col}{model_suffix}.pth"
        )
        if not os.path.exists(model_path):
            logger.warning(f"模型文件不存在，跳过: {model_path}")
            continue

        try:
            # 1. Build the model and load the saved weights.
            model = get_model(model_type, config.feature_dim).to(device)
            model.load_state_dict(torch.load(model_path, map_location=device))

            # 2. Evaluate on the external set (no loss needed).
            auc, acc, _ = evaluate_model(model, ext_loader, device, criterion=None)

            logger.info(
                f"  {fold_col} ({model_suffix.strip('_')}) -> Ext AUC: {auc:.4f}, Ext Acc: {acc:.4f}"
            )
            fold_aucs.append(auc)
            fold_accs.append(acc)

            # Log the per-fold external performance.
            if writer:
                writer.add_scalar("AUC_per_fold", auc, fold_idx)
                writer.add_scalar("Accuracy_per_fold", acc, fold_idx)

        except Exception as e:
            logger.error(f"加载或评估模型 {model_path} 时出错: {e}")

    # Summarize the external test for this model.
    if not fold_aucs:
        logger.warning(f"未找到或评估 {model_type} ({model_suffix}) 的模型。")
        if writer:
            writer.close()
        return {}

    # AUC: mean, std, CI and p-value (compared against a 0.5 baseline).
    mean_auc, std_auc, ci_auc, p_auc = calculate_metrics_with_ci_pvalue(
        fold_aucs, baseline_for_p_val=0.5
    )

    # Accuracy: mean, std, CI (no p-value).
    mean_acc, std_acc, ci_acc, _ = calculate_metrics_with_ci_pvalue(
        fold_accs, baseline_for_p_val=None
    )

    results = {
        "mean_auc": mean_auc,
        "std_auc": std_auc,
        "ci_auc": ci_auc,
        "p_auc": p_auc,
        "mean_acc": mean_acc,
        "std_acc": std_acc,
        "ci_acc": ci_acc,
    }

    # Log the aggregate for this model.
    logger.info(
        f"  [汇总] {model_type.upper()} 外部测试 ({model_suffix.strip('_').title()}):\n"
        f"    AUC: {mean_auc:.4f} \u00b1 {std_auc:.4f} (95% CI: {ci_auc[0]:.4f}-{ci_auc[1]:.4f}, p={p_auc:.4e})\n"
        f"    Acc: {mean_acc:.4f} \u00b1 {std_acc:.4f} (95% CI: {ci_acc[0]:.4f}-{ci_acc[1]:.4f})"
    )

    # Log HParams plus the aggregated metrics to TensorBoard.
    if writer:
        hparam_dict = {
            "lr": config.learning_rate,
            "batch_size": config.batch_size,
            "model_type": model_type,
            "model_suffix": model_suffix.strip("_"),
        }
        metric_dict = {
            "mean_auc": mean_auc,
            "std_auc": std_auc,
            "mean_acc": mean_acc,
            "std_acc": std_acc,
            "p_auc": p_auc,
        }
        writer.add_hparams(hparam_dict, metric_dict, run_name="external_summary")
        writer.close()

    return results


def log_final_summary(
    title: str, results_dict: Dict[str, Dict[str, Union[float, Tuple[float, float]]]]
):
    """Write a final summary block to the log.

    Recognizes two per-model result shapes: internal-CV results (carrying
    ``mean_accuracy``/``std_accuracy``) and external-test results (carrying
    ``ci_auc``/``p_auc``), and formats each accordingly. Empty result dicts
    are reported as missing.
    """
    logger = logging.getLogger()
    banner = "=" * 60
    logger.info(f"\n{banner}")
    logger.info(f"------ {title} ------")
    logger.info(banner)

    for model_type, res in results_dict.items():
        # An empty dict means no models were found/evaluated for this type.
        if not res:
            logger.info(f"{model_type.upper():18}: 未找到结果。")
            continue

        logger.info(f"{model_type.upper():18}:")

        has_cv_keys = all(k in res for k in ("mean_auc", "std_auc", "mean_accuracy"))
        has_ext_keys = all(k in res for k in ("mean_auc", "ci_auc", "p_auc"))

        if has_cv_keys:
            # Internal cross-validation summary.
            logger.info(
                f"  平均 最佳Val-AUC: {res['mean_auc']:.4f} \u00b1 {res['std_auc']:.4f}"
            )
            logger.info(
                f"  平均 最佳Val-Acc: {res['mean_accuracy']:.4f} \u00b1 {res['std_accuracy']:.4f}"
            )
        elif has_ext_keys:
            # External-test summary.
            logger.info(
                f"  AUC: {res['mean_auc']:.4f} \u00b1 {res['std_auc']:.4f} "
                f"(95% CI: {res['ci_auc'][0]:.4f}-{res['ci_auc'][1]:.4f}, p={res['p_auc']:.4e})"
            )
            logger.info(
                f"  Acc: {res['mean_acc']:.4f} \u00b1 {res['std_acc']:.4f} "
                f"(95% CI: {res['ci_acc'][0]:.4f}-{res['ci_acc'][1]:.4f})"
            )


def main():
    """Orchestrate the hyperparameter grid search: for each parameter
    combination, run 5-fold CV training, evaluate the best and last
    checkpoints on the external set, and finally summarize the best run."""

    # --- 1. Define the hyperparameter search space ---
    # Define the parameter grid to search here.
    # It could also be defined in config.py and imported.
    search_space = {
        "learning_rate": [1e-4],
        "batch_size": [16],
        "train_patches_sampled":[128],
        "num_epochs":[10],
        # 'clam_instance_loss_weight': [0.5, 0.7], # example: tuning CLAM's instance loss weight
    }

    keys, values = zip(*search_space.items())
    all_run_results = []

    # Remember the base paths; each run writes into its own subdirectory.
    original_save_dir = config.save_dir
    base_tensorboard_dir = "runs"  # root directory for TensorBoard logs

    # --- 2. Hyperparameter search loop ---
    # NOTE(review): if search_space were ever empty, `logger` would be unbound
    # after this loop (it is assigned inside); assumed non-empty by construction.
    for param_combination in itertools.product(*values):
        hparams = dict(zip(keys, param_combination))
        run_name = "_".join(f"{k}_{v}" for k, v in hparams.items())

        # 7.1 --- Initialization ---
        # Create a unique log file for every HParam run.
        logger, log_file_path = setup_logging(f"fpt_train_mil2_{run_name}")

        logger.info(f"\n{'='*70}")
        logger.info(f"------ STARTING HPARAM RUN: {run_name} ------")
        logger.info(f"PARAMS: {hparams}")
        logger.info(f"Log file: {log_file_path}")
        logger.info(f"{'='*70}")

        # Dynamically patch the global config object for this run.
        for key, value in hparams.items():
            if hasattr(config, key):
                setattr(config, key, value)
                logger.info(f"Set config.{key} = {value}")
            else:
                logger.warning(f"Warning: '{key}' not found in config object.")

        # Unique model-save and TensorBoard log paths for this run.
        config.save_dir = os.path.join(original_save_dir, run_name)
        config.current_log_dir = os.path.join(base_tensorboard_dir, run_name)  # for TB

        os.makedirs(config.save_dir, exist_ok=True)
        os.makedirs(config.current_log_dir, exist_ok=True)

        # Re-seed at the start of each HParam run for reproducibility.
        set_seed(config.SEED)
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        logger.info(f"使用设备: {device}")
        logger.info(f"特征路径 (CV): {config.feats_path}")
        logger.info(f"特征路径 (Ext): {config.external_feats_path}")
        logger.info(
            f"Batch Size: {config.batch_size}, Epochs: {config.num_epochs}, LR: {config.learning_rate}"
        )
        logger.info(f"训练采样 Patch 数: {config.train_patches_sampled}")
        logger.info(f"模型将保存到: {config.save_dir}")
        logger.info(f"TensorBoard 日志将保存到: {config.current_log_dir}")

        # 7.2 --- Load data ---
        # (Usually done outside the HParam loop; kept inside here for
        # simplicity, since the DataLoaders depend on the HParams anyway.)
        df = load_manifest(
            config.split_file_path, config.feats_path, config.task_label_col
        )
        if df is None:
            logger.error("无法加载主 manifest，训练中止。")
            return

        ext_df = load_manifest(
            config.external_manifest_path,
            config.external_feats_path,
            config.task_label_col,
        )
        ext_loader = None
        if ext_df is not None:
            logger.info(f"外部测试集已加载: {len(ext_df)} 个样本。")
            ext_dataset = ExternalH5Dataset(
                config.external_feats_path, ext_df, config.task_label_col
            )
            # External test set is evaluated with batch size 1.
            ext_loader = DataLoader(ext_dataset, batch_size=1, shuffle=False)
        else:
            logger.warning("无法加载外部测试集。每个 epoch 和最终的外部验证将被跳过。")

        # 7.3 --- Cross-validation and training loop ---
        all_model_results_cv = {}
        all_model_results_ext = {}
        all_model_results_ext_last = {}

        for model_type in config.model_types_to_run:
            logger.info(f"\n{'='*60}")
            logger.info(f"------ 开始处理模型: {model_type.upper()} ------")
            logger.info(f"{'='*60}")

            all_fold_best_aucs = []
            all_fold_best_accs = []

            for fold_idx, fold_col in enumerate(config.fold_columns):
                # Train/validate this fold and collect its best metrics.
                best_val_auc, best_val_acc = train_fold(
                    model_type, fold_col, df, ext_loader, device
                )
                all_fold_best_aucs.append(best_val_auc)
                all_fold_best_accs.append(best_val_acc)

            # --- Cross-validation summary for the current model ---
            logger.info(f"\n{'='*50}")
            logger.info(f"--- {model_type.upper()} 5-折交叉验证 (内部) 总结 ---")
            logger.info(f"{'='*50}")

            mean_auc = np.nanmean(all_fold_best_aucs)
            std_auc = np.nanstd(all_fold_best_aucs)
            mean_accuracy = np.nanmean(all_fold_best_accs)
            std_accuracy = np.nanstd(all_fold_best_accs)

            logger.info(f"平均 最佳Val-AUC: {mean_auc:.4f} \u00b1 {std_auc:.4f}")
            logger.info(
                f"平均 最佳Val-Acc: {mean_accuracy:.4f} \u00b1 {std_accuracy:.4f}"
            )

            all_model_results_cv[model_type] = {
                "mean_auc": mean_auc,
                "std_auc": std_auc,
                "mean_accuracy": mean_accuracy,
                "std_accuracy": std_accuracy,
            }

        # 7.4 --- Final internal CV summary ---
        log_final_summary("最终所有模型 5-折 CV (内部验证) 对比", all_model_results_cv)

        # 7.5 --- Final external test ---
        if ext_loader is None:
            logger.info("\n跳过最终的外部测试，因为外部数据集未加载。")
        else:
            logger.info(f"\n{'='*60}")
            logger.info("------ 开始最终外部测试 ------")
            logger.info(f"{'='*60}")

            for model_type in config.model_types_to_run:
                # Evaluate the "best" checkpoints.
                results_best = evaluate_saved_model_set(
                    model_type, "_best", ext_loader, device
                )
                all_model_results_ext[model_type] = results_best

                # Evaluate the "last" checkpoints.
                results_last = evaluate_saved_model_set(
                    model_type, "_last", ext_loader, device
                )
                all_model_results_ext_last[model_type] = results_last

            # 7.6 --- Final external-test summary across all models ---
            log_final_summary(
                "外部测试最终结果汇总 (使用 最佳 模型)", all_model_results_ext
            )
            log_final_summary(
                "外部测试最终结果汇总 (使用 最后一个 Epoch 模型)",
                all_model_results_ext_last,
            )

        # Store this HParam run's results.
        run_summary = {
            "params": hparams,
            "cv_results": all_model_results_cv,
            "ext_best_results": all_model_results_ext,
            "ext_last_results": all_model_results_ext_last,
        }
        all_run_results.append(run_summary)

        logger.info(f"\n------ COMPLETED HPARAM RUN: {run_name} ------\n")

    # --- 3. Search finished: print the final summary ---
    logger.info(f"\n{'='*70}")
    logger.info("------ HYPERPARAMETER SEARCH COMPLETE ------")
    logger.info(f"共完成 {len(all_run_results)} 次 HParam runs。")
    logger.info(f"{'='*70}")

    # Find the best HParam run.
    # (Example: by CLAMModel's best external-test AUC.)
    try:
        best_run = max(
            all_run_results,
            key=lambda r: r["ext_best_results"]
            .get("CLAMModel", {})  # replace with the model you care about
            .get("mean_auc", -1.0),  # replace with the metric you care about
        )

        # NOTE(review): the selection key above uses "CLAMModel", but the
        # lookup below uses "MAEMILMODEL_CLSTOKEN" — if that model was not
        # run, this raises KeyError (caught by the except below). Confirm
        # which model is intended and make the two consistent.
        best_model = "MAEMILMODEL_CLSTOKEN"  # replace as needed
        best_metric = "mean_auc"  # replace as needed
        best_score = best_run["ext_best_results"][best_model][best_metric]

        logger.info(f"*** 最佳 HParam Run 总结 ***")
        logger.info(f"最佳参数: {best_run['params']}")
        logger.info(f"指标 ({best_model} Ext-Best {best_metric}): {best_score:.4f}")
        logger.info(f"详细结果: {best_run['ext_best_results'][best_model]}")

    except Exception as e:
        logger.error(f"无法确定最佳 HParam run: {e}")
        logger.error("请检查 all_run_results 列表和你的 key 函数。")

    logger.info(f"\n所有 HParam 训练和评估完成。主日志文件位于 `logs/` 目录中。")
    logger.info(f"TensorBoard 日志位于 `runs/` 目录中。")


if __name__ == "__main__":
    main()
