import os
import sys
import json
from dataclasses import dataclass, field

import torch
from datasets import load_from_disk
from tokenizers import Tokenizer
from transformers import (
    HfArgumentParser,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
    set_seed,
)

from src.hfco.configuration_transformer import TransformerConfig
from src.hfco.modeling_transformer import TranslationTransformerSeq2Seq
from src.hfco import (
    setup_logger, Seq2SeqDataCollator, SampleGenerator,
    calc_bleu_score
)

# --- Setup Logging ---
# Module-level logger configured by the project's setup_logger helper;
# shared by main() and the compute_metrics closure below.
logger = setup_logger(__name__)




@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    All architecture fields are forwarded verbatim into ``TransformerConfig``
    in ``main()``.  Field order is part of the generated ``__init__``
    signature, so append new fields rather than inserting them.
    """
    # Core Transformer architecture hyperparameters.
    d_model: int = field(default=512, metadata={"help": "Dimension of the model."})
    num_heads: int = field(default=8, metadata={"help": "Number of attention heads."})
    d_ff: int = field(default=2048, metadata={"help": "Dimension of the feed-forward layer."})
    num_layers: int = field(default=6, metadata={"help": "Number of encoder and decoder layers."})
    rope_theta: float = field(default=10000.0, metadata={"help": "RoPE theta value."})
    max_seq_len: int = field(default=512, metadata={"help": "Maximum sequence length."})
    share_embeddings: bool = field(default=True, metadata={"help": "Whether to share embeddings."})
    # Training/evaluation behavior knobs (not strictly "model" shape).
    label_smoothing: float = field(default=0.1, metadata={"help": "Label smoothing factor for loss calculation."})
    num_samples: int = field(default=10, metadata={"help": "Number of samples to generate and log during evaluation."})
    # --- Ablation Study Arguments ---
    use_rope: bool = field(default=True, metadata={"help": "Whether to use RoPE."})
    use_residual: bool = field(default=True, metadata={"help": "Whether to use residual connections."})
    norm_type: str = field(default="rms", metadata={"help": "Type of normalization layer ('rms' or 'layer')."})
    norm_position: str = field(default="pre", metadata={"help": "Position of the normalization layer ('pre' or 'post')."})


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    The train/val directories are expected to hold datasets saved with
    ``datasets.Dataset.save_to_disk`` containing ``src_ids``/``tgt_ids``
    columns (see ``main()``), and the tokenizer file must be loadable via
    ``tokenizers.Tokenizer.from_file``.
    """
    train_dir: str = field(
        default="data/processed/tokenized/train",
        metadata={"help": "The input training data directory (Arrow format)."}
    )
    val_dir: str = field(
        default="data/processed/tokenized/val",
        metadata={"help": "The input validation data directory (Arrow format)."}
    )
    tokenizer_path: str = field(
        default="data/processed/tokenizer.json",
        metadata={"help": "Path to the tokenizer file."}
    )




def main():
    """Train and/or evaluate a translation Transformer via HF Seq2SeqTrainer.

    Parses ``ModelArguments``, ``DataTrainingArguments`` and
    ``Seq2SeqTrainingArguments`` — from a single JSON file when invoked as
    ``script.py config.json``, otherwise from the command line — then builds
    the tokenizer, datasets and model, and runs training and/or evaluation
    according to ``--do_train`` / ``--do_eval``.

    Raises:
        ValueError: if the tokenizer lacks the [PAD]/[BOS]/[EOS] special
            tokens, or if the output directory is non-empty with no
            resumable checkpoint and ``--overwrite_output_dir`` is unset.
    """
    from transformers.trainer_utils import get_last_checkpoint

    # --- Parse Arguments ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # Single-JSON-file mode: all arguments come from that file.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # --- Post-process arguments for consistency ---
    if training_args.load_best_model_at_end:
        # load_best_model_at_end requires the evaluation strategy to match
        # the save strategy, otherwise the Trainer refuses to start.
        if training_args.eval_strategy != training_args.save_strategy:
            logger.warning(
                f"Overriding eval_strategy '{training_args.eval_strategy}' to match "
                f"save_strategy '{training_args.save_strategy}' for load_best_model_at_end."
            )
            training_args.eval_strategy = training_args.save_strategy

        # Also ensure that eval_steps is set if strategy is steps.
        if training_args.eval_strategy == "steps" and training_args.eval_steps is None:
            training_args.eval_steps = training_args.save_steps

    # --- Setup Output Directory / detect resumable checkpoint ---
    # BUGFIX: the previous code unconditionally raised on any non-empty
    # output_dir before looking for a checkpoint, which made resuming
    # impossible (a saved checkpoint is exactly what makes the directory
    # non-empty).  Detect the last checkpoint first and only raise when
    # there is genuinely nothing to resume from.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        # args_all.json is written by this script itself, so its presence
        # alone does not make the directory "dirty".
        leftovers = [f for f in os.listdir(training_args.output_dir) if f != "args_all.json"]
        if last_checkpoint is None and leftovers:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        if last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    os.makedirs(training_args.output_dir, exist_ok=True)

    # --- Set Seed ---
    set_seed(training_args.seed)
    logger.info(f"Seed set to {training_args.seed}")

    # Derive a run name when none was given (used by logging backends).
    if not getattr(training_args, "run_name", None):
        from datetime import datetime
        base = os.path.basename(training_args.output_dir.rstrip("/"))
        training_args.run_name = f"{base}-seed{training_args.seed}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
    logger.info(f"Run name set to {training_args.run_name}")

    # --- Load Tokenizer ---
    tokenizer = Tokenizer.from_file(data_args.tokenizer_path)
    pad_token_id = tokenizer.token_to_id("[PAD]")
    bos_token_id = tokenizer.token_to_id("[BOS]")
    eos_token_id = tokenizer.token_to_id("[EOS]")
    # Fail fast: the model config, collator and generation setup below all
    # rely on these ids being present.
    if pad_token_id is None or bos_token_id is None or eos_token_id is None:
        raise ValueError("Special tokens [PAD], [BOS], [EOS] must be in the tokenizer.")

    # --- Load Datasets ---
    logger.info(f"Loading datasets from {data_args.train_dir} and {data_args.val_dir}")
    train_dataset = load_from_disk(data_args.train_dir)
    val_dataset = load_from_disk(data_args.val_dir)
    train_dataset.set_format(type="torch", columns=["src_ids", "tgt_ids"])
    val_dataset.set_format(type="torch", columns=["src_ids", "tgt_ids"])
    logger.info(f"Train dataset size: {len(train_dataset)}, Val dataset size: {len(val_dataset)}")

    # --- Create Model ---
    config = TransformerConfig(
        d_model=model_args.d_model,
        num_heads=model_args.num_heads,
        d_ff=model_args.d_ff,
        num_layers=model_args.num_layers,
        rope_theta=model_args.rope_theta,
        max_seq_len=model_args.max_seq_len,
        vocab_size=tokenizer.get_vocab_size(),
        share_embeddings=model_args.share_embeddings,
        label_smoothing=model_args.label_smoothing,
        pad_token_id=pad_token_id,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        # --- Ablation Study Arguments ---
        use_rope=model_args.use_rope,
        use_residual=model_args.use_residual,
        norm_type=model_args.norm_type,
        norm_position=model_args.norm_position,
    )
    model = TranslationTransformerSeq2Seq(config)

    # --- Setup Generation Config ---
    model.generation_config.max_length = training_args.generation_max_length
    model.generation_config.num_beams = training_args.generation_num_beams
    # Use min_new_tokens to avoid immediate BOS->EOS degeneration (empty
    # generations).  The old check tested "is None" twice; once is enough.
    if getattr(model.generation_config, "min_new_tokens", None) is None:
        model.generation_config.min_new_tokens = 5

    # Decoding defaults tuned for translation.
    model.generation_config.length_penalty = 1.1
    model.generation_config.no_repeat_ngram_size = 3
    model.generation_config.early_stopping = True
    model.generation_config.repetition_penalty = 1.0
    model.generation_config.pad_token_id = pad_token_id
    model.generation_config.bos_token_id = bos_token_id
    model.generation_config.eos_token_id = eos_token_id
    model.generation_config.decoder_start_token_id = bos_token_id

    # --- Initialize Sample Generator ---
    sample_generator = SampleGenerator(
        model=model,
        tokenizer=tokenizer,
        val_dataset=val_dataset,
        pad_token_id=pad_token_id,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        num_samples=model_args.num_samples
    )

    # --- Define Metrics Computation ---
    def compute_metrics(eval_preds):
        """Compute BLEU on eval predictions and dump qualitative samples.

        NOTE: reads ``trainer`` from the enclosing scope; the closure is
        only invoked after the Trainer below has been constructed, so the
        forward reference is safe.
        """
        if isinstance(eval_preds, tuple):
            preds, labels = eval_preds
        else:
            preds = getattr(eval_preds, "predictions", None)
            labels = getattr(eval_preds, "label_ids", None)

        if preds is None or labels is None:
            logger.warning("No predictions or labels found in eval_preds")
            return {"bleu": 0.0}

        # Compute BLEU score using the shared utility function.
        bleu_score = calc_bleu_score(
            preds, labels, tokenizer, pad_token_id, bos_token_id, eos_token_id
        )
        metrics = {"bleu": bleu_score}

        # Generate and save translation samples for this evaluation step.
        current_step = trainer.state.global_step if trainer.state.global_step is not None else 0
        output_samples_dir = os.path.join(training_args.output_dir, "samples")

        logger.info(f"正在采样 {model_args.num_samples} 个翻译样本 (Step: {current_step})...")
        sample_filepath = sample_generator.generate_and_save_samples(
            output_samples_dir, current_step
        )
        logger.info(f"翻译样本已保存到: {sample_filepath}")

        # Add sample path to metrics for TensorBoard/MLflow logging.
        metrics["generated_samples_path"] = sample_filepath
        return metrics

    # --- Data Collator ---
    data_collator = Seq2SeqDataCollator(pad_token_id=pad_token_id)

    # --- Initialize Trainer ---
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        # `tokenizer=` is deprecated; padding is handled by the collator.
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    # --- Log system info ---
    logger.info("--- System Information ---")
    logger.info(f"PyTorch version: {torch.__version__}")
    if torch.cuda.is_available():
        logger.info(f"CUDA version: {torch.version.cuda}")
        logger.info(f"GPU: {torch.cuda.get_device_name(0)}")

    # --- Save merged arguments (for reproducibility) ---
    all_args = {
        "model_args": model_args.__dict__,
        "data_args": data_args.__dict__,
        "training_args": training_args.to_dict(),
    }
    with open(os.path.join(training_args.output_dir, "args_all.json"), "w") as f:
        json.dump(all_args, f, indent=4)

    # --- Training ---
    if training_args.do_train:
        logger.info("*** Train ***")
        # BUGFIX: an explicit --resume_from_checkpoint now takes precedence
        # over the auto-detected checkpoint; the old code logged that it
        # considered the flag but always passed the auto-detected one.
        checkpoint = training_args.resume_from_checkpoint
        if checkpoint is None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # --- Evaluation ---
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

# Script entry point: run training/evaluation only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
